/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2012 Milan Jurik. All rights reserved.
 * Copyright 2020 Oxide Computer Company
 */

/*
 * interface used by unwind support to query frame descriptor info
 */

#ifndef _LIBCRUN_
#include "lint.h"
#endif
#include <sys/types.h>
#include "stack_unwind.h"
#include "unwind_context.h"
#include "reg_num.h"

enum CFA_ops {
	DW_CFA_nop = 0x00,
	DW_CFA_set_loc = 0x01,
	DW_CFA_advance_loc1 = 0x02,
	DW_CFA_advance_loc2 = 0x03,
	DW_CFA_advance_loc4 = 0x04,
	DW_CFA_offset_extended = 0x05,
	DW_CFA_restore_extended = 0x06,
	DW_CFA_undefined = 0x07,
	DW_CFA_same_value = 0x08,
	DW_CFA_register = 0x09,
	DW_CFA_remember_state = 0x0a,
	DW_CFA_restore_state = 0x0b,
	DW_CFA_def_cfa = 0x0c,
	DW_CFA_def_cfa_register = 0x0d,
	DW_CFA_def_cfa_offset = 0x0e,
	DW_CFA_def_cfa_expression = 0x0f,
	DW_CFA_expression = 0x10,
	DW_CFA_offset_extended_sf = 0x11,
	DW_CFA_def_cfa_sf = 0x12,
	DW_CFA_def_cfa_offset_sf = 0x13,
	/* skip 9 values */
	DW_CFA_SUNW_advance_loc = 0x1d,
	DW_CFA_SUNW_offset = 0x1e,
	DW_CFA_SUNW_restore = 0x1f,
	DW_CFA_advance_loc = 0x40,
	DW_CFA_offset = 0x80,
	DW_CFA_restore = 0xc0
};
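
/*
 * DW_CFA_advance_loc, DW_CFA_offset, and DW_CFA_restore are DWARF's
 * "primary" opcodes: only the top two bits select the operation and the
 * low six bits carry the first operand.  separate_op() below remaps
 * them onto the DW_CFA_SUNW_* values so that every opcode can index the
 * operand table uniformly.
 */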

struct operation_desc {
	enum operand_desc op1;
	enum operand_desc op2;
};

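/*
 * Operand descriptors indexed by (remapped) CFA opcode.  The nine
 * {NO_OPR, NO_OPR} entries correspond to the unassigned opcodes 0x14
 * through 0x1c skipped in the enum above.
 */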
struct operation_desc cfa_operations[] = {
	{NO_OPR, NO_OPR},	/* DW_CFA_nop */
	{ADDR, NO_OPR},		/* DW_CFA_set_loc - address */
	{UNUM8, NO_OPR},	/* DW_CFA_advance_loc1 - delta */
	{UNUM16, NO_OPR},	/* DW_CFA_advance_loc2 - delta */
	{UNUM32, NO_OPR},	/* DW_CFA_advance_loc4 - delta */
	{ULEB128, ULEB128_FAC},	/* DW_CFA_offset_extended - reg, */
				/* data factored offset */
	{ULEB128, NO_OPR},	/* DW_CFA_restore_extended - register */
	{ULEB128, NO_OPR},	/* DW_CFA_undefined - register */
	{ULEB128, NO_OPR},	/* DW_CFA_same_value - register */
	{ULEB128, ULEB128_SREG}, /* DW_CFA_register - register, register */
	{NO_OPR, NO_OPR},	/* DW_CFA_remember_state */
	{NO_OPR, NO_OPR},	/* DW_CFA_restore_state */
	{ULEB128_SREG, ULEB128}, /* DW_CFA_def_cfa - register, offset */
	{ULEB128_SREG, NO_OPR},	/* DW_CFA_def_cfa_register - register */
	{ULEB128, NO_OPR},	/* DW_CFA_def_cfa_offset - offset */
	{BLOCK, NO_OPR},	/* DW_CFA_def_cfa_expression - expression */
	{ULEB128, BLOCK},	/* DW_CFA_expression - reg, expression */
	{ULEB128, SLEB128_FAC},	/* DW_CFA_offset_extended_sf - reg, */
				/* data factored offset */
	{ULEB128_SREG, SLEB128_FAC},	/* DW_CFA_def_cfa_sf - reg, */
					/* data factored offset */
	{SLEB128_FAC, NO_OPR},	/* DW_CFA_def_cfa_offset_sf - */
				/* data factored offset */
	{NO_OPR, NO_OPR},
	{NO_OPR, NO_OPR},
	{NO_OPR, NO_OPR},
	{NO_OPR, NO_OPR},
	{NO_OPR, NO_OPR},
	{NO_OPR, NO_OPR},
	{NO_OPR, NO_OPR},
	{NO_OPR, NO_OPR},
	{NO_OPR, NO_OPR},
	{UNUM6_CFAC, NO_OPR},	/* DW_CFA_SUNW_advance_loc - */
				/* code factored delta */
	{UNUM6, ULEB128_FAC},	/* DW_CFA_SUNW_offset - reg */
				/* data factored offset */
	{UNUM6, NO_OPR}		/* DW_CFA_SUNW_restore */
};

uint64_t interpret_ops(void *data, void *data_end,
		ptrdiff_t reloc, uint64_t current_loc, uint64_t pc,
		struct register_state f_state[],
		struct register_state f_start_state[],
		int daf, int caf, int enc);

/*
 * The entry-point state of old_ctx defines the current
 * suspended state of the caller (in new_ctx). If the old info
 * will not be referred to again, old_ctx == new_ctx is OK.
 */
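/*
 * Only the amd64 ABI callee-saved registers (%rbx, %rbp, %r12-%r15),
 * the stack pointer, and the return address are propagated; the other
 * general-purpose registers are not tracked by this unwinder.
 */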
void
_Unw_Propagate_Registers(struct _Unwind_Context *old_ctx,
	struct _Unwind_Context *new_ctx)
{
	new_ctx->current_regs[SP_RSP] = old_ctx->cfa;
	new_ctx->pc = old_ctx->ra;
	new_ctx->current_regs[FP_RBP] = old_ctx->entry_regs[FP_RBP];
	new_ctx->current_regs[GPR_RBX] = old_ctx->entry_regs[GPR_RBX];
	new_ctx->current_regs[EIR_R12] = old_ctx->entry_regs[EIR_R12];
	new_ctx->current_regs[EIR_R13] = old_ctx->entry_regs[EIR_R13];
	new_ctx->current_regs[EIR_R14] = old_ctx->entry_regs[EIR_R14];
	new_ctx->current_regs[EIR_R15] = old_ctx->entry_regs[EIR_R15];
}

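/*
 * The fix_* helpers below turn the register_state rules computed from
 * the CFA instructions into concrete values: the CFA itself, the return
 * address, and the caller's value of each tracked register.
 */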
void
fix_cfa(struct _Unwind_Context *ctx, struct register_state *rs)
{
	switch (rs[CF_ADDR].rule) {
	default:
		ctx->cfa = 0;
		break;
	case register_rule:	/* CFA = offset + source_reg */
		ctx->cfa = (ctx->current_regs)[rs[CF_ADDR].source_reg] +
		    rs[CF_ADDR].offset;
		break;
	case constant_rule:	/* CFA = offset */
		ctx->cfa = rs[CF_ADDR].offset;
		break;
	case indirect_rule:	/* CFA = *(offset + source_reg) */
		ctx->cfa = *(uint64_t *)
		    (ctx->current_regs[rs[CF_ADDR].source_reg] +
		    rs[CF_ADDR].offset);
		break;
	}
	ctx->entry_regs[SP_RSP] = ctx->cfa;
}

void
fix_ra(struct _Unwind_Context *ctx, struct register_state *rs)
{
	switch (rs[RET_ADD].rule) {
	case undefined_rule:
	default:
		ctx->ra = 0;
		break;
	case offset_rule:	/* RA = *(offset + CFA) */
		ctx->ra = *(uint64_t *)(ctx->cfa + rs[RET_ADD].offset);
		break;
	case register_rule:	/* RA = offset + source_reg */
		ctx->ra = ctx->current_regs[rs[RET_ADD].source_reg] +
		    rs[RET_ADD].offset;
		break;
	case indirect_rule:	/* RA = *(offset + source_reg) */
		ctx->ra = *(uint64_t *)
		    (ctx->current_regs[rs[RET_ADD].source_reg] +
		    rs[RET_ADD].offset);
		break;
	}
}

void
fix_reg(struct _Unwind_Context *ctx, struct register_state *rs, int index)
{
	switch (rs[index].rule) {
	default:
		ctx->entry_regs[index] = ctx->current_regs[index];
		break;
	case offset_rule:	/* target_reg = *(offset + CFA) */
		ctx->entry_regs[index] = *(uint64_t *)
		    (ctx->cfa + rs[index].offset);
		break;
	case is_offset_rule:	/* target_reg = offset + CFA */
		ctx->entry_regs[index] = ctx->cfa + rs[index].offset;
		break;
	case register_rule:	/* target_reg = offset + source_reg */
		ctx->entry_regs[index] =
		    ctx->current_regs[rs[index].source_reg] +
		    rs[index].offset;
		break;
	case constant_rule:	/* target_reg = offset */
		ctx->entry_regs[index] = rs[index].offset;
		break;
	case indirect_rule:	/* target_reg = *(offset + source_reg) */
		ctx->entry_regs[index] = *(uint64_t *)
		    (ctx->current_regs[rs[index].source_reg] +
		    rs[index].offset);
		break;
	}
}


/*
 * Input: f->{cie_ops, cie_ops_end, fde_ops, fde_ops_end}
 *			+ location of DWARF opcodes
 *		  ctx->{current_regs, pc}
 *			+ register values and pc at point of suspension
 * Output: ctx->{entry_regs, cfa, ra}
 *			+ register values when function was entered
 *			+ Canonical Frame Address
 *			+ return address
 */
uint64_t
_Unw_Rollback_Registers(struct eh_frame_fields *f,
	struct _Unwind_Context *ctx)
{
	/* GPRs, RET_ADD, and CF_ADDR */
	struct register_state func_state[18];
	struct register_state func_start_state[18];
	struct register_state nop = { 0, undefined_rule, 0 };
	int i;
	uint64_t  first_pc;

	if (f == 0) {
		/*
		 * When there is no FDE we assume all routines have a frame
		 * pointer and pass back the existing callee-saved registers.
		 */
		if (ctx->current_regs[FP_RBP] < ctx->current_regs[SP_RSP]) {
			ctx->cfa = 0;
			ctx->ra = 0;
			ctx->pc = 0;
			return (0);
		}
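		/*
		 * Standard frame layout: the saved %rbp is at 0(%rbp) and
		 * the return address at 8(%rbp), so the caller's CFA is
		 * %rbp + 16.
		 */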
		ctx->entry_regs[FP_RBP] = ((uint64_t *)
		    (ctx->current_regs[FP_RBP]))[0];
		ctx->cfa = ctx->current_regs[FP_RBP] + 16;
		ctx->entry_regs[SP_RSP] = ctx->cfa;
		ctx->entry_regs[GPR_RBX] = ctx->current_regs[GPR_RBX];
		ctx->entry_regs[EIR_R12] = ctx->current_regs[EIR_R12];
		ctx->entry_regs[EIR_R13] = ctx->current_regs[EIR_R13];
		ctx->entry_regs[EIR_R14] = ctx->current_regs[EIR_R14];
		ctx->entry_regs[EIR_R15] = ctx->current_regs[EIR_R15];
		ctx->ra = ((uint64_t *)ctx->cfa)[-1];
		return (ctx->cfa);
	}

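	/*
	 * Interpret the CIE's initial instructions to establish the
	 * starting rules for the function, then apply the FDE instructions
	 * forward until the suspended pc is reached.
	 */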
	for (i = 0; i < 18; i++)
		func_start_state[i] = nop;
	first_pc = interpret_ops(f->cie_ops, f->cie_ops_end,
	    f->cie_reloc, ctx->func, ctx->pc, func_start_state, 0,
	    f->data_align, f->code_align, f->code_enc);
	for (i = 0; i < 18; i++)
		func_state[i] = func_start_state[i];
	(void) interpret_ops(f->fde_ops, f->fde_ops_end,
	    f->fde_reloc, first_pc, ctx->pc, func_state, func_start_state,
	    f->data_align, f->code_align, f->code_enc);

	fix_cfa(ctx, func_state);
	if (ctx->cfa < ctx->current_regs[SP_RSP]) {
		ctx->cfa = 0;
		ctx->ra = 0;
		ctx->pc = 0;
		return (0);
	}
	fix_ra(ctx, func_state);
	fix_reg(ctx, func_state, GPR_RBX);
	fix_reg(ctx, func_state, FP_RBP);
	fix_reg(ctx, func_state, EIR_R12);
	fix_reg(ctx, func_state, EIR_R13);
	fix_reg(ctx, func_state, EIR_R14);
	fix_reg(ctx, func_state, EIR_R15);

	return (ctx->cfa);
}

/*
 * Remap the two-bit (high-order) opcodes into a separate range, or
 * return the eight-bit opcode and advance the pointer past it.
 */
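/*
 * For the two-bit opcodes the pointer is intentionally left on the
 * opcode byte: its low six bits hold the first operand and are re-read
 * by _Unw_get_val() as UNUM6/UNUM6_CFAC.
 */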
static enum CFA_ops
separate_op(void **pp)
{
	uint8_t c = **((uint8_t **)pp);

	if (c & 0xc0) {
		switch (c & 0xc0) {
		case DW_CFA_advance_loc:
			return (DW_CFA_SUNW_advance_loc);
		case DW_CFA_offset:
			return (DW_CFA_SUNW_offset);
		case DW_CFA_restore:
			return (DW_CFA_SUNW_restore);
		}
	} else {
		*pp = (void *)((*(intptr_t *)pp) + 1);
	}
	return (c);
}

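/*
 * Decode an unsigned LEB128 value and advance *datap past it.
 */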
static uint64_t
extractuleb(void **datap)
{
	uint8_t *data = *(uint8_t **)datap;
	uint64_t res = 0;
	int more = 1;
	int shift = 0;
	int val;

	while (more) {
		val = (*data) & 0x7f;
		more = ((*data++) & 0x80) >> 7;
		res = res | ((uint64_t)val << shift);
		shift += 7;
	}
	*datap = (void *)data;
	return (res);
}

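/*
 * Decode a signed LEB128 value and advance *datap past it; the final
 * shift pair sign-extends the result from the number of bits read.
 */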
static uint64_t
extractsleb(void **datap)
{
	uint8_t *data = *datap;
	int64_t res = 0;
	int more = 1;
	int shift = 0;
	unsigned int val;

	while (more) {
		val = (*data) & 0x7f;
		more = ((*data++) & 0x80) >> 7;
		res = res | ((uint64_t)val << shift);
		shift += 7;
	}
	*datap = (void *)data;
	res = (res << (64 - shift)) >> (64 - shift);
	return (res);
}

static uint64_t get_encoded_val(void **datap, ptrdiff_t reloc, int enc);

/*
 * do all field extractions needed for CFA operands and encoded FDE
 * fields
 */
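/*
 * daf and caf are the CIE's data and code alignment factors, applied to
 * the *_FAC and *_CFAC operand forms; enc is the FDE pointer encoding
 * used for ADDR and SIZE operands.
 */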
uint64_t
_Unw_get_val(void **datap, ptrdiff_t reloc,
	enum operand_desc opr, int daf, int caf, int enc)
{
	intptr_t data = (intptr_t)*datap;
	uint64_t res;
	char *dp, *rp;

	switch (opr) {
	case NO_OPR:
		res = 0;
		break;
	case ULEB128_FAC:
		return (daf * extractuleb(datap));
	case ULEB128:
		return (extractuleb(datap));
	case ULEB128_SREG:
		res = (uint64_t)(*((uint8_t *)data));
		data += 1;
		switch (res) {
			/* verify that register is one which is being tracked */
		case GPR_RBX:
		case FP_RBP:
		case SP_RSP:
		case EIR_R12:
		case EIR_R13:
		case EIR_R14:
		case EIR_R15:
			break;
		default:
			res = BAD_REG;
			break;
		}
		break;
	case UNUM6:
		res = (uint64_t)(0x3f & *((uint8_t *)data));
		data += 1;
		break;
	case UNUM8:
		res = (uint64_t)(*((uint8_t *)data));
		data += 1;
		break;
	case UNUM16:
		res = (uint64_t)(*((uint16_t *)data));
		data += 2;
		break;
	case UNUM32:
		res = (uint64_t)(*((uint32_t *)data));
		data += 4;
		break;
	case UNUM6_CFAC:
		res = caf * (uint64_t)(0x3f & *((uint8_t *)data));
		data += 1;
		break;
	case UNUM8_CFAC:
		res = caf * (uint64_t)(*((uint8_t *)data));
		data += 1;
		break;
	case UNUM16_CFAC:
		res = caf * (uint64_t)(*((uint16_t *)data));
		data += 2;
		break;
	case UNUM32_CFAC:
		res = caf * (uint64_t)(*((uint32_t *)data));
		data += 4;
		break;
	case UNUM64:
		res = (uint64_t)(*((uint64_t *)data));
		data += 8;
		break;
	case SNUM8:
		res = (uint64_t)(int64_t)(*((int8_t *)data));
		data += 1;
		break;
	case SNUM16:
		res = (uint64_t)(int64_t)(*((int16_t *)data));
		data += 2;
		break;
	case SNUM32:
		res = (uint64_t)(int64_t)(*((int32_t *)data));
		data += 4;
		break;
	case SNUM64:
		res = (uint64_t)(*((int64_t *)data));
		data += 8;
		break;
	case SLEB128_FAC:
		return (daf * extractsleb(datap));
	case SLEB128:
		return (extractsleb(datap));
	case ZTSTRING:
		/* max length of augmentation string is 4 */
		rp = (char *)&res;
		dp = (char *)data;
		while (*rp++ = *dp++)
			;
		data = (intptr_t)dp;
		break;
	case ADDR:
		return (get_encoded_val(datap, reloc, enc));
	case SIZE:
		return (get_encoded_val(datap, reloc, enc & 0x7));
	case BLOCK:
		res = 0;  /* not implemented */
		break;
	}
	*datap = (void *)data;
	return (res);
}

static uint64_t
get_encoded_val(void **datap, ptrdiff_t reloc, int enc)
{
	const uint8_t val = enc & 0xf;
	const uint8_t rel = enc & 0x70;
	const boolean_t indirect = (enc & 0x80) != 0;
	intptr_t loc = ((intptr_t)*datap) + reloc;
	uint64_t res = 0;

	/*
	 * Calculate the offset represented by the pointer encoding.  These
	 * DWARF extensions are defined in the Core Generic document set of the
	 * LSB specification.
	 */
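	/*
	 * The low nibble selects the value format: DW_EH_PE_uleb128 (0x01),
	 * udata2/4/8 (0x02-0x04), sleb128 (0x09), and sdata2/4/8 (0x0a-0x0c).
	 */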
	switch (val) {
	case 0x01:
		res = _Unw_get_val(datap, reloc, ULEB128, 1, 1, 0);
		break;
	case 0x02:
		res = _Unw_get_val(datap, reloc, UNUM16, 1, 1, 0);
		break;
	case 0x03:
		res = _Unw_get_val(datap, reloc, UNUM32, 1, 1, 0);
		break;
	case 0x04:
		res = _Unw_get_val(datap, reloc, UNUM64, 1, 1, 0);
		break;
	case 0x09:
		res = _Unw_get_val(datap, reloc, SLEB128, 1, 1, 0);
		break;
	case 0x0a:
		res = _Unw_get_val(datap, reloc, SNUM16, 1, 1, 0);
		break;
	case 0x0b:
		res = _Unw_get_val(datap, reloc, SNUM32, 1, 1, 0);
		break;
	case 0x0c:
		res = _Unw_get_val(datap, reloc, SNUM64, 1, 1, 0);
		break;
	}
	switch (rel) {
	case 0x00:
		break;
	case 0x10:
		/* DW_EH_PE_pcrel */
		if (res != 0)
			res += loc;
		break;
	default:
		/* remainder not implemented */
		break;
	}

	/*
	 * The high bit of the pointer encoding (DW_EH_PE_indirect = 0x80)
	 * indicates that a pointer-sized value should be read from the
	 * calculated address as the final result.
	 *
	 * Shockingly, this is not documented in any specification to date, but
	 * has been implemented in various unwind implementations through
	 * reverse-engineering of GCC.
	 */
	if (indirect) {
		void *addr = (void *)(uintptr_t)res;

		/*
		 * Built only for amd64, we can count on a 64-bit pointer size
		 * for the indirect handling.
		 */
		res = _Unw_get_val(&addr, reloc, UNUM64, 1, 1, 0);
	}

	return (res);
}


int interpret_op(void **datap, ptrdiff_t reloc,
	uint64_t *reached_pc_p, uint64_t pc,
	struct register_state f_state[],
	struct register_state f_start_state[],
	int daf, int caf, int enc);

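/*
 * Run CFA instructions from data up to data_end, or until an advance
 * would move past the target pc; returns the last location reached.
 */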
uint64_t
interpret_ops(void *data, void *data_end,
	ptrdiff_t reloc,
	uint64_t start_pc, uint64_t pc,
	struct register_state f_state[],
	struct register_state f_start_state[],
	int daf, int caf, int enc)
{
	void *d = data;
	uint64_t reached_pc = start_pc;

	while (d < data_end) {
		if (interpret_op(&d, reloc, &reached_pc, pc,
		    f_state, f_start_state, daf, caf, enc))
			break;
	}
	return (reached_pc);
}

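/*
 * Decode and apply a single CFA instruction, updating the rule array in
 * f_state; returns nonzero once advancing the location would pass the
 * target pc, which stops interpretation.
 */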
int
interpret_op(void **datap, ptrdiff_t reloc,
	uint64_t *reached_pc_p, uint64_t pc,
	struct register_state f_state[],
	struct register_state f_start_state[],
	int daf, int caf, int enc)
{
	enum CFA_ops op = separate_op(datap);
	enum operand_desc opr1 = (cfa_operations[op]).op1;
	enum operand_desc opr2 = (cfa_operations[op]).op2;

	uint64_t val1 = _Unw_get_val(datap, reloc, opr1, daf, caf, enc);
	uint64_t val2 = _Unw_get_val(datap, reloc, opr2, daf, caf, enc);
	if ((opr1 == ULEB128_SREG && val1 == BAD_REG) ||
	    (opr2 == ULEB128_SREG && val2 == BAD_REG))
		return (0);
	switch (op) {
	case DW_CFA_nop:
		break;
	case DW_CFA_set_loc:
		if (val1 > pc)
			return (1);
		*reached_pc_p = val1;
		break;
	case DW_CFA_advance_loc1:
	case DW_CFA_advance_loc2:
	case DW_CFA_advance_loc4:
		if (*reached_pc_p + val1 > pc)
			return (1);
		*reached_pc_p += val1;
		break;
	case DW_CFA_offset_extended:
		f_state[val1].rule = offset_rule;
		f_state[val1].source_reg = CF_ADDR;
		f_state[val1].offset = val2;
		break;
	case DW_CFA_restore_extended:
		if (f_start_state != 0)
			f_state[val1] = f_start_state[val1];
		break;
	case DW_CFA_undefined:
		f_state[val1].rule = undefined_rule;
		break;
	case DW_CFA_same_value:
		f_state[val1].rule = same_value_rule;
		break;
	case DW_CFA_register:
		f_state[val1].rule = register_rule;
		f_state[val1].source_reg = val2;
		f_state[val1].offset = 0;
		break;
	case DW_CFA_remember_state:
		break;
	case DW_CFA_restore_state:
		break;
	case DW_CFA_def_cfa:
		f_state[CF_ADDR].rule = register_rule;
		f_state[CF_ADDR].source_reg = val1;
		f_state[CF_ADDR].offset = val2;
		break;
	case DW_CFA_def_cfa_register:
		f_state[CF_ADDR].source_reg = val1;
		break;
	case DW_CFA_def_cfa_offset:
		f_state[CF_ADDR].offset = val1;
		break;
	case DW_CFA_def_cfa_expression:
		break;
	case DW_CFA_expression:
		break;
	case DW_CFA_offset_extended_sf:
		f_state[val1].rule = offset_rule;
		f_state[val1].source_reg = CF_ADDR;
		f_state[val1].offset = val2;
		break;
	case DW_CFA_def_cfa_sf:
		f_state[CF_ADDR].rule = register_rule;
		f_state[CF_ADDR].source_reg = val1;
		f_state[CF_ADDR].offset = val2;
		break;
	case DW_CFA_def_cfa_offset_sf:
		f_state[CF_ADDR].offset = val1;
		break;
	case DW_CFA_SUNW_advance_loc:
		if (*reached_pc_p + val1 > pc)
			return (1);
		*reached_pc_p += val1;
		break;
	case DW_CFA_SUNW_offset:
		f_state[val1].rule = offset_rule;
		f_state[val1].source_reg = CF_ADDR;
		f_state[val1].offset = val2;
		break;
	case DW_CFA_SUNW_restore:
		if (f_start_state != 0)
			f_state[val1] = f_start_state[val1];
		break;
	}
	return (0);
}