xref: /titanic_41/usr/src/uts/sparc/v9/os/simulator.c (revision b9175c69691c8949bec97fb8f689b7d1efdb05bb)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /* common code with bug fixes from original version in trap.c */
27 
28 #include <sys/param.h>
29 #include <sys/types.h>
30 #include <sys/systm.h>
31 #include <sys/archsystm.h>
32 #include <sys/vmsystm.h>
33 #include <sys/fpu/fpusystm.h>
34 #include <sys/fpu/fpu_simulator.h>
35 #include <sys/inline.h>
36 #include <sys/debug.h>
37 #include <sys/privregs.h>
38 #include <sys/machpcb.h>
39 #include <sys/simulate.h>
40 #include <sys/proc.h>
41 #include <sys/cmn_err.h>
42 #include <sys/stack.h>
43 #include <sys/watchpoint.h>
44 #include <sys/trap.h>
45 #include <sys/machtrap.h>
46 #include <sys/mman.h>
47 #include <sys/asi.h>
48 #include <sys/copyops.h>
49 #include <vm/as.h>
50 #include <vm/page.h>
51 #include <sys/model.h>
52 #include <vm/seg_vn.h>
53 #include <sys/byteorder.h>
54 
/* Test bit 13 of a load/store instruction: the "i" (immediate) bit. */
#define	IS_IBIT_SET(x)	(x & 0x2000)
/* op == 2, op3 == 0x36: the VIS graphics-instruction opcode space. */
#define	IS_VIS1(op, op3)(op == 2 && op3 == 0x36)
/* op == 2, op3 == 0x34 or 0x35: floating-point operate instructions. */
#define	IS_FLOAT_QUAD_OP(op, op3)(op == 2 && (op3 == 0x34 ||	\
		op3 == 0x35))
/*
 * LDDFA/STDFA with an ASI above ASI_SNFL: these ASIs select the
 * partial-store / short-floating-point load/store forms.
 */
#define	IS_PARTIAL_OR_SHORT_FLOAT_LD_ST(op, op3, asi)		\
		(op == 3 && (op3 == IOP_V8_LDDFA ||		\
		op3 == IOP_V8_STDFA) &&	asi > ASI_SNFL)

/* Set non-zero (e.g. via the debugger) to get printf tracing below. */
static int aligndebug = 0;
64 
65 /*
66  * For the sake of those who must be compatible with unaligned
67  * architectures, users can link their programs to use a
68  * corrective trap handler that will fix unaligned references
69  * a special trap #6 (T_FIX_ALIGN) enables this 'feature'.
70  * Returns 1 for success, 0 for failure.
71  */
72 
73 int
74 do_unaligned(struct regs *rp, caddr_t *badaddr)
75 {
76 	uint_t	inst, op3, asi = 0;
77 	uint_t	rd, rs1, rs2;
78 	int	sz, nf = 0, ltlend = 0;
79 	int	floatflg;
80 	int	fsrflg;
81 	int	immflg;
82 	int	lddstdflg;
83 	caddr_t	addr;
84 	uint64_t val;
85 	union {
86 		uint64_t	l[2];
87 		uint32_t	i[4];
88 		uint16_t	s[8];
89 		uint8_t		c[16];
90 	} data;
91 
92 	ASSERT(USERMODE(rp->r_tstate));
93 	inst = fetch_user_instr((caddr_t)rp->r_pc);
94 
95 	op3 = (inst >> 19) & 0x3f;
96 	rd = (inst >> 25) & 0x1f;
97 	rs1 = (inst >> 14) & 0x1f;
98 	rs2 = inst & 0x1f;
99 	floatflg = (inst >> 24) & 1;
100 	immflg = (inst >> 13) & 1;
101 	lddstdflg = fsrflg = 0;
102 
103 	/* if not load or store do nothing */
104 	if ((inst >> 30) != 3)
105 		return (0);
106 
107 	/* if ldstub or swap, do nothing */
108 	if ((inst & 0xc1680000) == 0xc0680000)
109 		return (0);
110 
111 	/* if cas/casx, do nothing */
112 	if ((inst & 0xc1e00000) == 0xc1e00000)
113 		return (0);
114 
115 	if (floatflg) {
116 		switch ((inst >> 19) & 3) {	/* map size bits to a number */
117 		case 0: sz = 4;
118 			break;			/* ldf{a}/stf{a} */
119 		case 1: fsrflg = 1;
120 			if (rd == 0)
121 				sz = 4;		/* ldfsr/stfsr */
122 			else  if (rd == 1)
123 				sz = 8;		/* ldxfsr/stxfsr */
124 			else
125 				return (SIMU_ILLEGAL);
126 			break;
127 		case 2: sz = 16;
128 			break;		/* ldqf{a}/stqf{a} */
129 		case 3: sz = 8;
130 			break;		/* lddf{a}/stdf{a} */
131 		}
132 		/*
133 		 * Fix to access extra double register encoding plus
134 		 * compensate to access the correct fpu_dreg.
135 		 */
136 		if ((sz > 4) && (fsrflg == 0)) {
137 			if ((rd & 1) == 1)
138 				rd = (rd & 0x1e) | 0x20;
139 			rd = rd >> 1;
140 			if ((sz == 16) && ((rd & 0x1) != 0))
141 				return (SIMU_ILLEGAL);
142 		}
143 	} else {
144 		int sz_bits = (inst >> 19) & 0xf;
145 		switch (sz_bits) {		/* map size bits to a number */
146 		case 0:				/* lduw{a} */
147 		case 4:				/* stw{a} */
148 		case 8:				/* ldsw{a} */
149 		case 0xf:			/* swap */
150 			sz = 4; break;
151 		case 1:				/* ldub{a} */
152 		case 5:				/* stb{a} */
153 		case 9:				/* ldsb{a} */
154 		case 0xd:			/* ldstub */
155 			sz = 1; break;
156 		case 2:				/* lduh{a} */
157 		case 6:				/* sth{a} */
158 		case 0xa:			/* ldsh{a} */
159 			sz = 2; break;
160 		case 3:				/* ldd{a} */
161 		case 7:				/* std{a} */
162 			lddstdflg = 1;
163 			sz = 8; break;
164 		case 0xb:			/* ldx{a} */
165 		case 0xe:			/* stx{a} */
166 			sz = 8; break;
167 		}
168 	}
169 
170 
171 	/* only support primary and secondary asi's */
172 	if ((op3 >> 4) & 1) {
173 		if (immflg) {
174 			asi = (uint_t)(rp->r_tstate >> TSTATE_ASI_SHIFT) &
175 			    TSTATE_ASI_MASK;
176 		} else {
177 			asi = (inst >> 5) & 0xff;
178 		}
179 		switch (asi) {
180 		case ASI_P:
181 		case ASI_S:
182 			break;
183 		case ASI_PNF:
184 		case ASI_SNF:
185 			nf = 1;
186 			break;
187 		case ASI_PL:
188 		case ASI_SL:
189 			ltlend = 1;
190 			break;
191 		case ASI_PNFL:
192 		case ASI_SNFL:
193 			ltlend = 1;
194 			nf = 1;
195 			break;
196 		default:
197 			return (0);
198 		}
199 		/*
200 		 * Non-faulting stores generate a data_access_exception trap,
201 		 * according to the Spitfire manual, which should be signaled
202 		 * as an illegal instruction trap, because it can't be fixed.
203 		 */
204 		if ((nf) && ((op3 == IOP_V8_STQFA) || (op3 == IOP_V8_STDFA)))
205 			return (SIMU_ILLEGAL);
206 	}
207 
208 	if (aligndebug) {
209 		printf("unaligned access at %p, instruction: 0x%x\n",
210 		    (void *)rp->r_pc, inst);
211 		printf("type %s", (((inst >> 21) & 1) ? "st" : "ld"));
212 		if (((inst >> 21) & 1) == 0)
213 			printf(" %s", (((inst >> 22) & 1) ?
214 			    "signed" : "unsigned"));
215 		printf(" asi 0x%x size %d immflg %d\n", asi, sz, immflg);
216 		printf("rd = %d, op3 = 0x%x, rs1 = %d, rs2 = %d, imm13=0x%x\n",
217 		    rd, op3, rs1, rs2, (inst & 0x1fff));
218 	}
219 
220 	(void) flush_user_windows_to_stack(NULL);
221 	if (getreg(rp, rs1, &val, badaddr))
222 		return (SIMU_FAULT);
223 	addr = (caddr_t)val;		/* convert to 32/64 bit address */
224 	if (aligndebug)
225 		printf("addr 1 = %p\n", (void *)addr);
226 
227 	/* check immediate bit and use immediate field or reg (rs2) */
228 	if (immflg) {
229 		int imm;
230 		imm  = inst & 0x1fff;		/* mask out immediate field */
231 		imm <<= 19;			/* sign extend it */
232 		imm >>= 19;
233 		addr += imm;			/* compute address */
234 	} else {
235 		if (getreg(rp, rs2, &val, badaddr))
236 			return (SIMU_FAULT);
237 		addr += val;
238 	}
239 
240 	/*
241 	 * If this is a 32-bit program, chop the address accordingly.  The
242 	 * intermediate uintptr_t casts prevent warnings under a certain
243 	 * compiler, and the temporary 32 bit storage is intended to force
244 	 * proper code generation and break up what would otherwise be a
245 	 * quadruple cast.
246 	 */
247 	if (curproc->p_model == DATAMODEL_ILP32) {
248 		caddr32_t addr32 = (caddr32_t)(uintptr_t)addr;
249 		addr = (caddr_t)(uintptr_t)addr32;
250 	}
251 
252 	if (aligndebug)
253 		printf("addr 2 = %p\n", (void *)addr);
254 
255 	if (addr >= curproc->p_as->a_userlimit) {
256 		*badaddr = addr;
257 		goto badret;
258 	}
259 
260 	/* a single bit differentiates ld and st */
261 	if ((inst >> 21) & 1) {			/* store */
262 		if (floatflg) {
263 			klwp_id_t lwp = ttolwp(curthread);
264 			kfpu_t *fp = lwptofpu(lwp);
265 			/* Ensure fp has been enabled */
266 			if (fpu_exists) {
267 				if (!(_fp_read_fprs() & FPRS_FEF))
268 					fp_enable();
269 			} else {
270 				if (!fp->fpu_en)
271 					fp_enable();
272 			}
273 			/* if fpu_exists read fpu reg */
274 			if (fpu_exists) {
275 				if (fsrflg) {
276 					_fp_read_pfsr(&data.l[0]);
277 				} else {
278 					if (sz == 4) {
279 						data.i[0] = 0;
280 						_fp_read_pfreg(
281 						    (unsigned *)&data.i[1], rd);
282 					}
283 					if (sz >= 8)
284 						_fp_read_pdreg(
285 						    &data.l[0], rd);
286 					if (sz == 16)
287 						_fp_read_pdreg(
288 						    &data.l[1], rd+1);
289 				}
290 			} else {
291 				if (fsrflg) {
292 					/* Clear reserved bits, set version=7 */
293 					fp->fpu_fsr &= ~0x30301000;
294 					fp->fpu_fsr |= 0xE0000;
295 					data.l[0] = fp->fpu_fsr;
296 				} else {
297 					if (sz == 4) {
298 						data.i[0] = 0;
299 						data.i[1] =
300 						    (unsigned)fp->
301 						    fpu_fr.fpu_regs[rd];
302 					}
303 					if (sz >= 8)
304 						data.l[0] =
305 						    fp->fpu_fr.fpu_dregs[rd];
306 					if (sz == 16)
307 						data.l[1] =
308 						    fp->fpu_fr.fpu_dregs[rd+1];
309 				}
310 			}
311 		} else {
312 			if (lddstdflg) {		/* combine the data */
313 				if (getreg(rp, rd, &data.l[0], badaddr))
314 					return (SIMU_FAULT);
315 				if (getreg(rp, rd+1, &data.l[1], badaddr))
316 					return (SIMU_FAULT);
317 				if (ltlend) {
318 					/*
319 					 * For STD, each 32-bit word is byte-
320 					 * swapped individually.  For
321 					 * simplicity we don't want to do that
322 					 * below, so we swap the words now to
323 					 * get the desired result in the end.
324 					 */
325 					data.i[0] = data.i[3];
326 				} else {
327 					data.i[0] = data.i[1];
328 					data.i[1] = data.i[3];
329 				}
330 			} else {
331 				if (getreg(rp, rd, &data.l[0], badaddr))
332 					return (SIMU_FAULT);
333 			}
334 		}
335 
336 		if (aligndebug) {
337 			if (sz == 16) {
338 				printf("data %x %x %x %x\n",
339 				    data.i[0], data.i[1], data.i[2], data.c[3]);
340 			} else {
341 				printf("data %x %x %x %x %x %x %x %x\n",
342 				    data.c[0], data.c[1], data.c[2], data.c[3],
343 				    data.c[4], data.c[5], data.c[6], data.c[7]);
344 			}
345 		}
346 
347 		if (ltlend) {
348 			if (sz == 1) {
349 				if (xcopyout_little(&data.c[7], addr,
350 				    (size_t)sz) != 0)
351 					goto badret;
352 			} else if (sz == 2) {
353 				if (xcopyout_little(&data.s[3], addr,
354 				    (size_t)sz) != 0)
355 					goto badret;
356 			} else if (sz == 4) {
357 				if (xcopyout_little(&data.i[1], addr,
358 				    (size_t)sz) != 0)
359 					goto badret;
360 			} else {
361 				if (xcopyout_little(&data.l[0], addr,
362 				    (size_t)sz) != 0)
363 					goto badret;
364 			}
365 		} else {
366 			if (sz == 1) {
367 				if (copyout(&data.c[7], addr, (size_t)sz) == -1)
368 					goto badret;
369 			} else if (sz == 2) {
370 				if (copyout(&data.s[3], addr, (size_t)sz) == -1)
371 					goto badret;
372 			} else if (sz == 4) {
373 				if (copyout(&data.i[1], addr, (size_t)sz) == -1)
374 					goto badret;
375 			} else {
376 				if (copyout(&data.l[0], addr, (size_t)sz) == -1)
377 					goto badret;
378 			}
379 		}
380 	} else {				/* load */
381 		if (sz == 1) {
382 			if (ltlend) {
383 				if (xcopyin_little(addr, &data.c[7],
384 				    (size_t)sz) != 0) {
385 					if (nf)
386 						data.c[7] = 0;
387 					else
388 						goto badret;
389 				}
390 			} else {
391 				if (copyin(addr, &data.c[7],
392 				    (size_t)sz) == -1) {
393 					if (nf)
394 						data.c[7] = 0;
395 					else
396 						goto badret;
397 				}
398 			}
399 			/* if signed and the sign bit is set extend it */
400 			if (((inst >> 22) & 1) && ((data.c[7] >> 7) & 1)) {
401 				data.i[0] = (uint_t)-1;	/* extend sign bit */
402 				data.s[2] = (ushort_t)-1;
403 				data.c[6] = (uchar_t)-1;
404 			} else {
405 				data.i[0] = 0;	/* clear upper 32+24 bits */
406 				data.s[2] = 0;
407 				data.c[6] = 0;
408 			}
409 		} else if (sz == 2) {
410 			if (ltlend) {
411 				if (xcopyin_little(addr, &data.s[3],
412 				    (size_t)sz) != 0) {
413 					if (nf)
414 						data.s[3] = 0;
415 					else
416 						goto badret;
417 				}
418 			} else {
419 				if (copyin(addr, &data.s[3],
420 				    (size_t)sz) == -1) {
421 					if (nf)
422 						data.s[3] = 0;
423 					else
424 						goto badret;
425 				}
426 			}
427 			/* if signed and the sign bit is set extend it */
428 			if (((inst >> 22) & 1) && ((data.s[3] >> 15) & 1)) {
429 				data.i[0] = (uint_t)-1;	/* extend sign bit */
430 				data.s[2] = (ushort_t)-1;
431 			} else {
432 				data.i[0] = 0;	/* clear upper 32+16 bits */
433 				data.s[2] = 0;
434 			}
435 		} else if (sz == 4) {
436 			if (ltlend) {
437 				if (xcopyin_little(addr, &data.i[1],
438 				    (size_t)sz) != 0) {
439 					if (!nf)
440 						goto badret;
441 					data.i[1] = 0;
442 				}
443 			} else {
444 				if (copyin(addr, &data.i[1],
445 				    (size_t)sz) == -1) {
446 					if (!nf)
447 						goto badret;
448 					data.i[1] = 0;
449 				}
450 			}
451 			/* if signed and the sign bit is set extend it */
452 			if (((inst >> 22) & 1) && ((data.i[1] >> 31) & 1)) {
453 				data.i[0] = (uint_t)-1;	/* extend sign bit */
454 			} else {
455 				data.i[0] = 0;	/* clear upper 32 bits */
456 			}
457 		} else {
458 			if (ltlend) {
459 				if (xcopyin_little(addr, &data.l[0],
460 				    (size_t)sz) != 0) {
461 					if (!nf)
462 						goto badret;
463 					data.l[0] = 0;
464 				}
465 			} else {
466 				if (copyin(addr, &data.l[0],
467 				    (size_t)sz) == -1) {
468 					if (!nf)
469 						goto badret;
470 					data.l[0] = 0;
471 				}
472 			}
473 		}
474 
475 		if (aligndebug) {
476 			if (sz == 16) {
477 				printf("data %x %x %x %x\n",
478 				    data.i[0], data.i[1], data.i[2], data.c[3]);
479 			} else {
480 				printf("data %x %x %x %x %x %x %x %x\n",
481 				    data.c[0], data.c[1], data.c[2], data.c[3],
482 				    data.c[4], data.c[5], data.c[6], data.c[7]);
483 			}
484 		}
485 
486 		if (floatflg) {		/* if fpu_exists write fpu reg */
487 			klwp_id_t lwp = ttolwp(curthread);
488 			kfpu_t *fp = lwptofpu(lwp);
489 			/* Ensure fp has been enabled */
490 			if (fpu_exists) {
491 				if (!(_fp_read_fprs() & FPRS_FEF))
492 					fp_enable();
493 			} else {
494 				if (!fp->fpu_en)
495 					fp_enable();
496 			}
497 			/* if fpu_exists read fpu reg */
498 			if (fpu_exists) {
499 				if (fsrflg) {
500 					_fp_write_pfsr(&data.l[0]);
501 				} else {
502 					if (sz == 4)
503 						_fp_write_pfreg(
504 						    (unsigned *)&data.i[1], rd);
505 					if (sz >= 8)
506 						_fp_write_pdreg(
507 						    &data.l[0], rd);
508 					if (sz == 16)
509 						_fp_write_pdreg(
510 						    &data.l[1], rd+1);
511 				}
512 			} else {
513 				if (fsrflg) {
514 					fp->fpu_fsr = data.l[0];
515 				} else {
516 					if (sz == 4)
517 						fp->fpu_fr.fpu_regs[rd] =
518 						    (unsigned)data.i[1];
519 					if (sz >= 8)
520 						fp->fpu_fr.fpu_dregs[rd] =
521 						    data.l[0];
522 					if (sz == 16)
523 						fp->fpu_fr.fpu_dregs[rd+1] =
524 						    data.l[1];
525 				}
526 			}
527 		} else {
528 			if (lddstdflg) {		/* split the data */
529 				if (ltlend) {
530 					/*
531 					 * For LDD, each 32-bit word is byte-
532 					 * swapped individually.  We didn't
533 					 * do that above, but this will give
534 					 * us the desired result.
535 					 */
536 					data.i[3] = data.i[0];
537 				} else {
538 					data.i[3] = data.i[1];
539 					data.i[1] = data.i[0];
540 				}
541 				data.i[0] = 0;
542 				data.i[2] = 0;
543 				if (putreg(&data.l[0], rp, rd, badaddr) == -1)
544 					goto badret;
545 				if (putreg(&data.l[1], rp, rd+1, badaddr) == -1)
546 					goto badret;
547 			} else {
548 				if (putreg(&data.l[0], rp, rd, badaddr) == -1)
549 					goto badret;
550 			}
551 		}
552 	}
553 	return (SIMU_SUCCESS);
554 badret:
555 	return (SIMU_FAULT);
556 }
557 
558 
/*
 * Simulate an LDD/STD (or LDDA/STDA), the deprecated V9 doubleword
 * instructions that move 64 bits between memory and the even/odd
 * integer register pair rd/rd+1 (32 bits each).  Works for both user
 * and kernel mode; ASI_AIUP/AIUS force a user-view access.
 * Returns SIMU_SUCCESS, SIMU_FAULT, SIMU_ILLEGAL, SIMU_UNALIGN, or
 * whatever do_unaligned() returns for a misaligned address.
 */
int
simulate_lddstd(struct regs *rp, caddr_t *badaddr)
{
	uint_t	inst, op3, asi = 0;
	uint_t	rd, rs1, rs2;
	int	nf = 0, ltlend = 0, usermode;
	int	immflg;
	uint64_t reven;		/* even half of the pair (rd) */
	uint64_t rodd;		/* odd half of the pair (rd+1) */
	caddr_t	addr;
	uint64_t val;
	uint64_t data;

	usermode = USERMODE(rp->r_tstate);

	/* fetch the trapping instruction */
	if (usermode)
		inst = fetch_user_instr((caddr_t)rp->r_pc);
	else
		inst = *(uint_t *)rp->r_pc;

	/* decode instruction fields */
	op3 = (inst >> 19) & 0x3f;
	rd = (inst >> 25) & 0x1f;
	rs1 = (inst >> 14) & 0x1f;
	rs2 = inst & 0x1f;
	immflg = (inst >> 13) & 1;

	/* make the in/local registers current before getreg/putreg */
	if (USERMODE(rp->r_tstate))
		(void) flush_user_windows_to_stack(NULL);
	else
		flush_windows();

	if ((op3 >> 4) & 1) {		/* is this LDDA/STDA? */
		/* with the i bit set the ASI comes from %asi in TSTATE */
		if (immflg) {
			asi = (uint_t)(rp->r_tstate >> TSTATE_ASI_SHIFT) &
			    TSTATE_ASI_MASK;
		} else {
			asi = (inst >> 5) & 0xff;
		}
		switch (asi) {
		case ASI_P:
		case ASI_S:
			break;
		case ASI_PNF:
		case ASI_SNF:
			nf = 1;		/* non-faulting load */
			break;
		case ASI_PL:
		case ASI_SL:
			ltlend = 1;	/* little-endian access */
			break;
		case ASI_PNFL:
		case ASI_SNFL:
			ltlend = 1;
			nf = 1;
			break;
		case ASI_AIUP:
		case ASI_AIUS:
			usermode = 1;	/* "as if user" access */
			break;
		case ASI_AIUPL:
		case ASI_AIUSL:
			usermode = 1;
			ltlend = 1;
			break;
		default:
			return (SIMU_ILLEGAL);
		}
	}

	/* effective address = rs1 + (simm13 or rs2) */
	if (getreg(rp, rs1, &val, badaddr))
		return (SIMU_FAULT);
	addr = (caddr_t)val;		/* convert to 32/64 bit address */

	/* check immediate bit and use immediate field or reg (rs2) */
	if (immflg) {
		int imm;
		imm  = inst & 0x1fff;		/* mask out immediate field */
		imm <<= 19;			/* sign extend it */
		imm >>= 19;
		addr += imm;			/* compute address */
	} else {
		if (getreg(rp, rs2, &val, badaddr))
			return (SIMU_FAULT);
		addr += val;
	}

	/*
	 * T_UNIMP_LDD and T_UNIMP_STD are higher priority than
	 * T_ALIGNMENT.  So we have to make sure that the address is
	 * kosher before trying to use it, because the hardware hasn't
	 * checked it for us yet.
	 */
	if (((uintptr_t)addr & 0x7) != 0) {
		if (curproc->p_fixalignment)
			return (do_unaligned(rp, badaddr));
		else
			return (SIMU_UNALIGN);
	}

	/*
	 * If this is a 32-bit program, chop the address accordingly.  The
	 * intermediate uintptr_t casts prevent warnings under a certain
	 * compiler, and the temporary 32 bit storage is intended to force
	 * proper code generation and break up what would otherwise be a
	 * quadruple cast.
	 */
	if (curproc->p_model == DATAMODEL_ILP32 && usermode) {
		caddr32_t addr32 = (caddr32_t)(uintptr_t)addr;
		addr = (caddr_t)(uintptr_t)addr32;
	}

	if ((inst >> 21) & 1) {			/* store */
		if (getreg(rp, rd, &reven, badaddr))
			return (SIMU_FAULT);
		if (getreg(rp, rd+1, &rodd, badaddr))
			return (SIMU_FAULT);
		/* little-endian: each 32-bit word is byte-swapped itself */
		if (ltlend) {
			reven = BSWAP_32(reven);
			rodd  = BSWAP_32(rodd);
		}
		data = (reven << 32) | rodd;
		if (usermode) {
			if (suword64_nowatch(addr, data) == -1)
				return (SIMU_FAULT);
		} else {
			*(uint64_t *)addr = data;
		}
	} else {				/* load */
		if (usermode) {
			if (fuword64_nowatch(addr, &data)) {
				if (nf)
					data = 0;	/* non-faulting: zero */
				else
					return (SIMU_FAULT);
			}
		} else
			data = *(uint64_t *)addr;

		/* split the doubleword into the even/odd register pair */
		reven = (data >> 32);
		rodd  = (uint64_t)(uint32_t)data;
		if (ltlend) {
			reven = BSWAP_32(reven);
			rodd  = BSWAP_32(rodd);
		}

		if (putreg(&reven, rp, rd, badaddr) == -1)
			return (SIMU_FAULT);
		if (putreg(&rodd, rp, rd+1, badaddr) == -1)
			return (SIMU_FAULT);
	}
	return (SIMU_SUCCESS);
}
711 
712 
713 /*
714  * simulate popc
715  */
716 static int
717 simulate_popc(struct regs *rp, caddr_t *badaddr, uint_t inst)
718 {
719 	uint_t	rd, rs2, rs1;
720 	uint_t	immflg;
721 	uint64_t val, cnt = 0;
722 
723 	rd = (inst >> 25) & 0x1f;
724 	rs1 = (inst >> 14) & 0x1f;
725 	rs2 = inst & 0x1f;
726 	immflg = (inst >> 13) & 1;
727 
728 	if (rs1 > 0)
729 		return (SIMU_ILLEGAL);
730 
731 	(void) flush_user_windows_to_stack(NULL);
732 
733 	/* check immediate bit and use immediate field or reg (rs2) */
734 	if (immflg) {
735 		int64_t imm;
736 		imm  = inst & 0x1fff;		/* mask out immediate field */
737 		imm <<= 51;			/* sign extend it */
738 		imm >>= 51;
739 		if (imm != 0) {
740 			for (cnt = 0; imm != 0; imm &= imm-1)
741 				cnt++;
742 		}
743 	} else {
744 		if (getreg(rp, rs2, &val, badaddr))
745 			return (SIMU_FAULT);
746 		if (val != 0) {
747 			for (cnt = 0; val != 0; val &= val-1)
748 				cnt++;
749 		}
750 	}
751 
752 	if (putreg(&cnt, rp, rd, badaddr) == -1)
753 		return (SIMU_FAULT);
754 
755 	return (SIMU_SUCCESS);
756 }
757 
758 /*
759  * simulate mulscc
760  */
static int
simulate_mulscc(struct regs *rp, caddr_t *badaddr, uint_t inst)
{
	/*
	 * Emulate the deprecated MULScc multiply-step instruction:
	 * one step of a Booth-style 32-bit multiply using %y and the
	 * integer condition codes.  Returns SIMU_SUCCESS or SIMU_FAULT,
	 * SIMU_ILLEGAL for a bad encoding.
	 */
	uint32_t	s1, s2;		/* the two 32-bit addends */
	uint32_t	c, d, v;	/* carry, sum, overflow bit vectors */
	uint_t		rd, rs1;
	int64_t		d64;
	uint64_t	ud64;
	uint64_t	drs1;

	(void) flush_user_windows_to_stack(NULL);

	if ((inst >> 13) & 1) {		/* immediate */
		d64 = inst & 0x1fff;
		d64 <<= 51;		/* sign extend it */
		d64 >>= 51;
	} else {
		uint_t		rs2;
		uint64_t	drs2;

		/* bits 5-12 are reserved and must be zero */
		if (inst & 0x1fe0) {
			return (SIMU_ILLEGAL);
		}
		rs2 = inst & 0x1f;
		if (getreg(rp, rs2, &drs2, badaddr)) {
			return (SIMU_FAULT);
		}
		d64 = (int64_t)drs2;
	}

	rs1 = (inst >> 14) & 0x1f;
	if (getreg(rp, rs1, &drs1, badaddr)) {
		return (SIMU_FAULT);
	}
	/* icc.n xor icc.v */
	s1 = ((rp->r_tstate & TSTATE_IN) >> (TSTATE_CCR_SHIFT + 3)) ^
	    ((rp->r_tstate & TSTATE_IV) >> (TSTATE_CCR_SHIFT + 1));
	/* s1 = rs1 shifted right one, with (N xor V) shifted into bit 31 */
	s1 = (s1 << 31) | (((uint32_t)drs1) >> 1);

	/* add the other operand only if the low bit of %y is set */
	if (rp->r_y & 1) {
		s2 = (uint32_t)d64;
	} else {
		s2 = 0;
	}
	d = s1 + s2;

	ud64 = (uint64_t)d;

	/* set the icc flags */
	/* per-bit overflow and carry vectors; bit 31 is the flag value */
	v = (s1 & s2 & ~d) | (~s1 & ~s2 & d);
	c = (s1 & s2) | (~d & (s1 | s2));
	rp->r_tstate &= ~TSTATE_ICC;
	rp->r_tstate |= (uint64_t)((c >> 31) & 1) << (TSTATE_CCR_SHIFT + 0);
	rp->r_tstate |= (uint64_t)((v >> 31) & 1) << (TSTATE_CCR_SHIFT + 1);
	rp->r_tstate |= (uint64_t)(d ? 0 : 1) << (TSTATE_CCR_SHIFT + 2);
	rp->r_tstate |= (uint64_t)((d >> 31) & 1) << (TSTATE_CCR_SHIFT + 3);

	/* propagate the just-computed icc.c into bit 32 of the result */
	if (rp->r_tstate & TSTATE_IC) {
		ud64 |= (1ULL << 32);
	}

	/* set the xcc flags */
	rp->r_tstate &= ~TSTATE_XCC;
	if (ud64 == 0) {
		rp->r_tstate |= TSTATE_XZ;
	}

	rd = (inst >> 25) & 0x1f;
	if (putreg(&ud64, rp, rd, badaddr)) {
		return (SIMU_FAULT);
	}

	/* shift %y right one bit, filling from the low bit of rs1 */
	d64 = (drs1 << 32) | (uint32_t)rp->r_y;
	d64 >>= 1;
	rp->r_y = (uint32_t)d64;

	return (SIMU_SUCCESS);
}
839 
840 /*
841  * simulate unimplemented instructions (popc, ldqf{a}, stqf{a})
842  */
int
simulate_unimp(struct regs *rp, caddr_t *badaddr)
{
	/*
	 * Handle a user-mode illegal-instruction trap: emulate popc and
	 * mulscc, dispatch FP/quad/VIS instructions to the FP simulator,
	 * route quad loads/stores to do_unaligned(), and for 32-bit (v8)
	 * programs scrub "dirty" reserved/ignored bits from otherwise
	 * valid instructions by patching the user's text page in place.
	 * Returns SIMU_RETRY, SIMU_ILLEGAL, or SIMU_FAULT.
	 */
	uint_t	inst, optype, op3, asi;
	uint_t	rs1, rd;
	uint_t	ignor, i;
	machpcb_t *mpcb = lwptompcb(ttolwp(curthread));
	int	nomatch = 0;
	caddr_t	addr = (caddr_t)rp->r_pc;
	struct as *as;
	caddr_t	ka;
	pfn_t	pfnum;
	page_t *pp;
	proc_t *p = ttoproc(curthread);
	struct seg *mapseg;
	struct segvn_data *svd;

	ASSERT(USERMODE(rp->r_tstate));
	inst = fetch_user_instr(addr);
	/* -1 means the instruction could not be fetched at all */
	if (inst == (uint_t)-1) {
		mpcb->mpcb_illexcaddr = addr;
		mpcb->mpcb_illexcinsn = (uint32_t)-1;
		return (SIMU_ILLEGAL);
	}

	/*
	 * When fixing dirty v8 instructions there's a race if two processors
	 * are executing the dirty executable at the same time.  If one
	 * cleans the instruction as the other is executing it the second
	 * processor will see a clean instruction when it comes through this
	 * code and will return SIMU_ILLEGAL.  To work around the race
	 * this code will keep track of the last illegal instruction seen
	 * by each lwp and will only take action if the illegal instruction
	 * is repeatable.
	 */
	if (addr != mpcb->mpcb_illexcaddr ||
	    inst != mpcb->mpcb_illexcinsn)
		nomatch = 1;
	mpcb->mpcb_illexcaddr = addr;
	mpcb->mpcb_illexcinsn = inst;

	/* instruction fields */
	i = (inst >> 13) & 0x1;
	rd = (inst >> 25) & 0x1f;
	optype = (inst >> 30) & 0x3;
	op3 = (inst >> 19) & 0x3f;
	ignor = (inst >> 5) & 0xff;
	/* with the i bit set the effective ASI comes from TSTATE */
	if (IS_IBIT_SET(inst)) {
		asi = (uint32_t)((rp->r_tstate >> TSTATE_ASI_SHIFT) &
		    TSTATE_ASI_MASK);
	} else {
		asi = ignor;
	}

	/* FP, quad and VIS instructions go to the FP simulator */
	if (IS_VIS1(optype, op3) ||
	    IS_PARTIAL_OR_SHORT_FLOAT_LD_ST(optype, op3, asi) ||
	    IS_FLOAT_QUAD_OP(optype, op3)) {
		klwp_t *lwp = ttolwp(curthread);
		kfpu_t *fp = lwptofpu(lwp);
		if (fpu_exists) {
			if (!(_fp_read_fprs() & FPRS_FEF))
				fp_enable();
			_fp_read_pfsr(&fp->fpu_fsr);
		} else {
			if (!fp->fpu_en)
				fp_enable();
		}
		fp_precise(rp);
		return (SIMU_RETRY);
	}

	if (optype == 2 && op3 == IOP_V8_POPC) {
		return (simulate_popc(rp, badaddr, inst));
	} else if (optype == 3 && op3 == IOP_V8_POPC) {
		return (SIMU_ILLEGAL);
	} else if (optype == OP_V8_ARITH && op3 == IOP_V8_MULScc) {
		return (simulate_mulscc(rp, badaddr, inst));
	}

	/* quad FP loads/stores are simulated by the alignment handler */
	if (optype == OP_V8_LDSTR) {
		if (op3 == IOP_V8_LDQF || op3 == IOP_V8_LDQFA ||
		    op3 == IOP_V8_STQF || op3 == IOP_V8_STQFA)
			return (do_unaligned(rp, badaddr));
	}

	/* first time at this pc/instruction: retry to close the race above */
	if (nomatch)
		return (SIMU_RETRY);

	/*
	 * The rest of the code handles v8 binaries with instructions
	 * that have dirty (non-zero) bits in reserved or 'ignored'
	 * fields; these will cause core dumps on v9 machines.
	 *
	 * We only clean dirty instructions in 32-bit programs (ie, v8)
	 * running on SPARCv9 processors.  True v9 programs are forced
	 * to use the instruction set as intended.
	 */
	if (lwp_getdatamodel(curthread->t_lwp) != DATAMODEL_ILP32)
		return (SIMU_ILLEGAL);
	switch (optype) {
	case OP_V8_BRANCH:
	case OP_V8_CALL:
		return (SIMU_ILLEGAL);	/* these don't have ignored fields */
		/*NOTREACHED*/
	case OP_V8_ARITH:
		switch (op3) {
		case IOP_V8_RETT:
			if (rd == 0 && !(i == 0 && ignor))
				return (SIMU_ILLEGAL);
			if (rd)
				inst &= ~(0x1f << 25);
			if (i == 0 && ignor)
				inst &= ~(0xff << 5);
			break;
		case IOP_V8_TCC:
			if (i == 0 && ignor != 0) {
				inst &= ~(0xff << 5);
			} else if (i == 1 && (((inst >> 7) & 0x3f) != 0)) {
				inst &= ~(0x3f << 7);
			} else {
				return (SIMU_ILLEGAL);
			}
			break;
		case IOP_V8_JMPL:
		case IOP_V8_RESTORE:
		case IOP_V8_SAVE:
			/*
			 * NOTE(review): op3 can never equal IOP_V8_RETT
			 * inside this case list, so the first half of the
			 * condition is dead; only (i == 0 && ignor) matters.
			 */
			if ((op3 == IOP_V8_RETT && rd) ||
			    (i == 0 && ignor)) {
				inst &= ~(0xff << 5);
			} else {
				return (SIMU_ILLEGAL);
			}
			break;
		case IOP_V8_FCMP:
			if (rd == 0)
				return (SIMU_ILLEGAL);
			inst &= ~(0x1f << 25);
			break;
		case IOP_V8_RDASR:
			rs1 = ((inst >> 14) & 0x1f);
			if (rs1 == 1 || (rs1 >= 7 && rs1 <= 14)) {
				/*
				 * The instruction specifies an invalid
				 * state register - better bail out than
				 * "fix" it when we're not sure what was
				 * intended.
				 */
				return (SIMU_ILLEGAL);
			}
				/*
				 * Note: this case includes the 'stbar'
				 * instruction (rs1 == 15 && i == 0).
				 */
				if ((ignor = (inst & 0x3fff)) != 0)
					inst &= ~(0x3fff);
			break;
		case IOP_V8_SRA:
		case IOP_V8_SRL:
		case IOP_V8_SLL:
			if (ignor == 0)
				return (SIMU_ILLEGAL);
			inst &= ~(0xff << 5);
			break;
		case IOP_V8_ADD:
		case IOP_V8_AND:
		case IOP_V8_OR:
		case IOP_V8_XOR:
		case IOP_V8_SUB:
		case IOP_V8_ANDN:
		case IOP_V8_ORN:
		case IOP_V8_XNOR:
		case IOP_V8_ADDC:
		case IOP_V8_UMUL:
		case IOP_V8_SMUL:
		case IOP_V8_SUBC:
		case IOP_V8_UDIV:
		case IOP_V8_SDIV:
		case IOP_V8_ADDcc:
		case IOP_V8_ANDcc:
		case IOP_V8_ORcc:
		case IOP_V8_XORcc:
		case IOP_V8_SUBcc:
		case IOP_V8_ANDNcc:
		case IOP_V8_ORNcc:
		case IOP_V8_XNORcc:
		case IOP_V8_ADDCcc:
		case IOP_V8_UMULcc:
		case IOP_V8_SMULcc:
		case IOP_V8_SUBCcc:
		case IOP_V8_UDIVcc:
		case IOP_V8_SDIVcc:
		case IOP_V8_TADDcc:
		case IOP_V8_TSUBcc:
		case IOP_V8_TADDccTV:
		case IOP_V8_TSUBccTV:
		case IOP_V8_MULScc:
		case IOP_V8_WRASR:
		case IOP_V8_FLUSH:
			if (i != 0 || ignor == 0)
				return (SIMU_ILLEGAL);
			inst &= ~(0xff << 5);
			break;
		default:
			return (SIMU_ILLEGAL);
		}
		break;
	case OP_V8_LDSTR:
		switch (op3) {
		case IOP_V8_STFSR:
		case IOP_V8_LDFSR:
			if (rd == 0 && !(i == 0 && ignor))
				return (SIMU_ILLEGAL);
			if (rd)
				inst &= ~(0x1f << 25);
			if (i == 0 && ignor)
				inst &= ~(0xff << 5);
			break;
		default:
			if (optype == OP_V8_LDSTR && !IS_LDST_ALT(op3) &&
			    i == 0 && ignor)
				inst &= ~(0xff << 5);
			else
				return (SIMU_ILLEGAL);
			break;
		}
		break;
	default:
		return (SIMU_ILLEGAL);
	}

	/* 'inst' is now the cleaned instruction; patch it into the text */
	as = p->p_as;

	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	mapseg = as_findseg(as, (caddr_t)rp->r_pc, 0);
	ASSERT(mapseg != NULL);
	svd = (struct segvn_data *)mapseg->s_data;

	/*
	 * We only create COW page for MAP_PRIVATE mappings.
	 */
	SEGVN_LOCK_ENTER(as, &svd->lock, RW_READER);
	if ((svd->type & MAP_TYPE) & MAP_SHARED) {
		SEGVN_LOCK_EXIT(as, &svd->lock);
		AS_LOCK_EXIT(as, &as->a_lock);
		return (SIMU_ILLEGAL);
	}
	SEGVN_LOCK_EXIT(as, &svd->lock);
	AS_LOCK_EXIT(as, &as->a_lock);

	/*
	 * A "flush" instruction using the user PC's vaddr will not work
	 * here, at least on Spitfire. Instead we create a temporary kernel
	 * mapping to the user's text page, then modify and flush that.
	 * Break COW by locking user page.
	 */
	if (as_fault(as->a_hat, as, (caddr_t)(rp->r_pc & PAGEMASK), PAGESIZE,
	    F_SOFTLOCK, S_READ))
		return (SIMU_FAULT);

	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	pfnum = hat_getpfnum(as->a_hat, (caddr_t)rp->r_pc);
	AS_LOCK_EXIT(as, &as->a_lock);
	if (pf_is_memory(pfnum)) {
		pp = page_numtopp_nolock(pfnum);
		ASSERT(pp == NULL || PAGE_LOCKED(pp));
	} else {
		/* not ordinary memory: undo the softlock and bail */
		(void) as_fault(as->a_hat, as, (caddr_t)(rp->r_pc & PAGEMASK),
		    PAGESIZE, F_SOFTUNLOCK, S_READ);
		return (SIMU_FAULT);
	}

	/* write the cleaned instruction through a kernel mapping and flush */
	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	ka = ppmapin(pp, PROT_READ|PROT_WRITE, (caddr_t)rp->r_pc);
	*(uint_t *)(ka + (uintptr_t)(rp->r_pc % PAGESIZE)) = inst;
	doflush(ka + (uintptr_t)(rp->r_pc % PAGESIZE));
	ppmapout(ka);
	AS_LOCK_EXIT(as, &as->a_lock);

	(void) as_fault(as->a_hat, as, (caddr_t)(rp->r_pc & PAGEMASK),
	    PAGESIZE, F_SOFTUNLOCK, S_READ);
	return (SIMU_RETRY);
}
1125 
1126 /*
1127  * Get the value of a register for instruction simulation
1128  * by using the regs or window structure pointers.
1129  * Return 0 for success, and -1 for failure.  If there is a failure,
1130  * save the faulting address using badaddr pointer.
1131  * We have 64 bit globals and outs, and 32 or 64 bit ins and locals.
1132  * Don't truncate globals/outs for 32 bit programs, for v8+ support.
1133  */
1134 int
1135 getreg(struct regs *rp, uint_t reg, uint64_t *val, caddr_t *badaddr)
1136 {
1137 	uint64_t *rgs, *sp;
1138 	int rv = 0;
1139 
1140 	rgs = (uint64_t *)&rp->r_ps;		/* globals and outs */
1141 	sp = (uint64_t *)rp->r_sp;		/* ins and locals */
1142 	if (reg == 0) {
1143 		*val = 0;
1144 	} else if (reg < 16) {
1145 		*val = rgs[reg];
1146 	} else if (IS_V9STACK(sp)) {
1147 		uint64_t *rw = (uint64_t *)((uintptr_t)sp + V9BIAS64);
1148 		uint64_t *addr = (uint64_t *)&rw[reg - 16];
1149 		uint64_t res;
1150 
1151 		if (USERMODE(rp->r_tstate)) {
1152 			if (fuword64_nowatch(addr, &res) == -1) {
1153 				*badaddr = (caddr_t)addr;
1154 				rv = -1;
1155 			}
1156 		} else {
1157 			res = *addr;
1158 		}
1159 		*val = res;
1160 	} else {
1161 		caddr32_t sp32 = (caddr32_t)(uintptr_t)sp;
1162 		uint32_t *rw = (uint32_t *)(uintptr_t)sp32;
1163 		uint32_t *addr = (uint32_t *)&rw[reg - 16];
1164 		uint32_t res;
1165 
1166 		if (USERMODE(rp->r_tstate)) {
1167 			if (fuword32_nowatch(addr, &res) == -1) {
1168 				*badaddr = (caddr_t)addr;
1169 				rv = -1;
1170 			}
1171 		} else {
1172 			res = *addr;
1173 		}
1174 		*val = (uint64_t)res;
1175 	}
1176 	return (rv);
1177 }
1178 
1179 /*
1180  * Set the value of a register after instruction simulation
1181  * by using the regs or window structure pointers.
1182  * Return 0 for succes -1 failure.
1183  * save the faulting address using badaddr pointer.
1184  * We have 64 bit globals and outs, and 32 or 64 bit ins and locals.
1185  * Don't truncate globals/outs for 32 bit programs, for v8+ support.
1186  */
1187 int
1188 putreg(uint64_t	*data, struct regs *rp, uint_t reg, caddr_t *badaddr)
1189 {
1190 	uint64_t *rgs, *sp;
1191 	int rv = 0;
1192 
1193 	rgs = (uint64_t *)&rp->r_ps;		/* globals and outs */
1194 	sp = (uint64_t *)rp->r_sp;		/* ins and locals */
1195 	if (reg == 0) {
1196 		return (0);
1197 	} else if (reg < 16) {
1198 		rgs[reg] = *data;
1199 	} else if (IS_V9STACK(sp)) {
1200 		uint64_t *rw = (uint64_t *)((uintptr_t)sp + V9BIAS64);
1201 		uint64_t *addr = (uint64_t *)&rw[reg - 16];
1202 		uint64_t res;
1203 
1204 		if (USERMODE(rp->r_tstate)) {
1205 			struct machpcb *mpcb = lwptompcb(curthread->t_lwp);
1206 
1207 			res = *data;
1208 			if (suword64_nowatch(addr, res) != 0) {
1209 				*badaddr = (caddr_t)addr;
1210 				rv = -1;
1211 			}
1212 			/*
1213 			 * We have changed a local or in register;
1214 			 * nuke the watchpoint return windows.
1215 			 */
1216 			mpcb->mpcb_rsp[0] = NULL;
1217 			mpcb->mpcb_rsp[1] = NULL;
1218 		} else {
1219 			res = *data;
1220 			*addr = res;
1221 		}
1222 	} else {
1223 		caddr32_t sp32 = (caddr32_t)(uintptr_t)sp;
1224 		uint32_t *rw = (uint32_t *)(uintptr_t)sp32;
1225 		uint32_t *addr = (uint32_t *)&rw[reg - 16];
1226 		uint32_t res;
1227 
1228 		if (USERMODE(rp->r_tstate)) {
1229 			struct machpcb *mpcb = lwptompcb(curthread->t_lwp);
1230 
1231 			res = (uint_t)*data;
1232 			if (suword32_nowatch(addr, res) != 0) {
1233 				*badaddr = (caddr_t)addr;
1234 				rv = -1;
1235 			}
1236 			/*
1237 			 * We have changed a local or in register;
1238 			 * nuke the watchpoint return windows.
1239 			 */
1240 			mpcb->mpcb_rsp[0] = NULL;
1241 			mpcb->mpcb_rsp[1] = NULL;
1242 
1243 		} else {
1244 			res = (uint_t)*data;
1245 			*addr = res;
1246 		}
1247 	}
1248 	return (rv);
1249 }
1250 
1251 /*
1252  * Calculate a memory reference address from instruction
1253  * operands, used to return the address of a fault, instead
1254  * of the instruction when an error occurs.  This is code that is
1255  * common with most of the routines that simulate instructions.
1256  */
1257 int
1258 calc_memaddr(struct regs *rp, caddr_t *badaddr)
1259 {
1260 	uint_t	inst;
1261 	uint_t	rd, rs1, rs2;
1262 	int	sz;
1263 	int	immflg;
1264 	int	floatflg;
1265 	caddr_t  addr;
1266 	uint64_t val;
1267 
1268 	if (USERMODE(rp->r_tstate))
1269 		inst = fetch_user_instr((caddr_t)rp->r_pc);
1270 	else
1271 		inst = *(uint_t *)rp->r_pc;
1272 
1273 	rd = (inst >> 25) & 0x1f;
1274 	rs1 = (inst >> 14) & 0x1f;
1275 	rs2 = inst & 0x1f;
1276 	floatflg = (inst >> 24) & 1;
1277 	immflg = (inst >> 13) & 1;
1278 
1279 	if (floatflg) {
1280 		switch ((inst >> 19) & 3) {	/* map size bits to a number */
1281 		case 0: sz = 4; break;		/* ldf/stf */
1282 		case 1: return (0);		/* ld[x]fsr/st[x]fsr */
1283 		case 2: sz = 16; break;		/* ldqf/stqf */
1284 		case 3: sz = 8; break;		/* lddf/stdf */
1285 		}
1286 		/*
1287 		 * Fix to access extra double register encoding plus
1288 		 * compensate to access the correct fpu_dreg.
1289 		 */
1290 		if (sz > 4) {
1291 			if ((rd & 1) == 1)
1292 				rd = (rd & 0x1e) | 0x20;
1293 			rd = rd >> 1;
1294 		}
1295 	} else {
1296 		switch ((inst >> 19) & 0xf) {	/* map size bits to a number */
1297 		case 0:				/* lduw */
1298 		case 4:				/* stw */
1299 		case 8:				/* ldsw */
1300 		case 0xf:			/* swap */
1301 			sz = 4; break;
1302 		case 1:				/* ldub */
1303 		case 5:				/* stb */
1304 		case 9:				/* ldsb */
1305 		case 0xd:			/* ldstub */
1306 			sz = 1; break;
1307 		case 2:				/* lduh */
1308 		case 6:				/* sth */
1309 		case 0xa:			/* ldsh */
1310 			sz = 2; break;
1311 		case 3:				/* ldd */
1312 		case 7:				/* std */
1313 		case 0xb:			/* ldx */
1314 		case 0xe:			/* stx */
1315 			sz = 8; break;
1316 		}
1317 	}
1318 
1319 	if (USERMODE(rp->r_tstate))
1320 		(void) flush_user_windows_to_stack(NULL);
1321 	else
1322 		flush_windows();
1323 
1324 	if (getreg(rp, rs1, &val, badaddr))
1325 		return (SIMU_FAULT);
1326 	addr = (caddr_t)val;
1327 
1328 	/* check immediate bit and use immediate field or reg (rs2) */
1329 	if (immflg) {
1330 		int imm;
1331 		imm = inst & 0x1fff;		/* mask out immediate field */
1332 		imm <<= 19;			/* sign extend it */
1333 		imm >>= 19;
1334 		addr += imm;			/* compute address */
1335 	} else {
1336 		if (getreg(rp, rs2, &val, badaddr))
1337 			return (SIMU_FAULT);
1338 		addr += val;
1339 	}
1340 
1341 	/*
1342 	 * If this is a 32-bit program, chop the address accordingly.  The
1343 	 * intermediate uintptr_t casts prevent warnings under a certain
1344 	 * compiler, and the temporary 32 bit storage is intended to force
1345 	 * proper code generation and break up what would otherwise be a
1346 	 * quadruple cast.
1347 	 */
1348 	if (curproc->p_model == DATAMODEL_ILP32 && USERMODE(rp->r_tstate)) {
1349 		caddr32_t addr32 = (caddr32_t)(uintptr_t)addr;
1350 		addr = (caddr_t)(uintptr_t)addr32;
1351 	}
1352 
1353 	*badaddr = addr;
1354 	return ((uintptr_t)addr & (sz - 1) ? SIMU_UNALIGN : SIMU_SUCCESS);
1355 }
1356 
/*
 * Return the size of a load or store instruction (1, 2, 4, 8, 16, 64).
 * Also compute the precise address by instruction disassembly.
 * (v9 page faults only provide the page address via the hardware.)
 * Return 0 on failure (not a load or store instruction).
 */
int
instr_size(struct regs *rp, caddr_t *addrp, enum seg_rw rdwr)
{
	uint_t	inst, op3, asi;
	uint_t	rd, rs1, rs2;
	int	sz = 0;
	int	immflg;
	int	floatflg;
	caddr_t	addr;
	caddr_t badaddr;
	uint64_t val;

	/* instruction fetches are always 4 bytes at the PC itself */
	if (rdwr == S_EXEC) {
		*addrp = (caddr_t)rp->r_pc;
		return (4);
	}

	/*
	 * Fetch the instruction from user-level.
	 * We would like to assert this:
	 *   ASSERT(USERMODE(rp->r_tstate));
	 * but we can't because we can reach this point from a
	 * register window underflow/overflow and the v9 wbuf
	 * traps call trap() with T_USER even though r_tstate
	 * indicates a system trap, not a user trap.
	 */
	inst = fetch_user_instr((caddr_t)rp->r_pc);

	/* crack the instruction into its format-3 fields */
	op3 = (inst >> 19) & 0x3f;
	rd = (inst >> 25) & 0x1f;
	rs1 = (inst >> 14) & 0x1f;
	rs2 = inst & 0x1f;
	floatflg = (inst >> 24) & 1;
	immflg = (inst >> 13) & 1;

	/* if not load or store do nothing.  can't happen? */
	if ((inst >> 30) != 3)
		return (0);

	/*
	 * With the i-bit set the effective ASI comes from the %asi
	 * register (saved in TSTATE); otherwise it is encoded in the
	 * instruction's imm_asi field.
	 */
	if (immflg)
		asi = (uint_t)((rp->r_tstate >> TSTATE_ASI_SHIFT) &
		    TSTATE_ASI_MASK);
	else
		asi = (inst >> 5) & 0xff;

	if (floatflg) {
		/* check for ld/st alternate and highest defined V9 asi */
		if ((op3 & 0x30) == 0x30 && asi > ASI_SNFL) {
			sz = extended_asi_size(asi);
		} else {
			switch (op3 & 3) {
			case 0:
				sz = 4;			/* ldf/stf/cas */
				break;
			case 1:
				if (rd == 0)
					sz = 4;		/* ldfsr/stfsr */
				else
					sz = 8;		/* ldxfsr/stxfsr */
				break;
			case 2:
				if (op3 == 0x3e)
					sz = 8;		/* casx */
				else
					sz = 16;	/* ldqf/stqf */
				break;
			case 3:
				sz = 8;			/* lddf/stdf */
				break;
			}
		}
	} else {
		switch (op3 & 0xf) {		/* map size bits to a number */
		case 0:				/* lduw */
		case 4:				/* stw */
		case 8:				/* ldsw */
		case 0xf:			/* swap */
			sz = 4; break;
		case 1:				/* ldub */
		case 5:				/* stb */
		case 9:				/* ldsb */
		case 0xd:			/* ldstub */
			sz = 1; break;
		case 2:				/* lduh */
		case 6:				/* sth */
		case 0xa:			/* ldsh */
			sz = 2; break;
		case 3:				/* ldd */
		case 7:				/* std */
		case 0xb:			/* ldx */
		case 0xe:			/* stx */
			sz = 8; break;
		}
	}

	if (sz == 0)	/* can't happen? */
		return (0);
	/* make the in/local registers visible on the user stack */
	(void) flush_user_windows_to_stack(NULL);

	if (getreg(rp, rs1, &val, &badaddr))
		return (0);
	addr = (caddr_t)val;

	/* cas/casx don't use rs2 / simm13 to compute the address */
	if ((op3 & 0x3d) != 0x3c) {
		/* check immediate bit and use immediate field or reg (rs2) */
		if (immflg) {
			int imm;
			imm  = inst & 0x1fff;	/* mask out immediate field */
			imm <<= 19;		/* sign extend it */
			imm >>= 19;
			addr += imm;		/* compute address */
		} else {
			/*
			 * asi's in the 0xCx range are partial store
			 * instructions.  For these, rs2 is a mask, not part of
			 * the address.
			 */
			if (!(floatflg && (asi & 0xf0) == 0xc0)) {
				if (getreg(rp, rs2, &val, &badaddr))
					return (0);
				addr += val;
			}
		}
	}

	/*
	 * If this is a 32-bit program, chop the address accordingly.  The
	 * intermediate uintptr_t casts prevent warnings under a certain
	 * compiler, and the temporary 32 bit storage is intended to force
	 * proper code generation and break up what would otherwise be a
	 * quadruple cast.
	 */
	if (curproc->p_model == DATAMODEL_ILP32) {
		caddr32_t addr32 = (caddr32_t)(uintptr_t)addr;
		addr = (caddr_t)(uintptr_t)addr32;
	}

	*addrp = addr;
	ASSERT(sz != 0);
	return (sz);
}
1505 
1506 /*
1507  * Fetch an instruction from user-level.
1508  * Deal with watchpoints, if they are in effect.
1509  */
1510 int32_t
1511 fetch_user_instr(caddr_t vaddr)
1512 {
1513 	proc_t *p = curproc;
1514 	int32_t instr;
1515 
1516 	/*
1517 	 * If this is a 32-bit program, chop the address accordingly.  The
1518 	 * intermediate uintptr_t casts prevent warnings under a certain
1519 	 * compiler, and the temporary 32 bit storage is intended to force
1520 	 * proper code generation and break up what would otherwise be a
1521 	 * quadruple cast.
1522 	 */
1523 	if (p->p_model == DATAMODEL_ILP32) {
1524 		caddr32_t vaddr32 = (caddr32_t)(uintptr_t)vaddr;
1525 		vaddr = (caddr_t)(uintptr_t)vaddr32;
1526 	}
1527 
1528 	if (fuword32_nowatch(vaddr, (uint32_t *)&instr) == -1)
1529 		instr = -1;
1530 
1531 	return (instr);
1532 }
1533