xref: /illumos-gate/usr/src/uts/intel/fs/proc/prmachdep.c (revision fc910014e8a32a65612105835a10995f2c13d942)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
28 /*	  All Rights Reserved	*/
29 
30 /*
31  * Copyright 2023 Oxide Computer Company
32  */
33 
34 #include <sys/types.h>
35 #include <sys/t_lock.h>
36 #include <sys/param.h>
37 #include <sys/cred.h>
38 #include <sys/debug.h>
39 #include <sys/inline.h>
40 #include <sys/kmem.h>
41 #include <sys/proc.h>
42 #include <sys/regset.h>
43 #include <sys/privregs.h>
44 #include <sys/sysmacros.h>
45 #include <sys/systm.h>
46 #include <sys/vfs.h>
47 #include <sys/vnode.h>
48 #include <sys/psw.h>
49 #include <sys/pcb.h>
50 #include <sys/buf.h>
51 #include <sys/signal.h>
52 #include <sys/user.h>
53 #include <sys/cpuvar.h>
54 #include <sys/stdalign.h>
55 
56 #include <sys/fault.h>
57 #include <sys/syscall.h>
58 #include <sys/procfs.h>
59 #include <sys/cmn_err.h>
60 #include <sys/stack.h>
61 #include <sys/debugreg.h>
62 #include <sys/copyops.h>
63 
64 #include <sys/vmem.h>
65 #include <sys/mman.h>
66 #include <sys/vmparam.h>
67 #include <sys/fp.h>
68 #include <sys/archsystm.h>
69 #include <sys/vmsystm.h>
70 #include <vm/hat.h>
71 #include <vm/as.h>
72 #include <vm/seg.h>
73 #include <vm/seg_kmem.h>
74 #include <vm/seg_kp.h>
75 #include <vm/page.h>
76 
77 #include <sys/sysi86.h>
78 
79 #include <fs/proc/prdata.h>
80 
/* Patchable limit; presumably consulted when watchpoints are added -- confirm at callers. */
int	prnwatch = 10000;	/* maximum number of watched areas */
82 
83 /*
84  * Force a thread into the kernel if it is not already there.
85  * This is a no-op on uniprocessors.
86  */
87 /* ARGSUSED */
88 void
89 prpokethread(kthread_t *t)
90 {
91 	if (t->t_state == TS_ONPROC && t->t_cpu != CPU)
92 		poke_cpu(t->t_cpu->cpu_id);
93 }
94 
/*
 * Return general registers.
 * Copies the lwp's general registers into prp via getgregs().
 */
void
prgetprregs(klwp_t *lwp, prgregset_t prp)
{
	/* Caller must not hold the process lock while we read registers. */
	ASSERT(MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));

	getgregs(lwp, prp);
}
105 
/*
 * Set general registers.
 * (Note: This can be an alias to setgregs().)
 *
 * If 'initial' is set, the lwp's flags register is first reset to the
 * default user PSL before the supplied register values are applied.
 */
void
prsetprregs(klwp_t *lwp, prgregset_t prp, int initial)
{
	if (initial)		/* set initial values */
		lwptoregs(lwp)->r_ps = PSL_USER;
	(void) setgregs(lwp, prp);
}
117 
118 #ifdef _SYSCALL32_IMPL
119 
/*
 * Convert prgregset32 to native prgregset
 */
void
prgregset_32ton(klwp_t *lwp, prgregset32_t src, prgregset_t dst)
{
	struct regs *rp = lwptoregs(lwp);

	/*
	 * The 32-bit register set carries no segment base values; take the
	 * fs/gs bases from the lwp's pcb instead.
	 */
	dst[REG_GSBASE] = lwp->lwp_pcb.pcb_gsbase;
	dst[REG_FSBASE] = lwp->lwp_pcb.pcb_fsbase;

	dst[REG_DS] = (uint16_t)src[DS];
	dst[REG_ES] = (uint16_t)src[ES];

	dst[REG_GS] = (uint16_t)src[GS];
	dst[REG_FS] = (uint16_t)src[FS];
	dst[REG_SS] = (uint16_t)src[SS];
	dst[REG_RSP] = (uint32_t)src[UESP];
	/*
	 * Only the user-modifiable eflags bits come from src; the remaining
	 * bits are preserved from the lwp's current flags register.
	 */
	dst[REG_RFL] =
	    (rp->r_ps & ~PSL_USERMASK) | (src[EFL] & PSL_USERMASK);
	dst[REG_CS] = (uint16_t)src[CS];
	dst[REG_RIP] = (uint32_t)src[EIP];
	dst[REG_ERR] = (uint32_t)src[ERR];
	dst[REG_TRAPNO] = (uint32_t)src[TRAPNO];
	dst[REG_RAX] = (uint32_t)src[EAX];
	dst[REG_RCX] = (uint32_t)src[ECX];
	dst[REG_RDX] = (uint32_t)src[EDX];
	dst[REG_RBX] = (uint32_t)src[EBX];
	dst[REG_RBP] = (uint32_t)src[EBP];
	dst[REG_RSI] = (uint32_t)src[ESI];
	dst[REG_RDI] = (uint32_t)src[EDI];
	/* The %r8-%r15 registers have no 32-bit counterparts; clear them. */
	dst[REG_R8] = dst[REG_R9] = dst[REG_R10] = dst[REG_R11] =
	    dst[REG_R12] = dst[REG_R13] = dst[REG_R14] = dst[REG_R15] = 0;
}
154 
/*
 * Return 32-bit general registers.
 * Copies the lwp's registers into prp via getgregs32().
 */
void
prgetprregs32(klwp_t *lwp, prgregset32_t prp)
{
	/* Caller must not hold the process lock while we read registers. */
	ASSERT(MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));
	getgregs32(lwp, prp);
}
164 
165 #endif	/* _SYSCALL32_IMPL */
166 
/*
 * Get the syscall return values for the lwp.
 *
 * Returns 0 on success with *rval1/*rval2 filled in.  A non-zero return
 * is the error number of a failed system call (carry flag set on return
 * to userland); the rvals are not written in that case.
 */
int
prgetrvals(klwp_t *lwp, long *rval1, long *rval2)
{
	struct regs *r = lwptoregs(lwp);

	/* Carry set means the syscall failed; r_r0 holds the errno. */
	if (r->r_ps & PS_C)
		return (r->r_r0);
	if (lwp->lwp_eosys == JUSTRETURN) {
		/* JUSTRETURN: no return values to report. */
		*rval1 = 0;
		*rval2 = 0;
	} else if (lwp_getdatamodel(lwp) != DATAMODEL_NATIVE) {
		/*
		 * XX64	Not sure we -really- need to do this, because the
		 *	syscall return already masks off the bottom values ..?
		 */
		*rval1 = r->r_r0 & (uint32_t)0xffffffffu;
		*rval2 = r->r_r1 & (uint32_t)0xffffffffu;
	} else {
		*rval1 = r->r_r0;
		*rval2 = r->r_r1;
	}
	return (0);
}
193 
194 /*
195  * Does the system support floating-point, either through hardware
196  * or by trapping and emulating floating-point machine instructions?
197  */
198 int
199 prhasfp(void)
200 {
201 	extern int fp_kind;
202 
203 	return (fp_kind != FP_NO);
204 }
205 
/*
 * Get floating-point registers.
 * The destination is zeroed first so that every byte of *pfp is defined
 * even if getfpregs() leaves some fields untouched.
 */
void
prgetprfpregs(klwp_t *lwp, prfpregset_t *pfp)
{
	bzero(pfp, sizeof (prfpregset_t));
	getfpregs(lwp, pfp);
}
215 
#if defined(_SYSCALL32_IMPL)
/*
 * Get 32-bit floating-point registers.  Zeroed first so every byte of
 * *pfp is defined even if getfpregs32() leaves some fields untouched.
 */
void
prgetprfpregs32(klwp_t *lwp, prfpregset32_t *pfp)
{
	bzero(pfp, sizeof (*pfp));
	getfpregs32(lwp, pfp);
}
#endif	/* _SYSCALL32_IMPL */
224 
/*
 * Set floating-point registers.
 * (Note: This can be an alias to setfpregs().)
 */
void
prsetprfpregs(klwp_t *lwp, prfpregset_t *pfp)
{
	setfpregs(lwp, pfp);
}
234 
#if defined(_SYSCALL32_IMPL)
/*
 * Set 32-bit floating-point registers.
 * (Note: This can be an alias to setfpregs32().)
 */
void
prsetprfpregs32(klwp_t *lwp, prfpregset32_t *pfp)
{
	setfpregs32(lwp, pfp);
}
#endif	/* _SYSCALL32_IMPL */
242 
/*
 * This is a general function that the main part of /proc and the rest of the
 * system uses to ask does a given process actually have extended state. Right
 * now, this question is not process-specific, but rather CPU specific. We look
 * at whether xsave has been enabled to determine that. While strictly speaking
 * one could make the argument that all amd64 CPUs support fxsave and we could
 * emulate something that only supports that, we don't think that makes sense.
 */
int
prhasx(proc_t *p)
{
	/* 'p' is currently unused; the answer is system-wide. */
	return (fpu_xsave_enabled());
}
256 
/*
 * Return the minimum size that we need to determine the full size of a
 * prxregset_t.  A header's worth of bytes is always enough: the header
 * itself records the total size (see prwritesizexreg()).
 */
boolean_t
prwriteminxreg(size_t *sizep)
{
	*sizep = sizeof (prxregset_hdr_t);
	return (B_TRUE);
}
267 
/*
 * This routine services both ILP32 and LP64 callers. We cannot assume anything
 * about the alignment of argp and must bcopy things to known structures that we
 * care about. We are guaranteed to have prxregset_hdr_t bytes because we asked
 * for them above.
 *
 * On success, *sizep is set to the number of bytes remaining beyond the
 * header and B_TRUE is returned; B_FALSE indicates an unusable header.
 */
boolean_t
prwritesizexreg(const void *argp, size_t *sizep)
{
	prxregset_hdr_t hdr;

	/*
	 * While it's tempting to validate everything here, the only thing we
	 * care about is that we understand the type and the size meets our
	 * constraints:
	 *
	 *  o We actually have an item of type PR_TYPE_XSAVE, otherwise we
	 *    don't know what this is.
	 *  o The indicated size actually contains at least the
	 *    prxregset_hdr_t.
	 *  o The indicated size isn't larger than what the FPU tells us is
	 *    allowed.
	 *
	 * We do not check if the rest of the structure makes semantic sense at
	 * this point. We save all other validation for the normal set function
	 * as that's when we'll have the rest of our data.
	 */
	bcopy(argp, &hdr, sizeof (hdr));
	if (hdr.pr_type != PR_TYPE_XSAVE ||
	    hdr.pr_size > fpu_proc_xregs_max_size() ||
	    hdr.pr_size < sizeof (prxregset_hdr_t)) {
		return (B_FALSE);
	}

	*sizep = hdr.pr_size - sizeof (prxregset_hdr_t);
	return (B_TRUE);
}
305 
/*
 * Get the size of the extra registers. The ultimate size here depends on a
 * combination of a few different things. Right now the xregs always have our
 * header, the illumos-specific XCR information, the xsave information, and then
 * otherwise this varies based on the items that the CPU supports.
 *
 * The ultimate size here is going to be:
 *
 *  o 1x prxregset_hdr_t
 *  o n  prxregset_info_t structures
 *  o The individual data for each one
 */
size_t
prgetprxregsize(proc_t *p)
{
	uint32_t size;

	/* The FPU code computes the total; we only want the size out. */
	fpu_proc_xregs_info(p, NULL, &size, NULL);
	return (size);
}
326 
/*
 * Get extra registers.
 * prx must be large enough to hold prgetprxregsize(p) bytes.
 */
void
prgetprxregs(klwp_t *lwp, prxregset_t *prx)
{
	fpu_proc_xregs_get(lwp, prx);
}
335 
/*
 * Set extra registers.
 *
 * We've been given a regset to set. Before we hand it off to the FPU, we have
 * to go through and make sure that the different parts of this actually make
 * sense. The kernel has guaranteed us through the functions above that we have
 * the number of bytes that the header indicates are present. In particular we
 * need to validate:
 *
 *   o The information in the header is reasonable: we have a known type, flags
 *     and padding are zero, and there is at least one info structure.
 *   o Each of the info structures has a valid type, size, and fits within the
 *     data we were given.
 *   o We do not validate or modify the actual data in the different pieces for
 *     validity. That is considered something that the FPU does. Similarly if
 *     something is read-only or not used, that is something that it checks.
 *
 * While we would like to return something other than EINVAL, the /proc APIs
 * pretty much lead that to being the primary errno for all sorts of situations.
 */
int
prsetprxregs(klwp_t *lwp, prxregset_t *prx)
{
	size_t infosz;
	prxregset_hdr_t *hdr = (prxregset_hdr_t *)prx;

	/* Header sanity: known type, no flags, zero padding, >= 1 info. */
	if (hdr->pr_type != PR_TYPE_XSAVE || hdr->pr_flags != 0 ||
	    hdr->pr_pad[0] != 0 || hdr->pr_pad[1] != 0 || hdr->pr_pad[2] != 0 ||
	    hdr->pr_pad[3] != 0 || hdr->pr_ninfo == 0) {
		return (EINVAL);
	}

	/* The header plus all info structures must fit in pr_size. */
	infosz = hdr->pr_ninfo * sizeof (prxregset_info_t) +
	    sizeof (prxregset_hdr_t);
	if (infosz > hdr->pr_size) {
		return (EINVAL);
	}

	for (uint32_t i = 0; i < hdr->pr_ninfo; i++) {
		uint32_t exp_size;
		size_t need_len, exp_align;
		const prxregset_info_t *info = &hdr->pr_info[i];

		/* Map each known info type to its expected size/alignment. */
		switch (info->pri_type) {
		case PRX_INFO_XCR:
			exp_size = sizeof (prxregset_xcr_t);
			exp_align = alignof (prxregset_xcr_t);
			break;
		case PRX_INFO_XSAVE:
			exp_size = sizeof (prxregset_xsave_t);
			exp_align = alignof (prxregset_xsave_t);
			break;
		case PRX_INFO_YMM:
			exp_size = sizeof (prxregset_ymm_t);
			exp_align = alignof (prxregset_ymm_t);
			break;
		case PRX_INFO_OPMASK:
			exp_size = sizeof (prxregset_opmask_t);
			exp_align = alignof (prxregset_opmask_t);
			break;
		case PRX_INFO_ZMM:
			exp_size = sizeof (prxregset_zmm_t);
			exp_align = alignof (prxregset_zmm_t);
			break;
		case PRX_INFO_HI_ZMM:
			exp_size = sizeof (prxregset_hi_zmm_t);
			exp_align = alignof (prxregset_hi_zmm_t);
			break;
		default:
			return (EINVAL);
		}

		if (info->pri_flags != 0 || info->pri_size != exp_size) {
			return (EINVAL);
		}

		if ((info->pri_offset % exp_align) != 0) {
			return (EINVAL);
		}

		/*
		 * No bytes of this item's entry should overlap with the
		 * information area. If users want to overlap the actual data
		 * information for some odd reason, we don't check that and let
		 * them do what they want. However, the total data for this
		 * region must actually fit. Because exp_size and pri_offset are
		 * uint32_t's, we can sum them without overflow worries in an
		 * LP64 environment.
		 *
		 * While we try to guarantee alignment when writing this
		 * structure out to userland, that is in no way a requirement
		 * and users are allowed to start these structures wherever
		 * they want. Hence that is not checked here.
		 */
		need_len = (size_t)exp_size + (size_t)info->pri_offset;
		if (info->pri_offset < infosz ||
		    need_len > (size_t)hdr->pr_size) {
			return (EINVAL);
		}
	}

	return (fpu_proc_xregs_set(lwp, prx));
}
439 
/*
 * Return the base (lower limit) of the process stack.
 * The stack grows down from p_usrstack, so the base is p_usrstack
 * minus the current stack size.
 */
caddr_t
prgetstackbase(proc_t *p)
{
	return (p->p_usrstack - p->p_stksize);
}
448 
/*
 * Return the "addr" field for pr_addr in prpsinfo_t.
 * This is a vestige of the past, so whatever we return is OK.
 * We simply return the kernel address of the proc structure.
 */
caddr_t
prgetpsaddr(proc_t *p)
{
	return ((caddr_t)p);
}
458 
459 /*
460  * Arrange to single-step the lwp.
461  */
462 void
463 prstep(klwp_t *lwp, int watchstep)
464 {
465 	ASSERT(MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));
466 
467 	/*
468 	 * flag LWP so that its r_efl trace bit (PS_T) will be set on
469 	 * next return to usermode.
470 	 */
471 	lwp->lwp_pcb.pcb_flags |= REQUEST_STEP;
472 	lwp->lwp_pcb.pcb_flags &= ~REQUEST_NOSTEP;
473 
474 	if (watchstep)
475 		lwp->lwp_pcb.pcb_flags |= WATCH_STEP;
476 	else
477 		lwp->lwp_pcb.pcb_flags |= NORMAL_STEP;
478 
479 	aston(lwptot(lwp));	/* let trap() set PS_T in rp->r_efl */
480 }
481 
/*
 * Undo prstep().
 */
void
prnostep(klwp_t *lwp)
{
	/* Safe to call on ourselves even while holding p_lock. */
	ASSERT(ttolwp(curthread) == lwp ||
	    MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));

	/*
	 * flag LWP so that its r_efl trace bit (PS_T) will be cleared on
	 * next return to usermode.
	 */
	lwp->lwp_pcb.pcb_flags |= REQUEST_NOSTEP;

	/* Discard any pending step requests and step-related state. */
	lwp->lwp_pcb.pcb_flags &=
	    ~(REQUEST_STEP|NORMAL_STEP|WATCH_STEP|DEBUG_PENDING);

	aston(lwptot(lwp));	/* let trap() clear PS_T in rp->r_efl */
}
502 
503 /*
504  * Return non-zero if a single-step is in effect.
505  */
506 int
507 prisstep(klwp_t *lwp)
508 {
509 	ASSERT(MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));
510 
511 	return ((lwp->lwp_pcb.pcb_flags &
512 	    (NORMAL_STEP|WATCH_STEP|DEBUG_PENDING)) != 0);
513 }
514 
/*
 * Set the PC to the specified virtual address.
 * No validation of vaddr is done here; a bad PC will fault in usermode.
 */
void
prsvaddr(klwp_t *lwp, caddr_t vaddr)
{
	struct regs *r = lwptoregs(lwp);

	ASSERT(MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));

	r->r_pc = (uintptr_t)vaddr;
}
527 
/*
 * Map address "addr" in address space "as" into a kernel virtual address.
 * The memory is guaranteed to be resident and locked down.
 *
 * Returns a kernel address with the same page offset as "addr"; the
 * mapping is writable when "writing" is non-zero.  The mapping must be
 * released with prmapout().
 */
caddr_t
prmapin(struct as *as, caddr_t addr, int writing)
{
	page_t *pp;
	caddr_t kaddr;
	pfn_t pfnum;

	/*
	 * XXX - Because of past mistakes, we have bits being returned
	 * by getpfnum that are actually the page type bits of the pte.
	 * When the object we are trying to map is a memory page with
	 * a page structure everything is ok and we can use the optimal
	 * method, ppmapin.  Otherwise, we have to do something special.
	 */
	pfnum = hat_getpfnum(as->a_hat, addr);
	if (pf_is_memory(pfnum)) {
		pp = page_numtopp_nolock(pfnum);
		if (pp != NULL) {
			ASSERT(PAGE_LOCKED(pp));
			kaddr = ppmapin(pp, writing ?
			    (PROT_READ | PROT_WRITE) : PROT_READ, (caddr_t)-1);
			/* Preserve the original offset within the page. */
			return (kaddr + ((uintptr_t)addr & PAGEOFFSET));
		}
	}

	/*
	 * Oh well, we didn't have a page struct for the object we were
	 * trying to map in; ppmapin doesn't handle devices, but allocating a
	 * heap address allows ppmapout to free virtual space when done.
	 */
	kaddr = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);

	hat_devload(kas.a_hat, kaddr, MMU_PAGESIZE,  pfnum,
	    writing ? (PROT_READ | PROT_WRITE) : PROT_READ, 0);

	return (kaddr + ((uintptr_t)addr & PAGEOFFSET));
}
569 
570 /*
571  * Unmap address "addr" in address space "as"; inverse of prmapin().
572  */
573 /* ARGSUSED */
574 void
575 prmapout(struct as *as, caddr_t addr, caddr_t vaddr, int writing)
576 {
577 	extern void ppmapout(caddr_t);
578 
579 	vaddr = (caddr_t)((uintptr_t)vaddr & PAGEMASK);
580 	ppmapout(vaddr);
581 }
582 
/*
 * Make sure the lwp is in an orderly state
 * for inspection by a debugger through /proc.
 *
 * This needs to be called only once while the current thread remains in the
 * kernel and needs to be called while holding no resources (mutex locks, etc).
 *
 * As a hedge against these conditions, if prstop() is called repeatedly
 * before prunstop() is called, it does nothing and just returns.
 *
 * prunstop() must be called before the thread returns to user level.
 */
/* ARGSUSED */
void
prstop(int why, int what)
{
	klwp_t *lwp = ttolwp(curthread);
	struct regs *r = lwptoregs(lwp);

	/* Already done since the last prunstop(); nothing to do. */
	if (lwp->lwp_pcb.pcb_flags & PRSTOP_CALLED)
		return;

	/*
	 * Make sure we don't deadlock on a recursive call
	 * to prstop().  stop() tests the lwp_nostop flag.
	 */
	ASSERT(lwp->lwp_nostop == 0);
	lwp->lwp_nostop = 1;

	/*
	 * Snapshot the user instruction at the current PC so that
	 * prfetchinstr() can report it later.
	 */
	if (copyin_nowatch((caddr_t)r->r_pc, &lwp->lwp_pcb.pcb_instr,
	    sizeof (lwp->lwp_pcb.pcb_instr)) == 0)
		lwp->lwp_pcb.pcb_flags |= INSTR_VALID;
	else {
		lwp->lwp_pcb.pcb_flags &= ~INSTR_VALID;
		lwp->lwp_pcb.pcb_instr = 0;
	}

	(void) save_syscall_args();
	ASSERT(lwp->lwp_nostop == 1);
	lwp->lwp_nostop = 0;

	lwp->lwp_pcb.pcb_flags |= PRSTOP_CALLED;
	aston(curthread);	/* so prunstop() will be called */
}
627 
/*
 * Inform prstop() that it should do its work again
 * the next time it is called.
 */
void
prunstop(void)
{
	ttolwp(curthread)->lwp_pcb.pcb_flags &= ~PRSTOP_CALLED;
}
637 
/*
 * Fetch the user-level instruction on which the lwp is stopped.
 * It was saved by the lwp itself, in prstop().
 * Return non-zero if the instruction is valid.
 */
int
prfetchinstr(klwp_t *lwp, ulong_t *ip)
{
	/* Truncate to instr_t, then widen: only the saved bytes matter. */
	*ip = (ulong_t)(instr_t)lwp->lwp_pcb.pcb_instr;
	return (lwp->lwp_pcb.pcb_flags & INSTR_VALID);
}
649 
/*
 * Called from trap() when a load or store instruction
 * falls in a watched page but is not a watchpoint.
 * We emulate the instruction in the kernel.
 *
 * NOTE: the emulation below has never been enabled (#ifdef SOMEDAY);
 * this currently always returns 0, i.e. "not emulated".
 */
/* ARGSUSED */
int
pr_watch_emul(struct regs *rp, caddr_t addr, enum seg_rw rw)
{
#ifdef SOMEDAY
	int res;
	proc_t *p = curproc;
	char *badaddr = (caddr_t)(-1);
	int mapped;

	/* prevent recursive calls to pr_watch_emul() */
	ASSERT(!(curthread->t_flag & T_WATCHPT));
	curthread->t_flag |= T_WATCHPT;

	watch_disable_addr(addr, 8, rw);
	res = do_unaligned(rp, &badaddr);
	watch_enable_addr(addr, 8, rw);

	curthread->t_flag &= ~T_WATCHPT;
	if (res == SIMU_SUCCESS) {
		/* adjust the pc */
		return (1);
	}
#endif
	return (0);
}
681 
/*
 * Return the number of active entries in the local descriptor table.
 * The caller must hold p_ldtlock; the result sizes the array that
 * prgetldt() fills in.
 */
int
prnldt(proc_t *p)
{
	int limit, i, n;
	user_desc_t *udp;

	ASSERT(MUTEX_HELD(&p->p_ldtlock));

	/*
	 * Currently 64 bit processes cannot have private LDTs.
	 */
	ASSERT(p->p_model != DATAMODEL_LP64 || p->p_ldt == NULL);

	if (p->p_ldt == NULL)
		return (0);
	n = 0;
	limit = p->p_ldtlimit;
	ASSERT(limit >= 0 && limit < MAXNLDT);

	/*
	 * Count all present user descriptors.
	 */
	for (i = LDT_UDBASE, udp = &p->p_ldt[i]; i <= limit; i++, udp++)
		if (udp->usd_type != 0 || udp->usd_dpl != 0 || udp->usd_p != 0)
			n++;
	return (n);
}
712 
/*
 * Fetch the active entries from the local descriptor table.
 * The caller must hold p_ldtlock and supply an ssd array with room for
 * prnldt(p) entries, using the same present-descriptor test.
 */
void
prgetldt(proc_t *p, struct ssd *ssd)
{
	int i, limit;
	user_desc_t *udp;

	ASSERT(MUTEX_HELD(&p->p_ldtlock));

	if (p->p_ldt == NULL)
		return;

	limit = p->p_ldtlimit;
	ASSERT(limit >= 0 && limit < MAXNLDT);

	/*
	 * All present user descriptors.
	 */
	for (i = LDT_UDBASE, udp = &p->p_ldt[i]; i <= limit; i++, udp++)
		if (udp->usd_type != 0 || udp->usd_dpl != 0 ||
		    udp->usd_p != 0)
			usd_to_ssd(udp, ssd++, SEL_LDT(i));
}
738