/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/cred.h>
#include <sys/debug.h>
#include <sys/inline.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/psw.h>
#include <sys/pcb.h>
#include <sys/buf.h>
#include <sys/signal.h>
#include <sys/user.h>
#include <sys/cpuvar.h>

#include <sys/fault.h>
#include <sys/syscall.h>
#include <sys/procfs.h>
#include <sys/cmn_err.h>
#include <sys/stack.h>
#include <sys/debugreg.h>
#include <sys/copyops.h>

#include <sys/mmu.h>
#include <sys/pte.h>
#include <sys/vmem.h>
#include <sys/mman.h>
#include <sys/vmparam.h>
#include <sys/fp.h>
#include <sys/archsystm.h>
#include <sys/vmsystm.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kp.h>
#include <vm/page.h>

#include <sys/sysi86.h>

#include <fs/proc/prdata.h>

int	prnwatch = 10000;	/* maximum number of watched areas */

/*
 * Force a thread into the kernel if it is not already there.
 * This is a no-op on uniprocessors.
 */
/* ARGSUSED */
void
prpokethread(kthread_t *t)
{
	if (t->t_state == TS_ONPROC && t->t_cpu != CPU)
		poke_cpu(t->t_cpu->cpu_id);
}

/*
 * Return general registers.
 */
void
prgetprregs(klwp_t *lwp, prgregset_t prp)
{
	ASSERT(MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));

	getgregs(lwp, prp);
}

/*
 * Set general registers.
 * (Note: This can be an alias to setgregs().)
 */
void
prsetprregs(klwp_t *lwp, prgregset_t prp, int initial)
{
	if (initial)		/* set initial values */
		lwptoregs(lwp)->r_ps = PSL_USER;
	(void) setgregs(lwp, prp);
}

#ifdef _SYSCALL32_IMPL

/*
 * Convert prgregset32 to native prgregset
 */
void
prgregset_32ton(klwp_t *lwp, prgregset32_t src, prgregset_t dst)
{
	struct regs *rp = lwptoregs(lwp);

	dst[REG_GSBASE] = lwp->lwp_pcb.pcb_gsbase;
	dst[REG_FSBASE] = lwp->lwp_pcb.pcb_fsbase;

	dst[REG_DS] = (uint16_t)src[DS];
	dst[REG_ES] = (uint16_t)src[ES];

	dst[REG_GS] = (uint16_t)src[GS];
	dst[REG_FS] = (uint16_t)src[FS];
	dst[REG_SS] = (uint16_t)src[SS];
	dst[REG_RSP] = (uint32_t)src[UESP];
	dst[REG_RFL] =
	    (rp->r_ps & ~PSL_USERMASK) | (src[EFL] & PSL_USERMASK);
	dst[REG_CS] = (uint16_t)src[CS];
	dst[REG_RIP] = (uint32_t)src[EIP];
	dst[REG_ERR] = (uint32_t)src[ERR];
	dst[REG_TRAPNO] = (uint32_t)src[TRAPNO];
	dst[REG_RAX] = (uint32_t)src[EAX];
	dst[REG_RCX] = (uint32_t)src[ECX];
	dst[REG_RDX] = (uint32_t)src[EDX];
	dst[REG_RBX] = (uint32_t)src[EBX];
	dst[REG_RBP] = (uint32_t)src[EBP];
	dst[REG_RSI] = (uint32_t)src[ESI];
	dst[REG_RDI] = (uint32_t)src[EDI];
	dst[REG_R8] = dst[REG_R9] = dst[REG_R10] = dst[REG_R11] =
	    dst[REG_R12] = dst[REG_R13] = dst[REG_R14] = dst[REG_R15] = 0;
}

/*
 * Return 32-bit general registers
 */
void
prgetprregs32(klwp_t *lwp, prgregset32_t prp)
{
	ASSERT(MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));
	getgregs32(lwp, prp);
}

#endif	/* _SYSCALL32_IMPL */

/*
 * Get the syscall return values for the lwp.
 */
int
prgetrvals(klwp_t *lwp, long *rval1, long *rval2)
{
	struct regs *r = lwptoregs(lwp);

	if (r->r_ps & PS_C)
		return (r->r_r0);
	if (lwp->lwp_eosys == JUSTRETURN) {
		*rval1 = 0;
		*rval2 = 0;
	} else if (lwp_getdatamodel(lwp) != DATAMODEL_NATIVE) {
		/*
		 * XX64	Not sure we -really- need to do this, because the
		 *	syscall return already masks off the bottom values ..?
		 */
		*rval1 = r->r_r0 & (uint32_t)0xffffffffu;
		*rval2 = r->r_r1 & (uint32_t)0xffffffffu;
	} else {
		*rval1 = r->r_r0;
		*rval2 = r->r_r1;
	}
	return (0);
}

/*
 * Does the system support floating-point, either through hardware
 * or by trapping and emulating floating-point machine instructions?
 */
int
prhasfp(void)
{
	extern int fp_kind;

	return (fp_kind != FP_NO);
}

/*
 * Get floating-point registers.
 */
void
prgetprfpregs(klwp_t *lwp, prfpregset_t *pfp)
{
	bzero(pfp, sizeof (prfpregset_t));
	getfpregs(lwp, pfp);
}

#if defined(_SYSCALL32_IMPL)
void
prgetprfpregs32(klwp_t *lwp, prfpregset32_t *pfp)
{
	bzero(pfp, sizeof (*pfp));
	getfpregs32(lwp, pfp);
}
#endif	/* _SYSCALL32_IMPL */

/*
 * Set floating-point registers.
 * (Note: This can be an alias to setfpregs().)
 */
void
prsetprfpregs(klwp_t *lwp, prfpregset_t *pfp)
{
	setfpregs(lwp, pfp);
}

#if defined(_SYSCALL32_IMPL)
void
prsetprfpregs32(klwp_t *lwp, prfpregset32_t *pfp)
{
	setfpregs32(lwp, pfp);
}
#endif	/* _SYSCALL32_IMPL */

/*
 * Does the system support extra register state?
 */
/* ARGSUSED */
int
prhasx(proc_t *p)
{
	return (0);
}

/*
 * Get the size of the extra registers.
 */
/* ARGSUSED */
int
prgetprxregsize(proc_t *p)
{
	return (0);
}

/*
 * Get extra registers.
 */
/*ARGSUSED*/
void
prgetprxregs(klwp_t *lwp, caddr_t prx)
{
	/* no extra registers */
}

/*
 * Set extra registers.
 */
/*ARGSUSED*/
void
prsetprxregs(klwp_t *lwp, caddr_t prx)
{
	/* no extra registers */
}

/*
 * Return the base (lower limit) of the process stack.
 */
caddr_t
prgetstackbase(proc_t *p)
{
	return (p->p_usrstack - p->p_stksize);
}

/*
 * Return the "addr" field for pr_addr in prpsinfo_t.
 * This is a vestige of the past, so whatever we return is OK.
 */
caddr_t
prgetpsaddr(proc_t *p)
{
	return ((caddr_t)p);
}

/*
 * Arrange to single-step the lwp.
 */
void
prstep(klwp_t *lwp, int watchstep)
{
	struct regs *r = lwptoregs(lwp);

	ASSERT(MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));

	if (watchstep)
		lwp->lwp_pcb.pcb_flags |= WATCH_STEP;
	else
		lwp->lwp_pcb.pcb_flags |= NORMAL_STEP;

	r->r_ps |= PS_T;	/* set the trace flag in PSW */
}

/*
 * Undo prstep().
 */
void
prnostep(klwp_t *lwp)
{
	struct regs *r = lwptoregs(lwp);

	ASSERT(ttolwp(curthread) == lwp ||
	    MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));

	r->r_ps &= ~PS_T;	/* turn off trace flag in PSW */
	lwp->lwp_pcb.pcb_flags &= ~(NORMAL_STEP|WATCH_STEP|DEBUG_PENDING);
}

/*
 * Return non-zero if a single-step is in effect.
 */
int
prisstep(klwp_t *lwp)
{
	ASSERT(MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));

	return ((lwp->lwp_pcb.pcb_flags &
	    (NORMAL_STEP|WATCH_STEP|DEBUG_PENDING)) != 0);
}

/*
 * Set the PC to the specified virtual address.
 */
void
prsvaddr(klwp_t *lwp, caddr_t vaddr)
{
	struct regs *r = lwptoregs(lwp);

	ASSERT(MUTEX_NOT_HELD(&lwptoproc(lwp)->p_lock));

	r->r_pc = (uintptr_t)vaddr;
}

/*
 * Map address "addr" in address space "as" into a kernel virtual address.
 * The memory is guaranteed to be resident and locked down.
 */
caddr_t
prmapin(struct as *as, caddr_t addr, int writing)
{
	page_t *pp;
	caddr_t kaddr;
	pfn_t pfnum;

	/*
	 * XXX - Because of past mistakes, we have bits being returned
	 * by getpfnum that are actually the page type bits of the pte.
	 * When the object we are trying to map is a memory page with
	 * a page structure everything is ok and we can use the optimal
	 * method, ppmapin.  Otherwise, we have to do something special.
	 */
	pfnum = hat_getpfnum(as->a_hat, addr);
	if (pf_is_memory(pfnum)) {
		pp = page_numtopp_nolock(pfnum);
		if (pp != NULL) {
			ASSERT(PAGE_LOCKED(pp));
			kaddr = ppmapin(pp, writing ?
			    (PROT_READ | PROT_WRITE) : PROT_READ, (caddr_t)-1);
			return (kaddr + ((uintptr_t)addr & PAGEOFFSET));
		}
	}

	/*
	 * Oh well, we didn't have a page struct for the object we were
	 * trying to map in; ppmapin doesn't handle devices, but allocating a
	 * heap address allows ppmapout to free virtual space when done.
	 */
	kaddr = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);

	hat_devload(kas.a_hat, kaddr, MMU_PAGESIZE, pfnum,
	    writing ? (PROT_READ | PROT_WRITE) : PROT_READ, 0);

	return (kaddr + ((uintptr_t)addr & PAGEOFFSET));
}

/*
 * Unmap address "addr" in address space "as"; inverse of prmapin().
 */
/* ARGSUSED */
void
prmapout(struct as *as, caddr_t addr, caddr_t vaddr, int writing)
{
	extern void ppmapout(caddr_t);

	vaddr = (caddr_t)((uintptr_t)vaddr & PAGEMASK);
	ppmapout(vaddr);
}

/*
 * Make sure the lwp is in an orderly state
 * for inspection by a debugger through /proc.
 * Called from stop() and from syslwp_create().
 */
/* ARGSUSED */
void
prstop(int why, int what)
{
	klwp_t *lwp = ttolwp(curthread);
	struct regs *r = lwptoregs(lwp);

	/*
	 * Make sure we don't deadlock on a recursive call
	 * to prstop().  stop() tests the lwp_nostop flag.
	 */
	ASSERT(lwp->lwp_nostop == 0);
	lwp->lwp_nostop = 1;

	if (copyin_nowatch((caddr_t)r->r_pc, &lwp->lwp_pcb.pcb_instr,
	    sizeof (lwp->lwp_pcb.pcb_instr)) == 0)
		lwp->lwp_pcb.pcb_flags |= INSTR_VALID;
	else {
		lwp->lwp_pcb.pcb_flags &= ~INSTR_VALID;
		lwp->lwp_pcb.pcb_instr = 0;
	}

	(void) save_syscall_args();
	ASSERT(lwp->lwp_nostop == 1);
	lwp->lwp_nostop = 0;
}

/*
 * Fetch the user-level instruction on which the lwp is stopped.
 * It was saved by the lwp itself, in prstop().
 * Return non-zero if the instruction is valid.
 */
int
prfetchinstr(klwp_t *lwp, ulong_t *ip)
{
	*ip = (ulong_t)(instr_t)lwp->lwp_pcb.pcb_instr;
	return (lwp->lwp_pcb.pcb_flags & INSTR_VALID);
}

/*
 * Called from trap() when a load or store instruction
 * falls in a watched page but is not a watchpoint.
 * We emulate the instruction in the kernel.
 */
/* ARGSUSED */
int
pr_watch_emul(struct regs *rp, caddr_t addr, enum seg_rw rw)
{
#ifdef SOMEDAY
	int res;
	proc_t *p = curproc;
	char *badaddr = (caddr_t)(-1);
	int mapped;

	/* prevent recursive calls to pr_watch_emul() */
	ASSERT(!(curthread->t_flag & T_WATCHPT));
	curthread->t_flag |= T_WATCHPT;

	watch_disable_addr(addr, 8, rw);
	res = do_unaligned(rp, &badaddr);
	watch_enable_addr(addr, 8, rw);

	curthread->t_flag &= ~T_WATCHPT;
	if (res == SIMU_SUCCESS) {
		/* adjust the pc */
		return (1);
	}
#endif
	return (0);
}

/*
 * Return the number of active entries in the local descriptor table.
 */
int
prnldt(proc_t *p)
{
	int limit, i, n;
	user_desc_t *udp;

	ASSERT(MUTEX_HELD(&p->p_ldtlock));

	/*
	 * Currently 64 bit processes cannot have a private ldt.
	 */
	ASSERT(get_udatamodel() != DATAMODEL_LP64 || p->p_ldt == NULL);

	if (p->p_ldt == NULL)
		return (0);
	n = 0;
	limit = p->p_ldtlimit;
	ASSERT(limit >= 0 && limit < MAXNLDT);

	/*
	 * Count all present user descriptors.
	 */
	for (i = LDT_UDBASE, udp = &p->p_ldt[i]; i <= limit; i++, udp++)
		if (udp->usd_type != 0 || udp->usd_dpl != 0 || udp->usd_p != 0)
			n++;
	return (n);
}

/*
 * Fetch the active entries from the local descriptor table.
 */
void
prgetldt(proc_t *p, struct ssd *ssd)
{
	int i, limit;
	user_desc_t *udp;

	ASSERT(MUTEX_HELD(&p->p_ldtlock));

	if (p->p_ldt == NULL)
		return;

	limit = p->p_ldtlimit;
	ASSERT(limit >= 0 && limit < MAXNLDT);

	/*
	 * All present user descriptors.
	 */
	for (i = LDT_UDBASE, udp = &p->p_ldt[i]; i <= limit; i++, udp++)
		if (udp->usd_type != 0 || udp->usd_dpl != 0 ||
		    udp->usd_p != 0)
			usd_to_ssd(udp, ssd++, SEL_LDT(i));
}
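
/*
 * Usage note (editorial sketch, not part of the original source): prnldt()
 * and prgetldt() are designed to be used as a pair by a /proc consumer,
 * with p->p_ldtlock held across both calls so the entry count and the
 * fetched descriptors stay consistent.  The names `n' and `ssd' below are
 * illustrative only; the buffer passed to prgetldt() must have room for
 * the count returned by prnldt().
 *
 *	mutex_enter(&p->p_ldtlock);
 *	n = prnldt(p);
 *	if (n != 0) {
 *		ssd = kmem_alloc(n * sizeof (struct ssd), KM_SLEEP);
 *		prgetldt(p, ssd);
 *	}
 *	mutex_exit(&p->p_ldtlock);
 */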