1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 /* All Rights Reserved */
27 /*
28 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
29 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
30 */
31
32 #include <sys/param.h>
33 #include <sys/types.h>
34 #include <sys/vmparam.h>
35 #include <sys/systm.h>
36 #include <sys/signal.h>
37 #include <sys/stack.h>
38 #include <sys/regset.h>
39 #include <sys/privregs.h>
40 #include <sys/frame.h>
41 #include <sys/proc.h>
42 #include <sys/psw.h>
43 #include <sys/siginfo.h>
44 #include <sys/cpuvar.h>
45 #include <sys/asm_linkage.h>
46 #include <sys/kmem.h>
47 #include <sys/errno.h>
48 #include <sys/bootconf.h>
49 #include <sys/archsystm.h>
50 #include <sys/debug.h>
51 #include <sys/elf.h>
52 #include <sys/spl.h>
53 #include <sys/time.h>
54 #include <sys/atomic.h>
55 #include <sys/sysmacros.h>
56 #include <sys/cmn_err.h>
57 #include <sys/modctl.h>
58 #include <sys/kobj.h>
59 #include <sys/panic.h>
60 #include <sys/reboot.h>
62 #include <sys/fp.h>
63 #include <sys/x86_archext.h>
64 #include <sys/auxv.h>
65 #include <sys/auxv_386.h>
66 #include <sys/dtrace.h>
67 #include <sys/brand.h>
68 #include <sys/machbrand.h>
70
71 extern const struct fnsave_state x87_initial;
72 extern const struct fxsave_state sse_initial;
73
74 /*
75 * Map an fnsave-formatted save area into an fxsave-formatted save area.
76 *
 77  * Most fields have the same width, content and semantics. However,
 78  * the tag word is compressed.
79 */
80 static void
 81 fnsave_to_fxsave(const struct fnsave_state *fn, struct fxsave_state *fx)
82 {
83 uint_t i, tagbits;
84
85 fx->fx_fcw = fn->f_fcw;
86 fx->fx_fsw = fn->f_fsw;
87
88 /*
89 * copy element by element (because of holes)
90 */
91 for (i = 0; i < 8; i++)
92 bcopy(&fn->f_st[i].fpr_16[0], &fx->fx_st[i].fpr_16[0],
93 sizeof (fn->f_st[0].fpr_16)); /* 80-bit x87-style floats */
94
95 /*
96 * synthesize compressed tag bits
97 */
98 fx->fx_fctw = 0;
99 for (tagbits = fn->f_ftw, i = 0; i < 8; i++, tagbits >>= 2)
100 if ((tagbits & 3) != 3)
101 fx->fx_fctw |= (1 << i);
102
103 fx->fx_fop = fn->f_fop;
104
105 #if defined(__amd64)
106 fx->fx_rip = (uint64_t)fn->f_eip;
107 fx->fx_rdp = (uint64_t)fn->f_dp;
108 #else
109 fx->fx_eip = fn->f_eip;
110 fx->fx_cs = fn->f_cs;
111 fx->__fx_ign0 = 0;
112 fx->fx_dp = fn->f_dp;
113 fx->fx_ds = fn->f_ds;
114 fx->__fx_ign1 = 0;
115 #endif
116 }
117
118 /*
119 * Map from an fxsave-format save area to an fnsave-format save area.
120 */
121 static void
 122 fxsave_to_fnsave(const struct fxsave_state *fx, struct fnsave_state *fn)
123 {
124 uint_t i, top, tagbits;
125
126 fn->f_fcw = fx->fx_fcw;
127 fn->__f_ign0 = 0;
128 fn->f_fsw = fx->fx_fsw;
129 fn->__f_ign1 = 0;
130
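	/*
	 * FPS_TOP covers bits 11..13 of the x87 status word: the current
	 * top-of-stack pointer, used below to index the physical registers.
	 */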
131 top = (fx->fx_fsw & FPS_TOP) >> 11;
132
133 /*
134 * copy element by element (because of holes)
135 */
136 for (i = 0; i < 8; i++)
137 bcopy(&fx->fx_st[i].fpr_16[0], &fn->f_st[i].fpr_16[0],
138 sizeof (fn->f_st[0].fpr_16)); /* 80-bit x87-style floats */
139
140 /*
141 * synthesize uncompressed tag bits
142 */
143 fn->f_ftw = 0;
144 for (tagbits = fx->fx_fctw, i = 0; i < 8; i++, tagbits >>= 1) {
145 uint_t ibit, expo;
146 const uint16_t *fpp;
147 static const uint16_t zero[5] = { 0, 0, 0, 0, 0 };
148
149 if ((tagbits & 1) == 0) {
150 fn->f_ftw |= 3 << (i << 1); /* empty */
151 continue;
152 }
153
154 /*
155 * (tags refer to *physical* registers)
156 */
157 fpp = &fx->fx_st[(i - top + 8) & 7].fpr_16[0];
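		/*
		 * In the 80-bit extended format, the explicit integer bit is
		 * the top bit of fpr_16[3] and the biased exponent is the
		 * low 15 bits of fpr_16[4].
		 */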
158 ibit = fpp[3] >> 15;
159 expo = fpp[4] & 0x7fff;
160
161 if (ibit && expo != 0 && expo != 0x7fff)
162 continue; /* valid fp number */
163
164 if (bcmp(fpp, &zero, sizeof (zero)))
165 fn->f_ftw |= 2 << (i << 1); /* NaN */
166 else
167 fn->f_ftw |= 1 << (i << 1); /* fp zero */
168 }
169
170 fn->f_fop = fx->fx_fop;
171
172 fn->__f_ign2 = 0;
173 #if defined(__amd64)
174 fn->f_eip = (uint32_t)fx->fx_rip;
175 fn->f_cs = U32CS_SEL;
176 fn->f_dp = (uint32_t)fx->fx_rdp;
177 fn->f_ds = UDS_SEL;
178 #else
179 fn->f_eip = fx->fx_eip;
180 fn->f_cs = fx->fx_cs;
181 fn->f_dp = fx->fx_dp;
182 fn->f_ds = fx->fx_ds;
183 #endif
184 fn->__f_ign3 = 0;
185 }
186
187 /*
188 * Map from an fpregset_t into an fxsave-format save area
189 */
190 static void
 191 fpregset_to_fxsave(const fpregset_t *fp, struct fxsave_state *fx)
192 {
193 #if defined(__amd64)
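	/*
	 * On amd64 the fpregset_t is laid out like the fxsave area, so a
	 * structure copy suffices.
	 */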
194 bcopy(fp, fx, sizeof (*fx));
195 #else
196 const struct fpchip_state *fc = &fp->fp_reg_set.fpchip_state;
197
198 fnsave_to_fxsave((const struct fnsave_state *)fc, fx);
199 fx->fx_mxcsr = fc->mxcsr;
200 bcopy(&fc->xmm[0], &fx->fx_xmm[0], sizeof (fc->xmm));
201 #endif
202 /*
203 * avoid useless #gp exceptions - mask reserved bits
204 */
205 fx->fx_mxcsr &= sse_mxcsr_mask;
206 }
207
208 /*
209 * Map from an fxsave-format save area into a fpregset_t
210 */
211 static void
 212 fxsave_to_fpregset(const struct fxsave_state *fx, fpregset_t *fp)
213 {
214 #if defined(__amd64)
215 bcopy(fx, fp, sizeof (*fx));
216 #else
217 struct fpchip_state *fc = &fp->fp_reg_set.fpchip_state;
218
219 fxsave_to_fnsave(fx, (struct fnsave_state *)fc);
220 fc->mxcsr = fx->fx_mxcsr;
221 bcopy(&fx->fx_xmm[0], &fc->xmm[0], sizeof (fc->xmm));
222 #endif
223 }
224
225 #if defined(_SYSCALL32_IMPL)
226 static void
 227 fpregset32_to_fxsave(const fpregset32_t *fp, struct fxsave_state *fx)
228 {
229 const struct fpchip32_state *fc = &fp->fp_reg_set.fpchip_state;
230
231 fnsave_to_fxsave((const struct fnsave_state *)fc, fx);
232 /*
233 * avoid useless #gp exceptions - mask reserved bits
234 */
235 fx->fx_mxcsr = sse_mxcsr_mask & fc->mxcsr;
236 bcopy(&fc->xmm[0], &fx->fx_xmm[0], sizeof (fc->xmm));
237 }
238
239 static void
 240 fxsave_to_fpregset32(const struct fxsave_state *fx, fpregset32_t *fp)
241 {
242 struct fpchip32_state *fc = &fp->fp_reg_set.fpchip_state;
243
244 fxsave_to_fnsave(fx, (struct fnsave_state *)fc);
245 fc->mxcsr = fx->fx_mxcsr;
246 bcopy(&fx->fx_xmm[0], &fc->xmm[0], sizeof (fc->xmm));
247 }
248
249 static void
 250 fpregset_nto32(const fpregset_t *src, fpregset32_t *dst)
251 {
252 fxsave_to_fpregset32((struct fxsave_state *)src, dst);
253 dst->fp_reg_set.fpchip_state.status =
254 src->fp_reg_set.fpchip_state.status;
255 dst->fp_reg_set.fpchip_state.xstatus =
256 src->fp_reg_set.fpchip_state.xstatus;
257 }
258
259 static void
 260 fpregset_32ton(const fpregset32_t *src, fpregset_t *dst)
261 {
262 fpregset32_to_fxsave(src, (struct fxsave_state *)dst);
263 dst->fp_reg_set.fpchip_state.status =
264 src->fp_reg_set.fpchip_state.status;
265 dst->fp_reg_set.fpchip_state.xstatus =
266 src->fp_reg_set.fpchip_state.xstatus;
267 }
268 #endif
269
270 /*
271 * Set floating-point registers from a native fpregset_t.
272 */
273 void
 274 setfpregs(klwp_t *lwp, fpregset_t *fp)
275 {
276 struct fpu_ctx *fpu = &lwp->lwp_pcb.pcb_fpu;
277
278 if (fpu->fpu_flags & FPU_EN) {
279 if (!(fpu->fpu_flags & FPU_VALID)) {
280 /*
281 * FPU context is still active, release the
282 * ownership.
283 */
284 fp_free(fpu, 0);
285 }
286 }
287 /*
288 * Else: if we are trying to change the FPU state of a thread which
289 * hasn't yet initialized floating point, store the state in
290 * the pcb and indicate that the state is valid. When the
291 * thread enables floating point, it will use this state instead
292 * of the default state.
293 */
294
295 switch (fp_save_mech) {
296 #if defined(__i386)
297 case FP_FNSAVE:
298 bcopy(fp, &fpu->fpu_regs.kfpu_u.kfpu_fn,
299 sizeof (fpu->fpu_regs.kfpu_u.kfpu_fn));
300 break;
301 #endif
302 case FP_FXSAVE:
303 fpregset_to_fxsave(fp, &fpu->fpu_regs.kfpu_u.kfpu_fx);
304 fpu->fpu_regs.kfpu_xstatus =
305 fp->fp_reg_set.fpchip_state.xstatus;
306 break;
307
308 case FP_XSAVE:
309 fpregset_to_fxsave(fp,
310 &fpu->fpu_regs.kfpu_u.kfpu_xs.xs_fxsave);
311 fpu->fpu_regs.kfpu_xstatus =
312 fp->fp_reg_set.fpchip_state.xstatus;
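		/*
		 * Mark the legacy x87 and SSE components as present in the
		 * xsave area so that xrstor will load the state just stored.
		 */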
313 fpu->fpu_regs.kfpu_u.kfpu_xs.xs_xstate_bv |=
314 (XFEATURE_LEGACY_FP | XFEATURE_SSE);
315 break;
316 default:
317 panic("Invalid fp_save_mech");
318 /*NOTREACHED*/
319 }
320
321 fpu->fpu_regs.kfpu_status = fp->fp_reg_set.fpchip_state.status;
322 fpu->fpu_flags |= FPU_VALID;
323 }
324
325 /*
326 * Get floating-point registers into a native fpregset_t.
327 */
328 void
 329 getfpregs(klwp_t *lwp, fpregset_t *fp)
330 {
331 struct fpu_ctx *fpu = &lwp->lwp_pcb.pcb_fpu;
332
333 kpreempt_disable();
334 if (fpu->fpu_flags & FPU_EN) {
335 /*
336 * If we have FPU hw and the thread's pcb doesn't have
337 * a valid FPU state then get the state from the hw.
338 */
339 if (fpu_exists && ttolwp(curthread) == lwp &&
340 !(fpu->fpu_flags & FPU_VALID))
341 fp_save(fpu); /* get the current FPU state */
342 }
343
344 /*
345 * There are 3 possible cases we have to be aware of here:
346 *
347 * 1. FPU is enabled. FPU state is stored in the current LWP.
348 *
349 * 2. FPU is not enabled, and there have been no intervening /proc
350 * modifications. Return initial FPU state.
351 *
352 * 3. FPU is not enabled, but a /proc consumer has modified FPU state.
353 * FPU state is stored in the current LWP.
354 */
355 if ((fpu->fpu_flags & FPU_EN) || (fpu->fpu_flags & FPU_VALID)) {
356 /*
357 * Cases 1 and 3.
358 */
359 switch (fp_save_mech) {
360 #if defined(__i386)
361 case FP_FNSAVE:
362 bcopy(&fpu->fpu_regs.kfpu_u.kfpu_fn, fp,
363 sizeof (fpu->fpu_regs.kfpu_u.kfpu_fn));
364 break;
365 #endif
366 case FP_FXSAVE:
367 fxsave_to_fpregset(&fpu->fpu_regs.kfpu_u.kfpu_fx, fp);
368 fp->fp_reg_set.fpchip_state.xstatus =
369 fpu->fpu_regs.kfpu_xstatus;
370 break;
371 case FP_XSAVE:
372 fxsave_to_fpregset(
373 &fpu->fpu_regs.kfpu_u.kfpu_xs.xs_fxsave, fp);
374 fp->fp_reg_set.fpchip_state.xstatus =
375 fpu->fpu_regs.kfpu_xstatus;
376 break;
377 default:
378 panic("Invalid fp_save_mech");
379 /*NOTREACHED*/
380 }
381 fp->fp_reg_set.fpchip_state.status = fpu->fpu_regs.kfpu_status;
382 } else {
383 /*
384 * Case 2.
385 */
386 switch (fp_save_mech) {
387 #if defined(__i386)
388 case FP_FNSAVE:
389 bcopy(&x87_initial, fp, sizeof (x87_initial));
390 break;
391 #endif
392 case FP_FXSAVE:
393 case FP_XSAVE:
394 /*
 395 			 * For now, there are no AVX-specific fields in the ABI.
 396 			 * If any are added in the future, they will need to be
 397 			 * initialized here as well.
398 */
399 fxsave_to_fpregset(&sse_initial, fp);
400 fp->fp_reg_set.fpchip_state.xstatus =
401 fpu->fpu_regs.kfpu_xstatus;
402 break;
403 default:
404 panic("Invalid fp_save_mech");
405 /*NOTREACHED*/
406 }
407 fp->fp_reg_set.fpchip_state.status = fpu->fpu_regs.kfpu_status;
408 }
409 kpreempt_enable();
410 }
411
412 #if defined(_SYSCALL32_IMPL)
413
414 /*
415 * Set floating-point registers from an fpregset32_t.
416 */
417 void
 418 setfpregs32(klwp_t *lwp, fpregset32_t *fp)
419 {
420 fpregset_t fpregs;
421
422 fpregset_32ton(fp, &fpregs);
423 setfpregs(lwp, &fpregs);
424 }
425
426 /*
427 * Get floating-point registers into an fpregset32_t.
428 */
429 void
 430 getfpregs32(klwp_t *lwp, fpregset32_t *fp)
431 {
432 fpregset_t fpregs;
433
434 getfpregs(lwp, &fpregs);
435 fpregset_nto32(&fpregs, fp);
436 }
437
438 #endif /* _SYSCALL32_IMPL */
439
440 /*
441 * Return the general registers
442 */
443 void
 444 getgregs(klwp_t *lwp, gregset_t grp)
445 {
446 struct regs *rp = lwptoregs(lwp);
447 #if defined(__amd64)
448 struct pcb *pcb = &lwp->lwp_pcb;
449 int thisthread = lwptot(lwp) == curthread;
450
451 grp[REG_RDI] = rp->r_rdi;
452 grp[REG_RSI] = rp->r_rsi;
453 grp[REG_RDX] = rp->r_rdx;
454 grp[REG_RCX] = rp->r_rcx;
455 grp[REG_R8] = rp->r_r8;
456 grp[REG_R9] = rp->r_r9;
457 grp[REG_RAX] = rp->r_rax;
458 grp[REG_RBX] = rp->r_rbx;
459 grp[REG_RBP] = rp->r_rbp;
460 grp[REG_R10] = rp->r_r10;
461 grp[REG_R11] = rp->r_r11;
462 grp[REG_R12] = rp->r_r12;
463 grp[REG_R13] = rp->r_r13;
464 grp[REG_R14] = rp->r_r14;
465 grp[REG_R15] = rp->r_r15;
466 grp[REG_FSBASE] = pcb->pcb_fsbase;
467 grp[REG_GSBASE] = pcb->pcb_gsbase;
468 if (thisthread)
469 kpreempt_disable();
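	/*
	 * When pcb_rupdate is set, the current user segment selectors live
	 * in the pcb (they will be pushed out via update_sregs()), not in
	 * the saved register frame.
	 */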
470 if (pcb->pcb_rupdate == 1) {
471 grp[REG_DS] = pcb->pcb_ds;
472 grp[REG_ES] = pcb->pcb_es;
473 grp[REG_FS] = pcb->pcb_fs;
474 grp[REG_GS] = pcb->pcb_gs;
475 } else {
476 grp[REG_DS] = rp->r_ds;
477 grp[REG_ES] = rp->r_es;
478 grp[REG_FS] = rp->r_fs;
479 grp[REG_GS] = rp->r_gs;
480 }
481 if (thisthread)
482 kpreempt_enable();
483 grp[REG_TRAPNO] = rp->r_trapno;
484 grp[REG_ERR] = rp->r_err;
485 grp[REG_RIP] = rp->r_rip;
486 grp[REG_CS] = rp->r_cs;
487 grp[REG_SS] = rp->r_ss;
488 grp[REG_RFL] = rp->r_rfl;
489 grp[REG_RSP] = rp->r_rsp;
490 #else
491 bcopy(&rp->r_gs, grp, sizeof (gregset_t));
492 #endif
493 }
494
495 #if defined(_SYSCALL32_IMPL)
496
497 void
 498 getgregs32(klwp_t *lwp, gregset32_t grp)
499 {
500 struct regs *rp = lwptoregs(lwp);
501 struct pcb *pcb = &lwp->lwp_pcb;
502 int thisthread = lwptot(lwp) == curthread;
503
504 if (thisthread)
505 kpreempt_disable();
506 if (pcb->pcb_rupdate == 1) {
507 grp[GS] = (uint16_t)pcb->pcb_gs;
508 grp[FS] = (uint16_t)pcb->pcb_fs;
509 grp[DS] = (uint16_t)pcb->pcb_ds;
510 grp[ES] = (uint16_t)pcb->pcb_es;
511 } else {
512 grp[GS] = (uint16_t)rp->r_gs;
513 grp[FS] = (uint16_t)rp->r_fs;
514 grp[DS] = (uint16_t)rp->r_ds;
515 grp[ES] = (uint16_t)rp->r_es;
516 }
517 if (thisthread)
518 kpreempt_enable();
519 grp[EDI] = (greg32_t)rp->r_rdi;
520 grp[ESI] = (greg32_t)rp->r_rsi;
521 grp[EBP] = (greg32_t)rp->r_rbp;
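	/*
	 * There is no saved 32-bit kernel %esp to report here; the user
	 * stack pointer is returned in the UESP slot below.
	 */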
522 grp[ESP] = 0;
523 grp[EBX] = (greg32_t)rp->r_rbx;
524 grp[EDX] = (greg32_t)rp->r_rdx;
525 grp[ECX] = (greg32_t)rp->r_rcx;
526 grp[EAX] = (greg32_t)rp->r_rax;
527 grp[TRAPNO] = (greg32_t)rp->r_trapno;
528 grp[ERR] = (greg32_t)rp->r_err;
529 grp[EIP] = (greg32_t)rp->r_rip;
530 grp[CS] = (uint16_t)rp->r_cs;
531 grp[EFL] = (greg32_t)rp->r_rfl;
532 grp[UESP] = (greg32_t)rp->r_rsp;
533 grp[SS] = (uint16_t)rp->r_ss;
534 }
535
536 void
 537 ucontext_32ton(const ucontext32_t *src, ucontext_t *dst)
538 {
539 mcontext_t *dmc = &dst->uc_mcontext;
540 const mcontext32_t *smc = &src->uc_mcontext;
541
542 bzero(dst, sizeof (*dst));
543 dst->uc_flags = src->uc_flags;
544 dst->uc_link = (ucontext_t *)(uintptr_t)src->uc_link;
545
546 bcopy(&src->uc_sigmask, &dst->uc_sigmask, sizeof (dst->uc_sigmask));
547
548 dst->uc_stack.ss_sp = (void *)(uintptr_t)src->uc_stack.ss_sp;
549 dst->uc_stack.ss_size = (size_t)src->uc_stack.ss_size;
550 dst->uc_stack.ss_flags = src->uc_stack.ss_flags;
551
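	/*
	 * Copy the 32-bit register values into the 64-bit gregset,
	 * zero-extending each one.
	 */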
552 dmc->gregs[REG_GS] = (greg_t)(uint32_t)smc->gregs[GS];
553 dmc->gregs[REG_FS] = (greg_t)(uint32_t)smc->gregs[FS];
554 dmc->gregs[REG_ES] = (greg_t)(uint32_t)smc->gregs[ES];
555 dmc->gregs[REG_DS] = (greg_t)(uint32_t)smc->gregs[DS];
556 dmc->gregs[REG_RDI] = (greg_t)(uint32_t)smc->gregs[EDI];
557 dmc->gregs[REG_RSI] = (greg_t)(uint32_t)smc->gregs[ESI];
558 dmc->gregs[REG_RBP] = (greg_t)(uint32_t)smc->gregs[EBP];
559 dmc->gregs[REG_RBX] = (greg_t)(uint32_t)smc->gregs[EBX];
560 dmc->gregs[REG_RDX] = (greg_t)(uint32_t)smc->gregs[EDX];
561 dmc->gregs[REG_RCX] = (greg_t)(uint32_t)smc->gregs[ECX];
562 dmc->gregs[REG_RAX] = (greg_t)(uint32_t)smc->gregs[EAX];
563 dmc->gregs[REG_TRAPNO] = (greg_t)(uint32_t)smc->gregs[TRAPNO];
564 dmc->gregs[REG_ERR] = (greg_t)(uint32_t)smc->gregs[ERR];
565 dmc->gregs[REG_RIP] = (greg_t)(uint32_t)smc->gregs[EIP];
566 dmc->gregs[REG_CS] = (greg_t)(uint32_t)smc->gregs[CS];
567 dmc->gregs[REG_RFL] = (greg_t)(uint32_t)smc->gregs[EFL];
568 dmc->gregs[REG_RSP] = (greg_t)(uint32_t)smc->gregs[UESP];
569 dmc->gregs[REG_SS] = (greg_t)(uint32_t)smc->gregs[SS];
570
571 /*
 572 	 * A valid fpregs is only copied in if uc.uc_flags has UC_FPU set;
 573 	 * otherwise there is no guarantee that anything in fpregs is valid.
574 */
575 if (src->uc_flags & UC_FPU)
576 fpregset_32ton(&src->uc_mcontext.fpregs,
577 &dst->uc_mcontext.fpregs);
578 }
579
580 #endif /* _SYSCALL32_IMPL */
581
582 /*
583 * Return the user-level PC.
584 * If in a system call, return the address of the syscall trap.
585 */
586 greg_t
 587 getuserpc()
588 {
589 greg_t upc = lwptoregs(ttolwp(curthread))->r_pc;
590 uint32_t insn;
591
592 if (curthread->t_sysnum == 0)
593 return (upc);
594
595 /*
596 * We might've gotten here from sysenter (0xf 0x34),
 597 	 * syscall (0xf 0x05) or lcall (0x9a 0 0 0 0 0x27 0).
598 *
599 * Go peek at the binary to figure it out..
600 */
 601 	if (fuword32((void *)(upc - 2), &insn) != -1 &&
 602 	    ((insn & 0xffff) == 0x340f || (insn & 0xffff) == 0x050f))
603 return (upc - 2);
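	/* otherwise assume the 7-byte lcall sequence shown above */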
604 return (upc - 7);
605 }
606
607 /*
608 * Protect segment registers from non-user privilege levels and GDT selectors
609 * other than USER_CS, USER_DS and lwp FS and GS values. If the segment
610 * selector is non-null and not USER_CS/USER_DS, we make sure that the
611 * TI bit is set to point into the LDT and that the RPL is set to 3.
612 *
613 * Since struct regs stores each 16-bit segment register as a 32-bit greg_t, we
614 * also explicitly zero the top 16 bits since they may be coming from the
615 * user's address space via setcontext(2) or /proc.
616 *
 617  * A note about the null selector: when running on the hypervisor, if we
 618  * allow a process to set its %cs to the null selector with an RPL of 0,
 619  * the hypervisor will crash the domain. On bare metal we would just get
 620  * a #gp fault and be able to kill the process and continue on. Therefore
 621  * we force the RPL to SEL_UPL even for the null selector when setting %cs.
622 */
623
624 #if defined(IS_CS) || defined(IS_NOT_CS)
625 #error "IS_CS and IS_NOT_CS already defined"
626 #endif
627
628 #define IS_CS 1
629 #define IS_NOT_CS 0
630
631 /*ARGSUSED*/
632 static greg_t
 633 fix_segreg(greg_t sr, int iscs, model_t datamodel)
634 {
635 switch (sr &= 0xffff) {
636
637 case 0:
638 if (iscs == IS_CS)
639 return (0 | SEL_UPL);
640 else
641 return (0);
642
643 #if defined(__amd64)
644 /*
 645 	 * If an lwp attempts to switch data models, force its
 646 	 * code selector to the null selector.
647 */
648 case U32CS_SEL:
649 if (datamodel == DATAMODEL_NATIVE)
650 return (0 | SEL_UPL);
651 else
652 return (sr);
653
654 case UCS_SEL:
655 if (datamodel == DATAMODEL_ILP32)
656 return (0 | SEL_UPL);
657 #elif defined(__i386)
658 case UCS_SEL:
659 #endif
660 /*FALLTHROUGH*/
661 case UDS_SEL:
662 case LWPFS_SEL:
663 case LWPGS_SEL:
664 case SEL_UPL:
665 return (sr);
666 default:
667 break;
668 }
669
670 /*
671 * Force it into the LDT in ring 3 for 32-bit processes, which by
672 * default do not have an LDT, so that any attempt to use an invalid
 673 	 * selector will reference the (non-existent) LDT, and cause a #gp
674 * fault for the process.
675 *
676 * 64-bit processes get the null gdt selector since they
677 * are not allowed to have a private LDT.
678 */
679 #if defined(__amd64)
680 if (datamodel == DATAMODEL_ILP32) {
681 return (sr | SEL_TI_LDT | SEL_UPL);
682 } else {
683 if (iscs == IS_CS)
684 return (0 | SEL_UPL);
685 else
686 return (0);
687 }
688
689 #elif defined(__i386)
690 return (sr | SEL_TI_LDT | SEL_UPL);
691 #endif
692 }
693
694 /*
695 * Set general registers.
696 */
697 void
 698 setgregs(klwp_t *lwp, gregset_t grp)
699 {
700 struct regs *rp = lwptoregs(lwp);
701 model_t datamodel = lwp_getdatamodel(lwp);
702
703 #if defined(__amd64)
704 struct pcb *pcb = &lwp->lwp_pcb;
705 int thisthread = lwptot(lwp) == curthread;
706
707 if (datamodel == DATAMODEL_NATIVE) {
708
709 if (thisthread)
710 (void) save_syscall_args(); /* copy the args */
711
712 rp->r_rdi = grp[REG_RDI];
713 rp->r_rsi = grp[REG_RSI];
714 rp->r_rdx = grp[REG_RDX];
715 rp->r_rcx = grp[REG_RCX];
716 rp->r_r8 = grp[REG_R8];
717 rp->r_r9 = grp[REG_R9];
718 rp->r_rax = grp[REG_RAX];
719 rp->r_rbx = grp[REG_RBX];
720 rp->r_rbp = grp[REG_RBP];
721 rp->r_r10 = grp[REG_R10];
722 rp->r_r11 = grp[REG_R11];
723 rp->r_r12 = grp[REG_R12];
724 rp->r_r13 = grp[REG_R13];
725 rp->r_r14 = grp[REG_R14];
726 rp->r_r15 = grp[REG_R15];
727 rp->r_trapno = grp[REG_TRAPNO];
728 rp->r_err = grp[REG_ERR];
729 rp->r_rip = grp[REG_RIP];
730 /*
731 * Setting %cs or %ss to anything else is quietly but
732 * quite definitely forbidden!
733 */
734 rp->r_cs = UCS_SEL;
735 rp->r_ss = UDS_SEL;
736 rp->r_rsp = grp[REG_RSP];
737
738 if (thisthread)
739 kpreempt_disable();
740
741 pcb->pcb_ds = UDS_SEL;
742 pcb->pcb_es = UDS_SEL;
743
744 /*
745 * 64-bit processes -are- allowed to set their fsbase/gsbase
746 * values directly, but only if they're using the segment
747 * selectors that allow that semantic.
748 *
749 * (32-bit processes must use lwp_set_private().)
750 */
751 pcb->pcb_fsbase = grp[REG_FSBASE];
752 pcb->pcb_gsbase = grp[REG_GSBASE];
753 pcb->pcb_fs = fix_segreg(grp[REG_FS], IS_NOT_CS, datamodel);
754 pcb->pcb_gs = fix_segreg(grp[REG_GS], IS_NOT_CS, datamodel);
755
756 /*
757 * Ensure that we go out via update_sregs
758 */
759 pcb->pcb_rupdate = 1;
760 lwptot(lwp)->t_post_sys = 1;
761 if (thisthread)
762 kpreempt_enable();
763 #if defined(_SYSCALL32_IMPL)
764 } else {
765 rp->r_rdi = (uint32_t)grp[REG_RDI];
766 rp->r_rsi = (uint32_t)grp[REG_RSI];
767 rp->r_rdx = (uint32_t)grp[REG_RDX];
768 rp->r_rcx = (uint32_t)grp[REG_RCX];
769 rp->r_rax = (uint32_t)grp[REG_RAX];
770 rp->r_rbx = (uint32_t)grp[REG_RBX];
771 rp->r_rbp = (uint32_t)grp[REG_RBP];
772 rp->r_trapno = (uint32_t)grp[REG_TRAPNO];
773 rp->r_err = (uint32_t)grp[REG_ERR];
774 rp->r_rip = (uint32_t)grp[REG_RIP];
775
776 rp->r_cs = fix_segreg(grp[REG_CS], IS_CS, datamodel);
777 rp->r_ss = fix_segreg(grp[REG_DS], IS_NOT_CS, datamodel);
778
779 rp->r_rsp = (uint32_t)grp[REG_RSP];
780
781 if (thisthread)
782 kpreempt_disable();
783
784 pcb->pcb_ds = fix_segreg(grp[REG_DS], IS_NOT_CS, datamodel);
785 pcb->pcb_es = fix_segreg(grp[REG_ES], IS_NOT_CS, datamodel);
786
787 /*
788 * (See fsbase/gsbase commentary above)
789 */
790 pcb->pcb_fs = fix_segreg(grp[REG_FS], IS_NOT_CS, datamodel);
791 pcb->pcb_gs = fix_segreg(grp[REG_GS], IS_NOT_CS, datamodel);
792
793 /*
794 * Ensure that we go out via update_sregs
795 */
796 pcb->pcb_rupdate = 1;
797 lwptot(lwp)->t_post_sys = 1;
798 if (thisthread)
799 kpreempt_enable();
800 #endif
801 }
802
803 /*
804 * Only certain bits of the flags register can be modified.
805 */
806 rp->r_rfl = (rp->r_rfl & ~PSL_USERMASK) |
807 (grp[REG_RFL] & PSL_USERMASK);
808
809 #elif defined(__i386)
810
811 /*
812 * Only certain bits of the flags register can be modified.
813 */
814 grp[EFL] = (rp->r_efl & ~PSL_USERMASK) | (grp[EFL] & PSL_USERMASK);
815
816 /*
817 * Copy saved registers from user stack.
818 */
819 bcopy(grp, &rp->r_gs, sizeof (gregset_t));
820
821 rp->r_cs = fix_segreg(rp->r_cs, IS_CS, datamodel);
822 rp->r_ss = fix_segreg(rp->r_ss, IS_NOT_CS, datamodel);
823 rp->r_ds = fix_segreg(rp->r_ds, IS_NOT_CS, datamodel);
824 rp->r_es = fix_segreg(rp->r_es, IS_NOT_CS, datamodel);
825 rp->r_fs = fix_segreg(rp->r_fs, IS_NOT_CS, datamodel);
826 rp->r_gs = fix_segreg(rp->r_gs, IS_NOT_CS, datamodel);
827
828 #endif /* __i386 */
829 }
830
831 /*
832 * Determine whether eip is likely to have an interrupt frame
833 * on the stack. We do this by comparing the address to the
834 * range of addresses spanned by several well-known routines.
835 */
836 extern void _interrupt();
837 extern void _allsyscalls();
838 extern void _cmntrap();
839 extern void fakesoftint();
840
841 extern size_t _interrupt_size;
842 extern size_t _allsyscalls_size;
843 extern size_t _cmntrap_size;
844 extern size_t _fakesoftint_size;
845
846 /*
847 * Get a pc-only stacktrace. Used for kmem_alloc() buffer ownership tracking.
848 * Returns MIN(current stack depth, pcstack_limit).
849 */
850 int
 851 getpcstack(pc_t *pcstack, int pcstack_limit)
852 {
853 struct frame *fp = (struct frame *)getfp();
854 struct frame *nextfp, *minfp, *stacktop;
855 int depth = 0;
856 int on_intr;
857 uintptr_t pc;
858
859 if ((on_intr = CPU_ON_INTR(CPU)) != 0)
860 stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
861 else
862 stacktop = (struct frame *)curthread->t_stk;
863 minfp = fp;
864
865 pc = ((struct regs *)fp)->r_pc;
866
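	/*
	 * Walk the frame pointer chain, recording each saved PC; each new
	 * frame must lie strictly between the previous frame and the top of
	 * the stack, or we either hop off the interrupt stack or stop.
	 */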
867 while (depth < pcstack_limit) {
868 nextfp = (struct frame *)fp->fr_savfp;
869 pc = fp->fr_savpc;
870 if (nextfp <= minfp || nextfp >= stacktop) {
871 if (on_intr) {
872 /*
873 * Hop from interrupt stack to thread stack.
874 */
875 stacktop = (struct frame *)curthread->t_stk;
876 minfp = (struct frame *)curthread->t_stkbase;
877 on_intr = 0;
878 continue;
879 }
880 break;
881 }
882 pcstack[depth++] = (pc_t)pc;
883 fp = nextfp;
884 minfp = fp;
885 }
886 return (depth);
887 }
888
889 /*
890 * The following ELF header fields are defined as processor-specific
 891  * in the ABI:
892 *
893 * e_ident[EI_DATA] encoding of the processor-specific
894 * data in the object file
895 * e_machine processor identification
896 * e_flags processor-specific flags associated
897 * with the file
898 */
899
900 /*
901 * The value of at_flags reflects a platform's cpu module support.
 902  * at_flags is used when checking whether a binary is allowed to execute
 903  * and is passed as the value of the AT_FLAGS auxiliary vector.
904 */
905 int at_flags = 0;
906
907 /*
908 * Check the processor-specific fields of an ELF header.
909 *
910 * returns 1 if the fields are valid, 0 otherwise
911 */
912 /*ARGSUSED2*/
913 int
 914 elfheadcheck(
915 unsigned char e_data,
916 Elf32_Half e_machine,
917 Elf32_Word e_flags)
918 {
919 if (e_data != ELFDATA2LSB)
920 return (0);
921 #if defined(__amd64)
922 if (e_machine == EM_AMD64)
923 return (1);
924 #endif
925 return (e_machine == EM_386);
926 }
927
928 uint_t auxv_hwcap_include = 0; /* patch to enable unrecognized features */
929 uint_t auxv_hwcap_include_2 = 0; /* second word */
930 uint_t auxv_hwcap_exclude = 0; /* patch for broken cpus, debugging */
931 uint_t auxv_hwcap_exclude_2 = 0; /* second word */
932 #if defined(_SYSCALL32_IMPL)
933 uint_t auxv_hwcap32_include = 0; /* ditto for 32-bit apps */
934 uint_t auxv_hwcap32_include_2 = 0; /* ditto for 32-bit apps */
935 uint_t auxv_hwcap32_exclude = 0; /* ditto for 32-bit apps */
936 uint_t auxv_hwcap32_exclude_2 = 0; /* ditto for 32-bit apps */
937 #endif
938
939 /*
940 * Gather information about the processor and place it into auxv_hwcap
941 * so that it can be exported to the linker via the aux vector.
942 *
943 * We use this seemingly complicated mechanism so that we can ensure
944 * that /etc/system can be used to override what the system can or
945 * cannot discover for itself.
946 */
947 void
 948 bind_hwcap(void)
949 {
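	/*
	 * Ask the cpuid code for the two hardware capability words that
	 * describe this processor.
	 */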
950 uint_t cpu_hwcap_flags[2];
951 cpuid_pass4(NULL, cpu_hwcap_flags);
952
953 auxv_hwcap = (auxv_hwcap_include | cpu_hwcap_flags[0]) &
954 ~auxv_hwcap_exclude;
955 auxv_hwcap_2 = (auxv_hwcap_include_2 | cpu_hwcap_flags[1]) &
956 ~auxv_hwcap_exclude_2;
957
958 #if defined(__amd64)
959 /*
960 * On AMD processors, sysenter just doesn't work at all
961 * when the kernel is in long mode. On IA-32e processors
 962 	 * it does, but there's no real point in the alternate
963 * mechanism when syscall works on both.
964 *
965 * Besides, the kernel's sysenter handler is expecting a
966 * 32-bit lwp ...
967 */
968 auxv_hwcap &= ~AV_386_SEP;
969 #else
970 /*
971 * 32-bit processes can -always- use the lahf/sahf instructions
972 */
973 auxv_hwcap |= AV_386_AHF;
974 #endif
975
976 if (auxv_hwcap_include || auxv_hwcap_exclude || auxv_hwcap_include_2 ||
977 auxv_hwcap_exclude_2) {
978 /*
979 * The below assignment is regrettably required to get lint
980 * to accept the validity of our format string. The format
981 * string is in fact valid, but whatever intelligence in lint
982 * understands the cmn_err()-specific %b appears to have an
983 * off-by-one error: it (mistakenly) complains about bit
984 * number 32 (even though this is explicitly permitted).
985 * Normally, one would will away such warnings with a "LINTED"
986 * directive, but for reasons unclear and unknown, lint
987 * refuses to be assuaged in this case. Fortunately, lint
988 * doesn't pretend to have solved the Halting Problem --
989 * and as soon as the format string is programmatic, it
990 * knows enough to shut up.
991 */
992 char *fmt = "?user ABI extensions: %b\n";
993 cmn_err(CE_CONT, fmt, auxv_hwcap, FMT_AV_386);
994 fmt = "?user ABI extensions (word 2): %b\n";
995 cmn_err(CE_CONT, fmt, auxv_hwcap_2, FMT_AV_386_2);
996 }
997
998 #if defined(_SYSCALL32_IMPL)
999 auxv_hwcap32 = (auxv_hwcap32_include | cpu_hwcap_flags[0]) &
1000 ~auxv_hwcap32_exclude;
1001 auxv_hwcap32_2 = (auxv_hwcap32_include_2 | cpu_hwcap_flags[1]) &
1002 ~auxv_hwcap32_exclude_2;
1003
1004 #if defined(__amd64)
1005 /*
1006 * If this is an amd64 architecture machine from Intel, then
1007 * syscall -doesn't- work in compatibility mode, only sysenter does.
1008 *
1009 * Sigh.
1010 */
1011 if (!cpuid_syscall32_insn(NULL))
1012 auxv_hwcap32 &= ~AV_386_AMD_SYSC;
1013
1014 /*
1015 * 32-bit processes can -always- use the lahf/sahf instructions
1016 */
1017 auxv_hwcap32 |= AV_386_AHF;
1018 #endif
1019
1020 if (auxv_hwcap32_include || auxv_hwcap32_exclude ||
1021 auxv_hwcap32_include_2 || auxv_hwcap32_exclude_2) {
1022 /*
1023 * See the block comment in the cmn_err() of auxv_hwcap, above.
1024 */
1025 char *fmt = "?32-bit user ABI extensions: %b\n";
1026 cmn_err(CE_CONT, fmt, auxv_hwcap32, FMT_AV_386);
1027 fmt = "?32-bit user ABI extensions (word 2): %b\n";
1028 cmn_err(CE_CONT, fmt, auxv_hwcap32_2, FMT_AV_386_2);
1029 }
1030 #endif
1031 }
1032
1033 /*
1034 * sync_icache() - this is called
 1035  * in proc/fs/prusrio.c. x86 has a unified cache and therefore
1036 * this is a nop.
1037 */
1038 /* ARGSUSED */
1039 void
 1040 sync_icache(caddr_t addr, uint_t len)
1041 {
1042 /* Do nothing for now */
1043 }
1044
1045 /*ARGSUSED*/
1046 void
 1047 sync_data_memory(caddr_t va, size_t len)
1048 {
1049 /* Not implemented for this platform */
1050 }
1051
1052 int
 1053 __ipltospl(int ipl)
1054 {
1055 return (ipltospl(ipl));
1056 }
1057
1058 /*
1059 * The panic code invokes panic_saveregs() to record the contents of a
1060 * regs structure into the specified panic_data structure for debuggers.
1061 */
1062 void
 1063 panic_saveregs(panic_data_t *pdp, struct regs *rp)
1064 {
1065 panic_nv_t *pnv = PANICNVGET(pdp);
1066
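	/*
	 * Snapshot the control registers (GDT, IDT, LDT, task register and
	 * %cr0-%cr4) so they can be recorded alongside the trap registers.
	 */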
1067 struct cregs creg;
1068
1069 getcregs(&creg);
1070
1071 #if defined(__amd64)
1072 PANICNVADD(pnv, "rdi", rp->r_rdi);
1073 PANICNVADD(pnv, "rsi", rp->r_rsi);
1074 PANICNVADD(pnv, "rdx", rp->r_rdx);
1075 PANICNVADD(pnv, "rcx", rp->r_rcx);
1076 PANICNVADD(pnv, "r8", rp->r_r8);
1077 PANICNVADD(pnv, "r9", rp->r_r9);
1078 PANICNVADD(pnv, "rax", rp->r_rax);
1079 PANICNVADD(pnv, "rbx", rp->r_rbx);
1080 PANICNVADD(pnv, "rbp", rp->r_rbp);
1081 PANICNVADD(pnv, "r10", rp->r_r10);
1082 PANICNVADD(pnv, "r11", rp->r_r11);
1083 PANICNVADD(pnv, "r12", rp->r_r12);
1084 PANICNVADD(pnv, "r13", rp->r_r13);
1085 PANICNVADD(pnv, "r14", rp->r_r14);
1086 PANICNVADD(pnv, "r15", rp->r_r15);
1087 PANICNVADD(pnv, "fsbase", rdmsr(MSR_AMD_FSBASE));
1088 PANICNVADD(pnv, "gsbase", rdmsr(MSR_AMD_GSBASE));
1089 PANICNVADD(pnv, "ds", rp->r_ds);
1090 PANICNVADD(pnv, "es", rp->r_es);
1091 PANICNVADD(pnv, "fs", rp->r_fs);
1092 PANICNVADD(pnv, "gs", rp->r_gs);
1093 PANICNVADD(pnv, "trapno", rp->r_trapno);
1094 PANICNVADD(pnv, "err", rp->r_err);
1095 PANICNVADD(pnv, "rip", rp->r_rip);
1096 PANICNVADD(pnv, "cs", rp->r_cs);
1097 PANICNVADD(pnv, "rflags", rp->r_rfl);
1098 PANICNVADD(pnv, "rsp", rp->r_rsp);
1099 PANICNVADD(pnv, "ss", rp->r_ss);
1100 PANICNVADD(pnv, "gdt_hi", (uint64_t)(creg.cr_gdt._l[3]));
1101 PANICNVADD(pnv, "gdt_lo", (uint64_t)(creg.cr_gdt._l[0]));
1102 PANICNVADD(pnv, "idt_hi", (uint64_t)(creg.cr_idt._l[3]));
1103 PANICNVADD(pnv, "idt_lo", (uint64_t)(creg.cr_idt._l[0]));
1104 #elif defined(__i386)
1105 PANICNVADD(pnv, "gs", (uint32_t)rp->r_gs);
1106 PANICNVADD(pnv, "fs", (uint32_t)rp->r_fs);
1107 PANICNVADD(pnv, "es", (uint32_t)rp->r_es);
1108 PANICNVADD(pnv, "ds", (uint32_t)rp->r_ds);
1109 PANICNVADD(pnv, "edi", (uint32_t)rp->r_edi);
1110 PANICNVADD(pnv, "esi", (uint32_t)rp->r_esi);
1111 PANICNVADD(pnv, "ebp", (uint32_t)rp->r_ebp);
1112 PANICNVADD(pnv, "esp", (uint32_t)rp->r_esp);
1113 PANICNVADD(pnv, "ebx", (uint32_t)rp->r_ebx);
1114 PANICNVADD(pnv, "edx", (uint32_t)rp->r_edx);
1115 PANICNVADD(pnv, "ecx", (uint32_t)rp->r_ecx);
1116 PANICNVADD(pnv, "eax", (uint32_t)rp->r_eax);
1117 PANICNVADD(pnv, "trapno", (uint32_t)rp->r_trapno);
1118 PANICNVADD(pnv, "err", (uint32_t)rp->r_err);
1119 PANICNVADD(pnv, "eip", (uint32_t)rp->r_eip);
1120 PANICNVADD(pnv, "cs", (uint32_t)rp->r_cs);
1121 PANICNVADD(pnv, "eflags", (uint32_t)rp->r_efl);
1122 PANICNVADD(pnv, "uesp", (uint32_t)rp->r_uesp);
1123 PANICNVADD(pnv, "ss", (uint32_t)rp->r_ss);
1124 PANICNVADD(pnv, "gdt", creg.cr_gdt);
1125 PANICNVADD(pnv, "idt", creg.cr_idt);
1126 #endif /* __i386 */
1127
1128 PANICNVADD(pnv, "ldt", creg.cr_ldt);
1129 PANICNVADD(pnv, "task", creg.cr_task);
1130 PANICNVADD(pnv, "cr0", creg.cr_cr0);
1131 PANICNVADD(pnv, "cr2", creg.cr_cr2);
1132 PANICNVADD(pnv, "cr3", creg.cr_cr3);
1133 if (creg.cr_cr4)
1134 PANICNVADD(pnv, "cr4", creg.cr_cr4);
1135
1136 PANICNVSET(pdp, pnv);
1137 }
1138
1139 #define TR_ARG_MAX 6 /* Max args to print, same as SPARC */
1140
1141 #if !defined(__amd64)
1142
1143 /*
1144 * Given a return address (%eip), determine the likely number of arguments
1145 * that were pushed on the stack prior to its execution. We do this by
1146 * expecting that a typical call sequence consists of pushing arguments on
1147 * the stack, executing a call instruction, and then performing an add
1148 * on %esp to restore it to the value prior to pushing the arguments for
1149 * the call. We attempt to detect such an add, and divide the addend
1150 * by the size of a word to determine the number of pushed arguments.
1151 *
1152 * If we do not find such an add, we punt and return TR_ARG_MAX. It is not
1153 * possible to reliably determine if a function took no arguments (i.e. was
1154 * void) because assembler routines do not reliably perform an add on %esp
 1155  * immediately upon returning (e.g. _sys_call()), so returning TR_ARG_MAX is
1156 * safer than returning 0.
1157 */
1158 static ulong_t
 1159 argcount(uintptr_t eip)
1160 {
1161 const uint8_t *ins = (const uint8_t *)eip;
1162 ulong_t n;
1163
1164 enum {
1165 M_MODRM_ESP = 0xc4, /* Mod/RM byte indicates %esp */
1166 M_ADD_IMM32 = 0x81, /* ADD imm32 to r/m32 */
1167 M_ADD_IMM8 = 0x83 /* ADD imm8 to r/m32 */
1168 };
1169
1170 if (eip < KERNELBASE || ins[1] != M_MODRM_ESP)
1171 return (TR_ARG_MAX);
1172
1173 switch (ins[0]) {
1174 case M_ADD_IMM32:
1175 n = ins[2] + (ins[3] << 8) + (ins[4] << 16) + (ins[5] << 24);
1176 break;
1177
1178 case M_ADD_IMM8:
1179 n = ins[2];
1180 break;
1181
1182 default:
1183 return (TR_ARG_MAX);
1184 }
1185
1186 n /= sizeof (long);
1187 return (MIN(n, TR_ARG_MAX));
1188 }
1189
1190 #endif /* !__amd64 */
1191
1192 /*
1193 * Print a stack backtrace using the specified frame pointer. We delay two
1194 * seconds before continuing, unless this is the panic traceback.
1195 * If we are in the process of panicking, we also attempt to write the
 1196  * stack backtrace to a statically assigned buffer, to allow the panic
1197 * code to find it and write it in to uncompressed pages within the
1198 * system crash dump.
1199 * Note that the frame for the starting stack pointer value is omitted because
1200 * the corresponding %eip is not known.
1201 */
1202
1203 extern char *dump_stack_scratch;
1204
1205 #if defined(__amd64)
1206
1207 void
 1208 traceback(caddr_t fpreg)
1209 {
1210 struct frame *fp = (struct frame *)fpreg;
1211 struct frame *nextfp;
1212 uintptr_t pc, nextpc;
1213 ulong_t off;
1214 char args[TR_ARG_MAX * 2 + 16], *sym;
1215 uint_t offset = 0;
1216 uint_t next_offset = 0;
1217 char stack_buffer[1024];
1218
1219 if (!panicstr)
1220 printf("traceback: %%fp = %p\n", (void *)fp);
1221
1222 if (panicstr && !dump_stack_scratch) {
1223 printf("Warning - stack not written to the dump buffer\n");
1224 }
1225
1226 fp = (struct frame *)plat_traceback(fpreg);
1227 if ((uintptr_t)fp < KERNELBASE)
1228 goto out;
1229
1230 pc = fp->fr_savpc;
1231 fp = (struct frame *)fp->fr_savfp;
1232
1233 while ((uintptr_t)fp >= KERNELBASE) {
1234 /*
1235 * XX64 Until port is complete tolerate 8-byte aligned
1236 * frame pointers but flag with a warning so they can
1237 * be fixed.
1238 */
1239 if (((uintptr_t)fp & (STACK_ALIGN - 1)) != 0) {
1240 if (((uintptr_t)fp & (8 - 1)) == 0) {
1241 printf(" >> warning! 8-byte"
1242 " aligned %%fp = %p\n", (void *)fp);
1243 } else {
1244 printf(
1245 " >> mis-aligned %%fp = %p\n", (void *)fp);
1246 break;
1247 }
1248 }
1249
1250 args[0] = '\0';
1251 nextpc = (uintptr_t)fp->fr_savpc;
1252 nextfp = (struct frame *)fp->fr_savfp;
1253 if ((sym = kobj_getsymname(pc, &off)) != NULL) {
1254 printf("%016lx %s:%s+%lx (%s)\n", (uintptr_t)fp,
1255 mod_containing_pc((caddr_t)pc), sym, off, args);
1256 (void) snprintf(stack_buffer, sizeof (stack_buffer),
1257 "%s:%s+%lx (%s) | ",
1258 mod_containing_pc((caddr_t)pc), sym, off, args);
1259 } else {
1260 printf("%016lx %lx (%s)\n",
1261 (uintptr_t)fp, pc, args);
1262 (void) snprintf(stack_buffer, sizeof (stack_buffer),
1263 "%lx (%s) | ", pc, args);
1264 }
1265
1266 if (panicstr && dump_stack_scratch) {
1267 next_offset = offset + strlen(stack_buffer);
1268 if (next_offset < STACK_BUF_SIZE) {
1269 bcopy(stack_buffer, dump_stack_scratch + offset,
1270 strlen(stack_buffer));
1271 offset = next_offset;
1272 } else {
1273 /*
1274 * In attempting to save the panic stack
1275 * to the dumpbuf we have overflowed that area.
1276 * Print a warning and continue to printf the
1277 * stack to the msgbuf
1278 */
1279 printf("Warning: stack in the dump buffer"
1280 " may be incomplete\n");
1281 offset = next_offset;
1282 }
1283 }
1284
1285 pc = nextpc;
1286 fp = nextfp;
1287 }
1288 out:
1289 if (!panicstr) {
1290 printf("end of traceback\n");
1291 DELAY(2 * MICROSEC);
1292 } else if (dump_stack_scratch) {
1293 dump_stack_scratch[offset] = '\0';
1294 }
1295 }
1296
1297 #elif defined(__i386)
1298
1299 void
 1300 traceback(caddr_t fpreg)
1301 {
1302 struct frame *fp = (struct frame *)fpreg;
1303 struct frame *nextfp, *minfp, *stacktop;
1304 uintptr_t pc, nextpc;
1305 uint_t offset = 0;
1306 uint_t next_offset = 0;
1307 char stack_buffer[1024];
1308
1309 cpu_t *cpu;
1310
1311 /*
1312 * args[] holds TR_ARG_MAX hex long args, plus ", " or '\0'.
1313 */
1314 char args[TR_ARG_MAX * 2 + 8], *p;
1315
1316 int on_intr;
1317 ulong_t off;
1318 char *sym;
1319
1320 if (!panicstr)
1321 printf("traceback: %%fp = %p\n", (void *)fp);
1322
1323 if (panicstr && !dump_stack_scratch) {
1324 printf("Warning - stack not written to the dumpbuf\n");
1325 }
1326
1327 /*
1328 * If we are panicking, all high-level interrupt information in
1329 * CPU was overwritten. panic_cpu has the correct values.
1330 */
1331 kpreempt_disable(); /* prevent migration */
1332
1333 cpu = (panicstr && CPU->cpu_id == panic_cpu.cpu_id)? &panic_cpu : CPU;
1334
1335 if ((on_intr = CPU_ON_INTR(cpu)) != 0)
1336 stacktop = (struct frame *)(cpu->cpu_intr_stack + SA(MINFRAME));
1337 else
1338 stacktop = (struct frame *)curthread->t_stk;
1339
1340 kpreempt_enable();
1341
1342 fp = (struct frame *)plat_traceback(fpreg);
1343 if ((uintptr_t)fp < KERNELBASE)
1344 goto out;
1345
1346 minfp = fp; /* Baseline minimum frame pointer */
1347 pc = fp->fr_savpc;
1348 fp = (struct frame *)fp->fr_savfp;
1349
1350 while ((uintptr_t)fp >= KERNELBASE) {
1351 ulong_t argc;
1352 long *argv;
1353
1354 if (fp <= minfp || fp >= stacktop) {
1355 if (on_intr) {
1356 /*
1357 * Hop from interrupt stack to thread stack.
1358 */
1359 stacktop = (struct frame *)curthread->t_stk;
1360 minfp = (struct frame *)curthread->t_stkbase;
1361 on_intr = 0;
1362 continue;
1363 }
1364 break; /* we're outside of the expected range */
1365 }
1366
1367 if ((uintptr_t)fp & (STACK_ALIGN - 1)) {
1368 printf(" >> mis-aligned %%fp = %p\n", (void *)fp);
1369 break;
1370 }
1371
1372 nextpc = fp->fr_savpc;
1373 nextfp = (struct frame *)fp->fr_savfp;
1374 argc = argcount(nextpc);
1375 argv = (long *)((char *)fp + sizeof (struct frame));
1376
1377 args[0] = '\0';
1378 p = args;
1379 while (argc-- > 0 && argv < (long *)stacktop) {
1380 p += snprintf(p, args + sizeof (args) - p,
1381 "%s%lx", (p == args) ? "" : ", ", *argv++);
1382 }
1383
1384 if ((sym = kobj_getsymname(pc, &off)) != NULL) {
1385 printf("%08lx %s:%s+%lx (%s)\n", (uintptr_t)fp,
1386 mod_containing_pc((caddr_t)pc), sym, off, args);
1387 (void) snprintf(stack_buffer, sizeof (stack_buffer),
1388 "%s:%s+%lx (%s) | ",
1389 mod_containing_pc((caddr_t)pc), sym, off, args);
1390
1391 } else {
1392 printf("%08lx %lx (%s)\n",
1393 (uintptr_t)fp, pc, args);
1394 (void) snprintf(stack_buffer, sizeof (stack_buffer),
1395 "%lx (%s) | ", pc, args);
1396
1397 }
1398
1399 if (panicstr && dump_stack_scratch) {
1400 next_offset = offset + strlen(stack_buffer);
1401 if (next_offset < STACK_BUF_SIZE) {
1402 bcopy(stack_buffer, dump_stack_scratch + offset,
1403 strlen(stack_buffer));
1404 offset = next_offset;
1405 } else {
1406 /*
1407 * In attempting to save the panic stack
1408 * to the dumpbuf we have overflowed that area.
1409 * Print a warning and continue to printf the
1410 * stack to the msgbuf
1411 */
1412 printf("Warning: stack in the dumpbuf"
1413 " may be incomplete\n");
1414 offset = next_offset;
1415 }
1416 }
1417
1418 minfp = fp;
1419 pc = nextpc;
1420 fp = nextfp;
1421 }
1422 out:
1423 if (!panicstr) {
1424 printf("end of traceback\n");
1425 DELAY(2 * MICROSEC);
1426 } else if (dump_stack_scratch) {
1427 dump_stack_scratch[offset] = '\0';
1428 }
1429
1430 }
1431
1432 #endif /* __i386 */
1433
1434 /*
1435 * Generate a stack backtrace from a saved register set.
1436 */
1437 void
 1438 traceregs(struct regs *rp)
1439 {
1440 traceback((caddr_t)rp->r_fp);
1441 }
1442
1443 void
 1444 exec_set_sp(size_t stksize)
1445 {
1446 klwp_t *lwp = ttolwp(curthread);
1447
1448 lwptoregs(lwp)->r_sp = (uintptr_t)curproc->p_usrstack - stksize;
1449 }
1450
1451 hrtime_t
 1452 gethrtime_waitfree(void)
1453 {
1454 return (dtrace_gethrtime());
1455 }
1456
1457 hrtime_t
 1458 gethrtime(void)
1459 {
1460 return (gethrtimef());
1461 }
1462
1463 hrtime_t
 1464 gethrtime_unscaled(void)
1465 {
1466 return (gethrtimeunscaledf());
1467 }
1468
1469 void
 1470 scalehrtime(hrtime_t *hrt)
1471 {
1472 scalehrtimef(hrt);
1473 }
1474
1475 uint64_t
 1476 unscalehrtime(hrtime_t nsecs)
1477 {
1478 return (unscalehrtimef(nsecs));
1479 }
1480
1481 void
 1482 gethrestime(timespec_t *tp)
1483 {
1484 gethrestimef(tp);
1485 }
1486
1487 #if defined(__amd64)
1488 /*
1489 * Part of the implementation of hres_tick(); this routine is
1490 * easier in C than assembler .. called with the hres_lock held.
1491 *
1492 * XX64 Many of these timekeeping variables need to be extern'ed in a header
1493 */
1494
1495 #include <sys/time.h>
1496 #include <sys/machlock.h>
1497
1498 extern int one_sec;
1499 extern int max_hres_adj;
1500
1501 void
 1502 __adj_hrestime(void)
1503 {
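	/*
	 * Apply at most max_hres_adj of the outstanding timedelta to
	 * hrestime this tick, then normalize tv_nsec.
	 */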
1504 long long adj;
1505
1506 if (hrestime_adj == 0)
1507 adj = 0;
1508 else if (hrestime_adj > 0) {
1509 if (hrestime_adj < max_hres_adj)
1510 adj = hrestime_adj;
1511 else
1512 adj = max_hres_adj;
1513 } else {
1514 if (hrestime_adj < -max_hres_adj)
1515 adj = -max_hres_adj;
1516 else
1517 adj = hrestime_adj;
1518 }
1519
1520 timedelta -= adj;
1521 hrestime_adj = timedelta;
1522 hrestime.tv_nsec += adj;
1523
1524 while (hrestime.tv_nsec >= NANOSEC) {
1525 one_sec++;
1526 hrestime.tv_sec++;
1527 hrestime.tv_nsec -= NANOSEC;
1528 }
1529 }
1530 #endif
1531
1532 /*
 1533  * Wrapper functions to maintain backwards compatibility
1534 */
1535 int
 1536 xcopyin(const void *uaddr, void *kaddr, size_t count)
1537 {
1538 return (xcopyin_nta(uaddr, kaddr, count, UIO_COPY_CACHED));
1539 }
1540
1541 int
 1542 xcopyout(const void *kaddr, void *uaddr, size_t count)
1543 {
1544 return (xcopyout_nta(kaddr, uaddr, count, UIO_COPY_CACHED));
1545 }
1546