/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/param.h>
#include <sys/vmparam.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/cred.h>
#include <sys/cmn_err.h>
#include <sys/user.h>
#include <sys/privregs.h>
#include <sys/psw.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/modctl.h>
#include <sys/var.h>
#include <sys/inline.h>
#include <sys/syscall.h>
#include <sys/ucontext.h>
#include <sys/cpuvar.h>
#include <sys/siginfo.h>
#include <sys/trap.h>
#include <sys/vtrace.h>
#include <sys/sysinfo.h>
#include <sys/procfs.h>
#include <sys/prsystm.h>
#include <c2/audit.h>
#include <sys/aio_impl.h>
#include <sys/tnf.h>
#include <sys/tnf_probe.h>
#include <sys/copyops.h>
#include <sys/priv.h>
#include <sys/msacct.h>

int syscalltrace = 0;
#ifdef SYSCALLTRACE
static kmutex_t systrace_lock;		/* syscall tracing lock */
#else
#define syscalltrace 0
#endif /* SYSCALLTRACE */
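/*
 * When SYSCALLTRACE is not compiled in, syscalltrace is a compile-time
 * constant zero, so the tracing branches below are dead code the compiler
 * can elide; on SYSCALLTRACE kernels the global above is live and can be
 * toggled at run time (e.g. from a kernel debugger).
 */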

typedef int64_t (*llfcn_t)();	/* function returning long long */

int pre_syscall(void);
void post_syscall(long rval1, long rval2);
static krwlock_t *lock_syscall(struct sysent *, uint_t);
void deferred_singlestep_trap(caddr_t);

#ifdef _SYSCALL32_IMPL
#define LWP_GETSYSENT(lwp)	\
        (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE ? sysent : sysent32)
#else
#define LWP_GETSYSENT(lwp)	(sysent)
#endif

/*
 * If watchpoints are active, don't make copying in of
 * system call arguments take a read watchpoint trap.
 */
static int
copyin_args(struct regs *rp, long *ap, uint_t nargs)
{
        greg_t *sp = 1 + (greg_t *)rp->r_sp;		/* skip ret addr */

        ASSERT(nargs <= MAXSYSARGS);

        return (copyin_nowatch(sp, ap, nargs * sizeof (*sp)));
}

#if defined(_SYSCALL32_IMPL)
static int
copyin_args32(struct regs *rp, long *ap, uint_t nargs)
{
        greg32_t *sp = 1 + (greg32_t *)rp->r_sp;	/* skip ret addr */
        uint32_t a32[MAXSYSARGS];
        int rc;

        ASSERT(nargs <= MAXSYSARGS);

        if ((rc = copyin_nowatch(sp, a32, nargs * sizeof (*sp))) == 0) {
                uint32_t *a32p = &a32[0];

                while (nargs--)
                        *ap++ = (ulong_t)*a32p++;
        }
        return (rc);
}
#define COPYIN_ARGS32	copyin_args32
#else
#define COPYIN_ARGS32	copyin_args
#endif

/*
 * Error handler for system calls whose argument copy faults.
 */
static longlong_t
syscall_err()
{
        return (0);
}

/*
 * Corresponding sysent entry to allow syscall_entry caller
 * to invoke syscall_err.
 */
static struct sysent sysent_err = {
        0, SE_32RVAL1, NULL, NULL, (llfcn_t)syscall_err
};
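/*
 * Returning &sysent_err makes the dispatcher invoke syscall_err(), which
 * simply returns 0; the failure itself has already been recorded in
 * lwp_errno via set_errno(), so post_syscall() reports the error to the
 * caller.
 */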

/*
 * Called from syscall() when a non-trivial 32-bit system call occurs.
 * Sets up the args and returns a pointer to the handler.
 */
struct sysent *
syscall_entry(kthread_t *t, long *argp)
{
        klwp_t *lwp = ttolwp(t);
        struct regs *rp = lwptoregs(lwp);
        unsigned int code;
        struct sysent *callp;
        struct sysent *se = LWP_GETSYSENT(lwp);
        int error = 0;
        uint_t nargs;

        ASSERT(t == curthread && curthread->t_schedflag & TS_DONT_SWAP);

        lwp->lwp_ru.sysc++;
        lwp->lwp_eosys = NORMALRETURN;	/* assume this will be normal */

        /*
         * Set lwp_ap to point to the args, even if none are needed for this
         * system call. This is for the loadable-syscall case where the
         * number of args won't be known until the system call is loaded, and
         * also maintains a non-NULL lwp_ap setup for get_syscall_args(). Note
         * that lwp_ap MUST be set to a non-NULL value _BEFORE_ t_sysnum is
         * set to non-zero; otherwise get_syscall_args(), seeing a non-zero
         * t_sysnum for this thread, will charge ahead and dereference lwp_ap.
         */
        lwp->lwp_ap = argp;		/* for get_syscall_args */

        code = rp->r_r0;
        t->t_sysnum = (short)code;
        callp = code >= NSYSCALL ? &nosys_ent : se + code;

        if ((t->t_pre_sys | syscalltrace) != 0) {
                error = pre_syscall();

                /*
                 * pre_syscall() has ensured that lwp_ap is current; it
                 * either points to syscall-entry-saved amd64 regs, or to
                 * lwp_arg[], which has been re-copied from the ia32 ustack.
                 * Either way, it's a current copy after /proc has possibly
                 * mucked with the syscall args.
                 */

                if (error)
                        return (&sysent_err);	/* use dummy handler */
        }

        /*
         * Fetch the system call arguments to the kernel stack copy used
         * for syscall handling.
         * Note: for loadable system calls the number of arguments required
         * may not be known at this point, and will be zero if the system call
         * was never loaded. Once the system call has been loaded, the number
         * of args is not allowed to be changed.
         */
        if ((nargs = (uint_t)callp->sy_narg) != 0 &&
            COPYIN_ARGS32(rp, argp, nargs)) {
                (void) set_errno(EFAULT);
                return (&sysent_err);	/* use dummy handler */
        }

        return (callp);		/* return sysent entry for caller */
}
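/*
 * dosyscall() below shows the canonical calling sequence: syscall_entry(),
 * then the handler via sy_callc, then syscall_exit() with the two raw
 * return words.
 */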

void
syscall_exit(kthread_t *t, long rval1, long rval2)
{
        /*
         * Handle signals and other post-call events if necessary.
         */
        if ((t->t_post_sys_ast | syscalltrace) == 0) {
                klwp_t *lwp = ttolwp(t);
                struct regs *rp = lwptoregs(lwp);

                /*
                 * Normal return.
                 * Clear error indication and set return values.
                 */
                rp->r_ps &= ~PS_C;	/* reset carry bit */
                rp->r_r0 = rval1;
                rp->r_r1 = rval2;
                lwp->lwp_state = LWP_USER;
        } else
                post_syscall(rval1, rval2);
        t->t_sysnum = 0;	/* invalidate args */
}

/*
 * Perform pre-system-call processing, including stopping for tracing,
 * auditing, etc.
 *
 * This routine is called only if the t_pre_sys flag is set. Any condition
 * requiring pre-syscall handling must set the t_pre_sys flag. If the
 * condition is persistent, this routine will repost t_pre_sys.
 */
int
pre_syscall()
{
        kthread_t *t = curthread;
        unsigned code = t->t_sysnum;
        klwp_t *lwp = ttolwp(t);
        proc_t *p = ttoproc(t);
        int repost;

        t->t_pre_sys = repost = 0;	/* clear pre-syscall processing flag */

        ASSERT(t->t_schedflag & TS_DONT_SWAP);

#if defined(DEBUG)
        /*
         * On the i386 kernel, lwp_ap points at the piece of the thread
         * stack that we copy the user's arguments into.
         *
         * On the amd64 kernel, the syscall arguments in the rdi..r9
         * registers should be pointed at by lwp_ap. If the args need to
         * be copied so that those registers can be changed without losing
         * the ability to get the args for /proc, they can be saved by
         * save_syscall_args(), and lwp_ap will be restored by post_syscall().
         */
        if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
#if defined(_LP64)
                ASSERT(lwp->lwp_ap == (long *)&lwptoregs(lwp)->r_rdi);
        } else {
#endif
                ASSERT((caddr_t)lwp->lwp_ap > t->t_stkbase &&
                    (caddr_t)lwp->lwp_ap < t->t_stk);
        }
#endif	/* DEBUG */

        /*
         * Make sure the thread is holding the latest credentials for the
         * process. The credentials in the process right now apply to this
         * thread for the entire system call.
         */
        if (t->t_cred != p->p_cred) {
                cred_t *oldcred = t->t_cred;
                /*
                 * DTrace accesses t_cred in probe context. t_cred must
                 * always be either NULL, or point to a valid, allocated cred
                 * structure.
                 */
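                /*
                 * Note the ordering: the new credential is installed in
                 * t_cred before the old reference is crfree()d, so a probe
                 * firing in between never sees a freed cred.
                 */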
                t->t_cred = crgetcred();
                crfree(oldcred);
        }

        /*
         * From the proc(4) manual page:
         * When entry to a system call is being traced, the traced process
         * stops after having begun the call to the system but before the
         * system call arguments have been fetched from the process.
         */
        if (PTOU(p)->u_systrap) {
                if (prismember(&PTOU(p)->u_entrymask, code)) {
                        mutex_enter(&p->p_lock);
                        /*
                         * Recheck stop condition, now that lock is held.
                         */
                        if (PTOU(p)->u_systrap &&
                            prismember(&PTOU(p)->u_entrymask, code)) {
                                stop(PR_SYSENTRY, code);

                                /*
                                 * /proc may have modified syscall args,
                                 * either in regs for amd64 or on ustack
                                 * for ia32. Either way, arrange to
                                 * copy them again, both for the syscall
                                 * handler and for other consumers in
                                 * post_syscall (like audit). Here, we
                                 * only do amd64, and just set lwp_ap
                                 * back to the kernel-entry stack copy;
                                 * the syscall ml code redoes
                                 * move-from-regs to set up for the
                                 * syscall handler after we return. For
                                 * ia32, save_syscall_args() below makes
                                 * an lwp_ap-accessible copy.
                                 */
#if defined(_LP64)
                                if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
                                        lwp->lwp_argsaved = 0;
                                        lwp->lwp_ap =
                                            (long *)&lwptoregs(lwp)->r_rdi;
                                }
#endif
                        }
                        mutex_exit(&p->p_lock);
                }
                repost = 1;
        }

        /*
         * ia32 kernel, or ia32 proc on amd64 kernel: keep args in
         * lwp_arg for post-syscall processing, regardless of whether
         * they might have been changed in /proc above.
         */
#if defined(_LP64)
        if (lwp_getdatamodel(lwp) != DATAMODEL_NATIVE)
#endif
                (void) save_syscall_args();

        if (lwp->lwp_sysabort) {
                /*
                 * lwp_sysabort may have been set via /proc while the process
                 * was stopped on PR_SYSENTRY. If so, abort the system call.
                 * Override any error from the copyin() of the arguments.
                 */
                lwp->lwp_sysabort = 0;
                (void) set_errno(EINTR);	/* forces post_sys */
                t->t_pre_sys = 1;	/* repost anyway */
                return (1);		/* don't do system call, return EINTR */
        }

        /*
         * Begin auditing for this syscall if the c2audit module is loaded
         * and auditing is enabled.
         */
        if (audit_active == C2AUDIT_LOADED) {
                uint32_t auditing = au_zone_getstate(NULL);

                if (auditing & AU_AUDIT_MASK) {
                        int error;
                        if (error = audit_start(T_SYSCALL, code, auditing,
                            0, lwp)) {
                                t->t_pre_sys = 1;	/* repost anyway */
                                (void) set_errno(error);
                                return (1);
                        }
                        repost = 1;
                }
        }

#ifndef NPROBE
        /* Kernel probe */
        if (tnf_tracing_active) {
                TNF_PROBE_1(syscall_start, "syscall thread", /* CSTYLED */,
                    tnf_sysnum, sysnum, t->t_sysnum);
                t->t_post_sys = 1;	/* make sure post_syscall runs */
                repost = 1;
        }
#endif /* NPROBE */

#ifdef SYSCALLTRACE
        if (syscalltrace) {
                int i;
                long *ap;
                char *cp;
                char *sysname;
                struct sysent *callp;

                if (code >= NSYSCALL)
                        callp = &nosys_ent;	/* nosys has no args */
                else
                        callp = LWP_GETSYSENT(lwp) + code;
                (void) save_syscall_args();
                mutex_enter(&systrace_lock);
                printf("%d: ", p->p_pid);
                if (code >= NSYSCALL)
                        printf("0x%x", code);
                else {
                        sysname = mod_getsysname(code);
                        printf("%s[0x%x/0x%p]", sysname == NULL ? "NULL" :
                            sysname, code, callp->sy_callc);
                }
                cp = "(";
                for (i = 0, ap = lwp->lwp_ap; i < callp->sy_narg; i++, ap++) {
                        printf("%s%lx", cp, *ap);
                        cp = ", ";
                }
                if (i)
                        printf(")");
                printf(" %s id=0x%p\n", PTOU(p)->u_comm, curthread);
                mutex_exit(&systrace_lock);
        }
#endif /* SYSCALLTRACE */
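        /*
         * With SYSCALLTRACE compiled in and syscalltrace set, the entry
         * trace above prints pid, name[code/handler], the raw args, the
         * command name and the thread pointer, roughly:
         *
         *	1234: read[0x3/0xfffffffffb8a1230](4, 8060000, 400) cat ...
         *
         * (the pointer values here are invented for illustration).
         */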

        /*
         * If there was a continuing reason for pre-syscall processing,
         * set the t_pre_sys flag for the next system call.
         */
        if (repost)
                t->t_pre_sys = 1;
        lwp->lwp_error = 0;	/* for old drivers */
        lwp->lwp_badpriv = PRIV_NONE;
        return (0);
}


/*
 * Post-syscall processing. Perform abnormal system call completion
 * actions such as /proc tracing, profiling, signals, preemption, etc.
 *
 * This routine is called only if t_post_sys, t_sig_check, or t_astflag
 * is set. Any condition requiring post-syscall handling must set one of
 * these. If the condition is persistent, this routine will repost
 * t_post_sys.
 */
void
post_syscall(long rval1, long rval2)
{
        kthread_t *t = curthread;
        klwp_t *lwp = ttolwp(t);
        proc_t *p = ttoproc(t);
        struct regs *rp = lwptoregs(lwp);
        uint_t error;
        uint_t code = t->t_sysnum;
        int repost = 0;
        int proc_stop = 0;	/* non-zero if stopping */
        int sigprof = 0;	/* non-zero if sending SIGPROF */

        t->t_post_sys = 0;

        error = lwp->lwp_errno;

        /*
         * Code can be zero if this is a new LWP returning after a forkall(),
         * other than the one which matches the one in the parent which called
         * forkall(). In these LWPs, skip most of post-syscall activity.
         */
        if (code == 0)
                goto sig_check;
        /*
         * If the trace flag is set, mark the lwp to take a single-step trap
         * on return to user level (below). The x86 lcall and sysenter
         * interfaces have already done this, and turned off the flag, but
         * the amd64 syscall interface has not.
         */
        if (rp->r_ps & PS_T) {
                lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
                rp->r_ps &= ~PS_T;
                aston(curthread);
        }

        /* put out audit record for this syscall */
        if (AU_AUDITING()) {
                rval_t rval;

                /* XX64 -- truncation of 64-bit return values? */
                rval.r_val1 = (int)rval1;
                rval.r_val2 = (int)rval2;
                audit_finish(T_SYSCALL, code, error, &rval);
                repost = 1;
        }

        if (curthread->t_pdmsg != NULL) {
                char *m = curthread->t_pdmsg;

                uprintf("%s", m);
                kmem_free(m, strlen(m) + 1);
                curthread->t_pdmsg = NULL;
        }

        /*
         * If we're going to stop for /proc tracing, set the flag and
         * save the arguments so that the return values don't smash them.
         */
        if (PTOU(p)->u_systrap) {
                if (prismember(&PTOU(p)->u_exitmask, code)) {
                        if (lwp_getdatamodel(lwp) == DATAMODEL_LP64)
                                (void) save_syscall_args();
                        proc_stop = 1;
                }
                repost = 1;
        }

        /*
         * Similarly check to see if SIGPROF might be sent.
         */
        if (curthread->t_rprof != NULL &&
            curthread->t_rprof->rp_anystate != 0) {
                if (lwp_getdatamodel(lwp) == DATAMODEL_LP64)
                        (void) save_syscall_args();
                sigprof = 1;
        }

        if (lwp->lwp_eosys == NORMALRETURN) {
                if (error == 0) {
#ifdef SYSCALLTRACE
                        if (syscalltrace) {
                                mutex_enter(&systrace_lock);
                                printf(
                                    "%d: r_val1=0x%lx, r_val2=0x%lx, id 0x%p\n",
                                    p->p_pid, rval1, rval2, curthread);
                                mutex_exit(&systrace_lock);
                        }
#endif /* SYSCALLTRACE */
                        rp->r_ps &= ~PS_C;
                        rp->r_r0 = rval1;
                        rp->r_r1 = rval2;
                } else {
                        int sig;
#ifdef SYSCALLTRACE
                        if (syscalltrace) {
                                mutex_enter(&systrace_lock);
                                printf("%d: error=%d, id 0x%p\n",
                                    p->p_pid, error, curthread);
                                mutex_exit(&systrace_lock);
                        }
#endif /* SYSCALLTRACE */
                        if (error == EINTR && t->t_activefd.a_stale)
                                error = EBADF;
                        if (error == EINTR &&
                            (sig = lwp->lwp_cursig) != 0 &&
                            sigismember(&PTOU(p)->u_sigrestart, sig) &&
                            PTOU(p)->u_signal[sig - 1] != SIG_DFL &&
                            PTOU(p)->u_signal[sig - 1] != SIG_IGN)
                                error = ERESTART;
                        rp->r_r0 = error;
                        rp->r_ps |= PS_C;
                }
        }
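        /*
         * The carry flag is the user-visible success/failure bit on x86:
         * libc's syscall stubs test PS_C on return and, when it is set,
         * treat the value in r_r0 as an errno rather than a result.
         */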

        /*
         * From the proc(4) manual page:
         * When exit from a system call is being traced, the traced process
         * stops on completion of the system call just prior to checking for
         * signals and returning to user level. At this point all return
         * values have been stored into the traced process's saved registers.
         */
        if (proc_stop) {
                mutex_enter(&p->p_lock);
                if (PTOU(p)->u_systrap &&
                    prismember(&PTOU(p)->u_exitmask, code))
                        stop(PR_SYSEXIT, code);
                mutex_exit(&p->p_lock);
        }

        /*
         * If we are the parent returning from a successful
         * vfork, wait for the child to exec or exit.
         * This code must be here and not in the bowels of the system
         * so that /proc can intercept exit from vfork in a timely way.
         */
        if (t->t_flag & T_VFPARENT) {
                ASSERT(code == SYS_vfork || code == SYS_forksys);
                ASSERT(rp->r_r1 == 0 && error == 0);
                vfwait((pid_t)rval1);
                t->t_flag &= ~T_VFPARENT;
        }

        /*
         * If profiling is active, bill the current PC in user-land
         * and keep reposting until profiling is disabled.
         */
        if (p->p_prof.pr_scale) {
                if (lwp->lwp_oweupc)
                        profil_tick(rp->r_pc);
                repost = 1;
        }

sig_check:
        /*
         * Reset flag for next time.
         * We must do this after stopping on PR_SYSEXIT
         * because /proc uses the information in lwp_eosys.
         */
        lwp->lwp_eosys = NORMALRETURN;
        clear_stale_fd();
        t->t_flag &= ~T_FORKALL;

        if (t->t_astflag | t->t_sig_check) {
                /*
                 * Turn off the AST flag before checking all the conditions
                 * that may have caused an AST. This flag is on whenever a
                 * signal or unusual condition should be handled after the
                 * next trap or syscall.
                 */
                astoff(t);
                /*
                 * If a single-step trap occurred on a syscall (see trap())
                 * recognize it now. Do this before checking for signals
                 * because deferred_singlestep_trap() may generate a SIGTRAP
                 * to the LWP or may otherwise mark the LWP to call
                 * issig(FORREAL).
                 */
                if (lwp->lwp_pcb.pcb_flags & DEBUG_PENDING)
                        deferred_singlestep_trap((caddr_t)rp->r_pc);

                t->t_sig_check = 0;

                /*
                 * The following check is legal for the following reasons:
                 *	1) The thread we are checking is ourselves, so
                 *	   there is no way the proc can go away.
                 *	2) The only time we need to be protected by the
                 *	   lock is if the binding is changed.
                 *
                 * Note we will still take the lock and check the binding
                 * if the condition was true without the lock held. This
                 * prevents lock contention among threads owned by the
                 * same proc.
                 */

                if (curthread->t_proc_flag & TP_CHANGEBIND) {
                        mutex_enter(&p->p_lock);
                        if (curthread->t_proc_flag & TP_CHANGEBIND) {
                                timer_lwpbind();
                                curthread->t_proc_flag &= ~TP_CHANGEBIND;
                        }
                        mutex_exit(&p->p_lock);
                }

                /*
                 * For kaio requests on the special kaio poll queue,
                 * copy out their results to user memory.
                 */
                if (p->p_aio)
                        aio_cleanup(0);
                /*
                 * If this LWP was asked to hold, call holdlwp(), which will
                 * stop. holdlwps() sets this up and calls pokelwps() which
                 * sets the AST flag.
                 *
                 * Also check TP_EXITLWP, since this is used by fresh new LWPs
                 * through lwp_rtt(). That flag is set if the lwp_create(2)
                 * syscall failed after creating the LWP.
                 */
                if (ISHOLD(p) || (t->t_proc_flag & TP_EXITLWP))
                        holdlwp();

                /*
                 * All code that sets signals and makes ISSIG_PENDING
                 * evaluate true must set t_sig_check afterwards.
                 */
                if (ISSIG_PENDING(t, lwp, p)) {
                        if (issig(FORREAL))
                                psig();
                        t->t_sig_check = 1;	/* recheck next time */
                }

                if (sigprof) {
                        int nargs = (code > 0 && code < NSYSCALL) ?
                            LWP_GETSYSENT(lwp)[code].sy_narg : 0;
                        realsigprof(code, nargs, error);
                        t->t_sig_check = 1;	/* recheck next time */
                }

                /*
                 * If a performance counter overflow interrupt was
                 * delivered *during* the syscall, then re-enable the
                 * AST so that we take a trip through trap() to cause
                 * the SIGEMT to be delivered.
                 */
                if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW)
                        aston(t);

                /*
                 * /proc can't enable/disable the trace bit itself
                 * because that could race with the call gate used by
                 * system calls via "lcall". If that happened, an
                 * invalid EFLAGS would result. prstep()/prnostep()
                 * therefore schedule an AST for the purpose.
                 */
                if (lwp->lwp_pcb.pcb_flags & REQUEST_STEP) {
                        lwp->lwp_pcb.pcb_flags &= ~REQUEST_STEP;
                        rp->r_ps |= PS_T;
                }
                if (lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP) {
                        lwp->lwp_pcb.pcb_flags &= ~REQUEST_NOSTEP;
                        rp->r_ps &= ~PS_T;
                }
        }

        lwp->lwp_errno = 0;	/* clear error for next time */

#ifndef NPROBE
        /* Kernel probe */
        if (tnf_tracing_active) {
                TNF_PROBE_3(syscall_end, "syscall thread", /* CSTYLED */,
                    tnf_long, rval1, rval1,
                    tnf_long, rval2, rval2,
                    tnf_long, errno, (long)error);
                repost = 1;
        }
#endif /* NPROBE */

        /*
         * Set state to LWP_USER here so preempt won't give us a kernel
         * priority if it occurs after this point. Call CL_TRAPRET() to
         * restore the user-level priority.
         *
         * It is important that no locks (other than spinlocks) be entered
         * after this point before returning to user mode (unless lwp_state
         * is set back to LWP_SYS).
         *
         * XXX Sampled times past this point are charged to the user.
         */
        lwp->lwp_state = LWP_USER;

        if (t->t_trapret) {
                t->t_trapret = 0;
                thread_lock(t);
                CL_TRAPRET(t);
                thread_unlock(t);
        }
        if (CPU->cpu_runrun || t->t_schedflag & TS_ANYWAITQ)
                preempt();
        prunstop();

        lwp->lwp_errno = 0;	/* clear error for next time */

        /*
         * The thread lock must be held in order to clear sysnum and reset
         * lwp_ap atomically with respect to other threads in the system that
         * may be looking at the args via lwp_ap from get_syscall_args().
         */

        thread_lock(t);
        t->t_sysnum = 0;	/* no longer in a system call */

        if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
#if defined(_LP64)
                /*
                 * In case the args were copied to the lwp, reset the
                 * pointer so the next syscall will have the right
                 * lwp_ap pointer.
                 */
                lwp->lwp_ap = (long *)&rp->r_rdi;
        } else {
#endif
                lwp->lwp_ap = NULL;	/* reset on every syscall entry */
        }
        thread_unlock(t);

        lwp->lwp_argsaved = 0;

        /*
         * If there was a continuing reason for post-syscall processing,
         * set the t_post_sys flag for the next system call.
         */
        if (repost)
                t->t_post_sys = 1;

        /*
         * If there is a ustack registered for this lwp, and the stack rlimit
         * has been altered, read in the ustack. If the saved stack rlimit
         * matches the bounds of the ustack, update the ustack to reflect
         * the new rlimit. If the new stack rlimit is RLIM_INFINITY, disable
         * stack checking by setting the size to 0.
         */
        if (lwp->lwp_ustack != 0 && lwp->lwp_old_stk_ctl != 0) {
                rlim64_t new_size;
                caddr_t top;
                stack_t stk;
                struct rlimit64 rl;

                mutex_enter(&p->p_lock);
                new_size = p->p_stk_ctl;
                top = p->p_usrstack;
                (void) rctl_rlimit_get(rctlproc_legacy[RLIMIT_STACK], p, &rl);
                mutex_exit(&p->p_lock);

                if (rl.rlim_cur == RLIM64_INFINITY)
                        new_size = 0;

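                /*
                 * A registered ustack describes the range
                 * [top - ss_size, top); the adjustment below moves ss_sp
                 * so that this invariant still holds at the new size.
                 */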
                if (copyin((stack_t *)lwp->lwp_ustack, &stk,
                    sizeof (stack_t)) == 0 &&
                    (stk.ss_size == lwp->lwp_old_stk_ctl ||
                    stk.ss_size == 0) &&
                    stk.ss_sp == top - stk.ss_size) {
                        stk.ss_sp = (void *)((uintptr_t)stk.ss_sp +
                            stk.ss_size - (uintptr_t)new_size);
                        stk.ss_size = new_size;

                        (void) copyout(&stk, (stack_t *)lwp->lwp_ustack,
                            sizeof (stack_t));
                }

                lwp->lwp_old_stk_ctl = 0;
        }
}

/*
 * Called from post_syscall() when a deferred singlestep is to be taken.
 */
void
deferred_singlestep_trap(caddr_t pc)
{
        proc_t *p = ttoproc(curthread);
        klwp_t *lwp = ttolwp(curthread);
        pcb_t *pcb = &lwp->lwp_pcb;
        uint_t fault = 0;
        k_siginfo_t siginfo;

        bzero(&siginfo, sizeof (siginfo));

        /*
         * If both NORMAL_STEP and WATCH_STEP are in
         * effect, give precedence to WATCH_STEP.
         * If neither is set, user must have set the
         * PS_T bit in %efl; treat this as NORMAL_STEP.
         */
        if ((fault = undo_watch_step(&siginfo)) == 0 &&
            ((pcb->pcb_flags & NORMAL_STEP) ||
            !(pcb->pcb_flags & WATCH_STEP))) {
                siginfo.si_signo = SIGTRAP;
                siginfo.si_code = TRAP_TRACE;
                siginfo.si_addr = pc;
                fault = FLTTRACE;
        }
        pcb->pcb_flags &= ~(DEBUG_PENDING|NORMAL_STEP|WATCH_STEP);

        if (fault) {
                /*
                 * Remember the fault and fault address
                 * for real-time (SIGPROF) profiling.
                 */
                lwp->lwp_lastfault = fault;
                lwp->lwp_lastfaddr = siginfo.si_addr;
                /*
                 * If a debugger has declared this fault to be an
                 * event of interest, stop the lwp. Otherwise just
                 * deliver the associated signal.
                 */
                if (prismember(&p->p_fltmask, fault) &&
                    stop_on_fault(fault, &siginfo) == 0)
                        siginfo.si_signo = 0;
        }

        if (siginfo.si_signo)
                trapsig(&siginfo, 1);
}

/*
 * Nonexistent system call -- signal lwp (may want to handle it);
 * flag error if lwp won't see signal immediately.
 */
int64_t
nosys()
{
        tsignal(curthread, SIGSYS);
        return (set_errno(ENOSYS));
}

/*
 * Execute a 32-bit system call on behalf of the current thread.
 */
void
dosyscall(void)
{
        /*
         * Need space on the stack to store syscall arguments.
         */
        long		syscall_args[MAXSYSARGS];
        struct sysent	*se;
        int64_t		ret;

        syscall_mstate(LMS_TRAP, LMS_SYSTEM);

        ASSERT(curproc->p_model == DATAMODEL_ILP32);

        CPU_STATS_ENTER_K();
        CPU_STATS_ADDQ(CPU, sys, syscall, 1);
        CPU_STATS_EXIT_K();

        se = syscall_entry(curthread, syscall_args);

        /*
         * syscall_entry() copied the declared arguments into syscall_args[];
         * pass all eight slots to the handler, which uses only as many as
         * its sysent entry declares.
         */
        ret = se->sy_callc(syscall_args[0], syscall_args[1], syscall_args[2],
            syscall_args[3], syscall_args[4], syscall_args[5], syscall_args[6],
            syscall_args[7]);

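        /*
         * The handler's 64-bit result carries both return words: the low
         * 32 bits become rval1 and the high 32 bits rval2, matching the
         * rval_t layout used by syscall_ap() and friends.
         */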
        syscall_exit(curthread, (int)ret & 0xffffffffu, (int)(ret >> 32));
        syscall_mstate(LMS_SYSTEM, LMS_TRAP);
}

/*
 * Get the arguments to the current system call. See comment atop
 * save_syscall_args() regarding lwp_ap usage.
 */

uint_t
get_syscall_args(klwp_t *lwp, long *argp, int *nargsp)
{
        kthread_t *t = lwptot(lwp);
        ulong_t mask = 0xfffffffful;
        uint_t code;
        long *ap;
        int nargs;

#if defined(_LP64)
        if (lwp_getdatamodel(lwp) == DATAMODEL_LP64)
                mask = 0xfffffffffffffffful;
#endif
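        /*
         * For a 32-bit lwp only the low 32 bits of each saved argument are
         * meaningful, so the narrower mask strips any stale upper bits.
         */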

        /*
         * The thread lock must be held while looking at the arguments to
         * ensure they don't go away via post_syscall().
         * get_syscall_args() is the only routine to read them which is
         * callable outside the LWP in question and hence the only one that
         * must be synchronized in this manner.
         */
        thread_lock(t);

        code = t->t_sysnum;
        ap = lwp->lwp_ap;

        thread_unlock(t);

        if (code != 0 && code < NSYSCALL) {
                nargs = LWP_GETSYSENT(lwp)[code].sy_narg;

                ASSERT(nargs <= MAXSYSARGS);

                *nargsp = nargs;
                while (nargs-- > 0)
                        *argp++ = *ap++ & mask;
        } else {
                *nargsp = 0;
        }

        return (code);
}
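/*
 * A minimal usage sketch for get_syscall_args() (hypothetical caller, in
 * the style of a /proc consumer examining a stopped lwp):
 *
 *	long args[MAXSYSARGS];
 *	int nargs;
 *
 *	if (get_syscall_args(lwp, args, &nargs) != 0) {
 *		args[0] through args[nargs - 1] hold the arguments;
 *	}
 *
 * A zero return means the lwp is not currently in a system call.
 */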

#ifdef _SYSCALL32_IMPL
/*
 * Get the arguments to the current 32-bit system call.
 */
uint_t
get_syscall32_args(klwp_t *lwp, int *argp, int *nargsp)
{
        long args[MAXSYSARGS];
        uint_t i, code;

        code = get_syscall_args(lwp, args, nargsp);

        for (i = 0; i != *nargsp; i++)
                *argp++ = (int)args[i];
        return (code);
}
#endif

/*
 * Save the system call arguments in a safe place.
 *
 * On the i386 kernel:
 *
 *	Copy the user's args prior to changing the stack or stack pointer.
 *	This is so /proc will be able to get a valid copy of the
 *	args from the user stack even after the user stack has been changed.
 *	Note that the kernel stack copy of the args may also have been
 *	changed by a system call handler which takes C-style arguments.
 *
 *	Note that this may be called by stop() from trap(). In that case
 *	t_sysnum will be zero (syscall_exit clears it), so no args will be
 *	copied.
 *
 * On the amd64 kernel:
 *
 *	For 64-bit applications, lwp->lwp_ap normally points to %rdi..%r9
 *	in the reg structure. If the user is going to change the argument
 *	registers, rax, or the stack and might want to get the args (for
 *	/proc tracing), it must copy the args elsewhere via
 *	save_syscall_args().
 *
 *	For 32-bit applications, lwp->lwp_ap normally points to a copy of
 *	the system call arguments on the kernel stack made from the user
 *	stack. Copy the args prior to changing the stack or stack pointer.
 *	This is so /proc will be able to get a valid copy of the args
 *	from the user stack even after that stack has been changed.
 *
 * This may be called from stop() even when we're not in a system call.
 * Since there's no easy way to tell, this must be safe (not panic).
 * If the copyins get data faults, return non-zero.
 */
int
save_syscall_args()
{
        kthread_t *t = curthread;
        klwp_t *lwp = ttolwp(t);
        uint_t code = t->t_sysnum;
        uint_t nargs;

        if (lwp->lwp_argsaved || code == 0)
                return (0);	/* args already saved or not needed */

        if (code >= NSYSCALL) {
                nargs = 0;	/* illegal syscall */
        } else {
                struct sysent *se = LWP_GETSYSENT(lwp);
                struct sysent *callp = se + code;

                nargs = callp->sy_narg;
                if (LOADABLE_SYSCALL(callp) && nargs == 0) {
                        krwlock_t *module_lock;

                        /*
                         * Find out how many arguments the system
                         * call uses.
                         *
                         * We have the property that loaded syscalls
                         * never change the number of arguments they
                         * use after they've been loaded once. This
                         * allows us to stop for /proc tracing without
                         * holding the module lock.
                         * /proc is assured that sy_narg is valid.
                         */
                        module_lock = lock_syscall(se, code);
                        nargs = callp->sy_narg;
                        rw_exit(module_lock);
                }
        }

        /*
         * Fetch the system call arguments.
         */
        if (nargs == 0)
                goto out;

        ASSERT(nargs <= MAXSYSARGS);

        if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
#if defined(_LP64)
                struct regs *rp = lwptoregs(lwp);

                lwp->lwp_arg[0] = rp->r_rdi;
                lwp->lwp_arg[1] = rp->r_rsi;
                lwp->lwp_arg[2] = rp->r_rdx;
                lwp->lwp_arg[3] = rp->r_rcx;
                lwp->lwp_arg[4] = rp->r_r8;
                lwp->lwp_arg[5] = rp->r_r9;
                if (nargs > 6 && copyin_args(rp, &lwp->lwp_arg[6], nargs - 6))
                        return (-1);
        } else {
#endif
                if (COPYIN_ARGS32(lwptoregs(lwp), lwp->lwp_arg, nargs))
                        return (-1);
        }
out:
        lwp->lwp_ap = lwp->lwp_arg;
        lwp->lwp_argsaved = 1;
        t->t_post_sys = 1;	/* so lwp_ap will be reset */
        return (0);
}

void
reset_syscall_args(void)
{
        ttolwp(curthread)->lwp_argsaved = 0;
}

/*
 * Call a system call which takes a pointer to the user args struct and
 * a pointer to the return values. This is a bit slower than the standard
 * C arg-passing method in some cases.
 */
int64_t
syscall_ap(void)
{
        uint_t error;
        struct sysent *callp;
        rval_t rval;
        kthread_t *t = curthread;
        klwp_t *lwp = ttolwp(t);
        struct regs *rp = lwptoregs(lwp);

        callp = LWP_GETSYSENT(lwp) + t->t_sysnum;

#if defined(__amd64)
        /*
         * If the arguments don't fit in registers %rdi-%r9, make sure they
         * have been copied to the lwp_arg array.
         */
        if (callp->sy_narg > 6 && save_syscall_args())
                return ((int64_t)set_errno(EFAULT));
#endif

        rval.r_val1 = 0;
        rval.r_val2 = rp->r_r1;
        lwp->lwp_error = 0;	/* for old drivers */
        error = (*(callp->sy_call))(lwp->lwp_ap, &rval);
        if (error)
                return ((longlong_t)set_errno(error));
        return (rval.r_vals);
}

/*
 * Load system call module.
 * Returns with pointer to held read lock for module.
 */
static krwlock_t *
lock_syscall(struct sysent *table, uint_t code)
{
        krwlock_t *module_lock;
        struct modctl *modp;
        int id;
        struct sysent *callp;

        callp = table + code;
        module_lock = callp->sy_lock;

        /*
         * Optimization to only call modload if we don't have a loaded
         * syscall.
         */
        rw_enter(module_lock, RW_READER);
        if (LOADED_SYSCALL(callp))
                return (module_lock);
        rw_exit(module_lock);

        for (;;) {
                if ((id = modload("sys", syscallnames[code])) == -1)
                        break;

                /*
                 * If we loaded successfully at least once, the modctl
                 * will still be valid, so we try to grab it by filename.
                 * If this call fails, it's because the mod_filename
                 * was changed after the call to modload() (mod_hold_by_name()
                 * is the likely culprit). We can safely just take
                 * another lap if this is the case; the modload() will
                 * change the mod_filename back to one by which we can
                 * find the modctl.
                 */
                modp = mod_find_by_filename("sys", syscallnames[code]);

                if (modp == NULL)
                        continue;

                mutex_enter(&mod_lock);

                if (!modp->mod_installed) {
                        mutex_exit(&mod_lock);
                        continue;
                }
                break;
        }
        rw_enter(module_lock, RW_READER);

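        /*
         * If modload() succeeded, mod_lock is still held from the loop
         * above; holding it across the rw_enter() keeps the module from
         * being unloaded between the mod_installed check and re-taking
         * the read lock.
         */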
        if (id != -1)
                mutex_exit(&mod_lock);

        return (module_lock);
}

/*
 * Loadable syscall support.
 * If needed, load the module, then reserve it by holding a read
 * lock for the duration of the call.
 * Later, if the syscall is not unloadable, it could patch the vector.
 */
/*ARGSUSED*/
int64_t
loadable_syscall(
    long a0, long a1, long a2, long a3,
    long a4, long a5, long a6, long a7)
{
        klwp_t *lwp = ttolwp(curthread);
        int64_t rval;
        struct sysent *callp;
        struct sysent *se = LWP_GETSYSENT(lwp);
        krwlock_t *module_lock;
        int code, error = 0;
        int64_t (*sy_call)();

        code = curthread->t_sysnum;
        callp = se + code;

        /*
         * Try to autoload the system call if necessary.
         */
        module_lock = lock_syscall(se, code);
        THREAD_KPRI_RELEASE();	/* drop priority given by rw_enter */

        /*
         * We've locked either the loaded syscall or nosys.
         */

        if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
#if defined(_LP64)
                if (callp->sy_flags & SE_ARGC) {
                        sy_call = (int64_t (*)())callp->sy_call;
                        rval = (*sy_call)(a0, a1, a2, a3, a4, a5);
                } else
                        rval = syscall_ap();
        } else {
#endif
                /*
                 * Now that it's loaded, make sure enough args were copied.
                 */
                if (COPYIN_ARGS32(lwptoregs(lwp), lwp->lwp_ap, callp->sy_narg))
                        error = EFAULT;
                if (error) {
                        rval = set_errno(error);
                } else if (callp->sy_flags & SE_ARGC) {
                        sy_call = (int64_t (*)())callp->sy_call;
                        rval = (*sy_call)(lwp->lwp_ap[0], lwp->lwp_ap[1],
                            lwp->lwp_ap[2], lwp->lwp_ap[3], lwp->lwp_ap[4],
                            lwp->lwp_ap[5]);
                } else
                        rval = syscall_ap();
        }

        THREAD_KPRI_REQUEST();	/* regain priority from read lock */
        rw_exit(module_lock);
        return (rval);
}

/*
 * Indirect syscall handled in libc on x86 architectures
 */
int64_t
indir()
{
        return (nosys());
}

/*
 * set_errno - set an error return from the current system call.
 *	This could be a macro.
 *	This returns the value it is passed, so that the caller can
 *	use tail-recursion-elimination and do return (set_errno(ERRNO));
 */
uint_t
set_errno(uint_t error)
{
        ASSERT(error != 0);	/* must not be used to clear errno */

        curthread->t_post_sys = 1;	/* have post_syscall do error return */
        return (ttolwp(curthread)->lwp_errno = error);
}

/*
 * set_proc_pre_sys - Set pre-syscall processing for entire process.
 */
void
set_proc_pre_sys(proc_t *p)
{
        kthread_t *t;
        kthread_t *first;

        ASSERT(MUTEX_HELD(&p->p_lock));

        t = first = p->p_tlist;
        do {
                t->t_pre_sys = 1;
        } while ((t = t->t_forw) != first);
}
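/*
 * p_tlist is a circular list of the process's threads linked through
 * t_forw, so the do/while loop above (and in the routines below) visits
 * every thread exactly once.
 */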

/*
 * set_proc_post_sys - Set post-syscall processing for entire process.
 */
void
set_proc_post_sys(proc_t *p)
{
        kthread_t *t;
        kthread_t *first;

        ASSERT(MUTEX_HELD(&p->p_lock));

        t = first = p->p_tlist;
        do {
                t->t_post_sys = 1;
        } while ((t = t->t_forw) != first);
}

/*
 * set_proc_sys - Set pre- and post-syscall processing for entire process.
 */
void
set_proc_sys(proc_t *p)
{
        kthread_t *t;
        kthread_t *first;

        ASSERT(MUTEX_HELD(&p->p_lock));

        t = first = p->p_tlist;
        do {
                t->t_pre_sys = 1;
                t->t_post_sys = 1;
        } while ((t = t->t_forw) != first);
}

/*
 * set_all_proc_sys - set pre- and post-syscall processing flags for all
 * user processes.
 *
 * This is needed when auditing, tracing, or other facilities which affect
 * all processes are turned on.
 */
void
set_all_proc_sys()
{
        kthread_t *t;
        kthread_t *first;

        mutex_enter(&pidlock);
        t = first = curthread;
        do {
                t->t_pre_sys = 1;
                t->t_post_sys = 1;
        } while ((t = t->t_next) != first);
        mutex_exit(&pidlock);
}

/*
 * set_all_zone_usr_proc_sys - set pre- and post-syscall processing flags
 * for all user processes running in the given zone (or in all zones when
 * ALL_ZONES is passed).
 *
 * This is needed when auditing, tracing, or other facilities which affect
 * all processes are turned on.
 */
void
set_all_zone_usr_proc_sys(zoneid_t zoneid)
{
        proc_t *p;
        kthread_t *t;

        mutex_enter(&pidlock);
        for (p = practive; p != NULL; p = p->p_next) {
                /* skip kernel and incomplete processes */
                if (p->p_exec == NULLVP || p->p_as == &kas ||
                    p->p_stat == SIDL || p->p_stat == SZOMB ||
                    (p->p_flag & (SSYS | SEXITING | SEXITLWPS)))
                        continue;
                /*
                 * Only processes in the given zone (or, with ALL_ZONES,
                 * in every zone) are taken into account.
                 */
                if (zoneid == ALL_ZONES || p->p_zone->zone_id == zoneid) {
                        mutex_enter(&p->p_lock);
                        if ((t = p->p_tlist) == NULL) {
                                mutex_exit(&p->p_lock);
                                continue;
                        }
                        /*
                         * Set pre- and post-syscall processing flags
                         * for all threads of the process.
                         */
                        do {
                                t->t_pre_sys = 1;
                                t->t_post_sys = 1;
                        } while (p->p_tlist != (t = t->t_forw));
                        mutex_exit(&p->p_lock);
                }
        }
        mutex_exit(&pidlock);
}

/*
 * set_proc_ast - Set asynchronous service trap (AST) flag for all
 * threads in process.
 */
void
set_proc_ast(proc_t *p)
{
        kthread_t *t;
        kthread_t *first;

        ASSERT(MUTEX_HELD(&p->p_lock));

        t = first = p->p_tlist;
        do {
                aston(t);
        } while ((t = t->t_forw) != first);
}