xref: /freebsd/sys/kern/kern_synch.c (revision 78704ef45793e56c8e064611c05c9bb8a0067e9f)
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD$
 */

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;

static struct callout loadav_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

static void	endtsleep(void *);
static void	loadav(void *arg);

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))
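/*
 * For example, an ident of 0xc1234500 maps to slpque[(0xc1234500 >> 8) & 127],
 * i.e. bucket 0x45; idents differing only in their low 8 bits share a bucket.
 * (The address value here is purely illustrative.)
 */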

void
sleepinit(void)
{
	int i;

	hogticks = (hz / 10) * 2;	/* Default only. */
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If priority includes the PCATCH flag, signals
 * are checked before and after sleeping; otherwise signals are not
 * checked.  Returns 0 if awakened and EWOULDBLOCK if the timeout expires.
 * If PCATCH is set and a signal needs to be delivered, ERESTART is
 * returned if the current system call should be restarted if possible,
 * and EINTR is returned if the system call should be interrupted by the
 * signal.
 *
 * The mutex argument is released before the caller is suspended and
 * reacquired before msleep returns.  If priority includes the PDROP
 * flag the mutex is not reacquired before returning.
 */
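/*
 * A minimal usage sketch (the softc "sc", its mutex sc_mtx and flag sc_busy
 * are hypothetical, not part of this file): the caller holds the mutex,
 * re-checks its condition in a loop, and sleeps on the address of that
 * condition.
 *
 *	mtx_lock(&sc->sc_mtx);
 *	while (sc->sc_busy) {
 *		error = msleep(&sc->sc_busy, &sc->sc_mtx, PZERO | PCATCH,
 *		    "scbusy", hz);
 *		if (error != 0)
 *			break;
 *	}
 *	mtx_unlock(&sc->sc_mtx);
 *
 * The matching wakeup(&sc->sc_busy) is issued by whatever code clears
 * sc_busy.
 */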

int
msleep(ident, mtx, priority, wmesg, timo)
	void *ident;
	struct mtx *mtx;
	int priority, timo;
	const char *wmesg;
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int sig, catch = priority & PCATCH;
	int rval = 0;
	WITNESS_SAVE_DECL(mtx);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	WITNESS_SLEEP(0, &mtx->mtx_object);
	KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
	    ("sleeping without a mutex"));
	/*
	 * If we are capable of async syscalls and there isn't already
	 * another one ready to return, start a new thread
	 * and queue it as ready to run. Note that there is danger here
	 * because we need to make sure that we don't sleep allocating
	 * the thread (recursion here might be bad).
	 * Hence the TDF_INMSLEEP flag.
	 */
	if (p->p_flag & P_KSES) {
		/*
		 * Just don't bother if we are exiting
		 * and not the exiting thread.
		 */
		if ((p->p_flag & P_WEXIT) && catch && p->p_singlethread != td)
			return (EINTR);
		if (td->td_mailbox && (!(td->td_flags & TDF_INMSLEEP))) {
			/*
			 * Arrange for an upcall to be readied.
			 * It will not actually happen until all
			 * pending in-kernel work for this KSEGRP
			 * has been done.
			 */
			mtx_lock_spin(&sched_lock);
			/* Don't recurse here! */
			td->td_flags |= TDF_INMSLEEP;
			thread_schedule_upcall(td, td->td_kse);
			td->td_flags &= ~TDF_INMSLEEP;
			mtx_unlock_spin(&sched_lock);
		}
	}
	mtx_lock_spin(&sched_lock);
	if (cold) {
		/*
		 * During autoconfiguration, just give interrupts
		 * a chance, then return.
		 * Don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		if (mtx != NULL && priority & PDROP)
			mtx_unlock(mtx);
		mtx_unlock_spin(&sched_lock);
		return (0);
	}

	DROP_GIANT();

	if (mtx != NULL) {
		mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
		WITNESS_SAVE(&mtx->mtx_object, mtx);
		mtx_unlock(mtx);
		if (priority & PDROP)
			mtx = NULL;
	}

	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));

	CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
	    td, p->p_pid, p->p_comm, wmesg, ident);

	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], td, td_slpq);
	TD_SET_ON_SLEEPQ(td);
	if (timo)
		callout_reset(&td->td_slpcallout, timo, endtsleep, td);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there, and
	 * a wakeup or a SIGCONT (or both) could occur while we were stopped
	 * without resuming us; thus we must be ready for sleep
	 * when cursig is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from cursig.
	 */
	if (catch) {
		CTR3(KTR_PROC, "msleep caught: thread %p (pid %d, %s)", td,
		    p->p_pid, p->p_comm);
		td->td_flags |= TDF_SINTR;
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		sig = cursig(td);
		if (sig == 0 && thread_suspend_check(1))
			sig = SIGSTOP;
		mtx_lock_spin(&sched_lock);
		PROC_UNLOCK(p);
		if (sig != 0) {
			if (TD_ON_SLEEPQ(td))
				unsleep(td);
		} else if (!TD_ON_SLEEPQ(td))
			catch = 0;
	} else
		sig = 0;

	/*
	 * Let the scheduler know we're about to voluntarily go to sleep.
	 */
	sched_sleep(td, priority & PRIMASK);

	if (TD_ON_SLEEPQ(td)) {
		p->p_stats->p_ru.ru_nvcsw++;
		TD_SET_SLEEPING(td);
		mi_switch();
	}
	/*
	 * We're awake from voluntary sleep.
	 */
	CTR3(KTR_PROC, "msleep resume: thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	td->td_flags &= ~TDF_SINTR;
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			rval = EWOULDBLOCK;
	} else if (td->td_flags & TDF_TIMOFAIL) {
		td->td_flags &= ~TDF_TIMOFAIL;
	} else if (timo && callout_stop(&td->td_slpcallout) == 0) {
		/*
		 * This isn't supposed to be pretty.  If we are here, then
		 * the endtsleep() callout is currently executing on another
		 * CPU and is either spinning on the sched_lock or will be
		 * soon.  If we don't synchronize here, there is a chance
		 * that this process may msleep() again before the callout
		 * has a chance to run and the callout may end up waking up
		 * the wrong msleep().  Yuck.
		 */
		TD_SET_SLEEPING(td);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		td->td_flags &= ~TDF_TIMOFAIL;
	}
	mtx_unlock_spin(&sched_lock);

	if (rval == 0 && catch) {
		PROC_LOCK(p);
		/* XXX: shouldn't we always be calling cursig()? */
		if (sig != 0 || (sig = cursig(td))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				rval = EINTR;
			else
				rval = ERESTART;
		}
		PROC_UNLOCK(p);
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	if (mtx != NULL) {
		mtx_lock(mtx);
		WITNESS_RESTORE(&mtx->mtx_object, mtx);
	}
	return (rval);
}

/*
 * Implement the timeout for msleep().
 *
 * If the thread hasn't been awakened (wchan non-zero),
 * set the timeout flag and undo the sleep.  If the thread
 * is stopped, just unsleep so it will remain stopped.
 * MP-safe, called without the Giant mutex.
 */
static void
endtsleep(arg)
	void *arg;
{
	register struct thread *td = arg;

	CTR3(KTR_PROC, "endtsleep: thread %p (pid %d, %s)",
	    td, td->td_proc->p_pid, td->td_proc->p_comm);
	mtx_lock_spin(&sched_lock);
	/*
	 * This is the other half of the synchronization with msleep()
	 * described above.  If the thread is still on the sleep queue the
	 * timeout fired first, so mark it with TDF_TIMEOUT; otherwise the
	 * thread has already been taken off the queue and we set
	 * TDF_TIMOFAIL so that msleep() can tell this callout has run.
	 */
	if (TD_ON_SLEEPQ(td)) {
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
		TD_CLR_ON_SLEEPQ(td);
		td->td_flags |= TDF_TIMEOUT;
	} else {
		td->td_flags |= TDF_TIMOFAIL;
	}
	TD_CLR_SLEEPING(td);
	setrunnable(td);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Abort a thread, as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 * This is nearly identical to cv_abort(); think about merging them.
 * Also, whatever the signal code does...
 */
void
abortsleep(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * If the TDF_TIMEOUT flag is set, just leave. A
	 * timeout is scheduled anyhow.
	 */
	if ((td->td_flags & (TDF_TIMEOUT | TDF_SINTR)) == TDF_SINTR) {
		if (TD_ON_SLEEPQ(td)) {
			unsleep(td);
			TD_CLR_SLEEPING(td);
			setrunnable(td);
		}
	}
}

/*
 * Remove a thread from its sleep queue.
 */
void
unsleep(struct thread *td)
{

	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td)) {
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
		TD_CLR_ON_SLEEPQ(td);
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
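/*
 * A sketch of the producer side of the msleep()/wakeup() pairing shown
 * above (again using the hypothetical softc fields): clear the condition
 * while holding the same mutex, then wake the sleepers.
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_busy = 0;
 *	wakeup(&sc->sc_busy);
 *	mtx_unlock(&sc->sc_mtx);
 */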
void
wakeup(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct thread *td;
	struct thread *ntd;
	struct proc *p;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_slpq);
		if (td->td_wchan == ident) {
			unsleep(td);
			TD_CLR_SLEEPING(td);
			setrunnable(td);
			p = td->td_proc;
			CTR3(KTR_PROC, "wakeup: thread %p (pid %d, %s)",
			    td, p->p_pid, p->p_comm);
			goto restart;
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Make a process sleeping on the specified identifier runnable.
 * May wake more than one process if a target process is currently
 * swapped out.
 */
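/*
 * Which sleeper wakeup_one() picks is unspecified, so it is only
 * appropriate when every thread sleeping on the identifier re-checks its
 * wait condition and can handle being the one chosen.
 */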
void
wakeup_one(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct thread *td;
	register struct proc *p;
	struct thread *ntd;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_slpq);
		if (td->td_wchan == ident) {
			unsleep(td);
			TD_CLR_SLEEPING(td);
			setrunnable(td);
			p = td->td_proc;
			CTR3(KTR_PROC, "wakeup1: thread %p (pid %d, %s)",
			    td, p->p_pid, p->p_comm);
			break;
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * The machine-independent part of context switching.
 */
void
mi_switch(void)
{
	struct bintime new_switchtime;
	struct thread *td = curthread;	/* XXX */
	struct proc *p = td->td_proc;	/* XXX */
	struct kse *ke = td->td_kse;
	u_int sched_nest;

	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);

	KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
	if (!TD_ON_LOCK(td) &&
	    !TD_ON_RUNQ(td) &&
	    !TD_IS_RUNNING(td))
		mtx_assert(&Giant, MA_NOTOWNED);
#endif
	KASSERT(td->td_critnest == 1,
	    ("mi_switch: switch in a critical section"));

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	binuptime(&new_switchtime);
	bintime_add(&p->p_runtime, &new_switchtime);
	bintime_sub(&p->p_runtime, PCPU_PTR(switchtime));

#ifdef DDB
	/*
	 * Don't perform context switches from the debugger.
	 */
	if (db_active) {
		mtx_unlock_spin(&sched_lock);
		db_error("Context switches not allowed in the debugger.");
	}
#endif

	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * over max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit != RLIM_INFINITY &&
	    p->p_runtime.sec > p->p_cpulimit) {
		p->p_sflag |= PS_XCPU;
		ke->ke_flags |= KEF_ASTPENDING;
	}

	/*
	 * Finish up stats for outgoing thread.
	 */
	cnt.v_swtch++;
	PCPU_SET(switchtime, new_switchtime);
	CTR3(KTR_PROC, "mi_switch: old thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);

	sched_nest = sched_lock.mtx_recurse;
	sched_switchout(td);

	cpu_switch();		/* SHAZAM!! */

	sched_lock.mtx_recurse = sched_nest;
	sched_lock.mtx_lock = (uintptr_t)td;
	sched_switchin(td);

	/*
	 * Start setting up stats etc. for the incoming thread.
	 * Similar code in fork_exit() is returned to by cpu_switch()
	 * in the case of a new thread/process.
	 */
	CTR3(KTR_PROC, "mi_switch: new thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	if (PCPU_GET(switchtime.sec) == 0)
		binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/*
	 * Call the switchin function while still holding the scheduler lock
	 * (used by the idlezero code and the general page-zeroing code)
	 */
	if (td->td_switchin)
		td->td_switchin();
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	switch (p->p_state) {
	case PRS_ZOMBIE:
		panic("setrunnable(1)");
	default:
		break;
	}
	switch (td->td_state) {
	case TDS_RUNNING:
	case TDS_RUNQ:
		return;
	case TDS_INHIBITED:
		/*
		 * If we are only inhibited because we are swapped out
		 * then arrange to swap in this process.  Otherwise just return.
		 */
		if (td->td_inhibitors != TDI_SWAPPED)
			return;
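		/* FALLTHROUGH */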
	case TDS_CAN_RUN:
		break;
	default:
		printf("state is 0x%x", td->td_state);
		panic("setrunnable(2)");
	}
	if ((p->p_sflag & PS_INMEM) == 0) {
		if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
			p->p_sflag |= PS_SWAPINREQ;
			wakeup(&proc0);
		}
	} else
		sched_wakeup(td);
}

/*
 * Compute a TENEX-style load average of a quantity on
 * 1, 5, and 15 minute intervals.
 * XXXKSE   Needs a complete rewrite when correct info is available.
 * Completely bogus; only works with 1:1 (but compiles ok now :-)
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;
	struct proc *p;
	struct thread *td;

	avg = &averunnable;
	sx_slock(&allproc_lock);
	nrun = 0;
	FOREACH_PROC_IN_SYSTEM(p) {
		FOREACH_THREAD_IN_PROC(p, td) {
			switch (td->td_state) {
			case TDS_RUNQ:
			case TDS_RUNNING:
				if ((p->p_flag & P_NOLOAD) != 0)
					goto nextproc;
				nrun++; /* XXXKSE */
			default:
				break;
			}
nextproc:
			continue;
		}
	}
	sx_sunlock(&allproc_lock);
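	/*
	 * Decay each average toward the sampled run-queue length: with
	 * c = exp(-5sec/T) for T = 60, 300, 900 seconds, the fixed-point
	 * update below computes
	 *
	 *	ldavg = c * ldavg + (1 - c) * nrun * FSCALE
	 *
	 * so that, e.g., a constant nrun converges to nrun * FSCALE.
	 */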
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}

/* ARGSUSED */
static void
sched_setup(dummy)
	void *dummy;
{
	callout_init(&loadav_callout, 0);

	/* Kick off timeout-driven events by calling loadav() the first time. */
	loadav(NULL);
}

/*
 * General purpose yield system call.  Drops the calling thread to the
 * lowest time-sharing priority and switches away.
 */
int
yield(struct thread *td, struct yield_args *uap)
{
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&Giant, MA_NOTOWNED);
	mtx_lock_spin(&sched_lock);
	kg->kg_proc->p_stats->p_ru.ru_nvcsw++;
	sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch();
	mtx_unlock_spin(&sched_lock);
	td->td_retval[0] = 0;

	return (0);
}