xref: /freebsd/sys/kern/sched_4bsd.c (revision 6af83ee0d2941d18880b6aaa2b4facd1d30c6106)
1 /*-
2  * Copyright (c) 1982, 1986, 1990, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #define kse td_sched
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/ktr.h>
44 #include <sys/lock.h>
45 #include <sys/kthread.h>
46 #include <sys/mutex.h>
47 #include <sys/proc.h>
48 #include <sys/resourcevar.h>
49 #include <sys/sched.h>
50 #include <sys/smp.h>
51 #include <sys/sysctl.h>
52 #include <sys/sx.h>
53 #include <sys/turnstile.h>
54 #include <machine/smp.h>
55 
56 /*
57  * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
58  * the range 100-256 Hz (approximately).
59  */
60 #define	ESTCPULIM(e) \
61     min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
62     RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
63 #ifdef SMP
64 #define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
65 #else
66 #define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
67 #endif
68 #define	NICE_WEIGHT		1	/* Priorities per nice level. */
69 
70 /*
71  * The schedulable entity that can be given a context to run.
72  * A process may have several of these. Probably one per processor
73  * but posibly a few more. In this universe they are grouped
74  * with a KSEG that contains the priority and niceness
75  * for the group.
76  */
77 struct kse {
78 	TAILQ_ENTRY(kse) ke_procq;	/* (j/z) Run queue. */
79 	struct thread	*ke_thread;	/* (*) Active associated thread. */
80 	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
81 	char		ke_rqindex;	/* (j) Run queue index. */
82 	enum {
83 		KES_THREAD = 0x0,	/* slaved to thread state */
84 		KES_ONRUNQ
85 	} ke_state;			/* (j) KSE status. */
86 	int		ke_cpticks;	/* (j) Ticks of cpu time. */
87 	struct runq	*ke_runq;	/* runq the kse is currently on */
88 };
89 
90 #define ke_proc		ke_thread->td_proc
91 #define ke_ksegrp	ke_thread->td_ksegrp
92 
93 #define td_kse td_sched
94 
95 /* flags kept in td_flags */
96 #define TDF_DIDRUN	TDF_SCHED0	/* KSE actually ran. */
97 #define TDF_EXIT	TDF_SCHED1	/* KSE is being killed. */
98 #define TDF_BOUND	TDF_SCHED2
99 
100 #define ke_flags	ke_thread->td_flags
101 #define KEF_DIDRUN	TDF_DIDRUN /* KSE actually ran. */
102 #define KEF_EXIT	TDF_EXIT /* KSE is being killed. */
103 #define KEF_BOUND	TDF_BOUND /* stuck to one CPU */
104 
105 #define SKE_RUNQ_PCPU(ke)						\
106     ((ke)->ke_runq != 0 && (ke)->ke_runq != &runq)
107 
108 struct kg_sched {
109 	struct thread	*skg_last_assigned; /* (j) Last thread assigned to */
110 					   /* the system scheduler. */
111 	int	skg_avail_opennings;	/* (j) Num KSEs requested in group. */
112 	int	skg_concurrency;	/* (j) Num KSEs requested in group. */
113 };
114 #define kg_last_assigned	kg_sched->skg_last_assigned
115 #define kg_avail_opennings	kg_sched->skg_avail_opennings
116 #define kg_concurrency		kg_sched->skg_concurrency
117 
118 #define SLOT_RELEASE(kg)						\
119 do {									\
120 	kg->kg_avail_opennings++; 					\
121 	CTR3(KTR_RUNQ, "kg %p(%d) Slot released (->%d)",		\
122 	kg,								\
123 	kg->kg_concurrency,						\
124 	 kg->kg_avail_opennings);					\
125 /*	KASSERT((kg->kg_avail_opennings <= kg->kg_concurrency),		\
126 	    ("slots out of whack"));*/					\
127 } while (0)
128 
129 #define SLOT_USE(kg)							\
130 do {									\
131 	kg->kg_avail_opennings--; 					\
132 	CTR3(KTR_RUNQ, "kg %p(%d) Slot used (->%d)",			\
133 	kg,								\
134 	kg->kg_concurrency,						\
135 	 kg->kg_avail_opennings);					\
136 /*	KASSERT((kg->kg_avail_opennings >= 0),				\
137 	    ("slots out of whack"));*/					\
138 } while (0)
139 
140 /*
141  * KSE_CAN_MIGRATE macro returns true if the kse can migrate between
142  * cpus.
143  */
144 #define KSE_CAN_MIGRATE(ke)						\
145     ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
146 
147 static struct kse kse0;
148 static struct kg_sched kg_sched0;
149 
150 static int	sched_tdcnt;	/* Total runnable threads in the system. */
151 static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
152 #define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */
153 
154 static struct callout roundrobin_callout;
155 
156 static void	slot_fill(struct ksegrp *kg);
157 static struct kse *sched_choose(void);		/* XXX Should be thread * */
158 
159 static void	setup_runqs(void);
160 static void	roundrobin(void *arg);
161 static void	schedcpu(void);
162 static void	schedcpu_thread(void);
163 static void	sched_priority(struct thread *td, u_char prio);
164 static void	sched_setup(void *dummy);
165 static void	maybe_resched(struct thread *td);
166 static void	updatepri(struct ksegrp *kg);
167 static void	resetpriority(struct ksegrp *kg);
168 static void	resetpriority_thread(struct thread *td, struct ksegrp *kg);
169 #ifdef SMP
170 static int	forward_wakeup(int  cpunum);
171 #endif
172 
173 static struct kproc_desc sched_kp = {
174         "schedcpu",
175         schedcpu_thread,
176         NULL
177 };
178 SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start, &sched_kp)
179 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
180 
181 /*
182  * Global run queue.
183  */
184 static struct runq runq;
185 
186 #ifdef SMP
187 /*
188  * Per-CPU run queues
189  */
190 static struct runq runq_pcpu[MAXCPU];
191 #endif
192 
193 static void
194 setup_runqs(void)
195 {
196 #ifdef SMP
197 	int i;
198 
199 	for (i = 0; i < MAXCPU; ++i)
200 		runq_init(&runq_pcpu[i]);
201 #endif
202 
203 	runq_init(&runq);
204 }
205 
206 static int
207 sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
208 {
209 	int error, new_val;
210 
211 	new_val = sched_quantum * tick;
212 	error = sysctl_handle_int(oidp, &new_val, 0, req);
213         if (error != 0 || req->newptr == NULL)
214 		return (error);
215 	if (new_val < tick)
216 		return (EINVAL);
217 	sched_quantum = new_val / tick;
218 	hogticks = 2 * sched_quantum;
219 	return (0);
220 }
221 
222 SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");
223 
224 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
225     "Scheduler name");
226 
227 SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
228     0, sizeof sched_quantum, sysctl_kern_quantum, "I",
229     "Roundrobin scheduling quantum in microseconds");
230 
231 #ifdef SMP
232 /* Enable forwarding of wakeups to all other cpus */
233 SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");
234 
235 static int forward_wakeup_enabled = 1;
236 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
237 	   &forward_wakeup_enabled, 0,
238 	   "Forwarding of wakeup to idle CPUs");
239 
240 static int forward_wakeups_requested = 0;
241 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
242 	   &forward_wakeups_requested, 0,
243 	   "Requests for Forwarding of wakeup to idle CPUs");
244 
245 static int forward_wakeups_delivered = 0;
246 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
247 	   &forward_wakeups_delivered, 0,
248 	   "Completed Forwarding of wakeup to idle CPUs");
249 
250 static int forward_wakeup_use_mask = 1;
251 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
252 	   &forward_wakeup_use_mask, 0,
253 	   "Use the mask of idle cpus");
254 
255 static int forward_wakeup_use_loop = 0;
256 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
257 	   &forward_wakeup_use_loop, 0,
258 	   "Use a loop to find idle cpus");
259 
260 static int forward_wakeup_use_single = 0;
261 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
262 	   &forward_wakeup_use_single, 0,
263 	   "Only signal one idle cpu");
264 
265 static int forward_wakeup_use_htt = 0;
266 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
267 	   &forward_wakeup_use_htt, 0,
268 	   "account for htt");
269 
270 #endif
271 static int sched_followon = 0;
272 SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
273 	   &sched_followon, 0,
274 	   "allow threads to share a quantum");
275 
276 static int sched_pfollowons = 0;
277 SYSCTL_INT(_kern_sched, OID_AUTO, pfollowons, CTLFLAG_RD,
278 	   &sched_pfollowons, 0,
279 	   "number of followons done to a different ksegrp");
280 
281 static int sched_kgfollowons = 0;
282 SYSCTL_INT(_kern_sched, OID_AUTO, kgfollowons, CTLFLAG_RD,
283 	   &sched_kgfollowons, 0,
284 	   "number of followons done in a ksegrp");
285 
286 static __inline void
287 sched_load_add(void)
288 {
289 	sched_tdcnt++;
290 	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
291 }
292 
293 static __inline void
294 sched_load_rem(void)
295 {
296 	sched_tdcnt--;
297 	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
298 }
299 /*
300  * Arrange to reschedule if necessary, taking the priorities and
301  * schedulers into account.
302  */
303 static void
304 maybe_resched(struct thread *td)
305 {
306 
307 	mtx_assert(&sched_lock, MA_OWNED);
308 	if (td->td_priority < curthread->td_priority)
309 		curthread->td_flags |= TDF_NEEDRESCHED;
310 }
311 
312 /*
313  * Force switch among equal priority processes every 100ms.
314  * We don't actually need to force a context switch of the current process.
315  * The act of firing the event triggers a context switch to softclock() and
316  * then switching back out again which is equivalent to a preemption, thus
317  * no further work is needed on the local CPU.
318  */
319 /* ARGSUSED */
320 static void
321 roundrobin(void *arg)
322 {
323 
324 #ifdef SMP
325 	mtx_lock_spin(&sched_lock);
326 	forward_roundrobin();
327 	mtx_unlock_spin(&sched_lock);
328 #endif
329 
330 	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
331 }
332 
333 /*
334  * Constants for digital decay and forget:
335  *	90% of (kg_estcpu) usage in 5 * loadav time
336  *	95% of (ke_pctcpu) usage in 60 seconds (load insensitive)
337  *          Note that, as ps(1) mentions, this can let percentages
338  *          total over 100% (I've seen 137.9% for 3 processes).
339  *
340  * Note that schedclock() updates kg_estcpu and p_cpticks asynchronously.
341  *
342  * We wish to decay away 90% of kg_estcpu in (5 * loadavg) seconds.
343  * That is, the system wants to compute a value of decay such
344  * that the following for loop:
345  * 	for (i = 0; i < (5 * loadavg); i++)
346  * 		kg_estcpu *= decay;
347  * will compute
348  * 	kg_estcpu *= 0.1;
349  * for all values of loadavg:
350  *
351  * Mathematically this loop can be expressed by saying:
352  * 	decay ** (5 * loadavg) ~= .1
353  *
354  * The system computes decay as:
355  * 	decay = (2 * loadavg) / (2 * loadavg + 1)
356  *
357  * We wish to prove that the system's computation of decay
358  * will always fulfill the equation:
359  * 	decay ** (5 * loadavg) ~= .1
360  *
361  * If we compute b as:
362  * 	b = 2 * loadavg
363  * then
364  * 	decay = b / (b + 1)
365  *
366  * We now need to prove two things:
367  *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
368  *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
369  *
370  * Facts:
371  *         For x close to zero, exp(x) =~ 1 + x, since
372  *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
373  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
374  *         For x close to zero, ln(1+x) =~ x, since
375  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
376  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
377  *         ln(.1) =~ -2.30
378  *
379  * Proof of (1):
380  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
381  *	solving for factor,
382  *      ln(factor) =~ (-2.30/5*loadav), or
383  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
384  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
385  *
386  * Proof of (2):
387  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
388  *	solving for power,
389  *      power*ln(b/(b+1)) =~ -2.30, or
390  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
391  *
392  * Actual power values for the implemented algorithm are as follows:
393  *      loadav: 1       2       3       4
394  *      power:  5.68    10.32   14.94   19.55
395  */
396 
397 /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
398 #define	loadfactor(loadav)	(2 * (loadav))
399 #define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
400 
401 /* decay 95% of `ke_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
402 static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
403 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
404 
405 /*
406  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
407  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
408  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
409  *
410  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
411  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
412  *
413  * If you don't want to bother with the faster/more-accurate formula, you
414  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
415  * (more general) method of calculating the %age of CPU used by a process.
416  */
417 #define	CCPU_SHIFT	11
418 
419 /*
420  * Recompute process priorities, every hz ticks.
421  * MP-safe, called without the Giant mutex.
422  */
423 /* ARGSUSED */
424 static void
425 schedcpu(void)
426 {
427 	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
428 	struct thread *td;
429 	struct proc *p;
430 	struct kse *ke;
431 	struct ksegrp *kg;
432 	int awake, realstathz;
433 
434 	realstathz = stathz ? stathz : hz;
435 	sx_slock(&allproc_lock);
436 	FOREACH_PROC_IN_SYSTEM(p) {
437 		/*
438 		 * Prevent state changes and protect run queue.
439 		 */
440 		mtx_lock_spin(&sched_lock);
441 		/*
442 		 * Increment time in/out of memory.  We ignore overflow; with
443 		 * 16-bit int's (remember them?) overflow takes 45 days.
444 		 */
445 		p->p_swtime++;
446 		FOREACH_KSEGRP_IN_PROC(p, kg) {
447 			awake = 0;
448 			FOREACH_THREAD_IN_GROUP(kg, td) {
449 				ke = td->td_kse;
450 				/*
451 				 * Increment sleep time (if sleeping).  We
452 				 * ignore overflow, as above.
453 				 */
454 				/*
455 				 * The kse slptimes are not touched in wakeup
456 				 * because the thread may not HAVE a KSE.
457 				 */
458 				if (ke->ke_state == KES_ONRUNQ) {
459 					awake = 1;
460 					ke->ke_flags &= ~KEF_DIDRUN;
461 				} else if ((ke->ke_state == KES_THREAD) &&
462 				    (TD_IS_RUNNING(td))) {
463 					awake = 1;
464 					/* Do not clear KEF_DIDRUN */
465 				} else if (ke->ke_flags & KEF_DIDRUN) {
466 					awake = 1;
467 					ke->ke_flags &= ~KEF_DIDRUN;
468 				}
469 
470 				/*
471 				 * ke_pctcpu is only for ps and ttyinfo().
472 				 * Do it per kse, and add them up at the end?
473 				 * XXXKSE
474 				 */
475 				ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
476 				    FSHIFT;
477 				/*
478 				 * If the kse has been idle the entire second,
479 				 * stop recalculating its priority until
480 				 * it wakes up.
481 				 */
482 				if (ke->ke_cpticks == 0)
483 					continue;
484 #if	(FSHIFT >= CCPU_SHIFT)
485 				ke->ke_pctcpu += (realstathz == 100)
486 				    ? ((fixpt_t) ke->ke_cpticks) <<
487 				    (FSHIFT - CCPU_SHIFT) :
488 				    100 * (((fixpt_t) ke->ke_cpticks)
489 				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
490 #else
491 				ke->ke_pctcpu += ((FSCALE - ccpu) *
492 				    (ke->ke_cpticks *
493 				    FSCALE / realstathz)) >> FSHIFT;
494 #endif
495 				ke->ke_cpticks = 0;
496 			} /* end of kse loop */
497 			/*
498 			 * If there are ANY running threads in this KSEGRP,
499 			 * then don't count it as sleeping.
500 			 */
501 			if (awake) {
502 				if (kg->kg_slptime > 1) {
503 					/*
504 					 * In an ideal world, this should not
505 					 * happen, because whoever woke us
506 					 * up from the long sleep should have
507 					 * unwound the slptime and reset our
508 					 * priority before we run at the stale
509 					 * priority.  Should KASSERT at some
510 					 * point when all the cases are fixed.
511 					 */
512 					updatepri(kg);
513 				}
514 				kg->kg_slptime = 0;
515 			} else
516 				kg->kg_slptime++;
517 			if (kg->kg_slptime > 1)
518 				continue;
519 			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
520 		      	resetpriority(kg);
521 			FOREACH_THREAD_IN_GROUP(kg, td) {
522 				resetpriority_thread(td, kg);
523 			}
524 		} /* end of ksegrp loop */
525 		mtx_unlock_spin(&sched_lock);
526 	} /* end of process loop */
527 	sx_sunlock(&allproc_lock);
528 }
529 
530 /*
531  * Main loop for a kthread that executes schedcpu once a second.
532  */
533 static void
534 schedcpu_thread(void)
535 {
536 	int nowake;
537 
538 	for (;;) {
539 		schedcpu();
540 		tsleep(&nowake, curthread->td_priority, "-", hz);
541 	}
542 }
543 
544 /*
545  * Recalculate the priority of a process after it has slept for a while.
546  * For all load averages >= 1 and max kg_estcpu of 255, sleeping for at
547  * least six times the loadfactor will decay kg_estcpu to zero.
548  */
549 static void
550 updatepri(struct ksegrp *kg)
551 {
552 	register fixpt_t loadfac;
553 	register unsigned int newcpu;
554 
555 	loadfac = loadfactor(averunnable.ldavg[0]);
556 	if (kg->kg_slptime > 5 * loadfac)
557 		kg->kg_estcpu = 0;
558 	else {
559 		newcpu = kg->kg_estcpu;
560 		kg->kg_slptime--;	/* was incremented in schedcpu() */
561 		while (newcpu && --kg->kg_slptime)
562 			newcpu = decay_cpu(loadfac, newcpu);
563 		kg->kg_estcpu = newcpu;
564 	}
565 }
566 
567 /*
568  * Compute the priority of a process when running in user mode.
569  * Arrange to reschedule if the resulting priority is better
570  * than that of the current process.
571  */
572 static void
573 resetpriority(struct ksegrp *kg)
574 {
575 	register unsigned int newpriority;
576 
577 	if (kg->kg_pri_class == PRI_TIMESHARE) {
578 		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
579 		    NICE_WEIGHT * (kg->kg_proc->p_nice - PRIO_MIN);
580 		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
581 		    PRI_MAX_TIMESHARE);
582 		kg->kg_user_pri = newpriority;
583 	}
584 }
585 
586 /*
587  * Update the thread's priority when the associated ksegroup's user
588  * priority changes.
589  */
590 static void
591 resetpriority_thread(struct thread *td, struct ksegrp *kg)
592 {
593 
594 	/* Only change threads with a time sharing user priority. */
595 	if (td->td_priority < PRI_MIN_TIMESHARE ||
596 	    td->td_priority > PRI_MAX_TIMESHARE)
597 		return;
598 
599 	/* XXX the whole needresched thing is broken, but not silly. */
600 	maybe_resched(td);
601 
602 	sched_prio(td, kg->kg_user_pri);
603 }
604 
605 /* ARGSUSED */
606 static void
607 sched_setup(void *dummy)
608 {
609 	setup_runqs();
610 
611 	if (sched_quantum == 0)
612 		sched_quantum = SCHED_QUANTUM;
613 	hogticks = 2 * sched_quantum;
614 
615 	callout_init(&roundrobin_callout, CALLOUT_MPSAFE);
616 
617 	/* Kick off timeout driven events by calling first time. */
618 	roundrobin(NULL);
619 
620 	/* Account for thread0. */
621 	sched_load_add();
622 }
623 
624 /* External interfaces start here */
625 /*
626  * Very early in the boot some setup of scheduler-specific
627  * parts of proc0 and of soem scheduler resources needs to be done.
628  * Called from:
629  *  proc0_init()
630  */
631 void
632 schedinit(void)
633 {
634 	/*
635 	 * Set up the scheduler specific parts of proc0.
636 	 */
637 	proc0.p_sched = NULL; /* XXX */
638 	ksegrp0.kg_sched = &kg_sched0;
639 	thread0.td_sched = &kse0;
640 	kse0.ke_thread = &thread0;
641 	kse0.ke_state = KES_THREAD;
642 	kg_sched0.skg_concurrency = 1;
643 	kg_sched0.skg_avail_opennings = 0; /* we are already running */
644 }
645 
646 int
647 sched_runnable(void)
648 {
649 #ifdef SMP
650 	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
651 #else
652 	return runq_check(&runq);
653 #endif
654 }
655 
656 int
657 sched_rr_interval(void)
658 {
659 	if (sched_quantum == 0)
660 		sched_quantum = SCHED_QUANTUM;
661 	return (sched_quantum);
662 }
663 
664 /*
665  * We adjust the priority of the current process.  The priority of
666  * a process gets worse as it accumulates CPU time.  The cpu usage
667  * estimator (kg_estcpu) is increased here.  resetpriority() will
668  * compute a different priority each time kg_estcpu increases by
669  * INVERSE_ESTCPU_WEIGHT
670  * (until MAXPRI is reached).  The cpu usage estimator ramps up
671  * quite quickly when the process is running (linearly), and decays
672  * away exponentially, at a rate which is proportionally slower when
673  * the system is busy.  The basic principle is that the system will
674  * 90% forget that the process used a lot of CPU time in 5 * loadav
675  * seconds.  This causes the system to favor processes which haven't
676  * run much recently, and to round-robin among other processes.
677  */
678 void
679 sched_clock(struct thread *td)
680 {
681 	struct ksegrp *kg;
682 	struct kse *ke;
683 
684 	mtx_assert(&sched_lock, MA_OWNED);
685 	kg = td->td_ksegrp;
686 	ke = td->td_kse;
687 
688 	ke->ke_cpticks++;
689 	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
690 	if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
691 		resetpriority(kg);
692 		resetpriority_thread(td, kg);
693 	}
694 }
695 
696 /*
697  * charge childs scheduling cpu usage to parent.
698  *
699  * XXXKSE assume only one thread & kse & ksegrp keep estcpu in each ksegrp.
700  * Charge it to the ksegrp that did the wait since process estcpu is sum of
701  * all ksegrps, this is strictly as expected.  Assume that the child process
702  * aggregated all the estcpu into the 'built-in' ksegrp.
703  */
704 void
705 sched_exit(struct proc *p, struct thread *td)
706 {
707 	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
708 	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
709 }
710 
711 void
712 sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd)
713 {
714 
715 	mtx_assert(&sched_lock, MA_OWNED);
716 	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + childtd->td_ksegrp->kg_estcpu);
717 }
718 
719 void
720 sched_exit_thread(struct thread *td, struct thread *child)
721 {
722 	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
723 	    child, child->td_proc->p_comm, child->td_priority);
724 	if ((child->td_proc->p_flag & P_NOLOAD) == 0)
725 		sched_load_rem();
726 }
727 
728 void
729 sched_fork(struct thread *td, struct thread *childtd)
730 {
731 	sched_fork_ksegrp(td, childtd->td_ksegrp);
732 	sched_fork_thread(td, childtd);
733 }
734 
735 void
736 sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
737 {
738 	mtx_assert(&sched_lock, MA_OWNED);
739 	child->kg_estcpu = td->td_ksegrp->kg_estcpu;
740 }
741 
742 void
743 sched_fork_thread(struct thread *td, struct thread *childtd)
744 {
745 	sched_newthread(childtd);
746 }
747 
748 void
749 sched_nice(struct proc *p, int nice)
750 {
751 	struct ksegrp *kg;
752 	struct thread *td;
753 
754 	PROC_LOCK_ASSERT(p, MA_OWNED);
755 	mtx_assert(&sched_lock, MA_OWNED);
756 	p->p_nice = nice;
757 	FOREACH_KSEGRP_IN_PROC(p, kg) {
758 		resetpriority(kg);
759 		FOREACH_THREAD_IN_GROUP(kg, td) {
760 			resetpriority_thread(td, kg);
761 		}
762 	}
763 }
764 
765 void
766 sched_class(struct ksegrp *kg, int class)
767 {
768 	mtx_assert(&sched_lock, MA_OWNED);
769 	kg->kg_pri_class = class;
770 }
771 
772 /*
773  * Adjust the priority of a thread.
774  * This may include moving the thread within the KSEGRP,
775  * changing the assignment of a kse to the thread,
776  * and moving a KSE in the system run queue.
777  */
778 static void
779 sched_priority(struct thread *td, u_char prio)
780 {
781 	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
782 	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
783 	    curthread->td_proc->p_comm);
784 
785 	mtx_assert(&sched_lock, MA_OWNED);
786 	if (td->td_priority == prio)
787 		return;
788 	if (TD_ON_RUNQ(td)) {
789 		adjustrunqueue(td, prio);
790 	} else {
791 		td->td_priority = prio;
792 	}
793 }
794 
795 /*
796  * Update a thread's priority when it is lent another thread's
797  * priority.
798  */
799 void
800 sched_lend_prio(struct thread *td, u_char prio)
801 {
802 
803 	td->td_flags |= TDF_BORROWING;
804 	sched_priority(td, prio);
805 }
806 
807 /*
808  * Restore a thread's priority when priority propagation is
809  * over.  The prio argument is the minimum priority the thread
810  * needs to have to satisfy other possible priority lending
811  * requests.  If the thread's regulary priority is less
812  * important than prio the thread will keep a priority boost
813  * of prio.
814  */
815 void
816 sched_unlend_prio(struct thread *td, u_char prio)
817 {
818 	u_char base_pri;
819 
820 	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
821 	    td->td_base_pri <= PRI_MAX_TIMESHARE)
822 		base_pri = td->td_ksegrp->kg_user_pri;
823 	else
824 		base_pri = td->td_base_pri;
825 	if (prio >= base_pri) {
826 		td->td_flags &= ~TDF_BORROWING;
827 		sched_prio(td, base_pri);
828 	} else
829 		sched_lend_prio(td, prio);
830 }
831 
832 void
833 sched_prio(struct thread *td, u_char prio)
834 {
835 	u_char oldprio;
836 
837 	/* First, update the base priority. */
838 	td->td_base_pri = prio;
839 
840 	/*
841 	 * If the thread is borrowing another thread's priority, don't ever
842 	 * lower the priority.
843 	 */
844 	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
845 		return;
846 
847 	/* Change the real priority. */
848 	oldprio = td->td_priority;
849 	sched_priority(td, prio);
850 
851 	/*
852 	 * If the thread is on a turnstile, then let the turnstile update
853 	 * its state.
854 	 */
855 	if (TD_ON_LOCK(td) && oldprio != prio)
856 		turnstile_adjust(td, oldprio);
857 }
858 
859 void
860 sched_sleep(struct thread *td)
861 {
862 
863 	mtx_assert(&sched_lock, MA_OWNED);
864 	td->td_ksegrp->kg_slptime = 0;
865 }
866 
867 static void remrunqueue(struct thread *td);
868 
869 void
870 sched_switch(struct thread *td, struct thread *newtd, int flags)
871 {
872 	struct kse *ke;
873 	struct ksegrp *kg;
874 	struct proc *p;
875 
876 	ke = td->td_kse;
877 	p = td->td_proc;
878 
879 	mtx_assert(&sched_lock, MA_OWNED);
880 
881 	if ((p->p_flag & P_NOLOAD) == 0)
882 		sched_load_rem();
883 	/*
884 	 * We are volunteering to switch out so we get to nominate
885 	 * a successor for the rest of our quantum
886 	 * First try another thread in our ksegrp, and then look for
887 	 * other ksegrps in our process.
888 	 */
889 	if (sched_followon &&
890 	    (p->p_flag & P_HADTHREADS) &&
891 	    (flags & SW_VOL) &&
892 	    newtd == NULL) {
893 		/* lets schedule another thread from this process */
894 		 kg = td->td_ksegrp;
895 		 if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
896 			remrunqueue(newtd);
897 			sched_kgfollowons++;
898 		 } else {
899 			FOREACH_KSEGRP_IN_PROC(p, kg) {
900 				if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
901 					sched_pfollowons++;
902 					remrunqueue(newtd);
903 					break;
904 				}
905 			}
906 		}
907 	}
908 
909 	if (newtd)
910 		newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);
911 
912 	td->td_lastcpu = td->td_oncpu;
913 	td->td_flags &= ~TDF_NEEDRESCHED;
914 	td->td_pflags &= ~TDP_OWEPREEMPT;
915 	td->td_oncpu = NOCPU;
916 	/*
917 	 * At the last moment, if this thread is still marked RUNNING,
918 	 * then put it back on the run queue as it has not been suspended
919 	 * or stopped or any thing else similar.  We never put the idle
920 	 * threads on the run queue, however.
921 	 */
922 	if (td == PCPU_GET(idlethread))
923 		TD_SET_CAN_RUN(td);
924 	else {
925 		SLOT_RELEASE(td->td_ksegrp);
926 		if (TD_IS_RUNNING(td)) {
927 			/* Put us back on the run queue (kse and all). */
928 			setrunqueue(td, (flags & SW_PREEMPT) ?
929 			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
930 			    SRQ_OURSELF|SRQ_YIELDING);
931 		} else if (p->p_flag & P_HADTHREADS) {
932 			/*
933 			 * We will not be on the run queue. So we must be
934 			 * sleeping or similar. As it's available,
935 			 * someone else can use the KSE if they need it.
936 			 * It's NOT available if we are about to need it
937 			 */
938 			if (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp)
939 				slot_fill(td->td_ksegrp);
940 		}
941 	}
942 	if (newtd) {
943 		/*
944 		 * The thread we are about to run needs to be counted
945 		 * as if it had been added to the run queue and selected.
946 		 * It came from:
947 		 * * A preemption
948 		 * * An upcall
949 		 * * A followon
950 		 */
951 		KASSERT((newtd->td_inhibitors == 0),
952 			("trying to run inhibitted thread"));
953 		SLOT_USE(newtd->td_ksegrp);
954 		newtd->td_kse->ke_flags |= KEF_DIDRUN;
955         	TD_SET_RUNNING(newtd);
956 		if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
957 			sched_load_add();
958 	} else {
959 		newtd = choosethread();
960 	}
961 
962 	if (td != newtd)
963 		cpu_switch(td, newtd);
964 	sched_lock.mtx_lock = (uintptr_t)td;
965 	td->td_oncpu = PCPU_GET(cpuid);
966 }
967 
968 void
969 sched_wakeup(struct thread *td)
970 {
971 	struct ksegrp *kg;
972 
973 	mtx_assert(&sched_lock, MA_OWNED);
974 	kg = td->td_ksegrp;
975 	if (kg->kg_slptime > 1) {
976 		updatepri(kg);
977 		resetpriority(kg);
978 	}
979 	kg->kg_slptime = 0;
980 	setrunqueue(td, SRQ_BORING);
981 }
982 
983 #ifdef SMP
984 /* enable HTT_2 if you have a 2-way HTT cpu.*/
985 static int
986 forward_wakeup(int  cpunum)
987 {
988 	cpumask_t map, me, dontuse;
989 	cpumask_t map2;
990 	struct pcpu *pc;
991 	cpumask_t id, map3;
992 
993 	mtx_assert(&sched_lock, MA_OWNED);
994 
995 	CTR0(KTR_RUNQ, "forward_wakeup()");
996 
997 	if ((!forward_wakeup_enabled) ||
998 	     (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
999 		return (0);
1000 	if (!smp_started || cold || panicstr)
1001 		return (0);
1002 
1003 	forward_wakeups_requested++;
1004 
1005 /*
1006  * check the idle mask we received against what we calculated before
1007  * in the old version.
1008  */
1009 	me = PCPU_GET(cpumask);
1010 	/*
1011 	 * don't bother if we should be doing it ourself..
1012 	 */
1013 	if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
1014 		return (0);
1015 
1016 	dontuse = me | stopped_cpus | hlt_cpus_mask;
1017 	map3 = 0;
1018 	if (forward_wakeup_use_loop) {
1019 		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
1020 			id = pc->pc_cpumask;
1021 			if ( (id & dontuse) == 0 &&
1022 			    pc->pc_curthread == pc->pc_idlethread) {
1023 				map3 |= id;
1024 			}
1025 		}
1026 	}
1027 
1028 	if (forward_wakeup_use_mask) {
1029 		map = 0;
1030 		map = idle_cpus_mask & ~dontuse;
1031 
1032 		/* If they are both on, compare and use loop if different */
1033 		if (forward_wakeup_use_loop) {
1034 			if (map != map3) {
1035 				printf("map (%02X) != map3 (%02X)\n",
1036 						map, map3);
1037 				map = map3;
1038 			}
1039 		}
1040 	} else {
1041 		map = map3;
1042 	}
1043 	/* If we only allow a specific CPU, then mask off all the others */
1044 	if (cpunum != NOCPU) {
1045 		KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
1046 		map &= (1 << cpunum);
1047 	} else {
1048 		/* Try choose an idle die. */
1049 		if (forward_wakeup_use_htt) {
1050 			map2 =  (map & (map >> 1)) & 0x5555;
1051 			if (map2) {
1052 				map = map2;
1053 			}
1054 		}
1055 
1056 		/* set only one bit */
1057 		if (forward_wakeup_use_single) {
1058 			map = map & ((~map) + 1);
1059 		}
1060 	}
1061 	if (map) {
1062 		forward_wakeups_delivered++;
1063 		ipi_selected(map, IPI_AST);
1064 		return (1);
1065 	}
1066 	if (cpunum == NOCPU)
1067 		printf("forward_wakeup: Idle processor not found\n");
1068 	return (0);
1069 }
1070 #endif
1071 
1072 void
1073 sched_add(struct thread *td, int flags)
1074 {
1075 	struct kse *ke;
1076 #ifdef SMP
1077 	int forwarded = 0;
1078 	int cpu;
1079 #endif
1080 
1081 	ke = td->td_kse;
1082 	mtx_assert(&sched_lock, MA_OWNED);
1083 	KASSERT(ke->ke_state != KES_ONRUNQ,
1084 	    ("sched_add: kse %p (%s) already in run queue", ke,
1085 	    ke->ke_proc->p_comm));
1086 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1087 	    ("sched_add: process swapped out"));
1088 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
1089 	    td, td->td_proc->p_comm, td->td_priority, curthread,
1090 	    curthread->td_proc->p_comm);
1091 
1092 #ifdef SMP
1093 	if (KSE_CAN_MIGRATE(ke)) {
1094 		CTR2(KTR_RUNQ,
1095 		    "sched_add: adding kse:%p (td:%p) to gbl runq", ke, td);
1096 		cpu = NOCPU;
1097 		ke->ke_runq = &runq;
1098 	} else {
1099 		if (!SKE_RUNQ_PCPU(ke))
1100 			ke->ke_runq = &runq_pcpu[(cpu = PCPU_GET(cpuid))];
1101 		else
1102 			cpu = td->td_lastcpu;
1103 		CTR3(KTR_RUNQ,
1104 		    "sched_add: Put kse:%p(td:%p) on cpu%d runq", ke, td, cpu);
1105 	}
1106 #else
1107 	CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to runq", ke, td);
1108 	ke->ke_runq = &runq;
1109 
1110 #endif
1111 	/*
1112 	 * If we are yielding (on the way out anyhow)
1113 	 * or the thread being saved is US,
1114 	 * then don't try be smart about preemption
1115 	 * or kicking off another CPU
1116 	 * as it won't help and may hinder.
1117 	 * In the YIEDLING case, we are about to run whoever is
1118 	 * being put in the queue anyhow, and in the
1119 	 * OURSELF case, we are puting ourself on the run queue
1120 	 * which also only happens when we are about to yield.
1121 	 */
1122 	if((flags & SRQ_YIELDING) == 0) {
1123 #ifdef SMP
1124 		cpumask_t me = PCPU_GET(cpumask);
1125 		int idle = idle_cpus_mask & me;
1126 		/*
1127 		 * Only try to kick off another CPU if
1128 		 * the thread is unpinned
1129 		 * or pinned to another cpu,
1130 		 * and there are other available and idle CPUs.
1131 		 * if we are idle, or it's an interrupt,
1132 		 * then skip straight to preemption.
1133 		 */
1134 		if ( (! idle) && ((flags & SRQ_INTR) == 0) &&
1135 		    (idle_cpus_mask & ~(hlt_cpus_mask | me)) &&
1136 		    ( KSE_CAN_MIGRATE(ke) ||
1137 		      ke->ke_runq != &runq_pcpu[PCPU_GET(cpuid)])) {
1138 			forwarded = forward_wakeup(cpu);
1139 		}
1140 		/*
1141 		 * If we failed to kick off another cpu, then look to
1142 		 * see if we should preempt this CPU. Only allow this
1143 		 * if it is not pinned or IS pinned to this CPU.
1144 		 * If we are the idle thread, we also try do preempt.
1145 		 * as it will be quicker and being idle, we won't
1146 		 * lose in doing so..
1147 		 */
1148 		if ((!forwarded) &&
1149 		    (ke->ke_runq == &runq ||
1150 		     ke->ke_runq == &runq_pcpu[PCPU_GET(cpuid)]))
1151 #endif
1152 
1153 		{
1154 			if (maybe_preempt(td))
1155 				return;
1156 		}
1157 	}
1158 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1159 		sched_load_add();
1160 	SLOT_USE(td->td_ksegrp);
1161 	runq_add(ke->ke_runq, ke, flags);
1162 	ke->ke_state = KES_ONRUNQ;
1163 	maybe_resched(td);
1164 }
1165 
1166 void
1167 sched_rem(struct thread *td)
1168 {
1169 	struct kse *ke;
1170 
1171 	ke = td->td_kse;
1172 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1173 	    ("sched_rem: process swapped out"));
1174 	KASSERT((ke->ke_state == KES_ONRUNQ),
1175 	    ("sched_rem: KSE not on run queue"));
1176 	mtx_assert(&sched_lock, MA_OWNED);
1177 	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
1178 	    td, td->td_proc->p_comm, td->td_priority, curthread,
1179 	    curthread->td_proc->p_comm);
1180 
1181 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1182 		sched_load_rem();
1183 	SLOT_RELEASE(td->td_ksegrp);
1184 	runq_remove(ke->ke_runq, ke);
1185 
1186 	ke->ke_state = KES_THREAD;
1187 }
1188 
1189 /*
1190  * Select threads to run.
1191  * Notice that the running threads still consume a slot.
1192  */
1193 struct kse *
1194 sched_choose(void)
1195 {
1196 	struct kse *ke;
1197 	struct runq *rq;
1198 
1199 #ifdef SMP
1200 	struct kse *kecpu;
1201 
1202 	rq = &runq;
1203 	ke = runq_choose(&runq);
1204 	kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
1205 
1206 	if (ke == NULL ||
1207 	    (kecpu != NULL &&
1208 	     kecpu->ke_thread->td_priority < ke->ke_thread->td_priority)) {
1209 		CTR2(KTR_RUNQ, "choosing kse %p from pcpu runq %d", kecpu,
1210 		     PCPU_GET(cpuid));
1211 		ke = kecpu;
1212 		rq = &runq_pcpu[PCPU_GET(cpuid)];
1213 	} else {
1214 		CTR1(KTR_RUNQ, "choosing kse %p from main runq", ke);
1215 	}
1216 
1217 #else
1218 	rq = &runq;
1219 	ke = runq_choose(&runq);
1220 #endif
1221 
1222 	if (ke != NULL) {
1223 		runq_remove(rq, ke);
1224 		ke->ke_state = KES_THREAD;
1225 
1226 		KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1227 		    ("sched_choose: process swapped out"));
1228 	}
1229 	return (ke);
1230 }
1231 
1232 void
1233 sched_userret(struct thread *td)
1234 {
1235 	struct ksegrp *kg;
1236 	/*
1237 	 * XXX we cheat slightly on the locking here to avoid locking in
1238 	 * the usual case.  Setting td_priority here is essentially an
1239 	 * incomplete workaround for not setting it properly elsewhere.
1240 	 * Now that some interrupt handlers are threads, not setting it
1241 	 * properly elsewhere can clobber it in the window between setting
1242 	 * it here and returning to user mode, so don't waste time setting
1243 	 * it perfectly here.
1244 	 */
1245 	KASSERT((td->td_flags & TDF_BORROWING) == 0,
1246 	    ("thread with borrowed priority returning to userland"));
1247 	kg = td->td_ksegrp;
1248 	if (td->td_priority != kg->kg_user_pri) {
1249 		mtx_lock_spin(&sched_lock);
1250 		td->td_priority = kg->kg_user_pri;
1251 		td->td_base_pri = kg->kg_user_pri;
1252 		mtx_unlock_spin(&sched_lock);
1253 	}
1254 }
1255 
1256 void
1257 sched_bind(struct thread *td, int cpu)
1258 {
1259 	struct kse *ke;
1260 
1261 	mtx_assert(&sched_lock, MA_OWNED);
1262 	KASSERT(TD_IS_RUNNING(td),
1263 	    ("sched_bind: cannot bind non-running thread"));
1264 
1265 	ke = td->td_kse;
1266 
1267 	ke->ke_flags |= KEF_BOUND;
1268 #ifdef SMP
1269 	ke->ke_runq = &runq_pcpu[cpu];
1270 	if (PCPU_GET(cpuid) == cpu)
1271 		return;
1272 
1273 	ke->ke_state = KES_THREAD;
1274 
1275 	mi_switch(SW_VOL, NULL);
1276 #endif
1277 }
1278 
1279 void
1280 sched_unbind(struct thread* td)
1281 {
1282 	mtx_assert(&sched_lock, MA_OWNED);
1283 	td->td_kse->ke_flags &= ~KEF_BOUND;
1284 }
1285 
1286 int
1287 sched_load(void)
1288 {
1289 	return (sched_tdcnt);
1290 }
1291 
1292 int
1293 sched_sizeof_ksegrp(void)
1294 {
1295 	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
1296 }
1297 int
1298 sched_sizeof_proc(void)
1299 {
1300 	return (sizeof(struct proc));
1301 }
1302 int
1303 sched_sizeof_thread(void)
1304 {
1305 	return (sizeof(struct thread) + sizeof(struct kse));
1306 }
1307 
1308 fixpt_t
1309 sched_pctcpu(struct thread *td)
1310 {
1311 	struct kse *ke;
1312 
1313 	ke = td->td_kse;
1314 	return (ke->ke_pctcpu);
1315 
1316 	return (0);
1317 }
1318 #define KERN_SWITCH_INCLUDE 1
1319 #include "kern/kern_switch.c"
1320