/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"

#define kse td_sched

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/turnstile.h>
#include <machine/pcb.h>
#include <machine/smp.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

/*
 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
 * the range 100-256 Hz (approximately).
 */
#define	ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
#ifdef SMP
#define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
#else
#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
#endif
#define	NICE_WEIGHT		1	/* Priorities per nice level. */
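
/*
 * Worked example (illustrative only, assuming the UP value
 * INVERSE_ESTCPU_WEIGHT == 8 and RQ_PPQ == 4 priorities per run queue):
 * every 8 points of kg_estcpu cost one priority level, and each nice step
 * costs NICE_WEIGHT == 1 more, so with PRIO_MAX - PRIO_MIN == 40,
 * ESTCPULIM() caps kg_estcpu at 8 * (1 * 40 - 4) + 8 - 1 == 295, which
 * keeps the priority computed by resetpriority() inside the timeshare
 * range even for a maximally niced, CPU-bound process.
 */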

/*
 * The schedulable entity that can be given a context to run.
 * A process may have several of these.  Probably one per processor
 * but possibly a few more.  In this universe they are grouped
 * with a KSEG that contains the priority and niceness
 * for the group.
 */
struct kse {
	TAILQ_ENTRY(kse) ke_procq;	/* (j/z) Run queue. */
	struct thread	*ke_thread;	/* (*) Active associated thread. */
	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
	u_char		ke_rqindex;	/* (j) Run queue index. */
	enum {
		KES_THREAD = 0x0,	/* slaved to thread state */
		KES_ONRUNQ
	} ke_state;			/* (j) KSE status. */
	int		ke_cpticks;	/* (j) Ticks of cpu time. */
	struct runq	*ke_runq;	/* runq the kse is currently on */
};

#define ke_proc		ke_thread->td_proc
#define ke_ksegrp	ke_thread->td_ksegrp

#define td_kse td_sched

/* flags kept in td_flags */
#define TDF_DIDRUN	TDF_SCHED0	/* KSE actually ran. */
#define TDF_EXIT	TDF_SCHED1	/* KSE is being killed. */
#define TDF_BOUND	TDF_SCHED2	/* KSE is stuck to one CPU. */

#define ke_flags	ke_thread->td_flags
#define KEF_DIDRUN	TDF_DIDRUN /* KSE actually ran. */
#define KEF_EXIT	TDF_EXIT /* KSE is being killed. */
#define KEF_BOUND	TDF_BOUND /* stuck to one CPU */

#define SKE_RUNQ_PCPU(ke)						\
    ((ke)->ke_runq != 0 && (ke)->ke_runq != &runq)

struct kg_sched {
	struct thread	*skg_last_assigned; /* (j) Last thread assigned to */
					   /* the system scheduler. */
	int	skg_avail_opennings;	/* (j) Num unfilled slots in group. */
	int	skg_concurrency;	/* (j) Num threads requested in group. */
};
#define kg_last_assigned	kg_sched->skg_last_assigned
#define kg_avail_opennings	kg_sched->skg_avail_opennings
#define kg_concurrency		kg_sched->skg_concurrency

#define SLOT_RELEASE(kg)						\
do {									\
	kg->kg_avail_opennings++; 					\
	CTR3(KTR_RUNQ, "kg %p(%d) Slot released (->%d)",		\
	    kg,								\
	    kg->kg_concurrency,						\
	    kg->kg_avail_opennings);					\
/*	KASSERT((kg->kg_avail_opennings <= kg->kg_concurrency),		\
	    ("slots out of whack"));*/					\
} while (0)

#define SLOT_USE(kg)							\
do {									\
	kg->kg_avail_opennings--; 					\
	CTR3(KTR_RUNQ, "kg %p(%d) Slot used (->%d)",			\
	    kg,								\
	    kg->kg_concurrency,						\
	    kg->kg_avail_opennings);					\
/*	KASSERT((kg->kg_avail_opennings >= 0),				\
	    ("slots out of whack"));*/					\
} while (0)
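
/*
 * Illustrative note (a sketch of the call sites in this file, not a
 * contract): SLOT_USE() is invoked when a thread is added to a run
 * queue (sched_add()) or chosen to run next (sched_switch()), and
 * SLOT_RELEASE() when a thread leaves a run queue (sched_rem()) or
 * switches out (sched_switch()), so kg_avail_opennings tracks how many
 * more threads of the ksegrp may be made runnable at this instant.
 */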

/*
 * KSE_CAN_MIGRATE macro returns true if the kse can migrate between
 * cpus.
 */
#define KSE_CAN_MIGRATE(ke)						\
    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)

static struct kse kse0;
static struct kg_sched kg_sched0;

static int	sched_tdcnt;	/* Total runnable threads in the system. */
static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */

static struct callout roundrobin_callout;

static void	slot_fill(struct ksegrp *kg);
static struct kse *sched_choose(void);		/* XXX Should be thread * */

static void	setup_runqs(void);
static void	roundrobin(void *arg);
static void	schedcpu(void);
static void	schedcpu_thread(void);
static void	sched_priority(struct thread *td, u_char prio);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct ksegrp *kg);
static void	resetpriority(struct ksegrp *kg);
static void	resetpriority_thread(struct thread *td, struct ksegrp *kg);
#ifdef SMP
static int	forward_wakeup(int cpunum);
#endif

static struct kproc_desc sched_kp = {
        "schedcpu",
        schedcpu_thread,
        NULL
};
SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start, &sched_kp)
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

/*
 * Global run queue.
 */
static struct runq runq;

#ifdef SMP
/*
 * Per-CPU run queues
 */
static struct runq runq_pcpu[MAXCPU];
#endif

static void
setup_runqs(void)
{
#ifdef SMP
	int i;

	for (i = 0; i < MAXCPU; ++i)
		runq_init(&runq_pcpu[i]);
#endif

	runq_init(&runq);
}

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
    "Scheduler name");

SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof sched_quantum, sysctl_kern_quantum, "I",
    "Roundrobin scheduling quantum in microseconds");
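
/*
 * For illustration (values depend on the kernel's hz): with hz = 1000,
 * tick is 1000 us, so the default quantum of hz / 10 ticks reads back
 * through this sysctl as 100000 us, and e.g.
 *	sysctl kern.sched.quantum=50000
 * would install a 50 ms quantum.  The handler above rejects anything
 * below one tick with EINVAL.
 */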

#ifdef SMP
/* Enable forwarding of wakeups to all other CPUs. */
SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");

static int forward_wakeup_enabled = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
	   &forward_wakeup_enabled, 0,
	   "Forwarding of wakeups to idle CPUs");

static int forward_wakeups_requested = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
	   &forward_wakeups_requested, 0,
	   "Requests for forwarding of wakeups to idle CPUs");

static int forward_wakeups_delivered = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
	   &forward_wakeups_delivered, 0,
	   "Completed forwardings of wakeups to idle CPUs");

static int forward_wakeup_use_mask = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
	   &forward_wakeup_use_mask, 0,
	   "Use the mask of idle CPUs");

static int forward_wakeup_use_loop = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
	   &forward_wakeup_use_loop, 0,
	   "Use a loop to find idle CPUs");

static int forward_wakeup_use_single = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
	   &forward_wakeup_use_single, 0,
	   "Only signal one idle CPU");

static int forward_wakeup_use_htt = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
	   &forward_wakeup_use_htt, 0,
	   "Account for HTT");
#endif

static int sched_followon = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
	   &sched_followon, 0,
	   "Allow threads to share a quantum");

static int sched_pfollowons = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, pfollowons, CTLFLAG_RD,
	   &sched_pfollowons, 0,
	   "Number of followons done to a different ksegrp");

static int sched_kgfollowons = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, kgfollowons, CTLFLAG_RD,
	   &sched_kgfollowons, 0,
	   "Number of followons done in a ksegrp");

static __inline void
sched_load_add(void)
{
	sched_tdcnt++;
	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
}

static __inline void
sched_load_rem(void)
{
	sched_tdcnt--;
	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
}

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Force a switch among equal-priority processes every 100 ms.
 * We do not actually need to force a context switch of the current
 * process: firing the callout triggers a switch to softclock() and back,
 * which is equivalent to a preemption, so no further work is needed on
 * the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(void *arg)
{

#ifdef SMP
	mtx_lock_spin(&sched_lock);
	forward_roundrobin();
	mtx_unlock_spin(&sched_lock);
#endif

	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

/*
 * Constants for digital decay and forget:
 *	90% of (kg_estcpu) usage in 5 * loadav time
 *	95% of (ke_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates kg_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of kg_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		kg_estcpu *= decay;
 * will compute
 * 	kg_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = x**0/0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
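
/*
 * Worked example (illustrative): with a steady load average of 2.0,
 * loadfactor() gives loadfac = 4 (in units of FSCALE), so
 *	decay_cpu(loadfac, cpu) == cpu * 4 / 5,
 * i.e. each second schedcpu() scales kg_estcpu by
 * (2 * loadav) / (2 * loadav + 1) = 0.8.  After 5 * loadav = 10 seconds
 * roughly 0.8**10 =~ 0.11 of the original value remains, matching the
 * "90% forgotten in 5 * loadav seconds" rule derived above.
 */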

/* decay 95% of `ke_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
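
/*
 * Worked example (illustrative, assuming FSHIFT == 11 so that
 * FSHIFT - CCPU_SHIFT == 0, and realstathz == 128): a thread that ran
 * for the whole of the last second has ke_cpticks == 128 and gains
 *	100 * (128 << 0) / 128 == 100
 * fixed-point units in schedcpu(), while the old value decays by
 * ccpu =~ exp(-1/20).  The steady state for a continuously running
 * thread is therefore
 *	100 / (1 - ccpu) =~ 100 / 0.0488 =~ 2048 == FSCALE,
 * i.e. 100%, and an idle thread is 95% forgotten after 60 seconds,
 * since ccpu**60 == exp(-3) =~ 0.05.
 */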

/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct kse *ke;
	struct ksegrp *kg;
	int awake, realstathz;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Prevent state changes and protect run queue.
		 */
		mtx_lock_spin(&sched_lock);
		/*
		 * Increment time in/out of memory.  We ignore overflow; with
		 * 16-bit int's (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			awake = 0;
			FOREACH_THREAD_IN_GROUP(kg, td) {
				ke = td->td_kse;
				/*
				 * Increment sleep time (if sleeping).  We
				 * ignore overflow, as above.
				 */
				/*
				 * The kse slptimes are not touched in wakeup
				 * because the thread may not HAVE a KSE.
				 */
				if (ke->ke_state == KES_ONRUNQ) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				} else if ((ke->ke_state == KES_THREAD) &&
				    (TD_IS_RUNNING(td))) {
					awake = 1;
					/* Do not clear KEF_DIDRUN */
				} else if (ke->ke_flags & KEF_DIDRUN) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				}

				/*
				 * ke_pctcpu is only for ps and ttyinfo().
				 * Do it per kse, and add them up at the end?
				 * XXXKSE
				 */
				ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
				    FSHIFT;
				/*
				 * If the kse has been idle the entire second,
				 * stop recalculating its priority until
				 * it wakes up.
				 */
				if (ke->ke_cpticks == 0)
					continue;
#if	(FSHIFT >= CCPU_SHIFT)
				ke->ke_pctcpu += (realstathz == 100)
				    ? ((fixpt_t) ke->ke_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ke->ke_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ke->ke_pctcpu += ((FSCALE - ccpu) *
				    (ke->ke_cpticks *
				    FSCALE / realstathz)) >> FSHIFT;
#endif
				ke->ke_cpticks = 0;
			} /* end of kse loop */
			/*
			 * If there are ANY running threads in this KSEGRP,
			 * then don't count it as sleeping.
			 */
			if (awake) {
				if (kg->kg_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(kg);
				}
				kg->kg_slptime = 0;
			} else
				kg->kg_slptime++;
			if (kg->kg_slptime > 1)
				continue;
			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
			resetpriority(kg);
			FOREACH_THREAD_IN_GROUP(kg, td) {
				resetpriority_thread(td, kg);
			}
		} /* end of ksegrp loop */
		mtx_unlock_spin(&sched_lock);
	} /* end of process loop */
	sx_sunlock(&allproc_lock);
}

/*
 * Main loop for a kthread that executes schedcpu once a second.
 */
static void
schedcpu_thread(void)
{
	int nowake;

	for (;;) {
		schedcpu();
		tsleep(&nowake, 0, "-", hz);
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max kg_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay kg_estcpu to zero.
 */
static void
updatepri(struct ksegrp *kg)
{
	register fixpt_t loadfac;
	register unsigned int newcpu;

	loadfac = loadfactor(averunnable.ldavg[0]);
	if (kg->kg_slptime > 5 * loadfac)
		kg->kg_estcpu = 0;
	else {
		newcpu = kg->kg_estcpu;
		kg->kg_slptime--;	/* was incremented in schedcpu() */
		while (newcpu && --kg->kg_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		kg->kg_estcpu = newcpu;
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct ksegrp *kg)
{
	register unsigned int newpriority;

	if (kg->kg_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (kg->kg_proc->p_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		kg->kg_user_pri = newpriority;
	}
}
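
/*
 * Worked example (illustrative, using the UP INVERSE_ESTCPU_WEIGHT
 * of 8): a nice-0 process (p_nice == 0, PRIO_MIN == -20) with
 * kg_estcpu == 80 gets
 *	newpriority = PUSER + 80 / 8 + 1 * (0 - (-20)) = PUSER + 30,
 * clamped into [PRI_MIN_TIMESHARE, PRI_MAX_TIMESHARE].  Higher numbers
 * are less important, so accumulated CPU time and a high nice value
 * both push the thread toward the back of the queue.
 */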

/*
 * Update the thread's priority when the associated ksegroup's user
 * priority changes.
 */
static void
resetpriority_thread(struct thread *td, struct ksegrp *kg)
{

	/* Only change threads with a time sharing user priority. */
	if (td->td_priority < PRI_MIN_TIMESHARE ||
	    td->td_priority > PRI_MAX_TIMESHARE)
		return;

	/* XXX the whole needresched thing is broken, but not silly. */
	maybe_resched(td);

	sched_prio(td, kg->kg_user_pri);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	setup_runqs();

	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	hogticks = 2 * sched_quantum;

	callout_init(&roundrobin_callout, CALLOUT_MPSAFE);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);

	/* Account for thread0. */
	sched_load_add();
}

/* External interfaces start here */

/*
 * Very early in the boot some setup of scheduler-specific
 * parts of proc0 and of some scheduler resources needs to be done.
 * Called from:
 *  proc0_init()
 */
void
schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	ksegrp0.kg_sched = &kg_sched0;
	thread0.td_sched = &kse0;
	kse0.ke_thread = &thread0;
	kse0.ke_state = KES_THREAD;
	kg_sched0.skg_concurrency = 1;
	kg_sched0.skg_avail_opennings = 0; /* we are already running */
}

int
sched_runnable(void)
{
#ifdef SMP
	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
#else
	return runq_check(&runq);
#endif
}

int
sched_rr_interval(void)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	return (sched_quantum);
}

/*
 * We adjust the priority of the current process.  The priority of a
 * process gets worse as it accumulates CPU time.  The CPU usage estimator
 * (kg_estcpu) is increased here.  resetpriority() will compute a different
 * priority each time kg_estcpu increases by INVERSE_ESTCPU_WEIGHT (until
 * MAXPRI is reached).  The CPU usage estimator ramps up quite quickly when
 * the process is running (linearly), and decays away exponentially, at a
 * rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among
 * other processes.
 */
void
sched_clock(struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = td->td_ksegrp;
	ke = td->td_kse;

	ke->ke_cpticks++;
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
	if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(kg);
		resetpriority_thread(td, kg);
	}
}
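
/*
 * For illustration (assuming stathz == 128 and the UP weight of 8):
 * sched_clock() runs once per statclock tick, so a CPU-bound thread has
 * its kg_estcpu recomputed into a priority every 8 ticks, i.e. 16 times
 * a second, with each step making its user priority one level less
 * important until ESTCPULIM() or the timeshare clamp in resetpriority()
 * stops the slide.
 */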

/*
 * Charge child's scheduling CPU usage to parent.
 *
 * XXXKSE assume only one thread & kse & ksegrp keep estcpu in each ksegrp.
 * Charge it to the ksegrp that did the wait since process estcpu is sum of
 * all ksegrps, this is strictly as expected.  Assume that the child process
 * aggregated all the estcpu into the 'built-in' ksegrp.
 */
void
sched_exit(struct proc *p, struct thread *td)
{
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd)
{

	mtx_assert(&sched_lock, MA_OWNED);
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + childtd->td_ksegrp->kg_estcpu);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
	    child, child->td_proc->p_comm, child->td_priority);
	if ((child->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_rem();
}

void
sched_fork(struct thread *td, struct thread *childtd)
{
	sched_fork_ksegrp(td, childtd->td_ksegrp);
	sched_fork_thread(td, childtd);
}

void
sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
{
	mtx_assert(&sched_lock, MA_OWNED);
	child->kg_estcpu = td->td_ksegrp->kg_estcpu;
}

void
sched_fork_thread(struct thread *td, struct thread *childtd)
{
	sched_newthread(childtd);
}

void
sched_nice(struct proc *p, int nice)
{
	struct ksegrp *kg;
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	p->p_nice = nice;
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		resetpriority(kg);
		FOREACH_THREAD_IN_GROUP(kg, td) {
			resetpriority_thread(td, kg);
		}
	}
}

void
sched_class(struct ksegrp *kg, int class)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kg->kg_pri_class = class;
}

/*
 * Adjust the priority of a thread.
 * This may include moving the thread within the KSEGRP,
 * changing the assignment of a kse to the thread,
 * and moving a KSE in the system run queue.
 */
static void
sched_priority(struct thread *td, u_char prio)
{
	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
	    curthread->td_proc->p_comm);

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority == prio)
		return;
	if (TD_ON_RUNQ(td)) {
		adjustrunqueue(td, prio);
	} else {
		td->td_priority = prio;
	}
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void
sched_lend_prio(struct thread *td, u_char prio)
{

	td->td_flags |= TDF_BORROWING;
	sched_priority(td, prio);
}

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio, the thread will keep a priority boost
 * of prio.
 */
void
sched_unlend_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_ksegrp->kg_user_pri;
	else
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_prio(td, base_pri);
	} else
		sched_lend_prio(td, prio);
}

void
sched_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	/* First, update the base priority. */
	td->td_base_pri = prio;

	/*
	 * If the thread is borrowing another thread's priority, don't ever
	 * lower the priority.
	 */
	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
		return;

	/* Change the real priority. */
	oldprio = td->td_priority;
	sched_priority(td, prio);

	/*
	 * If the thread is on a turnstile, then let the turnstile update
	 * its state.
	 */
	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
}

void
sched_sleep(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	td->td_ksegrp->kg_slptime = 0;
}

static void remrunqueue(struct thread *td);

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct kse *ke;
	struct ksegrp *kg;
	struct proc *p;

	ke = td->td_kse;
	p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);

	if ((p->p_flag & P_NOLOAD) == 0)
		sched_load_rem();
	/*
	 * We are volunteering to switch out, so we get to nominate
	 * a successor for the rest of our quantum.
	 * First try another thread in our ksegrp, and then look for
	 * other ksegrps in our process.
	 */
	if (sched_followon &&
	    (p->p_flag & P_HADTHREADS) &&
	    (flags & SW_VOL) &&
	    newtd == NULL) {
		/* Let's schedule another thread from this process. */
		kg = td->td_ksegrp;
		if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
			remrunqueue(newtd);
			sched_kgfollowons++;
		} else {
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
					sched_pfollowons++;
					remrunqueue(newtd);
					break;
				}
			}
		}
	}

	if (newtd)
		newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);

	td->td_lastcpu = td->td_oncpu;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;
	td->td_oncpu = NOCPU;
	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue as it has not been suspended
	 * or stopped or anything else similar.  We never put the idle
	 * threads on the run queue, however.
	 */
	if (td == PCPU_GET(idlethread))
		TD_SET_CAN_RUN(td);
	else {
		SLOT_RELEASE(td->td_ksegrp);
		if (TD_IS_RUNNING(td)) {
			/* Put us back on the run queue (kse and all). */
			setrunqueue(td, (flags & SW_PREEMPT) ?
			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_OURSELF|SRQ_YIELDING);
		} else if (p->p_flag & P_HADTHREADS) {
			/*
			 * We will not be on the run queue, so we must be
			 * sleeping or similar.  As it's available,
			 * someone else can use the KSE if they need it.
			 * It's NOT available if we are about to need it.
			 */
			if (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp)
				slot_fill(td->td_ksegrp);
		}
	}
	if (newtd) {
		/*
		 * The thread we are about to run needs to be counted
		 * as if it had been added to the run queue and selected.
		 * It came from:
		 * * A preemption
		 * * An upcall
		 * * A followon
		 */
		KASSERT((newtd->td_inhibitors == 0),
			("trying to run inhibited thread"));
		SLOT_USE(newtd->td_ksegrp);
		newtd->td_kse->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(newtd);
		if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
			sched_load_add();
	} else {
		newtd = choosethread();
	}

	if (td != newtd) {
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
		cpu_switch(td, newtd);
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
	}

	sched_lock.mtx_lock = (uintptr_t)td;
	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_wakeup(struct thread *td)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = td->td_ksegrp;
	if (kg->kg_slptime > 1) {
		updatepri(kg);
		resetpriority(kg);
	}
	kg->kg_slptime = 0;
	setrunqueue(td, SRQ_BORING);
}

#ifdef SMP
/* Enable HTT_2 if you have a 2-way HTT CPU. */
static int
forward_wakeup(int cpunum)
{
	cpumask_t map, me, dontuse;
	cpumask_t map2;
	struct pcpu *pc;
	cpumask_t id, map3;

	mtx_assert(&sched_lock, MA_OWNED);

	CTR0(KTR_RUNQ, "forward_wakeup()");

	if ((!forward_wakeup_enabled) ||
	     (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
		return (0);
	if (!smp_started || cold || panicstr)
		return (0);

	forward_wakeups_requested++;

	/*
	 * Check the idle mask we received against what we calculated
	 * before in the old version.
	 */
	me = PCPU_GET(cpumask);
	/*
	 * Don't bother if we should be doing it ourselves.
	 */
	if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
		return (0);

	dontuse = me | stopped_cpus | hlt_cpus_mask;
	map3 = 0;
	if (forward_wakeup_use_loop) {
		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
			id = pc->pc_cpumask;
			if ((id & dontuse) == 0 &&
			    pc->pc_curthread == pc->pc_idlethread) {
				map3 |= id;
			}
		}
	}

	if (forward_wakeup_use_mask) {
		map = 0;
		map = idle_cpus_mask & ~dontuse;

		/* If they are both on, compare and use loop if different. */
		if (forward_wakeup_use_loop) {
			if (map != map3) {
				printf("map (%02X) != map3 (%02X)\n",
						map, map3);
				map = map3;
			}
		}
	} else {
		map = map3;
	}
	/* If we only allow a specific CPU, then mask off all the others. */
	if (cpunum != NOCPU) {
		KASSERT((cpunum <= mp_maxcpus), ("forward_wakeup: bad cpunum."));
		map &= (1 << cpunum);
	} else {
		/* Try to choose an idle die. */
		if (forward_wakeup_use_htt) {
			map2 = (map & (map >> 1)) & 0x5555;
			if (map2) {
				map = map2;
			}
		}

		/* Set only one bit: map & -map isolates the lowest set bit. */
		if (forward_wakeup_use_single) {
			map = map & ((~map) + 1);
		}
	}
	if (map) {
		forward_wakeups_delivered++;
		ipi_selected(map, IPI_AST);
		return (1);
	}
	if (cpunum == NOCPU)
		printf("forward_wakeup: Idle processor not found\n");
	return (0);
}
#endif

#ifdef SMP
static void kick_other_cpu(int pri, int cpuid);

static void
kick_other_cpu(int pri, int cpuid)
{
	struct pcpu *pcpu = pcpu_find(cpuid);
	int cpri = pcpu->pc_curthread->td_priority;

	if (idle_cpus_mask & pcpu->pc_cpumask) {
		forward_wakeups_delivered++;
		ipi_selected(pcpu->pc_cpumask, IPI_AST);
		return;
	}

	if (pri >= cpri)
		return;

#if defined(IPI_PREEMPTION) && defined(PREEMPTION)
#if !defined(FULL_PREEMPTION)
	if (pri <= PRI_MAX_ITHD)
#endif /* ! FULL_PREEMPTION */
	{
		ipi_selected(pcpu->pc_cpumask, IPI_PREEMPT);
		return;
	}
#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */

	pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
	ipi_selected(pcpu->pc_cpumask, IPI_AST);
	return;
}
#endif /* SMP */

void
sched_add(struct thread *td, int flags)
#ifdef SMP
{
	struct kse *ke;
	int forwarded = 0;
	int cpu;
	int single_cpu = 0;

	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);

	if (td->td_pinned != 0) {
		cpu = td->td_lastcpu;
		ke->ke_runq = &runq_pcpu[cpu];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put kse:%p(td:%p) on cpu%d runq", ke, td, cpu);
	} else if ((ke)->ke_flags & KEF_BOUND) {
		/* Find CPU from bound runq. */
		KASSERT(SKE_RUNQ_PCPU(ke), ("sched_add: bound kse not on cpu runq"));
		cpu = ke->ke_runq - &runq_pcpu[0];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put kse:%p(td:%p) on cpu%d runq", ke, td, cpu);
	} else {
		CTR2(KTR_RUNQ,
		    "sched_add: adding kse:%p (td:%p) to gbl runq", ke, td);
		cpu = NOCPU;
		ke->ke_runq = &runq;
	}

	if (single_cpu && (cpu != PCPU_GET(cpuid))) {
		kick_other_cpu(td->td_priority, cpu);
	} else {
		if (!single_cpu) {
			cpumask_t me = PCPU_GET(cpumask);
			int idle = idle_cpus_mask & me;

			if (!idle && ((flags & SRQ_INTR) == 0) &&
			    (idle_cpus_mask & ~(hlt_cpus_mask | me)))
				forwarded = forward_wakeup(cpu);
		}

		if (!forwarded) {
			if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
				return;
			else
				maybe_resched(td);
		}
	}

	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_add();
	SLOT_USE(td->td_ksegrp);
	runq_add(ke->ke_runq, ke, flags);
	ke->ke_state = KES_ONRUNQ;
}
#else /* SMP */
{
	struct kse *ke;

	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to runq", ke, td);
	ke->ke_runq = &runq;

	/*
	 * If we are yielding (on the way out anyhow) or the thread being
	 * saved is US, then don't try to be smart about preemption or
	 * kicking off another CPU, as it won't help and may hinder.  In
	 * the YIELDING case, we are about to run whoever is being put in
	 * the queue anyhow, and in the OURSELF case, we are putting
	 * ourselves on the run queue, which also only happens when we are
	 * about to yield.
	 */
	if ((flags & SRQ_YIELDING) == 0) {
		if (maybe_preempt(td))
			return;
	}
	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_add();
	SLOT_USE(td->td_ksegrp);
	runq_add(ke->ke_runq, ke, flags);
	ke->ke_state = KES_ONRUNQ;
	maybe_resched(td);
}
#endif /* SMP */

void
sched_rem(struct thread *td)
{
	struct kse *ke;

	ke = td->td_kse;
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_rem: process swapped out"));
	KASSERT((ke->ke_state == KES_ONRUNQ),
	    ("sched_rem: KSE not on run queue"));
	mtx_assert(&sched_lock, MA_OWNED);
	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);

	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_rem();
	SLOT_RELEASE(td->td_ksegrp);
	runq_remove(ke->ke_runq, ke);

	ke->ke_state = KES_THREAD;
}

/*
 * Select threads to run.
 * Notice that the running threads still consume a slot.
 */
struct kse *
sched_choose(void)
{
	struct kse *ke;
	struct runq *rq;

#ifdef SMP
	struct kse *kecpu;

	rq = &runq;
	ke = runq_choose(&runq);
	kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);

	if (ke == NULL ||
	    (kecpu != NULL &&
	     kecpu->ke_thread->td_priority < ke->ke_thread->td_priority)) {
		CTR2(KTR_RUNQ, "choosing kse %p from pcpu runq %d", kecpu,
		     PCPU_GET(cpuid));
		ke = kecpu;
		rq = &runq_pcpu[PCPU_GET(cpuid)];
	} else {
		CTR1(KTR_RUNQ, "choosing kse %p from main runq", ke);
	}

#else
	rq = &runq;
	ke = runq_choose(&runq);
#endif

	if (ke != NULL) {
		runq_remove(rq, ke);
		ke->ke_state = KES_THREAD;

		KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
		    ("sched_choose: process swapped out"));
	}
	return (ke);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	KASSERT((td->td_flags & TDF_BORROWING) == 0,
	    ("thread with borrowed priority returning to userland"));
	kg = td->td_ksegrp;
	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		td->td_base_pri = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

void
sched_bind(struct thread *td, int cpu)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("sched_bind: cannot bind non-running thread"));

	ke = td->td_kse;

	ke->ke_flags |= KEF_BOUND;
#ifdef SMP
	ke->ke_runq = &runq_pcpu[cpu];
	if (PCPU_GET(cpuid) == cpu)
		return;

	ke->ke_state = KES_THREAD;

	mi_switch(SW_VOL, NULL);
#endif
}

void
sched_unbind(struct thread* td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	td->td_kse->ke_flags &= ~KEF_BOUND;
}

int
sched_is_bound(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	return (td->td_kse->ke_flags & KEF_BOUND);
}

void
sched_relinquish(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;
	mtx_lock_spin(&sched_lock);
	if (kg->kg_pri_class == PRI_TIMESHARE)
		sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch(SW_VOL, NULL);
	mtx_unlock_spin(&sched_lock);
}

int
sched_load(void)
{
	return (sched_tdcnt);
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct kse));
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	struct kse *ke;

	ke = td->td_kse;
	return (ke->ke_pctcpu);
}

void
sched_tick(void)
{
}

#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"