xref: /freebsd/sys/kern/sched_4bsd.c (revision 8ef24a0d4b28fe230e20637f56869cc4148cd2ca)
1 /*-
2  * Copyright (c) 1982, 1986, 1990, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include "opt_hwpmc_hooks.h"
39 #include "opt_sched.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/cpuset.h>
44 #include <sys/kernel.h>
45 #include <sys/ktr.h>
46 #include <sys/lock.h>
47 #include <sys/kthread.h>
48 #include <sys/mutex.h>
49 #include <sys/proc.h>
50 #include <sys/resourcevar.h>
51 #include <sys/sched.h>
52 #include <sys/sdt.h>
53 #include <sys/smp.h>
54 #include <sys/sysctl.h>
55 #include <sys/sx.h>
56 #include <sys/turnstile.h>
57 #include <sys/umtx.h>
58 #include <machine/pcb.h>
59 #include <machine/smp.h>
60 
61 #ifdef HWPMC_HOOKS
62 #include <sys/pmckern.h>
63 #endif
64 
65 #ifdef KDTRACE_HOOKS
66 #include <sys/dtrace_bsd.h>
67 int				dtrace_vtime_active;
68 dtrace_vtime_switch_func_t	dtrace_vtime_switch_func;
69 #endif
70 
71 /*
72  * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
73  * the range 100-256 Hz (approximately).
74  */
75 #define	ESTCPULIM(e) \
76     min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
77     RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
78 #ifdef SMP
79 #define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
80 #else
81 #define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
82 #endif
83 #define	NICE_WEIGHT		1	/* Priorities per nice level. */
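/*
 * Worked example, assuming the stock values RQ_PPQ == 4 and
 * PRIO_MIN/PRIO_MAX == -20/20: on a UP kernel INVERSE_ESTCPU_WEIGHT is 8,
 * so ESTCPULIM() caps an estcpu value at 8 * (1 * 40 - 4) + 8 - 1 = 295.
 * This keeps PUSER + ts_estcpu / INVERSE_ESTCPU_WEIGHT + the nice offset
 * inside the timeshare priority range computed in resetpriority() below.
 */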
84 
85 #define	TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))
86 
87 /*
88  * The schedulable entity that runs a context.
89  * This is an extension to the thread structure and is tailored to
90  * the requirements of this scheduler.
91  * All fields are protected by the scheduler lock.
92  */
93 struct td_sched {
94 	fixpt_t		ts_pctcpu;	/* %cpu during p_swtime. */
95 	u_int		ts_estcpu;	/* Estimated cpu utilization. */
96 	int		ts_cpticks;	/* Ticks of cpu time. */
97 	int		ts_slptime;	/* Seconds !RUNNING. */
98 	int		ts_slice;	/* Remaining part of time slice. */
99 	int		ts_flags;
100 	struct runq	*ts_runq;	/* runq the thread is currently on */
101 #ifdef KTR
102 	char		ts_name[TS_NAME_LEN];
103 #endif
104 };
105 
106 /* flags kept in td_flags */
107 #define TDF_DIDRUN	TDF_SCHED0	/* thread actually ran. */
108 #define TDF_BOUND	TDF_SCHED1	/* Bound to one CPU. */
109 #define	TDF_SLICEEND	TDF_SCHED2	/* Thread time slice is over. */
110 
111 /* flags kept in ts_flags */
112 #define	TSF_AFFINITY	0x0001		/* Has a non-"full" CPU set. */
113 
114 #define SKE_RUNQ_PCPU(ts)						\
115     ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
116 
117 #define	THREAD_CAN_SCHED(td, cpu)	\
118     CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
119 
120 static struct td_sched td_sched0;
121 static struct mtx sched_lock;
122 
123 static int	realstathz = 127; /* stathz is sometimes 0, so fall back to hz. */
124 static int	sched_tdcnt;	/* Total runnable threads in the system. */
125 static int	sched_slice = 12; /* Thread run time before rescheduling. */
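/*
 * With the default realstathz of 127, the initial slice of 12 stathz ticks
 * is roughly 94 ms; sched_initticks() recomputes it as realstathz / 10 once
 * the real stathz is known, which again yields about a tenth of a second.
 */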
126 
127 static void	setup_runqs(void);
128 static void	schedcpu(void);
129 static void	schedcpu_thread(void);
130 static void	sched_priority(struct thread *td, u_char prio);
131 static void	sched_setup(void *dummy);
132 static void	maybe_resched(struct thread *td);
133 static void	updatepri(struct thread *td);
134 static void	resetpriority(struct thread *td);
135 static void	resetpriority_thread(struct thread *td);
136 #ifdef SMP
137 static int	sched_pickcpu(struct thread *td);
138 static int	forward_wakeup(int cpunum);
139 static void	kick_other_cpu(int pri, int cpuid);
140 #endif
141 
142 static struct kproc_desc sched_kp = {
143         "schedcpu",
144         schedcpu_thread,
145         NULL
146 };
147 SYSINIT(schedcpu, SI_SUB_LAST, SI_ORDER_FIRST, kproc_start,
148     &sched_kp);
149 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
150 
151 static void sched_initticks(void *dummy);
152 SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
153     NULL);
154 
155 /*
156  * Global run queue.
157  */
158 static struct runq runq;
159 
160 #ifdef SMP
161 /*
162  * Per-CPU run queues
163  */
164 static struct runq runq_pcpu[MAXCPU];
165 long runq_length[MAXCPU];
166 
167 static cpuset_t idle_cpus_mask;
168 #endif
169 
170 struct pcpuidlestat {
171 	u_int idlecalls;
172 	u_int oldidlecalls;
173 };
174 static DPCPU_DEFINE(struct pcpuidlestat, idlestat);
175 
176 static void
177 setup_runqs(void)
178 {
179 #ifdef SMP
180 	int i;
181 
182 	for (i = 0; i < MAXCPU; ++i)
183 		runq_init(&runq_pcpu[i]);
184 #endif
185 
186 	runq_init(&runq);
187 }
188 
189 static int
190 sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
191 {
192 	int error, new_val, period;
193 
194 	period = 1000000 / realstathz;
195 	new_val = period * sched_slice;
196 	error = sysctl_handle_int(oidp, &new_val, 0, req);
197 	if (error != 0 || req->newptr == NULL)
198 		return (error);
199 	if (new_val <= 0)
200 		return (EINVAL);
201 	sched_slice = imax(1, (new_val + period / 2) / period);
202 	hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
203 	    realstathz);
204 	return (0);
205 }
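
/*
 * Example: with the defaults (realstathz == 127, sched_slice == 12) a read of
 * kern.sched.quantum reports (1000000 / 127) * 12 = 94488 us; a written value
 * is rounded to the nearest whole number of stathz ticks.
 */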
206 
207 SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");
208 
209 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
210     "Scheduler name");
211 SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
212     NULL, 0, sysctl_kern_quantum, "I",
213     "Quantum for timeshare threads in microseconds");
214 SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
215     "Quantum for timeshare threads in stathz ticks");
216 #ifdef SMP
217 /* Enable forwarding of wakeups to all other cpus */
218 static SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL,
219     "Kernel SMP");
220 
221 static int runq_fuzz = 1;
222 SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
223 
224 static int forward_wakeup_enabled = 1;
225 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
226 	   &forward_wakeup_enabled, 0,
227 	   "Forwarding of wakeup to idle CPUs");
228 
229 static int forward_wakeups_requested = 0;
230 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
231 	   &forward_wakeups_requested, 0,
232 	   "Requests for Forwarding of wakeup to idle CPUs");
233 
234 static int forward_wakeups_delivered = 0;
235 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
236 	   &forward_wakeups_delivered, 0,
237 	   "Completed Forwarding of wakeup to idle CPUs");
238 
239 static int forward_wakeup_use_mask = 1;
240 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
241 	   &forward_wakeup_use_mask, 0,
242 	   "Use the mask of idle cpus");
243 
244 static int forward_wakeup_use_loop = 0;
245 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
246 	   &forward_wakeup_use_loop, 0,
247 	   "Use a loop to find idle cpus");
248 
249 #endif
250 #if 0
251 static int sched_followon = 0;
252 SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
253 	   &sched_followon, 0,
254 	   "allow threads to share a quantum");
255 #endif
256 
257 SDT_PROVIDER_DEFINE(sched);
258 
259 SDT_PROBE_DEFINE3(sched, , , change__pri, "struct thread *",
260     "struct proc *", "uint8_t");
261 SDT_PROBE_DEFINE3(sched, , , dequeue, "struct thread *",
262     "struct proc *", "void *");
263 SDT_PROBE_DEFINE4(sched, , , enqueue, "struct thread *",
264     "struct proc *", "void *", "int");
265 SDT_PROBE_DEFINE4(sched, , , lend__pri, "struct thread *",
266     "struct proc *", "uint8_t", "struct thread *");
267 SDT_PROBE_DEFINE2(sched, , , load__change, "int", "int");
268 SDT_PROBE_DEFINE2(sched, , , off__cpu, "struct thread *",
269     "struct proc *");
270 SDT_PROBE_DEFINE(sched, , , on__cpu);
271 SDT_PROBE_DEFINE(sched, , , remain__cpu);
272 SDT_PROBE_DEFINE2(sched, , , surrender, "struct thread *",
273     "struct proc *");
274 
275 static __inline void
276 sched_load_add(void)
277 {
278 
279 	sched_tdcnt++;
280 	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
281 	SDT_PROBE2(sched, , , load__change, NOCPU, sched_tdcnt);
282 }
283 
284 static __inline void
285 sched_load_rem(void)
286 {
287 
288 	sched_tdcnt--;
289 	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
290 	SDT_PROBE2(sched, , , load__change, NOCPU, sched_tdcnt);
291 }
292 /*
293  * Arrange to reschedule if necessary, taking the priorities and
294  * schedulers into account.
295  */
296 static void
297 maybe_resched(struct thread *td)
298 {
299 
300 	THREAD_LOCK_ASSERT(td, MA_OWNED);
301 	if (td->td_priority < curthread->td_priority)
302 		curthread->td_flags |= TDF_NEEDRESCHED;
303 }
304 
305 /*
306  * This function is called when a thread is about to be put on run queue
307  * because it has been made runnable or its priority has been adjusted.  It
308  * determines if the new thread should be immediately preempted to.  If so,
309  * it switches to it and eventually returns true.  If not, it returns false
310  * so that the caller may place the thread on an appropriate run queue.
311  */
312 int
313 maybe_preempt(struct thread *td)
314 {
315 #ifdef PREEMPTION
316 	struct thread *ctd;
317 	int cpri, pri;
318 
319 	/*
320 	 * The new thread should not preempt the current thread if any of the
321 	 * following conditions are true:
322 	 *
323 	 *  - The kernel is in the throes of crashing (panicstr).
324 	 *  - The current thread has a higher (numerically lower) or
325 	 *    equivalent priority.  Note that this prevents curthread from
326 	 *    trying to preempt to itself.
327 	 *  - It is too early in the boot for context switches (cold is set).
328 	 *  - The current thread has an inhibitor set or is in the process of
329 	 *    exiting.  In this case, the current thread is about to switch
330 	 *    out anyway, so there's no point in preempting.  If we did,
331 	 *    the current thread would not be properly resumed as well, so
332 	 *    just avoid that whole landmine.
333 	 *  - If the new thread's priority is not a realtime priority and
334 	 *    the current thread's priority is not an idle priority and
335 	 *    FULL_PREEMPTION is disabled.
336 	 *
337 	 * If all of these conditions are false, but the current thread is in
338 	 * a nested critical section, then we have to defer the preemption
339 	 * until we exit the critical section.  Otherwise, switch immediately
340 	 * to the new thread.
341 	 */
342 	ctd = curthread;
343 	THREAD_LOCK_ASSERT(td, MA_OWNED);
344 	KASSERT((td->td_inhibitors == 0),
345 			("maybe_preempt: trying to run inhibited thread"));
346 	pri = td->td_priority;
347 	cpri = ctd->td_priority;
348 	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
349 	    TD_IS_INHIBITED(ctd))
350 		return (0);
351 #ifndef FULL_PREEMPTION
352 	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
353 		return (0);
354 #endif
355 
356 	if (ctd->td_critnest > 1) {
357 		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
358 		    ctd->td_critnest);
359 		ctd->td_owepreempt = 1;
360 		return (0);
361 	}
362 	/*
363 	 * Thread is runnable but not yet put on system run queue.
364 	 */
365 	MPASS(ctd->td_lock == td->td_lock);
366 	MPASS(TD_ON_RUNQ(td));
367 	TD_SET_RUNNING(td);
368 	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
369 	    td->td_proc->p_pid, td->td_name);
370 	mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, td);
371 	/*
372 	 * td's lock pointer may have changed.  We have to return with it
373 	 * locked.
374 	 */
375 	spinlock_enter();
376 	thread_unlock(ctd);
377 	thread_lock(td);
378 	spinlock_exit();
379 	return (1);
380 #else
381 	return (0);
382 #endif
383 }
384 
385 /*
386  * Constants for digital decay and forget:
387  *	90% of (ts_estcpu) usage in 5 * loadav time
388  *	95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
389  *          Note that, as ps(1) mentions, this can let percentages
390  *          total over 100% (I've seen 137.9% for 3 processes).
391  *
392  * Note that sched_clock() updates ts_estcpu and ts_cpticks asynchronously.
393  *
394  * We wish to decay away 90% of ts_estcpu in (5 * loadavg) seconds.
395  * That is, the system wants to compute a value of decay such
396  * that the following for loop:
397  * 	for (i = 0; i < (5 * loadavg); i++)
398  * 		ts_estcpu *= decay;
399  * will compute
400  * 	ts_estcpu *= 0.1;
401  * for all values of loadavg:
402  *
403  * Mathematically this loop can be expressed by saying:
404  * 	decay ** (5 * loadavg) ~= .1
405  *
406  * The system computes decay as:
407  * 	decay = (2 * loadavg) / (2 * loadavg + 1)
408  *
409  * We wish to prove that the system's computation of decay
410  * will always fulfill the equation:
411  * 	decay ** (5 * loadavg) ~= .1
412  *
413  * If we compute b as:
414  * 	b = 2 * loadavg
415  * then
416  * 	decay = b / (b + 1)
417  *
418  * We now need to prove two things:
419  *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
420  *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
421  *
422  * Facts:
423  *         For x close to zero, exp(x) =~ 1 + x, since
424  *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
425  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
426  *         For x close to zero, ln(1+x) =~ x, since
427  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
428  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
429  *         ln(.1) =~ -2.30
430  *
431  * Proof of (1):
432  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
433  *	solving for factor,
434  *      ln(factor) =~ (-2.30/5*loadav), or
435  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
436  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
437  *
438  * Proof of (2):
439  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
440  *	solving for power,
441  *      power*ln(b/(b+1)) =~ -2.30, or
442  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
443  *
444  * Actual power values for the implemented algorithm are as follows:
445  *      loadav: 1       2       3       4
446  *      power:  5.68    10.32   14.94   19.55
447  */
448 
449 /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
450 #define	loadfactor(loadav)	(2 * (loadav))
451 #define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
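/*
 * Worked example: averunnable.ldavg[] is in FSCALE fixed point, so with a
 * load average of 1.0 loadfactor() yields 2 * FSCALE and decay_cpu() scales
 * ts_estcpu by 2 * FSCALE / (3 * FSCALE) = 2/3, matching the
 * decay = (2 * loadavg) / (2 * loadavg + 1) formula derived above.
 */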
452 
453 /* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
454 static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
455 SYSCTL_UINT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
456 
457 /*
458  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
459  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
460  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
461  *
462  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
463  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
464  *
465  * If you don't want to bother with the faster/more-accurate formula, you
466  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
467  * (more general) method of calculating the %age of CPU used by a process.
468  */
469 #define	CCPU_SHIFT	11
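/*
 * Sanity check for the constant above: ccpu == exp(-1/20) * FSCALE and
 * schedcpu() applies it once per second, so after 60 seconds of idleness
 * ts_pctcpu has been multiplied by exp(-60/20) = exp(-3), i.e. about 5% of
 * the old value remains, which is the "decay 95% in 60 seconds" behaviour
 * described above.
 */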
470 
471 /*
472  * Recompute process priorities, every hz ticks.
473  * MP-safe, called without the Giant mutex.
474  */
475 /* ARGSUSED */
476 static void
477 schedcpu(void)
478 {
479 	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
480 	struct thread *td;
481 	struct proc *p;
482 	struct td_sched *ts;
483 	int awake;
484 
485 	sx_slock(&allproc_lock);
486 	FOREACH_PROC_IN_SYSTEM(p) {
487 		PROC_LOCK(p);
488 		if (p->p_state == PRS_NEW) {
489 			PROC_UNLOCK(p);
490 			continue;
491 		}
492 		FOREACH_THREAD_IN_PROC(p, td) {
493 			awake = 0;
494 			thread_lock(td);
495 			ts = td->td_sched;
496 			/*
497 			 * Increment sleep time (if sleeping).  We
498 			 * ignore overflow, as above.
499 			 */
500 			/*
501 			 * The td_sched slptimes are not touched in wakeup
502 			 * because the thread may not HAVE everything in
503 			 * memory? XXX I think this is out of date.
504 			 */
505 			if (TD_ON_RUNQ(td)) {
506 				awake = 1;
507 				td->td_flags &= ~TDF_DIDRUN;
508 			} else if (TD_IS_RUNNING(td)) {
509 				awake = 1;
510 				/* Do not clear TDF_DIDRUN */
511 			} else if (td->td_flags & TDF_DIDRUN) {
512 				awake = 1;
513 				td->td_flags &= ~TDF_DIDRUN;
514 			}
515 
516 			/*
517 			 * ts_pctcpu is only for ps and ttyinfo().
518 			 */
519 			ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
520 			/*
521 			 * If the td_sched has been idle the entire second,
522 			 * stop recalculating its priority until
523 			 * it wakes up.
524 			 */
525 			if (ts->ts_cpticks != 0) {
526 #if	(FSHIFT >= CCPU_SHIFT)
527 				ts->ts_pctcpu += (realstathz == 100)
528 				    ? ((fixpt_t) ts->ts_cpticks) <<
529 				    (FSHIFT - CCPU_SHIFT) :
530 				    100 * (((fixpt_t) ts->ts_cpticks)
531 				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
532 #else
533 				ts->ts_pctcpu += ((FSCALE - ccpu) *
534 				    (ts->ts_cpticks *
535 				    FSCALE / realstathz)) >> FSHIFT;
536 #endif
537 				ts->ts_cpticks = 0;
538 			}
539 			/*
540 			 * If there are ANY running threads in this process,
541 			 * then don't count it as sleeping.
542 			 * XXX: this is broken.
543 			 */
544 			if (awake) {
545 				if (ts->ts_slptime > 1) {
546 					/*
547 					 * In an ideal world, this should not
548 					 * happen, because whoever woke us
549 					 * up from the long sleep should have
550 					 * unwound the slptime and reset our
551 					 * priority before we run at the stale
552 					 * priority.  Should KASSERT at some
553 					 * point when all the cases are fixed.
554 					 */
555 					updatepri(td);
556 				}
557 				ts->ts_slptime = 0;
558 			} else
559 				ts->ts_slptime++;
560 			if (ts->ts_slptime > 1) {
561 				thread_unlock(td);
562 				continue;
563 			}
564 			ts->ts_estcpu = decay_cpu(loadfac, ts->ts_estcpu);
565 			resetpriority(td);
566 			resetpriority_thread(td);
567 			thread_unlock(td);
568 		}
569 		PROC_UNLOCK(p);
570 	}
571 	sx_sunlock(&allproc_lock);
572 }
573 
574 /*
575  * Main loop for a kthread that executes schedcpu once a second.
576  */
577 static void
578 schedcpu_thread(void)
579 {
580 
581 	for (;;) {
582 		schedcpu();
583 		pause("-", hz);
584 	}
585 }
586 
587 /*
588  * Recalculate the priority of a process after it has slept for a while.
589  * For all load averages >= 1 and max ts_estcpu of 255, sleeping for at
590  * least six times the loadfactor will decay ts_estcpu to zero.
591  */
592 static void
593 updatepri(struct thread *td)
594 {
595 	struct td_sched *ts;
596 	fixpt_t loadfac;
597 	unsigned int newcpu;
598 
599 	ts = td->td_sched;
600 	loadfac = loadfactor(averunnable.ldavg[0]);
601 	if (ts->ts_slptime > 5 * loadfac)
602 		ts->ts_estcpu = 0;
603 	else {
604 		newcpu = ts->ts_estcpu;
605 		ts->ts_slptime--;	/* was incremented in schedcpu() */
606 		while (newcpu && --ts->ts_slptime)
607 			newcpu = decay_cpu(loadfac, newcpu);
608 		ts->ts_estcpu = newcpu;
609 	}
610 }
611 
612 /*
613  * Compute the priority of a process when running in user mode.
614  * Arrange to reschedule if the resulting priority is better
615  * than that of the current process.
616  */
617 static void
618 resetpriority(struct thread *td)
619 {
620 	u_int newpriority;
621 
622 	if (td->td_pri_class != PRI_TIMESHARE)
623 		return;
624 	newpriority = PUSER + td->td_sched->ts_estcpu / INVERSE_ESTCPU_WEIGHT +
625 	    NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
626 	newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
627 	    PRI_MAX_TIMESHARE);
628 	sched_user_prio(td, newpriority);
629 }
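
/*
 * Worked example, assuming the stock priority layout in <sys/priority.h>
 * (PUSER == PRI_MIN_TIMESHARE == 120, PRI_MAX_TIMESHARE == 223): on a UP
 * kernel a thread with ts_estcpu == 80 and nice == 0 gets
 * 120 + 80 / 8 + 1 * (0 - (-20)) = 150, which lies inside the timeshare
 * range and is therefore not clamped.
 */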
630 
631 /*
632  * Update the thread's priority when the associated process's user
633  * priority changes.
634  */
635 static void
636 resetpriority_thread(struct thread *td)
637 {
638 
639 	/* Only change threads with a time sharing user priority. */
640 	if (td->td_priority < PRI_MIN_TIMESHARE ||
641 	    td->td_priority > PRI_MAX_TIMESHARE)
642 		return;
643 
644 	/* XXX the whole needresched thing is broken, but not silly. */
645 	maybe_resched(td);
646 
647 	sched_prio(td, td->td_user_pri);
648 }
649 
650 /* ARGSUSED */
651 static void
652 sched_setup(void *dummy)
653 {
654 
655 	setup_runqs();
656 
657 	/* Account for thread0. */
658 	sched_load_add();
659 }
660 
661 /*
662  * This routine determines time constants after stathz and hz are set up.
663  */
664 static void
665 sched_initticks(void *dummy)
666 {
667 
668 	realstathz = stathz ? stathz : hz;
669 	sched_slice = realstathz / 10;	/* ~100ms */
670 	hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
671 	    realstathz);
672 }
673 
674 /* External interfaces start here */
675 
676 /*
677  * Very early in the boot some setup of scheduler-specific
678  * parts of proc0 and of some scheduler resources needs to be done.
679  * Called from:
680  *  proc0_init()
681  */
682 void
683 schedinit(void)
684 {
685 	/*
686 	 * Set up the scheduler specific parts of proc0.
687 	 */
688 	proc0.p_sched = NULL; /* XXX */
689 	thread0.td_sched = &td_sched0;
690 	thread0.td_lock = &sched_lock;
691 	td_sched0.ts_slice = sched_slice;
692 	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
693 }
694 
695 int
696 sched_runnable(void)
697 {
698 #ifdef SMP
699 	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
700 #else
701 	return runq_check(&runq);
702 #endif
703 }
704 
705 int
706 sched_rr_interval(void)
707 {
708 
709 	/* Convert sched_slice from stathz to hz. */
710 	return (imax(1, (sched_slice * hz + realstathz / 2) / realstathz));
711 }
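
/*
 * For example, with the defaults above (sched_slice == 12, realstathz == 127)
 * and hz == 1000 (a common setting), sched_rr_interval() reports
 * (12 * 1000 + 63) / 127 = 94 hz ticks, i.e. roughly the traditional ~100 ms
 * round-robin interval.
 */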
712 
713 /*
714  * We adjust the priority of the current process.  The priority of a
715  * process gets worse as it accumulates CPU time.  The cpu usage
716  * estimator (ts_estcpu) is increased here.  resetpriority() will
717  * compute a different priority each time ts_estcpu increases by
718  * INVERSE_ESTCPU_WEIGHT (until PRI_MAX_TIMESHARE is reached).  The
719  * cpu usage estimator ramps up quite quickly when the process is
720  * running (linearly), and decays away exponentially, at a rate which
721  * is proportionally slower when the system is busy.  The basic
722  * principle is that the system will 90% forget that the process used
723  * a lot of CPU time in 5 * loadav seconds.  This causes the system to
724  * favor processes which haven't run much recently, and to round-robin
725  * among other processes.
726  */
727 void
728 sched_clock(struct thread *td)
729 {
730 	struct pcpuidlestat *stat;
731 	struct td_sched *ts;
732 
733 	THREAD_LOCK_ASSERT(td, MA_OWNED);
734 	ts = td->td_sched;
735 
736 	ts->ts_cpticks++;
737 	ts->ts_estcpu = ESTCPULIM(ts->ts_estcpu + 1);
738 	if ((ts->ts_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
739 		resetpriority(td);
740 		resetpriority_thread(td);
741 	}
742 
743 	/*
744 	 * Force a context switch if the current thread has used up a full
745 	 * time slice (default is 100ms).
746 	 */
747 	if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) {
748 		ts->ts_slice = sched_slice;
749 		td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
750 	}
751 
752 	stat = DPCPU_PTR(idlestat);
753 	stat->oldidlecalls = stat->idlecalls;
754 	stat->idlecalls = 0;
755 }
756 
757 /*
758  * Charge child's scheduling CPU usage to parent.
759  */
760 void
761 sched_exit(struct proc *p, struct thread *td)
762 {
763 
764 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit",
765 	    "prio:%d", td->td_priority);
766 
767 	PROC_LOCK_ASSERT(p, MA_OWNED);
768 	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
769 }
770 
771 void
772 sched_exit_thread(struct thread *td, struct thread *child)
773 {
774 
775 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
776 	    "prio:%d", child->td_priority);
777 	thread_lock(td);
778 	td->td_sched->ts_estcpu = ESTCPULIM(td->td_sched->ts_estcpu +
779 	    child->td_sched->ts_estcpu);
780 	thread_unlock(td);
781 	thread_lock(child);
782 	if ((child->td_flags & TDF_NOLOAD) == 0)
783 		sched_load_rem();
784 	thread_unlock(child);
785 }
786 
787 void
788 sched_fork(struct thread *td, struct thread *childtd)
789 {
790 	sched_fork_thread(td, childtd);
791 }
792 
793 void
794 sched_fork_thread(struct thread *td, struct thread *childtd)
795 {
796 	struct td_sched *ts;
797 
798 	childtd->td_oncpu = NOCPU;
799 	childtd->td_lastcpu = NOCPU;
800 	childtd->td_lock = &sched_lock;
801 	childtd->td_cpuset = cpuset_ref(td->td_cpuset);
802 	childtd->td_priority = childtd->td_base_pri;
803 	ts = childtd->td_sched;
804 	bzero(ts, sizeof(*ts));
805 	ts->ts_estcpu = td->td_sched->ts_estcpu;
806 	ts->ts_flags |= (td->td_sched->ts_flags & TSF_AFFINITY);
807 	ts->ts_slice = 1;
808 }
809 
810 void
811 sched_nice(struct proc *p, int nice)
812 {
813 	struct thread *td;
814 
815 	PROC_LOCK_ASSERT(p, MA_OWNED);
816 	p->p_nice = nice;
817 	FOREACH_THREAD_IN_PROC(p, td) {
818 		thread_lock(td);
819 		resetpriority(td);
820 		resetpriority_thread(td);
821 		thread_unlock(td);
822 	}
823 }
824 
825 void
826 sched_class(struct thread *td, int class)
827 {
828 	THREAD_LOCK_ASSERT(td, MA_OWNED);
829 	td->td_pri_class = class;
830 }
831 
832 /*
833  * Adjust the priority of a thread.
834  */
835 static void
836 sched_priority(struct thread *td, u_char prio)
837 {
838 
839 
840 	KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
841 	    "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
842 	    sched_tdname(curthread));
843 	SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio);
844 	if (td != curthread && prio > td->td_priority) {
845 		KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
846 		    "lend prio", "prio:%d", td->td_priority, "new prio:%d",
847 		    prio, KTR_ATTR_LINKED, sched_tdname(td));
848 		SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio,
849 		    curthread);
850 	}
851 	THREAD_LOCK_ASSERT(td, MA_OWNED);
852 	if (td->td_priority == prio)
853 		return;
854 	td->td_priority = prio;
855 	if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
856 		sched_rem(td);
857 		sched_add(td, SRQ_BORING);
858 	}
859 }
860 
861 /*
862  * Update a thread's priority when it is lent another thread's
863  * priority.
864  */
865 void
866 sched_lend_prio(struct thread *td, u_char prio)
867 {
868 
869 	td->td_flags |= TDF_BORROWING;
870 	sched_priority(td, prio);
871 }
872 
873 /*
874  * Restore a thread's priority when priority propagation is
875  * over.  The prio argument is the minimum priority the thread
876  * needs to have to satisfy other possible priority lending
877  * requests.  If the thread's regular priority is less
878  * important than prio the thread will keep a priority boost
879  * of prio.
880  */
881 void
882 sched_unlend_prio(struct thread *td, u_char prio)
883 {
884 	u_char base_pri;
885 
886 	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
887 	    td->td_base_pri <= PRI_MAX_TIMESHARE)
888 		base_pri = td->td_user_pri;
889 	else
890 		base_pri = td->td_base_pri;
891 	if (prio >= base_pri) {
892 		td->td_flags &= ~TDF_BORROWING;
893 		sched_prio(td, base_pri);
894 	} else
895 		sched_lend_prio(td, prio);
896 }
897 
898 void
899 sched_prio(struct thread *td, u_char prio)
900 {
901 	u_char oldprio;
902 
903 	/* First, update the base priority. */
904 	td->td_base_pri = prio;
905 
906 	/*
907 	 * If the thread is borrowing another thread's priority, don't ever
908 	 * lower the priority.
909 	 */
910 	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
911 		return;
912 
913 	/* Change the real priority. */
914 	oldprio = td->td_priority;
915 	sched_priority(td, prio);
916 
917 	/*
918 	 * If the thread is on a turnstile, then let the turnstile update
919 	 * its state.
920 	 */
921 	if (TD_ON_LOCK(td) && oldprio != prio)
922 		turnstile_adjust(td, oldprio);
923 }
924 
925 void
926 sched_user_prio(struct thread *td, u_char prio)
927 {
928 
929 	THREAD_LOCK_ASSERT(td, MA_OWNED);
930 	td->td_base_user_pri = prio;
931 	if (td->td_lend_user_pri <= prio)
932 		return;
933 	td->td_user_pri = prio;
934 }
935 
936 void
937 sched_lend_user_prio(struct thread *td, u_char prio)
938 {
939 
940 	THREAD_LOCK_ASSERT(td, MA_OWNED);
941 	td->td_lend_user_pri = prio;
942 	td->td_user_pri = min(prio, td->td_base_user_pri);
943 	if (td->td_priority > td->td_user_pri)
944 		sched_prio(td, td->td_user_pri);
945 	else if (td->td_priority != td->td_user_pri)
946 		td->td_flags |= TDF_NEEDRESCHED;
947 }
948 
949 void
950 sched_sleep(struct thread *td, int pri)
951 {
952 
953 	THREAD_LOCK_ASSERT(td, MA_OWNED);
954 	td->td_slptick = ticks;
955 	td->td_sched->ts_slptime = 0;
956 	if (pri != 0 && PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
957 		sched_prio(td, pri);
958 	if (TD_IS_SUSPENDED(td) || pri >= PSOCK)
959 		td->td_flags |= TDF_CANSWAP;
960 }
961 
962 void
963 sched_switch(struct thread *td, struct thread *newtd, int flags)
964 {
965 	struct mtx *tmtx;
966 	struct td_sched *ts;
967 	struct proc *p;
968 	int preempted;
969 
970 	tmtx = NULL;
971 	ts = td->td_sched;
972 	p = td->td_proc;
973 
974 	THREAD_LOCK_ASSERT(td, MA_OWNED);
975 
976 	/*
977 	 * Switch to the sched lock to fix things up and pick
978 	 * a new thread.
979 	 * Block the td_lock in order to avoid breaking the critical path.
980 	 */
981 	if (td->td_lock != &sched_lock) {
982 		mtx_lock_spin(&sched_lock);
983 		tmtx = thread_lock_block(td);
984 	}
985 
986 	if ((td->td_flags & TDF_NOLOAD) == 0)
987 		sched_load_rem();
988 
989 	td->td_lastcpu = td->td_oncpu;
990 	preempted = !((td->td_flags & TDF_SLICEEND) ||
991 	    (flags & SWT_RELINQUISH));
992 	td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND);
993 	td->td_owepreempt = 0;
994 	td->td_oncpu = NOCPU;
995 
996 	/*
997 	 * At the last moment, if this thread is still marked RUNNING,
998 	 * then put it back on the run queue as it has not been suspended
999 	 * or stopped or anything else similar.  We never put the idle
1000 	 * threads on the run queue, however.
1001 	 */
1002 	if (td->td_flags & TDF_IDLETD) {
1003 		TD_SET_CAN_RUN(td);
1004 #ifdef SMP
1005 		CPU_CLR(PCPU_GET(cpuid), &idle_cpus_mask);
1006 #endif
1007 	} else {
1008 		if (TD_IS_RUNNING(td)) {
1009 			/* Put us back on the run queue. */
1010 			sched_add(td, preempted ?
1011 			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1012 			    SRQ_OURSELF|SRQ_YIELDING);
1013 		}
1014 	}
1015 	if (newtd) {
1016 		/*
1017 		 * The thread we are about to run needs to be counted
1018 		 * as if it had been added to the run queue and selected.
1019 		 * It came from:
1020 		 * * A preemption
1021 		 * * An upcall
1022 		 * * A followon
1023 		 */
1024 		KASSERT((newtd->td_inhibitors == 0),
1025 			("trying to run inhibited thread"));
1026 		newtd->td_flags |= TDF_DIDRUN;
1027 		TD_SET_RUNNING(newtd);
1028 		if ((newtd->td_flags & TDF_NOLOAD) == 0)
1029 			sched_load_add();
1030 	} else {
1031 		newtd = choosethread();
1032 		MPASS(newtd->td_lock == &sched_lock);
1033 	}
1034 
1035 	if (td != newtd) {
1036 #ifdef	HWPMC_HOOKS
1037 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1038 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1039 #endif
1040 
1041 		SDT_PROBE2(sched, , , off__cpu, newtd, newtd->td_proc);
1042 
1043 		/* I feel sleepy */
1044 		lock_profile_release_lock(&sched_lock.lock_object);
1045 #ifdef KDTRACE_HOOKS
1046 		/*
1047 		 * If DTrace has set the active vtime enum to anything
1048 		 * other than INACTIVE (0), then it should have set the
1049 		 * function to call.
1050 		 */
1051 		if (dtrace_vtime_active)
1052 			(*dtrace_vtime_switch_func)(newtd);
1053 #endif
1054 
1055 		cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
1056 		lock_profile_obtain_lock_success(&sched_lock.lock_object,
1057 		    0, 0, __FILE__, __LINE__);
1058 		/*
1059 		 * Where am I?  What year is it?
1060 		 * We are in the same thread that went to sleep above,
1061 		 * but any amount of time may have passed. All our context
1062 		 * will still be available as will local variables.
1063 		 * PCPU values however may have changed as we may have
1064 		 * changed CPU so don't trust cached values of them.
1065 		 * New threads will go to fork_exit() instead of here
1066 		 * so if you change things here you may need to change
1067 		 * things there too.
1068 		 *
1069 		 * If the thread above was exiting it will never wake
1070 		 * up again here, so either it has saved everything it
1071 		 * needed to, or the thread_wait() or wait() will
1072 		 * need to reap it.
1073 		 */
1074 
1075 		SDT_PROBE0(sched, , , on__cpu);
1076 #ifdef	HWPMC_HOOKS
1077 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1078 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1079 #endif
1080 	} else
1081 		SDT_PROBE0(sched, , , remain__cpu);
1082 
1083 #ifdef SMP
1084 	if (td->td_flags & TDF_IDLETD)
1085 		CPU_SET(PCPU_GET(cpuid), &idle_cpus_mask);
1086 #endif
1087 	sched_lock.mtx_lock = (uintptr_t)td;
1088 	td->td_oncpu = PCPU_GET(cpuid);
1089 	MPASS(td->td_lock == &sched_lock);
1090 }
1091 
1092 void
1093 sched_wakeup(struct thread *td)
1094 {
1095 	struct td_sched *ts;
1096 
1097 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1098 	ts = td->td_sched;
1099 	td->td_flags &= ~TDF_CANSWAP;
1100 	if (ts->ts_slptime > 1) {
1101 		updatepri(td);
1102 		resetpriority(td);
1103 	}
1104 	td->td_slptick = 0;
1105 	ts->ts_slptime = 0;
1106 	ts->ts_slice = sched_slice;
1107 	sched_add(td, SRQ_BORING);
1108 }
1109 
1110 #ifdef SMP
1111 static int
1112 forward_wakeup(int cpunum)
1113 {
1114 	struct pcpu *pc;
1115 	cpuset_t dontuse, map, map2;
1116 	u_int id, me;
1117 	int iscpuset;
1118 
1119 	mtx_assert(&sched_lock, MA_OWNED);
1120 
1121 	CTR0(KTR_RUNQ, "forward_wakeup()");
1122 
1123 	if ((!forward_wakeup_enabled) ||
1124 	     (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
1125 		return (0);
1126 	if (!smp_started || cold || panicstr)
1127 		return (0);
1128 
1129 	forward_wakeups_requested++;
1130 
1131 	/*
1132 	 * Check the idle mask we received against what we calculated
1133 	 * before in the old version.
1134 	 */
1135 	me = PCPU_GET(cpuid);
1136 
1137 	/* Don't bother if we should be doing it ourselves. */
1138 	if (CPU_ISSET(me, &idle_cpus_mask) &&
1139 	    (cpunum == NOCPU || me == cpunum))
1140 		return (0);
1141 
1142 	CPU_SETOF(me, &dontuse);
1143 	CPU_OR(&dontuse, &stopped_cpus);
1144 	CPU_OR(&dontuse, &hlt_cpus_mask);
1145 	CPU_ZERO(&map2);
1146 	if (forward_wakeup_use_loop) {
1147 		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
1148 			id = pc->pc_cpuid;
1149 			if (!CPU_ISSET(id, &dontuse) &&
1150 			    pc->pc_curthread == pc->pc_idlethread) {
1151 				CPU_SET(id, &map2);
1152 			}
1153 		}
1154 	}
1155 
1156 	if (forward_wakeup_use_mask) {
1157 		map = idle_cpus_mask;
1158 		CPU_NAND(&map, &dontuse);
1159 
1160 		/* If they are both on, compare and use loop if different. */
1161 		if (forward_wakeup_use_loop) {
1162 			if (CPU_CMP(&map, &map2)) {
1163 				printf("map != map2, loop method preferred\n");
1164 				map = map2;
1165 			}
1166 		}
1167 	} else {
1168 		map = map2;
1169 	}
1170 
1171 	/* If we only allow a specific CPU, then mask off all the others. */
1172 	if (cpunum != NOCPU) {
1173 		KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
1174 		iscpuset = CPU_ISSET(cpunum, &map);
1175 		if (iscpuset == 0)
1176 			CPU_ZERO(&map);
1177 		else
1178 			CPU_SETOF(cpunum, &map);
1179 	}
1180 	if (!CPU_EMPTY(&map)) {
1181 		forward_wakeups_delivered++;
1182 		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
1183 			id = pc->pc_cpuid;
1184 			if (!CPU_ISSET(id, &map))
1185 				continue;
1186 			if (cpu_idle_wakeup(pc->pc_cpuid))
1187 				CPU_CLR(id, &map);
1188 		}
1189 		if (!CPU_EMPTY(&map))
1190 			ipi_selected(map, IPI_AST);
1191 		return (1);
1192 	}
1193 	if (cpunum == NOCPU)
1194 		printf("forward_wakeup: Idle processor not found\n");
1195 	return (0);
1196 }
1197 
1198 static void
1199 kick_other_cpu(int pri, int cpuid)
1200 {
1201 	struct pcpu *pcpu;
1202 	int cpri;
1203 
1204 	pcpu = pcpu_find(cpuid);
1205 	if (CPU_ISSET(cpuid, &idle_cpus_mask)) {
1206 		forward_wakeups_delivered++;
1207 		if (!cpu_idle_wakeup(cpuid))
1208 			ipi_cpu(cpuid, IPI_AST);
1209 		return;
1210 	}
1211 
1212 	cpri = pcpu->pc_curthread->td_priority;
1213 	if (pri >= cpri)
1214 		return;
1215 
1216 #if defined(IPI_PREEMPTION) && defined(PREEMPTION)
1217 #if !defined(FULL_PREEMPTION)
1218 	if (pri <= PRI_MAX_ITHD)
1219 #endif /* ! FULL_PREEMPTION */
1220 	{
1221 		ipi_cpu(cpuid, IPI_PREEMPT);
1222 		return;
1223 	}
1224 #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
1225 
1226 	pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
1227 	ipi_cpu(cpuid, IPI_AST);
1228 	return;
1229 }
1230 #endif /* SMP */
1231 
1232 #ifdef SMP
1233 static int
1234 sched_pickcpu(struct thread *td)
1235 {
1236 	int best, cpu;
1237 
1238 	mtx_assert(&sched_lock, MA_OWNED);
1239 
1240 	if (THREAD_CAN_SCHED(td, td->td_lastcpu))
1241 		best = td->td_lastcpu;
1242 	else
1243 		best = NOCPU;
1244 	CPU_FOREACH(cpu) {
1245 		if (!THREAD_CAN_SCHED(td, cpu))
1246 			continue;
1247 
1248 		if (best == NOCPU)
1249 			best = cpu;
1250 		else if (runq_length[cpu] < runq_length[best])
1251 			best = cpu;
1252 	}
1253 	KASSERT(best != NOCPU, ("no valid CPUs"));
1254 
1255 	return (best);
1256 }
1257 #endif
1258 
1259 void
1260 sched_add(struct thread *td, int flags)
1261 #ifdef SMP
1262 {
1263 	cpuset_t tidlemsk;
1264 	struct td_sched *ts;
1265 	u_int cpu, cpuid;
1266 	int forwarded = 0;
1267 	int single_cpu = 0;
1268 
1269 	ts = td->td_sched;
1270 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1271 	KASSERT((td->td_inhibitors == 0),
1272 	    ("sched_add: trying to run inhibited thread"));
1273 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1274 	    ("sched_add: bad thread state"));
1275 	KASSERT(td->td_flags & TDF_INMEM,
1276 	    ("sched_add: thread swapped out"));
1277 
1278 	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
1279 	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1280 	    sched_tdname(curthread));
1281 	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
1282 	    KTR_ATTR_LINKED, sched_tdname(td));
1283 	SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
1284 	    flags & SRQ_PREEMPTED);
1285 
1286 
1287 	/*
1288 	 * Now that the thread is moving to the run-queue, set the lock
1289 	 * to the scheduler's lock.
1290 	 */
1291 	if (td->td_lock != &sched_lock) {
1292 		mtx_lock_spin(&sched_lock);
1293 		thread_lock_set(td, &sched_lock);
1294 	}
1295 	TD_SET_RUNQ(td);
1296 
1297 	/*
1298 	 * If SMP is started and the thread is pinned or otherwise limited to
1299 	 * a specific set of CPUs, queue the thread to a per-CPU run queue.
1300 	 * Otherwise, queue the thread to the global run queue.
1301 	 *
1302 	 * If SMP has not yet been started we must use the global run queue
1303 	 * as per-CPU state may not be initialized yet and we may crash if we
1304 	 * try to access the per-CPU run queues.
1305 	 */
1306 	if (smp_started && (td->td_pinned != 0 || td->td_flags & TDF_BOUND ||
1307 	    ts->ts_flags & TSF_AFFINITY)) {
1308 		if (td->td_pinned != 0)
1309 			cpu = td->td_lastcpu;
1310 		else if (td->td_flags & TDF_BOUND) {
1311 			/* Find CPU from bound runq. */
1312 			KASSERT(SKE_RUNQ_PCPU(ts),
1313 			    ("sched_add: bound td_sched not on cpu runq"));
1314 			cpu = ts->ts_runq - &runq_pcpu[0];
1315 		} else
1316 			/* Find a valid CPU for our cpuset */
1317 			cpu = sched_pickcpu(td);
1318 		ts->ts_runq = &runq_pcpu[cpu];
1319 		single_cpu = 1;
1320 		CTR3(KTR_RUNQ,
1321 		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
1322 		    cpu);
1323 	} else {
1324 		CTR2(KTR_RUNQ,
1325 		    "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts,
1326 		    td);
1327 		cpu = NOCPU;
1328 		ts->ts_runq = &runq;
1329 	}
1330 
1331 	cpuid = PCPU_GET(cpuid);
1332 	if (single_cpu && cpu != cpuid) {
1333 		kick_other_cpu(td->td_priority, cpu);
1334 	} else {
1335 		if (!single_cpu) {
1336 			tidlemsk = idle_cpus_mask;
1337 			CPU_NAND(&tidlemsk, &hlt_cpus_mask);
1338 			CPU_CLR(cpuid, &tidlemsk);
1339 
1340 			if (!CPU_ISSET(cpuid, &idle_cpus_mask) &&
1341 			    ((flags & SRQ_INTR) == 0) &&
1342 			    !CPU_EMPTY(&tidlemsk))
1343 				forwarded = forward_wakeup(cpu);
1344 		}
1345 
1346 		if (!forwarded) {
1347 			if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
1348 				return;
1349 			else
1350 				maybe_resched(td);
1351 		}
1352 	}
1353 
1354 	if ((td->td_flags & TDF_NOLOAD) == 0)
1355 		sched_load_add();
1356 	runq_add(ts->ts_runq, td, flags);
1357 	if (cpu != NOCPU)
1358 		runq_length[cpu]++;
1359 }
1360 #else /* SMP */
1361 {
1362 	struct td_sched *ts;
1363 
1364 	ts = td->td_sched;
1365 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1366 	KASSERT((td->td_inhibitors == 0),
1367 	    ("sched_add: trying to run inhibited thread"));
1368 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1369 	    ("sched_add: bad thread state"));
1370 	KASSERT(td->td_flags & TDF_INMEM,
1371 	    ("sched_add: thread swapped out"));
1372 	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
1373 	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1374 	    sched_tdname(curthread));
1375 	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
1376 	    KTR_ATTR_LINKED, sched_tdname(td));
1377 	SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
1378 	    flags & SRQ_PREEMPTED);
1379 
1380 	/*
1381 	 * Now that the thread is moving to the run-queue, set the lock
1382 	 * to the scheduler's lock.
1383 	 */
1384 	if (td->td_lock != &sched_lock) {
1385 		mtx_lock_spin(&sched_lock);
1386 		thread_lock_set(td, &sched_lock);
1387 	}
1388 	TD_SET_RUNQ(td);
1389 	CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
1390 	ts->ts_runq = &runq;
1391 
1392 	/*
1393 	 * If we are yielding (on the way out anyhow) or the thread
1394 	 * being saved is US, then don't try to be smart about preemption
1395 	 * or kicking off another CPU as it won't help and may hinder.
1396 	 * In the YIELDING case, we are about to run whoever is being
1397 	 * put in the queue anyhow, and in the OURSELF case, we are
1398 	 * putting ourself on the run queue which also only happens
1399 	 * when we are about to yield.
1400 	 */
1401 	if ((flags & SRQ_YIELDING) == 0) {
1402 		if (maybe_preempt(td))
1403 			return;
1404 	}
1405 	if ((td->td_flags & TDF_NOLOAD) == 0)
1406 		sched_load_add();
1407 	runq_add(ts->ts_runq, td, flags);
1408 	maybe_resched(td);
1409 }
1410 #endif /* SMP */
1411 
1412 void
1413 sched_rem(struct thread *td)
1414 {
1415 	struct td_sched *ts;
1416 
1417 	ts = td->td_sched;
1418 	KASSERT(td->td_flags & TDF_INMEM,
1419 	    ("sched_rem: thread swapped out"));
1420 	KASSERT(TD_ON_RUNQ(td),
1421 	    ("sched_rem: thread not on run queue"));
1422 	mtx_assert(&sched_lock, MA_OWNED);
1423 	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
1424 	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1425 	    sched_tdname(curthread));
1426 	SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL);
1427 
1428 	if ((td->td_flags & TDF_NOLOAD) == 0)
1429 		sched_load_rem();
1430 #ifdef SMP
1431 	if (ts->ts_runq != &runq)
1432 		runq_length[ts->ts_runq - runq_pcpu]--;
1433 #endif
1434 	runq_remove(ts->ts_runq, td);
1435 	TD_SET_CAN_RUN(td);
1436 }
1437 
1438 /*
1439  * Select threads to run.  Note that running threads still consume a
1440  * slot.
1441  */
1442 struct thread *
1443 sched_choose(void)
1444 {
1445 	struct thread *td;
1446 	struct runq *rq;
1447 
1448 	mtx_assert(&sched_lock,  MA_OWNED);
1449 #ifdef SMP
1450 	struct thread *tdcpu;
1451 
1452 	rq = &runq;
1453 	td = runq_choose_fuzz(&runq, runq_fuzz);
1454 	tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
1455 
1456 	if (td == NULL ||
1457 	    (tdcpu != NULL &&
1458 	     tdcpu->td_priority < td->td_priority)) {
1459 		CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
1460 		     PCPU_GET(cpuid));
1461 		td = tdcpu;
1462 		rq = &runq_pcpu[PCPU_GET(cpuid)];
1463 	} else {
1464 		CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td);
1465 	}
1466 
1467 #else
1468 	rq = &runq;
1469 	td = runq_choose(&runq);
1470 #endif
1471 
1472 	if (td) {
1473 #ifdef SMP
1474 		if (td == tdcpu)
1475 			runq_length[PCPU_GET(cpuid)]--;
1476 #endif
1477 		runq_remove(rq, td);
1478 		td->td_flags |= TDF_DIDRUN;
1479 
1480 		KASSERT(td->td_flags & TDF_INMEM,
1481 		    ("sched_choose: thread swapped out"));
1482 		return (td);
1483 	}
1484 	return (PCPU_GET(idlethread));
1485 }
1486 
1487 void
1488 sched_preempt(struct thread *td)
1489 {
1490 
1491 	SDT_PROBE2(sched, , , surrender, td, td->td_proc);
1492 	thread_lock(td);
1493 	if (td->td_critnest > 1)
1494 		td->td_owepreempt = 1;
1495 	else
1496 		mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, NULL);
1497 	thread_unlock(td);
1498 }
1499 
1500 void
1501 sched_userret(struct thread *td)
1502 {
1503 	/*
1504 	 * XXX we cheat slightly on the locking here to avoid locking in
1505 	 * the usual case.  Setting td_priority here is essentially an
1506 	 * incomplete workaround for not setting it properly elsewhere.
1507 	 * Now that some interrupt handlers are threads, not setting it
1508 	 * properly elsewhere can clobber it in the window between setting
1509 	 * it here and returning to user mode, so don't waste time setting
1510 	 * it perfectly here.
1511 	 */
1512 	KASSERT((td->td_flags & TDF_BORROWING) == 0,
1513 	    ("thread with borrowed priority returning to userland"));
1514 	if (td->td_priority != td->td_user_pri) {
1515 		thread_lock(td);
1516 		td->td_priority = td->td_user_pri;
1517 		td->td_base_pri = td->td_user_pri;
1518 		thread_unlock(td);
1519 	}
1520 }
1521 
1522 void
1523 sched_bind(struct thread *td, int cpu)
1524 {
1525 	struct td_sched *ts;
1526 
1527 	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
1528 	KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
1529 
1530 	ts = td->td_sched;
1531 
1532 	td->td_flags |= TDF_BOUND;
1533 #ifdef SMP
1534 	ts->ts_runq = &runq_pcpu[cpu];
1535 	if (PCPU_GET(cpuid) == cpu)
1536 		return;
1537 
1538 	mi_switch(SW_VOL, NULL);
1539 #endif
1540 }
1541 
1542 void
1543 sched_unbind(struct thread* td)
1544 {
1545 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1546 	KASSERT(td == curthread, ("sched_unbind: can only unbind curthread"));
1547 	td->td_flags &= ~TDF_BOUND;
1548 }
1549 
1550 int
1551 sched_is_bound(struct thread *td)
1552 {
1553 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1554 	return (td->td_flags & TDF_BOUND);
1555 }
1556 
1557 void
1558 sched_relinquish(struct thread *td)
1559 {
1560 	thread_lock(td);
1561 	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
1562 	thread_unlock(td);
1563 }
1564 
1565 int
1566 sched_load(void)
1567 {
1568 	return (sched_tdcnt);
1569 }
1570 
1571 int
1572 sched_sizeof_proc(void)
1573 {
1574 	return (sizeof(struct proc));
1575 }
1576 
1577 int
1578 sched_sizeof_thread(void)
1579 {
1580 	return (sizeof(struct thread) + sizeof(struct td_sched));
1581 }
1582 
1583 fixpt_t
1584 sched_pctcpu(struct thread *td)
1585 {
1586 	struct td_sched *ts;
1587 
1588 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1589 	ts = td->td_sched;
1590 	return (ts->ts_pctcpu);
1591 }
1592 
1593 #ifdef RACCT
1594 /*
1595  * Calculates the contribution to the thread cpu usage for the latest
1596  * (unfinished) second.
1597  */
1598 fixpt_t
1599 sched_pctcpu_delta(struct thread *td)
1600 {
1601 	struct td_sched *ts;
1602 	fixpt_t delta;
1603 	int realstathz;
1604 
1605 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1606 	ts = td->td_sched;
1607 	delta = 0;
1608 	realstathz = stathz ? stathz : hz;
1609 	if (ts->ts_cpticks != 0) {
1610 #if	(FSHIFT >= CCPU_SHIFT)
1611 		delta = (realstathz == 100)
1612 		    ? ((fixpt_t) ts->ts_cpticks) <<
1613 		    (FSHIFT - CCPU_SHIFT) :
1614 		    100 * (((fixpt_t) ts->ts_cpticks)
1615 		    << (FSHIFT - CCPU_SHIFT)) / realstathz;
1616 #else
1617 		delta = ((FSCALE - ccpu) *
1618 		    (ts->ts_cpticks *
1619 		    FSCALE / realstathz)) >> FSHIFT;
1620 #endif
1621 	}
1622 
1623 	return (delta);
1624 }
1625 #endif
1626 
1627 u_int
1628 sched_estcpu(struct thread *td)
1629 {
1630 
1631 	return (td->td_sched->ts_estcpu);
1632 }
1633 
1634 /*
1635  * The actual idle process.
1636  */
1637 void
1638 sched_idletd(void *dummy)
1639 {
1640 	struct pcpuidlestat *stat;
1641 
1642 	THREAD_NO_SLEEPING();
1643 	stat = DPCPU_PTR(idlestat);
1644 	for (;;) {
1645 		mtx_assert(&Giant, MA_NOTOWNED);
1646 
1647 		while (sched_runnable() == 0) {
1648 			cpu_idle(stat->idlecalls + stat->oldidlecalls > 64);
1649 			stat->idlecalls++;
1650 		}
1651 
1652 		mtx_lock_spin(&sched_lock);
1653 		mi_switch(SW_VOL | SWT_IDLE, NULL);
1654 		mtx_unlock_spin(&sched_lock);
1655 	}
1656 }
1657 
1658 /*
1659  * A CPU is entering for the first time or a thread is exiting.
1660  */
1661 void
1662 sched_throw(struct thread *td)
1663 {
1664 	/*
1665 	 * Correct spinlock nesting.  The idle thread context that we are
1666 	 * borrowing was created so that it would start out with a single
1667 	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
1668 	 * explicitly acquired locks in this function, the nesting count
1669 	 * is now 2 rather than 1.  Since we are nested, calling
1670 	 * spinlock_exit() will simply adjust the counts without allowing
1671 	 * spin lock using code to interrupt us.
1672 	 */
1673 	if (td == NULL) {
1674 		mtx_lock_spin(&sched_lock);
1675 		spinlock_exit();
1676 		PCPU_SET(switchtime, cpu_ticks());
1677 		PCPU_SET(switchticks, ticks);
1678 	} else {
1679 		lock_profile_release_lock(&sched_lock.lock_object);
1680 		MPASS(td->td_lock == &sched_lock);
1681 		td->td_lastcpu = td->td_oncpu;
1682 		td->td_oncpu = NOCPU;
1683 	}
1684 	mtx_assert(&sched_lock, MA_OWNED);
1685 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
1686 	cpu_throw(td, choosethread());	/* doesn't return */
1687 }
1688 
1689 void
1690 sched_fork_exit(struct thread *td)
1691 {
1692 
1693 	/*
1694 	 * Finish setting up thread glue so that it begins execution in a
1695 	 * non-nested critical section with sched_lock held but not recursed.
1696 	 */
1697 	td->td_oncpu = PCPU_GET(cpuid);
1698 	sched_lock.mtx_lock = (uintptr_t)td;
1699 	lock_profile_obtain_lock_success(&sched_lock.lock_object,
1700 	    0, 0, __FILE__, __LINE__);
1701 	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
1702 }
1703 
1704 char *
1705 sched_tdname(struct thread *td)
1706 {
1707 #ifdef KTR
1708 	struct td_sched *ts;
1709 
1710 	ts = td->td_sched;
1711 	if (ts->ts_name[0] == '\0')
1712 		snprintf(ts->ts_name, sizeof(ts->ts_name),
1713 		    "%s tid %d", td->td_name, td->td_tid);
1714 	return (ts->ts_name);
1715 #else
1716 	return (td->td_name);
1717 #endif
1718 }
1719 
1720 #ifdef KTR
1721 void
1722 sched_clear_tdname(struct thread *td)
1723 {
1724 	struct td_sched *ts;
1725 
1726 	ts = td->td_sched;
1727 	ts->ts_name[0] = '\0';
1728 }
1729 #endif
1730 
1731 void
1732 sched_affinity(struct thread *td)
1733 {
1734 #ifdef SMP
1735 	struct td_sched *ts;
1736 	int cpu;
1737 
1738 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1739 
1740 	/*
1741 	 * Set the TSF_AFFINITY flag if there is at least one CPU this
1742 	 * thread can't run on.
1743 	 */
1744 	ts = td->td_sched;
1745 	ts->ts_flags &= ~TSF_AFFINITY;
1746 	CPU_FOREACH(cpu) {
1747 		if (!THREAD_CAN_SCHED(td, cpu)) {
1748 			ts->ts_flags |= TSF_AFFINITY;
1749 			break;
1750 		}
1751 	}
1752 
1753 	/*
1754 	 * If this thread can run on all CPUs, nothing else to do.
1755 	 */
1756 	if (!(ts->ts_flags & TSF_AFFINITY))
1757 		return;
1758 
1759 	/* Pinned threads and bound threads should be left alone. */
1760 	if (td->td_pinned != 0 || td->td_flags & TDF_BOUND)
1761 		return;
1762 
1763 	switch (td->td_state) {
1764 	case TDS_RUNQ:
1765 		/*
1766 		 * If we are on a per-CPU runqueue that is in the set,
1767 		 * then nothing needs to be done.
1768 		 */
1769 		if (ts->ts_runq != &runq &&
1770 		    THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu))
1771 			return;
1772 
1773 		/* Put this thread on a valid per-CPU runqueue. */
1774 		sched_rem(td);
1775 		sched_add(td, SRQ_BORING);
1776 		break;
1777 	case TDS_RUNNING:
1778 		/*
1779 		 * See if our current CPU is in the set.  If not, force a
1780 		 * context switch.
1781 		 */
1782 		if (THREAD_CAN_SCHED(td, td->td_oncpu))
1783 			return;
1784 
1785 		td->td_flags |= TDF_NEEDRESCHED;
1786 		if (td != curthread)
1787 			ipi_cpu(cpu, IPI_AST);
1788 		break;
1789 	default:
1790 		break;
1791 	}
1792 #endif
1793 }
1794