xref: /freebsd/sys/kern/sched_4bsd.c (revision eebc148f25c3012b943083b48fbfc13494e9c77f)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1990, 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  * (c) UNIX System Laboratories, Inc.
7  * All or some portions of this file are derived from material licensed
8  * to the University of California by American Telephone and Telegraph
9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10  * the permission of UNIX System Laboratories, Inc.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 
37 #include <sys/cdefs.h>
38 #include "opt_hwpmc_hooks.h"
39 #include "opt_sched.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/cpuset.h>
44 #include <sys/kernel.h>
45 #include <sys/ktr.h>
46 #include <sys/lock.h>
47 #include <sys/kthread.h>
48 #include <sys/mutex.h>
49 #include <sys/proc.h>
50 #include <sys/resourcevar.h>
51 #include <sys/runq.h>
52 #include <sys/sched.h>
53 #include <sys/sdt.h>
54 #include <sys/smp.h>
55 #include <sys/sysctl.h>
56 #include <sys/sx.h>
57 #include <sys/turnstile.h>
58 #include <sys/umtxvar.h>
59 #include <machine/pcb.h>
60 #include <machine/smp.h>
61 
62 #ifdef HWPMC_HOOKS
63 #include <sys/pmckern.h>
64 #endif
65 
66 #ifdef KDTRACE_HOOKS
67 #include <sys/dtrace_bsd.h>
68 int __read_mostly		dtrace_vtime_active;
69 dtrace_vtime_switch_func_t	dtrace_vtime_switch_func;
70 #endif
71 
72 /*
73  * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
74  * the range 100-256 Hz (approximately).
75  */
76 #ifdef SMP
77 #define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
78 #else
79 #define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
80 #endif
81 #define	NICE_WEIGHT		1	/* Priorities per nice level. */
82 #define	ESTCPULIM(e)							\
83 	min((e), INVERSE_ESTCPU_WEIGHT *				\
84 	    (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) +			\
85 	    PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE)			\
86 	    + INVERSE_ESTCPU_WEIGHT - 1)
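/*
 * Editor's note (illustrative, not in the original source): the cap above is
 * INVERSE_ESTCPU_WEIGHT - 1 past the exact product, so the integer quotient
 * ESTCPULIM(e) / INVERSE_ESTCPU_WEIGHT can reach, but never exceed,
 * NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) + PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE.
 * Assuming PUSER == PRI_MIN_TIMESHARE, that is just enough for
 * resetpriority() below to drive even a nice -20 thread to
 * PRI_MAX_TIMESHARE, after which further ticks no longer raise estcpu.
 */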
87 
88 #define	TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))
89 
90 /*
91  * The schedulable entity that runs a context.
92  * This is an extension to the thread structure and is tailored to
93  * the requirements of this scheduler.
94  * All fields are protected by the scheduler lock.
95  */
96 struct td_sched {
97 	fixpt_t		ts_pctcpu;	/* %cpu during p_swtime. */
98 	u_int		ts_estcpu;	/* Estimated cpu utilization. */
99 	int		ts_cpticks;	/* Ticks of cpu time. */
100 	int		ts_slptime;	/* Seconds !RUNNING. */
101 	int		ts_slice;	/* Remaining part of time slice. */
102 	int		ts_flags;
103 	struct runq	*ts_runq;	/* runq the thread is currently on */
104 #ifdef KTR
105 	char		ts_name[TS_NAME_LEN];
106 #endif
107 };
108 
109 /* flags kept in td_flags */
110 #define TDF_DIDRUN	TDF_SCHED0	/* thread actually ran. */
111 #define TDF_BOUND	TDF_SCHED1	/* Bound to one CPU. */
112 #define	TDF_SLICEEND	TDF_SCHED2	/* Thread time slice is over. */
113 
114 /* flags kept in ts_flags */
115 #define	TSF_AFFINITY	0x0001		/* Has a non-"full" CPU set. */
116 
117 #define SKE_RUNQ_PCPU(ts)						\
118     ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
119 
120 #define	THREAD_CAN_SCHED(td, cpu)	\
121     CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
122 
123 _Static_assert(sizeof(struct thread) + sizeof(struct td_sched) <=
124     sizeof(struct thread0_storage),
125     "increase struct thread0_storage.t0st_sched size");
126 
127 static struct mtx sched_lock;
128 
129 static int	realstathz = 127; /* stathz is sometimes 0; we then run off of hz. */
130 static int	sched_tdcnt;	/* Total runnable threads in the system. */
131 static int	sched_slice = 12; /* Thread run time before rescheduling. */
132 
133 static void	setup_runqs(void);
134 static void	schedcpu(void);
135 static void	schedcpu_thread(void);
136 static void	sched_priority(struct thread *td, u_char prio);
137 static void	sched_setup(void *dummy);
138 static void	maybe_resched(struct thread *td);
139 static void	updatepri(struct thread *td);
140 static void	resetpriority(struct thread *td);
141 static void	resetpriority_thread(struct thread *td);
142 #ifdef SMP
143 static int	sched_pickcpu(struct thread *td);
144 static int	forward_wakeup(int cpunum);
145 static void	kick_other_cpu(int pri, int cpuid);
146 #endif
147 
148 static struct kproc_desc sched_kp = {
149         "schedcpu",
150         schedcpu_thread,
151         NULL
152 };
153 SYSINIT(schedcpu, SI_SUB_LAST, SI_ORDER_FIRST, kproc_start,
154     &sched_kp);
155 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
156 
157 static void sched_initticks(void *dummy);
158 SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
159     NULL);
160 
161 /*
162  * Global run queue.
163  */
164 static struct runq runq;
165 
166 #ifdef SMP
167 /*
168  * Per-CPU run queues
169  */
170 static struct runq runq_pcpu[MAXCPU];
171 long runq_length[MAXCPU];
172 
173 static cpuset_t idle_cpus_mask;
174 #endif
175 
176 struct pcpuidlestat {
177 	u_int idlecalls;
178 	u_int oldidlecalls;
179 };
180 DPCPU_DEFINE_STATIC(struct pcpuidlestat, idlestat);
181 
182 static void
183 setup_runqs(void)
184 {
185 #ifdef SMP
186 	int i;
187 
188 	for (i = 0; i < MAXCPU; ++i)
189 		runq_init(&runq_pcpu[i]);
190 #endif
191 
192 	runq_init(&runq);
193 }
194 
195 static int
196 sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
197 {
198 	int error, new_val, period;
199 
200 	period = 1000000 / realstathz;
201 	new_val = period * sched_slice;
202 	error = sysctl_handle_int(oidp, &new_val, 0, req);
203 	if (error != 0 || req->newptr == NULL)
204 		return (error);
205 	if (new_val <= 0)
206 		return (EINVAL);
207 	sched_slice = imax(1, (new_val + period / 2) / period);
208 	hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
209 	    realstathz);
210 	return (0);
211 }
212 
213 SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
214     "Scheduler");
215 
216 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
217     "Scheduler name");
218 SYSCTL_PROC(_kern_sched, OID_AUTO, quantum,
219     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
220     sysctl_kern_quantum, "I",
221     "Quantum for timeshare threads in microseconds");
222 SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
223     "Quantum for timeshare threads in stathz ticks");
224 #ifdef SMP
225 /* Enable forwarding of wakeups to all other cpus */
226 static SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup,
227     CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
228     "Kernel SMP");
229 
230 static int runq_fuzz = 1;
231 SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
232 
233 static int forward_wakeup_enabled = 1;
234 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
235 	   &forward_wakeup_enabled, 0,
236 	   "Forwarding of wakeup to idle CPUs");
237 
238 static int forward_wakeups_requested = 0;
239 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
240 	   &forward_wakeups_requested, 0,
241 	   "Requests for Forwarding of wakeup to idle CPUs");
242 
243 static int forward_wakeups_delivered = 0;
244 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
245 	   &forward_wakeups_delivered, 0,
246 	   "Completed Forwarding of wakeup to idle CPUs");
247 
248 static int forward_wakeup_use_mask = 1;
249 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
250 	   &forward_wakeup_use_mask, 0,
251 	   "Use the mask of idle cpus");
252 
253 static int forward_wakeup_use_loop = 0;
254 SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
255 	   &forward_wakeup_use_loop, 0,
256 	   "Use a loop to find idle cpus");
257 
258 #endif
259 #if 0
260 static int sched_followon = 0;
261 SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
262 	   &sched_followon, 0,
263 	   "allow threads to share a quantum");
264 #endif
265 
266 SDT_PROVIDER_DEFINE(sched);
267 
268 SDT_PROBE_DEFINE3(sched, , , change__pri, "struct thread *",
269     "struct proc *", "uint8_t");
270 SDT_PROBE_DEFINE3(sched, , , dequeue, "struct thread *",
271     "struct proc *", "void *");
272 SDT_PROBE_DEFINE4(sched, , , enqueue, "struct thread *",
273     "struct proc *", "void *", "int");
274 SDT_PROBE_DEFINE4(sched, , , lend__pri, "struct thread *",
275     "struct proc *", "uint8_t", "struct thread *");
276 SDT_PROBE_DEFINE2(sched, , , load__change, "int", "int");
277 SDT_PROBE_DEFINE2(sched, , , off__cpu, "struct thread *",
278     "struct proc *");
279 SDT_PROBE_DEFINE(sched, , , on__cpu);
280 SDT_PROBE_DEFINE(sched, , , remain__cpu);
281 SDT_PROBE_DEFINE2(sched, , , surrender, "struct thread *",
282     "struct proc *");
283 
284 static __inline void
285 sched_load_add(void)
286 {
287 
288 	sched_tdcnt++;
289 	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
290 	SDT_PROBE2(sched, , , load__change, NOCPU, sched_tdcnt);
291 }
292 
293 static __inline void
294 sched_load_rem(void)
295 {
296 
297 	sched_tdcnt--;
298 	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
299 	SDT_PROBE2(sched, , , load__change, NOCPU, sched_tdcnt);
300 }
301 /*
302  * Arrange to reschedule if necessary, taking the priorities and
303  * schedulers into account.
304  */
305 static void
306 maybe_resched(struct thread *td)
307 {
308 
309 	THREAD_LOCK_ASSERT(td, MA_OWNED);
310 	if (td->td_priority < curthread->td_priority)
311 		ast_sched_locked(curthread, TDA_SCHED);
312 }
313 
314 /*
315  * This function is called when a thread is about to be put on run queue
316  * because it has been made runnable or its priority has been adjusted.  It
317  * determines if the new thread should preempt the current thread.  If so,
318  * it sets td_owepreempt to request a preemption.
319  */
320 int
321 maybe_preempt(struct thread *td)
322 {
323 #ifdef PREEMPTION
324 	struct thread *ctd;
325 	int cpri, pri;
326 
327 	/*
328 	 * The new thread should not preempt the current thread if any of the
329 	 * following conditions are true:
330 	 *
331 	 *  - The kernel is in the throes of crashing (panicstr).
332 	 *  - The current thread has a higher (numerically lower) or
333 	 *    equivalent priority.  Note that this prevents curthread from
334 	 *    trying to preempt to itself.
335 	 *  - The current thread has an inhibitor set or is in the process of
336 	 *    exiting.  In this case, the current thread is about to switch
337 	 *    out anyway, so there's no point in preempting.  If we did,
338 	 *    the current thread would not be properly resumed either, so
339 	 *    just avoid that whole landmine.
340 	 *  - If the new thread's priority is not a realtime priority and
341 	 *    the current thread's priority is not an idle priority and
342 	 *    FULL_PREEMPTION is disabled.
343 	 *
344 	 * If all of these conditions are false, but the current thread is in
345 	 * a nested critical section, then we have to defer the preemption
346 	 * until we exit the critical section.  Otherwise, switch immediately
347 	 * to the new thread.
348 	 */
349 	ctd = curthread;
350 	THREAD_LOCK_ASSERT(td, MA_OWNED);
351 	KASSERT((td->td_inhibitors == 0),
352 			("maybe_preempt: trying to run inhibited thread"));
353 	pri = td->td_priority;
354 	cpri = ctd->td_priority;
355 	if (KERNEL_PANICKED() || pri >= cpri /* || dumping */ ||
356 	    TD_IS_INHIBITED(ctd))
357 		return (0);
358 #ifndef FULL_PREEMPTION
359 	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
360 		return (0);
361 #endif
362 
363 	CTR0(KTR_PROC, "maybe_preempt: scheduling preemption");
364 	ctd->td_owepreempt = 1;
365 	return (1);
366 #else
367 	return (0);
368 #endif
369 }
370 
371 /*
372  * Constants for digital decay and forget:
373  *	90% of (ts_estcpu) usage in 5 * loadav time
374  *	95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
375  *          Note that, as ps(1) mentions, this can let percentages
376  *          total over 100% (I've seen 137.9% for 3 processes).
377  *
378  * Note that schedclock() updates ts_estcpu and p_cpticks asynchronously.
379  *
380  * We wish to decay away 90% of ts_estcpu in (5 * loadavg) seconds.
381  * That is, the system wants to compute a value of decay such
382  * that the following for loop:
383  * 	for (i = 0; i < (5 * loadavg); i++)
384  * 		ts_estcpu *= decay;
385  * will compute
386  * 	ts_estcpu *= 0.1;
387  * for all values of loadavg:
388  *
389  * Mathematically this loop can be expressed by saying:
390  * 	decay ** (5 * loadavg) ~= .1
391  *
392  * The system computes decay as:
393  * 	decay = (2 * loadavg) / (2 * loadavg + 1)
394  *
395  * We wish to prove that the system's computation of decay
396  * will always fulfill the equation:
397  * 	decay ** (5 * loadavg) ~= .1
398  *
399  * If we compute b as:
400  * 	b = 2 * loadavg
401  * then
402  * 	decay = b / (b + 1)
403  *
404  * We now need to prove two things:
405  *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
406  *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
407  *
408  * Facts:
409  *         For x close to zero, exp(x) =~ 1 + x, since
410  *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
411  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
412  *         For x close to zero, ln(1+x) =~ x, since
413  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
414  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
415  *         ln(.1) =~ -2.30
416  *
417  * Proof of (1):
418  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
419  *	solving for factor,
420  *      ln(factor) =~ (-2.30/5*loadav), or
421  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
422  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
423  *
424  * Proof of (2):
425  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
426  *	solving for power,
427  *      power*ln(b/(b+1)) =~ -2.30, or
428  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
429  *
430  * Actual power values for the implemented algorithm are as follows:
431  *      loadav: 1       2       3       4
432  *      power:  5.68    10.32   14.94   19.55
433  */
434 
435 /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
436 #define	loadfactor(loadav)	(2 * (loadav))
437 #define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
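/*
 * Editor's sketch (illustrative only, not part of the original file): the
 * hypothetical helper below, wrapped in "#if 0" like the disabled block
 * further down, runs the decay derived above.  Applying decay_cpu()
 * 5 * loadav times leaves roughly a tenth of the starting estcpu (about 13%
 * for loadav 1, consistent with the "power: 5.68" row in the table above).
 */
#if 0
static u_int
decay_example(u_int estcpu, int loadav)
{
	/* The load average is a fixed-point value scaled by FSCALE. */
	fixpt_t loadfac = loadfactor((fixpt_t)loadav * FSCALE);
	int i;

	for (i = 0; i < 5 * loadav; i++)
		estcpu = decay_cpu(loadfac, estcpu);
	return (estcpu);
}
#endif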
438 
439 /* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
440 static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
441 SYSCTL_UINT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0,
442     "Decay factor used for updating %CPU");
443 
444 /*
445  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
446  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
447  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
448  *
449  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
450  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
451  *
452  * If you don't want to bother with the faster/more-accurate formula, you
453  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
454  * (more general) method of calculating the %age of CPU used by a process.
455  */
456 #define	CCPU_SHIFT	11
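/*
 * Worked example (editor's note, assuming the stock FSHIFT of 11, so that
 * CCPU_SHIFT == FSHIFT): with realstathz == 100, schedcpu() below first
 * multiplies ts_pctcpu by ccpu/FSCALE (~0.951) and then adds the past
 * second's ts_cpticks directly.  A thread that is on CPU for the whole
 * second therefore converges on p = 0.951 * p + 100, i.e. roughly FSCALE,
 * which ps(1) and ttyinfo() report as ~100% CPU.
 */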
457 
458 /*
459  * Recompute process priorities, every hz ticks.
460  * MP-safe, called without the Giant mutex.
461  */
462 /* ARGSUSED */
463 static void
464 schedcpu(void)
465 {
466 	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
467 	struct thread *td;
468 	struct proc *p;
469 	struct td_sched *ts;
470 	int awake;
471 
472 	sx_slock(&allproc_lock);
473 	FOREACH_PROC_IN_SYSTEM(p) {
474 		PROC_LOCK(p);
475 		if (p->p_state == PRS_NEW) {
476 			PROC_UNLOCK(p);
477 			continue;
478 		}
479 		FOREACH_THREAD_IN_PROC(p, td) {
480 			awake = 0;
481 			ts = td_get_sched(td);
482 			thread_lock(td);
483 			/*
484 			 * Increment sleep time (if sleeping).  We
485 			 * ignore overflow, as above.
486 			 */
487 			/*
488 			 * The td_sched slptimes are not touched in wakeup
489 			 * because the thread may not HAVE everything in
490 			 * memory? XXX I think this is out of date.
491 			 */
492 			if (TD_ON_RUNQ(td)) {
493 				awake = 1;
494 				td->td_flags &= ~TDF_DIDRUN;
495 			} else if (TD_IS_RUNNING(td)) {
496 				awake = 1;
497 				/* Do not clear TDF_DIDRUN */
498 			} else if (td->td_flags & TDF_DIDRUN) {
499 				awake = 1;
500 				td->td_flags &= ~TDF_DIDRUN;
501 			}
502 
503 			/*
504 			 * ts_pctcpu is only for ps and ttyinfo().
505 			 */
506 			ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
507 			/*
508 			 * If the td_sched has been idle the entire second,
509 			 * stop recalculating its priority until
510 			 * it wakes up.
511 			 */
512 			if (ts->ts_cpticks != 0) {
513 #if	(FSHIFT >= CCPU_SHIFT)
514 				ts->ts_pctcpu += (realstathz == 100)
515 				    ? ((fixpt_t) ts->ts_cpticks) <<
516 				    (FSHIFT - CCPU_SHIFT) :
517 				    100 * (((fixpt_t) ts->ts_cpticks)
518 				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
519 #else
520 				ts->ts_pctcpu += ((FSCALE - ccpu) *
521 				    (ts->ts_cpticks *
522 				    FSCALE / realstathz)) >> FSHIFT;
523 #endif
524 				ts->ts_cpticks = 0;
525 			}
526 			/*
527 			 * If there are ANY running threads in this process,
528 			 * then don't count it as sleeping.
529 			 * XXX: this is broken.
530 			 */
531 			if (awake) {
532 				if (ts->ts_slptime > 1) {
533 					/*
534 					 * In an ideal world, this should not
535 					 * happen, because whoever woke us
536 					 * up from the long sleep should have
537 					 * unwound the slptime and reset our
538 					 * priority before we run at the stale
539 					 * priority.  Should KASSERT at some
540 					 * point when all the cases are fixed.
541 					 */
542 					updatepri(td);
543 				}
544 				ts->ts_slptime = 0;
545 			} else
546 				ts->ts_slptime++;
547 			if (ts->ts_slptime > 1) {
548 				thread_unlock(td);
549 				continue;
550 			}
551 			ts->ts_estcpu = decay_cpu(loadfac, ts->ts_estcpu);
552 			resetpriority(td);
553 			resetpriority_thread(td);
554 			thread_unlock(td);
555 		}
556 		PROC_UNLOCK(p);
557 	}
558 	sx_sunlock(&allproc_lock);
559 }
560 
561 /*
562  * Main loop for a kthread that executes schedcpu once a second.
563  */
564 static void
565 schedcpu_thread(void)
566 {
567 
568 	for (;;) {
569 		schedcpu();
570 		pause("-", hz);
571 	}
572 }
573 
574 /*
575  * Recalculate the priority of a process after it has slept for a while.
576  * For all load averages >= 1 and max ts_estcpu of 255, sleeping for at
577  * least six times the loadfactor will decay ts_estcpu to zero.
578  */
579 static void
580 updatepri(struct thread *td)
581 {
582 	struct td_sched *ts;
583 	fixpt_t loadfac;
584 	unsigned int newcpu;
585 
586 	ts = td_get_sched(td);
587 	loadfac = loadfactor(averunnable.ldavg[0]);
588 	if (ts->ts_slptime > 5 * loadfac)
589 		ts->ts_estcpu = 0;
590 	else {
591 		newcpu = ts->ts_estcpu;
592 		ts->ts_slptime--;	/* was incremented in schedcpu() */
593 		while (newcpu && --ts->ts_slptime)
594 			newcpu = decay_cpu(loadfac, newcpu);
595 		ts->ts_estcpu = newcpu;
596 	}
597 }
598 
599 /*
600  * Compute the priority of a process when running in user mode.
601  * Arrange to reschedule if the resulting priority is better
602  * than that of the current process.
603  */
604 static void
605 resetpriority(struct thread *td)
606 {
607 	u_int newpriority;
608 
609 	if (td->td_pri_class != PRI_TIMESHARE)
610 		return;
611 	newpriority = PUSER +
612 	    td_get_sched(td)->ts_estcpu / INVERSE_ESTCPU_WEIGHT +
613 	    NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
614 	newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
615 	    PRI_MAX_TIMESHARE);
616 	sched_user_prio(td, newpriority);
617 }
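/*
 * Worked example (editor's note, assuming PUSER == PRI_MIN_TIMESHARE): a
 * fully decayed thread (ts_estcpu == 0) at nice 0 lands at
 * PUSER + NICE_WEIGHT * (0 - PRIO_MIN) = PUSER + 20.  Every further
 * INVERSE_ESTCPU_WEIGHT of ts_estcpu costs one priority level and each
 * positive nice step costs NICE_WEIGHT more, until the clamp at
 * PRI_MAX_TIMESHARE is hit.
 */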
618 
619 /*
620  * Update the thread's priority when the associated process's user
621  * priority changes.
622  */
623 static void
624 resetpriority_thread(struct thread *td)
625 {
626 
627 	/* Only change threads with a time sharing user priority. */
628 	if (td->td_priority < PRI_MIN_TIMESHARE ||
629 	    td->td_priority > PRI_MAX_TIMESHARE)
630 		return;
631 
632 	/* XXX the whole needresched thing is broken, but not silly. */
633 	maybe_resched(td);
634 
635 	sched_prio(td, td->td_user_pri);
636 }
637 
638 /* ARGSUSED */
639 static void
640 sched_setup(void *dummy)
641 {
642 
643 	setup_runqs();
644 
645 	/* Account for thread0. */
646 	sched_load_add();
647 }
648 
649 /*
650  * This routine determines time constants after stathz and hz are setup.
651  */
652 static void
653 sched_initticks(void *dummy)
654 {
655 
656 	realstathz = stathz ? stathz : hz;
657 	sched_slice = realstathz / 10;	/* ~100ms */
658 	hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
659 	    realstathz);
660 }
661 
662 /* External interfaces start here */
663 
664 /*
665  * Very early in the boot some setup of scheduler-specific
666  * parts of proc0 and of some scheduler resources needs to be done.
667  * Called from:
668  *  proc0_init()
669  */
670 void
671 schedinit(void)
672 {
673 
674 	/*
675 	 * Set up the scheduler specific parts of thread0.
676 	 */
677 	thread0.td_lock = &sched_lock;
678 	td_get_sched(&thread0)->ts_slice = sched_slice;
679 	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN);
680 }
681 
682 void
683 schedinit_ap(void)
684 {
685 
686 	/* Nothing needed. */
687 }
688 
689 bool
690 sched_runnable(void)
691 {
692 #ifdef SMP
693 	return (runq_not_empty(&runq) ||
694 	    runq_not_empty(&runq_pcpu[PCPU_GET(cpuid)]));
695 #else
696 	return (runq_not_empty(&runq));
697 #endif
698 }
699 
700 int
701 sched_rr_interval(void)
702 {
703 
704 	/* Convert sched_slice from stathz to hz. */
705 	return (imax(1, (sched_slice * hz + realstathz / 2) / realstathz));
706 }
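/*
 * Editor's example: with hz = 1000 (a common default), realstathz = 127 and
 * sched_slice = 12, this returns (12 * 1000 + 63) / 127 = 94 ticks, so the
 * round-robin interval is roughly 94 ms.
 */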
707 
708 SCHED_STAT_DEFINE(ithread_demotions, "Interrupt thread priority demotions");
709 SCHED_STAT_DEFINE(ithread_preemptions,
710     "Interrupt thread preemptions due to time-sharing");
711 
712 /*
713  * We adjust the priority of the current process.  The priority of a
714  * process gets worse as it accumulates CPU time.  The cpu usage
715  * estimator (ts_estcpu) is increased here.  resetpriority() will
716  * compute a different priority each time ts_estcpu increases by
717  * INVERSE_ESTCPU_WEIGHT (until PRI_MAX_TIMESHARE is reached).  The
718  * cpu usage estimator ramps up quite quickly when the process is
719  * running (linearly), and decays away exponentially, at a rate which
720  * is proportionally slower when the system is busy.  The basic
721  * principle is that the system will 90% forget that the process used
722  * a lot of CPU time in 5 * loadav seconds.  This causes the system to
723  * favor processes which haven't run much recently, and to round-robin
724  * among other processes.
725  */
726 static void
727 sched_clock_tick(struct thread *td)
728 {
729 	struct pcpuidlestat *stat;
730 	struct td_sched *ts;
731 
732 	THREAD_LOCK_ASSERT(td, MA_OWNED);
733 	ts = td_get_sched(td);
734 
735 	ts->ts_cpticks++;
736 	ts->ts_estcpu = ESTCPULIM(ts->ts_estcpu + 1);
737 	if ((ts->ts_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
738 		resetpriority(td);
739 		resetpriority_thread(td);
740 	}
741 
742 	/*
743 	 * Force a context switch if the current thread has used up a full
744 	 * time slice (default is 100ms).
745 	 */
746 	if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) {
747 		ts->ts_slice = sched_slice;
748 
749 		/*
750 		 * If an ithread uses a full quantum, demote its
751 		 * priority and preempt it.
752 		 */
753 		if (PRI_BASE(td->td_pri_class) == PRI_ITHD) {
754 			SCHED_STAT_INC(ithread_preemptions);
755 			td->td_owepreempt = 1;
756 			if (td->td_base_pri + RQ_PPQ < PRI_MAX_ITHD) {
757 				SCHED_STAT_INC(ithread_demotions);
758 				sched_prio(td, td->td_base_pri + RQ_PPQ);
759 			}
760 		} else {
761 			td->td_flags |= TDF_SLICEEND;
762 			ast_sched_locked(td, TDA_SCHED);
763 		}
764 	}
765 
766 	stat = DPCPU_PTR(idlestat);
767 	stat->oldidlecalls = stat->idlecalls;
768 	stat->idlecalls = 0;
769 }
770 
771 void
772 sched_clock(struct thread *td, int cnt)
773 {
774 
775 	for ( ; cnt > 0; cnt--)
776 		sched_clock_tick(td);
777 }
778 
779 /*
780  * Charge child's scheduling CPU usage to parent.
781  */
782 void
783 sched_exit(struct proc *p, struct thread *td)
784 {
785 
786 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit",
787 	    "prio:%d", td->td_priority);
788 
789 	PROC_LOCK_ASSERT(p, MA_OWNED);
790 	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
791 }
792 
793 void
794 sched_exit_thread(struct thread *td, struct thread *child)
795 {
796 
797 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
798 	    "prio:%d", child->td_priority);
799 	thread_lock(td);
800 	td_get_sched(td)->ts_estcpu = ESTCPULIM(td_get_sched(td)->ts_estcpu +
801 	    td_get_sched(child)->ts_estcpu);
802 	thread_unlock(td);
803 	thread_lock(child);
804 	if ((child->td_flags & TDF_NOLOAD) == 0)
805 		sched_load_rem();
806 	thread_unlock(child);
807 }
808 
809 void
810 sched_fork(struct thread *td, struct thread *childtd)
811 {
812 	sched_fork_thread(td, childtd);
813 }
814 
815 void
816 sched_fork_thread(struct thread *td, struct thread *childtd)
817 {
818 	struct td_sched *ts, *tsc;
819 
820 	childtd->td_oncpu = NOCPU;
821 	childtd->td_lastcpu = NOCPU;
822 	childtd->td_lock = &sched_lock;
823 	childtd->td_cpuset = cpuset_ref(td->td_cpuset);
824 	childtd->td_domain.dr_policy = td->td_cpuset->cs_domain;
825 	childtd->td_priority = childtd->td_base_pri;
826 	ts = td_get_sched(childtd);
827 	bzero(ts, sizeof(*ts));
828 	tsc = td_get_sched(td);
829 	ts->ts_estcpu = tsc->ts_estcpu;
830 	ts->ts_flags |= (tsc->ts_flags & TSF_AFFINITY);
831 	ts->ts_slice = 1;
832 }
833 
834 void
835 sched_nice(struct proc *p, int nice)
836 {
837 	struct thread *td;
838 
839 	PROC_LOCK_ASSERT(p, MA_OWNED);
840 	p->p_nice = nice;
841 	FOREACH_THREAD_IN_PROC(p, td) {
842 		thread_lock(td);
843 		resetpriority(td);
844 		resetpriority_thread(td);
845 		thread_unlock(td);
846 	}
847 }
848 
849 void
850 sched_class(struct thread *td, int class)
851 {
852 	THREAD_LOCK_ASSERT(td, MA_OWNED);
853 	td->td_pri_class = class;
854 }
855 
856 /*
857  * Adjust the priority of a thread.
858  */
859 static void
860 sched_priority(struct thread *td, u_char prio)
861 {
862 
863 	KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
864 	    "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
865 	    sched_tdname(curthread));
866 	SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio);
867 	if (td != curthread && prio > td->td_priority) {
868 		KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
869 		    "lend prio", "prio:%d", td->td_priority, "new prio:%d",
870 		    prio, KTR_ATTR_LINKED, sched_tdname(td));
871 		SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio,
872 		    curthread);
873 	}
874 	THREAD_LOCK_ASSERT(td, MA_OWNED);
875 	if (td->td_priority == prio)
876 		return;
877 	td->td_priority = prio;
878 	if (TD_ON_RUNQ(td) && td->td_rqindex != RQ_PRI_TO_QUEUE_IDX(prio)) {
879 		sched_rem(td);
880 		sched_add(td, SRQ_BORING | SRQ_HOLDTD);
881 	}
882 }
883 
884 /*
885  * Update a thread's priority when it is lent another thread's
886  * priority.
887  */
888 void
889 sched_lend_prio(struct thread *td, u_char prio)
890 {
891 
892 	td->td_flags |= TDF_BORROWING;
893 	sched_priority(td, prio);
894 }
895 
896 /*
897  * Restore a thread's priority when priority propagation is
898  * over.  The prio argument is the minimum priority the thread
899  * needs to have to satisfy other possible priority lending
900  * requests.  If the thread's regular priority is less
901  * important than prio, the thread will keep a priority boost
902  * of prio.
903  */
904 void
905 sched_unlend_prio(struct thread *td, u_char prio)
906 {
907 	u_char base_pri;
908 
909 	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
910 	    td->td_base_pri <= PRI_MAX_TIMESHARE)
911 		base_pri = td->td_user_pri;
912 	else
913 		base_pri = td->td_base_pri;
914 	if (prio >= base_pri) {
915 		td->td_flags &= ~TDF_BORROWING;
916 		sched_prio(td, base_pri);
917 	} else
918 		sched_lend_prio(td, prio);
919 }
920 
921 void
922 sched_prio(struct thread *td, u_char prio)
923 {
924 	u_char oldprio;
925 
926 	/* First, update the base priority. */
927 	td->td_base_pri = prio;
928 
929 	/*
930 	 * If the thread is borrowing another thread's priority, don't ever
931 	 * lower the priority.
932 	 */
933 	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
934 		return;
935 
936 	/* Change the real priority. */
937 	oldprio = td->td_priority;
938 	sched_priority(td, prio);
939 
940 	/*
941 	 * If the thread is on a turnstile, then let the turnstile update
942 	 * its state.
943 	 */
944 	if (TD_ON_LOCK(td) && oldprio != prio)
945 		turnstile_adjust(td, oldprio);
946 }
947 
948 void
949 sched_ithread_prio(struct thread *td, u_char prio)
950 {
951 	THREAD_LOCK_ASSERT(td, MA_OWNED);
952 	MPASS(td->td_pri_class == PRI_ITHD);
953 	td->td_base_ithread_pri = prio;
954 	sched_prio(td, prio);
955 }
956 
957 void
958 sched_user_prio(struct thread *td, u_char prio)
959 {
960 
961 	THREAD_LOCK_ASSERT(td, MA_OWNED);
962 	td->td_base_user_pri = prio;
963 	if (td->td_lend_user_pri <= prio)
964 		return;
965 	td->td_user_pri = prio;
966 }
967 
968 void
969 sched_lend_user_prio(struct thread *td, u_char prio)
970 {
971 
972 	THREAD_LOCK_ASSERT(td, MA_OWNED);
973 	td->td_lend_user_pri = prio;
974 	td->td_user_pri = min(prio, td->td_base_user_pri);
975 	if (td->td_priority > td->td_user_pri)
976 		sched_prio(td, td->td_user_pri);
977 	else if (td->td_priority != td->td_user_pri)
978 		ast_sched_locked(td, TDA_SCHED);
979 }
980 
981 /*
982  * Like the above but first check if there is anything to do.
983  */
984 void
985 sched_lend_user_prio_cond(struct thread *td, u_char prio)
986 {
987 
988 	if (td->td_lend_user_pri == prio)
989 		return;
990 
991 	thread_lock(td);
992 	sched_lend_user_prio(td, prio);
993 	thread_unlock(td);
994 }
995 
996 void
997 sched_sleep(struct thread *td, int pri)
998 {
999 
1000 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1001 	td->td_slptick = ticks;
1002 	td_get_sched(td)->ts_slptime = 0;
1003 	if (pri != 0 && PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
1004 		sched_prio(td, pri);
1005 }
1006 
1007 void
1008 sched_switch(struct thread *td, int flags)
1009 {
1010 	struct thread *newtd;
1011 	struct mtx *tmtx;
1012 	int preempted;
1013 
1014 	tmtx = &sched_lock;
1015 
1016 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1017 
1018 	td->td_lastcpu = td->td_oncpu;
1019 	preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
1020 	    (flags & SW_PREEMPT) != 0;
1021 	td->td_flags &= ~TDF_SLICEEND;
1022 	ast_unsched_locked(td, TDA_SCHED);
1023 	td->td_owepreempt = 0;
1024 	td->td_oncpu = NOCPU;
1025 
1026 	/*
1027 	 * At the last moment, if this thread is still marked RUNNING,
1028 	 * then put it back on the run queue as it has not been suspended
1029 	 * or stopped or any thing else similar.  We never put the idle
1030 	 * threads on the run queue, however.
1031 	 */
1032 	if (td->td_flags & TDF_IDLETD) {
1033 		TD_SET_CAN_RUN(td);
1034 #ifdef SMP
1035 		CPU_CLR(PCPU_GET(cpuid), &idle_cpus_mask);
1036 #endif
1037 	} else {
1038 		if (TD_IS_RUNNING(td)) {
1039 			/* Put us back on the run queue. */
1040 			sched_add(td, SRQ_HOLDTD | SRQ_OURSELF | SRQ_YIELDING |
1041 			    (preempted ? SRQ_PREEMPTED : 0));
1042 		}
1043 	}
1044 
1045 	/*
1046 	 * Switch to the sched lock to fix things up and pick
1047 	 * a new thread.  Block the td_lock in order to avoid
1048 	 * breaking the critical path.
1049 	 */
1050 	if (td->td_lock != &sched_lock) {
1051 		mtx_lock_spin(&sched_lock);
1052 		tmtx = thread_lock_block(td);
1053 		mtx_unlock_spin(tmtx);
1054 	}
1055 
1056 	if ((td->td_flags & TDF_NOLOAD) == 0)
1057 		sched_load_rem();
1058 
1059 	newtd = choosethread();
1060 	MPASS(newtd->td_lock == &sched_lock);
1061 
1062 #if (KTR_COMPILE & KTR_SCHED) != 0
1063 	if (TD_IS_IDLETHREAD(td))
1064 		KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
1065 		    "prio:%d", td->td_priority);
1066 	else
1067 		KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td),
1068 		    "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
1069 		    "lockname:\"%s\"", td->td_lockname);
1070 #endif
1071 
1072 	if (td != newtd) {
1073 #ifdef	HWPMC_HOOKS
1074 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1075 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1076 #endif
1077 
1078 		SDT_PROBE2(sched, , , off__cpu, newtd, newtd->td_proc);
1079 
1080                 /* I feel sleepy */
1081 		lock_profile_release_lock(&sched_lock.lock_object, true);
1082 #ifdef KDTRACE_HOOKS
1083 		/*
1084 		 * If DTrace has set the active vtime enum to anything
1085 		 * other than INACTIVE (0), then it should have set the
1086 		 * function to call.
1087 		 */
1088 		if (dtrace_vtime_active)
1089 			(*dtrace_vtime_switch_func)(newtd);
1090 #endif
1091 
1092 		cpu_switch(td, newtd, tmtx);
1093 		lock_profile_obtain_lock_success(&sched_lock.lock_object, true,
1094 		    0, 0, __FILE__, __LINE__);
1095 		/*
1096 		 * Where am I?  What year is it?
1097 		 * We are in the same thread that went to sleep above,
1098 		 * but any amount of time may have passed. All our context
1099 		 * will still be available as will local variables.
1100 		 * PCPU values however may have changed as we may have
1101 		 * changed CPU so don't trust cached values of them.
1102 		 * New threads will go to fork_exit() instead of here
1103 		 * so if you change things here you may need to change
1104 		 * things there too.
1105 		 *
1106 		 * If the thread above was exiting it will never wake
1107 		 * up again here, so either it has saved everything it
1108 		 * needed to, or the thread_wait() or wait() will
1109 		 * need to reap it.
1110 		 */
1111 
1112 		SDT_PROBE0(sched, , , on__cpu);
1113 #ifdef	HWPMC_HOOKS
1114 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1115 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1116 #endif
1117 	} else {
1118 		td->td_lock = &sched_lock;
1119 		SDT_PROBE0(sched, , , remain__cpu);
1120 	}
1121 
1122 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
1123 	    "prio:%d", td->td_priority);
1124 
1125 #ifdef SMP
1126 	if (td->td_flags & TDF_IDLETD)
1127 		CPU_SET(PCPU_GET(cpuid), &idle_cpus_mask);
1128 #endif
1129 	sched_lock.mtx_lock = (uintptr_t)td;
1130 	td->td_oncpu = PCPU_GET(cpuid);
1131 	spinlock_enter();
1132 	mtx_unlock_spin(&sched_lock);
1133 }
1134 
1135 void
1136 sched_wakeup(struct thread *td, int srqflags)
1137 {
1138 	struct td_sched *ts;
1139 
1140 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1141 	ts = td_get_sched(td);
1142 	if (ts->ts_slptime > 1) {
1143 		updatepri(td);
1144 		resetpriority(td);
1145 	}
1146 	td->td_slptick = 0;
1147 	ts->ts_slptime = 0;
1148 	ts->ts_slice = sched_slice;
1149 
1150 	/*
1151 	 * When resuming an idle ithread, restore its base ithread
1152 	 * priority.
1153 	 */
1154 	if (PRI_BASE(td->td_pri_class) == PRI_ITHD &&
1155 	    td->td_base_pri != td->td_base_ithread_pri)
1156 		sched_prio(td, td->td_base_ithread_pri);
1157 
1158 	sched_add(td, srqflags);
1159 }
1160 
1161 #ifdef SMP
1162 static int
1163 forward_wakeup(int cpunum)
1164 {
1165 	struct pcpu *pc;
1166 	cpuset_t dontuse, map, map2;
1167 	u_int id, me;
1168 	int iscpuset;
1169 
1170 	mtx_assert(&sched_lock, MA_OWNED);
1171 
1172 	CTR0(KTR_RUNQ, "forward_wakeup()");
1173 
1174 	if ((!forward_wakeup_enabled) ||
1175 	     (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
1176 		return (0);
1177 	if (!smp_started || KERNEL_PANICKED())
1178 		return (0);
1179 
1180 	forward_wakeups_requested++;
1181 
1182 	/*
1183 	 * Check the idle mask we received against what we calculated
1184 	 * before in the old version.
1185 	 */
1186 	me = PCPU_GET(cpuid);
1187 
1188 	/* Don't bother if we should be doing it ourself. */
1189 	if (CPU_ISSET(me, &idle_cpus_mask) &&
1190 	    (cpunum == NOCPU || me == cpunum))
1191 		return (0);
1192 
1193 	CPU_SETOF(me, &dontuse);
1194 	CPU_OR(&dontuse, &dontuse, &stopped_cpus);
1195 	CPU_OR(&dontuse, &dontuse, &hlt_cpus_mask);
1196 	CPU_ZERO(&map2);
1197 	if (forward_wakeup_use_loop) {
1198 		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
1199 			id = pc->pc_cpuid;
1200 			if (!CPU_ISSET(id, &dontuse) &&
1201 			    pc->pc_curthread == pc->pc_idlethread) {
1202 				CPU_SET(id, &map2);
1203 			}
1204 		}
1205 	}
1206 
1207 	if (forward_wakeup_use_mask) {
1208 		map = idle_cpus_mask;
1209 		CPU_ANDNOT(&map, &map, &dontuse);
1210 
1211 		/* If they are both on, compare and use loop if different. */
1212 		if (forward_wakeup_use_loop) {
1213 			if (CPU_CMP(&map, &map2)) {
1214 				printf("map != map2, loop method preferred\n");
1215 				map = map2;
1216 			}
1217 		}
1218 	} else {
1219 		map = map2;
1220 	}
1221 
1222 	/* If we only allow a specific CPU, then mask off all the others. */
1223 	if (cpunum != NOCPU) {
1224 		KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
1225 		iscpuset = CPU_ISSET(cpunum, &map);
1226 		if (iscpuset == 0)
1227 			CPU_ZERO(&map);
1228 		else
1229 			CPU_SETOF(cpunum, &map);
1230 	}
1231 	if (!CPU_EMPTY(&map)) {
1232 		forward_wakeups_delivered++;
1233 		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
1234 			id = pc->pc_cpuid;
1235 			if (!CPU_ISSET(id, &map))
1236 				continue;
1237 			if (cpu_idle_wakeup(pc->pc_cpuid))
1238 				CPU_CLR(id, &map);
1239 		}
1240 		if (!CPU_EMPTY(&map))
1241 			ipi_selected(map, IPI_AST);
1242 		return (1);
1243 	}
1244 	if (cpunum == NOCPU)
1245 		printf("forward_wakeup: Idle processor not found\n");
1246 	return (0);
1247 }
1248 
1249 static void
1250 kick_other_cpu(int pri, int cpuid)
1251 {
1252 	struct pcpu *pcpu;
1253 	int cpri;
1254 
1255 	pcpu = pcpu_find(cpuid);
1256 	if (CPU_ISSET(cpuid, &idle_cpus_mask)) {
1257 		forward_wakeups_delivered++;
1258 		if (!cpu_idle_wakeup(cpuid))
1259 			ipi_cpu(cpuid, IPI_AST);
1260 		return;
1261 	}
1262 
1263 	cpri = pcpu->pc_curthread->td_priority;
1264 	if (pri >= cpri)
1265 		return;
1266 
1267 #if defined(IPI_PREEMPTION) && defined(PREEMPTION)
1268 #if !defined(FULL_PREEMPTION)
1269 	if (pri <= PRI_MAX_ITHD)
1270 #endif /* ! FULL_PREEMPTION */
1271 	{
1272 		ipi_cpu(cpuid, IPI_PREEMPT);
1273 		return;
1274 	}
1275 #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
1276 
1277 	if (pcpu->pc_curthread->td_lock == &sched_lock) {
1278 		ast_sched_locked(pcpu->pc_curthread, TDA_SCHED);
1279 		ipi_cpu(cpuid, IPI_AST);
1280 	}
1281 }
1282 #endif /* SMP */
1283 
1284 #ifdef SMP
1285 static int
1286 sched_pickcpu(struct thread *td)
1287 {
1288 	int best, cpu;
1289 
1290 	mtx_assert(&sched_lock, MA_OWNED);
1291 
1292 	if (td->td_lastcpu != NOCPU && THREAD_CAN_SCHED(td, td->td_lastcpu))
1293 		best = td->td_lastcpu;
1294 	else
1295 		best = NOCPU;
1296 	CPU_FOREACH(cpu) {
1297 		if (!THREAD_CAN_SCHED(td, cpu))
1298 			continue;
1299 
1300 		if (best == NOCPU)
1301 			best = cpu;
1302 		else if (runq_length[cpu] < runq_length[best])
1303 			best = cpu;
1304 	}
1305 	KASSERT(best != NOCPU, ("no valid CPUs"));
1306 
1307 	return (best);
1308 }
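/*
 * Editor's note: the scan above replaces "best" only when a run queue is
 * strictly shorter, so when td_lastcpu is eligible ties go to it and a
 * woken thread stays on its previous CPU whenever the load is equal.
 */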
1309 #endif
1310 
1311 void
1312 sched_add(struct thread *td, int flags)
1313 #ifdef SMP
1314 {
1315 	cpuset_t tidlemsk;
1316 	struct td_sched *ts;
1317 	u_int cpu, cpuid;
1318 	int forwarded = 0;
1319 	int single_cpu = 0;
1320 
1321 	ts = td_get_sched(td);
1322 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1323 	KASSERT((td->td_inhibitors == 0),
1324 	    ("sched_add: trying to run inhibited thread"));
1325 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1326 	    ("sched_add: bad thread state"));
1327 	KASSERT(td->td_flags & TDF_INMEM,
1328 	    ("sched_add: thread swapped out"));
1329 
1330 	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
1331 	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1332 	    sched_tdname(curthread));
1333 	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
1334 	    KTR_ATTR_LINKED, sched_tdname(td));
1335 	SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
1336 	    flags & SRQ_PREEMPTED);
1337 
1338 	/*
1339 	 * Now that the thread is moving to the run-queue, set the lock
1340 	 * to the scheduler's lock.
1341 	 */
1342 	if (td->td_lock != &sched_lock) {
1343 		mtx_lock_spin(&sched_lock);
1344 		if ((flags & SRQ_HOLD) != 0)
1345 			td->td_lock = &sched_lock;
1346 		else
1347 			thread_lock_set(td, &sched_lock);
1348 	}
1349 	TD_SET_RUNQ(td);
1350 
1351 	/*
1352 	 * If SMP is started and the thread is pinned or otherwise limited to
1353 	 * a specific set of CPUs, queue the thread to a per-CPU run queue.
1354 	 * Otherwise, queue the thread to the global run queue.
1355 	 *
1356 	 * If SMP has not yet been started we must use the global run queue
1357 	 * as per-CPU state may not be initialized yet and we may crash if we
1358 	 * try to access the per-CPU run queues.
1359 	 */
1360 	if (smp_started && (td->td_pinned != 0 || td->td_flags & TDF_BOUND ||
1361 	    ts->ts_flags & TSF_AFFINITY)) {
1362 		if (td->td_pinned != 0)
1363 			cpu = td->td_lastcpu;
1364 		else if (td->td_flags & TDF_BOUND) {
1365 			/* Find CPU from bound runq. */
1366 			KASSERT(SKE_RUNQ_PCPU(ts),
1367 			    ("sched_add: bound td_sched not on cpu runq"));
1368 			cpu = ts->ts_runq - &runq_pcpu[0];
1369 		} else
1370 			/* Find a valid CPU for our cpuset */
1371 			cpu = sched_pickcpu(td);
1372 		ts->ts_runq = &runq_pcpu[cpu];
1373 		single_cpu = 1;
1374 		CTR3(KTR_RUNQ,
1375 		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
1376 		    cpu);
1377 	} else {
1378 		CTR2(KTR_RUNQ,
1379 		    "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts,
1380 		    td);
1381 		cpu = NOCPU;
1382 		ts->ts_runq = &runq;
1383 	}
1384 
1385 	if ((td->td_flags & TDF_NOLOAD) == 0)
1386 		sched_load_add();
1387 	runq_add(ts->ts_runq, td, flags);
1388 	if (cpu != NOCPU)
1389 		runq_length[cpu]++;
1390 
1391 	cpuid = PCPU_GET(cpuid);
1392 	if (single_cpu && cpu != cpuid) {
1393 	        kick_other_cpu(td->td_priority, cpu);
1394 	} else {
1395 		if (!single_cpu) {
1396 			tidlemsk = idle_cpus_mask;
1397 			CPU_ANDNOT(&tidlemsk, &tidlemsk, &hlt_cpus_mask);
1398 			CPU_CLR(cpuid, &tidlemsk);
1399 
1400 			if (!CPU_ISSET(cpuid, &idle_cpus_mask) &&
1401 			    ((flags & SRQ_INTR) == 0) &&
1402 			    !CPU_EMPTY(&tidlemsk))
1403 				forwarded = forward_wakeup(cpu);
1404 		}
1405 
1406 		if (!forwarded) {
1407 			if (!maybe_preempt(td))
1408 				maybe_resched(td);
1409 		}
1410 	}
1411 	if ((flags & SRQ_HOLDTD) == 0)
1412 		thread_unlock(td);
1413 }
1414 #else /* SMP */
1415 {
1416 	struct td_sched *ts;
1417 
1418 	ts = td_get_sched(td);
1419 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1420 	KASSERT((td->td_inhibitors == 0),
1421 	    ("sched_add: trying to run inhibited thread"));
1422 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1423 	    ("sched_add: bad thread state"));
1424 	KASSERT(td->td_flags & TDF_INMEM,
1425 	    ("sched_add: thread swapped out"));
1426 	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
1427 	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1428 	    sched_tdname(curthread));
1429 	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
1430 	    KTR_ATTR_LINKED, sched_tdname(td));
1431 	SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
1432 	    flags & SRQ_PREEMPTED);
1433 
1434 	/*
1435 	 * Now that the thread is moving to the run-queue, set the lock
1436 	 * to the scheduler's lock.
1437 	 */
1438 	if (td->td_lock != &sched_lock) {
1439 		mtx_lock_spin(&sched_lock);
1440 		if ((flags & SRQ_HOLD) != 0)
1441 			td->td_lock = &sched_lock;
1442 		else
1443 			thread_lock_set(td, &sched_lock);
1444 	}
1445 	TD_SET_RUNQ(td);
1446 	CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
1447 	ts->ts_runq = &runq;
1448 
1449 	if ((td->td_flags & TDF_NOLOAD) == 0)
1450 		sched_load_add();
1451 	runq_add(ts->ts_runq, td, flags);
1452 	if (!maybe_preempt(td))
1453 		maybe_resched(td);
1454 	if ((flags & SRQ_HOLDTD) == 0)
1455 		thread_unlock(td);
1456 }
1457 #endif /* SMP */
1458 
1459 void
1460 sched_rem(struct thread *td)
1461 {
1462 	struct td_sched *ts;
1463 
1464 	ts = td_get_sched(td);
1465 	KASSERT(td->td_flags & TDF_INMEM,
1466 	    ("sched_rem: thread swapped out"));
1467 	KASSERT(TD_ON_RUNQ(td),
1468 	    ("sched_rem: thread not on run queue"));
1469 	mtx_assert(&sched_lock, MA_OWNED);
1470 	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
1471 	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1472 	    sched_tdname(curthread));
1473 	SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL);
1474 
1475 	if ((td->td_flags & TDF_NOLOAD) == 0)
1476 		sched_load_rem();
1477 #ifdef SMP
1478 	if (ts->ts_runq != &runq)
1479 		runq_length[ts->ts_runq - runq_pcpu]--;
1480 #endif
1481 	runq_remove(ts->ts_runq, td);
1482 	TD_SET_CAN_RUN(td);
1483 }
1484 
1485 /*
1486  * Select threads to run.  Note that running threads still consume a
1487  * slot.
1488  */
1489 struct thread *
1490 sched_choose(void)
1491 {
1492 	struct thread *td;
1493 	struct runq *rq;
1494 
1495 	mtx_assert(&sched_lock,  MA_OWNED);
1496 #ifdef SMP
1497 	struct thread *tdcpu;
1498 
1499 	rq = &runq;
1500 	td = runq_choose_fuzz(&runq, runq_fuzz);
1501 	tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
1502 
1503 	if (td == NULL ||
1504 	    (tdcpu != NULL &&
1505 	     tdcpu->td_priority < td->td_priority)) {
1506 		CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
1507 		     PCPU_GET(cpuid));
1508 		td = tdcpu;
1509 		rq = &runq_pcpu[PCPU_GET(cpuid)];
1510 	} else {
1511 		CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td);
1512 	}
1513 
1514 #else
1515 	rq = &runq;
1516 	td = runq_choose(&runq);
1517 #endif
1518 
1519 	if (td) {
1520 #ifdef SMP
1521 		if (td == tdcpu)
1522 			runq_length[PCPU_GET(cpuid)]--;
1523 #endif
1524 		runq_remove(rq, td);
1525 		td->td_flags |= TDF_DIDRUN;
1526 
1527 		KASSERT(td->td_flags & TDF_INMEM,
1528 		    ("sched_choose: thread swapped out"));
1529 		return (td);
1530 	}
1531 	return (PCPU_GET(idlethread));
1532 }
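/*
 * Editor's note: the per-CPU queue wins above only when its head has a
 * strictly better (numerically lower) priority than the thread picked from
 * the global queue, so equal-priority work drains from the global queue
 * first.
 */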
1533 
1534 void
1535 sched_preempt(struct thread *td)
1536 {
1537 	int flags;
1538 
1539 	SDT_PROBE2(sched, , , surrender, td, td->td_proc);
1540 	if (td->td_critnest > 1) {
1541 		td->td_owepreempt = 1;
1542 	} else {
1543 		thread_lock(td);
1544 		flags = SW_INVOL | SW_PREEMPT;
1545 		flags |= TD_IS_IDLETHREAD(td) ? SWT_REMOTEWAKEIDLE :
1546 		    SWT_REMOTEPREEMPT;
1547 		mi_switch(flags);
1548 	}
1549 }
1550 
1551 void
1552 sched_userret_slowpath(struct thread *td)
1553 {
1554 
1555 	thread_lock(td);
1556 	td->td_priority = td->td_user_pri;
1557 	td->td_base_pri = td->td_user_pri;
1558 	thread_unlock(td);
1559 }
1560 
1561 void
1562 sched_bind(struct thread *td, int cpu)
1563 {
1564 #ifdef SMP
1565 	struct td_sched *ts = td_get_sched(td);
1566 #endif
1567 
1568 	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
1569 	KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
1570 
1571 	td->td_flags |= TDF_BOUND;
1572 #ifdef SMP
1573 	ts->ts_runq = &runq_pcpu[cpu];
1574 	if (PCPU_GET(cpuid) == cpu)
1575 		return;
1576 
1577 	mi_switch(SW_VOL | SWT_BIND);
1578 	thread_lock(td);
1579 #endif
1580 }
1581 
1582 void
1583 sched_unbind(struct thread* td)
1584 {
1585 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1586 	KASSERT(td == curthread, ("sched_unbind: can only bind curthread"));
1587 	td->td_flags &= ~TDF_BOUND;
1588 }
1589 
1590 int
1591 sched_is_bound(struct thread *td)
1592 {
1593 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1594 	return (td->td_flags & TDF_BOUND);
1595 }
1596 
1597 void
1598 sched_relinquish(struct thread *td)
1599 {
1600 	thread_lock(td);
1601 	mi_switch(SW_VOL | SWT_RELINQUISH);
1602 }
1603 
1604 int
1605 sched_load(void)
1606 {
1607 	return (sched_tdcnt);
1608 }
1609 
1610 int
1611 sched_sizeof_proc(void)
1612 {
1613 	return (sizeof(struct proc));
1614 }
1615 
1616 int
1617 sched_sizeof_thread(void)
1618 {
1619 	return (sizeof(struct thread) + sizeof(struct td_sched));
1620 }
1621 
1622 fixpt_t
1623 sched_pctcpu(struct thread *td)
1624 {
1625 	struct td_sched *ts;
1626 
1627 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1628 	ts = td_get_sched(td);
1629 	return (ts->ts_pctcpu);
1630 }
1631 
1632 #ifdef RACCT
1633 /*
1634  * Calculates the contribution to the thread cpu usage for the latest
1635  * (unfinished) second.
1636  */
1637 fixpt_t
1638 sched_pctcpu_delta(struct thread *td)
1639 {
1640 	struct td_sched *ts;
1641 	fixpt_t delta;
1642 	int realstathz;
1643 
1644 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1645 	ts = td_get_sched(td);
1646 	delta = 0;
1647 	realstathz = stathz ? stathz : hz;
1648 	if (ts->ts_cpticks != 0) {
1649 #if	(FSHIFT >= CCPU_SHIFT)
1650 		delta = (realstathz == 100)
1651 		    ? ((fixpt_t) ts->ts_cpticks) <<
1652 		    (FSHIFT - CCPU_SHIFT) :
1653 		    100 * (((fixpt_t) ts->ts_cpticks)
1654 		    << (FSHIFT - CCPU_SHIFT)) / realstathz;
1655 #else
1656 		delta = ((FSCALE - ccpu) *
1657 		    (ts->ts_cpticks *
1658 		    FSCALE / realstathz)) >> FSHIFT;
1659 #endif
1660 	}
1661 
1662 	return (delta);
1663 }
1664 #endif
1665 
1666 u_int
1667 sched_estcpu(struct thread *td)
1668 {
1669 
1670 	return (td_get_sched(td)->ts_estcpu);
1671 }
1672 
1673 /*
1674  * The actual idle process.
1675  */
1676 void
1677 sched_idletd(void *dummy)
1678 {
1679 	struct pcpuidlestat *stat;
1680 
1681 	THREAD_NO_SLEEPING();
1682 	stat = DPCPU_PTR(idlestat);
1683 	for (;;) {
1684 		mtx_assert(&Giant, MA_NOTOWNED);
1685 
1686 		while (!sched_runnable()) {
1687 			cpu_idle(stat->idlecalls + stat->oldidlecalls > 64);
1688 			stat->idlecalls++;
1689 		}
1690 
1691 		mtx_lock_spin(&sched_lock);
1692 		mi_switch(SW_VOL | SWT_IDLE);
1693 	}
1694 }
1695 
1696 static void
1697 sched_throw_tail(struct thread *td)
1698 {
1699 
1700 	mtx_assert(&sched_lock, MA_OWNED);
1701 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
1702 	cpu_throw(td, choosethread());	/* doesn't return */
1703 }
1704 
1705 /*
1706  * A CPU is entering for the first time.
1707  */
1708 void
1709 sched_ap_entry(void)
1710 {
1711 
1712 	/*
1713 	 * Correct spinlock nesting.  The idle thread context that we are
1714 	 * borrowing was created so that it would start out with a single
1715 	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
1716 	 * explicitly acquired locks in this function, the nesting count
1717 	 * is now 2 rather than 1.  Since we are nested, calling
1718 	 * spinlock_exit() will simply adjust the counts without allowing
1719 	 * spin lock using code to interrupt us.
1720 	 */
1721 	mtx_lock_spin(&sched_lock);
1722 	spinlock_exit();
1723 	PCPU_SET(switchtime, cpu_ticks());
1724 	PCPU_SET(switchticks, ticks);
1725 
1726 	sched_throw_tail(NULL);
1727 }
1728 
1729 /*
1730  * A thread is exiting.
1731  */
1732 void
1733 sched_throw(struct thread *td)
1734 {
1735 
1736 	MPASS(td != NULL);
1737 	MPASS(td->td_lock == &sched_lock);
1738 
1739 	lock_profile_release_lock(&sched_lock.lock_object, true);
1740 	td->td_lastcpu = td->td_oncpu;
1741 	td->td_oncpu = NOCPU;
1742 
1743 	sched_throw_tail(td);
1744 }
1745 
1746 void
1747 sched_fork_exit(struct thread *td)
1748 {
1749 
1750 	/*
1751 	 * Finish setting up thread glue so that it begins execution in a
1752 	 * non-nested critical section with sched_lock held but not recursed.
1753 	 */
1754 	td->td_oncpu = PCPU_GET(cpuid);
1755 	sched_lock.mtx_lock = (uintptr_t)td;
1756 	lock_profile_obtain_lock_success(&sched_lock.lock_object, true,
1757 	    0, 0, __FILE__, __LINE__);
1758 	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
1759 
1760 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
1761 	    "prio:%d", td->td_priority);
1762 	SDT_PROBE0(sched, , , on__cpu);
1763 }
1764 
1765 char *
1766 sched_tdname(struct thread *td)
1767 {
1768 #ifdef KTR
1769 	struct td_sched *ts;
1770 
1771 	ts = td_get_sched(td);
1772 	if (ts->ts_name[0] == '\0')
1773 		snprintf(ts->ts_name, sizeof(ts->ts_name),
1774 		    "%s tid %d", td->td_name, td->td_tid);
1775 	return (ts->ts_name);
1776 #else
1777 	return (td->td_name);
1778 #endif
1779 }
1780 
1781 #ifdef KTR
1782 void
1783 sched_clear_tdname(struct thread *td)
1784 {
1785 	struct td_sched *ts;
1786 
1787 	ts = td_get_sched(td);
1788 	ts->ts_name[0] = '\0';
1789 }
1790 #endif
1791 
1792 void
1793 sched_affinity(struct thread *td)
1794 {
1795 #ifdef SMP
1796 	struct td_sched *ts;
1797 	int cpu;
1798 
1799 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1800 
1801 	/*
1802 	 * Set the TSF_AFFINITY flag if there is at least one CPU this
1803 	 * thread can't run on.
1804 	 */
1805 	ts = td_get_sched(td);
1806 	ts->ts_flags &= ~TSF_AFFINITY;
1807 	CPU_FOREACH(cpu) {
1808 		if (!THREAD_CAN_SCHED(td, cpu)) {
1809 			ts->ts_flags |= TSF_AFFINITY;
1810 			break;
1811 		}
1812 	}
1813 
1814 	/*
1815 	 * If this thread can run on all CPUs, nothing else to do.
1816 	 */
1817 	if (!(ts->ts_flags & TSF_AFFINITY))
1818 		return;
1819 
1820 	/* Pinned threads and bound threads should be left alone. */
1821 	if (td->td_pinned != 0 || td->td_flags & TDF_BOUND)
1822 		return;
1823 
1824 	switch (TD_GET_STATE(td)) {
1825 	case TDS_RUNQ:
1826 		/*
1827 		 * If we are on a per-CPU runqueue that is in the set,
1828 		 * then nothing needs to be done.
1829 		 */
1830 		if (ts->ts_runq != &runq &&
1831 		    THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu))
1832 			return;
1833 
1834 		/* Put this thread on a valid per-CPU runqueue. */
1835 		sched_rem(td);
1836 		sched_add(td, SRQ_HOLDTD | SRQ_BORING);
1837 		break;
1838 	case TDS_RUNNING:
1839 		/*
1840 		 * See if our current CPU is in the set.  If not, force a
1841 		 * context switch.
1842 		 */
1843 		if (THREAD_CAN_SCHED(td, td->td_oncpu))
1844 			return;
1845 
1846 		ast_sched_locked(td, TDA_SCHED);
1847 		if (td != curthread)
1848 			ipi_cpu(cpu, IPI_AST);
1849 		break;
1850 	default:
1851 		break;
1852 	}
1853 #endif
1854 }
1855