xref: /freebsd/sys/kern/sched_ule.c (revision c47f202b45d903b4575f96e32f8a238b867f0856)
1 /*-
2  * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /*
28  * This file implements the ULE scheduler.  ULE supports independent CPU
29  * run queues and fine grain locking.  It has superior interactive
30  * performance under load even on uni-processor systems.
31  *
32  * etymology:
33  *   ULE is the last three letters in schedule.  It owes its name to a
34  * generic user created for a scheduling system by Paul Mikesell at
35  * Isilon Systems and a general lack of creativity on the part of the author.
36  */
37 
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40 
41 #include "opt_hwpmc_hooks.h"
42 #include "opt_sched.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kdb.h>
47 #include <sys/kernel.h>
48 #include <sys/ktr.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/proc.h>
52 #include <sys/resource.h>
53 #include <sys/resourcevar.h>
54 #include <sys/sched.h>
55 #include <sys/smp.h>
56 #include <sys/sx.h>
57 #include <sys/sysctl.h>
58 #include <sys/sysproto.h>
59 #include <sys/turnstile.h>
60 #include <sys/umtx.h>
61 #include <sys/vmmeter.h>
62 #ifdef KTRACE
63 #include <sys/uio.h>
64 #include <sys/ktrace.h>
65 #endif
66 
67 #ifdef HWPMC_HOOKS
68 #include <sys/pmckern.h>
69 #endif
70 
71 #include <machine/cpu.h>
72 #include <machine/smp.h>
73 
74 #ifndef PREEMPTION
75 #error	"SCHED_ULE requires options PREEMPTION"
76 #endif
77 
78 #define	KTR_ULE	0
79 
80 /*
81  * Thread scheduler specific section.  All fields are protected
82  * by the thread lock.
83  */
84 struct td_sched {
85 	TAILQ_ENTRY(td_sched) ts_procq;	/* Run queue. */
86 	struct thread	*ts_thread;	/* Active associated thread. */
87 	struct runq	*ts_runq;	/* Run-queue we're queued on. */
88 	short		ts_flags;	/* TSF_* flags. */
89 	u_char		ts_rqindex;	/* Run queue index. */
90 	u_char		ts_cpu;		/* CPU that we have affinity for. */
91 	int		ts_slptick;	/* Tick when we went to sleep. */
92 	int		ts_slice;	/* Ticks of slice remaining. */
93 	u_int		ts_slptime;	/* Number of ticks we vol. slept */
94 	u_int		ts_runtime;	/* Number of ticks we were running */
95 	/* The following variables are only used for pctcpu calculation */
96 	int		ts_ltick;	/* Last tick that we were running on */
97 	int		ts_ftick;	/* First tick that we were running on */
98 	int		ts_ticks;	/* Tick count */
99 #ifdef SMP
100 	int		ts_rltick;	/* Real last tick, for affinity. */
101 #endif
102 };
103 /* flags kept in ts_flags */
104 #define	TSF_BOUND	0x0001		/* Thread cannot migrate. */
105 #define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */
106 
107 static struct td_sched td_sched0;
108 
109 /*
110  * Cpu percentage computation macros and defines.
111  *
112  * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
113  * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
114  * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
115  * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
116  * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
117  * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
118  */
119 #define	SCHED_TICK_SECS		10
120 #define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
121 #define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
122 #define	SCHED_TICK_SHIFT	10
123 #define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
124 #define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))
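/*
 * Worked example (assuming hz = 1000): SCHED_TICK_TARG is 10000 ticks, i.e.
 * ten seconds of history, and SCHED_TICK_MAX is 11000.  ts_ticks accumulates
 * in the shifted domain via tickincr (see below), so SCHED_TICK_HZ() shifts
 * it back down to plain hz ticks.  SCHED_TICK_TOTAL() is the length of the
 * sampling window (ts_ltick - ts_ftick), clamped to at least one second to
 * avoid tiny denominators.
 */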
125 
126 /*
127  * These macros determine priorities for non-interactive threads.  They are
128  * assigned a priority based on their recent cpu utilization as expressed
129  * by the ratio of ticks to the tick total.  NHALF priorities at the start
130  * and end of the MIN to MAX timeshare range are only reachable with negative
131  * or positive nice respectively.
132  *
133  * PRI_RANGE:	Priority range for utilization dependent priorities.
134  * PRI_NRESV:	Number of nice values.
135  * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
136  * PRI_NICE:	Determines the part of the priority inherited from nice.
137  */
138 #define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
139 #define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
140 #define	SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
141 #define	SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
142 #define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN)
143 #define	SCHED_PRI_TICKS(ts)						\
144     (SCHED_TICK_HZ((ts)) /						\
145     (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
146 #define	SCHED_PRI_NICE(nice)	(nice)
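/*
 * Worked example: with PRIO_MIN = -20 and PRIO_MAX = 20, SCHED_PRI_NRESV is
 * 40 and SCHED_PRI_NHALF is 20, so 20 priorities at each end of the
 * timeshare range are reserved for nice values.  A thread that consumed the
 * cpu for essentially the whole sampling window has SCHED_TICK_HZ() roughly
 * equal to SCHED_TICK_TOTAL() and receives close to the full SCHED_PRI_RANGE
 * penalty from SCHED_PRI_TICKS(); one that ran a tenth of the window
 * receives roughly a tenth of it.  SCHED_PRI_NICE() then adds the nice
 * value directly.
 */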
147 
148 /*
149  * These determine the interactivity of a process.  Interactivity differs from
150  * cpu utilization in that it is the ratio of voluntary sleep time to run time,
151  * while cpu utilization counts all time spent not running, including time
152  * waiting on a run queue.  This more accurately models the intent of the thread.
153  *
154  * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
155  *		before throttling back.
156  * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
157  * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
158  * INTERACT_THRESH:	Threshold for placement on the current runq.
159  */
160 #define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
161 #define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
162 #define	SCHED_INTERACT_MAX	(100)
163 #define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
164 #define	SCHED_INTERACT_THRESH	(30)
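/*
 * Worked example (assuming hz = 1000): SCHED_SLP_RUN_MAX caps the combined
 * sleep + run history at five seconds (kept in the shifted tick domain) and
 * SCHED_SLP_RUN_FORK limits what a child inherits at fork to half a second.
 * Scores run from 0 (purely sleeping) to 100 (purely running); anything
 * below SCHED_INTERACT_THRESH (30) is treated as interactive.
 */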
165 
166 /*
167  * tickincr:		Converts a stathz tick into a hz domain scaled by
168  *			the shift factor.  Without the shift the error rate
169  *			due to rounding would be unacceptably high.
170  * realstathz:		stathz is sometimes 0; when it is, hz is used instead.
171  * sched_slice:		Runtime of each thread before rescheduling.
172  * preempt_thresh:	Priority threshold for preemption and remote IPIs.
173  */
174 static int sched_interact = SCHED_INTERACT_THRESH;
175 static int realstathz;
176 static int tickincr;
177 static int sched_slice;
178 static int preempt_thresh = PRI_MIN_KERN;
179 
180 /*
181  * tdq - per processor runqs and statistics.  All fields are protected by the
182  * tdq_lock.  The load and lowpri may be accessed without it to avoid excess
183  * locking in sched_pickcpu().
184  */
185 struct tdq {
186 	struct mtx	*tdq_lock;		/* Pointer to group lock. */
187 	struct runq	tdq_realtime;		/* real-time run queue. */
188 	struct runq	tdq_timeshare;		/* timeshare run queue. */
189 	struct runq	tdq_idle;		/* Queue of IDLE threads. */
190 	int		tdq_load;		/* Aggregate load. */
191 	u_char		tdq_idx;		/* Current insert index. */
192 	u_char		tdq_ridx;		/* Current removal index. */
193 #ifdef SMP
194 	u_char		tdq_lowpri;		/* Lowest priority thread. */
195 	int		tdq_transferable;	/* Transferable thread count. */
196 	LIST_ENTRY(tdq)	tdq_siblings;		/* Next in tdq group. */
197 	struct tdq_group *tdq_group;		/* Our processor group. */
198 #else
199 	int		tdq_sysload;		/* For loadavg, !ITHD load. */
200 #endif
201 } __aligned(64);
202 
203 
204 #ifdef SMP
205 /*
206  * tdq groups are groups of processors which can cheaply share threads.  When
207  * one processor in the group goes idle it will check the runqs of the other
208  * processors in its group prior to halting and waiting for an interrupt.
209  * These groups are suitable for SMT (Simultaneous Multi-Threading) and not NUMA.
210  * In a NUMA environment we'd want an idle bitmap per group and a two tiered
211  * load balancer.
212  */
213 struct tdq_group {
214 	struct mtx	tdg_lock;	/* Protects all fields below. */
215 	int		tdg_cpus;	/* Count of CPUs in this tdq group. */
216 	cpumask_t 	tdg_cpumask;	/* Mask of cpus in this group. */
217 	cpumask_t 	tdg_idlemask;	/* Idle cpus in this group. */
218 	cpumask_t 	tdg_mask;	/* Bit mask for first cpu. */
219 	int		tdg_load;	/* Total load of this group. */
220 	int	tdg_transferable;	/* Transferable load of this group. */
221 	LIST_HEAD(, tdq) tdg_members;	/* Linked list of all members. */
222 	char		tdg_name[16];	/* lock name. */
223 } __aligned(64);
224 
225 #define	SCHED_AFFINITY_DEFAULT	(max(1, hz / 300))
226 #define	SCHED_AFFINITY(ts)	((ts)->ts_rltick > ticks - affinity)
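/*
 * Example (assuming hz = 1000): affinity defaults to max(1, 1000 / 300) = 3
 * ticks, so SCHED_AFFINITY(ts) is true only if the thread last ran
 * (ts_rltick) within roughly the last 3ms.
 */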
227 
228 /*
229  * Run-time tunables.
230  */
231 static int rebalance = 1;
232 static int balance_secs = 1;
233 static int pick_pri = 1;
234 static int affinity;
235 static int tryself = 1;
236 static int steal_htt = 0;
237 static int steal_idle = 1;
238 static int steal_thresh = 2;
239 static int topology = 0;
240 
241 /*
242  * One thread queue per processor.
243  */
244 static volatile cpumask_t tdq_idle;
245 static int tdg_maxid;
246 static struct tdq	tdq_cpu[MAXCPU];
247 static struct tdq_group tdq_groups[MAXCPU];
248 static struct callout balco;
249 static struct callout gbalco;
250 
251 #define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
252 #define	TDQ_CPU(x)	(&tdq_cpu[(x)])
253 #define	TDQ_ID(x)	((int)((x) - tdq_cpu))
254 #define	TDQ_GROUP(x)	(&tdq_groups[(x)])
255 #define	TDG_ID(x)	((int)((x) - tdq_groups))
256 #else	/* !SMP */
257 static struct tdq	tdq_cpu;
258 static struct mtx	tdq_lock;
259 
260 #define	TDQ_ID(x)	(0)
261 #define	TDQ_SELF()	(&tdq_cpu)
262 #define	TDQ_CPU(x)	(&tdq_cpu)
263 #endif
264 
265 #define	TDQ_LOCK_ASSERT(t, type)	mtx_assert(TDQ_LOCKPTR((t)), (type))
266 #define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
267 #define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
268 #define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
269 #define	TDQ_LOCKPTR(t)		((t)->tdq_lock)
270 
271 static void sched_priority(struct thread *);
272 static void sched_thread_priority(struct thread *, u_char);
273 static int sched_interact_score(struct thread *);
274 static void sched_interact_update(struct thread *);
275 static void sched_interact_fork(struct thread *);
276 static void sched_pctcpu_update(struct td_sched *);
277 
278 /* Operations on per processor queues */
279 static struct td_sched * tdq_choose(struct tdq *);
280 static void tdq_setup(struct tdq *);
281 static void tdq_load_add(struct tdq *, struct td_sched *);
282 static void tdq_load_rem(struct tdq *, struct td_sched *);
283 static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
284 static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
285 void tdq_print(int cpu);
286 static void runq_print(struct runq *rq);
287 static void tdq_add(struct tdq *, struct thread *, int);
288 #ifdef SMP
289 static void tdq_move(struct tdq *, struct tdq *);
290 static int tdq_idled(struct tdq *);
291 static void tdq_notify(struct td_sched *);
292 static struct td_sched *tdq_steal(struct tdq *, int);
293 static struct td_sched *runq_steal(struct runq *);
294 static int sched_pickcpu(struct td_sched *, int);
295 static void sched_balance(void *);
296 static void sched_balance_groups(void *);
297 static void sched_balance_group(struct tdq_group *);
298 static void sched_balance_pair(struct tdq *, struct tdq *);
299 static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
300 static inline struct tdq *sched_switchcpu(struct td_sched *, int, int);
301 static inline struct mtx *thread_block_switch(struct thread *);
302 static inline void thread_unblock_switch(struct thread *, struct mtx *);
303 static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
304 
305 #define	THREAD_CAN_MIGRATE(td)	 ((td)->td_pinned == 0)
306 #endif
307 
308 static void sched_setup(void *dummy);
309 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
310 
311 static void sched_initticks(void *dummy);
312 SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)
313 
314 /*
315  * Print the threads waiting on a run-queue.
316  */
317 static void
318 runq_print(struct runq *rq)
319 {
320 	struct rqhead *rqh;
321 	struct td_sched *ts;
322 	int pri;
323 	int j;
324 	int i;
325 
326 	for (i = 0; i < RQB_LEN; i++) {
327 		printf("\t\trunq bits %d 0x%zx\n",
328 		    i, rq->rq_status.rqb_bits[i]);
329 		for (j = 0; j < RQB_BPW; j++)
330 			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
331 				pri = j + (i << RQB_L2BPW);
332 				rqh = &rq->rq_queues[pri];
333 				TAILQ_FOREACH(ts, rqh, ts_procq) {
334 					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
335 					    ts->ts_thread, ts->ts_thread->td_proc->p_comm, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
336 				}
337 			}
338 	}
339 }
340 
341 /*
342  * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
343  */
344 void
345 tdq_print(int cpu)
346 {
347 	struct tdq *tdq;
348 
349 	tdq = TDQ_CPU(cpu);
350 
351 	printf("tdq %d:\n", TDQ_ID(tdq));
352 	printf("\tlockptr         %p\n", TDQ_LOCKPTR(tdq));
353 	printf("\tload:           %d\n", tdq->tdq_load);
354 	printf("\ttimeshare idx:  %d\n", tdq->tdq_idx);
355 	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
356 	printf("\trealtime runq:\n");
357 	runq_print(&tdq->tdq_realtime);
358 	printf("\ttimeshare runq:\n");
359 	runq_print(&tdq->tdq_timeshare);
360 	printf("\tidle runq:\n");
361 	runq_print(&tdq->tdq_idle);
362 #ifdef SMP
363 	printf("\tload transferable: %d\n", tdq->tdq_transferable);
364 	printf("\tlowest priority:   %d\n", tdq->tdq_lowpri);
365 	printf("\tgroup:             %d\n", TDG_ID(tdq->tdq_group));
366 	printf("\tLock name:         %s\n", tdq->tdq_group->tdg_name);
367 #endif
368 }
369 
370 #define	TS_RQ_PPQ	(((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
371 /*
372  * Add a thread to the actual run-queue.  Keeps transferable counts up to
373  * date with what is actually on the run-queue.  Selects the correct
374  * queue position for timeshare threads.
375  */
376 static __inline void
377 tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
378 {
379 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
380 	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
381 #ifdef SMP
382 	if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
383 		tdq->tdq_transferable++;
384 		tdq->tdq_group->tdg_transferable++;
385 		ts->ts_flags |= TSF_XFERABLE;
386 	}
387 #endif
388 	if (ts->ts_runq == &tdq->tdq_timeshare) {
389 		u_char pri;
390 
391 		pri = ts->ts_thread->td_priority;
392 		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
393 			("Invalid priority %d on timeshare runq", pri));
394 		/*
395 		 * This queue contains only priorities between MIN and MAX
396 		 * timeshare.  Use the whole queue to represent these values.
397 		 */
398 		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
399 			pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
400 			pri = (pri + tdq->tdq_idx) % RQ_NQS;
401 			/*
402 			 * This effectively shortens the queue by one so we
403 			 * can have a one slot difference between idx and
404 			 * ridx while we wait for threads to drain.
405 			 */
406 			if (tdq->tdq_ridx != tdq->tdq_idx &&
407 			    pri == tdq->tdq_ridx)
408 				pri = (unsigned char)(pri - 1) % RQ_NQS;
409 		} else
410 			pri = tdq->tdq_ridx;
411 		runq_add_pri(ts->ts_runq, ts, pri, flags);
412 	} else
413 		runq_add(ts->ts_runq, ts, flags);
414 }
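/*
 * Worked example of the timeshare insertion above (hypothetical numbers):
 * assume TS_RQ_PPQ == 1, RQ_NQS == 64, tdq_idx == 60 and tdq_ridx == 58.  A
 * thread 10 priorities above PRI_MIN_TIMESHARE maps to slot
 * (10 + 60) % 64 == 6, away from ridx, so it is queued there.  Had the
 * result landed on tdq_ridx it would be pushed back one slot, keeping the
 * calendar queue effectively one slot short while old entries drain.
 * SRQ_BORROWING and SRQ_PREEMPTED insertions go straight to tdq_ridx
 * instead.
 */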
415 
416 /*
417  * Remove a thread from a run-queue.  This typically happens when a thread
418  * is selected to run.  Running threads are not on the queue and the
419  * transferable count does not reflect them.
420  */
421 static __inline void
422 tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
423 {
424 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
425 	KASSERT(ts->ts_runq != NULL,
426 	    ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread));
427 #ifdef SMP
428 	if (ts->ts_flags & TSF_XFERABLE) {
429 		tdq->tdq_transferable--;
430 		tdq->tdq_group->tdg_transferable--;
431 		ts->ts_flags &= ~TSF_XFERABLE;
432 	}
433 #endif
434 	if (ts->ts_runq == &tdq->tdq_timeshare) {
435 		if (tdq->tdq_idx != tdq->tdq_ridx)
436 			runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
437 		else
438 			runq_remove_idx(ts->ts_runq, ts, NULL);
439 		/*
440 		 * For timeshare threads we update the priority here so
441 		 * the priority reflects the time we've been sleeping.
442 		 */
443 		ts->ts_ltick = ticks;
444 		sched_pctcpu_update(ts);
445 		sched_priority(ts->ts_thread);
446 	} else
447 		runq_remove(ts->ts_runq, ts);
448 }
449 
450 /*
451  * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
452  * for this thread to the referenced thread queue.
453  */
454 static void
455 tdq_load_add(struct tdq *tdq, struct td_sched *ts)
456 {
457 	int class;
458 
459 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
460 	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
461 	class = PRI_BASE(ts->ts_thread->td_pri_class);
462 	tdq->tdq_load++;
463 	CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
464 	if (class != PRI_ITHD &&
465 	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
466 #ifdef SMP
467 		tdq->tdq_group->tdg_load++;
468 #else
469 		tdq->tdq_sysload++;
470 #endif
471 }
472 
473 /*
474  * Remove the load from a thread that is transitioning to a sleep state or
475  * exiting.
476  */
477 static void
478 tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
479 {
480 	int class;
481 
482 	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
483 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
484 	class = PRI_BASE(ts->ts_thread->td_pri_class);
485 	if (class != PRI_ITHD &&
486 	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
487 #ifdef SMP
488 		tdq->tdq_group->tdg_load--;
489 #else
490 		tdq->tdq_sysload--;
491 #endif
492 	KASSERT(tdq->tdq_load != 0,
493 	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
494 	tdq->tdq_load--;
495 	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
496 	ts->ts_runq = NULL;
497 }
498 
499 #ifdef SMP
500 /*
501  * sched_balance is a simple CPU load balancing algorithm.  It operates by
502  * finding the least loaded and most loaded cpu and equalizing their load
503  * by migrating some processes.
504  *
505  * Dealing only with two CPUs at a time has two advantages.  Firstly, most
506  * installations will only have 2 cpus.  Secondly, load balancing too much at
507  * once can have an unpleasant effect on the system.  The scheduler rarely has
508  * enough information to make perfect decisions.  So this algorithm chooses
509  * simplicity and more gradual effects on load in larger systems.
510  *
511  */
512 static void
513 sched_balance(void *arg)
514 {
515 	struct tdq_group *high;
516 	struct tdq_group *low;
517 	struct tdq_group *tdg;
518 	int cnt;
519 	int i;
520 
521 	callout_reset(&balco, max(hz / 2, random() % (hz * balance_secs)),
522 	    sched_balance, NULL);
523 	if (smp_started == 0 || rebalance == 0)
524 		return;
525 	low = high = NULL;
526 	i = random() % (tdg_maxid + 1);
527 	for (cnt = 0; cnt <= tdg_maxid; cnt++) {
528 		tdg = TDQ_GROUP(i);
529 		/*
530 		 * Find the group with the highest load that has some
531 		 * threads to transfer.
532 		 */
533 		if ((high == NULL || tdg->tdg_load > high->tdg_load)
534 		    && tdg->tdg_transferable)
535 			high = tdg;
536 		if (low == NULL || tdg->tdg_load < low->tdg_load)
537 			low = tdg;
538 		if (++i > tdg_maxid)
539 			i = 0;
540 	}
541 	if (low != NULL && high != NULL && high != low)
542 		sched_balance_pair(LIST_FIRST(&high->tdg_members),
543 		    LIST_FIRST(&low->tdg_members));
544 }
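/*
 * Example (assuming hz = 1000 and balance_secs = 1): the callout above
 * rearms between hz/2 (500) and hz - 1 (999) ticks out, and the scan starts
 * at a random group index rather than always at group 0.
 */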
545 
546 /*
547  * Balance load between CPUs in a group.  Will only migrate within the group.
548  */
549 static void
550 sched_balance_groups(void *arg)
551 {
552 	int i;
553 
554 	callout_reset(&gbalco, max(hz / 2, random() % (hz * balance_secs)),
555 	    sched_balance_groups, NULL);
556 	if (smp_started == 0 || rebalance == 0)
557 		return;
558 	for (i = 0; i <= tdg_maxid; i++)
559 		sched_balance_group(TDQ_GROUP(i));
560 }
561 
562 /*
563  * Finds the greatest imbalance between two tdqs in a group.
564  */
565 static void
566 sched_balance_group(struct tdq_group *tdg)
567 {
568 	struct tdq *tdq;
569 	struct tdq *high;
570 	struct tdq *low;
571 	int load;
572 
573 	if (tdg->tdg_transferable == 0)
574 		return;
575 	low = NULL;
576 	high = NULL;
577 	LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
578 		load = tdq->tdq_load;
579 		if (high == NULL || load > high->tdq_load)
580 			high = tdq;
581 		if (low == NULL || load < low->tdq_load)
582 			low = tdq;
583 	}
584 	if (high != NULL && low != NULL && high != low)
585 		sched_balance_pair(high, low);
586 }
587 
588 /*
589  * Lock two thread queues using their address to maintain lock order.
590  */
591 static void
592 tdq_lock_pair(struct tdq *one, struct tdq *two)
593 {
594 	if (one < two) {
595 		TDQ_LOCK(one);
596 		TDQ_LOCK_FLAGS(two, MTX_DUPOK);
597 	} else {
598 		TDQ_LOCK(two);
599 		TDQ_LOCK_FLAGS(one, MTX_DUPOK);
600 	}
601 }
602 
603 /*
604  * Transfer load between two imbalanced thread queues.
605  */
606 static void
607 sched_balance_pair(struct tdq *high, struct tdq *low)
608 {
609 	int transferable;
610 	int high_load;
611 	int low_load;
612 	int move;
613 	int diff;
614 	int i;
615 
616 	tdq_lock_pair(high, low);
617 	/*
618 	 * If we're transferring within a group we have to use this specific
619 	 * tdq's transferable count, otherwise we can steal from other members
620 	 * of the group.
621 	 */
622 	if (high->tdq_group == low->tdq_group) {
623 		transferable = high->tdq_transferable;
624 		high_load = high->tdq_load;
625 		low_load = low->tdq_load;
626 	} else {
627 		transferable = high->tdq_group->tdg_transferable;
628 		high_load = high->tdq_group->tdg_load;
629 		low_load = low->tdq_group->tdg_load;
630 	}
631 	/*
632 	 * Determine what the imbalance is and then adjust that to how many
633 	 * threads we actually have to give up (transferable).
634 	 */
635 	if (transferable != 0) {
636 		diff = high_load - low_load;
637 		move = diff / 2;
638 		if (diff & 0x1)
639 			move++;
640 		move = min(move, transferable);
641 		for (i = 0; i < move; i++)
642 			tdq_move(high, low);
643 	}
644 	TDQ_UNLOCK(high);
645 	TDQ_UNLOCK(low);
646 	return;
647 }
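/*
 * Example: with high_load == 5 and low_load == 2 the difference is 3, so
 * move rounds up to 2 and at most two threads are handed to tdq_move(),
 * subject to the transferable count.
 */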
648 
649 /*
650  * Move a thread from one thread queue to another.
651  */
652 static void
653 tdq_move(struct tdq *from, struct tdq *to)
654 {
655 	struct td_sched *ts;
656 	struct thread *td;
657 	struct tdq *tdq;
658 	int cpu;
659 
660 	tdq = from;
661 	cpu = TDQ_ID(to);
662 	ts = tdq_steal(tdq, 1);
663 	if (ts == NULL) {
664 		struct tdq_group *tdg;
665 
666 		tdg = tdq->tdq_group;
667 		LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
668 			if (tdq == from || tdq->tdq_transferable == 0)
669 				continue;
670 			ts = tdq_steal(tdq, 1);
671 			break;
672 		}
673 		if (ts == NULL)
674 			return;
675 	}
676 	if (tdq == to)
677 		return;
678 	td = ts->ts_thread;
679 	/*
680 	 * Although the run queue is locked the thread may be blocked.  Lock
681 	 * it to clear this.
682 	 */
683 	thread_lock(td);
684 	/* Drop recursive lock on from. */
685 	TDQ_UNLOCK(from);
686 	sched_rem(td);
687 	ts->ts_cpu = cpu;
688 	td->td_lock = TDQ_LOCKPTR(to);
689 	tdq_add(to, td, SRQ_YIELDING);
690 	tdq_notify(ts);
691 }
692 
693 /*
694  * This tdq has idled.  Try to steal a thread from another cpu and switch
695  * to it.
696  */
697 static int
698 tdq_idled(struct tdq *tdq)
699 {
700 	struct tdq_group *tdg;
701 	struct tdq *steal;
702 	struct td_sched *ts;
703 	struct thread *td;
704 	int highload;
705 	int highcpu;
706 	int load;
707 	int cpu;
708 
709 	/* We don't want to be preempted while we're iterating over tdqs */
710 	spinlock_enter();
711 	tdg = tdq->tdq_group;
712 	/*
713 	 * If we're in a cpu group, try to steal threads from another cpu in
714 	 * the group before idling.
715 	 */
716 	if (steal_htt && tdg->tdg_cpus > 1 && tdg->tdg_transferable) {
717 		LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) {
718 			if (steal == tdq || steal->tdq_transferable == 0)
719 				continue;
720 			TDQ_LOCK(steal);
721 			ts = tdq_steal(steal, 0);
722 			if (ts)
723 				goto steal;
724 			TDQ_UNLOCK(steal);
725 		}
726 	}
727 	for (;;) {
728 		if (steal_idle == 0)
729 			break;
730 		highcpu = 0;
731 		highload = 0;
732 		for (cpu = 0; cpu <= mp_maxid; cpu++) {
733 			if (CPU_ABSENT(cpu))
734 				continue;
735 			steal = TDQ_CPU(cpu);
736 			load = TDQ_CPU(cpu)->tdq_transferable;
737 			if (load < highload)
738 				continue;
739 			highload = load;
740 			highcpu = cpu;
741 		}
742 		if (highload < steal_thresh)
743 			break;
744 		steal = TDQ_CPU(highcpu);
745 		TDQ_LOCK(steal);
746 		if (steal->tdq_transferable >= steal_thresh &&
747 		    (ts = tdq_steal(steal, 1)) != NULL)
748 			goto steal;
749 		TDQ_UNLOCK(steal);
750 		break;
751 	}
752 	spinlock_exit();
753 	return (1);
754 steal:
755 	td = ts->ts_thread;
756 	thread_lock(td);
757 	spinlock_exit();
758 	MPASS(td->td_lock == TDQ_LOCKPTR(steal));
759 	TDQ_UNLOCK(steal);
760 	sched_rem(td);
761 	sched_setcpu(ts, PCPU_GET(cpuid), SRQ_YIELDING);
762 	tdq_add(tdq, td, SRQ_YIELDING);
763 	MPASS(td->td_lock == curthread->td_lock);
764 	mi_switch(SW_VOL, NULL);
765 	thread_unlock(curthread);
766 
767 	return (0);
768 }
769 
770 /*
771  * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
772  */
773 static void
774 tdq_notify(struct td_sched *ts)
775 {
776 	struct thread *ctd;
777 	struct pcpu *pcpu;
778 	int cpri;
779 	int pri;
780 	int cpu;
781 
782 	cpu = ts->ts_cpu;
783 	pri = ts->ts_thread->td_priority;
784 	pcpu = pcpu_find(cpu);
785 	ctd = pcpu->pc_curthread;
786 	cpri = ctd->td_priority;
787 
788 	/*
789 	 * If our priority is not better than the current priority there is
790 	 * nothing to do.
791 	 */
792 	if (pri > cpri)
793 		return;
794 	/*
795 	 * Always IPI idle.
796 	 */
797 	if (cpri > PRI_MIN_IDLE)
798 		goto sendipi;
799 	/*
800 	 * If we're realtime or better and there is timeshare or worse running
801 	 * send an IPI.
802 	 */
803 	if (pri < PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
804 		goto sendipi;
805 	/*
806 	 * Otherwise only IPI if we exceed the threshold.
807 	 */
808 	if (pri > preempt_thresh)
809 		return;
810 sendipi:
811 	ctd->td_flags |= TDF_NEEDRESCHED;
812 	ipi_selected(1 << cpu, IPI_PREEMPT);
813 }
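/*
 * Example of the policy above: a woken realtime-range thread always IPIs a
 * cpu that is idle or running a timeshare-range thread; a woken timeshare
 * thread only preempts when its priority is numerically at or below
 * preempt_thresh (PRI_MIN_KERN by default).
 */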
814 
815 /*
816  * Steals load from a timeshare queue.  Honors the rotating queue head
817  * index.
818  */
819 static struct td_sched *
820 runq_steal_from(struct runq *rq, u_char start)
821 {
822 	struct td_sched *ts;
823 	struct rqbits *rqb;
824 	struct rqhead *rqh;
825 	int first;
826 	int bit;
827 	int pri;
828 	int i;
829 
830 	rqb = &rq->rq_status;
831 	bit = start & (RQB_BPW -1);
832 	pri = 0;
833 	first = 0;
834 again:
835 	for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
836 		if (rqb->rqb_bits[i] == 0)
837 			continue;
838 		if (bit != 0) {
839 			for (pri = bit; pri < RQB_BPW; pri++)
840 				if (rqb->rqb_bits[i] & (1ul << pri))
841 					break;
842 			if (pri >= RQB_BPW)
843 				continue;
844 		} else
845 			pri = RQB_FFS(rqb->rqb_bits[i]);
846 		pri += (i << RQB_L2BPW);
847 		rqh = &rq->rq_queues[pri];
848 		TAILQ_FOREACH(ts, rqh, ts_procq) {
849 			if (first && THREAD_CAN_MIGRATE(ts->ts_thread))
850 				return (ts);
851 			first = 1;
852 		}
853 	}
854 	if (start != 0) {
855 		start = 0;
856 		goto again;
857 	}
858 
859 	return (NULL);
860 }
861 
862 /*
863  * Steals load from a standard linear queue.
864  */
865 static struct td_sched *
866 runq_steal(struct runq *rq)
867 {
868 	struct rqhead *rqh;
869 	struct rqbits *rqb;
870 	struct td_sched *ts;
871 	int word;
872 	int bit;
873 
874 	rqb = &rq->rq_status;
875 	for (word = 0; word < RQB_LEN; word++) {
876 		if (rqb->rqb_bits[word] == 0)
877 			continue;
878 		for (bit = 0; bit < RQB_BPW; bit++) {
879 			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
880 				continue;
881 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
882 			TAILQ_FOREACH(ts, rqh, ts_procq)
883 				if (THREAD_CAN_MIGRATE(ts->ts_thread))
884 					return (ts);
885 		}
886 	}
887 	return (NULL);
888 }
889 
890 /*
891  * Attempt to steal a thread in priority order from a thread queue.
892  */
893 static struct td_sched *
894 tdq_steal(struct tdq *tdq, int stealidle)
895 {
896 	struct td_sched *ts;
897 
898 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
899 	if ((ts = runq_steal(&tdq->tdq_realtime)) != NULL)
900 		return (ts);
901 	if ((ts = runq_steal_from(&tdq->tdq_timeshare, tdq->tdq_ridx)) != NULL)
902 		return (ts);
903 	if (stealidle)
904 		return (runq_steal(&tdq->tdq_idle));
905 	return (NULL);
906 }
907 
908 /*
909  * Sets the thread lock and ts_cpu to match the requested cpu.  Unlocks the
910  * current lock and returns with the assigned queue locked.  If this is
911  * via sched_switch() we leave the thread in a blocked state as an
912  * optimization.
913  */
914 static inline struct tdq *
915 sched_setcpu(struct td_sched *ts, int cpu, int flags)
916 {
917 	struct thread *td;
918 	struct tdq *tdq;
919 
920 	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
921 
922 	tdq = TDQ_CPU(cpu);
923 	td = ts->ts_thread;
924 	ts->ts_cpu = cpu;
925 
926 	/* If the lock matches just return the queue. */
927 	if (td->td_lock == TDQ_LOCKPTR(tdq))
928 		return (tdq);
929 #ifdef notyet
930 	/*
931 	 * If the thread isn't running, its lockptr is a
932 	 * turnstile or a sleepqueue.  We can just lock_set without
933 	 * blocking.
934 	 */
935 	if (TD_CAN_RUN(td)) {
936 		TDQ_LOCK(tdq);
937 		thread_lock_set(td, TDQ_LOCKPTR(tdq));
938 		return (tdq);
939 	}
940 #endif
941 	/*
942 	 * The hard case, migration, we need to block the thread first to
943 	 * prevent order reversals with other cpus locks.
944 	 */
945 	thread_lock_block(td);
946 	TDQ_LOCK(tdq);
947 	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
948 	return (tdq);
949 }
950 
951 /*
952  * Find the thread queue running the lowest priority thread.
953  */
954 static int
955 tdq_lowestpri(void)
956 {
957 	struct tdq *tdq;
958 	int lowpri;
959 	int lowcpu;
960 	int lowload;
961 	int load;
962 	int cpu;
963 	int pri;
964 
965 	lowload = 0;
966 	lowpri = lowcpu = 0;
967 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
968 		if (CPU_ABSENT(cpu))
969 			continue;
970 		tdq = TDQ_CPU(cpu);
971 		pri = tdq->tdq_lowpri;
972 		load = TDQ_CPU(cpu)->tdq_load;
973 		CTR4(KTR_ULE,
974 		    "cpu %d pri %d lowcpu %d lowpri %d",
975 		    cpu, pri, lowcpu, lowpri);
976 		if (pri < lowpri)
977 			continue;
978 		if (lowpri && lowpri == pri && load > lowload)
979 			continue;
980 		lowpri = pri;
981 		lowcpu = cpu;
982 		lowload = load;
983 	}
984 
985 	return (lowcpu);
986 }
987 
988 /*
989  * Find the thread queue with the least load.
990  */
991 static int
992 tdq_lowestload(void)
993 {
994 	struct tdq *tdq;
995 	int lowload;
996 	int lowpri;
997 	int lowcpu;
998 	int load;
999 	int cpu;
1000 	int pri;
1001 
1002 	lowcpu = 0;
1003 	lowload = TDQ_CPU(0)->tdq_load;
1004 	lowpri = TDQ_CPU(0)->tdq_lowpri;
1005 	for (cpu = 1; cpu <= mp_maxid; cpu++) {
1006 		if (CPU_ABSENT(cpu))
1007 			continue;
1008 		tdq = TDQ_CPU(cpu);
1009 		load = tdq->tdq_load;
1010 		pri = tdq->tdq_lowpri;
1011 		CTR4(KTR_ULE, "cpu %d load %d lowcpu %d lowload %d",
1012 		    cpu, load, lowcpu, lowload);
1013 		if (load > lowload)
1014 			continue;
1015 		if (load == lowload && pri < lowpri)
1016 			continue;
1017 		lowcpu = cpu;
1018 		lowload = load;
1019 		lowpri = pri;
1020 	}
1021 
1022 	return (lowcpu);
1023 }
1024 
1025 /*
1026  * Pick the destination cpu for sched_add().  Respects affinity and makes
1027  * a determination based on load or priority of available processors.
1028  */
1029 static int
1030 sched_pickcpu(struct td_sched *ts, int flags)
1031 {
1032 	struct tdq *tdq;
1033 	int self;
1034 	int pri;
1035 	int cpu;
1036 
1037 	cpu = self = PCPU_GET(cpuid);
1038 	if (smp_started == 0)
1039 		return (self);
1040 	/*
1041 	 * Don't migrate a running thread from sched_switch().
1042 	 */
1043 	if (flags & SRQ_OURSELF) {
1044 		CTR1(KTR_ULE, "YIELDING %d",
1045 		    curthread->td_priority);
1046 		return (self);
1047 	}
1048 	pri = ts->ts_thread->td_priority;
1049 	cpu = ts->ts_cpu;
1050 	/*
1051 	 * Regardless of affinity, if the last cpu is idle send it there.
1052 	 */
1053 	tdq = TDQ_CPU(cpu);
1054 	if (tdq->tdq_lowpri > PRI_MIN_IDLE) {
1055 		CTR5(KTR_ULE,
1056 		    "ts_cpu %d idle, ltick %d ticks %d pri %d curthread %d",
1057 		    ts->ts_cpu, ts->ts_rltick, ticks, pri,
1058 		    tdq->tdq_lowpri);
1059 		return (ts->ts_cpu);
1060 	}
1061 	/*
1062 	 * If we have affinity, try to place it on the cpu we last ran on.
1063 	 */
1064 	if (SCHED_AFFINITY(ts) && tdq->tdq_lowpri > pri) {
1065 		CTR5(KTR_ULE,
1066 		    "affinity for %d, ltick %d ticks %d pri %d curthread %d",
1067 		    ts->ts_cpu, ts->ts_rltick, ticks, pri,
1068 		    tdq->tdq_lowpri);
1069 		return (ts->ts_cpu);
1070 	}
1071 	/*
1072 	 * Look for an idle group.
1073 	 */
1074 	CTR1(KTR_ULE, "tdq_idle %X", tdq_idle);
1075 	cpu = ffs(tdq_idle);
1076 	if (cpu)
1077 		return (--cpu);
1078 	/*
1079 	 * If there are no idle cores see if we can run the thread locally.  This may
1080 	 * improve locality among sleepers and wakers when there is shared data.
1081 	 */
1082 	if (tryself && pri < curthread->td_priority) {
1083 		CTR1(KTR_ULE, "tryself %d",
1084 		    curthread->td_priority);
1085 		return (self);
1086 	}
1087 	/*
1088 	 * Now search for the cpu running the lowest priority thread with
1089 	 * the least load.
1090 	 */
1091 	if (pick_pri)
1092 		cpu = tdq_lowestpri();
1093 	else
1094 		cpu = tdq_lowestload();
1095 	return (cpu);
1096 }
1097 
1098 #endif	/* SMP */
1099 
1100 /*
1101  * Pick the highest priority task we have and return it.
1102  */
1103 static struct td_sched *
1104 tdq_choose(struct tdq *tdq)
1105 {
1106 	struct td_sched *ts;
1107 
1108 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1109 	ts = runq_choose(&tdq->tdq_realtime);
1110 	if (ts != NULL)
1111 		return (ts);
1112 	ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
1113 	if (ts != NULL) {
1114 		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE,
1115 		    ("tdq_choose: Invalid priority on timeshare queue %d",
1116 		    ts->ts_thread->td_priority));
1117 		return (ts);
1118 	}
1119 
1120 	ts = runq_choose(&tdq->tdq_idle);
1121 	if (ts != NULL) {
1122 		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE,
1123 		    ("tdq_choose: Invalid priority on idle queue %d",
1124 		    ts->ts_thread->td_priority));
1125 		return (ts);
1126 	}
1127 
1128 	return (NULL);
1129 }
1130 
1131 /*
1132  * Initialize a thread queue.
1133  */
1134 static void
1135 tdq_setup(struct tdq *tdq)
1136 {
1137 
1138 	if (bootverbose)
1139 		printf("ULE: setup cpu %d\n", TDQ_ID(tdq));
1140 	runq_init(&tdq->tdq_realtime);
1141 	runq_init(&tdq->tdq_timeshare);
1142 	runq_init(&tdq->tdq_idle);
1143 	tdq->tdq_load = 0;
1144 }
1145 
1146 #ifdef SMP
1147 static void
1148 tdg_setup(struct tdq_group *tdg)
1149 {
1150 	if (bootverbose)
1151 		printf("ULE: setup cpu group %d\n", TDG_ID(tdg));
1152 	snprintf(tdg->tdg_name, sizeof(tdg->tdg_name),
1153 	    "sched lock %d", (int)TDG_ID(tdg));
1154 	mtx_init(&tdg->tdg_lock, tdg->tdg_name, "sched lock",
1155 	    MTX_SPIN | MTX_RECURSE);
1156 	LIST_INIT(&tdg->tdg_members);
1157 	tdg->tdg_load = 0;
1158 	tdg->tdg_transferable = 0;
1159 	tdg->tdg_cpus = 0;
1160 	tdg->tdg_mask = 0;
1161 	tdg->tdg_cpumask = 0;
1162 	tdg->tdg_idlemask = 0;
1163 }
1164 
1165 static void
1166 tdg_add(struct tdq_group *tdg, struct tdq *tdq)
1167 {
1168 	if (tdg->tdg_mask == 0)
1169 		tdg->tdg_mask |= 1 << TDQ_ID(tdq);
1170 	tdg->tdg_cpumask |= 1 << TDQ_ID(tdq);
1171 	tdg->tdg_cpus++;
1172 	tdq->tdq_group = tdg;
1173 	tdq->tdq_lock = &tdg->tdg_lock;
1174 	LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings);
1175 	if (bootverbose)
1176 		printf("ULE: adding cpu %d to group %d: cpus %d mask 0x%X\n",
1177 		    TDQ_ID(tdq), TDG_ID(tdg), tdg->tdg_cpus, tdg->tdg_cpumask);
1178 }
1179 
1180 static void
1181 sched_setup_topology(void)
1182 {
1183 	struct tdq_group *tdg;
1184 	struct cpu_group *cg;
1185 	int balance_groups;
1186 	struct tdq *tdq;
1187 	int i;
1188 	int j;
1189 
1190 	topology = 1;
1191 	balance_groups = 0;
1192 	for (i = 0; i < smp_topology->ct_count; i++) {
1193 		cg = &smp_topology->ct_group[i];
1194 		tdg = &tdq_groups[i];
1195 		/*
1196 		 * Initialize the group.
1197 		 */
1198 		tdg_setup(tdg);
1199 		/*
1200 		 * Find all of the group members and add them.
1201 		 */
1202 		for (j = 0; j < MAXCPU; j++) {
1203 			if ((cg->cg_mask & (1 << j)) != 0) {
1204 				tdq = TDQ_CPU(j);
1205 				tdq_setup(tdq);
1206 				tdg_add(tdg, tdq);
1207 			}
1208 		}
1209 		if (tdg->tdg_cpus > 1)
1210 			balance_groups = 1;
1211 	}
1212 	tdg_maxid = smp_topology->ct_count - 1;
1213 	if (balance_groups)
1214 		sched_balance_groups(NULL);
1215 }
1216 
1217 static void
1218 sched_setup_smp(void)
1219 {
1220 	struct tdq_group *tdg;
1221 	struct tdq *tdq;
1222 	int cpus;
1223 	int i;
1224 
1225 	for (cpus = 0, i = 0; i < MAXCPU; i++) {
1226 		if (CPU_ABSENT(i))
1227 			continue;
1228 		tdq = &tdq_cpu[i];
1229 		tdg = &tdq_groups[i];
1230 		/*
1231 		 * Setup a tdq group with one member.
1232 		 */
1233 		tdg_setup(tdg);
1234 		tdq_setup(tdq);
1235 		tdg_add(tdg, tdq);
1236 		cpus++;
1237 	}
1238 	tdg_maxid = cpus - 1;
1239 }
1240 
1241 /*
1242  * Fake a topology with one group containing all CPUs.
1243  */
1244 static void
1245 sched_fake_topo(void)
1246 {
1247 #ifdef SCHED_FAKE_TOPOLOGY
1248 	static struct cpu_top top;
1249 	static struct cpu_group group;
1250 
1251 	top.ct_count = 1;
1252 	top.ct_group = &group;
1253 	group.cg_mask = all_cpus;
1254 	group.cg_count = mp_ncpus;
1255 	group.cg_children = 0;
1256 	smp_topology = &top;
1257 #endif
1258 }
1259 #endif
1260 
1261 /*
1262  * Setup the thread queues and initialize the topology based on MD
1263  * information.
1264  */
1265 static void
1266 sched_setup(void *dummy)
1267 {
1268 	struct tdq *tdq;
1269 
1270 	tdq = TDQ_SELF();
1271 #ifdef SMP
1272 	/*
1273 	 * Initialize long-term cpu balancing algorithm.
1274 	 */
1275 	callout_init(&balco, CALLOUT_MPSAFE);
1276 	callout_init(&gbalco, CALLOUT_MPSAFE);
1277 	sched_fake_topo();
1278 	/*
1279 	 * Setup tdqs based on a topology configuration or vanilla SMP based
1280 	 * on mp_maxid.
1281 	 */
1282 	if (smp_topology == NULL)
1283 		sched_setup_smp();
1284 	else
1285 		sched_setup_topology();
1286 	sched_balance(NULL);
1287 #else
1288 	tdq_setup(tdq);
1289 	mtx_init(&tdq_lock, "sched lock", "sched lock", MTX_SPIN | MTX_RECURSE);
1290 	tdq->tdq_lock = &tdq_lock;
1291 #endif
1292 	/*
1293 	 * To avoid divide-by-zero, we set realstathz to a dummy value
1294 	 * in case sched_clock() is called before sched_initticks().
1295 	 */
1296 	realstathz = hz;
1297 	sched_slice = (realstathz/10);	/* ~100ms */
1298 	tickincr = 1 << SCHED_TICK_SHIFT;
1299 
1300 	/* Add thread0's load since it's running. */
1301 	TDQ_LOCK(tdq);
1302 	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
1303 	tdq_load_add(tdq, &td_sched0);
1304 	TDQ_UNLOCK(tdq);
1305 }
1306 
1307 /*
1308  * This routine determines the tickincr after stathz and hz are setup.
1309  */
1310 /* ARGSUSED */
1311 static void
1312 sched_initticks(void *dummy)
1313 {
1314 	int incr;
1315 
1316 	realstathz = stathz ? stathz : hz;
1317 	sched_slice = (realstathz/10);	/* ~100ms */
1318 
1319 	/*
1320 	 * tickincr is shifted left by SCHED_TICK_SHIFT (10) to avoid rounding
1321 	 * errors due to hz not being evenly divisible by stathz on all platforms.
1322 	 */
1323 	incr = (hz << SCHED_TICK_SHIFT) / realstathz;
1324 	/*
1325 	 * This does not work for values of stathz that are more than
1326 	 * (1 << SCHED_TICK_SHIFT) * hz.  In practice this does not happen.
1327 	 */
1328 	if (incr == 0)
1329 		incr = 1;
1330 	tickincr = incr;
1331 #ifdef SMP
1332 	affinity = SCHED_AFFINITY_DEFAULT;
1333 #endif
1334 }
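/*
 * Worked example (assuming hz = 1000 and stathz = 128): realstathz is 128,
 * sched_slice becomes 12 stathz ticks (~94ms) and tickincr is
 * (1000 << 10) / 128 = 8000, so each stathz tick charges the running thread
 * 8000 shifted units, i.e. about 7.8 hz ticks once SCHED_TICK_HZ() removes
 * the shift.
 */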
1335 
1336 
1337 /*
1338  * This is the core of the interactivity algorithm.  Determines a score based
1339  * on past behavior.  It is the ratio of sleep time to run time scaled to
1340  * a [0, 100] integer.  This is the voluntary sleep time of a process, which
1341  * differs from the cpu usage because it does not account for time spent
1342  * waiting on a run-queue.  Would be prettier if we had floating point.
1343  */
1344 static int
1345 sched_interact_score(struct thread *td)
1346 {
1347 	struct td_sched *ts;
1348 	int div;
1349 
1350 	ts = td->td_sched;
1351 	/*
1352 	 * The score is only needed if this is likely to be an interactive
1353 	 * task.  Don't go through the expense of computing it if there's
1354 	 * no chance.
1355 	 */
1356 	if (sched_interact <= SCHED_INTERACT_HALF &&
1357 		ts->ts_runtime >= ts->ts_slptime)
1358 			return (SCHED_INTERACT_HALF);
1359 
1360 	if (ts->ts_runtime > ts->ts_slptime) {
1361 		div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
1362 		return (SCHED_INTERACT_HALF +
1363 		    (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
1364 	}
1365 	if (ts->ts_slptime > ts->ts_runtime) {
1366 		div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
1367 		return (ts->ts_runtime / div);
1368 	}
1369 	/* runtime == slptime */
1370 	if (ts->ts_runtime)
1371 		return (SCHED_INTERACT_HALF);
1372 
1373 	/*
1374 	 * This can happen if slptime and runtime are 0.
1375 	 */
1376 	return (0);
1377 
1378 }
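/*
 * Worked example: a thread with 4 seconds of voluntary sleep and 1 second of
 * run time in its history has slptime > runtime, so the score is roughly
 * 50 * runtime / slptime, about 12 here, comfortably below the default
 * threshold of 30 and therefore interactive.  Reversing the mix (4s running,
 * 1s sleeping) scores about 88 and the thread is treated as cpu-bound.
 */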
1379 
1380 /*
1381  * Scale the scheduling priority according to the "interactivity" of this
1382  * process.
1383  */
1384 static void
1385 sched_priority(struct thread *td)
1386 {
1387 	int score;
1388 	int pri;
1389 
1390 	if (td->td_pri_class != PRI_TIMESHARE)
1391 		return;
1392 	/*
1393 	 * If the score is interactive we place the thread in the realtime
1394 	 * queue with a priority that is less than kernel and interrupt
1395 	 * priorities.  These threads are not subject to nice restrictions.
1396 	 *
1397 	 * Scores greater than this are placed on the normal timeshare queue
1398 	 * where the priority is partially decided by the most recent cpu
1399 	 * utilization and the rest is decided by nice value.
1400 	 */
1401 	score = sched_interact_score(td);
1402 	if (score < sched_interact) {
1403 		pri = PRI_MIN_REALTIME;
1404 		pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact)
1405 		    * score;
1406 		KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
1407 		    ("sched_priority: invalid interactive priority %d score %d",
1408 		    pri, score));
1409 	} else {
1410 		pri = SCHED_PRI_MIN;
1411 		if (td->td_sched->ts_ticks)
1412 			pri += SCHED_PRI_TICKS(td->td_sched);
1413 		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
1414 		KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE,
1415 		    ("sched_priority: invalid priority %d: nice %d, "
1416 		    "ticks %d ftick %d ltick %d tick pri %d",
1417 		    pri, td->td_proc->p_nice, td->td_sched->ts_ticks,
1418 		    td->td_sched->ts_ftick, td->td_sched->ts_ltick,
1419 		    SCHED_PRI_TICKS(td->td_sched)));
1420 	}
1421 	sched_user_prio(td, pri);
1422 
1423 	return;
1424 }
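/*
 * Worked example: with the default sched_interact of 30, an interactive
 * score of 15 lands half way up the realtime range, i.e. PRI_MIN_REALTIME +
 * ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / 30) * 15.  A non-interactive
 * thread instead starts at SCHED_PRI_MIN and pays SCHED_PRI_TICKS() for
 * recent cpu use plus SCHED_PRI_NICE() for its nice value.
 */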
1425 
1426 /*
1427  * This routine enforces a maximum limit on the amount of scheduling history
1428  * kept.  It is called after either the slptime or runtime is adjusted.  This
1429  * function is ugly due to integer math.
1430  */
1431 static void
1432 sched_interact_update(struct thread *td)
1433 {
1434 	struct td_sched *ts;
1435 	u_int sum;
1436 
1437 	ts = td->td_sched;
1438 	sum = ts->ts_runtime + ts->ts_slptime;
1439 	if (sum < SCHED_SLP_RUN_MAX)
1440 		return;
1441 	/*
1442 	 * This only happens from two places:
1443 	 * 1) We have added an unusual amount of run time from fork_exit.
1444 	 * 2) We have added an unusual amount of sleep time from sched_sleep().
1445 	 */
1446 	if (sum > SCHED_SLP_RUN_MAX * 2) {
1447 		if (ts->ts_runtime > ts->ts_slptime) {
1448 			ts->ts_runtime = SCHED_SLP_RUN_MAX;
1449 			ts->ts_slptime = 1;
1450 		} else {
1451 			ts->ts_slptime = SCHED_SLP_RUN_MAX;
1452 			ts->ts_runtime = 1;
1453 		}
1454 		return;
1455 	}
1456 	/*
1457 	 * If we have exceeded by more than 1/5th then the algorithm below
1458 	 * will not bring us back into range.  Dividing by two here forces
1459 	 * us into the range of [4/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX]
1460 	 */
1461 	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1462 		ts->ts_runtime /= 2;
1463 		ts->ts_slptime /= 2;
1464 		return;
1465 	}
1466 	ts->ts_runtime = (ts->ts_runtime / 5) * 4;
1467 	ts->ts_slptime = (ts->ts_slptime / 5) * 4;
1468 }
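/*
 * Example: once runtime + slptime reaches SCHED_SLP_RUN_MAX (five seconds of
 * history at hz = 1000), both are scaled by 4/5.  The ratio, and therefore
 * the interactivity score, is preserved while the amount of history a thread
 * can accumulate stays bounded.
 */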
1469 
1470 /*
1471  * Scale back the interactivity history when a child thread is created.  The
1472  * history is inherited from the parent but the thread may behave totally
1473  * differently.  For example, a shell spawning a compiler process.  We want
1474  * to learn that the compiler is behaving badly very quickly.
1475  */
1476 static void
1477 sched_interact_fork(struct thread *td)
1478 {
1479 	int ratio;
1480 	int sum;
1481 
1482 	sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime;
1483 	if (sum > SCHED_SLP_RUN_FORK) {
1484 		ratio = sum / SCHED_SLP_RUN_FORK;
1485 		td->td_sched->ts_runtime /= ratio;
1486 		td->td_sched->ts_slptime /= ratio;
1487 	}
1488 }
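/*
 * Example (assuming hz = 1000): a parent with 4 seconds of combined history
 * exceeds SCHED_SLP_RUN_FORK (half a second) by a ratio of 8, so the child's
 * inherited runtime and slptime are each divided by 8 and its score quickly
 * comes to reflect its own behavior.
 */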
1489 
1490 /*
1491  * Called from proc0_init() to setup the scheduler fields.
1492  */
1493 void
1494 schedinit(void)
1495 {
1496 
1497 	/*
1498 	 * Set up the scheduler specific parts of proc0.
1499 	 */
1500 	proc0.p_sched = NULL; /* XXX */
1501 	thread0.td_sched = &td_sched0;
1502 	td_sched0.ts_ltick = ticks;
1503 	td_sched0.ts_ftick = ticks;
1504 	td_sched0.ts_thread = &thread0;
1505 }
1506 
1507 /*
1508  * This is only somewhat accurate since, given many processes of the same
1509  * priority, they will switch when their slices run out, which will be
1510  * at most sched_slice stathz ticks.
1511  */
1512 int
1513 sched_rr_interval(void)
1514 {
1515 
1516 	/* Convert sched_slice to hz */
1517 	return (hz/(realstathz/sched_slice));
1518 }
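/*
 * Worked example: with realstathz = 128 and sched_slice = 12 this returns
 * hz / (128 / 12) = hz / 10, i.e. about 100ms at hz = 1000.
 */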
1519 
1520 /*
1521  * Update the percent cpu tracking information when it is requested or
1522  * the total history exceeds the maximum.  We keep a sliding history of
1523  * tick counts that slowly decays.  This is less precise than the 4BSD
1524  * mechanism since it happens with less regular and frequent events.
1525  */
1526 static void
1527 sched_pctcpu_update(struct td_sched *ts)
1528 {
1529 
1530 	if (ts->ts_ticks == 0)
1531 		return;
1532 	if (ticks - (hz / 10) < ts->ts_ltick &&
1533 	    SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
1534 		return;
1535 	/*
1536 	 * Adjust counters and watermark for pctcpu calc.
1537 	 */
1538 	if (ts->ts_ltick > ticks - SCHED_TICK_TARG)
1539 		ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
1540 			    SCHED_TICK_TARG;
1541 	else
1542 		ts->ts_ticks = 0;
1543 	ts->ts_ltick = ticks;
1544 	ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG;
1545 }
1546 
1547 /*
1548  * Adjust the priority of a thread.  Move it to the appropriate run-queue
1549  * if necessary.  This is the back-end for several priority related
1550  * functions.
1551  */
1552 static void
1553 sched_thread_priority(struct thread *td, u_char prio)
1554 {
1555 	struct td_sched *ts;
1556 
1557 	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
1558 	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
1559 	    curthread->td_proc->p_comm);
1560 	ts = td->td_sched;
1561 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1562 	if (td->td_priority == prio)
1563 		return;
1564 
1565 	if (TD_ON_RUNQ(td) && prio < td->td_priority) {
1566 		/*
1567 		 * If the priority has been elevated due to priority
1568 		 * propagation, we may have to move ourselves to a new
1569 		 * queue.  This could be optimized to not re-add in some
1570 		 * cases.
1571 		 */
1572 		sched_rem(td);
1573 		td->td_priority = prio;
1574 		sched_add(td, SRQ_BORROWING);
1575 	} else {
1576 #ifdef SMP
1577 		struct tdq *tdq;
1578 
1579 		tdq = TDQ_CPU(ts->ts_cpu);
1580 		if (prio < tdq->tdq_lowpri)
1581 			tdq->tdq_lowpri = prio;
1582 #endif
1583 		td->td_priority = prio;
1584 	}
1585 }
1586 
1587 /*
1588  * Update a thread's priority when it is lent another thread's
1589  * priority.
1590  */
1591 void
1592 sched_lend_prio(struct thread *td, u_char prio)
1593 {
1594 
1595 	td->td_flags |= TDF_BORROWING;
1596 	sched_thread_priority(td, prio);
1597 }
1598 
1599 /*
1600  * Restore a thread's priority when priority propagation is
1601  * over.  The prio argument is the minimum priority the thread
1602  * needs to have to satisfy other possible priority lending
1603  * requests.  If the thread's regular priority is less
1604  * important than prio, the thread will keep a priority boost
1605  * of prio.
1606  */
1607 void
1608 sched_unlend_prio(struct thread *td, u_char prio)
1609 {
1610 	u_char base_pri;
1611 
1612 	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1613 	    td->td_base_pri <= PRI_MAX_TIMESHARE)
1614 		base_pri = td->td_user_pri;
1615 	else
1616 		base_pri = td->td_base_pri;
1617 	if (prio >= base_pri) {
1618 		td->td_flags &= ~TDF_BORROWING;
1619 		sched_thread_priority(td, base_pri);
1620 	} else
1621 		sched_lend_prio(td, prio);
1622 }
1623 
1624 /*
1625  * Standard entry for setting the priority to an absolute value.
1626  */
1627 void
1628 sched_prio(struct thread *td, u_char prio)
1629 {
1630 	u_char oldprio;
1631 
1632 	/* First, update the base priority. */
1633 	td->td_base_pri = prio;
1634 
1635 	/*
1636 	 * If the thread is borrowing another thread's priority, don't
1637 	 * ever lower the priority.
1638 	 */
1639 	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1640 		return;
1641 
1642 	/* Change the real priority. */
1643 	oldprio = td->td_priority;
1644 	sched_thread_priority(td, prio);
1645 
1646 	/*
1647 	 * If the thread is on a turnstile, then let the turnstile update
1648 	 * its state.
1649 	 */
1650 	if (TD_ON_LOCK(td) && oldprio != prio)
1651 		turnstile_adjust(td, oldprio);
1652 }
1653 
1654 /*
1655  * Set the base user priority; does not affect the current running priority.
1656  */
1657 void
1658 sched_user_prio(struct thread *td, u_char prio)
1659 {
1660 	u_char oldprio;
1661 
1662 	td->td_base_user_pri = prio;
1663 	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
1664                 return;
1665 	oldprio = td->td_user_pri;
1666 	td->td_user_pri = prio;
1667 
1668 	if (TD_ON_UPILOCK(td) && oldprio != prio)
1669 		umtx_pi_adjust(td, oldprio);
1670 }
1671 
1672 void
1673 sched_lend_user_prio(struct thread *td, u_char prio)
1674 {
1675 	u_char oldprio;
1676 
1677 	td->td_flags |= TDF_UBORROWING;
1678 
1679 	oldprio = td->td_user_pri;
1680 	td->td_user_pri = prio;
1681 
1682 	if (TD_ON_UPILOCK(td) && oldprio != prio)
1683 		umtx_pi_adjust(td, oldprio);
1684 }
1685 
1686 void
1687 sched_unlend_user_prio(struct thread *td, u_char prio)
1688 {
1689 	u_char base_pri;
1690 
1691 	base_pri = td->td_base_user_pri;
1692 	if (prio >= base_pri) {
1693 		td->td_flags &= ~TDF_UBORROWING;
1694 		sched_user_prio(td, base_pri);
1695 	} else
1696 		sched_lend_user_prio(td, prio);
1697 }
1698 
1699 /*
1700  * Add the thread passed as 'newtd' to the run queue before selecting
1701  * the next thread to run.  This is only used for KSE.
1702  */
1703 static void
1704 sched_switchin(struct tdq *tdq, struct thread *td)
1705 {
1706 #ifdef SMP
1707 	spinlock_enter();
1708 	TDQ_UNLOCK(tdq);
1709 	thread_lock(td);
1710 	spinlock_exit();
1711 	sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING);
1712 #else
1713 	td->td_lock = TDQ_LOCKPTR(tdq);
1714 #endif
1715 	tdq_add(tdq, td, SRQ_YIELDING);
1716 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1717 }
1718 
1719 /*
1720  * Handle migration from sched_switch().  This happens only for
1721  * cpu binding.
1722  */
1723 static struct mtx *
1724 sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
1725 {
1726 	struct tdq *tdn;
1727 
1728 	tdn = TDQ_CPU(td->td_sched->ts_cpu);
1729 #ifdef SMP
1730 	/*
1731 	 * Do the lock dance required to avoid LOR.  We grab an extra
1732 	 * spinlock nesting to prevent preemption while we're
1733 	 * not holding either run-queue lock.
1734 	 */
1735 	spinlock_enter();
1736 	thread_block_switch(td);	/* This releases the lock on tdq. */
1737 	TDQ_LOCK(tdn);
1738 	tdq_add(tdn, td, flags);
1739 	tdq_notify(td->td_sched);
1740 	/*
1741 	 * After we unlock tdn the new cpu still can't switch into this
1742 	 * thread until we've unblocked it in cpu_switch().  The lock
1743 	 * pointers may match in the case of HTT cores.  Don't unlock here
1744 	 * or we can deadlock when the other CPU runs the IPI handler.
1745 	 */
1746 	if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) {
1747 		TDQ_UNLOCK(tdn);
1748 		TDQ_LOCK(tdq);
1749 	}
1750 	spinlock_exit();
1751 #endif
1752 	return (TDQ_LOCKPTR(tdn));
1753 }
1754 
1755 /*
1756  * Block a thread for switching.  Similar to thread_block() but does not
1757  * bump the spin count.
1758  */
1759 static inline struct mtx *
1760 thread_block_switch(struct thread *td)
1761 {
1762 	struct mtx *lock;
1763 
1764 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1765 	lock = td->td_lock;
1766 	td->td_lock = &blocked_lock;
1767 	mtx_unlock_spin(lock);
1768 
1769 	return (lock);
1770 }
1771 
1772 /*
1773  * Release a thread that was blocked with thread_block_switch().
1774  */
1775 static inline void
1776 thread_unblock_switch(struct thread *td, struct mtx *mtx)
1777 {
1778 	atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock,
1779 	    (uintptr_t)mtx);
1780 }
1781 
1782 /*
1783  * Switch threads.  This function has to handle threads coming in while
1784  * blocked for some reason, running, or idle.  It also must deal with
1785  * migrating a thread from one queue to another as running threads may
1786  * be assigned elsewhere via binding.
1787  */
1788 void
1789 sched_switch(struct thread *td, struct thread *newtd, int flags)
1790 {
1791 	struct tdq *tdq;
1792 	struct td_sched *ts;
1793 	struct mtx *mtx;
1794 	int srqflag;
1795 	int cpuid;
1796 
1797 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1798 
1799 	cpuid = PCPU_GET(cpuid);
1800 	tdq = TDQ_CPU(cpuid);
1801 	ts = td->td_sched;
1802 	mtx = td->td_lock;
1803 #ifdef SMP
1804 	ts->ts_rltick = ticks;
1805 	if (newtd && newtd->td_priority < tdq->tdq_lowpri)
1806 		tdq->tdq_lowpri = newtd->td_priority;
1807 #endif
1808 	td->td_lastcpu = td->td_oncpu;
1809 	td->td_oncpu = NOCPU;
1810 	td->td_flags &= ~TDF_NEEDRESCHED;
1811 	td->td_owepreempt = 0;
1812 	/*
1813 	 * The lock pointer in an idle thread should never change.  Reset the
1814 	 * thread state to CAN_RUN as well.
1815 	 */
1816 	if (TD_IS_IDLETHREAD(td)) {
1817 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1818 		TD_SET_CAN_RUN(td);
1819 	} else if (TD_IS_RUNNING(td)) {
1820 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1821 		tdq_load_rem(tdq, ts);
1822 		srqflag = (flags & SW_PREEMPT) ?
1823 		    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1824 		    SRQ_OURSELF|SRQ_YIELDING;
1825 		if (ts->ts_cpu == cpuid)
1826 			tdq_add(tdq, td, srqflag);
1827 		else
1828 			mtx = sched_switch_migrate(tdq, td, srqflag);
1829 	} else {
1830 		/* This thread must be going to sleep. */
1831 		TDQ_LOCK(tdq);
1832 		mtx = thread_block_switch(td);
1833 		tdq_load_rem(tdq, ts);
1834 	}
1835 	/*
1836 	 * We enter here with the thread blocked and assigned to the
1837 	 * appropriate cpu run-queue or sleep-queue and with the current
1838 	 * thread-queue locked.
1839 	 */
1840 	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
1841 	/*
1842 	 * If KSE assigned a new thread just add it here and let choosethread
1843 	 * select the best one.
1844 	 */
1845 	if (newtd != NULL)
1846 		sched_switchin(tdq, newtd);
1847 	newtd = choosethread();
1848 	/*
1849 	 * Call the MD code to switch contexts if necessary.
1850 	 */
1851 	if (td != newtd) {
1852 #ifdef	HWPMC_HOOKS
1853 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1854 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1855 #endif
1856 		cpu_switch(td, newtd, mtx);
1857 		/*
1858 		 * We may return from cpu_switch on a different cpu.  However,
1859 		 * we always return with td_lock pointing to the current cpu's
1860 		 * run queue lock.
1861 		 */
1862 		cpuid = PCPU_GET(cpuid);
1863 		tdq = TDQ_CPU(cpuid);
1864 		TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)td;
1865 #ifdef	HWPMC_HOOKS
1866 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1867 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1868 #endif
1869 	} else
1870 		thread_unblock_switch(td, mtx);
1871 	/*
1872 	 * Assert that all went well and return.
1873 	 */
1874 #ifdef SMP
1875 	/* We should always get here with the lowest priority td possible */
1876 	tdq->tdq_lowpri = td->td_priority;
1877 #endif
1878 	TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED);
1879 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1880 	td->td_oncpu = cpuid;
1881 }
1882 
1883 /*
1884  * Adjust thread priorities as a result of a nice request.
1885  */
1886 void
1887 sched_nice(struct proc *p, int nice)
1888 {
1889 	struct thread *td;
1890 
1891 	PROC_LOCK_ASSERT(p, MA_OWNED);
1892 	PROC_SLOCK_ASSERT(p, MA_OWNED);
1893 
1894 	p->p_nice = nice;
1895 	FOREACH_THREAD_IN_PROC(p, td) {
1896 		thread_lock(td);
1897 		sched_priority(td);
1898 		sched_prio(td, td->td_base_user_pri);
1899 		thread_unlock(td);
1900 	}
1901 }
1902 
1903 /*
1904  * Record the sleep time for the interactivity scorer.
1905  */
1906 void
1907 sched_sleep(struct thread *td)
1908 {
1909 
1910 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1911 
1912 	td->td_sched->ts_slptick = ticks;
1913 }
1914 
1915 /*
1916  * Schedule a thread to resume execution and record how long it voluntarily
1917  * slept.  We also update the pctcpu, interactivity, and priority.
1918  */
1919 void
1920 sched_wakeup(struct thread *td)
1921 {
1922 	struct td_sched *ts;
1923 	int slptick;
1924 
1925 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1926 	ts = td->td_sched;
1927 	/*
1928 	 * If we slept for more than a tick, update our interactivity and
1929 	 * priority.
1930 	 */
1931 	slptick = ts->ts_slptick;
1932 	ts->ts_slptick = 0;
1933 	if (slptick && slptick != ticks) {
1934 		u_int hzticks;
1935 
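		/*
		 * Scale the elapsed ticks into the same fixed-point units as
		 * ts_runtime so sched_interact_update() compares like terms.
		 */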
1936 		hzticks = (ticks - slptick) << SCHED_TICK_SHIFT;
1937 		ts->ts_slptime += hzticks;
1938 		sched_interact_update(td);
1939 		sched_pctcpu_update(ts);
1940 		sched_priority(td);
1941 	}
1942 	/* Reset the slice value after we sleep. */
1943 	ts->ts_slice = sched_slice;
1944 	sched_add(td, SRQ_BORING);
1945 }
1946 
1947 /*
1948  * Penalize the parent for creating a new child and initialize the child's
1949  * priority.
1950  */
1951 void
1952 sched_fork(struct thread *td, struct thread *child)
1953 {
1954 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1955 	sched_fork_thread(td, child);
1956 	/*
1957 	 * Penalize the parent and child for forking.
1958 	 */
1959 	sched_interact_fork(child);
1960 	sched_priority(child);
1961 	td->td_sched->ts_runtime += tickincr;
1962 	sched_interact_update(td);
1963 	sched_priority(td);
1964 }
1965 
1966 /*
1967  * Fork a new thread, which may be within the same process.
1968  */
1969 void
1970 sched_fork_thread(struct thread *td, struct thread *child)
1971 {
1972 	struct td_sched *ts;
1973 	struct td_sched *ts2;
1974 
1975 	/*
1976 	 * Initialize child.
1977 	 */
1978 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1979 	sched_newthread(child);
1980 	child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
1981 	ts = td->td_sched;
1982 	ts2 = child->td_sched;
1983 	ts2->ts_cpu = ts->ts_cpu;
1984 	ts2->ts_runq = NULL;
1985 	/*
1986 	 * Grab our parent's cpu estimation information and priority.
1987 	 */
1988 	ts2->ts_ticks = ts->ts_ticks;
1989 	ts2->ts_ltick = ts->ts_ltick;
1990 	ts2->ts_ftick = ts->ts_ftick;
1991 	child->td_user_pri = td->td_user_pri;
1992 	child->td_base_user_pri = td->td_base_user_pri;
1993 	/*
1994 	 * And update interactivity score.
1995 	 */
1996 	ts2->ts_slptime = ts->ts_slptime;
1997 	ts2->ts_runtime = ts->ts_runtime;
1998 	ts2->ts_slice = 1;	/* Attempt to quickly learn interactivity. */
1999 }
2000 
2001 /*
2002  * Adjust the priority class of a thread.
2003  */
2004 void
2005 sched_class(struct thread *td, int class)
2006 {
2007 
2008 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2009 	if (td->td_pri_class == class)
2010 		return;
2011 
2012 #ifdef SMP
2013 	/*
2014 	 * On SMP, if we're on the RUNQ we must adjust the transferable
2015 	 * count because we could be changing to or from an interrupt
2016 	 * class.
2017 	 */
2018 	if (TD_ON_RUNQ(td)) {
2019 		struct tdq *tdq;
2020 
2021 		tdq = TDQ_CPU(td->td_sched->ts_cpu);
2022 		if (THREAD_CAN_MIGRATE(td)) {
2023 			tdq->tdq_transferable--;
2024 			tdq->tdq_group->tdg_transferable--;
2025 		}
2026 		td->td_pri_class = class;
2027 		if (THREAD_CAN_MIGRATE(td)) {
2028 			tdq->tdq_transferable++;
2029 			tdq->tdq_group->tdg_transferable++;
2030 		}
2031 	}
2032 #endif
2033 	td->td_pri_class = class;
2034 }
2035 
2036 /*
2037  * Return some of the child's priority and interactivity to the parent.
2038  */
2039 void
2040 sched_exit(struct proc *p, struct thread *child)
2041 {
2042 	struct thread *td;
2043 
2044 	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
2045 	    child, child->td_proc->p_comm, child->td_priority);
2046 
2047 	PROC_SLOCK_ASSERT(p, MA_OWNED);
2048 	td = FIRST_THREAD_IN_PROC(p);
2049 	sched_exit_thread(td, child);
2050 }
2051 
2052 /*
2053  * Penalize another thread for the time spent on this one.  This helps to
2054  * worsen the priority and interactivity of processes which schedule batch
2055  * jobs such as make.  This has little effect on the make process itself but
2056  * causes new processes spawned by it to receive worse scores immediately.
2057  */
2058 void
2059 sched_exit_thread(struct thread *td, struct thread *child)
2060 {
2061 
2062 	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
2063 	    child, child->td_proc->p_comm, child->td_priority);
2064 
2065 #ifdef KSE
2066 	/*
2067 	 * KSE forks and exits so often that this penalty causes short-lived
2068 	 * threads to always be non-interactive.  This causes mozilla to
2069 	 * crawl under load.
2070 	 */
2071 	if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc)
2072 		return;
2073 #endif
2074 	/*
2075 	 * Give the child's runtime to the parent without returning the
2076 	 * sleep time as a penalty to the parent.  This causes shells that
2077 	 * launch expensive things to mark their children as expensive.
2078 	 */
2079 	thread_lock(td);
2080 	td->td_sched->ts_runtime += child->td_sched->ts_runtime;
2081 	sched_interact_update(td);
2082 	sched_priority(td);
2083 	thread_unlock(td);
2084 }
2085 
2086 /*
2087  * Fix priorities on return to user-space.  Priorities may be elevated due
2088  * to static priorities in msleep() or similar.
2089  */
2090 void
2091 sched_userret(struct thread *td)
2092 {
2093 	/*
2094 	 * XXX we cheat slightly on the locking here to avoid locking in
2095 	 * the usual case.  Setting td_priority here is essentially an
2096 	 * incomplete workaround for not setting it properly elsewhere.
2097 	 * Now that some interrupt handlers are threads, not setting it
2098 	 * properly elsewhere can clobber it in the window between setting
2099 	 * it here and returning to user mode, so don't waste time setting
2100 	 * it perfectly here.
2101 	 */
2102 	KASSERT((td->td_flags & TDF_BORROWING) == 0,
2103 	    ("thread with borrowed priority returning to userland"));
2104 	if (td->td_priority != td->td_user_pri) {
2105 		thread_lock(td);
2106 		td->td_priority = td->td_user_pri;
2107 		td->td_base_pri = td->td_user_pri;
2108 		thread_unlock(td);
2109 	}
2110 }
2111 
2112 /*
2113  * Handle a stathz tick.  This is really only relevant for timeshare
2114  * threads.
2115  */
2116 void
2117 sched_clock(struct thread *td)
2118 {
2119 	struct tdq *tdq;
2120 	struct td_sched *ts;
2121 
2122 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2123 	tdq = TDQ_SELF();
2124 	/*
2125 	 * Advance the insert index once for each tick to ensure that all
2126 	 * threads get a chance to run.
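	 * The removal index (tdq_ridx) only catches up once its queue has
	 * drained, which bounds how long a queued timeshare thread can wait
	 * behind the insert point.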
2127 	 */
2128 	if (tdq->tdq_idx == tdq->tdq_ridx) {
2129 		tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
2130 		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
2131 			tdq->tdq_ridx = tdq->tdq_idx;
2132 	}
2133 	ts = td->td_sched;
2134 	/*
2135 	 * We only run the slicing code for TIMESHARE threads.
2136 	 */
2137 	if (td->td_pri_class != PRI_TIMESHARE)
2138 		return;
2139 	/*
2140 	 * We used a tick; charge it to the thread so that we can compute our
2141 	 * interactivity.
2142 	 */
2143 	td->td_sched->ts_runtime += tickincr;
2144 	sched_interact_update(td);
2145 	/*
2146 	 * We used up one time slice.
2147 	 */
2148 	if (--ts->ts_slice > 0)
2149 		return;
2150 	/*
2151 	 * We're out of time, recompute priorities and requeue.
2152 	 */
2153 	sched_priority(td);
2154 	td->td_flags |= TDF_NEEDRESCHED;
2155 }
2156 
2157 /*
2158  * Called once per hz tick.  Used for cpu utilization information.  This
2159  * is easier than trying to scale based on stathz.
2160  */
2161 void
2162 sched_tick(void)
2163 {
2164 	struct td_sched *ts;
2165 
2166 	ts = curthread->td_sched;
2167 	/* Adjust ticks for pctcpu */
2168 	ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
2169 	ts->ts_ltick = ticks;
2170 	/*
2171 	 * Update if we've exceeded our desired tick threshold by over one
2172 	 * second.
2173 	 */
2174 	if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
2175 		sched_pctcpu_update(ts);
2176 }
2177 
2178 /*
2179  * Return whether the current CPU has runnable tasks.  Used for in-kernel
2180  * cooperative idle threads.
2181  */
2182 int
2183 sched_runnable(void)
2184 {
2185 	struct tdq *tdq;
2186 	int load;
2187 
2188 	load = 1;
2189 
2190 	tdq = TDQ_SELF();
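	/*
	 * A non-idle curthread is itself counted in tdq_load, so discount
	 * it below; the idle thread contributes no load of its own.
	 */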
2191 	if ((curthread->td_flags & TDF_IDLETD) != 0) {
2192 		if (tdq->tdq_load > 0)
2193 			goto out;
2194 	} else
2195 		if (tdq->tdq_load - 1 > 0)
2196 			goto out;
2197 	load = 0;
2198 out:
2199 	return (load);
2200 }
2201 
2202 /*
2203  * Choose the highest priority thread to run.  The thread is removed from
2204  * the run-queue while running; however, the load remains.  For SMP we set
2205  * the tdq's bit in the global idle bitmask if it idles here.
2206  */
2207 struct thread *
2208 sched_choose(void)
2209 {
2210 #ifdef SMP
2211 	struct tdq_group *tdg;
2212 #endif
2213 	struct td_sched *ts;
2214 	struct tdq *tdq;
2215 
2216 	tdq = TDQ_SELF();
2217 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2218 	ts = tdq_choose(tdq);
2219 	if (ts) {
2220 		tdq_runq_rem(tdq, ts);
2221 		return (ts->ts_thread);
2222 	}
2223 #ifdef SMP
2224 	/*
2225 	 * We only set the idled bit when all of the cpus in the group are
2226 	 * idle.  Otherwise we could get into a situation where a thread bounces
2227 	 * back and forth between two idle cores on separate physical CPUs.
2228 	 */
2229 	tdg = tdq->tdq_group;
2230 	tdg->tdg_idlemask |= PCPU_GET(cpumask);
2231 	if (tdg->tdg_idlemask == tdg->tdg_cpumask)
2232 		atomic_set_int(&tdq_idle, tdg->tdg_mask);
2233 	tdq->tdq_lowpri = PRI_MAX_IDLE;
2234 #endif
2235 	return (PCPU_GET(idlethread));
2236 }
2237 
2238 /*
2239  * Set owepreempt if necessary.  Preemption never happens directly in ULE,
2240  * Set owepreempt if necessary.  Preemption never happens directly in ULE;
2241  */
2242 static inline void
2243 sched_setpreempt(struct thread *td)
2244 {
2245 	struct thread *ctd;
2246 	int cpri;
2247 	int pri;
2248 
2249 	ctd = curthread;
2250 	pri = td->td_priority;
2251 	cpri = ctd->td_priority;
2252 	if (td->td_priority < ctd->td_priority)
2253 		curthread->td_flags |= TDF_NEEDRESCHED;
2254 	if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
2255 		return;
2256 	/*
2257 	 * Always preempt IDLE threads.  Otherwise only if the preempting
2258 	 * thread is an ithread.
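	 * Lower priority values are better: a thread at or below
	 * preempt_thresh, or any thread displacing an idle-class thread,
	 * sets owepreempt, which is honored at the next critical_exit().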
2259 	 */
2260 	if (pri > preempt_thresh && cpri < PRI_MIN_IDLE)
2261 		return;
2262 	ctd->td_owepreempt = 1;
2263 	return;
2264 }
2265 
2266 /*
2267  * Add a thread to a thread queue.  Initialize the priority, slice, and
2268  * runq, and add the thread to the appropriate queue.  This is the
2269  * internal function called when the tdq is predetermined.
2270  */
2271 void
2272 tdq_add(struct tdq *tdq, struct thread *td, int flags)
2273 {
2274 	struct td_sched *ts;
2275 	int class;
2276 #ifdef SMP
2277 	int cpumask;
2278 #endif
2279 
2280 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2281 	KASSERT((td->td_inhibitors == 0),
2282 	    ("sched_add: trying to run inhibited thread"));
2283 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
2284 	    ("sched_add: bad thread state"));
2285 	KASSERT(td->td_proc->p_sflag & PS_INMEM,
2286 	    ("sched_add: process swapped out"));
2287 
2288 	ts = td->td_sched;
2289 	class = PRI_BASE(td->td_pri_class);
2290 	TD_SET_RUNQ(td);
2291 	if (ts->ts_slice == 0)
2292 		ts->ts_slice = sched_slice;
2293 	/*
2294 	 * Pick the run queue based on priority.
2295 	 */
2296 	if (td->td_priority <= PRI_MAX_REALTIME)
2297 		ts->ts_runq = &tdq->tdq_realtime;
2298 	else if (td->td_priority <= PRI_MAX_TIMESHARE)
2299 		ts->ts_runq = &tdq->tdq_timeshare;
2300 	else
2301 		ts->ts_runq = &tdq->tdq_idle;
2302 #ifdef SMP
2303 	cpumask = 1 << ts->ts_cpu;
2304 	/*
2305 	 * If we had been idle, clear our bit in the group and potentially
2306 	 * the global bitmap.
2307 	 */
2308 	if ((class != PRI_IDLE && class != PRI_ITHD) &&
2309 	    (tdq->tdq_group->tdg_idlemask & cpumask) != 0) {
2310 		/*
2311 		 * Check to see if our group is unidling, and if so, remove it
2312 		 * from the global idle mask.
2313 		 */
2314 		if (tdq->tdq_group->tdg_idlemask ==
2315 		    tdq->tdq_group->tdg_cpumask)
2316 			atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
2317 		/*
2318 		 * Now remove ourselves from the group specific idle mask.
2319 		 */
2320 		tdq->tdq_group->tdg_idlemask &= ~cpumask;
2321 	}
2322 	if (td->td_priority < tdq->tdq_lowpri)
2323 		tdq->tdq_lowpri = td->td_priority;
2324 #endif
2325 	tdq_runq_add(tdq, ts, flags);
2326 	tdq_load_add(tdq, ts);
2327 }
2328 
2329 /*
2330  * Select the target thread queue and add a thread to it.  Request
2331  * preemption or IPI a remote processor if required.
2332  */
2333 void
2334 sched_add(struct thread *td, int flags)
2335 {
2336 	struct td_sched *ts;
2337 	struct tdq *tdq;
2338 #ifdef SMP
2339 	int cpuid;
2340 	int cpu;
2341 #endif
2342 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
2343 	    td, td->td_proc->p_comm, td->td_priority, curthread,
2344 	    curthread->td_proc->p_comm);
2345 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2346 	ts = td->td_sched;
2347 	/*
2348 	 * Recalculate the priority before we select the target cpu or
2349 	 * run-queue.
2350 	 */
2351 	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
2352 		sched_priority(td);
2353 #ifdef SMP
2354 	cpuid = PCPU_GET(cpuid);
2355 	/*
2356 	 * Pick the destination cpu and, if it isn't ours, transfer to the
2357 	 * target cpu.
2358 	 */
2359 	if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_MIGRATE(td))
2360 		cpu = cpuid;
2361 	else if (!THREAD_CAN_MIGRATE(td))
2362 		cpu = ts->ts_cpu;
2363 	else
2364 		cpu = sched_pickcpu(ts, flags);
2365 	tdq = sched_setcpu(ts, cpu, flags);
2366 	tdq_add(tdq, td, flags);
2367 	if (cpu != cpuid) {
2368 		tdq_notify(ts);
2369 		return;
2370 	}
2371 #else
2372 	tdq = TDQ_SELF();
2373 	TDQ_LOCK(tdq);
2374 	/*
2375 	 * Now that the thread is moving to the run-queue, set the lock
2376 	 * to the scheduler's lock.
2377 	 */
2378 	thread_lock_set(td, TDQ_LOCKPTR(tdq));
2379 	tdq_add(tdq, td, flags);
2380 #endif
2381 	if (!(flags & SRQ_YIELDING))
2382 		sched_setpreempt(td);
2383 }
2384 
2385 /*
2386  * Remove a thread from a run-queue without running it.  This is used
2387  * when we're stealing a thread from a remote queue.  Otherwise all threads
2388  * exit by calling sched_exit_thread() and sched_throw() themselves.
2389  */
2390 void
2391 sched_rem(struct thread *td)
2392 {
2393 	struct tdq *tdq;
2394 	struct td_sched *ts;
2395 
2396 	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
2397 	    td, td->td_proc->p_comm, td->td_priority, curthread,
2398 	    curthread->td_proc->p_comm);
2399 	ts = td->td_sched;
2400 	tdq = TDQ_CPU(ts->ts_cpu);
2401 	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2402 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2403 	KASSERT(TD_ON_RUNQ(td),
2404 	    ("sched_rem: thread not on run queue"));
2405 	tdq_runq_rem(tdq, ts);
2406 	tdq_load_rem(tdq, ts);
2407 	TD_SET_CAN_RUN(td);
2408 }
2409 
2410 /*
2411  * Fetch cpu utilization information.  Updates on demand.
2412  */
2413 fixpt_t
2414 sched_pctcpu(struct thread *td)
2415 {
2416 	fixpt_t pctcpu;
2417 	struct td_sched *ts;
2418 
2419 	pctcpu = 0;
2420 	ts = td->td_sched;
2421 	if (ts == NULL)
2422 		return (0);
2423 
2424 	thread_lock(td);
2425 	if (ts->ts_ticks) {
2426 		int rtick;
2427 
2428 		sched_pctcpu_update(ts);
2429 		/* How many rticks per second? */
2430 		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
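		/* Express rtick/hz as a fixpt_t fraction (FSCALE == 1.0). */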
2431 		pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
2432 	}
2433 	td->td_proc->p_swtime = ts->ts_ltick - ts->ts_ftick;
2434 	thread_unlock(td);
2435 
2436 	return (pctcpu);
2437 }
2438 
2439 /*
2440  * Bind a thread to a target cpu.
2441  */
2442 void
2443 sched_bind(struct thread *td, int cpu)
2444 {
2445 	struct td_sched *ts;
2446 
2447 	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
2448 	ts = td->td_sched;
2449 	if (ts->ts_flags & TSF_BOUND)
2450 		sched_unbind(td);
2451 	ts->ts_flags |= TSF_BOUND;
2452 #ifdef SMP
2453 	sched_pin();
2454 	if (PCPU_GET(cpuid) == cpu)
2455 		return;
2456 	ts->ts_cpu = cpu;
2457 	/* When we return from mi_switch we'll be on the correct cpu. */
2458 	mi_switch(SW_VOL, NULL);
2459 #endif
2460 }
2461 
2462 /*
2463  * Release a bound thread.
2464  */
2465 void
2466 sched_unbind(struct thread *td)
2467 {
2468 	struct td_sched *ts;
2469 
2470 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2471 	ts = td->td_sched;
2472 	if ((ts->ts_flags & TSF_BOUND) == 0)
2473 		return;
2474 	ts->ts_flags &= ~TSF_BOUND;
2475 #ifdef SMP
2476 	sched_unpin();
2477 #endif
2478 }
2479 
2480 int
2481 sched_is_bound(struct thread *td)
2482 {
2483 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2484 	return (td->td_sched->ts_flags & TSF_BOUND);
2485 }
2486 
2487 /*
2488  * Basic yield call.
2489  */
2490 void
2491 sched_relinquish(struct thread *td)
2492 {
2493 	thread_lock(td);
2494 	if (td->td_pri_class == PRI_TIMESHARE)
2495 		sched_prio(td, PRI_MAX_TIMESHARE);
2496 	SCHED_STAT_INC(switch_relinquish);
2497 	mi_switch(SW_VOL, NULL);
2498 	thread_unlock(td);
2499 }
2500 
2501 /*
2502  * Return the total system load.
2503  */
2504 int
2505 sched_load(void)
2506 {
2507 #ifdef SMP
2508 	int total;
2509 	int i;
2510 
2511 	total = 0;
2512 	for (i = 0; i <= tdg_maxid; i++)
2513 		total += TDQ_GROUP(i)->tdg_load;
2514 	return (total);
2515 #else
2516 	return (TDQ_SELF()->tdq_sysload);
2517 #endif
2518 }
2519 
2520 int
2521 sched_sizeof_proc(void)
2522 {
2523 	return (sizeof(struct proc));
2524 }
2525 
2526 int
2527 sched_sizeof_thread(void)
2528 {
2529 	return (sizeof(struct thread) + sizeof(struct td_sched));
2530 }
2531 
2532 /*
2533  * The actual idle process.
2534  */
2535 void
2536 sched_idletd(void *dummy)
2537 {
2538 	struct thread *td;
2539 	struct tdq *tdq;
2540 
2541 	td = curthread;
2542 	tdq = TDQ_SELF();
2543 	mtx_assert(&Giant, MA_NOTOWNED);
2544 	/* ULE relies on preemption for idle interruption. */
2545 	for (;;) {
2546 #ifdef SMP
2547 		if (tdq_idled(tdq))
2548 			cpu_idle();
2549 #else
2550 		cpu_idle();
2551 #endif
2552 	}
2553 }
2554 
2555 /*
2556  * A CPU is entering the scheduler for the first time or a thread is exiting.
2557  */
2558 void
2559 sched_throw(struct thread *td)
2560 {
2561 	struct tdq *tdq;
2562 
2563 	tdq = TDQ_SELF();
2564 	if (td == NULL) {
2565 		/* Correct spinlock nesting and acquire the correct lock. */
2566 		TDQ_LOCK(tdq);
2567 		spinlock_exit();
2568 	} else {
2569 		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2570 		tdq_load_rem(tdq, td->td_sched);
2571 	}
2572 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
2573 	PCPU_SET(switchtime, cpu_ticks());
2574 	PCPU_SET(switchticks, ticks);
2575 	cpu_throw(td, choosethread());	/* doesn't return */
2576 }
2577 
2578 /*
2579  * This is called from fork_exit().  Just acquire the correct locks and
2580  * let fork do the rest of the work.
2581  */
2582 void
2583 sched_fork_exit(struct thread *td)
2584 {
2585 	struct td_sched *ts;
2586 	struct tdq *tdq;
2587 	int cpuid;
2588 
2589 	/*
2590 	 * Finish setting up thread glue so that it begins execution in a
2591 	 * non-nested critical section with the scheduler lock held.
2592 	 */
2593 	cpuid = PCPU_GET(cpuid);
2594 	tdq = TDQ_CPU(cpuid);
2595 	ts = td->td_sched;
2596 	if (TD_IS_IDLETHREAD(td))
2597 		td->td_lock = TDQ_LOCKPTR(tdq);
2598 	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2599 	td->td_oncpu = cpuid;
2600 	TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)td;
2601 	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
2602 }
2603 
2604 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
2605     "Scheduler");
2606 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
2607     "Scheduler name");
2608 SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
2609     "Slice size for timeshare threads");
2610 SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
2611      "Interactivity score threshold");
2612 SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
2613      0, "Min priority for preemption, lower priorities have greater precedence");
2614 #ifdef SMP
2615 SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri, CTLFLAG_RW, &pick_pri, 0,
2616     "Pick the target cpu based on priority rather than load.");
2617 SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
2618     "Number of hz ticks to keep thread affinity for");
2619 SYSCTL_INT(_kern_sched, OID_AUTO, tryself, CTLFLAG_RW, &tryself, 0, "");
2620 SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
2621     "Enables the long-term load balancer");
2622 SYSCTL_INT(_kern_sched, OID_AUTO, balance_secs, CTLFLAG_RW, &balance_secs, 0,
2623     "Average frequency in seconds to run the long-term balancer");
2624 SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0,
2625     "Steals work from another hyper-threaded core on idle");
2626 SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
2627     "Attempts to steal work from other cores before idling");
2628 SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
2629     "Minimum load on remote cpu before we'll steal");
2630 SYSCTL_INT(_kern_sched, OID_AUTO, topology, CTLFLAG_RD, &topology, 0,
2631     "True when a topology has been specified by the MD code.");
2632 #endif
2633 
2634 /* ps compat */
2635 static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
2636 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
2637 
2638 
2639 #define KERN_SWITCH_INCLUDE 1
2640 #include "kern/kern_switch.c"
2641