xref: /freebsd/sys/kern/sched_ule.c (revision 0c927cdd8e6e05387fc5a9ffcb5dbe128d4ad749)
1 /*-
2  * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_hwpmc_hooks.h"
31 #include "opt_sched.h"
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kdb.h>
36 #include <sys/kernel.h>
37 #include <sys/ktr.h>
38 #include <sys/lock.h>
39 #include <sys/mutex.h>
40 #include <sys/proc.h>
41 #include <sys/resource.h>
42 #include <sys/resourcevar.h>
43 #include <sys/sched.h>
44 #include <sys/smp.h>
45 #include <sys/sx.h>
46 #include <sys/sysctl.h>
47 #include <sys/sysproto.h>
48 #include <sys/turnstile.h>
49 #include <sys/umtx.h>
50 #include <sys/vmmeter.h>
51 #ifdef KTRACE
52 #include <sys/uio.h>
53 #include <sys/ktrace.h>
54 #endif
55 
56 #ifdef HWPMC_HOOKS
57 #include <sys/pmckern.h>
58 #endif
59 
60 #include <machine/cpu.h>
61 #include <machine/smp.h>
62 
63 #ifndef PREEMPTION
64 #error	"SCHED_ULE requires options PREEMPTION"
65 #endif
66 
67 /*
68  * TODO:
69  *	Pick idle from affinity group or self group first.
70  *	Implement pick_score.
71  */
72 
73 #define	KTR_ULE	0x0		/* Enable for pickpri debugging. */
74 
75 /*
76  * Thread scheduler specific section.
77  */
78 struct td_sched {
79 	TAILQ_ENTRY(td_sched) ts_procq;	/* (j/z) Run queue. */
80 	int		ts_flags;	/* (j) TSF_* flags. */
81 	struct thread	*ts_thread;	/* (*) Active associated thread. */
82 	u_char		ts_rqindex;	/* (j) Run queue index. */
83 	int		ts_slptime;
84 	int		ts_slice;
85 	struct runq	*ts_runq;
86 	u_char		ts_cpu;		/* CPU that we have affinity for. */
87 	/* The following variables are only used for pctcpu calculation */
88 	int		ts_ltick;	/* Last tick that we were running on */
89 	int		ts_ftick;	/* First tick that we were running on */
90 	int		ts_ticks;	/* Tick count */
91 #ifdef SMP
92 	int		ts_rltick;	/* Real last tick, for affinity. */
93 #endif
94 
95 	/* originally from kg_sched */
96 	u_int	skg_slptime;		/* Number of ticks we vol. slept */
97 	u_int	skg_runtime;		/* Number of ticks we were running */
98 };
99 /* flags kept in ts_flags */
100 #define	TSF_BOUND	0x0001		/* Thread can not migrate. */
101 #define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */
102 
103 static struct td_sched td_sched0;
104 
105 /*
106  * Cpu percentage computation macros and defines.
107  *
108  * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
109  * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
110  * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
111  * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
112  * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
113  * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
114  */
115 #define	SCHED_TICK_SECS		10
116 #define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
117 #define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
118 #define	SCHED_TICK_SHIFT	10
119 #define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
120 #define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))
121 
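/*
 * Illustrative numbers for the tick accounting above (assumed values, not
 * taken from this file: hz = 1000 purely for the sake of example):
 * SCHED_TICK_TARG is then 10000 and SCHED_TICK_MAX is 11000.  ts_ticks is
 * kept scaled up by SCHED_TICK_SHIFT, so SCHED_TICK_HZ() shifts it back
 * down into plain hz ticks.  A thread that ran during every tick of the
 * last ten seconds has SCHED_TICK_HZ() ~= SCHED_TICK_TOTAL() ~= 10000, a
 * utilization ratio near 1, while a thread that ran a tenth of the time
 * has a ratio near 0.1.  That ratio is what SCHED_PRI_TICKS() below turns
 * into a priority.
 */
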
122 /*
123  * These macros determine priorities for non-interactive threads.  They are
124  * assigned a priority based on their recent cpu utilization as expressed
125  * by the ratio of ticks to the tick total.  NHALF priorities at the start
126  * and end of the MIN to MAX timeshare range are only reachable with negative
127  * or positive nice respectively.
128  *
129  * PRI_RANGE:	Priority range for utilization dependent priorities.
130  * PRI_NRESV:	Number of nice values.
131  * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
132  * PRI_NICE:	Determines the part of the priority inherited from nice.
133  */
134 #define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
135 #define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
136 #define	SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
137 #define	SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
138 #define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN + 1)
139 #define	SCHED_PRI_TICKS(ts)						\
140     (SCHED_TICK_HZ((ts)) /						\
141     (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
142 #define	SCHED_PRI_NICE(nice)	(nice)
143 
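/*
 * A rough worked example of the priority macros above, using assumed
 * values (PRIO_MIN = -20, PRIO_MAX = 20, so SCHED_PRI_NHALF = 20): a
 * thread that used the cpu for the whole measurement window has
 * SCHED_TICK_HZ() close to SCHED_TICK_TOTAL(), so SCHED_PRI_TICKS()
 * evaluates to nearly the full SCHED_PRI_RANGE and the thread lands near
 * SCHED_PRI_MAX; a mostly idle thread gets close to 0 and stays near
 * SCHED_PRI_MIN.  SCHED_PRI_NICE() then shifts the result by up to NHALF
 * in either direction, which is why the extreme ends of the timeshare
 * range are reserved for nice values.
 */
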
144 /*
145  * These determine the interactivity of a process.  Interactivity differs from
146  * cpu utilization in that it expresses the voluntary time slept vs time ran
147  * while cpu utilization includes all time not running.  This more accurately
148  * models the intent of the thread.
149  *
150  * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
151  *		before throttling back.
152  * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
153  * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
154  * INTERACT_THRESH:	Threshold for placement on the current runq.
155  */
156 #define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
157 #define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
158 #define	SCHED_INTERACT_MAX	(100)
159 #define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
160 #define	SCHED_INTERACT_THRESH	(30)
161 
162 /*
163  * tickincr:		Converts a stathz tick into a hz domain scaled by
164  *			the shift factor.  Without the shift the error rate
165  *			due to rounding would be unacceptably high.
166  * realstathz:		stathz is sometimes 0, in which case we run off of hz.
167  * sched_slice:		Runtime of each thread before rescheduling.
168  */
169 static int sched_interact = SCHED_INTERACT_THRESH;
170 static int realstathz;
171 static int tickincr;
172 static int sched_slice;
173 
174 /*
175  * tdq - per processor runqs and statistics.
176  */
177 struct tdq {
178 	struct runq	tdq_idle;		/* Queue of IDLE threads. */
179 	struct runq	tdq_timeshare;		/* timeshare run queue. */
180 	struct runq	tdq_realtime;		/* real-time run queue. */
181 	u_char		tdq_idx;		/* Current insert index. */
182 	u_char		tdq_ridx;		/* Current removal index. */
183 	short		tdq_flags;		/* Thread queue flags */
184 	int		tdq_load;		/* Aggregate load. */
185 #ifdef SMP
186 	int		tdq_transferable;
187 	LIST_ENTRY(tdq)	tdq_siblings;		/* Next in tdq group. */
188 	struct tdq_group *tdq_group;		/* Our processor group. */
189 #else
190 	int		tdq_sysload;		/* For loadavg, !ITHD load. */
191 #endif
192 };
193 
194 #define	TDQF_BUSY	0x0001			/* Queue is marked as busy */
195 
196 #ifdef SMP
197 /*
198  * tdq groups are groups of processors which can cheaply share threads.  When
199  * one processor in the group goes idle it will check the runqs of the other
200  * processors in its group prior to halting and waiting for an interrupt.
201  * These groups are suitable for SMT (Simultaneous Multi-Threading) and
202  * not NUMA.  In a NUMA environment we'd want an idle bitmap per group and
203  * a two-tiered load balancer.
204  */
205 struct tdq_group {
206 	int	tdg_cpus;		/* Count of CPUs in this tdq group. */
207 	cpumask_t tdg_cpumask;		/* Mask of cpus in this group. */
208 	cpumask_t tdg_idlemask;		/* Idle cpus in this group. */
209 	cpumask_t tdg_mask;		/* Bit mask for first cpu. */
210 	int	tdg_load;		/* Total load of this group. */
211 	int	tdg_transferable;	/* Transferable load of this group. */
212 	LIST_HEAD(, tdq) tdg_members;	/* Linked list of all members. */
213 };
214 
215 #define	SCHED_AFFINITY_DEFAULT	(hz / 100)
216 #define	SCHED_AFFINITY(ts)	((ts)->ts_rltick > ticks - affinity)
217 
218 /*
219  * Run-time tunables.
220  */
221 static int rebalance = 0;
222 static int pick_pri = 0;
223 static int affinity;
224 static int tryself = 1;
225 static int tryselfidle = 1;
226 static int ipi_ast = 0;
227 static int ipi_preempt = 1;
228 static int ipi_thresh = PRI_MIN_KERN;
229 static int steal_htt = 1;
230 static int steal_busy = 1;
231 static int busy_thresh = 4;
232 static int topology = 0;
233 
234 /*
235  * One thread queue per processor.
236  */
237 static volatile cpumask_t tdq_idle;
238 static volatile cpumask_t tdq_busy;
239 static int tdg_maxid;
240 static struct tdq	tdq_cpu[MAXCPU];
241 static struct tdq_group tdq_groups[MAXCPU];
242 static int bal_tick;
243 static int gbal_tick;
244 static int balance_groups;
245 
246 #define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
247 #define	TDQ_CPU(x)	(&tdq_cpu[(x)])
248 #define	TDQ_ID(x)	((x) - tdq_cpu)
249 #define	TDQ_GROUP(x)	(&tdq_groups[(x)])
250 #else	/* !SMP */
251 static struct tdq	tdq_cpu;
252 
253 #define	TDQ_ID(x)	(0)
254 #define	TDQ_SELF()	(&tdq_cpu)
255 #define	TDQ_CPU(x)	(&tdq_cpu)
256 #endif
257 
258 static void sched_priority(struct thread *);
259 static void sched_thread_priority(struct thread *, u_char);
260 static int sched_interact_score(struct thread *);
261 static void sched_interact_update(struct thread *);
262 static void sched_interact_fork(struct thread *);
263 static void sched_pctcpu_update(struct td_sched *);
264 static inline void sched_pin_td(struct thread *td);
265 static inline void sched_unpin_td(struct thread *td);
266 
267 /* Operations on per processor queues */
268 static struct td_sched * tdq_choose(struct tdq *);
269 static void tdq_setup(struct tdq *);
270 static void tdq_load_add(struct tdq *, struct td_sched *);
271 static void tdq_load_rem(struct tdq *, struct td_sched *);
272 static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
273 static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
274 void tdq_print(int cpu);
275 static void runq_print(struct runq *rq);
276 #ifdef SMP
277 static int tdq_pickidle(struct tdq *, struct td_sched *);
278 static int tdq_pickpri(struct tdq *, struct td_sched *, int);
279 static struct td_sched *runq_steal(struct runq *);
280 static void sched_balance(void);
281 static void sched_balance_groups(void);
282 static void sched_balance_group(struct tdq_group *);
283 static void sched_balance_pair(struct tdq *, struct tdq *);
284 static void sched_smp_tick(struct thread *);
285 static void tdq_move(struct tdq *, int);
286 static int tdq_idled(struct tdq *);
287 static void tdq_notify(struct td_sched *);
288 static struct td_sched *tdq_steal(struct tdq *, int);
289 
290 #define	THREAD_CAN_MIGRATE(td)	 ((td)->td_pinned == 0)
291 #endif
292 
293 static void sched_setup(void *dummy);
294 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
295 
296 static void sched_initticks(void *dummy);
297 SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)
298 
299 static inline void
300 sched_pin_td(struct thread *td)
301 {
302 	td->td_pinned++;
303 }
304 
305 static inline void
306 sched_unpin_td(struct thread *td)
307 {
308 	td->td_pinned--;
309 }
310 
311 static void
312 runq_print(struct runq *rq)
313 {
314 	struct rqhead *rqh;
315 	struct td_sched *ts;
316 	int pri;
317 	int j;
318 	int i;
319 
320 	for (i = 0; i < RQB_LEN; i++) {
321 		printf("\t\trunq bits %d 0x%zx\n",
322 		    i, rq->rq_status.rqb_bits[i]);
323 		for (j = 0; j < RQB_BPW; j++)
324 			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
325 				pri = j + (i << RQB_L2BPW);
326 				rqh = &rq->rq_queues[pri];
327 				TAILQ_FOREACH(ts, rqh, ts_procq) {
328 					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
329 					    ts->ts_thread, ts->ts_thread->td_proc->p_comm, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
330 				}
331 			}
332 	}
333 }
334 
335 void
336 tdq_print(int cpu)
337 {
338 	struct tdq *tdq;
339 
340 	tdq = TDQ_CPU(cpu);
341 
342 	printf("tdq:\n");
343 	printf("\tload:           %d\n", tdq->tdq_load);
344 	printf("\ttimeshare idx: %d\n", tdq->tdq_idx);
345 	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
346 	printf("\trealtime runq:\n");
347 	runq_print(&tdq->tdq_realtime);
348 	printf("\ttimeshare runq:\n");
349 	runq_print(&tdq->tdq_timeshare);
350 	printf("\tidle runq:\n");
351 	runq_print(&tdq->tdq_idle);
352 #ifdef SMP
353 	printf("\tload transferable: %d\n", tdq->tdq_transferable);
354 #endif
355 }
356 
357 static __inline void
358 tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
359 {
360 #ifdef SMP
361 	if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
362 		tdq->tdq_transferable++;
363 		tdq->tdq_group->tdg_transferable++;
364 		ts->ts_flags |= TSF_XFERABLE;
365 		if (tdq->tdq_transferable >= busy_thresh &&
366 		    (tdq->tdq_flags & TDQF_BUSY) == 0) {
367 			tdq->tdq_flags |= TDQF_BUSY;
368 			atomic_set_int(&tdq_busy, 1 << TDQ_ID(tdq));
369 		}
370 	}
371 #endif
372 	if (ts->ts_runq == &tdq->tdq_timeshare) {
373 		u_char pri;
374 
375 		pri = ts->ts_thread->td_priority;
376 		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
377 			("Invalid priority %d on timeshare runq", pri));
378 		/*
379 		 * This queue contains only priorities between MIN and MAX
380 		 * timeshare.  Use the whole queue to represent these values.
381 		 */
382 #define	TS_RQ_PPQ	(((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
383 		if ((flags & SRQ_BORROWING) == 0) {
384 			pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
385 			pri = (pri + tdq->tdq_idx) % RQ_NQS;
386 			/*
387 			 * This effectively shortens the queue by one so we
388 			 * can have a one slot difference between idx and
389 			 * ridx while we wait for threads to drain.
390 			 */
391 			if (tdq->tdq_ridx != tdq->tdq_idx &&
392 			    pri == tdq->tdq_ridx)
393 				pri = (unsigned char)(pri - 1) % RQ_NQS;
394 		} else
395 			pri = tdq->tdq_ridx;
396 		runq_add_pri(ts->ts_runq, ts, pri, flags);
397 	} else
398 		runq_add(ts->ts_runq, ts, flags);
399 }
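
/*
 * An illustrative walk through the circular timeshare insert above, using
 * assumed values only: if the timeshare priority span is 64 and RQ_NQS is
 * 64, TS_RQ_PPQ is 1, so a thread 10 priorities above PRI_MIN_TIMESHARE
 * with tdq_idx == 20 is queued at (10 + 20) % 64 == 30.  As sched_clock()
 * advances tdq_idx the same priority maps to later and later queues, which
 * is what guarantees every timeshare thread eventually gets a turn; the
 * ridx check simply keeps new arrivals from being inserted ahead of threads
 * still draining from the current removal index.
 */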
400 
401 static __inline void
402 tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
403 {
404 #ifdef SMP
405 	if (ts->ts_flags & TSF_XFERABLE) {
406 		tdq->tdq_transferable--;
407 		tdq->tdq_group->tdg_transferable--;
408 		ts->ts_flags &= ~TSF_XFERABLE;
409 		if (tdq->tdq_transferable < busy_thresh &&
410 		    (tdq->tdq_flags & TDQF_BUSY)) {
411 			atomic_clear_int(&tdq_busy, 1 << TDQ_ID(tdq));
412 			tdq->tdq_flags &= ~TDQF_BUSY;
413 		}
414 	}
415 #endif
416 	if (ts->ts_runq == &tdq->tdq_timeshare) {
417 		if (tdq->tdq_idx != tdq->tdq_ridx)
418 			runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
419 		else
420 			runq_remove_idx(ts->ts_runq, ts, NULL);
421 		/*
422 		 * For timeshare threads we update the priority here so
423 		 * the priority reflects the time we've been sleeping.
424 		 */
425 		ts->ts_ltick = ticks;
426 		sched_pctcpu_update(ts);
427 		sched_priority(ts->ts_thread);
428 	} else
429 		runq_remove(ts->ts_runq, ts);
430 }
431 
432 static void
433 tdq_load_add(struct tdq *tdq, struct td_sched *ts)
434 {
435 	int class;
436 	mtx_assert(&sched_lock, MA_OWNED);
437 	class = PRI_BASE(ts->ts_thread->td_pri_class);
438 	tdq->tdq_load++;
439 	CTR2(KTR_SCHED, "cpu %jd load: %d", TDQ_ID(tdq), tdq->tdq_load);
440 	if (class != PRI_ITHD &&
441 	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
442 #ifdef SMP
443 		tdq->tdq_group->tdg_load++;
444 #else
445 		tdq->tdq_sysload++;
446 #endif
447 }
448 
449 static void
450 tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
451 {
452 	int class;
453 	mtx_assert(&sched_lock, MA_OWNED);
454 	class = PRI_BASE(ts->ts_thread->td_pri_class);
455 	if (class != PRI_ITHD &&
456 	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
457 #ifdef SMP
458 		tdq->tdq_group->tdg_load--;
459 #else
460 		tdq->tdq_sysload--;
461 #endif
462 	tdq->tdq_load--;
463 	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
464 	ts->ts_runq = NULL;
465 }
466 
467 #ifdef SMP
468 static void
469 sched_smp_tick(struct thread *td)
470 {
471 	struct tdq *tdq;
472 
473 	tdq = TDQ_SELF();
474 	if (rebalance) {
475 		if (ticks >= bal_tick)
476 			sched_balance();
477 		if (ticks >= gbal_tick && balance_groups)
478 			sched_balance_groups();
479 	}
480 	td->td_sched->ts_rltick = ticks;
481 }
482 
483 /*
484  * sched_balance is a simple CPU load balancing algorithm.  It operates by
485  * finding the least loaded and most loaded cpu and equalizing their load
486  * by migrating some processes.
487  *
488  * Dealing only with two CPUs at a time has two advantages.  Firstly, most
489  * installations will only have 2 cpus.  Secondly, load balancing too much at
490  * once can have an unpleasant effect on the system.  The scheduler rarely has
491  * enough information to make perfect decisions.  So this algorithm chooses
492  * simplicity and more gradual effects on load in larger systems.
493  *
494  * It could be improved by considering the priorities and slices assigned to
495  * each task prior to balancing them.  There are many pathological cases with
496  * any approach and so the semi random algorithm below may work as well as any.
497  *
498  */
499 static void
500 sched_balance(void)
501 {
502 	struct tdq_group *high;
503 	struct tdq_group *low;
504 	struct tdq_group *tdg;
505 	int cnt;
506 	int i;
507 
508 	bal_tick = ticks + (random() % (hz * 2));
509 	if (smp_started == 0)
510 		return;
511 	low = high = NULL;
512 	i = random() % (tdg_maxid + 1);
513 	for (cnt = 0; cnt <= tdg_maxid; cnt++) {
514 		tdg = TDQ_GROUP(i);
515 		/*
516 		 * Find the CPU with the highest load that has some
517 		 * threads to transfer.
518 		 */
519 		if ((high == NULL || tdg->tdg_load > high->tdg_load)
520 		    && tdg->tdg_transferable)
521 			high = tdg;
522 		if (low == NULL || tdg->tdg_load < low->tdg_load)
523 			low = tdg;
524 		if (++i > tdg_maxid)
525 			i = 0;
526 	}
527 	if (low != NULL && high != NULL && high != low)
528 		sched_balance_pair(LIST_FIRST(&high->tdg_members),
529 		    LIST_FIRST(&low->tdg_members));
530 }
531 
532 static void
533 sched_balance_groups(void)
534 {
535 	int i;
536 
537 	gbal_tick = ticks + (random() % (hz * 2));
538 	mtx_assert(&sched_lock, MA_OWNED);
539 	if (smp_started)
540 		for (i = 0; i <= tdg_maxid; i++)
541 			sched_balance_group(TDQ_GROUP(i));
542 }
543 
544 static void
545 sched_balance_group(struct tdq_group *tdg)
546 {
547 	struct tdq *tdq;
548 	struct tdq *high;
549 	struct tdq *low;
550 	int load;
551 
552 	if (tdg->tdg_transferable == 0)
553 		return;
554 	low = NULL;
555 	high = NULL;
556 	LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
557 		load = tdq->tdq_load;
558 		if (high == NULL || load > high->tdq_load)
559 			high = tdq;
560 		if (low == NULL || load < low->tdq_load)
561 			low = tdq;
562 	}
563 	if (high != NULL && low != NULL && high != low)
564 		sched_balance_pair(high, low);
565 }
566 
567 static void
568 sched_balance_pair(struct tdq *high, struct tdq *low)
569 {
570 	int transferable;
571 	int high_load;
572 	int low_load;
573 	int move;
574 	int diff;
575 	int i;
576 
577 	/*
578 	 * If we're transferring within a group we have to use this specific
579 	 * tdq's transferable count, otherwise we can steal from other members
580 	 * of the group.
581 	 */
582 	if (high->tdq_group == low->tdq_group) {
583 		transferable = high->tdq_transferable;
584 		high_load = high->tdq_load;
585 		low_load = low->tdq_load;
586 	} else {
587 		transferable = high->tdq_group->tdg_transferable;
588 		high_load = high->tdq_group->tdg_load;
589 		low_load = low->tdq_group->tdg_load;
590 	}
591 	if (transferable == 0)
592 		return;
593 	/*
594 	 * Determine what the imbalance is and then adjust that to how many
595 	 * threads we actually have to give up (transferable).
596 	 */
597 	diff = high_load - low_load;
598 	move = diff / 2;
599 	if (diff & 0x1)
600 		move++;
601 	move = min(move, transferable);
602 	for (i = 0; i < move; i++)
603 		tdq_move(high, TDQ_ID(low));
604 	return;
605 }
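
/*
 * The move calculation above in concrete (made up) numbers: with
 * high_load = 7 and low_load = 2, diff is 5 and move rounds up to 3, then
 * is clamped to the transferable count, so at most 3 threads are pulled
 * toward the less loaded queue.  Rounding up on an odd difference biases
 * slightly toward the idle side rather than leaving it short.
 */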
606 
607 static void
608 tdq_move(struct tdq *from, int cpu)
609 {
610 	struct tdq *tdq;
611 	struct tdq *to;
612 	struct td_sched *ts;
613 
614 	tdq = from;
615 	to = TDQ_CPU(cpu);
616 	ts = tdq_steal(tdq, 1);
617 	if (ts == NULL) {
618 		struct tdq_group *tdg;
619 
620 		tdg = tdq->tdq_group;
621 		LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
622 			if (tdq == from || tdq->tdq_transferable == 0)
623 				continue;
624 			ts = tdq_steal(tdq, 1);
625 			break;
626 		}
627 		if (ts == NULL)
628 			panic("tdq_move: No threads available with a "
629 			    "transferable count of %d\n",
630 			    tdg->tdg_transferable);
631 	}
632 	if (tdq == to)
633 		return;
634 	sched_rem(ts->ts_thread);
635 	ts->ts_cpu = cpu;
636 	sched_pin_td(ts->ts_thread);
637 	sched_add(ts->ts_thread, SRQ_YIELDING);
638 	sched_unpin_td(ts->ts_thread);
639 }
640 
641 static int
642 tdq_idled(struct tdq *tdq)
643 {
644 	struct tdq_group *tdg;
645 	struct tdq *steal;
646 	struct td_sched *ts;
647 
648 	tdg = tdq->tdq_group;
649 	/*
650 	 * If we're in a cpu group, try and steal threads from another cpu in
651 	 * the group before idling.
652 	 */
653 	if (steal_htt && tdg->tdg_cpus > 1 && tdg->tdg_transferable) {
654 		LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) {
655 			if (steal == tdq || steal->tdq_transferable == 0)
656 				continue;
657 			ts = tdq_steal(steal, 0);
658 			if (ts)
659 				goto steal;
660 		}
661 	}
662 	if (steal_busy) {
663 		while (tdq_busy) {
664 			int cpu;
665 
666 			cpu = ffs(tdq_busy);
667 			if (cpu == 0)
668 				break;
669 			cpu--;
670 			steal = TDQ_CPU(cpu);
671 			if (steal->tdq_transferable == 0)
672 				continue;
673 			ts = tdq_steal(steal, 1);
674 			if (ts == NULL)
675 				continue;
676 			CTR5(KTR_ULE,
677 			    "tdq_idled: stealing td %p(%s) pri %d from %d busy 0x%X",
678 			    ts->ts_thread, ts->ts_thread->td_proc->p_comm,
679 			    ts->ts_thread->td_priority, cpu, tdq_busy);
680 			goto steal;
681 		}
682 	}
683 	/*
684 	 * We only set the idled bit when all of the cpus in the group are
685 	 * idle.  Otherwise we could get into a situation where a thread bounces
686 	 * back and forth between two idle cores on separate physical CPUs.
687 	 */
688 	tdg->tdg_idlemask |= PCPU_GET(cpumask);
689 	if (tdg->tdg_idlemask == tdg->tdg_cpumask)
690 		atomic_set_int(&tdq_idle, tdg->tdg_mask);
691 	return (1);
692 steal:
693 	sched_rem(ts->ts_thread);
694 	ts->ts_cpu = PCPU_GET(cpuid);
695 	sched_pin_td(ts->ts_thread);
696 	sched_add(ts->ts_thread, SRQ_YIELDING);
697 	sched_unpin_td(ts->ts_thread);
698 
699 	return (0);
700 }
701 
702 static void
703 tdq_notify(struct td_sched *ts)
704 {
705 	struct thread *ctd;
706 	struct pcpu *pcpu;
707 	int cpri;
708 	int pri;
709 	int cpu;
710 
711 	cpu = ts->ts_cpu;
712 	pri = ts->ts_thread->td_priority;
713 	pcpu = pcpu_find(cpu);
714 	ctd = pcpu->pc_curthread;
715 	cpri = ctd->td_priority;
716 
717 	/*
718 	 * If our priority is not better than the current priority there is
719 	 * nothing to do.
720 	 */
721 	if (pri > cpri)
722 		return;
723 	/*
724 	 * Always IPI idle.
725 	 */
726 	if (cpri > PRI_MIN_IDLE)
727 		goto sendipi;
728 	/*
729 	 * If we're realtime or better and there is timeshare or worse running
730 	 * send an IPI.
731 	 */
732 	if (pri < PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
733 		goto sendipi;
734 	/*
735 	 * Otherwise only IPI if we exceed the threshold.
736 	 */
737 	if (pri > ipi_thresh)
738 		return;
739 sendipi:
740 	ctd->td_flags |= TDF_NEEDRESCHED;
741 	if (cpri < PRI_MIN_IDLE) {
742 		if (ipi_ast)
743 			ipi_selected(1 << cpu, IPI_AST);
744 		else if (ipi_preempt)
745 			ipi_selected(1 << cpu, IPI_PREEMPT);
746 	} else
747 		ipi_selected(1 << cpu, IPI_PREEMPT);
748 }
749 
750 static struct td_sched *
751 runq_steal(struct runq *rq)
752 {
753 	struct rqhead *rqh;
754 	struct rqbits *rqb;
755 	struct td_sched *ts;
756 	int word;
757 	int bit;
758 
759 	mtx_assert(&sched_lock, MA_OWNED);
760 	rqb = &rq->rq_status;
761 	for (word = 0; word < RQB_LEN; word++) {
762 		if (rqb->rqb_bits[word] == 0)
763 			continue;
764 		for (bit = 0; bit < RQB_BPW; bit++) {
765 			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
766 				continue;
767 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
768 			TAILQ_FOREACH(ts, rqh, ts_procq) {
769 				if (THREAD_CAN_MIGRATE(ts->ts_thread))
770 					return (ts);
771 			}
772 		}
773 	}
774 	return (NULL);
775 }
776 
777 static struct td_sched *
778 tdq_steal(struct tdq *tdq, int stealidle)
779 {
780 	struct td_sched *ts;
781 
782 	/*
783 	 * Steal from next first to try to get a non-interactive task that
784 	 * may not have run for a while.
785 	 * XXX Need to effect steal order for timeshare threads.
786 	 */
787 	if ((ts = runq_steal(&tdq->tdq_realtime)) != NULL)
788 		return (ts);
789 	if ((ts = runq_steal(&tdq->tdq_timeshare)) != NULL)
790 		return (ts);
791 	if (stealidle)
792 		return (runq_steal(&tdq->tdq_idle));
793 	return (NULL);
794 }
795 
796 int
797 tdq_pickidle(struct tdq *tdq, struct td_sched *ts)
798 {
799 	struct tdq_group *tdg;
800 	int self;
801 	int cpu;
802 
803 	self = PCPU_GET(cpuid);
804 	if (smp_started == 0)
805 		return (self);
806 	/*
807 	 * If the current CPU has idled, just run it here.
808 	 */
809 	if ((tdq->tdq_group->tdg_idlemask & PCPU_GET(cpumask)) != 0)
810 		return (self);
811 	/*
812 	 * Try the last group we ran on.
813 	 */
814 	tdg = TDQ_CPU(ts->ts_cpu)->tdq_group;
815 	cpu = ffs(tdg->tdg_idlemask);
816 	if (cpu)
817 		return (cpu - 1);
818 	/*
819 	 * Search for an idle group.
820 	 */
821 	cpu = ffs(tdq_idle);
822 	if (cpu)
823 		return (cpu - 1);
824 	/*
825 	 * XXX If there are no idle groups, check for an idle core.
826 	 */
827 	/*
828 	 * No idle CPUs?
829 	 */
830 	return (self);
831 }
832 
833 static int
834 tdq_pickpri(struct tdq *tdq, struct td_sched *ts, int flags)
835 {
836 	struct pcpu *pcpu;
837 	int lowpri;
838 	int lowcpu;
839 	int lowload;
840 	int load;
841 	int self;
842 	int pri;
843 	int cpu;
844 
845 	self = PCPU_GET(cpuid);
846 	if (smp_started == 0)
847 		return (self);
848 
849 	pri = ts->ts_thread->td_priority;
850 	/*
851 	 * Regardless of affinity, if the last cpu is idle send it there.
852 	 */
853 	pcpu = pcpu_find(ts->ts_cpu);
854 	if (pcpu->pc_curthread->td_priority > PRI_MIN_IDLE) {
855 		CTR5(KTR_ULE,
856 		    "ts_cpu %d idle, ltick %d ticks %d pri %d curthread %d",
857 		    ts->ts_cpu, ts->ts_rltick, ticks, pri,
858 		    pcpu->pc_curthread->td_priority);
859 		return (ts->ts_cpu);
860 	}
861 	/*
862 	 * If we have affinity, try to place it on the cpu we last ran on.
863 	 */
864 	if (SCHED_AFFINITY(ts) && pcpu->pc_curthread->td_priority > pri) {
865 		CTR5(KTR_ULE,
866 		    "affinity for %d, ltick %d ticks %d pri %d curthread %d",
867 		    ts->ts_cpu, ts->ts_rltick, ticks, pri,
868 		    pcpu->pc_curthread->td_priority);
869 		return (ts->ts_cpu);
870 	}
871 	/*
872 	 * Try ourselves first; if we're running something of lower priority
873 	 * the woken thread may have some locality with the waker and execute
874 	 * faster here.
875 	 */
876 	if (tryself) {
877 		/*
878 		 * If we're being awoken by an interrupt thread or the waker
879 		 * is going right to sleep run here as well.
880 		 */
881 		if ((TDQ_SELF()->tdq_load == 1) && (flags & SRQ_YIELDING ||
882 		    curthread->td_pri_class == PRI_ITHD)) {
883 			CTR2(KTR_ULE, "tryself load %d flags %d",
884 			    TDQ_SELF()->tdq_load, flags);
885 			return (self);
886 		}
887 	}
888 	/*
889 	 * Look for an idle group.
890 	 */
891 	CTR1(KTR_ULE, "tdq_idle %X", tdq_idle);
892 	cpu = ffs(tdq_idle);
893 	if (cpu)
894 		return (cpu - 1);
895 	if (tryselfidle && pri < curthread->td_priority) {
896 		CTR1(KTR_ULE, "tryself %d",
897 		    curthread->td_priority);
898 		return (self);
899 	}
900 	/*
901  	 * Now search for the cpu running the lowest priority thread with
902 	 * the least load.
903 	 */
904 	lowload = 0;
905 	lowpri = lowcpu = 0;
906 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
907 		if (CPU_ABSENT(cpu))
908 			continue;
909 		pcpu = pcpu_find(cpu);
910 		pri = pcpu->pc_curthread->td_priority;
911 		CTR4(KTR_ULE,
912 		    "cpu %d pri %d lowcpu %d lowpri %d",
913 		    cpu, pri, lowcpu, lowpri);
914 		if (pri < lowpri)
915 			continue;
916 		load = TDQ_CPU(cpu)->tdq_load;
917 		if (lowpri && lowpri == pri && load > lowload)
918 			continue;
919 		lowpri = pri;
920 		lowcpu = cpu;
921 		lowload = load;
922 	}
923 
924 	return (lowcpu);
925 }
926 
927 #endif	/* SMP */
928 
929 /*
930  * Pick the highest priority task we have and return it.
931  */
932 
933 static struct td_sched *
934 tdq_choose(struct tdq *tdq)
935 {
936 	struct td_sched *ts;
937 
938 	mtx_assert(&sched_lock, MA_OWNED);
939 
940 	ts = runq_choose(&tdq->tdq_realtime);
941 	if (ts != NULL) {
942 		KASSERT(ts->ts_thread->td_priority <= PRI_MAX_REALTIME,
943 		    ("tdq_choose: Invalid priority on realtime queue %d",
944 		    ts->ts_thread->td_priority));
945 		return (ts);
946 	}
947 	ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
948 	if (ts != NULL) {
949 		KASSERT(ts->ts_thread->td_priority <= PRI_MAX_TIMESHARE &&
950 		    ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE,
951 		    ("tdq_choose: Invalid priority on timeshare queue %d",
952 		    ts->ts_thread->td_priority));
953 		return (ts);
954 	}
955 
956 	ts = runq_choose(&tdq->tdq_idle);
957 	if (ts != NULL) {
958 		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE,
959 		    ("tdq_choose: Invalid priority on idle queue %d",
960 		    ts->ts_thread->td_priority));
961 		return (ts);
962 	}
963 
964 	return (NULL);
965 }
966 
967 static void
968 tdq_setup(struct tdq *tdq)
969 {
970 	runq_init(&tdq->tdq_realtime);
971 	runq_init(&tdq->tdq_timeshare);
972 	runq_init(&tdq->tdq_idle);
973 	tdq->tdq_load = 0;
974 }
975 
976 static void
977 sched_setup(void *dummy)
978 {
979 #ifdef SMP
980 	int i;
981 #endif
982 
983 	/*
984 	 * To avoid divide-by-zero, we set realstathz to a dummy value in
985 	 * case sched_clock() is called before sched_initticks().
986 	 */
987 	realstathz = hz;
988 	sched_slice = (realstathz/10);	/* ~100ms */
989 	tickincr = 1 << SCHED_TICK_SHIFT;
990 
991 #ifdef SMP
992 	balance_groups = 0;
993 	/*
994 	 * Initialize the tdqs.
995 	 */
996 	for (i = 0; i < MAXCPU; i++) {
997 		struct tdq *tdq;
998 
999 		tdq = &tdq_cpu[i];
1000 		tdq_setup(&tdq_cpu[i]);
1001 	}
1002 	if (smp_topology == NULL) {
1003 		struct tdq_group *tdg;
1004 		struct tdq *tdq;
1005 		int cpus;
1006 
1007 		for (cpus = 0, i = 0; i < MAXCPU; i++) {
1008 			if (CPU_ABSENT(i))
1009 				continue;
1010 			tdq = &tdq_cpu[i];
1011 			tdg = &tdq_groups[cpus];
1012 			/*
1013 			 * Setup a tdq group with one member.
1014 			 */
1015 			tdq->tdq_transferable = 0;
1016 			tdq->tdq_group = tdg;
1017 			tdg->tdg_cpus = 1;
1018 			tdg->tdg_idlemask = 0;
1019 			tdg->tdg_cpumask = tdg->tdg_mask = 1 << i;
1020 			tdg->tdg_load = 0;
1021 			tdg->tdg_transferable = 0;
1022 			LIST_INIT(&tdg->tdg_members);
1023 			LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings);
1024 			cpus++;
1025 		}
1026 		tdg_maxid = cpus - 1;
1027 	} else {
1028 		struct tdq_group *tdg;
1029 		struct cpu_group *cg;
1030 		int j;
1031 
1032 		topology = 1;
1033 		for (i = 0; i < smp_topology->ct_count; i++) {
1034 			cg = &smp_topology->ct_group[i];
1035 			tdg = &tdq_groups[i];
1036 			/*
1037 			 * Initialize the group.
1038 			 */
1039 			tdg->tdg_idlemask = 0;
1040 			tdg->tdg_load = 0;
1041 			tdg->tdg_transferable = 0;
1042 			tdg->tdg_cpus = cg->cg_count;
1043 			tdg->tdg_cpumask = cg->cg_mask;
1044 			LIST_INIT(&tdg->tdg_members);
1045 			/*
1046 			 * Find all of the group members and add them.
1047 			 */
1048 			for (j = 0; j < MAXCPU; j++) {
1049 				if ((cg->cg_mask & (1 << j)) != 0) {
1050 					if (tdg->tdg_mask == 0)
1051 						tdg->tdg_mask = 1 << j;
1052 					tdq_cpu[j].tdq_transferable = 0;
1053 					tdq_cpu[j].tdq_group = tdg;
1054 					LIST_INSERT_HEAD(&tdg->tdg_members,
1055 					    &tdq_cpu[j], tdq_siblings);
1056 				}
1057 			}
1058 			if (tdg->tdg_cpus > 1)
1059 				balance_groups = 1;
1060 		}
1061 		tdg_maxid = smp_topology->ct_count - 1;
1062 	}
1063 	/*
1064 	 * Stagger the group and global load balancer so they do not
1065 	 * interfere with each other.
1066 	 */
1067 	bal_tick = ticks + hz;
1068 	if (balance_groups)
1069 		gbal_tick = ticks + (hz / 2);
1070 #else
1071 	tdq_setup(TDQ_SELF());
1072 #endif
1073 	mtx_lock_spin(&sched_lock);
1074 	tdq_load_add(TDQ_SELF(), &td_sched0);
1075 	mtx_unlock_spin(&sched_lock);
1076 }
1077 
1078 /* ARGSUSED */
1079 static void
1080 sched_initticks(void *dummy)
1081 {
1082 	mtx_lock_spin(&sched_lock);
1083 	realstathz = stathz ? stathz : hz;
1084 	sched_slice = (realstathz/10);	/* ~100ms */
1085 
1086 	/*
1087 	 * tickincr is shifted out by 10 to avoid rounding errors due to
1088 	 * hz not being evenly divisible by stathz on all platforms.
1089 	 */
1090 	tickincr = (hz << SCHED_TICK_SHIFT) / realstathz;
1091 	/*
1092 	 * This does not work for values of stathz that are more than
1093 	 * (1 << SCHED_TICK_SHIFT) * hz.  In practice this does not happen.
1094 	 */
1095 	if (tickincr == 0)
1096 		tickincr = 1;
1097 #ifdef SMP
1098 	affinity = SCHED_AFFINITY_DEFAULT;
1099 #endif
1100 	mtx_unlock_spin(&sched_lock);
1101 }
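
/*
 * A worked example of the tickincr math, with assumed clock rates of
 * hz = 1000 and stathz = 128: tickincr = (1000 << 10) / 128 = 8000, so each
 * stathz tick accounts for 8000 units, i.e. 7.8125 hz ticks, of run time.
 * Without the shift the same division would truncate to 7 and silently
 * drop roughly ten percent of the accounted time, which is the rounding
 * error the comment above refers to.
 */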
1102 
1103 
1104 /*
1105  * Scale the scheduling priority according to the "interactivity" of this
1106  * process.
1107  */
1108 static void
1109 sched_priority(struct thread *td)
1110 {
1111 	int score;
1112 	int pri;
1113 
1114 	if (td->td_pri_class != PRI_TIMESHARE)
1115 		return;
1116 	/*
1117 	 * If the score is interactive we place the thread in the realtime
1118 	 * queue with a priority that is less than kernel and interrupt
1119 	 * priorities.  These threads are not subject to nice restrictions.
1120 	 *
1121 	 * Scores greater than this are placed on the normal timeshare queue
1122 	 * where the priority is partially decided by the most recent cpu
1123 	 * utilization and the rest is decided by nice value.
1124 	 */
1125 	score = sched_interact_score(td);
1126 	if (score < sched_interact) {
1127 		pri = PRI_MIN_REALTIME;
1128 		pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact)
1129 		    * score;
1130 		KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
1131 		    ("sched_priority: invalid interactive priority %d score %d",
1132 		    pri, score));
1133 	} else {
1134 		pri = SCHED_PRI_MIN;
1135 		if (td->td_sched->ts_ticks)
1136 			pri += SCHED_PRI_TICKS(td->td_sched);
1137 		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
1138 		if (!(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE)) {
1139 			static int once = 1;
1140 			if (once) {
1141 				printf("sched_priority: invalid priority %d: ",
1142 				    pri);
1143 				printf("nice %d, ticks %d ftick %d ltick %d tick pri %d\n",
1144 				    td->td_proc->p_nice,
1145 				    td->td_sched->ts_ticks,
1146 				    td->td_sched->ts_ftick,
1147 				    td->td_sched->ts_ltick,
1148 				    SCHED_PRI_TICKS(td->td_sched));
1149 				once = 0;
1150 			}
1151 			pri = min(max(pri, PRI_MIN_TIMESHARE),
1152 			    PRI_MAX_TIMESHARE);
1153 		}
1154 	}
1155 	sched_user_prio(td, pri);
1156 
1157 	return;
1158 }
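
/*
 * Rough shape of the mapping above, in assumed numbers: with the default
 * sched_interact of 30, a score of 0 maps to PRI_MIN_REALTIME and a score
 * of 29 lands just below PRI_MAX_REALTIME, so interactive threads never
 * compete in the timeshare range at all.  A score of 30 or more instead
 * starts from SCHED_PRI_MIN and adds SCHED_PRI_TICKS() plus the nice
 * value, so a cpu hog at nice 0 drifts toward SCHED_PRI_MAX while a
 * lightly loaded thread stays near SCHED_PRI_MIN.
 */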
1159 
1160 /*
1161  * This routine enforces a maximum limit on the amount of scheduling history
1162  * kept.  It is called after either the slptime or runtime is adjusted.
1163  */
1164 static void
1165 sched_interact_update(struct thread *td)
1166 {
1167 	struct td_sched *ts;
1168 	u_int sum;
1169 
1170 	ts = td->td_sched;
1171 	sum = ts->skg_runtime + ts->skg_slptime;
1172 	if (sum < SCHED_SLP_RUN_MAX)
1173 		return;
1174 	/*
1175 	 * This only happens from two places:
1176 	 * 1) We have added an unusual amount of run time from fork_exit.
1177 	 * 2) We have added an unusual amount of sleep time from sched_sleep().
1178 	 */
1179 	if (sum > SCHED_SLP_RUN_MAX * 2) {
1180 		if (ts->skg_runtime > ts->skg_slptime) {
1181 			ts->skg_runtime = SCHED_SLP_RUN_MAX;
1182 			ts->skg_slptime = 1;
1183 		} else {
1184 			ts->skg_slptime = SCHED_SLP_RUN_MAX;
1185 			ts->skg_runtime = 1;
1186 		}
1187 		return;
1188 	}
1189 	/*
1190 	 * If we have exceeded by more than 1/5th then the algorithm below
1191 	 * will not bring us back into range.  Dividing by two here forces
1192 	 * us into the range of [3/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX].
1193 	 */
1194 	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1195 		ts->skg_runtime /= 2;
1196 		ts->skg_slptime /= 2;
1197 		return;
1198 	}
1199 	ts->skg_runtime = (ts->skg_runtime / 5) * 4;
1200 	ts->skg_slptime = (ts->skg_slptime / 5) * 4;
1201 }
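
/*
 * Example of the history clamp above, with hz = 1000 assumed so that
 * SCHED_SLP_RUN_MAX is (5000 << 10) = 5120000: a thread with
 * skg_runtime = 4000000 and skg_slptime = 2500000 has a sum of 6500000,
 * which is more than 6/5 of the maximum, so both halves are divided by two
 * and the sum drops to 3250000.  A smaller overshoot is instead decayed by
 * the gentler 4/5 scaling, which preserves the run/sleep ratio while
 * trimming the total history back under the cap.
 */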
1202 
1203 static void
1204 sched_interact_fork(struct thread *td)
1205 {
1206 	int ratio;
1207 	int sum;
1208 
1209 	sum = td->td_sched->skg_runtime + td->td_sched->skg_slptime;
1210 	if (sum > SCHED_SLP_RUN_FORK) {
1211 		ratio = sum / SCHED_SLP_RUN_FORK;
1212 		td->td_sched->skg_runtime /= ratio;
1213 		td->td_sched->skg_slptime /= ratio;
1214 	}
1215 }
1216 
1217 static int
1218 sched_interact_score(struct thread *td)
1219 {
1220 	int div;
1221 
1222 	if (td->td_sched->skg_runtime > td->td_sched->skg_slptime) {
1223 		div = max(1, td->td_sched->skg_runtime / SCHED_INTERACT_HALF);
1224 		return (SCHED_INTERACT_HALF +
1225 		    (SCHED_INTERACT_HALF - (td->td_sched->skg_slptime / div)));
1226 	}
1227 	if (td->td_sched->skg_slptime > td->td_sched->skg_runtime) {
1228 		div = max(1, td->td_sched->skg_slptime / SCHED_INTERACT_HALF);
1229 		return (td->td_sched->skg_runtime / div);
1230 	}
1231 	/* runtime == slptime */
1232 	if (td->td_sched->skg_runtime)
1233 		return (SCHED_INTERACT_HALF);
1234 
1235 	/*
1236 	 * This can happen if slptime and runtime are 0.
1237 	 */
1238 	return (0);
1239 
1240 }
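
/*
 * Two illustrative scores from the formula above (the numbers are made
 * up): a thread that has slept four times as long as it has run scores
 * roughly 50 * run / slp = 12, comfortably under the default threshold of
 * 30, so it is treated as interactive.  A thread that has run four times
 * as long as it has slept scores roughly 100 - 12 = 88 and is handled as
 * a batch job.  Equal run and sleep time scores exactly
 * SCHED_INTERACT_HALF.
 */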
1241 
1242 /*
1243  * Called from proc0_init() to bootstrap the scheduler.
1244  */
1245 void
1246 schedinit(void)
1247 {
1248 
1249 	/*
1250 	 * Set up the scheduler specific parts of proc0.
1251 	 */
1252 	proc0.p_sched = NULL; /* XXX */
1253 	thread0.td_sched = &td_sched0;
1254 	thread0.td_lock = &sched_lock;
1255 	td_sched0.ts_ltick = ticks;
1256 	td_sched0.ts_ftick = ticks;
1257 	td_sched0.ts_thread = &thread0;
1258 }
1259 
1260 /*
1261  * This is only somewhat accurate since given many processes of the same
1262  * priority they will switch when their slices run out, which will be
1263  * at most sched_slice stathz ticks.
1264  */
1265 int
1266 sched_rr_interval(void)
1267 {
1268 
1269 	/* Convert sched_slice to hz */
1270 	return (hz/(realstathz/sched_slice));
1271 }
1272 
1273 static void
1274 sched_pctcpu_update(struct td_sched *ts)
1275 {
1276 
1277 	if (ts->ts_ticks == 0)
1278 		return;
1279 	if (ticks - (hz / 10) < ts->ts_ltick &&
1280 	    SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
1281 		return;
1282 	/*
1283 	 * Adjust counters and watermark for pctcpu calc.
1284 	 */
1285 	if (ts->ts_ltick > ticks - SCHED_TICK_TARG)
1286 		ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
1287 			    SCHED_TICK_TARG;
1288 	else
1289 		ts->ts_ticks = 0;
1290 	ts->ts_ltick = ticks;
1291 	ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG;
1292 }
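
/*
 * What the rescale above does, in assumed numbers (hz = 1000, so
 * SCHED_TICK_TARG = 10000): suppose a thread has been sampled over a
 * 20000 tick window and ran for half of it, so ts_ticks is about
 * 10000 << 10.  Dividing by the window length gives the per-tick average
 * and multiplying by SCHED_TICK_TARG re-expresses it over a fresh 10000
 * tick window, leaving about 5000 << 10, still 50% cpu, with ts_ftick
 * pulled up so the window never grows without bound.
 */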
1293 
1294 static void
1295 sched_thread_priority(struct thread *td, u_char prio)
1296 {
1297 	struct td_sched *ts;
1298 
1299 	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
1300 	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
1301 	    curthread->td_proc->p_comm);
1302 	ts = td->td_sched;
1303 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1304 	if (td->td_priority == prio)
1305 		return;
1306 
1307 	if (TD_ON_RUNQ(td) && prio < td->td_priority) {
1308 		/*
1309 		 * If the priority has been elevated due to priority
1310 		 * propagation, we may have to move ourselves to a new
1311 		 * queue.  This could be optimized to not re-add in some
1312 		 * cases.
1313 		 */
1314 		MPASS(td->td_lock == &sched_lock);
1315 		sched_rem(td);
1316 		td->td_priority = prio;
1317 		sched_add(td, SRQ_BORROWING|SRQ_OURSELF);
1318 	} else
1319 		td->td_priority = prio;
1320 }
1321 
1322 /*
1323  * Update a thread's priority when it is lent another thread's
1324  * priority.
1325  */
1326 void
1327 sched_lend_prio(struct thread *td, u_char prio)
1328 {
1329 
1330 	td->td_flags |= TDF_BORROWING;
1331 	sched_thread_priority(td, prio);
1332 }
1333 
1334 /*
1335  * Restore a thread's priority when priority propagation is
1336  * over.  The prio argument is the minimum priority the thread
1337  * needs to have to satisfy other possible priority lending
1338  * requests.  If the thread's regular priority is less
1339  * important than prio, the thread will keep a priority boost
1340  * of prio.
1341  */
1342 void
1343 sched_unlend_prio(struct thread *td, u_char prio)
1344 {
1345 	u_char base_pri;
1346 
1347 	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1348 	    td->td_base_pri <= PRI_MAX_TIMESHARE)
1349 		base_pri = td->td_user_pri;
1350 	else
1351 		base_pri = td->td_base_pri;
1352 	if (prio >= base_pri) {
1353 		td->td_flags &= ~TDF_BORROWING;
1354 		sched_thread_priority(td, base_pri);
1355 	} else
1356 		sched_lend_prio(td, prio);
1357 }
1358 
1359 void
1360 sched_prio(struct thread *td, u_char prio)
1361 {
1362 	u_char oldprio;
1363 
1364 	/* First, update the base priority. */
1365 	td->td_base_pri = prio;
1366 
1367 	/*
1368 	 * If the thread is borrowing another thread's priority, don't
1369 	 * ever lower the priority.
1370 	 */
1371 	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1372 		return;
1373 
1374 	/* Change the real priority. */
1375 	oldprio = td->td_priority;
1376 	sched_thread_priority(td, prio);
1377 
1378 	/*
1379 	 * If the thread is on a turnstile, then let the turnstile update
1380 	 * its state.
1381 	 */
1382 	if (TD_ON_LOCK(td) && oldprio != prio)
1383 		turnstile_adjust(td, oldprio);
1384 }
1385 
1386 void
1387 sched_user_prio(struct thread *td, u_char prio)
1388 {
1389 	u_char oldprio;
1390 
1391 	td->td_base_user_pri = prio;
1392 	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
1393                 return;
1394 	oldprio = td->td_user_pri;
1395 	td->td_user_pri = prio;
1396 
1397 	if (TD_ON_UPILOCK(td) && oldprio != prio)
1398 		umtx_pi_adjust(td, oldprio);
1399 }
1400 
1401 void
1402 sched_lend_user_prio(struct thread *td, u_char prio)
1403 {
1404 	u_char oldprio;
1405 
1406 	td->td_flags |= TDF_UBORROWING;
1407 
1408 	oldprio = td->td_user_pri;
1409 	td->td_user_pri = prio;
1410 
1411 	if (TD_ON_UPILOCK(td) && oldprio != prio)
1412 		umtx_pi_adjust(td, oldprio);
1413 }
1414 
1415 void
1416 sched_unlend_user_prio(struct thread *td, u_char prio)
1417 {
1418 	u_char base_pri;
1419 
1420 	base_pri = td->td_base_user_pri;
1421 	if (prio >= base_pri) {
1422 		td->td_flags &= ~TDF_UBORROWING;
1423 		sched_user_prio(td, base_pri);
1424 	} else
1425 		sched_lend_user_prio(td, prio);
1426 }
1427 
1428 void
1429 sched_switch(struct thread *td, struct thread *newtd, int flags)
1430 {
1431 	struct tdq *tdq;
1432 	struct td_sched *ts;
1433 	int preempt;
1434 
1435 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1436 
1437 	preempt = flags & SW_PREEMPT;
1438 	tdq = TDQ_SELF();
1439 	ts = td->td_sched;
1440 	td->td_lastcpu = td->td_oncpu;
1441 	td->td_oncpu = NOCPU;
1442 	td->td_flags &= ~TDF_NEEDRESCHED;
1443 	td->td_owepreempt = 0;
1444 	/*
1445 	 * If the thread has been assigned it may be in the process of switching
1446 	 * to the new cpu.  This is the case in sched_bind().
1447 	 */
1448 	/*
1449 	 * Switch to the sched lock to fix things up and pick
1450 	 * a new thread.
1451 	 */
1452 	if (td->td_lock != &sched_lock) {
1453 		mtx_lock_spin(&sched_lock);
1454 		thread_unlock(td);
1455 	}
1456 	if (TD_IS_IDLETHREAD(td)) {
1457 		MPASS(td->td_lock == &sched_lock);
1458 		TD_SET_CAN_RUN(td);
1459 	} else if (TD_IS_RUNNING(td)) {
1460 		/*
1461 		 * Don't allow the thread to migrate
1462 		 * from a preemption.
1463 		 */
1464 		tdq_load_rem(tdq, ts);
1465 		if (preempt)
1466 			sched_pin_td(td);
1467 		sched_add(td, preempt ?
1468 		    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1469 		    SRQ_OURSELF|SRQ_YIELDING);
1470 		if (preempt)
1471 			sched_unpin_td(td);
1472 	} else
1473 		tdq_load_rem(tdq, ts);
1474 	mtx_assert(&sched_lock, MA_OWNED);
1475 	if (newtd != NULL) {
1476 		/*
1477 		 * If we bring in a thread, account for it as if it had been
1478 		 * added to the run queue and then chosen.
1479 		 */
1480 		TD_SET_RUNNING(newtd);
1481 		tdq_load_add(TDQ_SELF(), newtd->td_sched);
1482 	} else
1483 		newtd = choosethread();
1484 	if (td != newtd) {
1485 #ifdef	HWPMC_HOOKS
1486 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1487 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1488 #endif
1489 
1490 		cpu_switch(td, newtd, td->td_lock);
1491 #ifdef	HWPMC_HOOKS
1492 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1493 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1494 #endif
1495 	}
1496 	sched_lock.mtx_lock = (uintptr_t)td;
1497 	td->td_oncpu = PCPU_GET(cpuid);
1498 	MPASS(td->td_lock == &sched_lock);
1499 }
1500 
1501 void
1502 sched_nice(struct proc *p, int nice)
1503 {
1504 	struct thread *td;
1505 
1506 	PROC_LOCK_ASSERT(p, MA_OWNED);
1507 	PROC_SLOCK_ASSERT(p, MA_OWNED);
1508 
1509 	p->p_nice = nice;
1510 	FOREACH_THREAD_IN_PROC(p, td) {
1511 		thread_lock(td);
1512 		sched_priority(td);
1513 		sched_prio(td, td->td_base_user_pri);
1514 		thread_unlock(td);
1515 	}
1516 }
1517 
1518 void
1519 sched_sleep(struct thread *td)
1520 {
1521 
1522 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1523 
1524 	td->td_sched->ts_slptime = ticks;
1525 }
1526 
1527 void
1528 sched_wakeup(struct thread *td)
1529 {
1530 	struct td_sched *ts;
1531 	int slptime;
1532 
1533 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1534 	ts = td->td_sched;
1535 	/*
1536 	 * If we slept for more than a tick update our interactivity and
1537 	 * priority.
1538 	 */
1539 	slptime = ts->ts_slptime;
1540 	ts->ts_slptime = 0;
1541 	if (slptime && slptime != ticks) {
1542 		u_int hzticks;
1543 
1544 		hzticks = (ticks - slptime) << SCHED_TICK_SHIFT;
1545 		ts->skg_slptime += hzticks;
1546 		sched_interact_update(td);
1547 		sched_pctcpu_update(ts);
1548 		sched_priority(td);
1549 	}
1550 	/* Reset the slice value after we sleep. */
1551 	ts->ts_slice = sched_slice;
1552 	sched_add(td, SRQ_BORING);
1553 }
1554 
1555 /*
1556  * Penalize the parent for creating a new child and initialize the child's
1557  * priority.
1558  */
1559 void
1560 sched_fork(struct thread *td, struct thread *child)
1561 {
1562 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1563 	sched_fork_thread(td, child);
1564 	/*
1565 	 * Penalize the parent and child for forking.
1566 	 */
1567 	sched_interact_fork(child);
1568 	sched_priority(child);
1569 	td->td_sched->skg_runtime += tickincr;
1570 	sched_interact_update(td);
1571 	sched_priority(td);
1572 }
1573 
1574 void
1575 sched_fork_thread(struct thread *td, struct thread *child)
1576 {
1577 	struct td_sched *ts;
1578 	struct td_sched *ts2;
1579 
1580 	/*
1581 	 * Initialize child.
1582 	 */
1583 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1584 	sched_newthread(child);
1585 	child->td_lock = &sched_lock;
1586 	ts = td->td_sched;
1587 	ts2 = child->td_sched;
1588 	ts2->ts_cpu = ts->ts_cpu;
1589 	ts2->ts_runq = NULL;
1590 	/*
1591 	 * Grab our parent's cpu estimation information and priority.
1592 	 */
1593 	ts2->ts_ticks = ts->ts_ticks;
1594 	ts2->ts_ltick = ts->ts_ltick;
1595 	ts2->ts_ftick = ts->ts_ftick;
1596 	child->td_user_pri = td->td_user_pri;
1597 	child->td_base_user_pri = td->td_base_user_pri;
1598 	/*
1599 	 * And update interactivity score.
1600 	 */
1601 	ts2->skg_slptime = ts->skg_slptime;
1602 	ts2->skg_runtime = ts->skg_runtime;
1603 	ts2->ts_slice = 1;	/* Attempt to quickly learn interactivity. */
1604 }
1605 
1606 void
1607 sched_class(struct thread *td, int class)
1608 {
1609 
1610 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1611 	if (td->td_pri_class == class)
1612 		return;
1613 
1614 #ifdef SMP
1615 	/*
1616 	 * On SMP if we're on the RUNQ we must adjust the transferable
1617 	 * count because we could be changing to or from an interrupt
1618 	 * class.
1619 	 */
1620 	if (TD_ON_RUNQ(td)) {
1621 		struct tdq *tdq;
1622 
1623 		tdq = TDQ_CPU(td->td_sched->ts_cpu);
1624 		if (THREAD_CAN_MIGRATE(td)) {
1625 			tdq->tdq_transferable--;
1626 			tdq->tdq_group->tdg_transferable--;
1627 		}
1628 		td->td_pri_class = class;
1629 		if (THREAD_CAN_MIGRATE(td)) {
1630 			tdq->tdq_transferable++;
1631 			tdq->tdq_group->tdg_transferable++;
1632 		}
1633 	}
1634 #endif
1635 	td->td_pri_class = class;
1636 }
1637 
1638 /*
1639  * Return some of the child's priority and interactivity to the parent.
1640  */
1641 void
1642 sched_exit(struct proc *p, struct thread *child)
1643 {
1644 	struct thread *td;
1645 
1646 	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
1647 	    child, child->td_proc->p_comm, child->td_priority);
1648 
1649 	PROC_SLOCK_ASSERT(p, MA_OWNED);
1650 	td = FIRST_THREAD_IN_PROC(p);
1651 	sched_exit_thread(td, child);
1652 }
1653 
1654 void
1655 sched_exit_thread(struct thread *td, struct thread *child)
1656 {
1657 
1658 	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
1659 	    child, child->td_proc->p_comm, child->td_priority);
1660 
1661 	thread_lock(child);
1662 	tdq_load_rem(TDQ_CPU(child->td_sched->ts_cpu), child->td_sched);
1663 	thread_unlock(child);
1664 #ifdef KSE
1665 	/*
1666 	 * KSE forks and exits so often that this penalty causes short-lived
1667 	 * threads to always be non-interactive.  This causes mozilla to
1668 	 * crawl under load.
1669 	 */
1670 	if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc)
1671 		return;
1672 #endif
1673 	/*
1674 	 * Give the child's runtime to the parent without returning the
1675 	 * sleep time as a penalty to the parent.  This causes shells that
1676 	 * launch expensive things to mark their children as expensive.
1677 	 */
1678 	thread_lock(td);
1679 	td->td_sched->skg_runtime += child->td_sched->skg_runtime;
1680 	sched_interact_update(td);
1681 	sched_priority(td);
1682 	thread_unlock(td);
1683 }
1684 
1685 void
1686 sched_userret(struct thread *td)
1687 {
1688 	/*
1689 	 * XXX we cheat slightly on the locking here to avoid locking in
1690 	 * the usual case.  Setting td_priority here is essentially an
1691 	 * incomplete workaround for not setting it properly elsewhere.
1692 	 * Now that some interrupt handlers are threads, not setting it
1693 	 * properly elsewhere can clobber it in the window between setting
1694 	 * it here and returning to user mode, so don't waste time setting
1695 	 * it perfectly here.
1696 	 */
1697 	KASSERT((td->td_flags & TDF_BORROWING) == 0,
1698 	    ("thread with borrowed priority returning to userland"));
1699 	if (td->td_priority != td->td_user_pri) {
1700 		thread_lock(td);
1701 		td->td_priority = td->td_user_pri;
1702 		td->td_base_pri = td->td_user_pri;
1703 		thread_unlock(td);
1704         }
1705 }
1706 
1707 void
1708 sched_clock(struct thread *td)
1709 {
1710 	struct tdq *tdq;
1711 	struct td_sched *ts;
1712 
1713 	mtx_assert(&sched_lock, MA_OWNED);
1714 #ifdef SMP
1715 	sched_smp_tick(td);
1716 #endif
1717 	tdq = TDQ_SELF();
1718 	/*
1719 	 * Advance the insert index once for each tick to ensure that all
1720 	 * threads get a chance to run.
1721 	 */
1722 	if (tdq->tdq_idx == tdq->tdq_ridx) {
1723 		tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
1724 		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
1725 			tdq->tdq_ridx = tdq->tdq_idx;
1726 	}
1727 	ts = td->td_sched;
1728 	/*
1729 	 * We only do slicing code for TIMESHARE threads.
1730 	 */
1731 	if (td->td_pri_class != PRI_TIMESHARE)
1732 		return;
1733 	/*
1734 	 * We used a tick; charge it to the thread so that we can compute our
1735 	 * interactivity.
1736 	 */
1737 	td->td_sched->skg_runtime += tickincr;
1738 	sched_interact_update(td);
1739 	/*
1740 	 * We used up one time slice.
1741 	 */
1742 	if (--ts->ts_slice > 0)
1743 		return;
1744 	/*
1745 	 * We're out of time, recompute priorities and requeue.
1746 	 */
1747 	sched_priority(td);
1748 	td->td_flags |= TDF_NEEDRESCHED;
1749 }
1750 
1751 int
1752 sched_runnable(void)
1753 {
1754 	struct tdq *tdq;
1755 	int load;
1756 
1757 	load = 1;
1758 
1759 	tdq = TDQ_SELF();
1760 #ifdef SMP
1761 	if (tdq_busy)
1762 		goto out;
1763 #endif
1764 	if ((curthread->td_flags & TDF_IDLETD) != 0) {
1765 		if (tdq->tdq_load > 0)
1766 			goto out;
1767 	} else
1768 		if (tdq->tdq_load - 1 > 0)
1769 			goto out;
1770 	load = 0;
1771 out:
1772 	return (load);
1773 }
1774 
1775 struct thread *
1776 sched_choose(void)
1777 {
1778 	struct tdq *tdq;
1779 	struct td_sched *ts;
1780 
1781 	mtx_assert(&sched_lock, MA_OWNED);
1782 	tdq = TDQ_SELF();
1783 #ifdef SMP
1784 restart:
1785 #endif
1786 	ts = tdq_choose(tdq);
1787 	if (ts) {
1788 #ifdef SMP
1789 		if (ts->ts_thread->td_priority > PRI_MIN_IDLE)
1790 			if (tdq_idled(tdq) == 0)
1791 				goto restart;
1792 #endif
1793 		tdq_runq_rem(tdq, ts);
1794 		return (ts->ts_thread);
1795 	}
1796 #ifdef SMP
1797 	if (tdq_idled(tdq) == 0)
1798 		goto restart;
1799 #endif
1800 	return (PCPU_GET(idlethread));
1801 }
1802 
1803 static int
1804 sched_preempt(struct thread *td)
1805 {
1806 	struct thread *ctd;
1807 	int cpri;
1808 	int pri;
1809 
1810 	ctd = curthread;
1811 	pri = td->td_priority;
1812 	cpri = ctd->td_priority;
1813 	if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
1814 		return (0);
1815 	/*
1816 	 * Always preempt IDLE threads.  Otherwise only if the preempting
1817 	 * thread is an ithread.
1818 	 */
1819 	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
1820 		return (0);
1821 	if (ctd->td_critnest > 1) {
1822 		CTR1(KTR_PROC, "sched_preempt: in critical section %d",
1823 		    ctd->td_critnest);
1824 		ctd->td_owepreempt = 1;
1825 		return (0);
1826 	}
1827 	/*
1828 	 * Thread is runnable but not yet put on system run queue.
1829 	 */
1830 	MPASS(TD_ON_RUNQ(td));
1831 	TD_SET_RUNNING(td);
1832 	MPASS(ctd->td_lock == &sched_lock);
1833 	MPASS(td->td_lock == &sched_lock);
1834 	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
1835 	    td->td_proc->p_pid, td->td_proc->p_comm);
1836 	/*
1837 	 * We enter the switch with two runnable threads that both have
1838 	 * the same lock.  When we return td may be sleeping so we need
1839 	 * to switch locks to make sure it is locked correctly.
1840 	 */
1841 	SCHED_STAT_INC(switch_preempt);
1842 	mi_switch(SW_INVOL|SW_PREEMPT, td);
1843 	spinlock_enter();
1844 	thread_unlock(ctd);
1845 	thread_lock(td);
1846 	spinlock_exit();
1847 
1848 	return (1);
1849 }
1850 
1851 void
1852 sched_add(struct thread *td, int flags)
1853 {
1854 	struct tdq *tdq;
1855 	struct td_sched *ts;
1856 	int preemptive;
1857 	int class;
1858 #ifdef SMP
1859 	int cpuid;
1860 	int cpumask;
1861 #endif
1862 	ts = td->td_sched;
1863 
1864 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1865 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
1866 	    td, td->td_proc->p_comm, td->td_priority, curthread,
1867 	    curthread->td_proc->p_comm);
1868 	KASSERT((td->td_inhibitors == 0),
1869 	    ("sched_add: trying to run inhibited thread"));
1870 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1871 	    ("sched_add: bad thread state"));
1872 	KASSERT(td->td_proc->p_sflag & PS_INMEM,
1873 	    ("sched_add: process swapped out"));
1874 	/*
1875 	 * Now that the thread is moving to the run-queue, set the lock
1876 	 * to the scheduler's lock.
1877 	 */
1878 	if (td->td_lock != &sched_lock) {
1879 		mtx_lock_spin(&sched_lock);
1880 		thread_lock_set(td, &sched_lock);
1881 	}
1882 	mtx_assert(&sched_lock, MA_OWNED);
1883         TD_SET_RUNQ(td);
1884 	tdq = TDQ_SELF();
1885 	class = PRI_BASE(td->td_pri_class);
1886 	preemptive = !(flags & SRQ_YIELDING);
1887 	/*
1888 	 * Recalculate the priority before we select the target cpu or
1889 	 * run-queue.
1890 	 */
1891 	if (class == PRI_TIMESHARE)
1892 		sched_priority(td);
1893 	if (ts->ts_slice == 0)
1894 		ts->ts_slice = sched_slice;
1895 #ifdef SMP
1896 	cpuid = PCPU_GET(cpuid);
1897 	/*
1898 	 * Pick the destination cpu and if it isn't ours transfer to the
1899 	 * target cpu.
1900 	 */
1901 	if (THREAD_CAN_MIGRATE(td)) {
1902 		if (td->td_priority <= PRI_MAX_ITHD) {
1903 			CTR2(KTR_ULE, "ithd %d < %d",
1904 			    td->td_priority, PRI_MAX_ITHD);
1905 			ts->ts_cpu = cpuid;
1906 		} else if (pick_pri)
1907 			ts->ts_cpu = tdq_pickpri(tdq, ts, flags);
1908 		else
1909 			ts->ts_cpu = tdq_pickidle(tdq, ts);
1910 	} else
1911 		CTR1(KTR_ULE, "pinned %d", td->td_pinned);
1912 	if (ts->ts_cpu != cpuid)
1913 		preemptive = 0;
1914 	tdq = TDQ_CPU(ts->ts_cpu);
1915 	cpumask = 1 << ts->ts_cpu;
1916 	/*
1917 	 * If we had been idle, clear our bit in the group and potentially
1918 	 * the global bitmap.
1919 	 */
1920 	if ((class != PRI_IDLE && class != PRI_ITHD) &&
1921 	    (tdq->tdq_group->tdg_idlemask & cpumask) != 0) {
1922 		/*
1923 		 * Check to see if our group is unidling, and if so, remove it
1924 		 * from the global idle mask.
1925 		 */
1926 		if (tdq->tdq_group->tdg_idlemask ==
1927 		    tdq->tdq_group->tdg_cpumask)
1928 			atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
1929 		/*
1930 		 * Now remove ourselves from the group-specific idle mask.
1931 		 */
1932 		tdq->tdq_group->tdg_idlemask &= ~cpumask;
1933 	}
1934 #endif
1935 	/*
1936 	 * Pick the run queue based on priority.
1937 	 */
1938 	if (td->td_priority <= PRI_MAX_REALTIME)
1939 		ts->ts_runq = &tdq->tdq_realtime;
1940 	else if (td->td_priority <= PRI_MAX_TIMESHARE)
1941 		ts->ts_runq = &tdq->tdq_timeshare;
1942 	else
1943 		ts->ts_runq = &tdq->tdq_idle;
1944 	if (preemptive && sched_preempt(td))
1945 		return;
1946 	tdq_runq_add(tdq, ts, flags);
1947 	tdq_load_add(tdq, ts);
1948 #ifdef SMP
1949 	if (ts->ts_cpu != cpuid) {
1950 		tdq_notify(ts);
1951 		return;
1952 	}
1953 #endif
1954 	if (td->td_priority < curthread->td_priority)
1955 		curthread->td_flags |= TDF_NEEDRESCHED;
1956 }
1957 
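/*
 * Remove a thread from its run-queue, drop the queue's load accordingly,
 * and mark the thread able to run again.
 */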
1958 void
1959 sched_rem(struct thread *td)
1960 {
1961 	struct tdq *tdq;
1962 	struct td_sched *ts;
1963 
1964 	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
1965 	    td, td->td_proc->p_comm, td->td_priority, curthread,
1966 	    curthread->td_proc->p_comm);
1967 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1968 	ts = td->td_sched;
1969 	KASSERT(TD_ON_RUNQ(td),
1970 	    ("sched_rem: thread not on run queue"));
1971 
1972 	tdq = TDQ_CPU(ts->ts_cpu);
1973 	tdq_runq_rem(tdq, ts);
1974 	tdq_load_rem(tdq, ts);
1975 	TD_SET_CAN_RUN(td);
1976 }
1977 
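/*
 * Compute the fraction of CPU a thread has used recently, expressed as
 * a fixed-point fraction of FSCALE (a worked example follows inside).
 */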
1978 fixpt_t
1979 sched_pctcpu(struct thread *td)
1980 {
1981 	fixpt_t pctcpu;
1982 	struct td_sched *ts;
1983 
1984 	pctcpu = 0;
1985 	ts = td->td_sched;
1986 	if (ts == NULL)
1987 		return (0);
1988 
1989 	thread_lock(td);
1990 	if (ts->ts_ticks) {
1991 		int rtick;
1992 
1993 		sched_pctcpu_update(ts);
1994 		/* How many rticks per second? */
1995 		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
1996 		pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
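		/*
		 * Worked example (a sketch, assuming FSHIFT == 11, hence
		 * FSCALE == 2048, and hz == 1000): a thread that ran for
		 * half of every second has rtick == 500, so pctcpu ==
		 * (2048 * ((2048 * 500) / 1000)) >> 11 == 1024, i.e. 50%
		 * of FSCALE.
		 */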
1997 	}
1998 	td->td_proc->p_swtime = ts->ts_ltick - ts->ts_ftick;
1999 	thread_unlock(td);
2000 
2001 	return (pctcpu);
2002 }
2003 
2004 void
2005 sched_bind(struct thread *td, int cpu)
2006 {
2007 	struct td_sched *ts;
2008 
2009 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2010 	ts = td->td_sched;
2011 	if (ts->ts_flags & TSF_BOUND)
2012 		sched_unbind(td);
2013 	ts->ts_flags |= TSF_BOUND;
2014 #ifdef SMP
2015 	sched_pin();
2016 	if (PCPU_GET(cpuid) == cpu)
2017 		return;
2018 	ts->ts_cpu = cpu;
2019 	/* When we return from mi_switch we'll be on the correct cpu. */
2020 	mi_switch(SW_VOL, NULL);
2021 #endif
2022 }
2023 
2024 void
2025 sched_unbind(struct thread *td)
2026 {
2027 	struct td_sched *ts;
2028 
2029 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2030 	ts = td->td_sched;
2031 	if ((ts->ts_flags & TSF_BOUND) == 0)
2032 		return;
2033 	ts->ts_flags &= ~TSF_BOUND;
2034 #ifdef SMP
2035 	sched_unpin();
2036 #endif
2037 }
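
/*
 * Typical bind/unbind pattern (a sketch with a hypothetical caller; both
 * functions assert that the thread lock is held):
 *
 *	thread_lock(curthread);
 *	sched_bind(curthread, cpu);
 *	thread_unlock(curthread);
 *	... work that must run on "cpu" ...
 *	thread_lock(curthread);
 *	sched_unbind(curthread);
 *	thread_unlock(curthread);
 */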
2038 
2039 int
2040 sched_is_bound(struct thread *td)
2041 {
2042 	THREAD_LOCK_ASSERT(td, MA_OWNED);
2043 	return (td->td_sched->ts_flags & TSF_BOUND);
2044 }
2045 
2046 void
2047 sched_relinquish(struct thread *td)
2048 {
2049 	thread_lock(td);
2050 	if (td->td_pri_class == PRI_TIMESHARE)
2051 		sched_prio(td, PRI_MAX_TIMESHARE);
2052 	SCHED_STAT_INC(switch_relinquish);
2053 	mi_switch(SW_VOL, NULL);
2054 	thread_unlock(td);
2055 }
2056 
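/*
 * Return the aggregate runnable load: on SMP the sum of every CPU
 * group's load, otherwise this CPU's system load.
 */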
2057 int
2058 sched_load(void)
2059 {
2060 #ifdef SMP
2061 	int total;
2062 	int i;
2063 
2064 	total = 0;
2065 	for (i = 0; i <= tdg_maxid; i++)
2066 		total += TDQ_GROUP(i)->tdg_load;
2067 	return (total);
2068 #else
2069 	return (TDQ_SELF()->tdq_sysload);
2070 #endif
2071 }
2072 
2073 int
2074 sched_sizeof_proc(void)
2075 {
2076 	return (sizeof(struct proc));
2077 }
2078 
2079 int
2080 sched_sizeof_thread(void)
2081 {
2082 	return (sizeof(struct thread) + sizeof(struct td_sched));
2083 }
2084 
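/*
 * Per-tick accounting hook: accumulate scaled ticks for the pctcpu
 * estimate and let sched_pctcpu_update() decay the window once it has
 * grown past SCHED_TICK_MAX.
 */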
2085 void
2086 sched_tick(void)
2087 {
2088 	struct td_sched *ts;
2089 
2090 	ts = curthread->td_sched;
2091 	/* Adjust ticks for pctcpu */
2092 	ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
2093 	ts->ts_ltick = ticks;
2094 	/*
2095 	 * Update if we've exceeded our desired tick threshold by over one
2096 	 * second.
2097 	 */
2098 	if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
2099 		sched_pctcpu_update(ts);
2100 }
2101 
2102 /*
2103  * The actual idle thread.
2104  */
2105 void
2106 sched_idletd(void *dummy)
2107 {
2108 	struct proc *p;
2109 	struct thread *td;
2110 
2111 	td = curthread;
2112 	p = td->td_proc;
2113 	mtx_assert(&Giant, MA_NOTOWNED);
2114 	/* ULE relies on preemption for idle interruption. */
2115 	for (;;)
2116 		cpu_idle();
2117 }
2118 
2119 /*
2120  * A CPU is entering the scheduler for the first time or a thread is exiting.
2121  */
2122 void
2123 sched_throw(struct thread *td)
2124 {
2125 	/*
2126 	 * Correct spinlock nesting.  The idle thread context that we are
2127 	 * borrowing was created so that it would start out with a single
2128 	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
2129 	 * explicitly acquired locks in this function, the nesting count
2130 	 * is now 2 rather than 1.  Since we are nested, calling
2131 	 * spinlock_exit() will simply adjust the counts without allowing
2132  * spinlock-using code to interrupt us.
2133 	 */
2134 	if (td == NULL) {
2135 		mtx_lock_spin(&sched_lock);
2136 		spinlock_exit();
2137 	} else {
2138 		MPASS(td->td_lock == &sched_lock);
2139 	}
2140 	mtx_assert(&sched_lock, MA_OWNED);
2141 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
2142 	PCPU_SET(switchtime, cpu_ticks());
2143 	PCPU_SET(switchticks, ticks);
2144 	cpu_throw(td, choosethread());	/* doesn't return */
2145 }
2146 
2147 void
2148 sched_fork_exit(struct thread *ctd)
2149 {
2150 	struct thread *td;
2151 
2152 	/*
2153 	 * Finish setting up thread glue so that it begins execution in a
2154 	 * non-nested critical section with sched_lock held but not recursed.
2155 	 */
2156 	ctd->td_oncpu = PCPU_GET(cpuid);
2157 	sched_lock.mtx_lock = (uintptr_t)ctd;
2158 	THREAD_LOCK_ASSERT(ctd, MA_OWNED | MA_NOTRECURSED);
2159 	/*
2160 	 * Processes normally resume in mi_switch() after being
2161 	 * cpu_switch()'ed to, but when children start up they arrive here
2162 	 * instead, so we must do much the same things as mi_switch() would.
2163 	 */
2164 	if ((td = PCPU_GET(deadthread))) {
2165 		PCPU_SET(deadthread, NULL);
2166 		thread_stash(td);
2167 	}
2168 	thread_unlock(ctd);
2169 }
2170 
2171 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
2172 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
2173     "Scheduler name");
2174 SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, "Slice size for timeshare threads");
2175 SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, "Interactivity score threshold");
2176 SYSCTL_INT(_kern_sched, OID_AUTO, tickincr, CTLFLAG_RD, &tickincr, 0, "Scaled tick increment used for CPU accounting");
2177 SYSCTL_INT(_kern_sched, OID_AUTO, realstathz, CTLFLAG_RD, &realstathz, 0, "Effective stathz used by the scheduler");
2178 #ifdef SMP
2179 SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri, CTLFLAG_RW, &pick_pri, 0, "Use the priority-based CPU picker");
2180 SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri_affinity, CTLFLAG_RW,
2181     &affinity, 0, "Ticks for which a thread keeps affinity for its last CPU");
2182 SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri_tryself, CTLFLAG_RW,
2183     &tryself, 0, "Prefer the current CPU when it can run the thread at once");
2184 SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri_tryselfidle, CTLFLAG_RW,
2185     &tryselfidle, 0, "Prefer the current CPU when it is idle");
2186 SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0, "Enable periodic load balancing");
2187 SYSCTL_INT(_kern_sched, OID_AUTO, ipi_preempt, CTLFLAG_RW, &ipi_preempt, 0, "Use preemption IPIs to preempt remote CPUs");
2188 SYSCTL_INT(_kern_sched, OID_AUTO, ipi_ast, CTLFLAG_RW, &ipi_ast, 0, "Use AST IPIs to notify remote CPUs");
2189 SYSCTL_INT(_kern_sched, OID_AUTO, ipi_thresh, CTLFLAG_RW, &ipi_thresh, 0, "Priority threshold for notification IPIs");
2190 SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0, "Steal work from HTT siblings when idle");
2191 SYSCTL_INT(_kern_sched, OID_AUTO, steal_busy, CTLFLAG_RW, &steal_busy, 0, "Steal threads from busy CPUs when idle");
2192 SYSCTL_INT(_kern_sched, OID_AUTO, busy_thresh, CTLFLAG_RW, &busy_thresh, 0, "Load at which a run-queue is considered busy");
2193 SYSCTL_INT(_kern_sched, OID_AUTO, topology, CTLFLAG_RD, &topology, 0, "Non-zero if a CPU topology was detected");
2194 #endif
2195 
2196 /* ps compat */
2197 static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
2198 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "Decay constant for %CPU, exported for ps(1) compatibility");
2199 
2200 
2201 #define KERN_SWITCH_INCLUDE 1
2202 #include "kern/kern_switch.c"
2203