xref: /freebsd/sys/kern/sched_ule.c (revision 5203edcdc553fda6caa1da8826a89b1a02dad1bf)
1 /*-
2  * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/ktr.h>
34 #include <sys/lock.h>
35 #include <sys/mutex.h>
36 #include <sys/proc.h>
37 #include <sys/resource.h>
38 #include <sys/resourcevar.h>
39 #include <sys/sched.h>
40 #include <sys/smp.h>
41 #include <sys/sx.h>
42 #include <sys/sysctl.h>
43 #include <sys/sysproto.h>
44 #include <sys/vmmeter.h>
45 #ifdef DDB
46 #include <ddb/ddb.h>
47 #endif
48 #ifdef KTRACE
49 #include <sys/uio.h>
50 #include <sys/ktrace.h>
51 #endif
52 
53 #include <machine/cpu.h>
54 #include <machine/smp.h>
55 
56 #define KTR_ULE         KTR_NFS
57 
58 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
59 /* XXX This is bogus compatibility crap for ps */
60 static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
61 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
62 
63 static void sched_setup(void *dummy);
64 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
65 
66 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");
67 
68 #define ULE_NAME	"ule"
69 #define ULE_NAME_LEN	3
70 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, ULE_NAME, ULE_NAME_LEN,
71 	      "System is using the ULE scheduler");
72 
73 static int slice_min = 1;
74 SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");
75 
76 static int slice_max = 10;
77 SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");
78 
79 int realstathz;
80 int tickincr = 1;
81 
82 /*
83  * These data structures are allocated within their parent data structure but
84  * are scheduler specific.
85  */
86 
87 struct ke_sched {
88 	int		ske_slice;
89 	struct runq	*ske_runq;
90 	/* The following variables are only used for pctcpu calculation */
91 	int		ske_ltick;	/* Last tick that we were running on */
92 	int		ske_ftick;	/* First tick that we were running on */
93 	int		ske_ticks;	/* Tick count */
94 	/* CPU that we have affinity for. */
95 	u_char		ske_cpu;
96 };
97 #define	ke_slice	ke_sched->ske_slice
98 #define	ke_runq		ke_sched->ske_runq
99 #define	ke_ltick	ke_sched->ske_ltick
100 #define	ke_ftick	ke_sched->ske_ftick
101 #define	ke_ticks	ke_sched->ske_ticks
102 #define	ke_cpu		ke_sched->ske_cpu
103 #define	ke_assign	ke_procq.tqe_next
104 
105 #define	KEF_ASSIGNED	KEF_SCHED0	/* KSE is being migrated. */
106 #define	KEF_BOUND	KEF_SCHED1	/* KSE can not migrate. */
107 
108 struct kg_sched {
109 	int	skg_slptime;		/* Number of ticks we vol. slept */
110 	int	skg_runtime;		/* Number of ticks we were running */
111 };
112 #define	kg_slptime	kg_sched->skg_slptime
113 #define	kg_runtime	kg_sched->skg_runtime
114 
115 struct td_sched {
116 	int	std_slptime;
117 };
118 #define	td_slptime	td_sched->std_slptime
119 
120 struct td_sched td_sched;
121 struct ke_sched ke_sched;
122 struct kg_sched kg_sched;
123 
124 struct ke_sched *kse0_sched = &ke_sched;
125 struct kg_sched *ksegrp0_sched = &kg_sched;
126 struct p_sched *proc0_sched = NULL;
127 struct td_sched *thread0_sched = &td_sched;
128 
129 /*
130  * The priority is primarily determined by the interactivity score.  Thus, we
131  * give lower (better) priorities to kse groups that use less CPU.  The nice
132  * value is then directly added to this to allow nice to have some effect
133  * on latency.
134  *
135  * PRI_RANGE:	Total priority range for timeshare threads.
136  * PRI_NRESV:	Number of nice values.
137  * PRI_BASE:	The start of the dynamic range.
138  */
139 #define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
140 #define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
141 #define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
142 #define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
143 #define	SCHED_PRI_INTERACT(score)					\
144     ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)
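/*
 * For illustration: SCHED_PRI_INTERACT() maps the interactivity score
 * linearly onto the timeshare range, so a score of 0 yields
 * PRI_MIN_TIMESHARE (plus nice) and a score of SCHED_INTERACT_MAX / 2
 * lands roughly halfway through the range; see sched_priority().
 */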
145 
146 /*
147  * These determine the interactivity of a process.
148  *
149  * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
150  *		before throttling back.
151  * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
152  * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
153  * INTERACT_THRESH:	Threshold for placement on the current runq.
154  */
155 #define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
156 #define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
157 #define	SCHED_INTERACT_MAX	(100)
158 #define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
159 #define	SCHED_INTERACT_THRESH	(30)
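/*
 * Note that the slp and run times above are kept in ticks shifted left
 * by 10 bits (a crude fixed point format; see sched_clock() and
 * sched_wakeup()), so SLP_RUN_MAX represents five seconds of combined
 * history and SLP_RUN_FORK half a second.
 */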
160 
161 /*
162  * These parameters and macros determine the size of the time slice that is
163  * granted to each thread.
164  *
165  * SLICE_MIN:	Minimum time slice granted, in units of ticks.
166  * SLICE_MAX:	Maximum time slice granted.
167  * SLICE_RANGE:	Range of available time slices scaled by hz.
168  * SLICE_SCALE:	The number slices granted per val in the range of [0, max].
169  * SLICE_NICE:  Determine the amount of slice granted to a scaled nice.
170  * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
171  */
172 #define	SCHED_SLICE_MIN			(slice_min)
173 #define	SCHED_SLICE_MAX			(slice_max)
174 #define	SCHED_SLICE_INTERACTIVE		(slice_max)
175 #define	SCHED_SLICE_NTHRESH	(SCHED_PRI_NHALF - 1)
176 #define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
177 #define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
178 #define	SCHED_SLICE_NICE(nice)						\
179     (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))
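/*
 * For illustration, assuming hz = 1000 (as the 10ms/~140ms comments in
 * sched_setup() imply): slice_min is 10 ticks and slice_max ~142 ticks.
 * A kse whose nice equals the least nice value on its kseq (offset 0)
 * receives SCHED_SLICE_MAX; each additional point of nice offset costs
 * roughly SCHED_SLICE_RANGE / SCHED_SLICE_NTHRESH ticks until the slice
 * bottoms out near SCHED_SLICE_MIN at an offset of SCHED_SLICE_NTHRESH.
 */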
180 
181 /*
182  * These macros determine whether or not the kse belongs on the current or
183  * next run queue.
184  */
185 #define	SCHED_INTERACTIVE(kg)						\
186     (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
187 #define	SCHED_CURR(kg, ke)						\
188     (ke->ke_thread->td_priority < kg->kg_user_pri ||			\
189     SCHED_INTERACTIVE(kg))
190 
191 /*
192  * Cpu percentage computation macros and defines.
193  *
194  * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
195  * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
196  */
197 
198 #define	SCHED_CPU_TIME	10
199 #define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)
200 
201 /*
202  * kseq - per processor runqs and statistics.
203  */
204 struct kseq {
205 	struct runq	ksq_idle;		/* Queue of IDLE threads. */
206 	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
207 	struct runq	*ksq_next;		/* Next timeshare queue. */
208 	struct runq	*ksq_curr;		/* Current queue. */
209 	int		ksq_load_timeshare;	/* Load for timeshare. */
210 	int		ksq_load;		/* Aggregate load. */
211 	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
212 	short		ksq_nicemin;		/* Least nice. */
213 #ifdef SMP
214 	int			ksq_transferable;
215 	LIST_ENTRY(kseq)	ksq_siblings;	/* Next in kseq group. */
216 	struct kseq_group	*ksq_group;	/* Our processor group. */
217 	volatile struct kse	*ksq_assigned;	/* assigned by another CPU. */
218 #else
219 	int		ksq_sysload;		/* For loadavg, !ITHD load. */
220 #endif
221 };
222 
223 #ifdef SMP
224 /*
225  * kseq groups are groups of processors which can cheaply share threads.  When
226  * one processor in the group goes idle it will check the runqs of the other
227  * processors in its group prior to halting and waiting for an interrupt.
228  * These groups are suitable for SMT (Simultaneous Multi-Threading) and not NUMA.
229  * In a NUMA environment we'd want an idle bitmap per group and a two-tiered
230  * load balancer.
231  */
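/*
 * For example, a two-way SMT package would be represented by a single
 * kseq_group with ksg_cpus == 2 and both logical cpus in ksg_cpumask;
 * the group is only marked idle once every cpu in it has idled.
 */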
232 struct kseq_group {
233 	int	ksg_cpus;		/* Count of CPUs in this kseq group. */
234 	cpumask_t ksg_cpumask;		/* Mask of cpus in this group. */
235 	cpumask_t ksg_idlemask;		/* Idle cpus in this group. */
236 	cpumask_t ksg_mask;		/* Bit mask for first cpu. */
237 	int	ksg_load;		/* Total load of this group. */
238 	int	ksg_transferable;	/* Transferable load of this group. */
239 	LIST_HEAD(, kseq)	ksg_members; /* Linked list of all members. */
240 };
241 #endif
242 
243 /*
244  * One kse queue per processor.
245  */
246 #ifdef SMP
247 static cpumask_t kseq_idle;
248 static int ksg_maxid;
249 static struct kseq	kseq_cpu[MAXCPU];
250 static struct kseq_group kseq_groups[MAXCPU];
251 static int bal_tick;
252 static int gbal_tick;
253 
254 #define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
255 #define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
256 #define	KSEQ_ID(x)	((x) - kseq_cpu)
257 #define	KSEQ_GROUP(x)	(&kseq_groups[(x)])
258 #else	/* !SMP */
259 static struct kseq	kseq_cpu;
260 
261 #define	KSEQ_SELF()	(&kseq_cpu)
262 #define	KSEQ_CPU(x)	(&kseq_cpu)
263 #endif
264 
265 static void sched_slice(struct kse *ke);
266 static void sched_priority(struct ksegrp *kg);
267 static int sched_interact_score(struct ksegrp *kg);
268 static void sched_interact_update(struct ksegrp *kg);
269 static void sched_interact_fork(struct ksegrp *kg);
270 static void sched_pctcpu_update(struct kse *ke);
271 
272 /* Operations on per processor queues */
273 static struct kse * kseq_choose(struct kseq *kseq);
274 static void kseq_setup(struct kseq *kseq);
275 static void kseq_load_add(struct kseq *kseq, struct kse *ke);
276 static void kseq_load_rem(struct kseq *kseq, struct kse *ke);
277 static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke);
278 static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke);
279 static void kseq_nice_add(struct kseq *kseq, int nice);
280 static void kseq_nice_rem(struct kseq *kseq, int nice);
281 void kseq_print(int cpu);
282 #ifdef SMP
283 static int kseq_transfer(struct kseq *ksq, struct kse *ke, int class);
284 static struct kse *runq_steal(struct runq *rq);
285 static void sched_balance(void);
286 static void sched_balance_groups(void);
287 static void sched_balance_group(struct kseq_group *ksg);
288 static void sched_balance_pair(struct kseq *high, struct kseq *low);
289 static void kseq_move(struct kseq *from, int cpu);
290 static int kseq_idled(struct kseq *kseq);
291 static void kseq_notify(struct kse *ke, int cpu);
292 static void kseq_assign(struct kseq *);
293 static struct kse *kseq_steal(struct kseq *kseq, int stealidle);
294 /*
295  * On P4 Xeons the round-robin interrupt delivery is broken.  As a result of
296  * this, we can't pin interrupts to the cpu that they were delivered to;
297  * otherwise all ithreads would only run on CPU 0.
298  */
299 #ifdef __i386__
300 #define	KSE_CAN_MIGRATE(ke, class)					\
301     ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
302 #else /* !__i386__ */
303 #define	KSE_CAN_MIGRATE(ke, class)					\
304     ((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 &&		\
305     ((ke)->ke_flags & KEF_BOUND) == 0)
306 #endif /* !__i386__ */
307 #endif
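/*
 * In other words, a kse may be moved between cpus only if its thread is
 * neither pinned (td_pinned) nor bound (KEF_BOUND); on non-i386 machines
 * interrupt threads are additionally kept on their assigned cpu.
 */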
308 
309 void
310 kseq_print(int cpu)
311 {
312 	struct kseq *kseq;
313 	int i;
314 
315 	kseq = KSEQ_CPU(cpu);
316 
317 	printf("kseq:\n");
318 	printf("\tload:           %d\n", kseq->ksq_load);
319 	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
320 #ifdef SMP
321 	printf("\tload transferable: %d\n", kseq->ksq_transferable);
322 #endif
323 	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
324 	printf("\tnice counts:\n");
325 	for (i = 0; i < SCHED_PRI_NRESV; i++)
326 		if (kseq->ksq_nice[i])
327 			printf("\t\t%d = %d\n",
328 			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
329 }
330 
331 static __inline void
332 kseq_runq_add(struct kseq *kseq, struct kse *ke)
333 {
334 #ifdef SMP
335 	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
336 		kseq->ksq_transferable++;
337 		kseq->ksq_group->ksg_transferable++;
338 	}
339 #endif
340 	runq_add(ke->ke_runq, ke);
341 }
342 
343 static __inline void
344 kseq_runq_rem(struct kseq *kseq, struct kse *ke)
345 {
346 #ifdef SMP
347 	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
348 		kseq->ksq_transferable--;
349 		kseq->ksq_group->ksg_transferable--;
350 	}
351 #endif
352 	runq_remove(ke->ke_runq, ke);
353 }
354 
355 static void
356 kseq_load_add(struct kseq *kseq, struct kse *ke)
357 {
358 	int class;
359 	mtx_assert(&sched_lock, MA_OWNED);
360 	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
361 	if (class == PRI_TIMESHARE)
362 		kseq->ksq_load_timeshare++;
363 	kseq->ksq_load++;
364 	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
365 #ifdef SMP
366 		kseq->ksq_group->ksg_load++;
367 #else
368 		kseq->ksq_sysload++;
369 #endif
370 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
371 		CTR6(KTR_ULE,
372 		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
373 		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
374 		    ke->ke_proc->p_nice, kseq->ksq_nicemin);
375 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
376 		kseq_nice_add(kseq, ke->ke_proc->p_nice);
377 }
378 
379 static void
380 kseq_load_rem(struct kseq *kseq, struct kse *ke)
381 {
382 	int class;
383 	mtx_assert(&sched_lock, MA_OWNED);
384 	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
385 	if (class == PRI_TIMESHARE)
386 		kseq->ksq_load_timeshare--;
387 	if (class != PRI_ITHD  && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
388 #ifdef SMP
389 		kseq->ksq_group->ksg_load--;
390 #else
391 		kseq->ksq_sysload--;
392 #endif
393 	kseq->ksq_load--;
394 	ke->ke_runq = NULL;
395 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
396 		kseq_nice_rem(kseq, ke->ke_proc->p_nice);
397 }
398 
399 static void
400 kseq_nice_add(struct kseq *kseq, int nice)
401 {
402 	mtx_assert(&sched_lock, MA_OWNED);
403 	/* Normalize to zero. */
404 	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
405 	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
406 		kseq->ksq_nicemin = nice;
407 }
408 
409 static void
410 kseq_nice_rem(struct kseq *kseq, int nice)
411 {
412 	int n;
413 
414 	mtx_assert(&sched_lock, MA_OWNED);
415 	/* Normalize to zero. */
416 	n = nice + SCHED_PRI_NHALF;
417 	kseq->ksq_nice[n]--;
418 	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));
419 
420 	/*
421 	 * If this wasn't the smallest nice value or there are more in
422 	 * this bucket we can just return.  Otherwise we have to recalculate
423 	 * the smallest nice.
424 	 */
425 	if (nice != kseq->ksq_nicemin ||
426 	    kseq->ksq_nice[n] != 0 ||
427 	    kseq->ksq_load_timeshare == 0)
428 		return;
429 
430 	for (; n < SCHED_PRI_NRESV; n++)
431 		if (kseq->ksq_nice[n]) {
432 			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
433 			return;
434 		}
435 }
436 
437 #ifdef SMP
438 /*
439  * sched_balance is a simple CPU load balancing algorithm.  It operates by
440  * finding the least loaded and most loaded cpu and equalizing their load
441  * by migrating some processes.
442  *
443  * Dealing only with two CPUs at a time has two advantages.  Firstly, most
444  * installations will only have 2 cpus.  Secondly, load balancing too much at
445  * once can have an unpleasant effect on the system.  The scheduler rarely has
446  * enough information to make perfect decisions.  So this algorithm favors
447  * simplicity and more gradual effects on load in larger systems.
448  *
449  * It could be improved by considering the priorities and slices assigned to
450  * each task prior to balancing them.  There are many pathological cases with
451  * any approach, and so the semi-random algorithm below may work as well as any.
452  *
453  */
454 static void
455 sched_balance(void)
456 {
457 	struct kseq_group *high;
458 	struct kseq_group *low;
459 	struct kseq_group *ksg;
460 	int cnt;
461 	int i;
462 
463 	if (smp_started == 0)
464 		goto out;
465 	low = high = NULL;
466 	i = random() % (ksg_maxid + 1);
467 	for (cnt = 0; cnt <= ksg_maxid; cnt++) {
468 		ksg = KSEQ_GROUP(i);
469 		/*
470 		 * Find the kseq group with the highest load that has some
471 		 * threads to transfer.
472 		 */
473 		if ((high == NULL || ksg->ksg_load > high->ksg_load)
474 		    && ksg->ksg_transferable)
475 			high = ksg;
476 		if (low == NULL || ksg->ksg_load < low->ksg_load)
477 			low = ksg;
478 		if (++i > ksg_maxid)
479 			i = 0;
480 	}
481 	if (low != NULL && high != NULL && high != low)
482 		sched_balance_pair(LIST_FIRST(&high->ksg_members),
483 		    LIST_FIRST(&low->ksg_members));
484 out:
485 	bal_tick = ticks + (random() % (hz * 2));
486 }
487 
488 static void
489 sched_balance_groups(void)
490 {
491 	int i;
492 
493 	mtx_assert(&sched_lock, MA_OWNED);
494 	if (smp_started)
495 		for (i = 0; i <= ksg_maxid; i++)
496 			sched_balance_group(KSEQ_GROUP(i));
497 	gbal_tick = ticks + (random() % (hz * 2));
498 }
499 
500 static void
501 sched_balance_group(struct kseq_group *ksg)
502 {
503 	struct kseq *kseq;
504 	struct kseq *high;
505 	struct kseq *low;
506 	int load;
507 
508 	if (ksg->ksg_transferable == 0)
509 		return;
510 	low = NULL;
511 	high = NULL;
512 	LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
513 		load = kseq->ksq_load;
514 		if (high == NULL || load > high->ksq_load)
515 			high = kseq;
516 		if (low == NULL || load < low->ksq_load)
517 			low = kseq;
518 	}
519 	if (high != NULL && low != NULL && high != low)
520 		sched_balance_pair(high, low);
521 }
522 
523 static void
524 sched_balance_pair(struct kseq *high, struct kseq *low)
525 {
526 	int transferable;
527 	int high_load;
528 	int low_load;
529 	int move;
530 	int diff;
531 	int i;
532 
533 	/*
534 	 * If we're transferring within a group we have to use this specific
535 	 * kseq's transferable count, otherwise we can steal from other members
536 	 * of the group.
537 	 */
538 	if (high->ksq_group == low->ksq_group) {
539 		transferable = high->ksq_transferable;
540 		high_load = high->ksq_load;
541 		low_load = low->ksq_load;
542 	} else {
543 		transferable = high->ksq_group->ksg_transferable;
544 		high_load = high->ksq_group->ksg_load;
545 		low_load = low->ksq_group->ksg_load;
546 	}
547 	if (transferable == 0)
548 		return;
549 	/*
550 	 * Determine what the imbalance is and then clamp that to the number
551 	 * of kses we can actually move (transferable).
552 	 */
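	/*
	 * For example, a high load of 7 against a low load of 2 gives a
	 * diff of 5, which rounds up to moving 3 kses, clipped below to
	 * the transferable count.
	 */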
553 	diff = high_load - low_load;
554 	move = diff / 2;
555 	if (diff & 0x1)
556 		move++;
557 	move = min(move, transferable);
558 	for (i = 0; i < move; i++)
559 		kseq_move(high, KSEQ_ID(low));
560 	return;
561 }
562 
563 static void
564 kseq_move(struct kseq *from, int cpu)
565 {
566 	struct kseq *kseq;
567 	struct kseq *to;
568 	struct kse *ke;
569 
570 	kseq = from;
571 	to = KSEQ_CPU(cpu);
572 	ke = kseq_steal(kseq, 1);
573 	if (ke == NULL) {
574 		struct kseq_group *ksg;
575 
576 		ksg = kseq->ksq_group;
577 		LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
578 			if (kseq == from || kseq->ksq_transferable == 0)
579 				continue;
580 			ke = kseq_steal(kseq, 1);
581 			break;
582 		}
583 		if (ke == NULL)
584 			panic("kseq_move: No KSEs available with a "
585 			    "transferable count of %d\n",
586 			    ksg->ksg_transferable);
587 	}
588 	if (kseq == to)
589 		return;
590 	ke->ke_state = KES_THREAD;
591 	kseq_runq_rem(kseq, ke);
592 	kseq_load_rem(kseq, ke);
593 	kseq_notify(ke, cpu);
594 }
595 
596 static int
597 kseq_idled(struct kseq *kseq)
598 {
599 	struct kseq_group *ksg;
600 	struct kseq *steal;
601 	struct kse *ke;
602 
603 	ksg = kseq->ksq_group;
604 	/*
605 	 * If we're in a cpu group, try to steal kses from another cpu in
606 	 * the group before idling.
607 	 */
608 	if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
609 		LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
610 			if (steal == kseq || steal->ksq_transferable == 0)
611 				continue;
612 			ke = kseq_steal(steal, 0);
613 			if (ke == NULL)
614 				continue;
615 			ke->ke_state = KES_THREAD;
616 			kseq_runq_rem(steal, ke);
617 			kseq_load_rem(steal, ke);
618 			ke->ke_cpu = PCPU_GET(cpuid);
619 			sched_add(ke->ke_thread);
620 			return (0);
621 		}
622 	}
623 	/*
624 	 * We only set the idled bit when all of the cpus in the group are
625 	 * idle.  Otherwise we could get into a situation where a KSE bounces
626 	 * back and forth between two idle cores on separate physical CPUs.
627 	 */
628 	ksg->ksg_idlemask |= PCPU_GET(cpumask);
629 	if (ksg->ksg_idlemask != ksg->ksg_cpumask)
630 		return (1);
631 	atomic_set_int(&kseq_idle, ksg->ksg_mask);
632 	return (1);
633 }
634 
635 static void
636 kseq_assign(struct kseq *kseq)
637 {
638 	struct kse *nke;
639 	struct kse *ke;
640 
641 	do {
642 		(volatile struct kse *)ke = kseq->ksq_assigned;
643 	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
644 	for (; ke != NULL; ke = nke) {
645 		nke = ke->ke_assign;
646 		ke->ke_flags &= ~KEF_ASSIGNED;
647 		sched_add(ke->ke_thread);
648 	}
649 }
650 
651 static void
652 kseq_notify(struct kse *ke, int cpu)
653 {
654 	struct kseq *kseq;
655 	struct thread *td;
656 	struct pcpu *pcpu;
657 
658 	ke->ke_cpu = cpu;
659 	ke->ke_flags |= KEF_ASSIGNED;
660 
661 	kseq = KSEQ_CPU(cpu);
662 
663 	/*
664 	 * Place a KSE on another cpu's queue and force a resched.
665 	 */
666 	do {
667 		(volatile struct kse *)ke->ke_assign = kseq->ksq_assigned;
668 	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
669 	pcpu = pcpu_find(cpu);
670 	td = pcpu->pc_curthread;
671 	if (ke->ke_thread->td_priority < td->td_priority ||
672 	    td == pcpu->pc_idlethread) {
673 		td->td_flags |= TDF_NEEDRESCHED;
674 		ipi_selected(1 << cpu, IPI_AST);
675 	}
676 }
677 
678 static struct kse *
679 runq_steal(struct runq *rq)
680 {
681 	struct rqhead *rqh;
682 	struct rqbits *rqb;
683 	struct kse *ke;
684 	int word;
685 	int bit;
686 
687 	mtx_assert(&sched_lock, MA_OWNED);
688 	rqb = &rq->rq_status;
689 	for (word = 0; word < RQB_LEN; word++) {
690 		if (rqb->rqb_bits[word] == 0)
691 			continue;
692 		for (bit = 0; bit < RQB_BPW; bit++) {
693 			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
694 				continue;
695 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
696 			TAILQ_FOREACH(ke, rqh, ke_procq) {
697 				if (KSE_CAN_MIGRATE(ke,
698 				    PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
699 					return (ke);
700 			}
701 		}
702 	}
703 	return (NULL);
704 }
705 
706 static struct kse *
707 kseq_steal(struct kseq *kseq, int stealidle)
708 {
709 	struct kse *ke;
710 
711 	/*
712 	 * Steal from next first to try to get a non-interactive task that
713 	 * may not have run for a while.
714 	 */
715 	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
716 		return (ke);
717 	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
718 		return (ke);
719 	if (stealidle)
720 		return (runq_steal(&kseq->ksq_idle));
721 	return (NULL);
722 }
723 
724 int
725 kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
726 {
727 	struct kseq_group *ksg;
728 	int cpu;
729 
730 	if (smp_started == 0)
731 		return (0);
732 	cpu = 0;
733 	ksg = kseq->ksq_group;
734 
735 	/*
736 	 * If there are any idle groups, give them our extra load.  The
737 	 * threshold at which we start to reassign kses has a large impact
738 	 * on the overall performance of the system.  Tuned too high and
739 	 * some CPUs may idle.  Too low and there will be excess migration
740 	 * and context switches.
741 	 */
742 	if (ksg->ksg_load > (ksg->ksg_cpus * 2) && kseq_idle) {
743 		/*
744 		 * Multiple cpus could find this bit simultaneously
745 		 * but the race shouldn't be terrible.
746 		 */
747 		cpu = ffs(kseq_idle);
748 		if (cpu)
749 			atomic_clear_int(&kseq_idle, 1 << (cpu - 1));
750 	}
751 	/*
752 	 * If another cpu in this group has idled, assign a thread over
753 	 * to it after checking to see if there are idled groups.
754 	 */
755 	if (cpu == 0 && kseq->ksq_load > 1 && ksg->ksg_idlemask) {
756 		cpu = ffs(ksg->ksg_idlemask);
757 		if (cpu)
758 			ksg->ksg_idlemask &= ~(1 << (cpu - 1));
759 	}
760 	/*
761 	 * If we found an idle CPU, migrate the thread to it.
762 	 */
763 	if (cpu) {
764 		cpu--;
765 		ke->ke_runq = NULL;
766 		kseq_notify(ke, cpu);
767 		return (1);
768 	}
769 	return (0);
770 }
771 
772 #endif	/* SMP */
773 
774 /*
775  * Pick the highest priority task we have and return it.
776  */
777 
778 static struct kse *
779 kseq_choose(struct kseq *kseq)
780 {
781 	struct kse *ke;
782 	struct runq *swap;
783 
784 	mtx_assert(&sched_lock, MA_OWNED);
785 	swap = NULL;
786 
787 	for (;;) {
788 		ke = runq_choose(kseq->ksq_curr);
789 		if (ke == NULL) {
790 			/*
791 			 * We already swapped once and didn't get anywhere.
792 			 */
793 			if (swap)
794 				break;
795 			swap = kseq->ksq_curr;
796 			kseq->ksq_curr = kseq->ksq_next;
797 			kseq->ksq_next = swap;
798 			continue;
799 		}
800 		/*
801 		 * If we encounter a slice of 0 the kse is in a
802 		 * TIMESHARE kse group and its nice was too far out
803 		 * of the range that receives slices.
804 		 */
805 		if (ke->ke_slice == 0) {
806 			runq_remove(ke->ke_runq, ke);
807 			sched_slice(ke);
808 			ke->ke_runq = kseq->ksq_next;
809 			runq_add(ke->ke_runq, ke);
810 			continue;
811 		}
812 		return (ke);
813 	}
814 
815 	return (runq_choose(&kseq->ksq_idle));
816 }
817 
818 static void
819 kseq_setup(struct kseq *kseq)
820 {
821 	runq_init(&kseq->ksq_timeshare[0]);
822 	runq_init(&kseq->ksq_timeshare[1]);
823 	runq_init(&kseq->ksq_idle);
824 	kseq->ksq_curr = &kseq->ksq_timeshare[0];
825 	kseq->ksq_next = &kseq->ksq_timeshare[1];
826 	kseq->ksq_load = 0;
827 	kseq->ksq_load_timeshare = 0;
828 }
829 
830 static void
831 sched_setup(void *dummy)
832 {
833 #ifdef SMP
834 	int balance_groups;
835 	int i;
836 #endif
837 
838 	slice_min = (hz/100);	/* 10ms */
839 	slice_max = (hz/7);	/* ~140ms */
840 
841 #ifdef SMP
842 	balance_groups = 0;
843 	/*
844 	 * Initialize the kseqs.
845 	 */
846 	for (i = 0; i < MAXCPU; i++) {
847 		struct kseq *ksq;
848 
849 		ksq = &kseq_cpu[i];
850 		ksq->ksq_assigned = NULL;
851 		kseq_setup(&kseq_cpu[i]);
852 	}
853 	if (smp_topology == NULL) {
854 		struct kseq_group *ksg;
855 		struct kseq *ksq;
856 
857 		for (i = 0; i < MAXCPU; i++) {
858 			ksq = &kseq_cpu[i];
859 			ksg = &kseq_groups[i];
860 			/*
861 			 * Setup a kseq group with one member.
862 			 */
863 			ksq->ksq_transferable = 0;
864 			ksq->ksq_group = ksg;
865 			ksg->ksg_cpus = 1;
866 			ksg->ksg_idlemask = 0;
867 			ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
868 			ksg->ksg_load = 0;
869 			ksg->ksg_transferable = 0;
870 			LIST_INIT(&ksg->ksg_members);
871 			LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
872 		}
873 	} else {
874 		struct kseq_group *ksg;
875 		struct cpu_group *cg;
876 		int j;
877 
878 		for (i = 0; i < smp_topology->ct_count; i++) {
879 			cg = &smp_topology->ct_group[i];
880 			ksg = &kseq_groups[i];
881 			/*
882 			 * Initialize the group.
883 			 */
884 			ksg->ksg_idlemask = 0;
885 			ksg->ksg_load = 0;
886 			ksg->ksg_transferable = 0;
887 			ksg->ksg_cpus = cg->cg_count;
888 			ksg->ksg_cpumask = cg->cg_mask;
889 			LIST_INIT(&ksg->ksg_members);
890 			/*
891 			 * Find all of the group members and add them.
892 			 */
893 			for (j = 0; j < MAXCPU; j++) {
894 				if ((cg->cg_mask & (1 << j)) != 0) {
895 					if (ksg->ksg_mask == 0)
896 						ksg->ksg_mask = 1 << j;
897 					kseq_cpu[j].ksq_transferable = 0;
898 					kseq_cpu[j].ksq_group = ksg;
899 					LIST_INSERT_HEAD(&ksg->ksg_members,
900 					    &kseq_cpu[j], ksq_siblings);
901 				}
902 			}
903 			if (ksg->ksg_cpus > 1)
904 				balance_groups = 1;
905 		}
906 		ksg_maxid = smp_topology->ct_count - 1;
907 	}
908 	/*
909 	 * Stagger the group and global load balancer so they do not
910 	 * interfere with each other.
911 	 */
912 	bal_tick = ticks + hz;
913 	if (balance_groups)
914 		gbal_tick = ticks + (hz / 2);
915 #else
916 	kseq_setup(KSEQ_SELF());
917 #endif
918 	mtx_lock_spin(&sched_lock);
919 	kseq_load_add(KSEQ_SELF(), &kse0);
920 	mtx_unlock_spin(&sched_lock);
921 }
922 
923 /*
924  * Scale the scheduling priority according to the "interactivity" of this
925  * process.
926  */
927 static void
928 sched_priority(struct ksegrp *kg)
929 {
930 	int pri;
931 
932 	if (kg->kg_pri_class != PRI_TIMESHARE)
933 		return;
934 
935 	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
936 	pri += SCHED_PRI_BASE;
937 	pri += kg->kg_proc->p_nice;
938 
939 	if (pri > PRI_MAX_TIMESHARE)
940 		pri = PRI_MAX_TIMESHARE;
941 	else if (pri < PRI_MIN_TIMESHARE)
942 		pri = PRI_MIN_TIMESHARE;
943 
944 	kg->kg_user_pri = pri;
945 
946 	return;
947 }
948 
949 /*
950  * Calculate a time slice based on the properties of the kseg and the runq
951  * that we're on.  This is only for PRI_TIMESHARE ksegrps.
952  */
953 static void
954 sched_slice(struct kse *ke)
955 {
956 	struct kseq *kseq;
957 	struct ksegrp *kg;
958 
959 	kg = ke->ke_ksegrp;
960 	kseq = KSEQ_CPU(ke->ke_cpu);
961 
962 	/*
963 	 * Rationale:
964 	 * KSEs in interactive ksegs get SCHED_SLICE_INTERACTIVE (currently the
965 	 * maximum slice); abuse shows up quickly in the interactivity score.
966 	 *
967 	 * KSEs in non-interactive ksegs are assigned a slice that is
968 	 * based on the ksegs nice value relative to the least nice kseg
969 	 * on the run queue for this cpu.
970 	 *
971 	 * If the KSE is less nice than all others it gets the maximum
972 	 * slice and other KSEs will adjust their slice relative to
973 	 * this when they first expire.
974 	 *
975 	 * There is a 20 point window that starts relative to the least
976 	 * nice kse on the run queue.  Slice size is determined by the
977 	 * kse's distance from the least nice ksegrp.
978 	 *
979 	 * If the kse is outside of the window it will get no slice
980 	 * and will be reevaluated each time it is selected on the
981 	 * run queue.  The exception to this is nice 0 ksegs when
982 	 * a nice -20 is running.  They are always granted a minimum
983 	 * slice.
984 	 */
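	/*
	 * For illustration: if the least nice kseg on this cpu is at -5
	 * and ours is at nice 0, our offset is 5 and we receive
	 * SCHED_SLICE_NICE(5).  At nice 15 (offset 20, past
	 * SCHED_SLICE_NTHRESH) we would receive no slice at all; only a
	 * nice 0 kseg outside the window still gets SCHED_SLICE_MIN.
	 */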
985 	if (!SCHED_INTERACTIVE(kg)) {
986 		int nice;
987 
988 		nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
989 		if (kseq->ksq_load_timeshare == 0 ||
990 		    kg->kg_proc->p_nice < kseq->ksq_nicemin)
991 			ke->ke_slice = SCHED_SLICE_MAX;
992 		else if (nice <= SCHED_SLICE_NTHRESH)
993 			ke->ke_slice = SCHED_SLICE_NICE(nice);
994 		else if (kg->kg_proc->p_nice == 0)
995 			ke->ke_slice = SCHED_SLICE_MIN;
996 		else
997 			ke->ke_slice = 0;
998 	} else
999 		ke->ke_slice = SCHED_SLICE_INTERACTIVE;
1000 
1001 	CTR6(KTR_ULE,
1002 	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
1003 	    ke, ke->ke_slice, kg->kg_proc->p_nice, kseq->ksq_nicemin,
1004 	    kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));
1005 
1006 	return;
1007 }
1008 
1009 /*
1010  * This routine enforces a maximum limit on the amount of scheduling history
1011  * kept.  It is called after either the slptime or runtime is adjusted.
1012  * This routine will not operate correctly when slp or run times have been
1013  * adjusted to more than double their maximum.
1014  */
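/*
 * For example, with hz = 1000, SCHED_SLP_RUN_MAX corresponds to 5000 ticks
 * (five seconds) in the <<10 fixed point format.  Once the combined history
 * reaches that, both values are scaled back by 4/5; if the sum has already
 * grown past 6/5 of the limit that scaling cannot catch up, so both are
 * halved instead.
 */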
1015 static void
1016 sched_interact_update(struct ksegrp *kg)
1017 {
1018 	int sum;
1019 
1020 	sum = kg->kg_runtime + kg->kg_slptime;
1021 	if (sum < SCHED_SLP_RUN_MAX)
1022 		return;
1023 	/*
1024 	 * If we have exceeded by more than 1/5th then the algorithm below
1025 	 * will not bring us back into range.  Dividing by two here forces
1026 	 * us into the range of [3/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
1027 	 */
1028 	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1029 		kg->kg_runtime /= 2;
1030 		kg->kg_slptime /= 2;
1031 		return;
1032 	}
1033 	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
1034 	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
1035 }
1036 
1037 static void
1038 sched_interact_fork(struct ksegrp *kg)
1039 {
1040 	int ratio;
1041 	int sum;
1042 
1043 	sum = kg->kg_runtime + kg->kg_slptime;
1044 	if (sum > SCHED_SLP_RUN_FORK) {
1045 		ratio = sum / SCHED_SLP_RUN_FORK;
1046 		kg->kg_runtime /= ratio;
1047 		kg->kg_slptime /= ratio;
1048 	}
1049 }
1050 
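/*
 * The interactivity score maps the ratio of sleep time to run time onto
 * [0, SCHED_INTERACT_MAX].  For example, a kseg that sleeps twice as much
 * as it runs scores 25, while one that runs twice as much as it sleeps
 * scores 75; anything under SCHED_INTERACT_THRESH (30) is treated as
 * interactive by SCHED_INTERACTIVE().
 */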
1051 static int
1052 sched_interact_score(struct ksegrp *kg)
1053 {
1054 	int div;
1055 
1056 	if (kg->kg_runtime > kg->kg_slptime) {
1057 		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
1058 		return (SCHED_INTERACT_HALF +
1059 		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
1060 	} else if (kg->kg_slptime > kg->kg_runtime) {
1061 		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
1062 		return (kg->kg_runtime / div);
1063 	}
1064 
1065 	/*
1066 	 * This can happen if slptime and runtime are 0.
1067 	 * This can happen if slptime and runtime are 0 or exactly equal.
1068 	return (0);
1069 
1070 }
1071 
1072 /*
1073  * This is only somewhat accurate: given many processes of the same
1074  * priority, each will switch when its slice runs out, which will be
1075  * at most SCHED_SLICE_MAX ticks.
1076  */
1077 int
1078 sched_rr_interval(void)
1079 {
1080 	return (SCHED_SLICE_MAX);
1081 }
1082 
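/*
 * %cpu bookkeeping: ke_ticks counts ticks charged to the kse, decayed
 * over a sliding window of SCHED_CPU_TICKS (ten seconds worth of hz
 * ticks).  sched_pctcpu() converts this into the fixed point (FSCALE
 * based) fraction of a cpu consumed.
 */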
1083 static void
1084 sched_pctcpu_update(struct kse *ke)
1085 {
1086 	/*
1087 	 * Adjust counters and watermark for pctcpu calc.
1088 	 */
1089 	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
1090 		/*
1091 		 * Shift the tick count out so that the divide doesn't
1092 		 * round away our results.
1093 		 */
1094 		ke->ke_ticks <<= 10;
1095 		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
1096 			    SCHED_CPU_TICKS;
1097 		ke->ke_ticks >>= 10;
1098 	} else
1099 		ke->ke_ticks = 0;
1100 	ke->ke_ltick = ticks;
1101 	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
1102 }
1103 
1104 void
1105 sched_prio(struct thread *td, u_char prio)
1106 {
1107 	struct kse *ke;
1108 
1109 	ke = td->td_kse;
1110 	mtx_assert(&sched_lock, MA_OWNED);
1111 	if (TD_ON_RUNQ(td)) {
1112 		/*
1113 		 * If the priority has been elevated due to priority
1114 		 * propagation, we may have to move ourselves to a new
1115 		 * queue.  We still call adjustrunqueue below in case kse
1116 		 * needs to fix things up.
1117 		 */
1118 		if (prio < td->td_priority && ke &&
1119 		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
1120 		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
1121 			runq_remove(ke->ke_runq, ke);
1122 			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
1123 			runq_add(ke->ke_runq, ke);
1124 		}
1125 		adjustrunqueue(td, prio);
1126 	} else
1127 		td->td_priority = prio;
1128 }
1129 
1130 void
1131 sched_switch(struct thread *td, struct thread *newtd)
1132 {
1133 	struct kse *ke;
1134 
1135 	mtx_assert(&sched_lock, MA_OWNED);
1136 
1137 	ke = td->td_kse;
1138 
1139 	td->td_last_kse = ke;
1140 	td->td_lastcpu = td->td_oncpu;
1141 	td->td_oncpu = NOCPU;
1142 	td->td_flags &= ~(TDF_NEEDRESCHED | TDF_OWEPREEMPT);
1143 
1144 	/*
1145 	 * If the KSE has been assigned it may be in the process of switching
1146 	 * to the new cpu.  This is the case in sched_bind().
1147 	 */
1148 	if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
1149 		if (td == PCPU_GET(idlethread))
1150 			TD_SET_CAN_RUN(td);
1151 		else if (TD_IS_RUNNING(td)) {
1152 			kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1153 			setrunqueue(td);
1154 		} else {
1155 			if (ke->ke_runq) {
1156 				kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1157 			} else if ((td->td_flags & TDF_IDLETD) == 0)
1158 				backtrace();
1159 			/*
1160 			 * We will not be on the run queue. So we must be
1161 			 * sleeping or similar.
1162 			 */
1163 			if (td->td_proc->p_flag & P_SA)
1164 				kse_reassign(ke);
1165 		}
1166 	}
1167 	if (newtd == NULL)
1168 		newtd = choosethread();
1169 	else
1170 		kseq_load_add(KSEQ_SELF(), newtd->td_kse);
1171 	if (td != newtd)
1172 		cpu_switch(td, newtd);
1173 	sched_lock.mtx_lock = (uintptr_t)td;
1174 
1175 	td->td_oncpu = PCPU_GET(cpuid);
1176 }
1177 
1178 void
1179 sched_nice(struct proc *p, int nice)
1180 {
1181 	struct ksegrp *kg;
1182 	struct kse *ke;
1183 	struct thread *td;
1184 	struct kseq *kseq;
1185 
1186 	PROC_LOCK_ASSERT(p, MA_OWNED);
1187 	mtx_assert(&sched_lock, MA_OWNED);
1188 	/*
1189 	 * We need to adjust the nice counts for running KSEs.
1190 	 */
1191 	FOREACH_KSEGRP_IN_PROC(p, kg) {
1192 		if (kg->kg_pri_class == PRI_TIMESHARE) {
1193 			FOREACH_KSE_IN_GROUP(kg, ke) {
1194 				if (ke->ke_runq == NULL)
1195 					continue;
1196 				kseq = KSEQ_CPU(ke->ke_cpu);
1197 				kseq_nice_rem(kseq, p->p_nice);
1198 				kseq_nice_add(kseq, nice);
1199 			}
1200 		}
1201 	}
1202 	p->p_nice = nice;
1203 	FOREACH_KSEGRP_IN_PROC(p, kg) {
1204 		sched_priority(kg);
1205 		FOREACH_THREAD_IN_GROUP(kg, td)
1206 			td->td_flags |= TDF_NEEDRESCHED;
1207 	}
1208 }
1209 
1210 void
1211 sched_sleep(struct thread *td)
1212 {
1213 	mtx_assert(&sched_lock, MA_OWNED);
1214 
1215 	td->td_slptime = ticks;
1216 	td->td_base_pri = td->td_priority;
1217 
1218 	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
1219 	    td->td_kse, td->td_slptime);
1220 }
1221 
1222 void
1223 sched_wakeup(struct thread *td)
1224 {
1225 	mtx_assert(&sched_lock, MA_OWNED);
1226 
1227 	/*
1228 	 * Let the kseg know how long we slept for.  This is because process
1229 	 * interactivity behavior is modeled in the kseg.
1230 	 */
1231 	if (td->td_slptime) {
1232 		struct ksegrp *kg;
1233 		int hzticks;
1234 
1235 		kg = td->td_ksegrp;
1236 		hzticks = (ticks - td->td_slptime) << 10;
1237 		if (hzticks >= SCHED_SLP_RUN_MAX) {
1238 			kg->kg_slptime = SCHED_SLP_RUN_MAX;
1239 			kg->kg_runtime = 1;
1240 		} else {
1241 			kg->kg_slptime += hzticks;
1242 			sched_interact_update(kg);
1243 		}
1244 		sched_priority(kg);
1245 		if (td->td_kse)
1246 			sched_slice(td->td_kse);
1247 		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
1248 		    td->td_kse, hzticks);
1249 		td->td_slptime = 0;
1250 	}
1251 	setrunqueue(td);
1252 }
1253 
1254 /*
1255  * Penalize the parent for creating a new child and initialize the child's
1256  * priority.
1257  */
1258 void
1259 sched_fork(struct proc *p, struct proc *p1)
1260 {
1261 
1262 	mtx_assert(&sched_lock, MA_OWNED);
1263 
1264 	p1->p_nice = p->p_nice;
1265 	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
1266 	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
1267 	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
1268 }
1269 
1270 void
1271 sched_fork_kse(struct kse *ke, struct kse *child)
1272 {
1273 
1274 	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
1275 	child->ke_cpu = ke->ke_cpu;
1276 	child->ke_runq = NULL;
1277 
1278 	/* Grab our parent's cpu estimation information. */
1279 	child->ke_ticks = ke->ke_ticks;
1280 	child->ke_ltick = ke->ke_ltick;
1281 	child->ke_ftick = ke->ke_ftick;
1282 }
1283 
1284 void
1285 sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
1286 {
1287 	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
1288 
1289 	child->kg_slptime = kg->kg_slptime;
1290 	child->kg_runtime = kg->kg_runtime;
1291 	child->kg_user_pri = kg->kg_user_pri;
1292 	sched_interact_fork(child);
1293 	kg->kg_runtime += tickincr << 10;
1294 	sched_interact_update(kg);
1295 
1296 	CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)",
1297 	    kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime,
1298 	    child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime);
1299 }
1300 
1301 void
1302 sched_fork_thread(struct thread *td, struct thread *child)
1303 {
1304 }
1305 
1306 void
1307 sched_class(struct ksegrp *kg, int class)
1308 {
1309 	struct kseq *kseq;
1310 	struct kse *ke;
1311 	int nclass;
1312 	int oclass;
1313 
1314 	mtx_assert(&sched_lock, MA_OWNED);
1315 	if (kg->kg_pri_class == class)
1316 		return;
1317 
1318 	nclass = PRI_BASE(class);
1319 	oclass = PRI_BASE(kg->kg_pri_class);
1320 	FOREACH_KSE_IN_GROUP(kg, ke) {
1321 		if (ke->ke_state != KES_ONRUNQ &&
1322 		    ke->ke_state != KES_THREAD)
1323 			continue;
1324 		kseq = KSEQ_CPU(ke->ke_cpu);
1325 
1326 #ifdef SMP
1327 		/*
1328 		 * On SMP if we're on the RUNQ we must adjust the transferable
1329 		 * count because we could be changing to or from an interrupt
1330 		 * class.
1331 		 */
1332 		if (ke->ke_state == KES_ONRUNQ) {
1333 			if (KSE_CAN_MIGRATE(ke, oclass)) {
1334 				kseq->ksq_transferable--;
1335 				kseq->ksq_group->ksg_transferable--;
1336 			}
1337 			if (KSE_CAN_MIGRATE(ke, nclass)) {
1338 				kseq->ksq_transferable++;
1339 				kseq->ksq_group->ksg_transferable++;
1340 			}
1341 		}
1342 #endif
1343 		if (oclass == PRI_TIMESHARE) {
1344 			kseq->ksq_load_timeshare--;
1345 			kseq_nice_rem(kseq, kg->kg_proc->p_nice);
1346 		}
1347 		if (nclass == PRI_TIMESHARE) {
1348 			kseq->ksq_load_timeshare++;
1349 			kseq_nice_add(kseq, kg->kg_proc->p_nice);
1350 		}
1351 	}
1352 
1353 	kg->kg_pri_class = class;
1354 }
1355 
1356 /*
1357  * Return some of the child's priority and interactivity to the parent.
1358  */
1359 void
1360 sched_exit(struct proc *p, struct proc *child)
1361 {
1362 	mtx_assert(&sched_lock, MA_OWNED);
1363 	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
1364 	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
1365 }
1366 
1367 void
1368 sched_exit_kse(struct kse *ke, struct kse *child)
1369 {
1370 	kseq_load_rem(KSEQ_CPU(child->ke_cpu), child);
1371 }
1372 
1373 void
1374 sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
1375 {
1376 	/* kg->kg_slptime += child->kg_slptime; */
1377 	kg->kg_runtime += child->kg_runtime;
1378 	sched_interact_update(kg);
1379 }
1380 
1381 void
1382 sched_exit_thread(struct thread *td, struct thread *child)
1383 {
1384 }
1385 
1386 void
1387 sched_clock(struct thread *td)
1388 {
1389 	struct kseq *kseq;
1390 	struct ksegrp *kg;
1391 	struct kse *ke;
1392 
1393 	mtx_assert(&sched_lock, MA_OWNED);
1394 #ifdef SMP
1395 	if (ticks == bal_tick)
1396 		sched_balance();
1397 	if (ticks == gbal_tick)
1398 		sched_balance_groups();
1399 #endif
1400 	/*
1401 	 * sched_setup() apparently happens prior to stathz being set.  We
1402 	 * need to resolve the timers earlier in the boot so we can avoid
1403 	 * calculating this here.
1404 	 */
1405 	if (realstathz == 0) {
1406 		realstathz = stathz ? stathz : hz;
1407 		tickincr = hz / realstathz;
1408 		/*
1409 		 * XXX This does not work for values of stathz that are much
1410 		 * larger than hz.
1411 		 */
1412 		if (tickincr == 0)
1413 			tickincr = 1;
1414 	}
1415 
1416 	ke = td->td_kse;
1417 	kg = ke->ke_ksegrp;
1418 
1419 	/* Adjust ticks for pctcpu */
1420 	ke->ke_ticks++;
1421 	ke->ke_ltick = ticks;
1422 
1423 	/* Go up to one second beyond our max and then trim back down */
1424 	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
1425 		sched_pctcpu_update(ke);
1426 
1427 	if (td->td_flags & TDF_IDLETD)
1428 		return;
1429 
1430 	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
1431 	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
1432 	/*
1433 	 * We only do slicing code for TIMESHARE ksegrps.
1434 	 */
1435 	if (kg->kg_pri_class != PRI_TIMESHARE)
1436 		return;
1437 	/*
1438 	 * We used a tick; charge it to the ksegrp so that we can compute our
1439 	 * interactivity.
1440 	 */
1441 	kg->kg_runtime += tickincr << 10;
1442 	sched_interact_update(kg);
1443 
1444 	/*
1445 	 * We used up one time slice.
1446 	 */
1447 	if (--ke->ke_slice > 0)
1448 		return;
1449 	/*
1450 	 * We're out of time, recompute priorities and requeue.
1451 	 */
1452 	kseq = KSEQ_SELF();
1453 	kseq_load_rem(kseq, ke);
1454 	sched_priority(kg);
1455 	sched_slice(ke);
1456 	if (SCHED_CURR(kg, ke))
1457 		ke->ke_runq = kseq->ksq_curr;
1458 	else
1459 		ke->ke_runq = kseq->ksq_next;
1460 	kseq_load_add(kseq, ke);
1461 	td->td_flags |= TDF_NEEDRESCHED;
1462 }
1463 
1464 int
1465 sched_runnable(void)
1466 {
1467 	struct kseq *kseq;
1468 	int load;
1469 
1470 	load = 1;
1471 
1472 	kseq = KSEQ_SELF();
1473 #ifdef SMP
1474 	if (kseq->ksq_assigned) {
1475 		mtx_lock_spin(&sched_lock);
1476 		kseq_assign(kseq);
1477 		mtx_unlock_spin(&sched_lock);
1478 	}
1479 #endif
1480 	if ((curthread->td_flags & TDF_IDLETD) != 0) {
1481 		if (kseq->ksq_load > 0)
1482 			goto out;
1483 	} else
1484 		if (kseq->ksq_load - 1 > 0)
1485 			goto out;
1486 	load = 0;
1487 out:
1488 	return (load);
1489 }
1490 
1491 void
1492 sched_userret(struct thread *td)
1493 {
1494 	struct ksegrp *kg;
1495 
1496 	kg = td->td_ksegrp;
1497 
1498 	if (td->td_priority != kg->kg_user_pri) {
1499 		mtx_lock_spin(&sched_lock);
1500 		td->td_priority = kg->kg_user_pri;
1501 		mtx_unlock_spin(&sched_lock);
1502 	}
1503 }
1504 
1505 struct kse *
1506 sched_choose(void)
1507 {
1508 	struct kseq *kseq;
1509 	struct kse *ke;
1510 
1511 	mtx_assert(&sched_lock, MA_OWNED);
1512 	kseq = KSEQ_SELF();
1513 #ifdef SMP
1514 restart:
1515 	if (kseq->ksq_assigned)
1516 		kseq_assign(kseq);
1517 #endif
1518 	ke = kseq_choose(kseq);
1519 	if (ke) {
1520 #ifdef SMP
1521 		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
1522 			if (kseq_idled(kseq) == 0)
1523 				goto restart;
1524 #endif
1525 		kseq_runq_rem(kseq, ke);
1526 		ke->ke_state = KES_THREAD;
1527 
1528 		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
1529 			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
1530 			    ke, ke->ke_runq, ke->ke_slice,
1531 			    ke->ke_thread->td_priority);
1532 		}
1533 		return (ke);
1534 	}
1535 #ifdef SMP
1536 	if (kseq_idled(kseq) == 0)
1537 		goto restart;
1538 #endif
1539 	return (NULL);
1540 }
1541 
1542 void
1543 sched_add(struct thread *td)
1544 {
1545 	struct kseq *kseq;
1546 	struct ksegrp *kg;
1547 	struct kse *ke;
1548 	int class;
1549 
1550 	mtx_assert(&sched_lock, MA_OWNED);
1551 	ke = td->td_kse;
1552 	kg = td->td_ksegrp;
1553 	if (ke->ke_flags & KEF_ASSIGNED)
1554 		return;
1555 	kseq = KSEQ_SELF();
1556 	KASSERT((ke->ke_thread != NULL),
1557 	    ("sched_add: No thread on KSE"));
1558 	KASSERT((ke->ke_thread->td_kse != NULL),
1559 	    ("sched_add: No KSE on thread"));
1560 	KASSERT(ke->ke_state != KES_ONRUNQ,
1561 	    ("sched_add: kse %p (%s) already in run queue", ke,
1562 	    ke->ke_proc->p_comm));
1563 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1564 	    ("sched_add: process swapped out"));
1565 	KASSERT(ke->ke_runq == NULL,
1566 	    ("sched_add: KSE %p is still assigned to a run queue", ke));
1567 
1568 	class = PRI_BASE(kg->kg_pri_class);
1569 	switch (class) {
1570 	case PRI_ITHD:
1571 	case PRI_REALTIME:
1572 		ke->ke_runq = kseq->ksq_curr;
1573 		ke->ke_slice = SCHED_SLICE_MAX;
1574 		ke->ke_cpu = PCPU_GET(cpuid);
1575 		break;
1576 	case PRI_TIMESHARE:
1577 		if (SCHED_CURR(kg, ke))
1578 			ke->ke_runq = kseq->ksq_curr;
1579 		else
1580 			ke->ke_runq = kseq->ksq_next;
1581 		break;
1582 	case PRI_IDLE:
1583 		/*
1584 		 * This is for priority propagation.
1585 		 */
1586 		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
1587 			ke->ke_runq = kseq->ksq_curr;
1588 		else
1589 			ke->ke_runq = &kseq->ksq_idle;
1590 		ke->ke_slice = SCHED_SLICE_MIN;
1591 		break;
1592 	default:
1593 		panic("Unknown pri class.");
1594 		break;
1595 	}
1596 #ifdef SMP
1597 	if (ke->ke_cpu != PCPU_GET(cpuid)) {
1598 		ke->ke_runq = NULL;
1599 		kseq_notify(ke, ke->ke_cpu);
1600 		return;
1601 	}
1602 	/*
1603 	 * If we had been idle, clear our bit in the group and potentially
1604 	 * the global bitmap.  If not, see if we should transfer this thread.
1605 	 */
1606 	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
1607 	    (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
1608 		/*
1609 		 * Check to see if our group is unidling, and if so, remove it
1610 		 * from the global idle mask.
1611 		 */
1612 		if (kseq->ksq_group->ksg_idlemask ==
1613 		    kseq->ksq_group->ksg_cpumask)
1614 			atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
1615 		/*
1616 		 * Now remove ourselves from the group specific idle mask.
1617 		 */
1618 		kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
1619 	} else if (kseq->ksq_load > 1 && KSE_CAN_MIGRATE(ke, class))
1620 		if (kseq_transfer(kseq, ke, class))
1621 			return;
1622 #endif
1623 	if (td->td_priority < curthread->td_priority)
1624 		curthread->td_flags |= TDF_NEEDRESCHED;
1625 
1626 #ifdef SMP
1627 	/*
1628 	 * Only try to preempt if the thread is unpinned or pinned to the
1629 	 * current CPU.
1630 	 */
1631 	if (KSE_CAN_MIGRATE(ke, class) || ke->ke_cpu == PCPU_GET(cpuid))
1632 #endif
1633 	if (maybe_preempt(td))
1634 		return;
1635 	ke->ke_ksegrp->kg_runq_kses++;
1636 	ke->ke_state = KES_ONRUNQ;
1637 
1638 	kseq_runq_add(kseq, ke);
1639 	kseq_load_add(kseq, ke);
1640 }
1641 
1642 void
1643 sched_rem(struct thread *td)
1644 {
1645 	struct kseq *kseq;
1646 	struct kse *ke;
1647 
1648 	ke = td->td_kse;
1649 	/*
1650 	 * It is safe to just return here because sched_rem() is only ever
1651 	 * used in places where we're immediately going to add the
1652 	 * kse back on again.  In that case it'll be added with the correct
1653 	 * thread and priority when the caller drops the sched_lock.
1654 	 */
1655 	if (ke->ke_flags & KEF_ASSIGNED)
1656 		return;
1657 	mtx_assert(&sched_lock, MA_OWNED);
1658 	KASSERT((ke->ke_state == KES_ONRUNQ),
1659 	    ("sched_rem: KSE not on run queue"));
1660 
1661 	ke->ke_state = KES_THREAD;
1662 	ke->ke_ksegrp->kg_runq_kses--;
1663 	kseq = KSEQ_CPU(ke->ke_cpu);
1664 	kseq_runq_rem(kseq, ke);
1665 	kseq_load_rem(kseq, ke);
1666 }
1667 
1668 fixpt_t
1669 sched_pctcpu(struct thread *td)
1670 {
1671 	fixpt_t pctcpu;
1672 	struct kse *ke;
1673 
1674 	pctcpu = 0;
1675 	ke = td->td_kse;
1676 	if (ke == NULL)
1677 		return (0);
1678 
1679 	mtx_lock_spin(&sched_lock);
1680 	if (ke->ke_ticks) {
1681 		int rtick;
1682 
1683 		/*
1684 		 * Don't update more frequently than twice a second.  Allowing
1685 		 * this causes the cpu usage to decay away too quickly due to
1686 		 * rounding errors.
1687 		 */
1688 		if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
1689 		    ke->ke_ltick < (ticks - (hz / 2)))
1690 			sched_pctcpu_update(ke);
1691 		/* How many rticks per second? */
1692 		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
1693 		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
1694 	}
1695 
1696 	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
1697 	mtx_unlock_spin(&sched_lock);
1698 
1699 	return (pctcpu);
1700 }
1701 
1702 void
1703 sched_bind(struct thread *td, int cpu)
1704 {
1705 	struct kse *ke;
1706 
1707 	mtx_assert(&sched_lock, MA_OWNED);
1708 	ke = td->td_kse;
1709 	ke->ke_flags |= KEF_BOUND;
1710 #ifdef SMP
1711 	if (PCPU_GET(cpuid) == cpu)
1712 		return;
1713 	/* sched_rem without the runq_remove */
1714 	ke->ke_state = KES_THREAD;
1715 	ke->ke_ksegrp->kg_runq_kses--;
1716 	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1717 	kseq_notify(ke, cpu);
1718 	/* When we return from mi_switch we'll be on the correct cpu. */
1719 	mi_switch(SW_VOL, NULL);
1720 #endif
1721 }
1722 
1723 void
1724 sched_unbind(struct thread *td)
1725 {
1726 	mtx_assert(&sched_lock, MA_OWNED);
1727 	td->td_kse->ke_flags &= ~KEF_BOUND;
1728 }
1729 
1730 int
1731 sched_load(void)
1732 {
1733 #ifdef SMP
1734 	int total;
1735 	int i;
1736 
1737 	total = 0;
1738 	for (i = 0; i <= ksg_maxid; i++)
1739 		total += KSEQ_GROUP(i)->ksg_load;
1740 	return (total);
1741 #else
1742 	return (KSEQ_SELF()->ksq_sysload);
1743 #endif
1744 }
1745 
1746 int
1747 sched_sizeof_kse(void)
1748 {
1749 	return (sizeof(struct kse) + sizeof(struct ke_sched));
1750 }
1751 
1752 int
1753 sched_sizeof_ksegrp(void)
1754 {
1755 	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
1756 }
1757 
1758 int
1759 sched_sizeof_proc(void)
1760 {
1761 	return (sizeof(struct proc));
1762 }
1763 
1764 int
1765 sched_sizeof_thread(void)
1766 {
1767 	return (sizeof(struct thread) + sizeof(struct td_sched));
1768 }
1769