xref: /freebsd/sys/kern/sched_ule.c (revision 2454aaf51cd2189147701ef40e88d1e2e3be27a2)
1 /*-
2  * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kdb.h>
33 #include <sys/kernel.h>
34 #include <sys/ktr.h>
35 #include <sys/lock.h>
36 #include <sys/mutex.h>
37 #include <sys/proc.h>
38 #include <sys/resource.h>
39 #include <sys/resourcevar.h>
40 #include <sys/sched.h>
41 #include <sys/smp.h>
42 #include <sys/sx.h>
43 #include <sys/sysctl.h>
44 #include <sys/sysproto.h>
45 #include <sys/vmmeter.h>
46 #ifdef KTRACE
47 #include <sys/uio.h>
48 #include <sys/ktrace.h>
49 #endif
50 
51 #include <machine/cpu.h>
52 #include <machine/smp.h>
53 
54 #define KTR_ULE         KTR_NFS
55 
56 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
57 /* XXX This is bogus compatibility crap for ps */
58 static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
59 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
60 
61 static void sched_setup(void *dummy);
62 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
63 
64 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
65 
66 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
67     "Scheduler name");
68 
69 static int slice_min = 1;
70 SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");
71 
72 static int slice_max = 10;
73 SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");
74 
75 int realstathz;
76 int tickincr = 1;
77 
78 /*
79  * These data structures are allocated within their parent data structure but
80  * are scheduler specific.
81  */
82 
83 struct ke_sched {
84 	int		ske_slice;
85 	struct runq	*ske_runq;
86 	/* The following variables are only used for pctcpu calculation */
87 	int		ske_ltick;	/* Last tick that we were running on */
88 	int		ske_ftick;	/* First tick that we were running on */
89 	int		ske_ticks;	/* Tick count */
90 	/* CPU that we have affinity for. */
91 	u_char		ske_cpu;
92 };
93 #define	ke_slice	ke_sched->ske_slice
94 #define	ke_runq		ke_sched->ske_runq
95 #define	ke_ltick	ke_sched->ske_ltick
96 #define	ke_ftick	ke_sched->ske_ftick
97 #define	ke_ticks	ke_sched->ske_ticks
98 #define	ke_cpu		ke_sched->ske_cpu
99 #define	ke_assign	ke_procq.tqe_next
100 
101 #define	KEF_ASSIGNED	KEF_SCHED0	/* KSE is being migrated. */
102 #define	KEF_BOUND	KEF_SCHED1	/* KSE cannot migrate. */
103 #define	KEF_XFERABLE	KEF_SCHED2	/* KSE was added as transferable. */
104 
105 struct kg_sched {
106 	int	skg_slptime;		/* Number of ticks we vol. slept */
107 	int	skg_runtime;		/* Number of ticks we were running */
108 };
109 #define	kg_slptime	kg_sched->skg_slptime
110 #define	kg_runtime	kg_sched->skg_runtime
111 
112 struct td_sched {
113 	int	std_slptime;
114 };
115 #define	td_slptime	td_sched->std_slptime
116 
117 struct td_sched td_sched;
118 struct ke_sched ke_sched;
119 struct kg_sched kg_sched;
120 
121 struct ke_sched *kse0_sched = &ke_sched;
122 struct kg_sched *ksegrp0_sched = &kg_sched;
123 struct p_sched *proc0_sched = NULL;
124 struct td_sched *thread0_sched = &td_sched;
125 
126 /*
127  * The priority is primarily determined by the interactivity score.  Thus, we
128  * give lower (better) priorities to kse groups that use less CPU.  The nice
129  * value is then directly added to this to allow nice to have some effect
130  * on latency.
131  *
132  * PRI_RANGE:	Total priority range for timeshare threads.
133  * PRI_NRESV:	Number of nice values.
134  * PRI_BASE:	The start of the dynamic range.
135  */
136 #define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
137 #define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
138 #define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
139 #define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
140 #define	SCHED_PRI_INTERACT(score)					\
141     ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)
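/*
 * Worked example (illustrative only; assumes the stock 64-priority
 * timeshare range): an interactivity score of 50 maps to
 * 50 * 64 / 100 = 32 priority levels above PRI_MIN_TIMESHARE, before
 * the nice value is added in sched_priority().
 */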
142 
143 /*
144  * These determine the interactivity of a process.
145  *
146  * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
147  *		before throttling back.
148  * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
149  * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
150  * INTERACT_THRESH:	Threshold for placement on the current runq.
151  */
152 #define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
153 #define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
154 #define	SCHED_INTERACT_MAX	(100)
155 #define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
156 #define	SCHED_INTERACT_THRESH	(30)
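/*
 * Rough example of how the threshold is used (a sketch, not part of the
 * original comments): sched_interact_score() below returns roughly
 * 50 * run / slp for a ksegrp that sleeps more than it runs, so a ksegrp
 * that sleeps about three times as long as it runs scores ~16, which is
 * below SCHED_INTERACT_THRESH and is therefore treated as interactive.
 */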
157 
158 /*
159  * These parameters and macros determine the size of the time slice that is
160  * granted to each thread.
161  *
162  * SLICE_MIN:	Minimum time slice granted, in units of ticks.
163  * SLICE_MAX:	Maximum time slice granted.
164  * SLICE_RANGE:	Range of available time slices scaled by hz.
165  * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
166  * SLICE_NICE:  Determines the amount of slice granted for a scaled nice value.
167  * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
168  */
169 #define	SCHED_SLICE_MIN			(slice_min)
170 #define	SCHED_SLICE_MAX			(slice_max)
171 #define	SCHED_SLICE_INTERACTIVE		(slice_max)
172 #define	SCHED_SLICE_NTHRESH	(SCHED_PRI_NHALF - 1)
173 #define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
174 #define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
175 #define	SCHED_SLICE_NICE(nice)						\
176     (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))
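/*
 * Worked example (illustrative only; assumes hz = 1000, so sched_setup()
 * sets slice_min = 10 and slice_max = 142, and the usual nice range of
 * -20..20, so SCHED_SLICE_NTHRESH = 19): the least nice ksegrp gets
 * SCHED_SLICE_NICE(0) = 142 ticks, while one 19 nice points above
 * ksq_nicemin gets 142 - (19 * 133) / 19 = 9 ticks, just under slice_min.
 */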
177 
178 /*
179  * This macro determines whether or not the kse belongs on the current or
180  * next run queue.
181  */
182 #define	SCHED_INTERACTIVE(kg)						\
183     (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
184 #define	SCHED_CURR(kg, ke)						\
185     (ke->ke_thread->td_priority < kg->kg_user_pri ||			\
186     SCHED_INTERACTIVE(kg))
187 
188 /*
189  * Cpu percentage computation macros and defines.
190  *
191  * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
192  * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
193  */
194 
195 #define	SCHED_CPU_TIME	10
196 #define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)
197 
198 /*
199  * kseq - per processor runqs and statistics.
200  */
201 struct kseq {
202 	struct runq	ksq_idle;		/* Queue of IDLE threads. */
203 	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
204 	struct runq	*ksq_next;		/* Next timeshare queue. */
205 	struct runq	*ksq_curr;		/* Current queue. */
206 	int		ksq_load_timeshare;	/* Load for timeshare. */
207 	int		ksq_load;		/* Aggregate load. */
208 	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
209 	short		ksq_nicemin;		/* Least nice. */
210 #ifdef SMP
211 	int			ksq_transferable;
212 	LIST_ENTRY(kseq)	ksq_siblings;	/* Next in kseq group. */
213 	struct kseq_group	*ksq_group;	/* Our processor group. */
214 	volatile struct kse	*ksq_assigned;	/* assigned by another CPU. */
215 #else
216 	int		ksq_sysload;		/* For loadavg, !ITHD load. */
217 #endif
218 };
219 
220 #ifdef SMP
221 /*
222  * kseq groups are groups of processors which can cheaply share threads.  When
223  * one processor in the group goes idle it will check the runqs of the other
224  * processors in its group prior to halting and waiting for an interrupt.
225  * These groups are suitable for SMT (Simultaneous Multi-Threading) and not NUMA.
226  * In a NUMA environment we'd want an idle bitmap per group and a two-tiered
227  * load balancer.
228  */
229 struct kseq_group {
230 	int	ksg_cpus;		/* Count of CPUs in this kseq group. */
231 	cpumask_t ksg_cpumask;		/* Mask of cpus in this group. */
232 	cpumask_t ksg_idlemask;		/* Idle cpus in this group. */
233 	cpumask_t ksg_mask;		/* Bit mask for first cpu. */
234 	int	ksg_load;		/* Total load of this group. */
235 	int	ksg_transferable;	/* Transferable load of this group. */
236 	LIST_HEAD(, kseq)	ksg_members; /* Linked list of all members. */
237 };
238 #endif
239 
240 /*
241  * One kse queue per processor.
242  */
243 #ifdef SMP
244 static cpumask_t kseq_idle;
245 static int ksg_maxid;
246 static struct kseq	kseq_cpu[MAXCPU];
247 static struct kseq_group kseq_groups[MAXCPU];
248 static int bal_tick;
249 static int gbal_tick;
250 
251 #define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
252 #define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
253 #define	KSEQ_ID(x)	((x) - kseq_cpu)
254 #define	KSEQ_GROUP(x)	(&kseq_groups[(x)])
255 #else	/* !SMP */
256 static struct kseq	kseq_cpu;
257 
258 #define	KSEQ_SELF()	(&kseq_cpu)
259 #define	KSEQ_CPU(x)	(&kseq_cpu)
260 #endif
261 
262 static void sched_add_internal(struct thread *td, int preemptive);
263 static void sched_slice(struct kse *ke);
264 static void sched_priority(struct ksegrp *kg);
265 static int sched_interact_score(struct ksegrp *kg);
266 static void sched_interact_update(struct ksegrp *kg);
267 static void sched_interact_fork(struct ksegrp *kg);
268 static void sched_pctcpu_update(struct kse *ke);
269 
270 /* Operations on per processor queues */
271 static struct kse * kseq_choose(struct kseq *kseq);
272 static void kseq_setup(struct kseq *kseq);
273 static void kseq_load_add(struct kseq *kseq, struct kse *ke);
274 static void kseq_load_rem(struct kseq *kseq, struct kse *ke);
275 static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke);
276 static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke);
277 static void kseq_nice_add(struct kseq *kseq, int nice);
278 static void kseq_nice_rem(struct kseq *kseq, int nice);
279 void kseq_print(int cpu);
280 #ifdef SMP
281 static int kseq_transfer(struct kseq *ksq, struct kse *ke, int class);
282 static struct kse *runq_steal(struct runq *rq);
283 static void sched_balance(void);
284 static void sched_balance_groups(void);
285 static void sched_balance_group(struct kseq_group *ksg);
286 static void sched_balance_pair(struct kseq *high, struct kseq *low);
287 static void kseq_move(struct kseq *from, int cpu);
288 static int kseq_idled(struct kseq *kseq);
289 static void kseq_notify(struct kse *ke, int cpu);
290 static void kseq_assign(struct kseq *);
291 static struct kse *kseq_steal(struct kseq *kseq, int stealidle);
292 /*
293  * On P4 Xeons the round-robin interrupt delivery is broken.  As a result of
294  * this, we can't pin interrupts to the cpu that they were delivered to,
295  * otherwise all ithreads only run on CPU 0.
296  */
297 #ifdef __i386__
298 #define	KSE_CAN_MIGRATE(ke, class)					\
299     ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
300 #else /* !__i386__ */
301 #define	KSE_CAN_MIGRATE(ke, class)					\
302     ((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 &&		\
303     ((ke)->ke_flags & KEF_BOUND) == 0)
304 #endif /* !__i386__ */
305 #endif
306 
307 void
308 kseq_print(int cpu)
309 {
310 	struct kseq *kseq;
311 	int i;
312 
313 	kseq = KSEQ_CPU(cpu);
314 
315 	printf("kseq:\n");
316 	printf("\tload:           %d\n", kseq->ksq_load);
317 	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
318 #ifdef SMP
319 	printf("\tload transferable: %d\n", kseq->ksq_transferable);
320 #endif
321 	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
322 	printf("\tnice counts:\n");
323 	for (i = 0; i < SCHED_PRI_NRESV; i++)
324 		if (kseq->ksq_nice[i])
325 			printf("\t\t%d = %d\n",
326 			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
327 }
328 
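/*
 * Place a kse on its assigned run queue.  On SMP, also mark it as
 * transferable and count it in the per-kseq and per-group transferable
 * totals consumed by the load balancer and kseq_steal().
 */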
329 static __inline void
330 kseq_runq_add(struct kseq *kseq, struct kse *ke)
331 {
332 #ifdef SMP
333 	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
334 		kseq->ksq_transferable++;
335 		kseq->ksq_group->ksg_transferable++;
336 		ke->ke_flags |= KEF_XFERABLE;
337 	}
338 #endif
339 	runq_add(ke->ke_runq, ke);
340 }
341 
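/*
 * Remove a kse from its run queue, undoing the transferable accounting
 * performed by kseq_runq_add() if the kse was marked KEF_XFERABLE.
 */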
342 static __inline void
343 kseq_runq_rem(struct kseq *kseq, struct kse *ke)
344 {
345 #ifdef SMP
346 	if (ke->ke_flags & KEF_XFERABLE) {
347 		kseq->ksq_transferable--;
348 		kseq->ksq_group->ksg_transferable--;
349 		ke->ke_flags &= ~KEF_XFERABLE;
350 	}
351 #endif
352 	runq_remove(ke->ke_runq, ke);
353 }
354 
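/*
 * Charge a kse to this kseq's load counters: the aggregate load, the
 * timeshare-only load, the system load used for loadavg (unless the
 * process is P_NOLOAD or an ithread), and the per-nice histogram.
 */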
355 static void
356 kseq_load_add(struct kseq *kseq, struct kse *ke)
357 {
358 	int class;
359 	mtx_assert(&sched_lock, MA_OWNED);
360 	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
361 	if (class == PRI_TIMESHARE)
362 		kseq->ksq_load_timeshare++;
363 	kseq->ksq_load++;
364 	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
365 #ifdef SMP
366 		kseq->ksq_group->ksg_load++;
367 #else
368 		kseq->ksq_sysload++;
369 #endif
370 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
371 		CTR6(KTR_ULE,
372 		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
373 		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
374 		    ke->ke_proc->p_nice, kseq->ksq_nicemin);
375 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
376 		kseq_nice_add(kseq, ke->ke_proc->p_nice);
377 }
378 
379 static void
380 kseq_load_rem(struct kseq *kseq, struct kse *ke)
381 {
382 	int class;
383 	mtx_assert(&sched_lock, MA_OWNED);
384 	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
385 	if (class == PRI_TIMESHARE)
386 		kseq->ksq_load_timeshare--;
387 	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
388 #ifdef SMP
389 		kseq->ksq_group->ksg_load--;
390 #else
391 		kseq->ksq_sysload--;
392 #endif
393 	kseq->ksq_load--;
394 	ke->ke_runq = NULL;
395 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
396 		kseq_nice_rem(kseq, ke->ke_proc->p_nice);
397 }
398 
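/*
 * Count one more timeshare kse at this nice value and keep ksq_nicemin,
 * the least nice value on the queue, up to date.
 */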
399 static void
400 kseq_nice_add(struct kseq *kseq, int nice)
401 {
402 	mtx_assert(&sched_lock, MA_OWNED);
403 	/* Normalize to zero. */
404 	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
405 	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
406 		kseq->ksq_nicemin = nice;
407 }
408 
409 static void
410 kseq_nice_rem(struct kseq *kseq, int nice)
411 {
412 	int n;
413 
414 	mtx_assert(&sched_lock, MA_OWNED);
415 	/* Normalize to zero. */
416 	n = nice + SCHED_PRI_NHALF;
417 	kseq->ksq_nice[n]--;
418 	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));
419 
420 	/*
421 	 * If this wasn't the smallest nice value or there are more in
422 	 * this bucket we can just return.  Otherwise we have to recalculate
423 	 * the smallest nice.
424 	 */
425 	if (nice != kseq->ksq_nicemin ||
426 	    kseq->ksq_nice[n] != 0 ||
427 	    kseq->ksq_load_timeshare == 0)
428 		return;
429 
430 	for (; n < SCHED_PRI_NRESV; n++)
431 		if (kseq->ksq_nice[n]) {
432 			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
433 			return;
434 		}
435 }
436 
437 #ifdef SMP
438 /*
439  * sched_balance is a simple CPU load balancing algorithm.  It operates by
440  * finding the least loaded and most loaded cpu and equalizing their load
441  * by migrating some processes.
442  *
443  * Dealing only with two CPUs at a time has two advantages.  Firstly, most
444  * installations will only have 2 cpus.  Secondly, load balancing too much at
445  * once can have an unpleasant effect on the system.  The scheduler rarely has
446  * enough information to make perfect decisions.  So this algorithm chooses
447  * simplicity and more gradual effects on load in larger systems.
448  *
449  * It could be improved by considering the priorities and slices assigned to
450  * each task prior to balancing them.  There are many pathological cases with
451  * any approach and so the semi-random algorithm below may work as well as any.
452  *
453  */
454 static void
455 sched_balance(void)
456 {
457 	struct kseq_group *high;
458 	struct kseq_group *low;
459 	struct kseq_group *ksg;
460 	int cnt;
461 	int i;
462 
463 	if (smp_started == 0)
464 		goto out;
465 	low = high = NULL;
466 	i = random() % (ksg_maxid + 1);
467 	for (cnt = 0; cnt <= ksg_maxid; cnt++) {
468 		ksg = KSEQ_GROUP(i);
469 		/*
470 		 * Find the CPU with the highest load that has some
471 		 * threads to transfer.
472 		 */
473 		if ((high == NULL || ksg->ksg_load > high->ksg_load)
474 		    && ksg->ksg_transferable)
475 			high = ksg;
476 		if (low == NULL || ksg->ksg_load < low->ksg_load)
477 			low = ksg;
478 		if (++i > ksg_maxid)
479 			i = 0;
480 	}
481 	if (low != NULL && high != NULL && high != low)
482 		sched_balance_pair(LIST_FIRST(&high->ksg_members),
483 		    LIST_FIRST(&low->ksg_members));
484 out:
485 	bal_tick = ticks + (random() % (hz * 2));
486 }
487 
488 static void
489 sched_balance_groups(void)
490 {
491 	int i;
492 
493 	mtx_assert(&sched_lock, MA_OWNED);
494 	if (smp_started)
495 		for (i = 0; i <= ksg_maxid; i++)
496 			sched_balance_group(KSEQ_GROUP(i));
497 	gbal_tick = ticks + (random() % (hz * 2));
498 }
499 
500 static void
501 sched_balance_group(struct kseq_group *ksg)
502 {
503 	struct kseq *kseq;
504 	struct kseq *high;
505 	struct kseq *low;
506 	int load;
507 
508 	if (ksg->ksg_transferable == 0)
509 		return;
510 	low = NULL;
511 	high = NULL;
512 	LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
513 		load = kseq->ksq_load;
514 		if (high == NULL || load > high->ksq_load)
515 			high = kseq;
516 		if (low == NULL || load < low->ksq_load)
517 			low = kseq;
518 	}
519 	if (high != NULL && low != NULL && high != low)
520 		sched_balance_pair(high, low);
521 }
522 
523 static void
524 sched_balance_pair(struct kseq *high, struct kseq *low)
525 {
526 	int transferable;
527 	int high_load;
528 	int low_load;
529 	int move;
530 	int diff;
531 	int i;
532 
533 	/*
534 	 * If we're transferring within a group we have to use this specific
535 	 * kseq's transferable count, otherwise we can steal from other members
536 	 * of the group.
537 	 */
538 	if (high->ksq_group == low->ksq_group) {
539 		transferable = high->ksq_transferable;
540 		high_load = high->ksq_load;
541 		low_load = low->ksq_load;
542 	} else {
543 		transferable = high->ksq_group->ksg_transferable;
544 		high_load = high->ksq_group->ksg_load;
545 		low_load = low->ksq_group->ksg_load;
546 	}
547 	if (transferable == 0)
548 		return;
549 	/*
550 	 * Determine what the imbalance is and then adjust that to how many
551 	 * kses we actually have to give up (transferable).
552 	 */
553 	diff = high_load - low_load;
554 	move = diff / 2;
555 	if (diff & 0x1)
556 		move++;
557 	move = min(move, transferable);
558 	for (i = 0; i < move; i++)
559 		kseq_move(high, KSEQ_ID(low));
560 	return;
561 }
562 
563 static void
564 kseq_move(struct kseq *from, int cpu)
565 {
566 	struct kseq *kseq;
567 	struct kseq *to;
568 	struct kse *ke;
569 
570 	kseq = from;
571 	to = KSEQ_CPU(cpu);
572 	ke = kseq_steal(kseq, 1);
573 	if (ke == NULL) {
574 		struct kseq_group *ksg;
575 
576 		ksg = kseq->ksq_group;
577 		LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
578 			if (kseq == from || kseq->ksq_transferable == 0)
579 				continue;
580 			ke = kseq_steal(kseq, 1);
581 			break;
582 		}
583 		if (ke == NULL)
584 			panic("kseq_move: No KSEs available with a "
585 			    "transferable count of %d\n",
586 			    ksg->ksg_transferable);
587 	}
588 	if (kseq == to)
589 		return;
590 	ke->ke_state = KES_THREAD;
591 	kseq_runq_rem(kseq, ke);
592 	kseq_load_rem(kseq, ke);
593 	kseq_notify(ke, cpu);
594 }
595 
596 static int
597 kseq_idled(struct kseq *kseq)
598 {
599 	struct kseq_group *ksg;
600 	struct kseq *steal;
601 	struct kse *ke;
602 
603 	ksg = kseq->ksq_group;
604 	/*
605 	 * If we're in a cpu group, try to steal kses from another cpu in
606 	 * the group before idling.
607 	 */
608 	if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
609 		LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
610 			if (steal == kseq || steal->ksq_transferable == 0)
611 				continue;
612 			ke = kseq_steal(steal, 0);
613 			if (ke == NULL)
614 				continue;
615 			ke->ke_state = KES_THREAD;
616 			kseq_runq_rem(steal, ke);
617 			kseq_load_rem(steal, ke);
618 			ke->ke_cpu = PCPU_GET(cpuid);
619 			sched_add_internal(ke->ke_thread, 0);
620 			return (0);
621 		}
622 	}
623 	/*
624 	 * We only set the idled bit when all of the cpus in the group are
625 	 * idle.  Otherwise we could get into a situation where a KSE bounces
626 	 * back and forth between two idle cores on separate physical CPUs.
627 	 */
628 	ksg->ksg_idlemask |= PCPU_GET(cpumask);
629 	if (ksg->ksg_idlemask != ksg->ksg_cpumask)
630 		return (1);
631 	atomic_set_int(&kseq_idle, ksg->ksg_mask);
632 	return (1);
633 }
634 
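/*
 * Atomically detach the list of kses that other cpus have handed to this
 * kseq via kseq_notify() and add each one to our run queues.
 */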
635 static void
636 kseq_assign(struct kseq *kseq)
637 {
638 	struct kse *nke;
639 	struct kse *ke;
640 
641 	do {
642 		*(volatile struct kse **)&ke = kseq->ksq_assigned;
643 	} while (!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
644 	for (; ke != NULL; ke = nke) {
645 		nke = ke->ke_assign;
646 		ke->ke_flags &= ~KEF_ASSIGNED;
647 		sched_add_internal(ke->ke_thread, 0);
648 	}
649 }
650 
651 static void
652 kseq_notify(struct kse *ke, int cpu)
653 {
654 	struct kseq *kseq;
655 	struct thread *td;
656 	struct pcpu *pcpu;
657 	int prio;
658 
659 	ke->ke_cpu = cpu;
660 	ke->ke_flags |= KEF_ASSIGNED;
661 	prio = ke->ke_thread->td_priority;
662 
663 	kseq = KSEQ_CPU(cpu);
664 
665 	/*
666 	 * Place a KSE on another cpu's queue and force a resched.
667 	 */
668 	do {
669 		*(volatile struct kse **)&ke->ke_assign = kseq->ksq_assigned;
670 	} while (!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
671 	/*
672 	 * Without sched_lock we could lose a race where we set NEEDRESCHED
673 	 * on a thread that is switched out before the IPI is delivered.  This
674 	 * would lead us to miss the resched.  This will be a problem once
675 	 * sched_lock is pushed down.
676 	 */
677 	pcpu = pcpu_find(cpu);
678 	td = pcpu->pc_curthread;
679 	if (ke->ke_thread->td_priority < td->td_priority ||
680 	    td == pcpu->pc_idlethread) {
681 		td->td_flags |= TDF_NEEDRESCHED;
682 		ipi_selected(1 << cpu, IPI_AST);
683 	}
684 }
685 
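/*
 * Walk a run queue from the highest priority bucket to the lowest and
 * return the first kse that is allowed to migrate, or NULL if none is.
 */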
686 static struct kse *
687 runq_steal(struct runq *rq)
688 {
689 	struct rqhead *rqh;
690 	struct rqbits *rqb;
691 	struct kse *ke;
692 	int word;
693 	int bit;
694 
695 	mtx_assert(&sched_lock, MA_OWNED);
696 	rqb = &rq->rq_status;
697 	for (word = 0; word < RQB_LEN; word++) {
698 		if (rqb->rqb_bits[word] == 0)
699 			continue;
700 		for (bit = 0; bit < RQB_BPW; bit++) {
701 			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
702 				continue;
703 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
704 			TAILQ_FOREACH(ke, rqh, ke_procq) {
705 				if (KSE_CAN_MIGRATE(ke,
706 				    PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
707 					return (ke);
708 			}
709 		}
710 	}
711 	return (NULL);
712 }
713 
714 static struct kse *
715 kseq_steal(struct kseq *kseq, int stealidle)
716 {
717 	struct kse *ke;
718 
719 	/*
720 	 * Steal from next first to try to get a non-interactive task that
721 	 * may not have run for a while.
722 	 */
723 	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
724 		return (ke);
725 	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
726 		return (ke);
727 	if (stealidle)
728 		return (runq_steal(&kseq->ksq_idle));
729 	return (NULL);
730 }
731 
732 int
733 kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
734 {
735 	struct kseq_group *ksg;
736 	int cpu;
737 
738 	if (smp_started == 0)
739 		return (0);
740 	cpu = 0;
741 	/*
742 	 * If our load exceeds a certain threshold we should attempt to
743 	 * reassign this thread.  The first candidate is the cpu that
744 	 * originally ran the thread.  If it is idle, assign it there,
745 	 * otherwise, pick an idle cpu.
746 	 *
747 	 * The threshold at which we start to reassign kses has a large impact
748 	 * on the overall performance of the system.  Tuned too high and
749 	 * some CPUs may idle.  Too low and there will be excess migration
750 	 * and context switches.
751 	 */
752 	ksg = kseq->ksq_group;
753 	if (ksg->ksg_load > ksg->ksg_cpus && kseq_idle) {
754 		ksg = KSEQ_CPU(ke->ke_cpu)->ksq_group;
755 		if (kseq_idle & ksg->ksg_mask) {
756 			cpu = ffs(ksg->ksg_idlemask);
757 			if (cpu)
758 				goto migrate;
759 		}
760 		/*
761 		 * Multiple cpus could find this bit simultaneously
762 		 * but the race shouldn't be terrible.
763 		 */
764 		cpu = ffs(kseq_idle);
765 		if (cpu)
766 			goto migrate;
767 	}
768 	/*
769 	 * If another cpu in this group has idled, assign a thread over
770 	 * to them after checking to see if there are idled groups.
771 	 */
772 	ksg = kseq->ksq_group;
773 	if (ksg->ksg_idlemask) {
774 		cpu = ffs(ksg->ksg_idlemask);
775 		if (cpu)
776 			goto migrate;
777 	}
778 	/*
779 	 * No new CPU was found.
780 	 */
781 	return (0);
782 migrate:
783 	/*
784 	 * Now that we've found an idle CPU, migrate the thread.
785 	 */
786 	cpu--;
787 	ke->ke_runq = NULL;
788 	kseq_notify(ke, cpu);
789 
790 	return (1);
791 }
792 
793 #endif	/* SMP */
794 
795 /*
796  * Pick the highest priority task we have and return it.
797  */
798 
799 static struct kse *
800 kseq_choose(struct kseq *kseq)
801 {
802 	struct kse *ke;
803 	struct runq *swap;
804 
805 	mtx_assert(&sched_lock, MA_OWNED);
806 	swap = NULL;
807 
808 	for (;;) {
809 		ke = runq_choose(kseq->ksq_curr);
810 		if (ke == NULL) {
811 			/*
812 			 * We already swapped once and didn't get anywhere.
813 			 */
814 			if (swap)
815 				break;
816 			swap = kseq->ksq_curr;
817 			kseq->ksq_curr = kseq->ksq_next;
818 			kseq->ksq_next = swap;
819 			continue;
820 		}
821 		/*
822 		 * If we encounter a slice of 0, the kse is in a
823 		 * TIMESHARE kse group and its nice value was too far out
824 		 * of the range that receives slices.
825 		 */
826 		if (ke->ke_slice == 0) {
827 			runq_remove(ke->ke_runq, ke);
828 			sched_slice(ke);
829 			ke->ke_runq = kseq->ksq_next;
830 			runq_add(ke->ke_runq, ke);
831 			continue;
832 		}
833 		return (ke);
834 	}
835 
836 	return (runq_choose(&kseq->ksq_idle));
837 }
838 
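/*
 * Initialize a per-cpu kseq: two timeshare run queues that alternate in
 * the current/next roles, plus a separate queue for PRI_IDLE threads.
 */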
839 static void
840 kseq_setup(struct kseq *kseq)
841 {
842 	runq_init(&kseq->ksq_timeshare[0]);
843 	runq_init(&kseq->ksq_timeshare[1]);
844 	runq_init(&kseq->ksq_idle);
845 	kseq->ksq_curr = &kseq->ksq_timeshare[0];
846 	kseq->ksq_next = &kseq->ksq_timeshare[1];
847 	kseq->ksq_load = 0;
848 	kseq->ksq_load_timeshare = 0;
849 }
850 
851 static void
852 sched_setup(void *dummy)
853 {
854 #ifdef SMP
855 	int balance_groups;
856 	int i;
857 #endif
858 
859 	slice_min = (hz/100);	/* 10ms */
860 	slice_max = (hz/7);	/* ~140ms */
861 
862 #ifdef SMP
863 	balance_groups = 0;
864 	/*
865 	 * Initialize the kseqs.
866 	 */
867 	for (i = 0; i < MAXCPU; i++) {
868 		struct kseq *ksq;
869 
870 		ksq = &kseq_cpu[i];
871 		ksq->ksq_assigned = NULL;
872 		kseq_setup(&kseq_cpu[i]);
873 	}
874 	if (smp_topology == NULL) {
875 		struct kseq_group *ksg;
876 		struct kseq *ksq;
877 
878 		for (i = 0; i < MAXCPU; i++) {
879 			ksq = &kseq_cpu[i];
880 			ksg = &kseq_groups[i];
881 			/*
882 			 * Setup a kseq group with one member.
883 			 * Set up a kseq group with one member.
884 			ksq->ksq_transferable = 0;
885 			ksq->ksq_group = ksg;
886 			ksg->ksg_cpus = 1;
887 			ksg->ksg_idlemask = 0;
888 			ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
889 			ksg->ksg_load = 0;
890 			ksg->ksg_transferable = 0;
891 			LIST_INIT(&ksg->ksg_members);
892 			LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
893 		}
894 	} else {
895 		struct kseq_group *ksg;
896 		struct cpu_group *cg;
897 		int j;
898 
899 		for (i = 0; i < smp_topology->ct_count; i++) {
900 			cg = &smp_topology->ct_group[i];
901 			ksg = &kseq_groups[i];
902 			/*
903 			 * Initialize the group.
904 			 */
905 			ksg->ksg_idlemask = 0;
906 			ksg->ksg_load = 0;
907 			ksg->ksg_transferable = 0;
908 			ksg->ksg_cpus = cg->cg_count;
909 			ksg->ksg_cpumask = cg->cg_mask;
910 			LIST_INIT(&ksg->ksg_members);
911 			/*
912 			 * Find all of the group members and add them.
913 			 */
914 			for (j = 0; j < MAXCPU; j++) {
915 				if ((cg->cg_mask & (1 << j)) != 0) {
916 					if (ksg->ksg_mask == 0)
917 						ksg->ksg_mask = 1 << j;
918 					kseq_cpu[j].ksq_transferable = 0;
919 					kseq_cpu[j].ksq_group = ksg;
920 					LIST_INSERT_HEAD(&ksg->ksg_members,
921 					    &kseq_cpu[j], ksq_siblings);
922 				}
923 			}
924 			if (ksg->ksg_cpus > 1)
925 				balance_groups = 1;
926 		}
927 		ksg_maxid = smp_topology->ct_count - 1;
928 	}
929 	/*
930 	 * Stagger the group and global load balancer so they do not
931 	 * interfere with each other.
932 	 */
933 	bal_tick = ticks + hz;
934 	if (balance_groups)
935 		gbal_tick = ticks + (hz / 2);
936 #else
937 	kseq_setup(KSEQ_SELF());
938 #endif
939 	mtx_lock_spin(&sched_lock);
940 	kseq_load_add(KSEQ_SELF(), &kse0);
941 	mtx_unlock_spin(&sched_lock);
942 }
943 
944 /*
945  * Scale the scheduling priority according to the "interactivity" of this
946  * process.
947  */
948 static void
949 sched_priority(struct ksegrp *kg)
950 {
951 	int pri;
952 
953 	if (kg->kg_pri_class != PRI_TIMESHARE)
954 		return;
955 
956 	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
957 	pri += SCHED_PRI_BASE;
958 	pri += kg->kg_proc->p_nice;
959 
960 	if (pri > PRI_MAX_TIMESHARE)
961 		pri = PRI_MAX_TIMESHARE;
962 	else if (pri < PRI_MIN_TIMESHARE)
963 		pri = PRI_MIN_TIMESHARE;
964 
965 	kg->kg_user_pri = pri;
966 
967 	return;
968 }
969 
970 /*
971  * Calculate a time slice based on the properties of the kseg and the runq
972  * that we're on.  This is only for PRI_TIMESHARE ksegrps.
973  */
974 static void
975 sched_slice(struct kse *ke)
976 {
977 	struct kseq *kseq;
978 	struct ksegrp *kg;
979 
980 	kg = ke->ke_ksegrp;
981 	kseq = KSEQ_CPU(ke->ke_cpu);
982 
983 	/*
984 	 * Rationale:
985 	 * KSEs in interactive ksegs get a minimal slice so that we
986 	 * quickly notice if it abuses its advantage.
987 	 *
988 	 * KSEs in non-interactive ksegs are assigned a slice that is
989 	 * based on the ksegs nice value relative to the least nice kseg
990 	 * on the run queue for this cpu.
991 	 *
992 	 * If the KSE is less nice than all others it gets the maximum
993 	 * slice and other KSEs will adjust their slice relative to
994 	 * this when they first expire.
995 	 *
996 	 * There is a 20 point window that starts relative to the least
997 	 * nice kse on the run queue.  Slice size is determined by
998 	 * the kse's distance from the least nice ksegrp.
999 	 *
1000 	 * If the kse is outside of the window it will get no slice
1001 	 * and will be reevaluated each time it is selected on the
1002 	 * run queue.  The exception to this is nice 0 ksegs when
1003 	 * a nice -20 is running.  They are always granted a minimum
1004 	 * slice.
1005 	 */
1006 	if (!SCHED_INTERACTIVE(kg)) {
1007 		int nice;
1008 
1009 		nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
1010 		if (kseq->ksq_load_timeshare == 0 ||
1011 		    kg->kg_proc->p_nice < kseq->ksq_nicemin)
1012 			ke->ke_slice = SCHED_SLICE_MAX;
1013 		else if (nice <= SCHED_SLICE_NTHRESH)
1014 			ke->ke_slice = SCHED_SLICE_NICE(nice);
1015 		else if (kg->kg_proc->p_nice == 0)
1016 			ke->ke_slice = SCHED_SLICE_MIN;
1017 		else
1018 			ke->ke_slice = 0;
1019 	} else
1020 		ke->ke_slice = SCHED_SLICE_INTERACTIVE;
1021 
1022 	CTR6(KTR_ULE,
1023 	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
1024 	    ke, ke->ke_slice, kg->kg_proc->p_nice, kseq->ksq_nicemin,
1025 	    kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));
1026 
1027 	return;
1028 }
1029 
1030 /*
1031  * This routine enforces a maximum limit on the amount of scheduling history
1032  * kept.  It is called after either the slptime or runtime is adjusted.
1033  * This routine will not operate correctly when slp or run times have been
1034  * adjusted to more than double their maximum.
1035  */
1036 static void
1037 sched_interact_update(struct ksegrp *kg)
1038 {
1039 	int sum;
1040 
1041 	sum = kg->kg_runtime + kg->kg_slptime;
1042 	if (sum < SCHED_SLP_RUN_MAX)
1043 		return;
1044 	/*
1045 	 * If we have exceeded by more than 1/5th then the algorithm below
1046 	 * will not bring us back into range.  Dividing by two here forces
1047 	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX].
1048 	 */
1049 	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1050 		kg->kg_runtime /= 2;
1051 		kg->kg_slptime /= 2;
1052 		return;
1053 	}
1054 	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
1055 	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
1056 }
1057 
1058 static void
1059 sched_interact_fork(struct ksegrp *kg)
1060 {
1061 	int ratio;
1062 	int sum;
1063 
1064 	sum = kg->kg_runtime + kg->kg_slptime;
1065 	if (sum > SCHED_SLP_RUN_FORK) {
1066 		ratio = sum / SCHED_SLP_RUN_FORK;
1067 		kg->kg_runtime /= ratio;
1068 		kg->kg_slptime /= ratio;
1069 	}
1070 }
1071 
1072 static int
1073 sched_interact_score(struct ksegrp *kg)
1074 {
1075 	int div;
1076 
1077 	if (kg->kg_runtime > kg->kg_slptime) {
1078 		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
1079 		return (SCHED_INTERACT_HALF +
1080 		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
1081 	} else if (kg->kg_slptime > kg->kg_runtime) {
1082 		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
1083 		return (kg->kg_runtime / div);
1084 	}
1085 
1086 	/*
1087 	 * This can happen if slptime and runtime are 0.
1088 	 */
1089 	return (0);
1090 
1091 }
1092 
1093 /*
1094  * This is only somewhat accurate since, given many processes of the same
1095  * priority, they will switch when their slices run out, which will be
1096  * at most SCHED_SLICE_MAX.
1097  */
1098 int
1099 sched_rr_interval(void)
1100 {
1101 	return (SCHED_SLICE_MAX);
1102 }
1103 
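/*
 * Rescale ke_ticks so that it reflects roughly the last SCHED_CPU_TICKS
 * of history, keeping the %cpu estimate to a sliding window of about
 * SCHED_CPU_TIME (ten) seconds.
 */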
1104 static void
1105 sched_pctcpu_update(struct kse *ke)
1106 {
1107 	/*
1108 	 * Adjust counters and watermark for pctcpu calc.
1109 	 */
1110 	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
1111 		/*
1112 		 * Shift the tick count out so that the divide doesn't
1113 		 * round away our results.
1114 		 */
1115 		ke->ke_ticks <<= 10;
1116 		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
1117 			    SCHED_CPU_TICKS;
1118 		ke->ke_ticks >>= 10;
1119 	} else
1120 		ke->ke_ticks = 0;
1121 	ke->ke_ltick = ticks;
1122 	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
1123 }
1124 
1125 void
1126 sched_prio(struct thread *td, u_char prio)
1127 {
1128 	struct kse *ke;
1129 
1130 	ke = td->td_kse;
1131 	mtx_assert(&sched_lock, MA_OWNED);
1132 	if (TD_ON_RUNQ(td)) {
1133 		/*
1134 		 * If the priority has been elevated due to priority
1135 		 * propagation, we may have to move ourselves to a new
1136 		 * queue.  We still call adjustrunqueue below in case the kse
1137 		 * needs to fix things up.
1138 		 */
1139 		if (prio < td->td_priority && ke &&
1140 		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
1141 		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
1142 			runq_remove(ke->ke_runq, ke);
1143 			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
1144 			runq_add(ke->ke_runq, ke);
1145 		}
1146 		adjustrunqueue(td, prio);
1147 	} else
1148 		td->td_priority = prio;
1149 }
1150 
1151 void
1152 sched_switch(struct thread *td, struct thread *newtd)
1153 {
1154 	struct kse *ke;
1155 
1156 	mtx_assert(&sched_lock, MA_OWNED);
1157 
1158 	ke = td->td_kse;
1159 
1160 	td->td_last_kse = ke;
1161 	td->td_lastcpu = td->td_oncpu;
1162 	td->td_oncpu = NOCPU;
1163 	td->td_flags &= ~TDF_NEEDRESCHED;
1164 	td->td_pflags &= ~TDP_OWEPREEMPT;
1165 
1166 	/*
1167 	 * If the KSE has been assigned it may be in the process of switching
1168 	 * to the new cpu.  This is the case in sched_bind().
1169 	 */
1170 	if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
1171 		if (td == PCPU_GET(idlethread)) {
1172 			TD_SET_CAN_RUN(td);
1173 		} else if (TD_IS_RUNNING(td)) {
1174 			kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1175 			setrunqueue(td);
1176 		} else {
1177 			if (ke->ke_runq) {
1178 				kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1179 			} else if ((td->td_flags & TDF_IDLETD) == 0)
1180 				kdb_backtrace();
1181 			/*
1182 			 * We will not be on the run queue. So we must be
1183 			 * sleeping or similar.
1184 			 */
1185 			if (td->td_proc->p_flag & P_SA)
1186 				kse_reassign(ke);
1187 		}
1188 	}
1189 	if (newtd != NULL) {
1190 		kseq_load_add(KSEQ_SELF(), newtd->td_kse);
1191 		ke->ke_cpu = PCPU_GET(cpuid);
1192 		ke->ke_runq = KSEQ_SELF()->ksq_curr;
1193 	} else
1194 		newtd = choosethread();
1195 	if (td != newtd)
1196 		cpu_switch(td, newtd);
1197 	sched_lock.mtx_lock = (uintptr_t)td;
1198 
1199 	td->td_oncpu = PCPU_GET(cpuid);
1200 }
1201 
1202 void
1203 sched_nice(struct proc *p, int nice)
1204 {
1205 	struct ksegrp *kg;
1206 	struct kse *ke;
1207 	struct thread *td;
1208 	struct kseq *kseq;
1209 
1210 	PROC_LOCK_ASSERT(p, MA_OWNED);
1211 	mtx_assert(&sched_lock, MA_OWNED);
1212 	/*
1213 	 * We need to adjust the nice counts for running KSEs.
1214 	 */
1215 	FOREACH_KSEGRP_IN_PROC(p, kg) {
1216 		if (kg->kg_pri_class == PRI_TIMESHARE) {
1217 			FOREACH_KSE_IN_GROUP(kg, ke) {
1218 				if (ke->ke_runq == NULL)
1219 					continue;
1220 				kseq = KSEQ_CPU(ke->ke_cpu);
1221 				kseq_nice_rem(kseq, p->p_nice);
1222 				kseq_nice_add(kseq, nice);
1223 			}
1224 		}
1225 	}
1226 	p->p_nice = nice;
1227 	FOREACH_KSEGRP_IN_PROC(p, kg) {
1228 		sched_priority(kg);
1229 		FOREACH_THREAD_IN_GROUP(kg, td)
1230 			td->td_flags |= TDF_NEEDRESCHED;
1231 	}
1232 }
1233 
1234 void
1235 sched_sleep(struct thread *td)
1236 {
1237 	mtx_assert(&sched_lock, MA_OWNED);
1238 
1239 	td->td_slptime = ticks;
1240 	td->td_base_pri = td->td_priority;
1241 
1242 	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
1243 	    td->td_kse, td->td_slptime);
1244 }
1245 
1246 void
1247 sched_wakeup(struct thread *td)
1248 {
1249 	mtx_assert(&sched_lock, MA_OWNED);
1250 
1251 	/*
1252 	 * Let the kseg know how long we slept for.  This is because process
1253 	 * interactivity behavior is modeled in the kseg.
1254 	 */
1255 	if (td->td_slptime) {
1256 		struct ksegrp *kg;
1257 		int hzticks;
1258 
1259 		kg = td->td_ksegrp;
1260 		hzticks = (ticks - td->td_slptime) << 10;
1261 		if (hzticks >= SCHED_SLP_RUN_MAX) {
1262 			kg->kg_slptime = SCHED_SLP_RUN_MAX;
1263 			kg->kg_runtime = 1;
1264 		} else {
1265 			kg->kg_slptime += hzticks;
1266 			sched_interact_update(kg);
1267 		}
1268 		sched_priority(kg);
1269 		if (td->td_kse)
1270 			sched_slice(td->td_kse);
1271 		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
1272 		    td->td_kse, hzticks);
1273 		td->td_slptime = 0;
1274 	}
1275 	setrunqueue(td);
1276 }
1277 
1278 /*
1279  * Penalize the parent for creating a new child and initialize the child's
1280  * priority.
1281  */
1282 void
1283 sched_fork(struct thread *td, struct proc *p1)
1284 {
1285 
1286 	mtx_assert(&sched_lock, MA_OWNED);
1287 
1288 	p1->p_nice = td->td_proc->p_nice;
1289 	sched_fork_ksegrp(td, FIRST_KSEGRP_IN_PROC(p1));
1290 	sched_fork_kse(td, FIRST_KSE_IN_PROC(p1));
1291 	sched_fork_thread(td, FIRST_THREAD_IN_PROC(p1));
1292 }
1293 
1294 void
1295 sched_fork_kse(struct thread *td, struct kse *child)
1296 {
1297 
1298 	struct kse *ke = td->td_kse;
1299 
1300 	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
1301 	child->ke_cpu = ke->ke_cpu;
1302 	child->ke_runq = NULL;
1303 
1304 	/* Grab our parent's cpu estimation information. */
1305 	child->ke_ticks = ke->ke_ticks;
1306 	child->ke_ltick = ke->ke_ltick;
1307 	child->ke_ftick = ke->ke_ftick;
1308 }
1309 
1310 void
1311 sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
1312 {
1313 	struct ksegrp *kg = td->td_ksegrp;
1314 	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
1315 
1316 	child->kg_slptime = kg->kg_slptime;
1317 	child->kg_runtime = kg->kg_runtime;
1318 	child->kg_user_pri = kg->kg_user_pri;
1319 	sched_interact_fork(child);
1320 	kg->kg_runtime += tickincr << 10;
1321 	sched_interact_update(kg);
1322 
1323 	CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)",
1324 	    kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime,
1325 	    child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime);
1326 }
1327 
1328 void
1329 sched_fork_thread(struct thread *td, struct thread *child)
1330 {
1331 }
1332 
1333 void
1334 sched_class(struct ksegrp *kg, int class)
1335 {
1336 	struct kseq *kseq;
1337 	struct kse *ke;
1338 	int nclass;
1339 	int oclass;
1340 
1341 	mtx_assert(&sched_lock, MA_OWNED);
1342 	if (kg->kg_pri_class == class)
1343 		return;
1344 
1345 	nclass = PRI_BASE(class);
1346 	oclass = PRI_BASE(kg->kg_pri_class);
1347 	FOREACH_KSE_IN_GROUP(kg, ke) {
1348 		if (ke->ke_state != KES_ONRUNQ &&
1349 		    ke->ke_state != KES_THREAD)
1350 			continue;
1351 		kseq = KSEQ_CPU(ke->ke_cpu);
1352 
1353 #ifdef SMP
1354 		/*
1355 		 * On SMP if we're on the RUNQ we must adjust the transferable
1356 		 * count because we could be changing to or from an interrupt
1357 		 * class.
1358 		 */
1359 		if (ke->ke_state == KES_ONRUNQ) {
1360 			if (KSE_CAN_MIGRATE(ke, oclass)) {
1361 				kseq->ksq_transferable--;
1362 				kseq->ksq_group->ksg_transferable--;
1363 			}
1364 			if (KSE_CAN_MIGRATE(ke, nclass)) {
1365 				kseq->ksq_transferable++;
1366 				kseq->ksq_group->ksg_transferable++;
1367 			}
1368 		}
1369 #endif
1370 		if (oclass == PRI_TIMESHARE) {
1371 			kseq->ksq_load_timeshare--;
1372 			kseq_nice_rem(kseq, kg->kg_proc->p_nice);
1373 		}
1374 		if (nclass == PRI_TIMESHARE) {
1375 			kseq->ksq_load_timeshare++;
1376 			kseq_nice_add(kseq, kg->kg_proc->p_nice);
1377 		}
1378 	}
1379 
1380 	kg->kg_pri_class = class;
1381 }
1382 
1383 /*
1384  * Return some of the child's priority and interactivity to the parent.
1385  */
1386 void
1387 sched_exit(struct proc *p, struct thread *td)
1388 {
1389 	mtx_assert(&sched_lock, MA_OWNED);
1390 	sched_exit_kse(FIRST_KSE_IN_PROC(p), td);
1391 	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
1392 }
1393 
1394 void
1395 sched_exit_kse(struct kse *ke, struct thread *td)
1396 {
1397 	kseq_load_rem(KSEQ_CPU(td->td_kse->ke_cpu), td->td_kse);
1398 }
1399 
1400 void
1401 sched_exit_ksegrp(struct ksegrp *kg, struct thread *td)
1402 {
1403 	/* kg->kg_slptime += td->td_ksegrp->kg_slptime; */
1404 	kg->kg_runtime += td->td_ksegrp->kg_runtime;
1405 	sched_interact_update(kg);
1406 }
1407 
1408 void
1409 sched_exit_thread(struct thread *td, struct thread *child)
1410 {
1411 }
1412 
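/*
 * Called for every scheduler clock tick charged to the running thread:
 * kick the SMP balancers when due, account the tick for %cpu and
 * interactivity, and when the slice is exhausted recompute the priority
 * and slice and request a reschedule.
 */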
1413 void
1414 sched_clock(struct thread *td)
1415 {
1416 	struct kseq *kseq;
1417 	struct ksegrp *kg;
1418 	struct kse *ke;
1419 
1420 	mtx_assert(&sched_lock, MA_OWNED);
1421 	kseq = KSEQ_SELF();
1422 #ifdef SMP
1423 	if (ticks == bal_tick)
1424 		sched_balance();
1425 	if (ticks == gbal_tick)
1426 		sched_balance_groups();
1427 	/*
1428 	 * We could have been assigned a non-real-time thread without an
1429 	 * IPI.
1430 	 */
1431 	if (kseq->ksq_assigned)
1432 		kseq_assign(kseq);	/* Potentially sets NEEDRESCHED */
1433 #endif
1434 	/*
1435 	 * sched_setup() apparently happens prior to stathz being set.  We
1436 	 * need to resolve the timers earlier in the boot so we can avoid
1437 	 * calculating this here.
1438 	 */
1439 	if (realstathz == 0) {
1440 		realstathz = stathz ? stathz : hz;
1441 		tickincr = hz / realstathz;
1442 		/*
1443 		 * XXX This does not work for values of stathz that are much
1444 		 * larger than hz.
1445 		 */
1446 		if (tickincr == 0)
1447 			tickincr = 1;
1448 	}
1449 
1450 	ke = td->td_kse;
1451 	kg = ke->ke_ksegrp;
1452 
1453 	/* Adjust ticks for pctcpu */
1454 	ke->ke_ticks++;
1455 	ke->ke_ltick = ticks;
1456 
1457 	/* Go up to one second beyond our max and then trim back down */
1458 	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
1459 		sched_pctcpu_update(ke);
1460 
1461 	if (td->td_flags & TDF_IDLETD)
1462 		return;
1463 
1464 	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
1465 	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
1466 	/*
1467 	 * We only run the slicing code for TIMESHARE ksegrps.
1468 	 */
1469 	if (kg->kg_pri_class != PRI_TIMESHARE)
1470 		return;
1471 	/*
1472 	 * We used a tick; charge it to the ksegrp so that we can compute our
1473 	 * interactivity.
1474 	 */
1475 	kg->kg_runtime += tickincr << 10;
1476 	sched_interact_update(kg);
1477 
1478 	/*
1479 	 * We used up one time slice.
1480 	 */
1481 	if (--ke->ke_slice > 0)
1482 		return;
1483 	/*
1484 	 * We're out of time, recompute priorities and requeue.
1485 	 */
1486 	kseq_load_rem(kseq, ke);
1487 	sched_priority(kg);
1488 	sched_slice(ke);
1489 	if (SCHED_CURR(kg, ke))
1490 		ke->ke_runq = kseq->ksq_curr;
1491 	else
1492 		ke->ke_runq = kseq->ksq_next;
1493 	kseq_load_add(kseq, ke);
1494 	td->td_flags |= TDF_NEEDRESCHED;
1495 }
1496 
1497 int
1498 sched_runnable(void)
1499 {
1500 	struct kseq *kseq;
1501 	int load;
1502 
1503 	load = 1;
1504 
1505 	kseq = KSEQ_SELF();
1506 #ifdef SMP
1507 	if (kseq->ksq_assigned) {
1508 		mtx_lock_spin(&sched_lock);
1509 		kseq_assign(kseq);
1510 		mtx_unlock_spin(&sched_lock);
1511 	}
1512 #endif
1513 	if ((curthread->td_flags & TDF_IDLETD) != 0) {
1514 		if (kseq->ksq_load > 0)
1515 			goto out;
1516 	} else
1517 		if (kseq->ksq_load - 1 > 0)
1518 			goto out;
1519 	load = 0;
1520 out:
1521 	return (load);
1522 }
1523 
1524 void
1525 sched_userret(struct thread *td)
1526 {
1527 	struct ksegrp *kg;
1528 
1529 	kg = td->td_ksegrp;
1530 
1531 	if (td->td_priority != kg->kg_user_pri) {
1532 		mtx_lock_spin(&sched_lock);
1533 		td->td_priority = kg->kg_user_pri;
1534 		mtx_unlock_spin(&sched_lock);
1535 	}
1536 }
1537 
1538 struct kse *
1539 sched_choose(void)
1540 {
1541 	struct kseq *kseq;
1542 	struct kse *ke;
1543 
1544 	mtx_assert(&sched_lock, MA_OWNED);
1545 	kseq = KSEQ_SELF();
1546 #ifdef SMP
1547 restart:
1548 	if (kseq->ksq_assigned)
1549 		kseq_assign(kseq);
1550 #endif
1551 	ke = kseq_choose(kseq);
1552 	if (ke) {
1553 #ifdef SMP
1554 		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
1555 			if (kseq_idled(kseq) == 0)
1556 				goto restart;
1557 #endif
1558 		kseq_runq_rem(kseq, ke);
1559 		ke->ke_state = KES_THREAD;
1560 
1561 		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
1562 			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
1563 			    ke, ke->ke_runq, ke->ke_slice,
1564 			    ke->ke_thread->td_priority);
1565 		}
1566 		return (ke);
1567 	}
1568 #ifdef SMP
1569 	if (kseq_idled(kseq) == 0)
1570 		goto restart;
1571 #endif
1572 	return (NULL);
1573 }
1574 
1575 void
1576 sched_add(struct thread *td)
1577 {
1578 
1579 	sched_add_internal(td, 1);
1580 }
1581 
1582 static void
1583 sched_add_internal(struct thread *td, int preemptive)
1584 {
1585 	struct kseq *kseq;
1586 	struct ksegrp *kg;
1587 	struct kse *ke;
1588 #ifdef SMP
1589 	int canmigrate;
1590 #endif
1591 	int class;
1592 
1593 	mtx_assert(&sched_lock, MA_OWNED);
1594 	ke = td->td_kse;
1595 	kg = td->td_ksegrp;
1596 	if (ke->ke_flags & KEF_ASSIGNED)
1597 		return;
1598 	kseq = KSEQ_SELF();
1599 	KASSERT((ke->ke_thread != NULL),
1600 	    ("sched_add: No thread on KSE"));
1601 	KASSERT((ke->ke_thread->td_kse != NULL),
1602 	    ("sched_add: No KSE on thread"));
1603 	KASSERT(ke->ke_state != KES_ONRUNQ,
1604 	    ("sched_add: kse %p (%s) already in run queue", ke,
1605 	    ke->ke_proc->p_comm));
1606 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1607 	    ("sched_add: process swapped out"));
1608 	KASSERT(ke->ke_runq == NULL,
1609 	    ("sched_add: KSE %p is still assigned to a run queue", ke));
1610 
1611 	class = PRI_BASE(kg->kg_pri_class);
1612 	switch (class) {
1613 	case PRI_ITHD:
1614 	case PRI_REALTIME:
1615 		ke->ke_runq = kseq->ksq_curr;
1616 		ke->ke_slice = SCHED_SLICE_MAX;
1617 		ke->ke_cpu = PCPU_GET(cpuid);
1618 		break;
1619 	case PRI_TIMESHARE:
1620 		if (SCHED_CURR(kg, ke))
1621 			ke->ke_runq = kseq->ksq_curr;
1622 		else
1623 			ke->ke_runq = kseq->ksq_next;
1624 		break;
1625 	case PRI_IDLE:
1626 		/*
1627 		 * This is for priority propagation.
1628 		 */
1629 		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
1630 			ke->ke_runq = kseq->ksq_curr;
1631 		else
1632 			ke->ke_runq = &kseq->ksq_idle;
1633 		ke->ke_slice = SCHED_SLICE_MIN;
1634 		break;
1635 	default:
1636 		panic("Unknown pri class.");
1637 		break;
1638 	}
1639 #ifdef SMP
1640 	/*
1641 	 * Don't migrate running threads here.  Force the long term balancer
1642 	 * to do it.
1643 	 */
1644 	canmigrate = KSE_CAN_MIGRATE(ke, class);
1645 	if (TD_IS_RUNNING(td))
1646 		canmigrate = 0;
1647 
1648 	/*
1649 	 * If this thread is pinned or bound, notify the target cpu.
1650 	 */
1651 	if (!canmigrate && ke->ke_cpu != PCPU_GET(cpuid)) {
1652 		ke->ke_runq = NULL;
1653 		kseq_notify(ke, ke->ke_cpu);
1654 		return;
1655 	}
1656 	/*
1657 	 * If we had been idle, clear our bit in the group and potentially
1658 	 * the global bitmap.  If not, see if we should transfer this thread.
1659 	 */
1660 	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
1661 	    (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
1662 		/*
1663 		 * Check to see if our group is unidling, and if so, remove it
1664 		 * from the global idle mask.
1665 		 */
1666 		if (kseq->ksq_group->ksg_idlemask ==
1667 		    kseq->ksq_group->ksg_cpumask)
1668 			atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
1669 		/*
1670 		 * Now remove ourselves from the group specific idle mask.
1671 		 */
1672 		kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
1673 	} else if (kseq->ksq_load > 1 && canmigrate)
1674 		if (kseq_transfer(kseq, ke, class))
1675 			return;
1676 	ke->ke_cpu = PCPU_GET(cpuid);
1677 #endif
1678 	/*
1679 	 * XXX With preemption this is not necessary.
1680 	 */
1681 	if (td->td_priority < curthread->td_priority)
1682 		curthread->td_flags |= TDF_NEEDRESCHED;
1683 	if (preemptive && maybe_preempt(td))
1684 		return;
1685 	ke->ke_ksegrp->kg_runq_kses++;
1686 	ke->ke_state = KES_ONRUNQ;
1687 
1688 	kseq_runq_add(kseq, ke);
1689 	kseq_load_add(kseq, ke);
1690 }
1691 
1692 void
1693 sched_rem(struct thread *td)
1694 {
1695 	struct kseq *kseq;
1696 	struct kse *ke;
1697 
1698 	ke = td->td_kse;
1699 	/*
1700 	 * It is safe to just return here because sched_rem() is only ever
1701 	 * used in places where we're immediately going to add the
1702 	 * kse back on again.  In that case it'll be added with the correct
1703 	 * thread and priority when the caller drops the sched_lock.
1704 	 */
1705 	if (ke->ke_flags & KEF_ASSIGNED)
1706 		return;
1707 	mtx_assert(&sched_lock, MA_OWNED);
1708 	KASSERT((ke->ke_state == KES_ONRUNQ),
1709 	    ("sched_rem: KSE not on run queue"));
1710 
1711 	ke->ke_state = KES_THREAD;
1712 	ke->ke_ksegrp->kg_runq_kses--;
1713 	kseq = KSEQ_CPU(ke->ke_cpu);
1714 	kseq_runq_rem(kseq, ke);
1715 	kseq_load_rem(kseq, ke);
1716 }
1717 
1718 fixpt_t
1719 sched_pctcpu(struct thread *td)
1720 {
1721 	fixpt_t pctcpu;
1722 	struct kse *ke;
1723 
1724 	pctcpu = 0;
1725 	ke = td->td_kse;
1726 	if (ke == NULL)
1727 		return (0);
1728 
1729 	mtx_lock_spin(&sched_lock);
1730 	if (ke->ke_ticks) {
1731 		int rtick;
1732 
1733 		/*
1734 		 * Don't update more frequently than twice a second.  Allowing
1735 		 * this causes the cpu usage to decay away too quickly due to
1736 		 * rounding errors.
1737 		 */
1738 		if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
1739 		    ke->ke_ltick < (ticks - (hz / 2)))
1740 			sched_pctcpu_update(ke);
1741 		/* How many rtick per second ? */
1742 		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
1743 		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
1744 	}
1745 
1746 	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
1747 	mtx_unlock_spin(&sched_lock);
1748 
1749 	return (pctcpu);
1750 }
1751 
1752 void
1753 sched_bind(struct thread *td, int cpu)
1754 {
1755 	struct kse *ke;
1756 
1757 	mtx_assert(&sched_lock, MA_OWNED);
1758 	ke = td->td_kse;
1759 	ke->ke_flags |= KEF_BOUND;
1760 #ifdef SMP
1761 	if (PCPU_GET(cpuid) == cpu)
1762 		return;
1763 	/* sched_rem without the runq_remove */
1764 	ke->ke_state = KES_THREAD;
1765 	ke->ke_ksegrp->kg_runq_kses--;
1766 	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1767 	kseq_notify(ke, cpu);
1768 	/* When we return from mi_switch we'll be on the correct cpu. */
1769 	mi_switch(SW_VOL, NULL);
1770 #endif
1771 }
1772 
1773 void
1774 sched_unbind(struct thread *td)
1775 {
1776 	mtx_assert(&sched_lock, MA_OWNED);
1777 	td->td_kse->ke_flags &= ~KEF_BOUND;
1778 }
1779 
1780 int
1781 sched_load(void)
1782 {
1783 #ifdef SMP
1784 	int total;
1785 	int i;
1786 
1787 	total = 0;
1788 	for (i = 0; i <= ksg_maxid; i++)
1789 		total += KSEQ_GROUP(i)->ksg_load;
1790 	return (total);
1791 #else
1792 	return (KSEQ_SELF()->ksq_sysload);
1793 #endif
1794 }
1795 
1796 int
1797 sched_sizeof_kse(void)
1798 {
1799 	return (sizeof(struct kse) + sizeof(struct ke_sched));
1800 }
1801 
1802 int
1803 sched_sizeof_ksegrp(void)
1804 {
1805 	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
1806 }
1807 
1808 int
1809 sched_sizeof_proc(void)
1810 {
1811 	return (sizeof(struct proc));
1812 }
1813 
1814 int
1815 sched_sizeof_thread(void)
1816 {
1817 	return (sizeof(struct thread) + sizeof(struct td_sched));
1818 }
1819