xref: /freebsd/sys/kern/sched_ule.c (revision 2357939bc239bd5334a169b62313806178dd8f30)
1 /*-
2  * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/ktr.h>
34 #include <sys/lock.h>
35 #include <sys/mutex.h>
36 #include <sys/proc.h>
37 #include <sys/resource.h>
38 #include <sys/resourcevar.h>
39 #include <sys/sched.h>
40 #include <sys/smp.h>
41 #include <sys/sx.h>
42 #include <sys/sysctl.h>
43 #include <sys/sysproto.h>
44 #include <sys/vmmeter.h>
45 #ifdef DDB
46 #include <ddb/ddb.h>
47 #endif
48 #ifdef KTRACE
49 #include <sys/uio.h>
50 #include <sys/ktrace.h>
51 #endif
52 
53 #include <machine/cpu.h>
54 #include <machine/smp.h>
55 
56 #define KTR_ULE         KTR_NFS
57 
58 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
59 /* XXX This is bogus compatibility crap for ps */
60 static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
61 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
62 
63 static void sched_setup(void *dummy);
64 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
65 
66 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");
67 
68 static int slice_min = 1;
69 SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");
70 
71 static int slice_max = 10;
72 SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");
73 
74 int realstathz;
75 int tickincr = 1;
76 
77 #ifdef SMP
78 /* Callouts to handle load balancing SMP systems. */
79 static struct callout kseq_lb_callout;
80 static struct callout kseq_group_callout;
81 #endif
82 
83 /*
84  * These data structures are allocated within their parent data structure but
85  * are scheduler specific.
86  */
87 
88 struct ke_sched {
89 	int		ske_slice;
90 	struct runq	*ske_runq;
91 	/* The following variables are only used for pctcpu calculation */
92 	int		ske_ltick;	/* Last tick that we were running on */
93 	int		ske_ftick;	/* First tick that we were running on */
94 	int		ske_ticks;	/* Tick count */
95 	/* CPU that we have affinity for. */
96 	u_char		ske_cpu;
97 };
98 #define	ke_slice	ke_sched->ske_slice
99 #define	ke_runq		ke_sched->ske_runq
100 #define	ke_ltick	ke_sched->ske_ltick
101 #define	ke_ftick	ke_sched->ske_ftick
102 #define	ke_ticks	ke_sched->ske_ticks
103 #define	ke_cpu		ke_sched->ske_cpu
104 #define	ke_assign	ke_procq.tqe_next
105 
106 #define	KEF_ASSIGNED	KEF_SCHED0	/* KSE is being migrated. */
107 #define	KEF_BOUND	KEF_SCHED1	/* KSE can not migrate. */
108 
109 struct kg_sched {
110 	int	skg_slptime;		/* Number of ticks we vol. slept */
111 	int	skg_runtime;		/* Number of ticks we were running */
112 };
113 #define	kg_slptime	kg_sched->skg_slptime
114 #define	kg_runtime	kg_sched->skg_runtime
115 
116 struct td_sched {
117 	int	std_slptime;
118 };
119 #define	td_slptime	td_sched->std_slptime
120 
121 struct td_sched td_sched;
122 struct ke_sched ke_sched;
123 struct kg_sched kg_sched;
124 
125 struct ke_sched *kse0_sched = &ke_sched;
126 struct kg_sched *ksegrp0_sched = &kg_sched;
127 struct p_sched *proc0_sched = NULL;
128 struct td_sched *thread0_sched = &td_sched;
129 
130 /*
131  * The priority is primarily determined by the interactivity score.  Thus, we
132  * give lower (better) priorities to kse groups that use less CPU.  The nice
133  * value is then directly added to this to allow nice to have some effect
134  * on latency.
135  *
136  * PRI_RANGE:	Total priority range for timeshare threads.
137  * PRI_NRESV:	Number of nice values.
138  * PRI_BASE:	The start of the dynamic range.
139  */
140 #define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
141 #define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
142 #define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
143 #define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
144 #define	SCHED_PRI_INTERACT(score)					\
145     ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)
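
/*
 * Worked example (illustrative only; assumes the stock priority ranges of
 * this era, PRI_MIN_TIMESHARE = 160 and PRI_MAX_TIMESHARE = 223, so
 * SCHED_PRI_RANGE is 64): a ksegrp with an interactivity score of 50 maps
 * to 50 * 64 / 100 = 32, giving a user priority of 160 + 32 + nice before
 * sched_priority() clamps it.  A perfectly interactive group (score 0)
 * sits at 160 + nice; a fully CPU-bound one (score 100) at 160 + 64 + nice.
 */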
146 
147 /*
148  * These determine the interactivity of a process.
149  *
150  * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
151  *		before throttling back.
152  * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
153  * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
154  * INTERACT_THRESH:	Threshold for placement on the current runq.
155  */
156 #define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
157 #define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
158 #define	SCHED_INTERACT_MAX	(100)
159 #define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
160 #define	SCHED_INTERACT_THRESH	(30)
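
/*
 * Note: kg_slptime and kg_runtime are kept in ticks shifted left by 10
 * (a crude fixed point format), which is why the limits above are written
 * as (ticks << 10).  Illustrative numbers, assuming hz = 1000:
 * SCHED_SLP_RUN_MAX = (5 * 1000) << 10 = 5,120,000, i.e. five seconds of
 * combined sleep + run history, and SCHED_SLP_RUN_FORK caps the history
 * inherited at fork to roughly half a second.
 */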
161 
162 /*
163  * These parameters and macros determine the size of the time slice that is
164  * granted to each thread.
165  *
166  * SLICE_MIN:	Minimum time slice granted, in units of ticks.
167  * SLICE_MAX:	Maximum time slice granted.
168  * SLICE_RANGE:	Range of available time slices scaled by hz.
169  * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
170  * SLICE_NICE:  Determine the amount of slice granted to a scaled nice.
171  * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
172  */
173 #define	SCHED_SLICE_MIN			(slice_min)
174 #define	SCHED_SLICE_MAX			(slice_max)
175 #define	SCHED_SLICE_INTERACTIVE		(slice_max)
176 #define	SCHED_SLICE_NTHRESH	(SCHED_PRI_NHALF - 1)
177 #define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
178 #define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
179 #define	SCHED_SLICE_NICE(nice)						\
180     (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))
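
/*
 * Worked example (illustrative only; assumes hz = 1000 and the stock nice
 * range of -20..20): sched_setup() picks slice_min = 10 and slice_max = 142
 * ticks, so SCHED_SLICE_RANGE = 133 and SCHED_SLICE_NTHRESH = 19.  For a
 * kse whose nice value is "offset" points above the least nice kse on the
 * queue:
 *	offset  0:  142 - (0 * 133 / 19)  = 142 ticks (~142ms)
 *	offset 10:  142 - (10 * 133 / 19) = 72 ticks
 *	offset 19:  142 - (19 * 133 / 19) = 9 ticks
 * Offsets beyond 19 receive no slice at all, except that a nice 0 ksegrp
 * is still granted SCHED_SLICE_MIN by sched_slice().
 */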
181 
182 /*
183  * This macro determines whether or not the kse belongs on the current or
184  * next run queue.
185  */
186 #define	SCHED_INTERACTIVE(kg)						\
187     (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
188 #define	SCHED_CURR(kg, ke)						\
189     (ke->ke_thread->td_priority < kg->kg_user_pri ||			\
190     SCHED_INTERACTIVE(kg))
191 
192 /*
193  * Cpu percentage computation macros and defines.
194  *
195  * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
196  * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
197  */
198 
199 #define	SCHED_CPU_TIME	10
200 #define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)
201 
202 /*
203  * kseq - per processor runqs and statistics.
204  */
205 struct kseq {
206 	struct runq	ksq_idle;		/* Queue of IDLE threads. */
207 	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
208 	struct runq	*ksq_next;		/* Next timeshare queue. */
209 	struct runq	*ksq_curr;		/* Current queue. */
210 	int		ksq_load_timeshare;	/* Load for timeshare. */
211 	int		ksq_load;		/* Aggregate load. */
212 	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
213 	short		ksq_nicemin;		/* Least nice. */
214 #ifdef SMP
215 	int			ksq_transferable;
216 	LIST_ENTRY(kseq)	ksq_siblings;	/* Next in kseq group. */
217 	struct kseq_group	*ksq_group;	/* Our processor group. */
218 	volatile struct kse	*ksq_assigned;	/* assigned by another CPU. */
219 #else
220 	int		ksq_sysload;		/* For loadavg, !ITHD load. */
221 #endif
222 };
223 
224 #ifdef SMP
225 /*
226  * kseq groups are groups of processors which can cheaply share threads.  When
227  * one processor in the group goes idle it will check the runqs of the other
228  * processors in its group prior to halting and waiting for an interrupt.
229  * These groups are suitable for SMT (Simultaneous Multi-Threading) and not NUMA.
230  * In a NUMA environment we'd want an idle bitmap per group and a two-tiered
231  * load balancer.
232  */
233 struct kseq_group {
234 	int	ksg_cpus;		/* Count of CPUs in this kseq group. */
235 	cpumask_t ksg_cpumask;		/* Mask of cpus in this group. */
236 	cpumask_t ksg_idlemask;		/* Idle cpus in this group. */
237 	cpumask_t ksg_mask;		/* Bit mask for first cpu. */
238 	int	ksg_load;		/* Total load of this group. */
239 	int	ksg_transferable;	/* Transferable load of this group. */
240 	LIST_HEAD(, kseq)	ksg_members; /* Linked list of all members. */
241 };
242 #endif
243 
244 /*
245  * One kse queue per processor.
246  */
247 #ifdef SMP
248 static cpumask_t kseq_idle;
249 static int ksg_maxid;
250 static struct kseq	kseq_cpu[MAXCPU];
251 static struct kseq_group kseq_groups[MAXCPU];
252 #define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
253 #define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
254 #define	KSEQ_ID(x)	((x) - kseq_cpu)
255 #define	KSEQ_GROUP(x)	(&kseq_groups[(x)])
256 #else	/* !SMP */
257 static struct kseq	kseq_cpu;
258 #define	KSEQ_SELF()	(&kseq_cpu)
259 #define	KSEQ_CPU(x)	(&kseq_cpu)
260 #endif
261 
262 static void sched_slice(struct kse *ke);
263 static void sched_priority(struct ksegrp *kg);
264 static int sched_interact_score(struct ksegrp *kg);
265 static void sched_interact_update(struct ksegrp *kg);
266 static void sched_interact_fork(struct ksegrp *kg);
267 static void sched_pctcpu_update(struct kse *ke);
268 
269 /* Operations on per processor queues */
270 static struct kse * kseq_choose(struct kseq *kseq);
271 static void kseq_setup(struct kseq *kseq);
272 static void kseq_load_add(struct kseq *kseq, struct kse *ke);
273 static void kseq_load_rem(struct kseq *kseq, struct kse *ke);
274 static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke);
275 static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke);
276 static void kseq_nice_add(struct kseq *kseq, int nice);
277 static void kseq_nice_rem(struct kseq *kseq, int nice);
278 void kseq_print(int cpu);
279 #ifdef SMP
280 static int kseq_transfer(struct kseq *ksq, struct kse *ke, int class);
281 static struct kse *runq_steal(struct runq *rq);
282 static void sched_balance(void *arg);
283 static void sched_balance_group(struct kseq_group *ksg);
284 static void sched_balance_pair(struct kseq *high, struct kseq *low);
285 static void kseq_move(struct kseq *from, int cpu);
286 static int kseq_idled(struct kseq *kseq);
287 static void kseq_notify(struct kse *ke, int cpu);
288 static void kseq_assign(struct kseq *);
289 static struct kse *kseq_steal(struct kseq *kseq, int stealidle);
290 /*
291  * On P4 Xeons the round-robin interrupt delivery is broken.  As a result of
292  * this, we can't pin interrupts to the cpu that they were delivered to,
293  * otherwise all ithreads only run on CPU 0.
294  */
295 #ifdef __i386__
296 #define	KSE_CAN_MIGRATE(ke, class)					\
297     ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
298 #else /* !__i386__ */
299 #define	KSE_CAN_MIGRATE(ke, class)					\
300     ((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 &&		\
301     ((ke)->ke_flags & KEF_BOUND) == 0)
302 #endif /* !__i386__ */
303 #endif
304 
305 void
306 kseq_print(int cpu)
307 {
308 	struct kseq *kseq;
309 	int i;
310 
311 	kseq = KSEQ_CPU(cpu);
312 
313 	printf("kseq:\n");
314 	printf("\tload:           %d\n", kseq->ksq_load);
315 	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
316 #ifdef SMP
317 	printf("\tload transferable: %d\n", kseq->ksq_transferable);
318 #endif
319 	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
320 	printf("\tnice counts:\n");
321 	for (i = 0; i < SCHED_PRI_NRESV; i++)
322 		if (kseq->ksq_nice[i])
323 			printf("\t\t%d = %d\n",
324 			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
325 }
326 
327 static __inline void
328 kseq_runq_add(struct kseq *kseq, struct kse *ke)
329 {
330 #ifdef SMP
331 	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
332 		kseq->ksq_transferable++;
333 		kseq->ksq_group->ksg_transferable++;
334 	}
335 #endif
336 	runq_add(ke->ke_runq, ke);
337 }
338 
339 static __inline void
340 kseq_runq_rem(struct kseq *kseq, struct kse *ke)
341 {
342 #ifdef SMP
343 	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
344 		kseq->ksq_transferable--;
345 		kseq->ksq_group->ksg_transferable--;
346 	}
347 #endif
348 	runq_remove(ke->ke_runq, ke);
349 }
350 
351 static void
352 kseq_load_add(struct kseq *kseq, struct kse *ke)
353 {
354 	int class;
355 	mtx_assert(&sched_lock, MA_OWNED);
356 	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
357 	if (class == PRI_TIMESHARE)
358 		kseq->ksq_load_timeshare++;
359 	kseq->ksq_load++;
360 	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
361 #ifdef SMP
362 		kseq->ksq_group->ksg_load++;
363 #else
364 		kseq->ksq_sysload++;
365 #endif
366 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
367 		CTR6(KTR_ULE,
368 		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
369 		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
370 		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
371 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
372 		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
373 }
374 
375 static void
376 kseq_load_rem(struct kseq *kseq, struct kse *ke)
377 {
378 	int class;
379 	mtx_assert(&sched_lock, MA_OWNED);
380 	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
381 	if (class == PRI_TIMESHARE)
382 		kseq->ksq_load_timeshare--;
383 	if (class != PRI_ITHD  && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
384 #ifdef SMP
385 		kseq->ksq_group->ksg_load--;
386 #else
387 		kseq->ksq_sysload--;
388 #endif
389 	kseq->ksq_load--;
390 	ke->ke_runq = NULL;
391 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
392 		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
393 }
394 
395 static void
396 kseq_nice_add(struct kseq *kseq, int nice)
397 {
398 	mtx_assert(&sched_lock, MA_OWNED);
399 	/* Normalize to zero. */
400 	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
401 	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
402 		kseq->ksq_nicemin = nice;
403 }
404 
405 static void
406 kseq_nice_rem(struct kseq *kseq, int nice)
407 {
408 	int n;
409 
410 	mtx_assert(&sched_lock, MA_OWNED);
411 	/* Normalize to zero. */
412 	n = nice + SCHED_PRI_NHALF;
413 	kseq->ksq_nice[n]--;
414 	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));
415 
416 	/*
417 	 * If this wasn't the smallest nice value or there are more in
418 	 * this bucket we can just return.  Otherwise we have to recalculate
419 	 * the smallest nice.
420 	 */
421 	if (nice != kseq->ksq_nicemin ||
422 	    kseq->ksq_nice[n] != 0 ||
423 	    kseq->ksq_load_timeshare == 0)
424 		return;
425 
426 	for (; n < SCHED_PRI_NRESV; n++)
427 		if (kseq->ksq_nice[n]) {
428 			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
429 			return;
430 		}
431 }
432 
433 #ifdef SMP
434 /*
435  * sched_balance is a simple CPU load balancing algorithm.  It operates by
436  * finding the least loaded and most loaded cpu and equalizing their load
437  * by migrating some processes.
438  *
439  * Dealing only with two CPUs at a time has two advantages.  Firstly, most
440  * installations will only have 2 cpus.  Secondly, load balancing too much at
441  * once can have an unpleasant effect on the system.  The scheduler rarely has
442  * enough information to make perfect decisions.  So this algorithm chooses
443  * simplicity and more gradual effects on load in larger systems.
444  *
445  * It could be improved by considering the priorities and slices assigned to
446  * each task prior to balancing them.  There are many pathological cases with
447  * any approach and so the semi random algorithm below may work as well as any.
448  *
449  */
450 static void
451 sched_balance(void *arg)
452 {
453 	struct kseq_group *high;
454 	struct kseq_group *low;
455 	struct kseq_group *ksg;
456 	int timo;
457 	int cnt;
458 	int i;
459 
460 	mtx_lock_spin(&sched_lock);
461 	if (smp_started == 0)
462 		goto out;
463 	low = high = NULL;
464 	i = random() % (ksg_maxid + 1);
465 	for (cnt = 0; cnt <= ksg_maxid; cnt++) {
466 		ksg = KSEQ_GROUP(i);
467 		/*
468 		 * Find the CPU with the highest load that has some
469 		 * threads to transfer.
470 		 */
471 		if ((high == NULL || ksg->ksg_load > high->ksg_load)
472 		    && ksg->ksg_transferable)
473 			high = ksg;
474 		if (low == NULL || ksg->ksg_load < low->ksg_load)
475 			low = ksg;
476 		if (++i > ksg_maxid)
477 			i = 0;
478 	}
479 	if (low != NULL && high != NULL && high != low)
480 		sched_balance_pair(LIST_FIRST(&high->ksg_members),
481 		    LIST_FIRST(&low->ksg_members));
482 out:
483 	mtx_unlock_spin(&sched_lock);
484 	timo = random() % (hz * 2);
485 	callout_reset(&kseq_lb_callout, timo, sched_balance, NULL);
486 }
487 
488 static void
489 sched_balance_groups(void *arg)
490 {
491 	int timo;
492 	int i;
493 
494 	mtx_lock_spin(&sched_lock);
495 	if (smp_started)
496 		for (i = 0; i <= ksg_maxid; i++)
497 			sched_balance_group(KSEQ_GROUP(i));
498 	mtx_unlock_spin(&sched_lock);
499 	timo = random() % (hz * 2);
500 	callout_reset(&kseq_group_callout, timo, sched_balance_groups, NULL);
501 }
502 
503 static void
504 sched_balance_group(struct kseq_group *ksg)
505 {
506 	struct kseq *kseq;
507 	struct kseq *high;
508 	struct kseq *low;
509 	int load;
510 
511 	if (ksg->ksg_transferable == 0)
512 		return;
513 	low = NULL;
514 	high = NULL;
515 	LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
516 		load = kseq->ksq_load;
517 		if (kseq == KSEQ_CPU(0))
518 			load--;
519 		if (high == NULL || load > high->ksq_load)
520 			high = kseq;
521 		if (low == NULL || load < low->ksq_load)
522 			low = kseq;
523 	}
524 	if (high != NULL && low != NULL && high != low)
525 		sched_balance_pair(high, low);
526 }
527 
528 static void
529 sched_balance_pair(struct kseq *high, struct kseq *low)
530 {
531 	int transferable;
532 	int high_load;
533 	int low_load;
534 	int move;
535 	int diff;
536 	int i;
537 
538 	/*
539 	 * If we're transferring within a group we have to use this specific
540 	 * kseq's transferable count, otherwise we can steal from other members
541 	 * of the group.
542 	 */
543 	if (high->ksq_group == low->ksq_group) {
544 		transferable = high->ksq_transferable;
545 		high_load = high->ksq_load;
546 		low_load = low->ksq_load;
547 		/*
548 	 * XXX If we encounter cpu 0 we must remember to reduce its
549 		 * load by 1 to reflect the swi that is running the callout.
550 		 * At some point we should really fix load balancing of the
551 	 * swi and then this won't matter.
552 		 */
553 		if (high == KSEQ_CPU(0))
554 			high_load--;
555 		if (low == KSEQ_CPU(0))
556 			low_load--;
557 	} else {
558 		transferable = high->ksq_group->ksg_transferable;
559 		high_load = high->ksq_group->ksg_load;
560 		low_load = low->ksq_group->ksg_load;
561 	}
562 	if (transferable == 0)
563 		return;
564 	/*
565 	 * Determine what the imbalance is and then adjust that to how many
566 	 * kses we actually have to give up (transferable).
567 	 */
568 	diff = high_load - low_load;
569 	move = diff / 2;
570 	if (diff & 0x1)
571 		move++;
572 	move = min(move, transferable);
573 	for (i = 0; i < move; i++)
574 		kseq_move(high, KSEQ_ID(low));
575 	return;
576 }
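
/*
 * Worked example (illustrative): with high_load = 5 and low_load = 2,
 * diff = 3 and move rounds up to 2, so up to two kses are pushed from the
 * busy kseq toward the idle one, never more than the transferable count
 * allows.  Rounding the odd remainder up guarantees at least one kse
 * moves whenever the loads differ at all.
 */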
577 
578 static void
579 kseq_move(struct kseq *from, int cpu)
580 {
581 	struct kseq *kseq;
582 	struct kseq *to;
583 	struct kse *ke;
584 
585 	kseq = from;
586 	to = KSEQ_CPU(cpu);
587 	ke = kseq_steal(kseq, 1);
588 	if (ke == NULL) {
589 		struct kseq_group *ksg;
590 
591 		ksg = kseq->ksq_group;
592 		LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
593 			if (kseq == from || kseq->ksq_transferable == 0)
594 				continue;
595 			ke = kseq_steal(kseq, 1);
596 			break;
597 		}
598 		if (ke == NULL)
599 			panic("kseq_move: No KSEs available with a "
600 			    "transferable count of %d\n",
601 			    ksg->ksg_transferable);
602 	}
603 	if (kseq == to)
604 		return;
605 	ke->ke_state = KES_THREAD;
606 	kseq_runq_rem(kseq, ke);
607 	kseq_load_rem(kseq, ke);
608 	kseq_notify(ke, cpu);
609 }
610 
611 static int
612 kseq_idled(struct kseq *kseq)
613 {
614 	struct kseq_group *ksg;
615 	struct kseq *steal;
616 	struct kse *ke;
617 
618 	ksg = kseq->ksq_group;
619 	/*
620 	 * If we're in a cpu group, try and steal kses from another cpu in
621 	 * the group before idling.
622 	 */
623 	if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
624 		LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
625 			if (steal == kseq || steal->ksq_transferable == 0)
626 				continue;
627 			ke = kseq_steal(steal, 0);
628 			if (ke == NULL)
629 				continue;
630 			ke->ke_state = KES_THREAD;
631 			kseq_runq_rem(steal, ke);
632 			kseq_load_rem(steal, ke);
633 			ke->ke_cpu = PCPU_GET(cpuid);
634 			sched_add(ke->ke_thread);
635 			return (0);
636 		}
637 	}
638 	/*
639 	 * We only set the idled bit when all of the cpus in the group are
640 	 * idle.  Otherwise we could get into a situation where a KSE bounces
641 	 * back and forth between two idle cores on separate physical CPUs.
642 	 */
643 	ksg->ksg_idlemask |= PCPU_GET(cpumask);
644 	if (ksg->ksg_idlemask != ksg->ksg_cpumask)
645 		return (1);
646 	atomic_set_int(&kseq_idle, ksg->ksg_mask);
647 	return (1);
648 }
649 
650 static void
651 kseq_assign(struct kseq *kseq)
652 {
653 	struct kse *nke;
654 	struct kse *ke;
655 
656 	do {
657 		ke = (struct kse *)kseq->ksq_assigned;
658 	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
659 	for (; ke != NULL; ke = nke) {
660 		nke = ke->ke_assign;
661 		ke->ke_flags &= ~KEF_ASSIGNED;
662 		sched_add(ke->ke_thread);
663 	}
664 }
665 
666 static void
667 kseq_notify(struct kse *ke, int cpu)
668 {
669 	struct kseq *kseq;
670 	struct thread *td;
671 	struct pcpu *pcpu;
672 
673 	ke->ke_cpu = cpu;
674 	ke->ke_flags |= KEF_ASSIGNED;
675 
676 	kseq = KSEQ_CPU(cpu);
677 
678 	/*
679 	 * Place a KSE on another cpu's queue and force a resched.
680 	 */
681 	do {
682 		ke->ke_assign = (struct kse *)kseq->ksq_assigned;
683 	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
684 	pcpu = pcpu_find(cpu);
685 	td = pcpu->pc_curthread;
686 	if (ke->ke_thread->td_priority < td->td_priority ||
687 	    td == pcpu->pc_idlethread) {
688 		td->td_flags |= TDF_NEEDRESCHED;
689 		ipi_selected(1 << cpu, IPI_AST);
690 	}
691 }
692 
693 static struct kse *
694 runq_steal(struct runq *rq)
695 {
696 	struct rqhead *rqh;
697 	struct rqbits *rqb;
698 	struct kse *ke;
699 	int word;
700 	int bit;
701 
702 	mtx_assert(&sched_lock, MA_OWNED);
703 	rqb = &rq->rq_status;
704 	for (word = 0; word < RQB_LEN; word++) {
705 		if (rqb->rqb_bits[word] == 0)
706 			continue;
707 		for (bit = 0; bit < RQB_BPW; bit++) {
708 			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
709 				continue;
710 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
711 			TAILQ_FOREACH(ke, rqh, ke_procq) {
712 				if (KSE_CAN_MIGRATE(ke,
713 				    PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
714 					return (ke);
715 			}
716 		}
717 	}
718 	return (NULL);
719 }
720 
721 static struct kse *
722 kseq_steal(struct kseq *kseq, int stealidle)
723 {
724 	struct kse *ke;
725 
726 	/*
727 	 * Steal from next first to try to get a non-interactive task that
728 	 * may not have run for a while.
729 	 */
730 	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
731 		return (ke);
732 	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
733 		return (ke);
734 	if (stealidle)
735 		return (runq_steal(&kseq->ksq_idle));
736 	return (NULL);
737 }
738 
739 int
740 kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
741 {
742 	struct kseq_group *ksg;
743 	int cpu;
744 
745 	if (smp_started == 0)
746 		return (0);
747 	cpu = 0;
748 	ksg = kseq->ksq_group;
749 
750 	/*
751 	 * If there are any idle groups, give them our extra load.  The
752 	 * threshold at which we start to reassign kses has a large impact
753 	 * on the overall performance of the system.  Tuned too high and
754 	 * some CPUs may idle.  Too low and there will be excess migration
755 	 * and context switches.
756 	 */
757 	if (ksg->ksg_load > (ksg->ksg_cpus * 2) && kseq_idle) {
758 		/*
759 		 * Multiple cpus could find this bit simultaneously
760 		 * but the race shouldn't be terrible.
761 		 */
762 		cpu = ffs(kseq_idle);
763 		if (cpu)
764 			atomic_clear_int(&kseq_idle, 1 << (cpu - 1));
765 	}
766 	/*
767 	 * If another cpu in this group has idled, assign a thread over
768 	 * to them after checking to see if there are idled groups.
769 	 */
770 	if (cpu == 0 && kseq->ksq_load > 1 && ksg->ksg_idlemask) {
771 		cpu = ffs(ksg->ksg_idlemask);
772 		if (cpu)
773 			ksg->ksg_idlemask &= ~(1 << (cpu - 1));
774 	}
775 	/*
776 	 * Now that we've found an idle CPU, migrate the thread.
777 	 */
778 	if (cpu) {
779 		cpu--;
780 		ke->ke_runq = NULL;
781 		kseq_notify(ke, cpu);
782 		return (1);
783 	}
784 	return (0);
785 }
786 
787 #endif	/* SMP */
788 
789 /*
790  * Pick the highest priority task we have and return it.
791  */
792 
793 static struct kse *
794 kseq_choose(struct kseq *kseq)
795 {
796 	struct kse *ke;
797 	struct runq *swap;
798 
799 	mtx_assert(&sched_lock, MA_OWNED);
800 	swap = NULL;
801 
802 	for (;;) {
803 		ke = runq_choose(kseq->ksq_curr);
804 		if (ke == NULL) {
805 			/*
806 			 * We already swapped once and didn't get anywhere.
807 			 */
808 			if (swap)
809 				break;
810 			swap = kseq->ksq_curr;
811 			kseq->ksq_curr = kseq->ksq_next;
812 			kseq->ksq_next = swap;
813 			continue;
814 		}
815 		/*
816 		 * If we encounter a slice of 0 the kse is in a
817 		 * TIMESHARE kse group and its nice was too far out
818 		 * of the range that receives slices.
819 		 */
820 		if (ke->ke_slice == 0) {
821 			runq_remove(ke->ke_runq, ke);
822 			sched_slice(ke);
823 			ke->ke_runq = kseq->ksq_next;
824 			runq_add(ke->ke_runq, ke);
825 			continue;
826 		}
827 		return (ke);
828 	}
829 
830 	return (runq_choose(&kseq->ksq_idle));
831 }
832 
833 static void
834 kseq_setup(struct kseq *kseq)
835 {
836 	runq_init(&kseq->ksq_timeshare[0]);
837 	runq_init(&kseq->ksq_timeshare[1]);
838 	runq_init(&kseq->ksq_idle);
839 	kseq->ksq_curr = &kseq->ksq_timeshare[0];
840 	kseq->ksq_next = &kseq->ksq_timeshare[1];
841 	kseq->ksq_load = 0;
842 	kseq->ksq_load_timeshare = 0;
843 }
844 
845 static void
846 sched_setup(void *dummy)
847 {
848 #ifdef SMP
849 	int balance_groups;
850 	int i;
851 #endif
852 
853 	slice_min = (hz/100);	/* 10ms */
854 	slice_max = (hz/7);	/* ~140ms */
855 
856 #ifdef SMP
857 	balance_groups = 0;
858 	/*
859 	 * Initialize the kseqs.
860 	 */
861 	for (i = 0; i < MAXCPU; i++) {
862 		struct kseq *ksq;
863 
864 		ksq = &kseq_cpu[i];
865 		ksq->ksq_assigned = NULL;
866 		kseq_setup(&kseq_cpu[i]);
867 	}
868 	if (smp_topology == NULL) {
869 		struct kseq_group *ksg;
870 		struct kseq *ksq;
871 
872 		for (i = 0; i < MAXCPU; i++) {
873 			ksq = &kseq_cpu[i];
874 			ksg = &kseq_groups[i];
875 			/*
876 			 * Setup a kse group with one member.
877 			 */
878 			ksq->ksq_transferable = 0;
879 			ksq->ksq_group = ksg;
880 			ksg->ksg_cpus = 1;
881 			ksg->ksg_idlemask = 0;
882 			ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
883 			ksg->ksg_load = 0;
884 			ksg->ksg_transferable = 0;
885 			LIST_INIT(&ksg->ksg_members);
886 			LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
887 		}
888 	} else {
889 		struct kseq_group *ksg;
890 		struct cpu_group *cg;
891 		int j;
892 
893 		for (i = 0; i < smp_topology->ct_count; i++) {
894 			cg = &smp_topology->ct_group[i];
895 			ksg = &kseq_groups[i];
896 			/*
897 			 * Initialize the group.
898 			 */
899 			ksg->ksg_idlemask = 0;
900 			ksg->ksg_load = 0;
901 			ksg->ksg_transferable = 0;
902 			ksg->ksg_cpus = cg->cg_count;
903 			ksg->ksg_cpumask = cg->cg_mask;
904 			LIST_INIT(&ksg->ksg_members);
905 			/*
906 			 * Find all of the group members and add them.
907 			 */
908 			for (j = 0; j < MAXCPU; j++) {
909 				if ((cg->cg_mask & (1 << j)) != 0) {
910 					if (ksg->ksg_mask == 0)
911 						ksg->ksg_mask = 1 << j;
912 					kseq_cpu[j].ksq_transferable = 0;
913 					kseq_cpu[j].ksq_group = ksg;
914 					LIST_INSERT_HEAD(&ksg->ksg_members,
915 					    &kseq_cpu[j], ksq_siblings);
916 				}
917 			}
918 			if (ksg->ksg_cpus > 1)
919 				balance_groups = 1;
920 		}
921 		ksg_maxid = smp_topology->ct_count - 1;
922 	}
923 	callout_init(&kseq_lb_callout, CALLOUT_MPSAFE);
924 	callout_init(&kseq_group_callout, CALLOUT_MPSAFE);
925 	sched_balance(NULL);
926 	/*
927 	 * Stagger the group and global load balancer so they do not
928 	 * interfere with each other.
929 	 */
930 	if (balance_groups)
931 		callout_reset(&kseq_group_callout, hz / 2,
932 		    sched_balance_groups, NULL);
933 #else
934 	kseq_setup(KSEQ_SELF());
935 #endif
936 	mtx_lock_spin(&sched_lock);
937 	kseq_load_add(KSEQ_SELF(), &kse0);
938 	mtx_unlock_spin(&sched_lock);
939 }
940 
941 /*
942  * Scale the scheduling priority according to the "interactivity" of this
943  * process.
944  */
945 static void
946 sched_priority(struct ksegrp *kg)
947 {
948 	int pri;
949 
950 	if (kg->kg_pri_class != PRI_TIMESHARE)
951 		return;
952 
953 	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
954 	pri += SCHED_PRI_BASE;
955 	pri += kg->kg_nice;
956 
957 	if (pri > PRI_MAX_TIMESHARE)
958 		pri = PRI_MAX_TIMESHARE;
959 	else if (pri < PRI_MIN_TIMESHARE)
960 		pri = PRI_MIN_TIMESHARE;
961 
962 	kg->kg_user_pri = pri;
963 
964 	return;
965 }
966 
967 /*
968  * Calculate a time slice based on the properties of the kseg and the runq
969  * that we're on.  This is only for PRI_TIMESHARE ksegrps.
970  */
971 static void
972 sched_slice(struct kse *ke)
973 {
974 	struct kseq *kseq;
975 	struct ksegrp *kg;
976 
977 	kg = ke->ke_ksegrp;
978 	kseq = KSEQ_CPU(ke->ke_cpu);
979 
980 	/*
981 	 * Rationale:
982 	 * KSEs in interactive ksegs get the minimum slice so that we
983 	 * quickly notice if it abuses its advantage.
984 	 *
985 	 * KSEs in non-interactive ksegs are assigned a slice that is
986 	 * based on the kseg's nice value relative to the least nice kseg
987 	 * on the run queue for this cpu.
988 	 *
989 	 * If the KSE is less nice than all others it gets the maximum
990 	 * slice and other KSEs will adjust their slice relative to
991 	 * this when they first expire.
992 	 *
993 	 * There is a 20 point window that starts relative to the least
994 	 * nice kse on the run queue.  Slice size is determined by
995 	 * the kse's distance from the least nice ksegrp.
996 	 *
997 	 * If the kse is outside of the window it will get no slice
998 	 * and will be reevaluated each time it is selected on the
999 	 * run queue.  The exception to this is nice 0 ksegs when
1000 	 * a nice -20 is running.  They are always granted a minimum
1001 	 * slice.
1002 	 */
1003 	if (!SCHED_INTERACTIVE(kg)) {
1004 		int nice;
1005 
1006 		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
1007 		if (kseq->ksq_load_timeshare == 0 ||
1008 		    kg->kg_nice < kseq->ksq_nicemin)
1009 			ke->ke_slice = SCHED_SLICE_MAX;
1010 		else if (nice <= SCHED_SLICE_NTHRESH)
1011 			ke->ke_slice = SCHED_SLICE_NICE(nice);
1012 		else if (kg->kg_nice == 0)
1013 			ke->ke_slice = SCHED_SLICE_MIN;
1014 		else
1015 			ke->ke_slice = 0;
1016 	} else
1017 		ke->ke_slice = SCHED_SLICE_INTERACTIVE;
1018 
1019 	CTR6(KTR_ULE,
1020 	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
1021 	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
1022 	    kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));
1023 
1024 	return;
1025 }
1026 
1027 /*
1028  * This routine enforces a maximum limit on the amount of scheduling history
1029  * kept.  It is called after either the slptime or runtime is adjusted.
1030  * This routine will not operate correctly when slp or run times have been
1031  * adjusted to more than double their maximum.
1032  */
1033 static void
1034 sched_interact_update(struct ksegrp *kg)
1035 {
1036 	int sum;
1037 
1038 	sum = kg->kg_runtime + kg->kg_slptime;
1039 	if (sum < SCHED_SLP_RUN_MAX)
1040 		return;
1041 	/*
1042 	 * If we have exceeded by more than 1/5th then the algorithm below
1043 	 * will not bring us back into range.  Dividing by two here forces
1044 	 * us into the range of [3/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
1045 	 */
1046 	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1047 		kg->kg_runtime /= 2;
1048 		kg->kg_slptime /= 2;
1049 		return;
1050 	}
1051 	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
1052 	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
1053 }
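
/*
 * Worked example (illustrative only; assumes hz = 1000, so
 * SCHED_SLP_RUN_MAX = 5,120,000): a sum of 5,500,000 is over the limit but
 * within 6/5 of it, so both values are scaled by 4/5 and the sum drops to
 * about 4,400,000.  A sum of 7,000,000 exceeds 6/5 of the limit, so both
 * values are halved instead, landing near 3,500,000, which is inside
 * [3/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX].
 */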
1054 
1055 static void
1056 sched_interact_fork(struct ksegrp *kg)
1057 {
1058 	int ratio;
1059 	int sum;
1060 
1061 	sum = kg->kg_runtime + kg->kg_slptime;
1062 	if (sum > SCHED_SLP_RUN_FORK) {
1063 		ratio = sum / SCHED_SLP_RUN_FORK;
1064 		kg->kg_runtime /= ratio;
1065 		kg->kg_slptime /= ratio;
1066 	}
1067 }
1068 
1069 static int
1070 sched_interact_score(struct ksegrp *kg)
1071 {
1072 	int div;
1073 
1074 	if (kg->kg_runtime > kg->kg_slptime) {
1075 		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
1076 		return (SCHED_INTERACT_HALF +
1077 		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
1078 	} else if (kg->kg_slptime > kg->kg_runtime) {
1079 		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
1080 		return (kg->kg_runtime / div);
1081 	}
1082 
1083 	/*
1084 	 * This can happen if slptime and runtime are 0.
1085 	 */
1086 	return (0);
1087 
1088 }
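
/*
 * Worked example (illustrative only; assumes stathz == hz == 1000 so one
 * second of run or sleep time accumulates roughly 1000 << 10 = 1,024,000):
 * a ksegrp with three seconds of run time and one of sleep has
 * kg_runtime = 3,072,000 and kg_slptime = 1,024,000, so
 * div = 3,072,000 / 50 = 61,440 and the score is 50 + (50 - 16) = 84,
 * well past SCHED_INTERACT_THRESH.  Swap the two values and the score
 * becomes 1,024,000 / 61,440 = 16, below the threshold of 30, so the
 * group is considered interactive.
 */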
1089 
1090 /*
1091  * This is only somewhat accurate since given many processes of the same
1092  * priority they will switch when their slices run out, which will be
1093  * at most SCHED_SLICE_MAX.
1094  */
1095 int
1096 sched_rr_interval(void)
1097 {
1098 	return (SCHED_SLICE_MAX);
1099 }
1100 
1101 static void
1102 sched_pctcpu_update(struct kse *ke)
1103 {
1104 	/*
1105 	 * Adjust counters and watermark for pctcpu calc.
1106 	 */
1107 	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
1108 		/*
1109 		 * Shift the tick count out so that the divide doesn't
1110 		 * round away our results.
1111 		 */
1112 		ke->ke_ticks <<= 10;
1113 		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
1114 			    SCHED_CPU_TICKS;
1115 		ke->ke_ticks >>= 10;
1116 	} else
1117 		ke->ke_ticks = 0;
1118 	ke->ke_ltick = ticks;
1119 	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
1120 }
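
/*
 * Worked example (illustrative only; assumes stathz == hz == 1000 so
 * ke_ticks and the hz-based window use comparable units): if ke_ftick is
 * 11,000 ticks in the past and ke_ticks = 5,500, the rescale computes
 * ((5,500 << 10) / 11,000) * 10,000 >> 10 = 5,000, i.e. the same 50% duty
 * cycle expressed over exactly one SCHED_CPU_TICKS window, after which
 * ke_ftick is pulled up to the start of that window.
 */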
1121 
1122 void
1123 sched_prio(struct thread *td, u_char prio)
1124 {
1125 	struct kse *ke;
1126 
1127 	ke = td->td_kse;
1128 	mtx_assert(&sched_lock, MA_OWNED);
1129 	if (TD_ON_RUNQ(td)) {
1130 		/*
1131 		 * If the priority has been elevated due to priority
1132 		 * propagation, we may have to move ourselves to a new
1133 		 * queue.  We still call adjustrunqueue below in case kse
1134 		 * needs to fix things up.
1135 		 */
1136 		if (prio < td->td_priority && ke &&
1137 		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
1138 		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
1139 			runq_remove(ke->ke_runq, ke);
1140 			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
1141 			runq_add(ke->ke_runq, ke);
1142 		}
1143 		adjustrunqueue(td, prio);
1144 	} else
1145 		td->td_priority = prio;
1146 }
1147 
1148 void
1149 sched_switch(struct thread *td)
1150 {
1151 	struct thread *newtd;
1152 	struct kse *ke;
1153 
1154 	mtx_assert(&sched_lock, MA_OWNED);
1155 
1156 	ke = td->td_kse;
1157 
1158 	td->td_last_kse = ke;
1159         td->td_lastcpu = td->td_oncpu;
1160 	td->td_oncpu = NOCPU;
1161         td->td_flags &= ~TDF_NEEDRESCHED;
1162 
1163 	/*
1164 	 * If the KSE has been assigned it may be in the process of switching
1165 	 * to the new cpu.  This is the case in sched_bind().
1166 	 */
1167 	if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
1168 		if (TD_IS_RUNNING(td)) {
1169 			kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1170 			setrunqueue(td);
1171 		} else {
1172 			if (ke->ke_runq) {
1173 				kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1174 			} else if ((td->td_flags & TDF_IDLETD) == 0)
1175 				backtrace();
1176 			/*
1177 			 * We will not be on the run queue. So we must be
1178 			 * sleeping or similar.
1179 			 */
1180 			if (td->td_proc->p_flag & P_SA)
1181 				kse_reassign(ke);
1182 		}
1183 	}
1184 	newtd = choosethread();
1185 	if (td != newtd)
1186 		cpu_switch(td, newtd);
1187 	sched_lock.mtx_lock = (uintptr_t)td;
1188 
1189 	td->td_oncpu = PCPU_GET(cpuid);
1190 }
1191 
1192 void
1193 sched_nice(struct ksegrp *kg, int nice)
1194 {
1195 	struct kse *ke;
1196 	struct thread *td;
1197 	struct kseq *kseq;
1198 
1199 	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
1200 	mtx_assert(&sched_lock, MA_OWNED);
1201 	/*
1202 	 * We need to adjust the nice counts for running KSEs.
1203 	 */
1204 	if (kg->kg_pri_class == PRI_TIMESHARE)
1205 		FOREACH_KSE_IN_GROUP(kg, ke) {
1206 			if (ke->ke_runq == NULL)
1207 				continue;
1208 			kseq = KSEQ_CPU(ke->ke_cpu);
1209 			kseq_nice_rem(kseq, kg->kg_nice);
1210 			kseq_nice_add(kseq, nice);
1211 		}
1212 	kg->kg_nice = nice;
1213 	sched_priority(kg);
1214 	FOREACH_THREAD_IN_GROUP(kg, td)
1215 		td->td_flags |= TDF_NEEDRESCHED;
1216 }
1217 
1218 void
1219 sched_sleep(struct thread *td)
1220 {
1221 	mtx_assert(&sched_lock, MA_OWNED);
1222 
1223 	td->td_slptime = ticks;
1224 	td->td_base_pri = td->td_priority;
1225 
1226 	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
1227 	    td->td_kse, td->td_slptime);
1228 }
1229 
1230 void
1231 sched_wakeup(struct thread *td)
1232 {
1233 	mtx_assert(&sched_lock, MA_OWNED);
1234 
1235 	/*
1236 	 * Let the kseg know how long we slept for.  This is because process
1237 	 * interactivity behavior is modeled in the kseg.
1238 	 */
1239 	if (td->td_slptime) {
1240 		struct ksegrp *kg;
1241 		int hzticks;
1242 
1243 		kg = td->td_ksegrp;
1244 		hzticks = (ticks - td->td_slptime) << 10;
1245 		if (hzticks >= SCHED_SLP_RUN_MAX) {
1246 			kg->kg_slptime = SCHED_SLP_RUN_MAX;
1247 			kg->kg_runtime = 1;
1248 		} else {
1249 			kg->kg_slptime += hzticks;
1250 			sched_interact_update(kg);
1251 		}
1252 		sched_priority(kg);
1253 		if (td->td_kse)
1254 			sched_slice(td->td_kse);
1255 		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
1256 		    td->td_kse, hzticks);
1257 		td->td_slptime = 0;
1258 	}
1259 	setrunqueue(td);
1260 }
1261 
1262 /*
1263  * Penalize the parent for creating a new child and initialize the child's
1264  * priority.
1265  */
1266 void
1267 sched_fork(struct proc *p, struct proc *p1)
1268 {
1269 
1270 	mtx_assert(&sched_lock, MA_OWNED);
1271 
1272 	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
1273 	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
1274 	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
1275 }
1276 
1277 void
1278 sched_fork_kse(struct kse *ke, struct kse *child)
1279 {
1280 
1281 	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
1282 	child->ke_cpu = ke->ke_cpu;
1283 	child->ke_runq = NULL;
1284 
1285 	/* Grab our parent's cpu estimation information. */
1286 	child->ke_ticks = ke->ke_ticks;
1287 	child->ke_ltick = ke->ke_ltick;
1288 	child->ke_ftick = ke->ke_ftick;
1289 }
1290 
1291 void
1292 sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
1293 {
1294 	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
1295 
1296 	child->kg_slptime = kg->kg_slptime;
1297 	child->kg_runtime = kg->kg_runtime;
1298 	child->kg_user_pri = kg->kg_user_pri;
1299 	child->kg_nice = kg->kg_nice;
1300 	sched_interact_fork(child);
1301 	kg->kg_runtime += tickincr << 10;
1302 	sched_interact_update(kg);
1303 
1304 	CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)",
1305 	    kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime,
1306 	    child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime);
1307 }
1308 
1309 void
1310 sched_fork_thread(struct thread *td, struct thread *child)
1311 {
1312 }
1313 
1314 void
1315 sched_class(struct ksegrp *kg, int class)
1316 {
1317 	struct kseq *kseq;
1318 	struct kse *ke;
1319 	int nclass;
1320 	int oclass;
1321 
1322 	mtx_assert(&sched_lock, MA_OWNED);
1323 	if (kg->kg_pri_class == class)
1324 		return;
1325 
1326 	nclass = PRI_BASE(class);
1327 	oclass = PRI_BASE(kg->kg_pri_class);
1328 	FOREACH_KSE_IN_GROUP(kg, ke) {
1329 		if (ke->ke_state != KES_ONRUNQ &&
1330 		    ke->ke_state != KES_THREAD)
1331 			continue;
1332 		kseq = KSEQ_CPU(ke->ke_cpu);
1333 
1334 #ifdef SMP
1335 		/*
1336 		 * On SMP if we're on the RUNQ we must adjust the transferable
1337 		 * count because we could be changing to or from an interrupt
1338 		 * class.
1339 		 */
1340 		if (ke->ke_state == KES_ONRUNQ) {
1341 			if (KSE_CAN_MIGRATE(ke, oclass)) {
1342 				kseq->ksq_transferable--;
1343 				kseq->ksq_group->ksg_transferable--;
1344 			}
1345 			if (KSE_CAN_MIGRATE(ke, nclass)) {
1346 				kseq->ksq_transferable++;
1347 				kseq->ksq_group->ksg_transferable++;
1348 			}
1349 		}
1350 #endif
1351 		if (oclass == PRI_TIMESHARE) {
1352 			kseq->ksq_load_timeshare--;
1353 			kseq_nice_rem(kseq, kg->kg_nice);
1354 		}
1355 		if (nclass == PRI_TIMESHARE) {
1356 			kseq->ksq_load_timeshare++;
1357 			kseq_nice_add(kseq, kg->kg_nice);
1358 		}
1359 	}
1360 
1361 	kg->kg_pri_class = class;
1362 }
1363 
1364 /*
1365  * Return some of the child's priority and interactivity to the parent.
1366  */
1367 void
1368 sched_exit(struct proc *p, struct proc *child)
1369 {
1370 	mtx_assert(&sched_lock, MA_OWNED);
1371 	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
1372 	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
1373 }
1374 
1375 void
1376 sched_exit_kse(struct kse *ke, struct kse *child)
1377 {
1378 	kseq_load_rem(KSEQ_CPU(child->ke_cpu), child);
1379 }
1380 
1381 void
1382 sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
1383 {
1384 	/* kg->kg_slptime += child->kg_slptime; */
1385 	kg->kg_runtime += child->kg_runtime;
1386 	sched_interact_update(kg);
1387 }
1388 
1389 void
1390 sched_exit_thread(struct thread *td, struct thread *child)
1391 {
1392 }
1393 
1394 void
1395 sched_clock(struct thread *td)
1396 {
1397 	struct kseq *kseq;
1398 	struct ksegrp *kg;
1399 	struct kse *ke;
1400 
1401 	/*
1402 	 * sched_setup() apparently happens prior to stathz being set.  We
1403 	 * need to resolve the timers earlier in the boot so we can avoid
1404 	 * calculating this here.
1405 	 */
1406 	if (realstathz == 0) {
1407 		realstathz = stathz ? stathz : hz;
1408 		tickincr = hz / realstathz;
1409 		/*
1410 		 * XXX This does not work for values of stathz that are much
1411 		 * larger than hz.
1412 		 */
1413 		if (tickincr == 0)
1414 			tickincr = 1;
1415 	}
1416 
1417 	ke = td->td_kse;
1418 	kg = ke->ke_ksegrp;
1419 
1420 	mtx_assert(&sched_lock, MA_OWNED);
1421 	/* Adjust ticks for pctcpu */
1422 	ke->ke_ticks++;
1423 	ke->ke_ltick = ticks;
1424 
1425 	/* Go up to one second beyond our max and then trim back down */
1426 	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
1427 		sched_pctcpu_update(ke);
1428 
1429 	if (td->td_flags & TDF_IDLETD)
1430 		return;
1431 
1432 	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
1433 	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
1434 	/*
1435 	 * We only do slicing code for TIMESHARE ksegrps.
1436 	 */
1437 	if (kg->kg_pri_class != PRI_TIMESHARE)
1438 		return;
1439 	/*
1440 	 * We used a tick; charge it to the ksegrp so that we can compute our
1441 	 * interactivity.
1442 	 */
1443 	kg->kg_runtime += tickincr << 10;
1444 	sched_interact_update(kg);
1445 
1446 	/*
1447 	 * We used up one time slice.
1448 	 */
1449 	if (--ke->ke_slice > 0)
1450 		return;
1451 	/*
1452 	 * We're out of time, recompute priorities and requeue.
1453 	 */
1454 	kseq = KSEQ_SELF();
1455 	kseq_load_rem(kseq, ke);
1456 	sched_priority(kg);
1457 	sched_slice(ke);
1458 	if (SCHED_CURR(kg, ke))
1459 		ke->ke_runq = kseq->ksq_curr;
1460 	else
1461 		ke->ke_runq = kseq->ksq_next;
1462 	kseq_load_add(kseq, ke);
1463 	td->td_flags |= TDF_NEEDRESCHED;
1464 }
1465 
1466 int
1467 sched_runnable(void)
1468 {
1469 	struct kseq *kseq;
1470 	int load;
1471 
1472 	load = 1;
1473 
1474 	kseq = KSEQ_SELF();
1475 #ifdef SMP
1476 	if (kseq->ksq_assigned) {
1477 		mtx_lock_spin(&sched_lock);
1478 		kseq_assign(kseq);
1479 		mtx_unlock_spin(&sched_lock);
1480 	}
1481 #endif
1482 	if ((curthread->td_flags & TDF_IDLETD) != 0) {
1483 		if (kseq->ksq_load > 0)
1484 			goto out;
1485 	} else
1486 		if (kseq->ksq_load - 1 > 0)
1487 			goto out;
1488 	load = 0;
1489 out:
1490 	return (load);
1491 }
1492 
1493 void
1494 sched_userret(struct thread *td)
1495 {
1496 	struct ksegrp *kg;
1497 
1498 	kg = td->td_ksegrp;
1499 
1500 	if (td->td_priority != kg->kg_user_pri) {
1501 		mtx_lock_spin(&sched_lock);
1502 		td->td_priority = kg->kg_user_pri;
1503 		mtx_unlock_spin(&sched_lock);
1504 	}
1505 }
1506 
1507 struct kse *
1508 sched_choose(void)
1509 {
1510 	struct kseq *kseq;
1511 	struct kse *ke;
1512 
1513 	mtx_assert(&sched_lock, MA_OWNED);
1514 	kseq = KSEQ_SELF();
1515 #ifdef SMP
1516 restart:
1517 	if (kseq->ksq_assigned)
1518 		kseq_assign(kseq);
1519 #endif
1520 	ke = kseq_choose(kseq);
1521 	if (ke) {
1522 #ifdef SMP
1523 		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
1524 			if (kseq_idled(kseq) == 0)
1525 				goto restart;
1526 #endif
1527 		kseq_runq_rem(kseq, ke);
1528 		ke->ke_state = KES_THREAD;
1529 
1530 		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
1531 			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
1532 			    ke, ke->ke_runq, ke->ke_slice,
1533 			    ke->ke_thread->td_priority);
1534 		}
1535 		return (ke);
1536 	}
1537 #ifdef SMP
1538 	if (kseq_idled(kseq) == 0)
1539 		goto restart;
1540 #endif
1541 	return (NULL);
1542 }
1543 
1544 void
1545 sched_add(struct thread *td)
1546 {
1547 	struct kseq *kseq;
1548 	struct ksegrp *kg;
1549 	struct kse *ke;
1550 	int class;
1551 
1552 	mtx_assert(&sched_lock, MA_OWNED);
1553 	ke = td->td_kse;
1554 	kg = td->td_ksegrp;
1555 	if (ke->ke_flags & KEF_ASSIGNED)
1556 		return;
1557 	kseq = KSEQ_SELF();
1558 	KASSERT((ke->ke_thread != NULL),
1559 	    ("sched_add: No thread on KSE"));
1560 	KASSERT((ke->ke_thread->td_kse != NULL),
1561 	    ("sched_add: No KSE on thread"));
1562 	KASSERT(ke->ke_state != KES_ONRUNQ,
1563 	    ("sched_add: kse %p (%s) already in run queue", ke,
1564 	    ke->ke_proc->p_comm));
1565 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1566 	    ("sched_add: process swapped out"));
1567 	KASSERT(ke->ke_runq == NULL,
1568 	    ("sched_add: KSE %p is still assigned to a run queue", ke));
1569 
1570 	class = PRI_BASE(kg->kg_pri_class);
1571 	switch (class) {
1572 	case PRI_ITHD:
1573 	case PRI_REALTIME:
1574 		ke->ke_runq = kseq->ksq_curr;
1575 		ke->ke_slice = SCHED_SLICE_MAX;
1576 		ke->ke_cpu = PCPU_GET(cpuid);
1577 		break;
1578 	case PRI_TIMESHARE:
1579 		if (SCHED_CURR(kg, ke))
1580 			ke->ke_runq = kseq->ksq_curr;
1581 		else
1582 			ke->ke_runq = kseq->ksq_next;
1583 		break;
1584 	case PRI_IDLE:
1585 		/*
1586 		 * This is for priority propagation.
1587 		 */
1588 		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
1589 			ke->ke_runq = kseq->ksq_curr;
1590 		else
1591 			ke->ke_runq = &kseq->ksq_idle;
1592 		ke->ke_slice = SCHED_SLICE_MIN;
1593 		break;
1594 	default:
1595 		panic("Unknown pri class.");
1596 		break;
1597 	}
1598 #ifdef SMP
1599 	if (ke->ke_cpu != PCPU_GET(cpuid)) {
1600 		ke->ke_runq = NULL;
1601 		kseq_notify(ke, ke->ke_cpu);
1602 		return;
1603 	}
1604 	/*
1605 	 * If we had been idle, clear our bit in the group and potentially
1606 	 * the global bitmap.  If not, see if we should transfer this thread.
1607 	 */
1608 	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
1609 	    (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
1610 		/*
1611 		 * Check to see if our group is unidling, and if so, remove it
1612 		 * from the global idle mask.
1613 		 */
1614 		if (kseq->ksq_group->ksg_idlemask ==
1615 		    kseq->ksq_group->ksg_cpumask)
1616 			atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
1617 		/*
1618 		 * Now remove ourselves from the group specific idle mask.
1619 		 */
1620 		kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
1621 	} else if (kseq->ksq_load > 1 && KSE_CAN_MIGRATE(ke, class))
1622 		if (kseq_transfer(kseq, ke, class))
1623 			return;
1624 #endif
1625         if (td->td_priority < curthread->td_priority)
1626                 curthread->td_flags |= TDF_NEEDRESCHED;
1627 
1628 	ke->ke_ksegrp->kg_runq_kses++;
1629 	ke->ke_state = KES_ONRUNQ;
1630 
1631 	kseq_runq_add(kseq, ke);
1632 	kseq_load_add(kseq, ke);
1633 }
1634 
1635 void
1636 sched_rem(struct thread *td)
1637 {
1638 	struct kseq *kseq;
1639 	struct kse *ke;
1640 
1641 	ke = td->td_kse;
1642 	/*
1643 	 * It is safe to just return here because sched_rem() is only ever
1644 	 * used in places where we're immediately going to add the
1645 	 * kse back on again.  In that case it'll be added with the correct
1646 	 * thread and priority when the caller drops the sched_lock.
1647 	 */
1648 	if (ke->ke_flags & KEF_ASSIGNED)
1649 		return;
1650 	mtx_assert(&sched_lock, MA_OWNED);
1651 	KASSERT((ke->ke_state == KES_ONRUNQ),
1652 	    ("sched_rem: KSE not on run queue"));
1653 
1654 	ke->ke_state = KES_THREAD;
1655 	ke->ke_ksegrp->kg_runq_kses--;
1656 	kseq = KSEQ_CPU(ke->ke_cpu);
1657 	kseq_runq_rem(kseq, ke);
1658 	kseq_load_rem(kseq, ke);
1659 }
1660 
1661 fixpt_t
1662 sched_pctcpu(struct thread *td)
1663 {
1664 	fixpt_t pctcpu;
1665 	struct kse *ke;
1666 
1667 	pctcpu = 0;
1668 	ke = td->td_kse;
1669 	if (ke == NULL)
1670 		return (0);
1671 
1672 	mtx_lock_spin(&sched_lock);
1673 	if (ke->ke_ticks) {
1674 		int rtick;
1675 
1676 		/*
1677 		 * Don't update more frequently than twice a second.  Allowing
1678 		 * this causes the cpu usage to decay away too quickly due to
1679 		 * rounding errors.
1680 		 */
1681 		if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
1682 		    ke->ke_ltick < (ticks - (hz / 2)))
1683 			sched_pctcpu_update(ke);
1684 		/* How many rtick per second ? */
1685 		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
1686 		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
1687 	}
1688 
1689 	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
1690 	mtx_unlock_spin(&sched_lock);
1691 
1692 	return (pctcpu);
1693 }
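
/*
 * Worked example (illustrative): a kse that was on cpu for every stat tick
 * of the 10 second window has ke_ticks of roughly realstathz * 10, so
 * rtick = realstathz and pctcpu works out to
 * (FSCALE * ((FSCALE * realstathz) / realstathz)) >> FSHIFT = FSCALE,
 * i.e. a fixed point 1.0 (100%).  A half-busy kse yields roughly
 * FSCALE / 2, modulo integer truncation.
 */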
1694 
1695 void
1696 sched_bind(struct thread *td, int cpu)
1697 {
1698 	struct kse *ke;
1699 
1700 	mtx_assert(&sched_lock, MA_OWNED);
1701 	ke = td->td_kse;
1702 	ke->ke_flags |= KEF_BOUND;
1703 #ifdef SMP
1704 	if (PCPU_GET(cpuid) == cpu)
1705 		return;
1706 	/* sched_rem without the runq_remove */
1707 	ke->ke_state = KES_THREAD;
1708 	ke->ke_ksegrp->kg_runq_kses--;
1709 	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1710 	kseq_notify(ke, cpu);
1711 	/* When we return from mi_switch we'll be on the correct cpu. */
1712 	mi_switch(SW_VOL);
1713 #endif
1714 }
1715 
1716 void
1717 sched_unbind(struct thread *td)
1718 {
1719 	mtx_assert(&sched_lock, MA_OWNED);
1720 	td->td_kse->ke_flags &= ~KEF_BOUND;
1721 }
1722 
1723 int
1724 sched_load(void)
1725 {
1726 #ifdef SMP
1727 	int total;
1728 	int i;
1729 
1730 	total = 0;
1731 	for (i = 0; i <= ksg_maxid; i++)
1732 		total += KSEQ_GROUP(i)->ksg_load;
1733 	return (total);
1734 #else
1735 	return (KSEQ_SELF()->ksq_sysload);
1736 #endif
1737 }
1738 
1739 int
1740 sched_sizeof_kse(void)
1741 {
1742 	return (sizeof(struct kse) + sizeof(struct ke_sched));
1743 }
1744 
1745 int
1746 sched_sizeof_ksegrp(void)
1747 {
1748 	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
1749 }
1750 
1751 int
1752 sched_sizeof_proc(void)
1753 {
1754 	return (sizeof(struct proc));
1755 }
1756 
1757 int
1758 sched_sizeof_thread(void)
1759 {
1760 	return (sizeof(struct thread) + sizeof(struct td_sched));
1761 }
1762