xref: /freebsd/sys/kern/sched_ule.c (revision e038d3542229c5609dd36f4dfbddd59e092b2e39)
1 /*-
2  * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kdb.h>
33 #include <sys/kernel.h>
34 #include <sys/ktr.h>
35 #include <sys/lock.h>
36 #include <sys/mutex.h>
37 #include <sys/proc.h>
38 #include <sys/resource.h>
39 #include <sys/resourcevar.h>
40 #include <sys/sched.h>
41 #include <sys/smp.h>
42 #include <sys/sx.h>
43 #include <sys/sysctl.h>
44 #include <sys/sysproto.h>
45 #include <sys/vmmeter.h>
46 #ifdef KTRACE
47 #include <sys/uio.h>
48 #include <sys/ktrace.h>
49 #endif
50 
51 #include <machine/cpu.h>
52 #include <machine/smp.h>
53 
54 #define KTR_ULE         KTR_NFS
55 
56 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
57 /* XXX This is bogus compatibility crap for ps */
58 static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
59 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
60 
61 static void sched_setup(void *dummy);
62 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
63 
64 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
65 
66 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
67     "Scheduler name");
68 
69 static int slice_min = 1;
70 SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");
71 
72 static int slice_max = 10;
73 SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");
74 
75 int realstathz;
76 int tickincr = 1;
77 
78 /*
79  * These data structures are allocated within their parent data structure but
80  * are scheduler specific.
81  */
82 
83 struct ke_sched {
84 	int		ske_slice;
85 	struct runq	*ske_runq;
86 	/* The following variables are only used for pctcpu calculation */
87 	int		ske_ltick;	/* Last tick that we were running on */
88 	int		ske_ftick;	/* First tick that we were running on */
89 	int		ske_ticks;	/* Tick count */
90 	/* CPU that we have affinity for. */
91 	u_char		ske_cpu;
92 };
93 #define	ke_slice	ke_sched->ske_slice
94 #define	ke_runq		ke_sched->ske_runq
95 #define	ke_ltick	ke_sched->ske_ltick
96 #define	ke_ftick	ke_sched->ske_ftick
97 #define	ke_ticks	ke_sched->ske_ticks
98 #define	ke_cpu		ke_sched->ske_cpu
99 #define	ke_assign	ke_procq.tqe_next
100 
101 #define	KEF_ASSIGNED	KEF_SCHED0	/* KSE is being migrated. */
102 #define	KEF_BOUND	KEF_SCHED1	/* KSE can not migrate. */
103 
104 struct kg_sched {
105 	int	skg_slptime;		/* Number of ticks we voluntarily slept */
106 	int	skg_runtime;		/* Number of ticks we were running */
107 };
108 #define	kg_slptime	kg_sched->skg_slptime
109 #define	kg_runtime	kg_sched->skg_runtime
110 
111 struct td_sched {
112 	int	std_slptime;
113 };
114 #define	td_slptime	td_sched->std_slptime
115 
116 struct td_sched td_sched;
117 struct ke_sched ke_sched;
118 struct kg_sched kg_sched;
119 
120 struct ke_sched *kse0_sched = &ke_sched;
121 struct kg_sched *ksegrp0_sched = &kg_sched;
122 struct p_sched *proc0_sched = NULL;
123 struct td_sched *thread0_sched = &td_sched;
124 
125 /*
126  * The priority is primarily determined by the interactivity score.  Thus, we
127  * give lower (better) priorities to kse groups that use less CPU.  The nice
128  * value is then directly added to this to allow nice to have some effect
129  * on latency.
130  *
131  * PRI_RANGE:	Total priority range for timeshare threads.
132  * PRI_NRESV:	Number of nice values.
133  * PRI_BASE:	The start of the dynamic range.
134  */
135 #define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
136 #define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
137 #define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
138 #define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
139 #define	SCHED_PRI_INTERACT(score)					\
140     ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)
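/*
 * Example of the mapping above: a score of 0 keeps PRI_MIN_TIMESHARE, a score
 * of SCHED_INTERACT_THRESH (30) adds roughly 30% of SCHED_PRI_RANGE, and a
 * score of SCHED_INTERACT_MAX (100) adds the full range; the process p_nice
 * is then added and the result is clamped to the timeshare range in
 * sched_priority().
 */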
141 
142 /*
143  * These determine the interactivity of a process.
144  *
145  * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
146  *		before throttling back.
147  * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
148  * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
149  * INTERACT_THRESH:	Threshold for placement on the current runq.
150  */
151 #define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
152 #define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
153 #define	SCHED_INTERACT_MAX	(100)
154 #define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
155 #define	SCHED_INTERACT_THRESH	(30)
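/*
 * Example (assuming hz = 1000): SCHED_SLP_RUN_MAX is (1000 * 5) << 10, i.e.
 * five seconds of combined sleep + run history kept in ticks scaled by 2^10,
 * and SCHED_SLP_RUN_FORK is (1000 / 2) << 10, i.e. half a second of history
 * inherited by a child at fork.
 */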
156 
157 /*
158  * These parameters and macros determine the size of the time slice that is
159  * granted to each thread.
160  *
161  * SLICE_MIN:	Minimum time slice granted, in units of ticks.
162  * SLICE_MAX:	Maximum time slice granted.
163  * SLICE_RANGE:	Range of available time slices scaled by hz.
164  * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
165  * SLICE_NICE:  Determines the slice granted for a given scaled nice value.
166  * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
167  */
168 #define	SCHED_SLICE_MIN			(slice_min)
169 #define	SCHED_SLICE_MAX			(slice_max)
170 #define	SCHED_SLICE_INTERACTIVE		(slice_max)
171 #define	SCHED_SLICE_NTHRESH	(SCHED_PRI_NHALF - 1)
172 #define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
173 #define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
174 #define	SCHED_SLICE_NICE(nice)						\
175     (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))
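/*
 * Worked example (assuming hz = 1000, so slice_min = 10 and slice_max = 142
 * as set in sched_setup(), and the usual nice range of -20..20):
 * SCHED_SLICE_RANGE is 133 and SCHED_SLICE_NTHRESH is 19.  A kse whose nice
 * equals the least nice on the queue (scaled nice 0) receives the full 142
 * ticks, while one 10 nice points away receives 142 - (10 * 133) / 19 = 72
 * ticks.
 */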
176 
177 /*
178  * This macro determines whether or not the kse belongs on the current or
179  * next run queue.
180  */
181 #define	SCHED_INTERACTIVE(kg)						\
182     (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
183 #define	SCHED_CURR(kg, ke)						\
184     (ke->ke_thread->td_priority < kg->kg_user_pri ||			\
185     SCHED_INTERACTIVE(kg))
186 
187 /*
188  * Cpu percentage computation macros and defines.
189  *
190  * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
191  * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
192  */
193 
194 #define	SCHED_CPU_TIME	10
195 #define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)
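/*
 * Example: assuming hz = 1000, SCHED_CPU_TICKS is 10000, so ke_ticks are
 * averaged over a sliding ten second window by sched_pctcpu_update().
 */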
196 
197 /*
198  * kseq - per processor runqs and statistics.
199  */
200 struct kseq {
201 	struct runq	ksq_idle;		/* Queue of IDLE threads. */
202 	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
203 	struct runq	*ksq_next;		/* Next timeshare queue. */
204 	struct runq	*ksq_curr;		/* Current queue. */
205 	int		ksq_load_timeshare;	/* Load for timeshare. */
206 	int		ksq_load;		/* Aggregate load. */
207 	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
208 	short		ksq_nicemin;		/* Least nice. */
209 #ifdef SMP
210 	int			ksq_transferable;
211 	LIST_ENTRY(kseq)	ksq_siblings;	/* Next in kseq group. */
212 	struct kseq_group	*ksq_group;	/* Our processor group. */
213 	volatile struct kse	*ksq_assigned;	/* assigned by another CPU. */
214 #else
215 	int		ksq_sysload;		/* For loadavg, !ITHD load. */
216 #endif
217 };
218 
219 #ifdef SMP
220 /*
221  * kseq groups are groups of processors which can cheaply share threads.  When
222  * one processor in the group goes idle it will check the runqs of the other
223  * processors in its group prior to halting and waiting for an interrupt.
224  * These groups are suitable for SMT (Simultaneous Multi-Threading) and not NUMA.
225  * In a NUMA environment we'd want an idle bitmap per group and a two-tiered
226  * load balancer.
227  */
228 struct kseq_group {
229 	int	ksg_cpus;		/* Count of CPUs in this kseq group. */
230 	cpumask_t ksg_cpumask;		/* Mask of cpus in this group. */
231 	cpumask_t ksg_idlemask;		/* Idle cpus in this group. */
232 	cpumask_t ksg_mask;		/* Bit mask for first cpu. */
233 	int	ksg_load;		/* Total load of this group. */
234 	int	ksg_transferable;	/* Transferable load of this group. */
235 	LIST_HEAD(, kseq)	ksg_members; /* Linked list of all members. */
236 };
237 #endif
238 
239 /*
240  * One kse queue per processor.
241  */
242 #ifdef SMP
243 static cpumask_t kseq_idle;
244 static int ksg_maxid;
245 static struct kseq	kseq_cpu[MAXCPU];
246 static struct kseq_group kseq_groups[MAXCPU];
247 static int bal_tick;
248 static int gbal_tick;
249 
250 #define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
251 #define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
252 #define	KSEQ_ID(x)	((x) - kseq_cpu)
253 #define	KSEQ_GROUP(x)	(&kseq_groups[(x)])
254 #else	/* !SMP */
255 static struct kseq	kseq_cpu;
256 
257 #define	KSEQ_SELF()	(&kseq_cpu)
258 #define	KSEQ_CPU(x)	(&kseq_cpu)
259 #endif
260 
261 static void sched_add_internal(struct thread *td, int preemptive);
262 static void sched_slice(struct kse *ke);
263 static void sched_priority(struct ksegrp *kg);
264 static int sched_interact_score(struct ksegrp *kg);
265 static void sched_interact_update(struct ksegrp *kg);
266 static void sched_interact_fork(struct ksegrp *kg);
267 static void sched_pctcpu_update(struct kse *ke);
268 
269 /* Operations on per processor queues */
270 static struct kse * kseq_choose(struct kseq *kseq);
271 static void kseq_setup(struct kseq *kseq);
272 static void kseq_load_add(struct kseq *kseq, struct kse *ke);
273 static void kseq_load_rem(struct kseq *kseq, struct kse *ke);
274 static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke);
275 static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke);
276 static void kseq_nice_add(struct kseq *kseq, int nice);
277 static void kseq_nice_rem(struct kseq *kseq, int nice);
278 void kseq_print(int cpu);
279 #ifdef SMP
280 static int kseq_transfer(struct kseq *ksq, struct kse *ke, int class);
281 static struct kse *runq_steal(struct runq *rq);
282 static void sched_balance(void);
283 static void sched_balance_groups(void);
284 static void sched_balance_group(struct kseq_group *ksg);
285 static void sched_balance_pair(struct kseq *high, struct kseq *low);
286 static void kseq_move(struct kseq *from, int cpu);
287 static int kseq_idled(struct kseq *kseq);
288 static void kseq_notify(struct kse *ke, int cpu);
289 static void kseq_assign(struct kseq *);
290 static struct kse *kseq_steal(struct kseq *kseq, int stealidle);
291 /*
292  * On P4 Xeons the round-robin interrupt delivery is broken.  As a result of
293  * this, we can't pin interrupts to the cpu that they were delivered to,
294  * otherwise all ithreads only run on CPU 0.
295  */
296 #ifdef __i386__
297 #define	KSE_CAN_MIGRATE(ke, class)					\
298     ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
299 #else /* !__i386__ */
300 #define	KSE_CAN_MIGRATE(ke, class)					\
301     ((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 &&		\
302     ((ke)->ke_flags & KEF_BOUND) == 0)
303 #endif /* !__i386__ */
304 #endif
305 
306 void
307 kseq_print(int cpu)
308 {
309 	struct kseq *kseq;
310 	int i;
311 
312 	kseq = KSEQ_CPU(cpu);
313 
314 	printf("kseq:\n");
315 	printf("\tload:           %d\n", kseq->ksq_load);
316 	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
317 #ifdef SMP
318 	printf("\tload transferable: %d\n", kseq->ksq_transferable);
319 #endif
320 	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
321 	printf("\tnice counts:\n");
322 	for (i = 0; i < SCHED_PRI_NRESV; i++)
323 		if (kseq->ksq_nice[i])
324 			printf("\t\t%d = %d\n",
325 			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
326 }
327 
328 static __inline void
329 kseq_runq_add(struct kseq *kseq, struct kse *ke)
330 {
331 #ifdef SMP
332 	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
333 		kseq->ksq_transferable++;
334 		kseq->ksq_group->ksg_transferable++;
335 	}
336 #endif
337 	runq_add(ke->ke_runq, ke);
338 }
339 
340 static __inline void
341 kseq_runq_rem(struct kseq *kseq, struct kse *ke)
342 {
343 #ifdef SMP
344 	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
345 		kseq->ksq_transferable--;
346 		kseq->ksq_group->ksg_transferable--;
347 	}
348 #endif
349 	runq_remove(ke->ke_runq, ke);
350 }
351 
352 static void
353 kseq_load_add(struct kseq *kseq, struct kse *ke)
354 {
355 	int class;
356 	mtx_assert(&sched_lock, MA_OWNED);
357 	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
358 	if (class == PRI_TIMESHARE)
359 		kseq->ksq_load_timeshare++;
360 	kseq->ksq_load++;
361 	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
362 #ifdef SMP
363 		kseq->ksq_group->ksg_load++;
364 #else
365 		kseq->ksq_sysload++;
366 #endif
367 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
368 		CTR6(KTR_ULE,
369 		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
370 		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
371 		    ke->ke_proc->p_nice, kseq->ksq_nicemin);
372 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
373 		kseq_nice_add(kseq, ke->ke_proc->p_nice);
374 }
375 
376 static void
377 kseq_load_rem(struct kseq *kseq, struct kse *ke)
378 {
379 	int class;
380 	mtx_assert(&sched_lock, MA_OWNED);
381 	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
382 	if (class == PRI_TIMESHARE)
383 		kseq->ksq_load_timeshare--;
384 	if (class != PRI_ITHD  && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
385 #ifdef SMP
386 		kseq->ksq_group->ksg_load--;
387 #else
388 		kseq->ksq_sysload--;
389 #endif
390 	kseq->ksq_load--;
391 	ke->ke_runq = NULL;
392 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
393 		kseq_nice_rem(kseq, ke->ke_proc->p_nice);
394 }
395 
396 static void
397 kseq_nice_add(struct kseq *kseq, int nice)
398 {
399 	mtx_assert(&sched_lock, MA_OWNED);
400 	/* Normalize to zero. */
401 	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
402 	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
403 		kseq->ksq_nicemin = nice;
404 }
405 
406 static void
407 kseq_nice_rem(struct kseq *kseq, int nice)
408 {
409 	int n;
410 
411 	mtx_assert(&sched_lock, MA_OWNED);
412 	/* Normalize to zero. */
413 	n = nice + SCHED_PRI_NHALF;
414 	kseq->ksq_nice[n]--;
415 	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));
416 
417 	/*
418 	 * If this wasn't the smallest nice value or there are more in
419 	 * this bucket we can just return.  Otherwise we have to recalculate
420 	 * the smallest nice.
421 	 */
422 	if (nice != kseq->ksq_nicemin ||
423 	    kseq->ksq_nice[n] != 0 ||
424 	    kseq->ksq_load_timeshare == 0)
425 		return;
426 
427 	for (; n < SCHED_PRI_NRESV; n++)
428 		if (kseq->ksq_nice[n]) {
429 			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
430 			return;
431 		}
432 }
433 
434 #ifdef SMP
435 /*
436  * sched_balance is a simple CPU load balancing algorithm.  It operates by
437  * finding the least loaded and most loaded cpu and equalizing their load
438  * by migrating some processes.
439  *
440  * Dealing only with two CPUs at a time has two advantages.  Firstly, most
441  * installations will only have 2 cpus.  Secondly, load balancing too much at
442  * once can have an unpleasant effect on the system.  The scheduler rarely has
443  * enough information to make perfect decisions.  So this algorithm favors
444  * simplicity and more gradual effects on load in larger systems.
445  *
446  * It could be improved by considering the priorities and slices assigned to
447  * each task prior to balancing them.  There are many pathological cases with
448  * any approach and so the semi-random algorithm below may work as well as any.
449  *
450  */
451 static void
452 sched_balance(void)
453 {
454 	struct kseq_group *high;
455 	struct kseq_group *low;
456 	struct kseq_group *ksg;
457 	int cnt;
458 	int i;
459 
460 	if (smp_started == 0)
461 		goto out;
462 	low = high = NULL;
463 	i = random() % (ksg_maxid + 1);
464 	for (cnt = 0; cnt <= ksg_maxid; cnt++) {
465 		ksg = KSEQ_GROUP(i);
466 		/*
467 		 * Find the group with the highest load that has some
468 		 * threads to transfer.
469 		 */
470 		if ((high == NULL || ksg->ksg_load > high->ksg_load)
471 		    && ksg->ksg_transferable)
472 			high = ksg;
473 		if (low == NULL || ksg->ksg_load < low->ksg_load)
474 			low = ksg;
475 		if (++i > ksg_maxid)
476 			i = 0;
477 	}
478 	if (low != NULL && high != NULL && high != low)
479 		sched_balance_pair(LIST_FIRST(&high->ksg_members),
480 		    LIST_FIRST(&low->ksg_members));
481 out:
482 	bal_tick = ticks + (random() % (hz * 2));
483 }
484 
485 static void
486 sched_balance_groups(void)
487 {
488 	int i;
489 
490 	mtx_assert(&sched_lock, MA_OWNED);
491 	if (smp_started)
492 		for (i = 0; i <= ksg_maxid; i++)
493 			sched_balance_group(KSEQ_GROUP(i));
494 	gbal_tick = ticks + (random() % (hz * 2));
495 }
496 
497 static void
498 sched_balance_group(struct kseq_group *ksg)
499 {
500 	struct kseq *kseq;
501 	struct kseq *high;
502 	struct kseq *low;
503 	int load;
504 
505 	if (ksg->ksg_transferable == 0)
506 		return;
507 	low = NULL;
508 	high = NULL;
509 	LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
510 		load = kseq->ksq_load;
511 		if (high == NULL || load > high->ksq_load)
512 			high = kseq;
513 		if (low == NULL || load < low->ksq_load)
514 			low = kseq;
515 	}
516 	if (high != NULL && low != NULL && high != low)
517 		sched_balance_pair(high, low);
518 }
519 
520 static void
521 sched_balance_pair(struct kseq *high, struct kseq *low)
522 {
523 	int transferable;
524 	int high_load;
525 	int low_load;
526 	int move;
527 	int diff;
528 	int i;
529 
530 	/*
531 	 * If we're transferring within a group we have to use this specific
532 	 * kseq's transferable count, otherwise we can steal from other members
533 	 * of the group.
534 	 */
535 	if (high->ksq_group == low->ksq_group) {
536 		transferable = high->ksq_transferable;
537 		high_load = high->ksq_load;
538 		low_load = low->ksq_load;
539 	} else {
540 		transferable = high->ksq_group->ksg_transferable;
541 		high_load = high->ksq_group->ksg_load;
542 		low_load = low->ksq_group->ksg_load;
543 	}
544 	if (transferable == 0)
545 		return;
546 	/*
547 	 * Determine what the imbalance is and then adjust that to how many
548 	 * kses we actually have to give up (transferable).
549 	 */
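	/*
	 * For example, a high load of 5 against a low load of 2 gives
	 * diff = 3, which rounds up to move = 2 and is then clamped to the
	 * transferable count before the kseq_move() calls below.
	 */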
550 	diff = high_load - low_load;
551 	move = diff / 2;
552 	if (diff & 0x1)
553 		move++;
554 	move = min(move, transferable);
555 	for (i = 0; i < move; i++)
556 		kseq_move(high, KSEQ_ID(low));
557 	return;
558 }
559 
560 static void
561 kseq_move(struct kseq *from, int cpu)
562 {
563 	struct kseq *kseq;
564 	struct kseq *to;
565 	struct kse *ke;
566 
567 	kseq = from;
568 	to = KSEQ_CPU(cpu);
569 	ke = kseq_steal(kseq, 1);
570 	if (ke == NULL) {
571 		struct kseq_group *ksg;
572 
573 		ksg = kseq->ksq_group;
574 		LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
575 			if (kseq == from || kseq->ksq_transferable == 0)
576 				continue;
577 			ke = kseq_steal(kseq, 1);
578 			break;
579 		}
580 		if (ke == NULL)
581 			panic("kseq_move: No KSEs available with a "
582 			    "transferable count of %d\n",
583 			    ksg->ksg_transferable);
584 	}
585 	if (kseq == to)
586 		return;
587 	ke->ke_state = KES_THREAD;
588 	kseq_runq_rem(kseq, ke);
589 	kseq_load_rem(kseq, ke);
590 	kseq_notify(ke, cpu);
591 }
592 
593 static int
594 kseq_idled(struct kseq *kseq)
595 {
596 	struct kseq_group *ksg;
597 	struct kseq *steal;
598 	struct kse *ke;
599 
600 	ksg = kseq->ksq_group;
601 	/*
602 	 * If we're in a cpu group, try to steal kses from another cpu in
603 	 * the group before idling.
604 	 */
605 	if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
606 		LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
607 			if (steal == kseq || steal->ksq_transferable == 0)
608 				continue;
609 			ke = kseq_steal(steal, 0);
610 			if (ke == NULL)
611 				continue;
612 			ke->ke_state = KES_THREAD;
613 			kseq_runq_rem(steal, ke);
614 			kseq_load_rem(steal, ke);
615 			ke->ke_cpu = PCPU_GET(cpuid);
616 			sched_add_internal(ke->ke_thread, 0);
617 			return (0);
618 		}
619 	}
620 	/*
621 	 * We only set the idled bit when all of the cpus in the group are
622 	 * idle.  Otherwise we could get into a situation where a KSE bounces
623 	 * back and forth between two idle cores on separate physical CPUs.
624 	 */
625 	ksg->ksg_idlemask |= PCPU_GET(cpumask);
626 	if (ksg->ksg_idlemask != ksg->ksg_cpumask)
627 		return (1);
628 	atomic_set_int(&kseq_idle, ksg->ksg_mask);
629 	return (1);
630 }
631 
632 static void
633 kseq_assign(struct kseq *kseq)
634 {
635 	struct kse *nke;
636 	struct kse *ke;
637 
638 	do {
639 		ke = (struct kse *)kseq->ksq_assigned;
640 	} while (!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
641 	for (; ke != NULL; ke = nke) {
642 		nke = ke->ke_assign;
643 		ke->ke_flags &= ~KEF_ASSIGNED;
644 		sched_add_internal(ke->ke_thread, 0);
645 	}
646 }
647 
648 static void
649 kseq_notify(struct kse *ke, int cpu)
650 {
651 	struct kseq *kseq;
652 	struct thread *td;
653 	struct pcpu *pcpu;
654 
655 	ke->ke_cpu = cpu;
656 	ke->ke_flags |= KEF_ASSIGNED;
657 
658 	kseq = KSEQ_CPU(cpu);
659 
660 	/*
661 	 * Place a KSE on another cpu's queue and force a resched.
662 	 */
663 	do {
664 		ke->ke_assign = (struct kse *)kseq->ksq_assigned;
665 	} while (!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
666 	pcpu = pcpu_find(cpu);
667 	td = pcpu->pc_curthread;
668 	if (ke->ke_thread->td_priority < td->td_priority ||
669 	    td == pcpu->pc_idlethread) {
670 		td->td_flags |= TDF_NEEDRESCHED;
671 		ipi_selected(1 << cpu, IPI_AST);
672 	}
673 }
674 
675 static struct kse *
676 runq_steal(struct runq *rq)
677 {
678 	struct rqhead *rqh;
679 	struct rqbits *rqb;
680 	struct kse *ke;
681 	int word;
682 	int bit;
683 
684 	mtx_assert(&sched_lock, MA_OWNED);
685 	rqb = &rq->rq_status;
686 	for (word = 0; word < RQB_LEN; word++) {
687 		if (rqb->rqb_bits[word] == 0)
688 			continue;
689 		for (bit = 0; bit < RQB_BPW; bit++) {
690 			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
691 				continue;
692 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
693 			TAILQ_FOREACH(ke, rqh, ke_procq) {
694 				if (KSE_CAN_MIGRATE(ke,
695 				    PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
696 					return (ke);
697 			}
698 		}
699 	}
700 	return (NULL);
701 }
702 
703 static struct kse *
704 kseq_steal(struct kseq *kseq, int stealidle)
705 {
706 	struct kse *ke;
707 
708 	/*
709 	 * Steal from next first to try to get a non-interactive task that
710 	 * may not have run for a while.
711 	 */
712 	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
713 		return (ke);
714 	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
715 		return (ke);
716 	if (stealidle)
717 		return (runq_steal(&kseq->ksq_idle));
718 	return (NULL);
719 }
720 
721 static int
722 kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
723 {
724 	struct kseq_group *ksg;
725 	int cpu;
726 
727 	if (smp_started == 0)
728 		return (0);
729 	cpu = 0;
730 	ksg = kseq->ksq_group;
731 
732 	/*
733 	 * If there are any idle groups, give them our extra load.  The
734 	 * threshold at which we start to reassign kses has a large impact
735 	 * on the overall performance of the system.  Tuned too high and
736 	 * some CPUs may idle.  Too low and there will be excess migration
737 	 * and context switches.
738 	 */
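	/*
	 * Concretely: a two-cpu group starts pushing threads away once its
	 * combined load exceeds ksg_cpus * 2 = 4 and at least one other group
	 * is entirely idle.
	 */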
739 	if (ksg->ksg_load > (ksg->ksg_cpus * 2) && kseq_idle) {
740 		/*
741 		 * Multiple cpus could find this bit simultaneously
742 		 * but the race shouldn't be terrible.
743 		 */
744 		cpu = ffs(kseq_idle);
745 		if (cpu)
746 			atomic_clear_int(&kseq_idle, 1 << (cpu - 1));
747 	}
748 	/*
749 	 * If another cpu in this group has idled, assign a thread over
750 	 * to them after checking to see if there are idled groups.
751 	 */
752 	if (cpu == 0 && kseq->ksq_load > 1 && ksg->ksg_idlemask) {
753 		cpu = ffs(ksg->ksg_idlemask);
754 		if (cpu)
755 			ksg->ksg_idlemask &= ~(1 << (cpu - 1));
756 	}
757 	/*
758 	 * Now that we've found an idle CPU, migrate the thread.
759 	 */
760 	if (cpu) {
761 		cpu--;
762 		ke->ke_runq = NULL;
763 		kseq_notify(ke, cpu);
764 		return (1);
765 	}
766 	return (0);
767 }
768 
769 #endif	/* SMP */
770 
771 /*
772  * Pick the highest priority task we have and return it.
773  */
774 
775 static struct kse *
776 kseq_choose(struct kseq *kseq)
777 {
778 	struct kse *ke;
779 	struct runq *swap;
780 
781 	mtx_assert(&sched_lock, MA_OWNED);
782 	swap = NULL;
783 
784 	for (;;) {
785 		ke = runq_choose(kseq->ksq_curr);
786 		if (ke == NULL) {
787 			/*
788 			 * We already swapped once and didn't get anywhere.
789 			 */
790 			if (swap)
791 				break;
792 			swap = kseq->ksq_curr;
793 			kseq->ksq_curr = kseq->ksq_next;
794 			kseq->ksq_next = swap;
795 			continue;
796 		}
797 		/*
798 		 * If we encounter a slice of 0 the kse is in a
799 		 * TIMESHARE kse group and its nice was too far out
800 		 * of the range that receives slices.
801 		 */
802 		if (ke->ke_slice == 0) {
803 			runq_remove(ke->ke_runq, ke);
804 			sched_slice(ke);
805 			ke->ke_runq = kseq->ksq_next;
806 			runq_add(ke->ke_runq, ke);
807 			continue;
808 		}
809 		return (ke);
810 	}
811 
812 	return (runq_choose(&kseq->ksq_idle));
813 }
814 
815 static void
816 kseq_setup(struct kseq *kseq)
817 {
818 	runq_init(&kseq->ksq_timeshare[0]);
819 	runq_init(&kseq->ksq_timeshare[1]);
820 	runq_init(&kseq->ksq_idle);
821 	kseq->ksq_curr = &kseq->ksq_timeshare[0];
822 	kseq->ksq_next = &kseq->ksq_timeshare[1];
823 	kseq->ksq_load = 0;
824 	kseq->ksq_load_timeshare = 0;
825 }
826 
827 static void
828 sched_setup(void *dummy)
829 {
830 #ifdef SMP
831 	int balance_groups;
832 	int i;
833 #endif
834 
835 	slice_min = (hz/100);	/* 10ms */
836 	slice_max = (hz/7);	/* ~140ms */
837 
838 #ifdef SMP
839 	balance_groups = 0;
840 	/*
841 	 * Initialize the kseqs.
842 	 */
843 	for (i = 0; i < MAXCPU; i++) {
844 		struct kseq *ksq;
845 
846 		ksq = &kseq_cpu[i];
847 		ksq->ksq_assigned = NULL;
848 		kseq_setup(&kseq_cpu[i]);
849 	}
850 	if (smp_topology == NULL) {
851 		struct kseq_group *ksg;
852 		struct kseq *ksq;
853 
854 		for (i = 0; i < MAXCPU; i++) {
855 			ksq = &kseq_cpu[i];
856 			ksg = &kseq_groups[i];
857 			/*
858 			 * Setup a kseq group with one member.
859 			 */
860 			ksq->ksq_transferable = 0;
861 			ksq->ksq_group = ksg;
862 			ksg->ksg_cpus = 1;
863 			ksg->ksg_idlemask = 0;
864 			ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
865 			ksg->ksg_load = 0;
866 			ksg->ksg_transferable = 0;
867 			LIST_INIT(&ksg->ksg_members);
868 			LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
869 		}
870 	} else {
871 		struct kseq_group *ksg;
872 		struct cpu_group *cg;
873 		int j;
874 
875 		for (i = 0; i < smp_topology->ct_count; i++) {
876 			cg = &smp_topology->ct_group[i];
877 			ksg = &kseq_groups[i];
878 			/*
879 			 * Initialize the group.
880 			 */
881 			ksg->ksg_idlemask = 0;
882 			ksg->ksg_load = 0;
883 			ksg->ksg_transferable = 0;
884 			ksg->ksg_cpus = cg->cg_count;
885 			ksg->ksg_cpumask = cg->cg_mask;
886 			LIST_INIT(&ksg->ksg_members);
887 			/*
888 			 * Find all of the group members and add them.
889 			 */
890 			for (j = 0; j < MAXCPU; j++) {
891 				if ((cg->cg_mask & (1 << j)) != 0) {
892 					if (ksg->ksg_mask == 0)
893 						ksg->ksg_mask = 1 << j;
894 					kseq_cpu[j].ksq_transferable = 0;
895 					kseq_cpu[j].ksq_group = ksg;
896 					LIST_INSERT_HEAD(&ksg->ksg_members,
897 					    &kseq_cpu[j], ksq_siblings);
898 				}
899 			}
900 			if (ksg->ksg_cpus > 1)
901 				balance_groups = 1;
902 		}
903 		ksg_maxid = smp_topology->ct_count - 1;
904 	}
905 	/*
906 	 * Stagger the group and global load balancer so they do not
907 	 * interfere with each other.
908 	 */
909 	bal_tick = ticks + hz;
910 	if (balance_groups)
911 		gbal_tick = ticks + (hz / 2);
912 #else
913 	kseq_setup(KSEQ_SELF());
914 #endif
915 	mtx_lock_spin(&sched_lock);
916 	kseq_load_add(KSEQ_SELF(), &kse0);
917 	mtx_unlock_spin(&sched_lock);
918 }
919 
920 /*
921  * Scale the scheduling priority according to the "interactivity" of this
922  * process.
923  */
924 static void
925 sched_priority(struct ksegrp *kg)
926 {
927 	int pri;
928 
929 	if (kg->kg_pri_class != PRI_TIMESHARE)
930 		return;
931 
932 	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
933 	pri += SCHED_PRI_BASE;
934 	pri += kg->kg_proc->p_nice;
935 
936 	if (pri > PRI_MAX_TIMESHARE)
937 		pri = PRI_MAX_TIMESHARE;
938 	else if (pri < PRI_MIN_TIMESHARE)
939 		pri = PRI_MIN_TIMESHARE;
940 
941 	kg->kg_user_pri = pri;
942 
943 	return;
944 }
945 
946 /*
947  * Calculate a time slice based on the properties of the kseg and the runq
948  * that we're on.  This is only for PRI_TIMESHARE ksegrps.
949  */
950 static void
951 sched_slice(struct kse *ke)
952 {
953 	struct kseq *kseq;
954 	struct ksegrp *kg;
955 
956 	kg = ke->ke_ksegrp;
957 	kseq = KSEQ_CPU(ke->ke_cpu);
958 
959 	/*
960 	 * Rationale:
961 	 * KSEs in interactive ksegs get the minimum slice so that we
962 	 * quickly notice if it abuses its advantage.
963 	 *
964 	 * KSEs in non-interactive ksegs are assigned a slice that is
965 	 * based on the ksegs nice value relative to the least nice kseg
966 	 * on the run queue for this cpu.
967 	 *
968 	 * If the KSE is less nice than all others it gets the maximum
969 	 * slice and other KSEs will adjust their slice relative to
970 	 * this when they first expire.
971 	 *
972 	 * There is a 20 point window that starts relative to the least
973 	 * nice kse on the run queue.  Slice size is determined by
974 	 * the kse distance from the least nice ksegrp.
975 	 *
976 	 * If the kse is outside of the window it will get no slice
977 	 * and will be reevaluated each time it is selected on the
978 	 * run queue.  The exception to this is nice 0 ksegs when
979 	 * a nice -20 is running.  They are always granted a minimum
980 	 * slice.
981 	 */
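	/*
	 * Worked example for the non-interactive path below (assuming the
	 * hz = 1000 defaults, slice_min = 10 and slice_max = 142): with
	 * ksq_nicemin = -5, a kseg at p_nice = 5 has a scaled nice of 10 and
	 * receives SCHED_SLICE_NICE(10) = 72 ticks; a kseg more than
	 * SCHED_SLICE_NTHRESH points away gets SCHED_SLICE_MIN if its p_nice
	 * is 0 and no slice at all otherwise.
	 */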
982 	if (!SCHED_INTERACTIVE(kg)) {
983 		int nice;
984 
985 		nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
986 		if (kseq->ksq_load_timeshare == 0 ||
987 		    kg->kg_proc->p_nice < kseq->ksq_nicemin)
988 			ke->ke_slice = SCHED_SLICE_MAX;
989 		else if (nice <= SCHED_SLICE_NTHRESH)
990 			ke->ke_slice = SCHED_SLICE_NICE(nice);
991 		else if (kg->kg_proc->p_nice == 0)
992 			ke->ke_slice = SCHED_SLICE_MIN;
993 		else
994 			ke->ke_slice = 0;
995 	} else
996 		ke->ke_slice = SCHED_SLICE_INTERACTIVE;
997 
998 	CTR6(KTR_ULE,
999 	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
1000 	    ke, ke->ke_slice, kg->kg_proc->p_nice, kseq->ksq_nicemin,
1001 	    kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));
1002 
1003 	return;
1004 }
1005 
1006 /*
1007  * This routine enforces a maximum limit on the amount of scheduling history
1008  * kept.  It is called after either the slptime or runtime is adjusted.
1009  * This routine will not operate correctly when slp or run times have been
1010  * adjusted to more than double their maximum.
1011  */
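/*
 * Example (assuming hz = 1000, so SCHED_SLP_RUN_MAX = 5120000): a kseg whose
 * kg_runtime + kg_slptime has grown to 5500000 is scaled back by 4/5 to
 * 4400000, while one that has reached 7000000 (more than 6/5 of the maximum)
 * is halved instead, since the 4/5 scaling alone could not bring it back
 * under the limit.
 */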
1012 static void
1013 sched_interact_update(struct ksegrp *kg)
1014 {
1015 	int sum;
1016 
1017 	sum = kg->kg_runtime + kg->kg_slptime;
1018 	if (sum < SCHED_SLP_RUN_MAX)
1019 		return;
1020 	/*
1021 	 * If we have exceeded by more than 1/5th then the algorithm below
1022 	 * will not bring us back into range.  Dividing by two here forces
1023 	 * us into the range of [3/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX].
1024 	 */
1025 	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1026 		kg->kg_runtime /= 2;
1027 		kg->kg_slptime /= 2;
1028 		return;
1029 	}
1030 	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
1031 	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
1032 }
1033 
1034 static void
1035 sched_interact_fork(struct ksegrp *kg)
1036 {
1037 	int ratio;
1038 	int sum;
1039 
1040 	sum = kg->kg_runtime + kg->kg_slptime;
1041 	if (sum > SCHED_SLP_RUN_FORK) {
1042 		ratio = sum / SCHED_SLP_RUN_FORK;
1043 		kg->kg_runtime /= ratio;
1044 		kg->kg_slptime /= ratio;
1045 	}
1046 }
1047 
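/*
 * Compute the interactivity score, 0..SCHED_INTERACT_MAX, where smaller is
 * more interactive.  Roughly, a kseg that sleeps more than it runs scores
 * about SCHED_INTERACT_HALF * runtime / slptime, and one that runs more than
 * it sleeps scores about SCHED_INTERACT_MAX - SCHED_INTERACT_HALF *
 * slptime / runtime.  For example, three seconds asleep against one second
 * running scores roughly 16, well under SCHED_INTERACT_THRESH.
 */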
1048 static int
1049 sched_interact_score(struct ksegrp *kg)
1050 {
1051 	int div;
1052 
1053 	if (kg->kg_runtime > kg->kg_slptime) {
1054 		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
1055 		return (SCHED_INTERACT_HALF +
1056 		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
1057 	} else if (kg->kg_slptime > kg->kg_runtime) {
1058 		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
1059 		return (kg->kg_runtime / div);
1060 	}
1061 
1062 	/*
1063 	 * This can happen if slptime and runtime are 0.
1064 	 */
1065 	return (0);
1066 
1067 }
1068 
1069 /*
1070  * This is only somewhat accurate since given many processes of the same
1071  * priority they will switch when their slices run out, which will be
1072  * at most SCHED_SLICE_MAX.
1073  */
1074 int
1075 sched_rr_interval(void)
1076 {
1077 	return (SCHED_SLICE_MAX);
1078 }
1079 
1080 static void
1081 sched_pctcpu_update(struct kse *ke)
1082 {
1083 	/*
1084 	 * Adjust counters and watermark for pctcpu calc.
1085 	 */
1086 	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
1087 		/*
1088 		 * Shift the tick count out so that the divide doesn't
1089 		 * round away our results.
1090 		 */
1091 		ke->ke_ticks <<= 10;
1092 		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
1093 			    SCHED_CPU_TICKS;
1094 		ke->ke_ticks >>= 10;
1095 	} else
1096 		ke->ke_ticks = 0;
1097 	ke->ke_ltick = ticks;
1098 	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
1099 }
1100 
1101 void
1102 sched_prio(struct thread *td, u_char prio)
1103 {
1104 	struct kse *ke;
1105 
1106 	ke = td->td_kse;
1107 	mtx_assert(&sched_lock, MA_OWNED);
1108 	if (TD_ON_RUNQ(td)) {
1109 		/*
1110 		 * If the priority has been elevated due to priority
1111 		 * propagation, we may have to move ourselves to a new
1112 		 * queue.  We still call adjustrunqueue below in case kse
1113 		 * needs to fix things up.
1114 		 */
1115 		if (prio < td->td_priority && ke &&
1116 		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
1117 		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
1118 			runq_remove(ke->ke_runq, ke);
1119 			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
1120 			runq_add(ke->ke_runq, ke);
1121 		}
1122 		adjustrunqueue(td, prio);
1123 	} else
1124 		td->td_priority = prio;
1125 }
1126 
1127 void
1128 sched_switch(struct thread *td, struct thread *newtd)
1129 {
1130 	struct kse *ke;
1131 
1132 	mtx_assert(&sched_lock, MA_OWNED);
1133 
1134 	ke = td->td_kse;
1135 
1136 	td->td_last_kse = ke;
1137 	td->td_lastcpu = td->td_oncpu;
1138 	td->td_oncpu = NOCPU;
1139 	td->td_flags &= ~TDF_NEEDRESCHED;
1140 	td->td_pflags &= ~TDP_OWEPREEMPT;
1141 
1142 	/*
1143 	 * If the KSE has been assigned it may be in the process of switching
1144 	 * to the new cpu.  This is the case in sched_bind().
1145 	 */
1146 	if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
1147 		if (td == PCPU_GET(idlethread))
1148 			TD_SET_CAN_RUN(td);
1149 		else if (TD_IS_RUNNING(td)) {
1150 			kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1151 			setrunqueue(td);
1152 		} else {
1153 			if (ke->ke_runq) {
1154 				kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1155 			} else if ((td->td_flags & TDF_IDLETD) == 0)
1156 				kdb_backtrace();
1157 			/*
1158 			 * We will not be on the run queue. So we must be
1159 			 * sleeping or similar.
1160 			 */
1161 			if (td->td_proc->p_flag & P_SA)
1162 				kse_reassign(ke);
1163 		}
1164 	}
1165 	if (newtd == NULL)
1166 		newtd = choosethread();
1167 	else
1168 		kseq_load_add(KSEQ_SELF(), newtd->td_kse);
1169 	if (td != newtd)
1170 		cpu_switch(td, newtd);
1171 	sched_lock.mtx_lock = (uintptr_t)td;
1172 
1173 	td->td_oncpu = PCPU_GET(cpuid);
1174 }
1175 
1176 void
1177 sched_nice(struct proc *p, int nice)
1178 {
1179 	struct ksegrp *kg;
1180 	struct kse *ke;
1181 	struct thread *td;
1182 	struct kseq *kseq;
1183 
1184 	PROC_LOCK_ASSERT(p, MA_OWNED);
1185 	mtx_assert(&sched_lock, MA_OWNED);
1186 	/*
1187 	 * We need to adjust the nice counts for running KSEs.
1188 	 */
1189 	FOREACH_KSEGRP_IN_PROC(p, kg) {
1190 		if (kg->kg_pri_class == PRI_TIMESHARE) {
1191 			FOREACH_KSE_IN_GROUP(kg, ke) {
1192 				if (ke->ke_runq == NULL)
1193 					continue;
1194 				kseq = KSEQ_CPU(ke->ke_cpu);
1195 				kseq_nice_rem(kseq, p->p_nice);
1196 				kseq_nice_add(kseq, nice);
1197 			}
1198 		}
1199 	}
1200 	p->p_nice = nice;
1201 	FOREACH_KSEGRP_IN_PROC(p, kg) {
1202 		sched_priority(kg);
1203 		FOREACH_THREAD_IN_GROUP(kg, td)
1204 			td->td_flags |= TDF_NEEDRESCHED;
1205 	}
1206 }
1207 
1208 void
1209 sched_sleep(struct thread *td)
1210 {
1211 	mtx_assert(&sched_lock, MA_OWNED);
1212 
1213 	td->td_slptime = ticks;
1214 	td->td_base_pri = td->td_priority;
1215 
1216 	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
1217 	    td->td_kse, td->td_slptime);
1218 }
1219 
1220 void
1221 sched_wakeup(struct thread *td)
1222 {
1223 	mtx_assert(&sched_lock, MA_OWNED);
1224 
1225 	/*
1226 	 * Let the kseg know how long we slept for.  This is because process
1227 	 * interactivity behavior is modeled in the kseg.
1228 	 */
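	/*
	 * The sleep time below is converted from ticks to the same fixed
	 * point units as kg_slptime/kg_runtime by the << 10; a 250 tick
	 * sleep, for instance, adds 256000 to kg_slptime before the
	 * interactivity and priority are recomputed.
	 */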
1229 	if (td->td_slptime) {
1230 		struct ksegrp *kg;
1231 		int hzticks;
1232 
1233 		kg = td->td_ksegrp;
1234 		hzticks = (ticks - td->td_slptime) << 10;
1235 		if (hzticks >= SCHED_SLP_RUN_MAX) {
1236 			kg->kg_slptime = SCHED_SLP_RUN_MAX;
1237 			kg->kg_runtime = 1;
1238 		} else {
1239 			kg->kg_slptime += hzticks;
1240 			sched_interact_update(kg);
1241 		}
1242 		sched_priority(kg);
1243 		if (td->td_kse)
1244 			sched_slice(td->td_kse);
1245 		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
1246 		    td->td_kse, hzticks);
1247 		td->td_slptime = 0;
1248 	}
1249 	setrunqueue(td);
1250 }
1251 
1252 /*
1253  * Penalize the parent for creating a new child and initialize the child's
1254  * priority.
1255  */
1256 void
1257 sched_fork(struct thread *td, struct proc *p1)
1258 {
1259 
1260 	mtx_assert(&sched_lock, MA_OWNED);
1261 
1262 	p1->p_nice = td->td_proc->p_nice;
1263 	sched_fork_ksegrp(td, FIRST_KSEGRP_IN_PROC(p1));
1264 	sched_fork_kse(td, FIRST_KSE_IN_PROC(p1));
1265 	sched_fork_thread(td, FIRST_THREAD_IN_PROC(p1));
1266 }
1267 
1268 void
1269 sched_fork_kse(struct thread *td, struct kse *child)
1270 {
1271 
1272 	struct kse *ke = td->td_kse;
1273 
1274 	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
1275 	child->ke_cpu = ke->ke_cpu;
1276 	child->ke_runq = NULL;
1277 
1278 	/* Grab our parents cpu estimation information. */
1279 	child->ke_ticks = ke->ke_ticks;
1280 	child->ke_ltick = ke->ke_ltick;
1281 	child->ke_ftick = ke->ke_ftick;
1282 }
1283 
1284 void
1285 sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
1286 {
1287 	struct ksegrp *kg = td->td_ksegrp;
1288 	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
1289 
1290 	child->kg_slptime = kg->kg_slptime;
1291 	child->kg_runtime = kg->kg_runtime;
1292 	child->kg_user_pri = kg->kg_user_pri;
1293 	sched_interact_fork(child);
1294 	kg->kg_runtime += tickincr << 10;
1295 	sched_interact_update(kg);
1296 
1297 	CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)",
1298 	    kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime,
1299 	    child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime);
1300 }
1301 
1302 void
1303 sched_fork_thread(struct thread *td, struct thread *child)
1304 {
1305 }
1306 
1307 void
1308 sched_class(struct ksegrp *kg, int class)
1309 {
1310 	struct kseq *kseq;
1311 	struct kse *ke;
1312 	int nclass;
1313 	int oclass;
1314 
1315 	mtx_assert(&sched_lock, MA_OWNED);
1316 	if (kg->kg_pri_class == class)
1317 		return;
1318 
1319 	nclass = PRI_BASE(class);
1320 	oclass = PRI_BASE(kg->kg_pri_class);
1321 	FOREACH_KSE_IN_GROUP(kg, ke) {
1322 		if (ke->ke_state != KES_ONRUNQ &&
1323 		    ke->ke_state != KES_THREAD)
1324 			continue;
1325 		kseq = KSEQ_CPU(ke->ke_cpu);
1326 
1327 #ifdef SMP
1328 		/*
1329 		 * On SMP if we're on the RUNQ we must adjust the transferable
1330 		 * count because we could be changing to or from an interrupt
1331 		 * class.
1332 		 */
1333 		if (ke->ke_state == KES_ONRUNQ) {
1334 			if (KSE_CAN_MIGRATE(ke, oclass)) {
1335 				kseq->ksq_transferable--;
1336 				kseq->ksq_group->ksg_transferable--;
1337 			}
1338 			if (KSE_CAN_MIGRATE(ke, nclass)) {
1339 				kseq->ksq_transferable++;
1340 				kseq->ksq_group->ksg_transferable++;
1341 			}
1342 		}
1343 #endif
1344 		if (oclass == PRI_TIMESHARE) {
1345 			kseq->ksq_load_timeshare--;
1346 			kseq_nice_rem(kseq, kg->kg_proc->p_nice);
1347 		}
1348 		if (nclass == PRI_TIMESHARE) {
1349 			kseq->ksq_load_timeshare++;
1350 			kseq_nice_add(kseq, kg->kg_proc->p_nice);
1351 		}
1352 	}
1353 
1354 	kg->kg_pri_class = class;
1355 }
1356 
1357 /*
1358  * Return some of the child's priority and interactivity to the parent.
1359  */
1360 void
1361 sched_exit(struct proc *p, struct thread *td)
1362 {
1363 	mtx_assert(&sched_lock, MA_OWNED);
1364 	sched_exit_kse(FIRST_KSE_IN_PROC(p), td);
1365 	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
1366 }
1367 
1368 void
1369 sched_exit_kse(struct kse *ke, struct thread *td)
1370 {
1371 	kseq_load_rem(KSEQ_CPU(td->td_kse->ke_cpu), td->td_kse);
1372 }
1373 
1374 void
1375 sched_exit_ksegrp(struct ksegrp *kg, struct thread *td)
1376 {
1377 	/* kg->kg_slptime += td->td_ksegrp->kg_slptime; */
1378 	kg->kg_runtime += td->td_ksegrp->kg_runtime;
1379 	sched_interact_update(kg);
1380 }
1381 
1382 void
1383 sched_exit_thread(struct thread *td, struct thread *child)
1384 {
1385 }
1386 
1387 void
1388 sched_clock(struct thread *td)
1389 {
1390 	struct kseq *kseq;
1391 	struct ksegrp *kg;
1392 	struct kse *ke;
1393 
1394 	mtx_assert(&sched_lock, MA_OWNED);
1395 #ifdef SMP
1396 	if (ticks == bal_tick)
1397 		sched_balance();
1398 	if (ticks == gbal_tick)
1399 		sched_balance_groups();
1400 #endif
1401 	/*
1402 	 * sched_setup() apparently happens prior to stathz being set.  We
1403 	 * need to resolve the timers earlier in the boot so we can avoid
1404 	 * calculating this here.
1405 	 */
1406 	if (realstathz == 0) {
1407 		realstathz = stathz ? stathz : hz;
1408 		tickincr = hz / realstathz;
1409 		/*
1410 		 * XXX This does not work for values of stathz that are much
1411 		 * larger than hz.
1412 		 */
1413 		if (tickincr == 0)
1414 			tickincr = 1;
1415 	}
1416 
1417 	ke = td->td_kse;
1418 	kg = ke->ke_ksegrp;
1419 
1420 	/* Adjust ticks for pctcpu */
1421 	ke->ke_ticks++;
1422 	ke->ke_ltick = ticks;
1423 
1424 	/* Go up to one second beyond our max and then trim back down */
1425 	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
1426 		sched_pctcpu_update(ke);
1427 
1428 	if (td->td_flags & TDF_IDLETD)
1429 		return;
1430 
1431 	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
1432 	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
1433 	/*
1434 	 * We only do slicing code for TIMESHARE ksegrps.
1435 	 */
1436 	if (kg->kg_pri_class != PRI_TIMESHARE)
1437 		return;
1438 	/*
1439 	 * We used a tick; charge it to the ksegrp so that we can compute our
1440 	 * interactivity.
1441 	 */
1442 	kg->kg_runtime += tickincr << 10;
1443 	sched_interact_update(kg);
1444 
1445 	/*
1446 	 * We used up one time slice.
1447 	 */
1448 	if (--ke->ke_slice > 0)
1449 		return;
1450 	/*
1451 	 * We're out of time, recompute priorities and requeue.
1452 	 */
1453 	kseq = KSEQ_SELF();
1454 	kseq_load_rem(kseq, ke);
1455 	sched_priority(kg);
1456 	sched_slice(ke);
1457 	if (SCHED_CURR(kg, ke))
1458 		ke->ke_runq = kseq->ksq_curr;
1459 	else
1460 		ke->ke_runq = kseq->ksq_next;
1461 	kseq_load_add(kseq, ke);
1462 	td->td_flags |= TDF_NEEDRESCHED;
1463 }
1464 
1465 int
1466 sched_runnable(void)
1467 {
1468 	struct kseq *kseq;
1469 	int load;
1470 
1471 	load = 1;
1472 
1473 	kseq = KSEQ_SELF();
1474 #ifdef SMP
1475 	if (kseq->ksq_assigned) {
1476 		mtx_lock_spin(&sched_lock);
1477 		kseq_assign(kseq);
1478 		mtx_unlock_spin(&sched_lock);
1479 	}
1480 #endif
1481 	if ((curthread->td_flags & TDF_IDLETD) != 0) {
1482 		if (kseq->ksq_load > 0)
1483 			goto out;
1484 	} else
1485 		if (kseq->ksq_load - 1 > 0)
1486 			goto out;
1487 	load = 0;
1488 out:
1489 	return (load);
1490 }
1491 
1492 void
1493 sched_userret(struct thread *td)
1494 {
1495 	struct ksegrp *kg;
1496 
1497 	kg = td->td_ksegrp;
1498 
1499 	if (td->td_priority != kg->kg_user_pri) {
1500 		mtx_lock_spin(&sched_lock);
1501 		td->td_priority = kg->kg_user_pri;
1502 		mtx_unlock_spin(&sched_lock);
1503 	}
1504 }
1505 
1506 struct kse *
1507 sched_choose(void)
1508 {
1509 	struct kseq *kseq;
1510 	struct kse *ke;
1511 
1512 	mtx_assert(&sched_lock, MA_OWNED);
1513 	kseq = KSEQ_SELF();
1514 #ifdef SMP
1515 restart:
1516 	if (kseq->ksq_assigned)
1517 		kseq_assign(kseq);
1518 #endif
1519 	ke = kseq_choose(kseq);
1520 	if (ke) {
1521 #ifdef SMP
1522 		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
1523 			if (kseq_idled(kseq) == 0)
1524 				goto restart;
1525 #endif
1526 		kseq_runq_rem(kseq, ke);
1527 		ke->ke_state = KES_THREAD;
1528 
1529 		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
1530 			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
1531 			    ke, ke->ke_runq, ke->ke_slice,
1532 			    ke->ke_thread->td_priority);
1533 		}
1534 		return (ke);
1535 	}
1536 #ifdef SMP
1537 	if (kseq_idled(kseq) == 0)
1538 		goto restart;
1539 #endif
1540 	return (NULL);
1541 }
1542 
1543 void
1544 sched_add(struct thread *td)
1545 {
1546 
1547 	sched_add_internal(td, 1);
1548 }
1549 
1550 static void
1551 sched_add_internal(struct thread *td, int preemptive)
1552 {
1553 	struct kseq *kseq;
1554 	struct ksegrp *kg;
1555 	struct kse *ke;
1556 	int class;
1557 
1558 	mtx_assert(&sched_lock, MA_OWNED);
1559 	ke = td->td_kse;
1560 	kg = td->td_ksegrp;
1561 	if (ke->ke_flags & KEF_ASSIGNED)
1562 		return;
1563 	kseq = KSEQ_SELF();
1564 	KASSERT((ke->ke_thread != NULL),
1565 	    ("sched_add: No thread on KSE"));
1566 	KASSERT((ke->ke_thread->td_kse != NULL),
1567 	    ("sched_add: No KSE on thread"));
1568 	KASSERT(ke->ke_state != KES_ONRUNQ,
1569 	    ("sched_add: kse %p (%s) already in run queue", ke,
1570 	    ke->ke_proc->p_comm));
1571 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1572 	    ("sched_add: process swapped out"));
1573 	KASSERT(ke->ke_runq == NULL,
1574 	    ("sched_add: KSE %p is still assigned to a run queue", ke));
1575 
1576 	class = PRI_BASE(kg->kg_pri_class);
1577 	switch (class) {
1578 	case PRI_ITHD:
1579 	case PRI_REALTIME:
1580 		ke->ke_runq = kseq->ksq_curr;
1581 		ke->ke_slice = SCHED_SLICE_MAX;
1582 		ke->ke_cpu = PCPU_GET(cpuid);
1583 		break;
1584 	case PRI_TIMESHARE:
1585 		if (SCHED_CURR(kg, ke))
1586 			ke->ke_runq = kseq->ksq_curr;
1587 		else
1588 			ke->ke_runq = kseq->ksq_next;
1589 		break;
1590 	case PRI_IDLE:
1591 		/*
1592 		 * This is for priority prop.
1593 		 */
1594 		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
1595 			ke->ke_runq = kseq->ksq_curr;
1596 		else
1597 			ke->ke_runq = &kseq->ksq_idle;
1598 		ke->ke_slice = SCHED_SLICE_MIN;
1599 		break;
1600 	default:
1601 		panic("Unknown pri class.");
1602 		break;
1603 	}
1604 #ifdef SMP
1605 	if (ke->ke_cpu != PCPU_GET(cpuid)) {
1606 		ke->ke_runq = NULL;
1607 		kseq_notify(ke, ke->ke_cpu);
1608 		return;
1609 	}
1610 	/*
1611 	 * If we had been idle, clear our bit in the group and potentially
1612 	 * the global bitmap.  If not, see if we should transfer this thread.
1613 	 */
1614 	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
1615 	    (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
1616 		/*
1617 		 * Check to see if our group is unidling, and if so, remove it
1618 		 * from the global idle mask.
1619 		 */
1620 		if (kseq->ksq_group->ksg_idlemask ==
1621 		    kseq->ksq_group->ksg_cpumask)
1622 			atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
1623 		/*
1624 		 * Now remove ourselves from the group specific idle mask.
1625 		 */
1626 		kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
1627 	} else if (kseq->ksq_load > 1 && KSE_CAN_MIGRATE(ke, class))
1628 		if (kseq_transfer(kseq, ke, class))
1629 			return;
1630 #endif
1631 	if (td->td_priority < curthread->td_priority)
1632 		curthread->td_flags |= TDF_NEEDRESCHED;
1633 
1634 #ifdef SMP
1635 	/*
1636 	 * Only try to preempt if the thread is unpinned or pinned to the
1637 	 * current CPU.
1638 	 */
1639 	if (KSE_CAN_MIGRATE(ke, class) || ke->ke_cpu == PCPU_GET(cpuid))
1640 #endif
1641 	if (preemptive && maybe_preempt(td))
1642 		return;
1643 	ke->ke_ksegrp->kg_runq_kses++;
1644 	ke->ke_state = KES_ONRUNQ;
1645 
1646 	kseq_runq_add(kseq, ke);
1647 	kseq_load_add(kseq, ke);
1648 }
1649 
1650 void
1651 sched_rem(struct thread *td)
1652 {
1653 	struct kseq *kseq;
1654 	struct kse *ke;
1655 
1656 	ke = td->td_kse;
1657 	/*
1658 	 * It is safe to just return here because sched_rem() is only ever
1659 	 * used in places where we're immediately going to add the
1660 	 * kse back on again.  In that case it'll be added with the correct
1661 	 * thread and priority when the caller drops the sched_lock.
1662 	 */
1663 	if (ke->ke_flags & KEF_ASSIGNED)
1664 		return;
1665 	mtx_assert(&sched_lock, MA_OWNED);
1666 	KASSERT((ke->ke_state == KES_ONRUNQ),
1667 	    ("sched_rem: KSE not on run queue"));
1668 
1669 	ke->ke_state = KES_THREAD;
1670 	ke->ke_ksegrp->kg_runq_kses--;
1671 	kseq = KSEQ_CPU(ke->ke_cpu);
1672 	kseq_runq_rem(kseq, ke);
1673 	kseq_load_rem(kseq, ke);
1674 }
1675 
1676 fixpt_t
1677 sched_pctcpu(struct thread *td)
1678 {
1679 	fixpt_t pctcpu;
1680 	struct kse *ke;
1681 
1682 	pctcpu = 0;
1683 	ke = td->td_kse;
1684 	if (ke == NULL)
1685 		return (0);
1686 
1687 	mtx_lock_spin(&sched_lock);
1688 	if (ke->ke_ticks) {
1689 		int rtick;
1690 
1691 		/*
1692 		 * Don't update more frequently than twice a second.  Allowing
1693 		 * this causes the cpu usage to decay away too quickly due to
1694 		 * rounding errors.
1695 		 */
1696 		if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
1697 		    ke->ke_ltick < (ticks - (hz / 2)))
1698 			sched_pctcpu_update(ke);
1699 		/* How many rticks per second? */
1700 		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
1701 		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
1702 	}
1703 
1704 	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
1705 	mtx_unlock_spin(&sched_lock);
1706 
1707 	return (pctcpu);
1708 }
1709 
1710 void
1711 sched_bind(struct thread *td, int cpu)
1712 {
1713 	struct kse *ke;
1714 
1715 	mtx_assert(&sched_lock, MA_OWNED);
1716 	ke = td->td_kse;
1717 	ke->ke_flags |= KEF_BOUND;
1718 #ifdef SMP
1719 	if (PCPU_GET(cpuid) == cpu)
1720 		return;
1721 	/* sched_rem without the runq_remove */
1722 	ke->ke_state = KES_THREAD;
1723 	ke->ke_ksegrp->kg_runq_kses--;
1724 	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1725 	kseq_notify(ke, cpu);
1726 	/* When we return from mi_switch we'll be on the correct cpu. */
1727 	mi_switch(SW_VOL, NULL);
1728 #endif
1729 }
1730 
1731 void
1732 sched_unbind(struct thread *td)
1733 {
1734 	mtx_assert(&sched_lock, MA_OWNED);
1735 	td->td_kse->ke_flags &= ~KEF_BOUND;
1736 }
1737 
1738 int
1739 sched_load(void)
1740 {
1741 #ifdef SMP
1742 	int total;
1743 	int i;
1744 
1745 	total = 0;
1746 	for (i = 0; i <= ksg_maxid; i++)
1747 		total += KSEQ_GROUP(i)->ksg_load;
1748 	return (total);
1749 #else
1750 	return (KSEQ_SELF()->ksq_sysload);
1751 #endif
1752 }
1753 
1754 int
1755 sched_sizeof_kse(void)
1756 {
1757 	return (sizeof(struct kse) + sizeof(struct ke_sched));
1758 }
1759 
1760 int
1761 sched_sizeof_ksegrp(void)
1762 {
1763 	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
1764 }
1765 
1766 int
1767 sched_sizeof_proc(void)
1768 {
1769 	return (sizeof(struct proc));
1770 }
1771 
1772 int
1773 sched_sizeof_thread(void)
1774 {
1775 	return (sizeof(struct thread) + sizeof(struct td_sched));
1776 }
1777