xref: /freebsd/sys/kern/sched_ule.c (revision 3193579b66fd7067f898dbc54bdea81a0e6f9bd0)
1 /*-
2  * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/ktr.h>
34 #include <sys/lock.h>
35 #include <sys/mutex.h>
36 #include <sys/proc.h>
37 #include <sys/resource.h>
38 #include <sys/resourcevar.h>
39 #include <sys/sched.h>
40 #include <sys/smp.h>
41 #include <sys/sx.h>
42 #include <sys/sysctl.h>
43 #include <sys/sysproto.h>
44 #include <sys/vmmeter.h>
45 #ifdef DDB
46 #include <ddb/ddb.h>
47 #endif
48 #ifdef KTRACE
49 #include <sys/uio.h>
50 #include <sys/ktrace.h>
51 #endif
52 
53 #include <machine/cpu.h>
54 #include <machine/smp.h>
55 
56 #define KTR_ULE         KTR_NFS
57 
58 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
59 /* XXX This is bogus compatibility crap for ps */
60 static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
61 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
62 
63 static void sched_setup(void *dummy);
64 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
65 
66 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");
67 
68 static int sched_strict;
69 SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");
70 
71 static int slice_min = 1;
72 SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");
73 
74 static int slice_max = 10;
75 SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");
76 
77 int realstathz;
78 int tickincr = 1;
79 
80 #ifdef SMP
81 /* Callout to handle load balancing SMP systems. */
82 static struct callout kseq_lb_callout;
83 #endif
84 
85 /*
86  * These data structures are allocated within their parent data structure
87  * but are scheduler specific.
88  */
89 
90 struct ke_sched {
91 	int		ske_slice;
92 	struct runq	*ske_runq;
93 	/* The following variables are only used for pctcpu calculation */
94 	int		ske_ltick;	/* Last tick that we were running on */
95 	int		ske_ftick;	/* First tick that we were running on */
96 	int		ske_ticks;	/* Tick count */
97 	/* CPU that we have affinity for. */
98 	u_char		ske_cpu;
99 };
100 #define	ke_slice	ke_sched->ske_slice
101 #define	ke_runq		ke_sched->ske_runq
102 #define	ke_ltick	ke_sched->ske_ltick
103 #define	ke_ftick	ke_sched->ske_ftick
104 #define	ke_ticks	ke_sched->ske_ticks
105 #define	ke_cpu		ke_sched->ske_cpu
106 #define	ke_assign	ke_procq.tqe_next
107 
108 #define	KEF_ASSIGNED	KEF_SCHED0	/* KSE is being migrated. */
109 #define	KEF_BOUND	KEF_SCHED1	/* KSE can not migrate. */
110 
111 struct kg_sched {
112 	int	skg_slptime;		/* Number of ticks we vol. slept */
113 	int	skg_runtime;		/* Number of ticks we were running */
114 };
115 #define	kg_slptime	kg_sched->skg_slptime
116 #define	kg_runtime	kg_sched->skg_runtime
117 
118 struct td_sched {
119 	int	std_slptime;
120 };
121 #define	td_slptime	td_sched->std_slptime
122 
123 struct td_sched td_sched;
124 struct ke_sched ke_sched;
125 struct kg_sched kg_sched;
126 
127 struct ke_sched *kse0_sched = &ke_sched;
128 struct kg_sched *ksegrp0_sched = &kg_sched;
129 struct p_sched *proc0_sched = NULL;
130 struct td_sched *thread0_sched = &td_sched;
131 
132 /*
133  * The priority is primarily determined by the interactivity score.  Thus, we
134  * give lower (better) priorities to kse groups that use less CPU.  The nice
135  * value is then directly added to this to allow nice to have some effect
136  * on latency.
137  *
138  * PRI_RANGE:	Total priority range for timeshare threads.
139  * PRI_NRESV:	Number of nice values.
140  * PRI_BASE:	The start of the dynamic range.
141  */
142 #define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
143 #define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
144 #define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
145 #define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
146 #define	SCHED_PRI_INTERACT(score)					\
147     ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)
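
/*
 * A worked example, assuming the stock priority ranges of this era
 * (PRI_MIN_TIMESHARE = 160, PRI_MAX_TIMESHARE = 223, so
 * SCHED_PRI_RANGE = 64): an interactivity score of 25 yields
 * SCHED_PRI_INTERACT(25) = 25 * 64 / 100 = 16, giving a user
 * priority of 160 + 16 = 176 before nice is added and the result is
 * clamped in sched_priority().
 */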
148 
149 /*
150  * These determine the interactivity of a process.
151  *
152  * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
153  *		before throttling back.
154  * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
155  * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
156  * INTERACT_THRESH:	Threshold for placement on the current runq.
157  */
158 #define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
159 #define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
160 #define	SCHED_INTERACT_MAX	(100)
161 #define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
162 #define	SCHED_INTERACT_THRESH	(30)
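
/*
 * Note that kg_slptime and kg_runtime are stored in a 10 bit fixed
 * point form, hence the << 10 above.  For example, with hz = 100,
 * SCHED_SLP_RUN_MAX is (500 << 10) = 512000, i.e. five seconds worth
 * of history.
 */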
163 
164 /*
165  * These parameters and macros determine the size of the time slice that is
166  * granted to each thread.
167  *
168  * SLICE_MIN:	Minimum time slice granted, in units of ticks.
169  * SLICE_MAX:	Maximum time slice granted.
170  * SLICE_RANGE:	Range of available time slices scaled by hz.
171  * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
172  * SLICE_NICE:  Determines the slice granted for a given scaled nice value.
173  * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
174  */
175 #define	SCHED_SLICE_MIN			(slice_min)
176 #define	SCHED_SLICE_MAX			(slice_max)
177 #define	SCHED_SLICE_NTHRESH	(SCHED_PRI_NHALF - 1)
178 #define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
179 #define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
180 #define	SCHED_SLICE_NICE(nice)						\
181     (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))
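
/*
 * A worked example, with hz = 1000 and the defaults assigned in
 * sched_setup() (slice_min = 10, slice_max = 142, so
 * SCHED_SLICE_RANGE = 133 and, with SCHED_PRI_NHALF = 20,
 * SCHED_SLICE_NTHRESH = 19): a kse at the queue's nicemin receives
 * SCHED_SLICE_NICE(0) = 142 ticks, one 10 nice values away receives
 * 142 - (10 * 133) / 19 = 72 ticks, and one 19 away receives 9
 * ticks.  Anything beyond the threshold is given a slice of 0.
 */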
182 
183 /*
184  * This macro determines whether or not the kse belongs on the current or
185  * next run queue.
186  */
187 #define	SCHED_INTERACTIVE(kg)						\
188     (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
189 #define	SCHED_CURR(kg, ke)						\
190     (ke->ke_thread->td_priority != kg->kg_user_pri ||			\
191     SCHED_INTERACTIVE(kg))
192 
193 /*
194  * Cpu percentage computation macros and defines.
195  *
196  * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
197  * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
198  */
199 
200 #define	SCHED_CPU_TIME	10
201 #define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)
202 
203 /*
204  * kseq - per processor runqs and statistics.
205  */
206 
207 #define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */
208 
209 struct kseq {
210 	struct runq	ksq_idle;		/* Queue of IDLE threads. */
211 	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
212 	struct runq	*ksq_next;		/* Next timeshare queue. */
213 	struct runq	*ksq_curr;		/* Current queue. */
214 	int		ksq_load_timeshare;	/* Load for timeshare. */
215 	int		ksq_load;		/* Aggregate load. */
216 	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
217 	short		ksq_nicemin;		/* Least nice. */
218 #ifdef SMP
219 	int		ksq_load_transferable;	/* kses that may be migrated. */
220 	int		ksq_idled;
221 	int		ksq_cpus;	/* Count of CPUs in this kseq. */
222 	volatile struct kse *ksq_assigned;	/* assigned by another CPU. */
223 #endif
224 };
225 
226 /*
227  * One kse queue per processor.
228  */
229 #ifdef SMP
230 static int kseq_idle;
231 static struct kseq	kseq_cpu[MAXCPU];
232 static struct kseq	*kseq_idmap[MAXCPU];
233 #define	KSEQ_SELF()	(kseq_idmap[PCPU_GET(cpuid)])
234 #define	KSEQ_CPU(x)	(kseq_idmap[(x)])
235 #else
236 static struct kseq	kseq_cpu;
237 #define	KSEQ_SELF()	(&kseq_cpu)
238 #define	KSEQ_CPU(x)	(&kseq_cpu)
239 #endif
240 
241 static void sched_slice(struct kse *ke);
242 static void sched_priority(struct ksegrp *kg);
243 static int sched_interact_score(struct ksegrp *kg);
244 static void sched_interact_update(struct ksegrp *kg);
245 static void sched_interact_fork(struct ksegrp *kg);
246 static void sched_pctcpu_update(struct kse *ke);
247 
248 /* Operations on per processor queues */
249 static struct kse * kseq_choose(struct kseq *kseq);
250 static void kseq_setup(struct kseq *kseq);
251 static void kseq_load_add(struct kseq *kseq, struct kse *ke);
252 static void kseq_load_rem(struct kseq *kseq, struct kse *ke);
253 static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke);
254 static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke);
255 static void kseq_nice_add(struct kseq *kseq, int nice);
256 static void kseq_nice_rem(struct kseq *kseq, int nice);
257 void kseq_print(int cpu);
258 #ifdef SMP
259 static struct kse *runq_steal(struct runq *rq);
260 static void sched_balance(void *arg);
261 static void kseq_move(struct kseq *from, int cpu);
262 static __inline void kseq_setidle(struct kseq *kseq);
263 static void kseq_notify(struct kse *ke, int cpu);
264 static void kseq_assign(struct kseq *);
265 static struct kse *kseq_steal(struct kseq *kseq);
266 #define	KSE_CAN_MIGRATE(ke, class)					\
267     ((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 &&		\
268     ((ke)->ke_flags & KEF_BOUND) == 0)
269 #endif
270 
271 void
272 kseq_print(int cpu)
273 {
274 	struct kseq *kseq;
275 	int i;
276 
277 	kseq = KSEQ_CPU(cpu);
278 
279 	printf("kseq:\n");
280 	printf("\tload:           %d\n", kseq->ksq_load);
281 	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
282 #ifdef SMP
283 	printf("\tload transferable: %d\n", kseq->ksq_load_transferable);
284 #endif
285 	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
286 	printf("\tnice counts:\n");
287 	for (i = 0; i < SCHED_PRI_NRESV; i++)
288 		if (kseq->ksq_nice[i])
289 			printf("\t\t%d = %d\n",
290 			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
291 }
292 
293 static __inline void
294 kseq_runq_add(struct kseq *kseq, struct kse *ke)
295 {
296 #ifdef SMP
297 	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
298 		kseq->ksq_load_transferable++;
299 #endif
300 	runq_add(ke->ke_runq, ke);
301 }
302 
303 static __inline void
304 kseq_runq_rem(struct kseq *kseq, struct kse *ke)
305 {
306 #ifdef SMP
307 	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
308 		kseq->ksq_load_transferable--;
309 #endif
310 	runq_remove(ke->ke_runq, ke);
311 }
312 
313 static void
314 kseq_load_add(struct kseq *kseq, struct kse *ke)
315 {
316 	int class;
317 	mtx_assert(&sched_lock, MA_OWNED);
318 	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
319 	if (class == PRI_TIMESHARE)
320 		kseq->ksq_load_timeshare++;
321 	kseq->ksq_load++;
322 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
323 		CTR6(KTR_ULE,
324 		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
325 		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
326 		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
327 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
328 		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
329 }
330 
331 static void
332 kseq_load_rem(struct kseq *kseq, struct kse *ke)
333 {
334 	int class;
335 	mtx_assert(&sched_lock, MA_OWNED);
336 	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
337 	if (class == PRI_TIMESHARE)
338 		kseq->ksq_load_timeshare--;
339 	kseq->ksq_load--;
340 	ke->ke_runq = NULL;
341 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
342 		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
343 }
344 
345 static void
346 kseq_nice_add(struct kseq *kseq, int nice)
347 {
348 	mtx_assert(&sched_lock, MA_OWNED);
349 	/* Normalize to zero. */
350 	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
351 	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
352 		kseq->ksq_nicemin = nice;
353 }
354 
355 static void
356 kseq_nice_rem(struct kseq *kseq, int nice)
357 {
358 	int n;
359 
360 	mtx_assert(&sched_lock, MA_OWNED);
361 	/* Normalize to zero. */
362 	n = nice + SCHED_PRI_NHALF;
363 	kseq->ksq_nice[n]--;
364 	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));
365 
366 	/*
367 	 * If this wasn't the smallest nice value or there are more in
368 	 * this bucket we can just return.  Otherwise we have to recalculate
369 	 * the smallest nice.
370 	 */
371 	if (nice != kseq->ksq_nicemin ||
372 	    kseq->ksq_nice[n] != 0 ||
373 	    kseq->ksq_load_timeshare == 0)
374 		return;
375 
376 	for (; n < SCHED_PRI_NRESV; n++)
377 		if (kseq->ksq_nice[n]) {
378 			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
379 			return;
380 		}
381 }
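
/*
 * To illustrate the normalization above: nice values span
 * [PRIO_MIN, PRIO_MAX] = [-20, 20], so nice -20 lands in bin 0,
 * nice 0 in bin 20 (SCHED_PRI_NHALF) and nice 20 in bin 40, while
 * ksq_nicemin tracks the lowest occupied bin so that sched_slice()
 * can size slices relative to the least nice ksegrp.
 */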
382 
383 #ifdef SMP
384 /*
385  * sched_balance is a simple CPU load balancing algorithm.  It operates by
386  * finding the least loaded and most loaded cpu and equalizing their load
387  * by migrating some processes.
388  *
389  * Dealing only with two CPUs at a time has two advantages.  Firstly, most
390  * installations will only have 2 cpus.  Secondly, load balancing too much at
391  * once can have an unpleasant effect on the system.  The scheduler rarely has
392  * enough information to make perfect decisions.  So this algorithm chooses
393  * simplicity and more gradual effects on load in larger systems.
394  *
395  * It could be improved by considering the priorities and slices assigned to
396  * each task prior to balancing them.  There are many pathological cases with
397  * any approach and so the semi random algorithm below may work as well as any.
398  *
399  */
400 static void
401 sched_balance(void *arg)
402 {
403 	struct kseq *kseq;
404 	int high_load;
405 	int low_load;
406 	int high_cpu;
407 	int low_cpu;
408 	int move;
409 	int diff;
410 	int i;
411 
412 	high_cpu = 0;
413 	low_cpu = 0;
414 	high_load = 0;
415 	low_load = -1;
416 
417 	mtx_lock_spin(&sched_lock);
418 	if (smp_started == 0)
419 		goto out;
420 
421 	for (i = 0; i < mp_maxid; i++) {
422 		if (CPU_ABSENT(i) || ((1 << i) & stopped_cpus) != 0)
423 			continue;
424 		kseq = KSEQ_CPU(i);
425 		if (kseq->ksq_load_transferable > high_load) {
426 			high_load = kseq->ksq_load_transferable;
427 			high_cpu = i;
428 		}
429 		if (low_load == -1 || kseq->ksq_load < low_load) {
430 			low_load = kseq->ksq_load;
431 			low_cpu = i;
432 		}
433 	}
434 	kseq = KSEQ_CPU(high_cpu);
435 	/*
436 	 * Nothing to do.
437 	 */
438 	if (high_load == 0 || low_load >= kseq->ksq_load)
439 		goto out;
440 	/*
441 	 * Determine what the imbalance is and then adjust that to how many
442 	 * kses we actually have to give up (load_transferable).
443 	 */
444 	diff = kseq->ksq_load - low_load;
445 	move = diff / 2;
446 	if (diff & 0x1)
447 		move++;
448 	move = min(move, high_load);
449 	for (i = 0; i < move; i++)
450 		kseq_move(kseq, low_cpu);
451 out:
452 	mtx_unlock_spin(&sched_lock);
453 	callout_reset(&kseq_lb_callout, hz, sched_balance, NULL);
454 
455 	return;
456 }
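
/*
 * A worked example of the computation above: if the most loaded cpu
 * has ksq_load = 6 (all of it transferable) and the least loaded has
 * ksq_load = 2, then diff = 4 and move = 2, so two kses are handed to
 * the less loaded cpu, leaving both at a load of 4.
 */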
457 
458 static void
459 kseq_move(struct kseq *from, int cpu)
460 {
461 	struct kse *ke;
462 
463 	ke = kseq_steal(from);
464 	ke->ke_state = KES_THREAD;
465 	kseq_runq_rem(from, ke);
466 	kseq_load_rem(from, ke);
467 
468 	ke->ke_cpu = cpu;
469 	kseq_notify(ke, cpu);
470 }
471 
472 static __inline void
473 kseq_setidle(struct kseq *kseq)
474 {
475 	if (kseq->ksq_idled)
476 		return;
477 	kseq->ksq_idled = 1;
478 	atomic_set_int(&kseq_idle, PCPU_GET(cpumask));
479 	return;
480 }
481 
482 static void
483 kseq_assign(struct kseq *kseq)
484 {
485 	struct kse *nke;
486 	struct kse *ke;
487 
488 	do {
489 		ke = (struct kse *)kseq->ksq_assigned;
490 	} while (!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
491 	for (; ke != NULL; ke = nke) {
492 		nke = ke->ke_assign;
493 		ke->ke_flags &= ~KEF_ASSIGNED;
494 		sched_add(ke->ke_thread);
495 	}
496 }
497 
498 static void
499 kseq_notify(struct kse *ke, int cpu)
500 {
501 	struct kseq *kseq;
502 	struct thread *td;
503 	struct pcpu *pcpu;
504 
505 	ke->ke_flags |= KEF_ASSIGNED;
506 
507 	kseq = KSEQ_CPU(cpu);
508 
509 	/*
510 	 * Place a KSE on another cpu's queue and force a resched.
511 	 */
512 	do {
513 		ke->ke_assign = (struct kse *)kseq->ksq_assigned;
514 	} while (!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
515 	pcpu = pcpu_find(cpu);
516 	td = pcpu->pc_curthread;
517 	if (ke->ke_thread->td_priority < td->td_priority ||
518 	    td == pcpu->pc_idlethread) {
519 		td->td_flags |= TDF_NEEDRESCHED;
520 		ipi_selected(1 << cpu, IPI_AST);
521 	}
522 }
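
/*
 * kseq_notify() and kseq_assign() together implement a simple lock
 * free stack: notify pushes a kse onto the remote cpu's ksq_assigned
 * list with a compare-and-set loop, linking entries through
 * ke_assign, and assign atomically detaches the whole chain before
 * calling sched_add() on each kse locally.  This appears to be what
 * lets sched_runnable() peek at ksq_assigned without first taking
 * the lock.
 */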
523 
524 static struct kse *
525 runq_steal(struct runq *rq)
526 {
527 	struct rqhead *rqh;
528 	struct rqbits *rqb;
529 	struct kse *ke;
530 	int word;
531 	int bit;
532 
533 	mtx_assert(&sched_lock, MA_OWNED);
534 	rqb = &rq->rq_status;
535 	for (word = 0; word < RQB_LEN; word++) {
536 		if (rqb->rqb_bits[word] == 0)
537 			continue;
538 		for (bit = 0; bit < RQB_BPW; bit++) {
539 			if ((rqb->rqb_bits[word] & (1 << bit)) == 0)
540 				continue;
541 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
542 			TAILQ_FOREACH(ke, rqh, ke_procq) {
543 				if (KSE_CAN_MIGRATE(ke,
544 				    PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
545 					return (ke);
546 			}
547 		}
548 	}
549 	return (NULL);
550 }
551 
552 static struct kse *
553 kseq_steal(struct kseq *kseq)
554 {
555 	struct kse *ke;
556 
557 	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
558 		return (ke);
559 	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
560 		return (ke);
561 	return (runq_steal(&kseq->ksq_idle));
562 }
563 #endif	/* SMP */
564 
565 /*
566  * Pick the highest priority task we have and return it.
567  */
568 
569 static struct kse *
570 kseq_choose(struct kseq *kseq)
571 {
572 	struct kse *ke;
573 	struct runq *swap;
574 
575 	mtx_assert(&sched_lock, MA_OWNED);
576 	swap = NULL;
577 
578 	for (;;) {
579 		ke = runq_choose(kseq->ksq_curr);
580 		if (ke == NULL) {
581 			/*
582 			 * We already swapped once and didn't get anywhere.
583 			 */
584 			if (swap)
585 				break;
586 			swap = kseq->ksq_curr;
587 			kseq->ksq_curr = kseq->ksq_next;
588 			kseq->ksq_next = swap;
589 			continue;
590 		}
591 		/*
592 		 * If we encounter a slice of 0, the kse is in a
593 		 * TIMESHARE kse group and its nice value was too far out
594 		 * of the range that receives slices.
595 		 */
596 		if (ke->ke_slice == 0) {
597 			runq_remove(ke->ke_runq, ke);
598 			sched_slice(ke);
599 			ke->ke_runq = kseq->ksq_next;
600 			runq_add(ke->ke_runq, ke);
601 			continue;
602 		}
603 		return (ke);
604 	}
605 
606 	return (runq_choose(&kseq->ksq_idle));
607 }
608 
609 static void
610 kseq_setup(struct kseq *kseq)
611 {
612 	runq_init(&kseq->ksq_timeshare[0]);
613 	runq_init(&kseq->ksq_timeshare[1]);
614 	runq_init(&kseq->ksq_idle);
615 	kseq->ksq_curr = &kseq->ksq_timeshare[0];
616 	kseq->ksq_next = &kseq->ksq_timeshare[1];
617 	kseq->ksq_load = 0;
618 	kseq->ksq_load_timeshare = 0;
619 #ifdef SMP
620 	kseq->ksq_load_transferable = 0;
621 	kseq->ksq_idled = 0;
622 	kseq->ksq_assigned = NULL;
623 #endif
624 }
625 
626 static void
627 sched_setup(void *dummy)
628 {
629 #ifdef SMP
630 	int i;
631 #endif
632 
633 	slice_min = (hz/100);	/* 10ms */
634 	slice_max = (hz/7);	/* ~140ms */
635 
636 #ifdef SMP
637 	/* init kseqs */
638 	/* Create the idmap. */
639 #ifdef ULE_HTT_EXPERIMENTAL
640 	if (smp_topology == NULL) {
641 #else
642 	if (1) {
643 #endif
644 		for (i = 0; i < MAXCPU; i++) {
645 			kseq_setup(&kseq_cpu[i]);
646 			kseq_idmap[i] = &kseq_cpu[i];
647 			kseq_cpu[i].ksq_cpus = 1;
648 		}
649 	} else {
650 		int j;
651 
652 		for (i = 0; i < smp_topology->ct_count; i++) {
653 			struct cpu_group *cg;
654 
655 			cg = &smp_topology->ct_group[i];
656 			kseq_setup(&kseq_cpu[i]);
657 
658 			for (j = 0; j < MAXCPU; j++)
659 				if ((cg->cg_mask & (1 << j)) != 0)
660 					kseq_idmap[j] = &kseq_cpu[i];
661 			kseq_cpu[i].ksq_cpus = cg->cg_count;
662 		}
663 	}
664 	callout_init(&kseq_lb_callout, CALLOUT_MPSAFE);
665 	sched_balance(NULL);
666 #else
667 	kseq_setup(KSEQ_SELF());
668 #endif
669 	mtx_lock_spin(&sched_lock);
670 	kseq_load_add(KSEQ_SELF(), &kse0);
671 	mtx_unlock_spin(&sched_lock);
672 }
673 
674 /*
675  * Scale the scheduling priority according to the "interactivity" of this
676  * process.
677  */
678 static void
679 sched_priority(struct ksegrp *kg)
680 {
681 	int pri;
682 
683 	if (kg->kg_pri_class != PRI_TIMESHARE)
684 		return;
685 
686 	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
687 	pri += SCHED_PRI_BASE;
688 	pri += kg->kg_nice;
689 
690 	if (pri > PRI_MAX_TIMESHARE)
691 		pri = PRI_MAX_TIMESHARE;
692 	else if (pri < PRI_MIN_TIMESHARE)
693 		pri = PRI_MIN_TIMESHARE;
694 
695 	kg->kg_user_pri = pri;
696 
697 	return;
698 }
699 
700 /*
701  * Calculate a time slice based on the properties of the kseg and the runq
702  * that we're on.  This is only for PRI_TIMESHARE ksegrps.
703  */
704 static void
705 sched_slice(struct kse *ke)
706 {
707 	struct kseq *kseq;
708 	struct ksegrp *kg;
709 
710 	kg = ke->ke_ksegrp;
711 	kseq = KSEQ_CPU(ke->ke_cpu);
712 
713 	/*
714 	 * Rationale:
715 	 * KSEs in interactive ksegs get the minimum slice so that we
716 	 * quickly notice if one abuses its advantage.
717 	 *
718 	 * KSEs in non-interactive ksegs are assigned a slice that is
719 	 * based on the ksegs nice value relative to the least nice kseg
720 	 * on the run queue for this cpu.
721 	 *
722 	 * If the KSE is less nice than all others it gets the maximum
723 	 * slice and other KSEs will adjust their slice relative to
724 	 * this when they first expire.
725 	 *
726 	 * There is a 20 point window that starts relative to the least
727 	 * nice kse on the run queue.  Slice size is determined by
728 	 * the kse distance from the least nice ksegrp.
729 	 *
730 	 * If the kse is outside of the window it will get no slice
731 	 * and will be reevaluated each time it is selected on the
732 	 * run queue.  The exception to this is nice 0 ksegs when
733 	 * a nice -20 is running.  They are always granted a minimum
734 	 * slice.
735 	 */
736 	if (!SCHED_INTERACTIVE(kg)) {
737 		int nice;
738 
739 		nice = kg->kg_nice - kseq->ksq_nicemin;
740 		if (kseq->ksq_load_timeshare == 0 ||
741 		    kg->kg_nice < kseq->ksq_nicemin)
742 			ke->ke_slice = SCHED_SLICE_MAX;
743 		else if (nice <= SCHED_SLICE_NTHRESH)
744 			ke->ke_slice = SCHED_SLICE_NICE(nice);
745 		else if (kg->kg_nice == 0)
746 			ke->ke_slice = SCHED_SLICE_MIN;
747 		else
748 			ke->ke_slice = 0;
749 	} else
750 		ke->ke_slice = SCHED_SLICE_MIN;
751 
752 	CTR6(KTR_ULE,
753 	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
754 	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
755 	    kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));
756 
757 	return;
758 }
759 
760 /*
761  * This routine enforces a maximum limit on the amount of scheduling history
762  * kept.  It is called after either the slptime or runtime is adjusted.
763  * This routine will not operate correctly when slp or run times have been
764  * adjusted to more than double their maximum.
765  */
766 static void
767 sched_interact_update(struct ksegrp *kg)
768 {
769 	int sum;
770 
771 	sum = kg->kg_runtime + kg->kg_slptime;
772 	if (sum < SCHED_SLP_RUN_MAX)
773 		return;
774 	/*
775 	 * If we have exceeded by more than 1/5th then the algorithm below
776 	 * will not bring us back into range.  Dividing by two here forces
777 	 * us into the range of [3/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX].
778 	 */
779 	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
780 		kg->kg_runtime /= 2;
781 		kg->kg_slptime /= 2;
782 		return;
783 	}
784 	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
785 	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
786 }
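
/*
 * For example, with hz = 100 (SCHED_SLP_RUN_MAX = 512000): a ksegrp
 * with kg_runtime = 400000 and kg_slptime = 150000 sums to 550000,
 * over the limit but under 6/5ths of it, so both are scaled by 4/5
 * to 320000 and 120000.  The sleep/run ratio, and therefore the
 * interactivity score, is preserved while the history shrinks back
 * below the cap.
 */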
787 
788 static void
789 sched_interact_fork(struct ksegrp *kg)
790 {
791 	int ratio;
792 	int sum;
793 
794 	sum = kg->kg_runtime + kg->kg_slptime;
795 	if (sum > SCHED_SLP_RUN_FORK) {
796 		ratio = sum / SCHED_SLP_RUN_FORK;
797 		kg->kg_runtime /= ratio;
798 		kg->kg_slptime /= ratio;
799 	}
800 }
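
/*
 * For example, a parent that has accumulated three times
 * SCHED_SLP_RUN_FORK in combined history yields ratio = 3, so the
 * child starts with a third of each value: the parent's sleep/run
 * ratio is kept but most of its accumulated weight is dropped.
 */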
801 
802 static int
803 sched_interact_score(struct ksegrp *kg)
804 {
805 	int div;
806 
807 	if (kg->kg_runtime > kg->kg_slptime) {
808 		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
809 		return (SCHED_INTERACT_HALF +
810 		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
811 	} else if (kg->kg_slptime > kg->kg_runtime) {
812 		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
813 		return (kg->kg_runtime / div);
814 	}
815 
816 	/*
817 	 * This can happen if slptime and runtime are 0.
818 	 */
819 	return (0);
820 
821 }
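
/*
 * Two worked examples, with SCHED_INTERACT_MAX = 100: a kseg that has
 * slept for 8 seconds and run for 2 scores 2s / (8s / 50) = 12, which
 * is below SCHED_INTERACT_THRESH and therefore interactive; one that
 * has run for 8 seconds and slept for 2 scores 50 + (50 - 12) = 88.
 * Sleep-heavy ksegs thus land in [0, 50) and run-heavy ksegs in
 * (50, 100].
 */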
822 
823 /*
824  * This is only somewhat accurate since, given many processes of the same
825  * priority, they will switch when their slices run out, which will be
826  * at most SCHED_SLICE_MAX.
827  */
828 int
829 sched_rr_interval(void)
830 {
831 	return (SCHED_SLICE_MAX);
832 }
833 
834 static void
835 sched_pctcpu_update(struct kse *ke)
836 {
837 	/*
838 	 * Adjust counters and watermark for pctcpu calc.
839 	 */
840 	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
841 		/*
842 		 * Shift the tick count out so that the divide doesn't
843 		 * round away our results.
844 		 */
845 		ke->ke_ticks <<= 10;
846 		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
847 			    SCHED_CPU_TICKS;
848 		ke->ke_ticks >>= 10;
849 	} else
850 		ke->ke_ticks = 0;
851 	ke->ke_ltick = ticks;
852 	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
853 }
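
/*
 * For example, with hz = 100 (SCHED_CPU_TICKS = 1000): a kse that
 * accumulated 400 ticks over a 1600 tick window is rescaled to
 * ((400 << 10) / 1600) * 1000 >> 10 = 250 ticks, and its window is
 * pulled forward to the last SCHED_CPU_TICKS, preserving the ~25%
 * cpu estimate.
 */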
854 
855 void
856 sched_prio(struct thread *td, u_char prio)
857 {
858 	struct kse *ke;
859 
860 	ke = td->td_kse;
861 	mtx_assert(&sched_lock, MA_OWNED);
862 	if (TD_ON_RUNQ(td)) {
863 		/*
864 		 * If the priority has been elevated due to priority
865 		 * propagation, we may have to move ourselves to a new
866 		 * queue.  We still call adjustrunqueue below in case the
867 		 * kse needs to fix things up.
868 		 */
869 		if (prio < td->td_priority && ke &&
870 		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
871 		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
872 			runq_remove(ke->ke_runq, ke);
873 			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
874 			runq_add(ke->ke_runq, ke);
875 		}
876 		adjustrunqueue(td, prio);
877 	} else
878 		td->td_priority = prio;
879 }
880 
881 void
882 sched_switch(struct thread *td)
883 {
884 	struct thread *newtd;
885 	struct kse *ke;
886 
887 	mtx_assert(&sched_lock, MA_OWNED);
888 
889 	ke = td->td_kse;
890 
891 	td->td_last_kse = ke;
892 	td->td_lastcpu = td->td_oncpu;
893 	td->td_oncpu = NOCPU;
894 	td->td_flags &= ~TDF_NEEDRESCHED;
895 
896 	if (TD_IS_RUNNING(td)) {
897 		if (td->td_proc->p_flag & P_SA) {
898 			kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
899 			setrunqueue(td);
900 		} else {
901 			/*
902 			 * This queue is always correct except for idle threads,
903 			 * which have a higher priority due to priority
904 			 * propagation.
905 			 */
906 			if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE) {
907 				if (td->td_priority < PRI_MIN_IDLE)
908 					ke->ke_runq = KSEQ_SELF()->ksq_curr;
909 				else
910 					ke->ke_runq = &KSEQ_SELF()->ksq_idle;
911 			}
912 			kseq_runq_add(KSEQ_SELF(), ke);
913 		}
914 	} else {
915 		if (ke->ke_runq)
916 			kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
917 		/*
918 		 * We will not be on the run queue, so we must be
919 		 * sleeping or similar.
920 		 */
921 		if (td->td_proc->p_flag & P_SA)
922 			kse_reassign(ke);
923 	}
924 	newtd = choosethread();
925 	if (td != newtd)
926 		cpu_switch(td, newtd);
927 	sched_lock.mtx_lock = (uintptr_t)td;
928 
929 	td->td_oncpu = PCPU_GET(cpuid);
930 }
931 
932 void
933 sched_nice(struct ksegrp *kg, int nice)
934 {
935 	struct kse *ke;
936 	struct thread *td;
937 	struct kseq *kseq;
938 
939 	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
940 	mtx_assert(&sched_lock, MA_OWNED);
941 	/*
942 	 * We need to adjust the nice counts for running KSEs.
943 	 */
944 	if (kg->kg_pri_class == PRI_TIMESHARE)
945 		FOREACH_KSE_IN_GROUP(kg, ke) {
946 			if (ke->ke_runq == NULL)
947 				continue;
948 			kseq = KSEQ_CPU(ke->ke_cpu);
949 			kseq_nice_rem(kseq, kg->kg_nice);
950 			kseq_nice_add(kseq, nice);
951 		}
952 	kg->kg_nice = nice;
953 	sched_priority(kg);
954 	FOREACH_THREAD_IN_GROUP(kg, td)
955 		td->td_flags |= TDF_NEEDRESCHED;
956 }
957 
958 void
959 sched_sleep(struct thread *td, u_char prio)
960 {
961 	mtx_assert(&sched_lock, MA_OWNED);
962 
963 	td->td_slptime = ticks;
964 	td->td_priority = prio;
965 
966 	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
967 	    td->td_kse, td->td_slptime);
968 }
969 
970 void
971 sched_wakeup(struct thread *td)
972 {
973 	mtx_assert(&sched_lock, MA_OWNED);
974 
975 	/*
976 	 * Let the kseg know how long we slept for.  This is because process
977 	 * interactivity behavior is modeled in the kseg.
978 	 */
979 	if (td->td_slptime) {
980 		struct ksegrp *kg;
981 		int hzticks;
982 
983 		kg = td->td_ksegrp;
984 		hzticks = (ticks - td->td_slptime) << 10;
985 		if (hzticks >= SCHED_SLP_RUN_MAX) {
986 			kg->kg_slptime = SCHED_SLP_RUN_MAX;
987 			kg->kg_runtime = 1;
988 		} else {
989 			kg->kg_slptime += hzticks;
990 			sched_interact_update(kg);
991 		}
992 		sched_priority(kg);
993 		if (td->td_kse)
994 			sched_slice(td->td_kse);
995 		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
996 		    td->td_kse, hzticks);
997 		td->td_slptime = 0;
998 	}
999 	setrunqueue(td);
1000 }
1001 
1002 /*
1003  * Penalize the parent for creating a new child and initialize the child's
1004  * priority.
1005  */
1006 void
1007 sched_fork(struct proc *p, struct proc *p1)
1008 {
1009 
1010 	mtx_assert(&sched_lock, MA_OWNED);
1011 
1012 	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
1013 	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
1014 	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
1015 }
1016 
1017 void
1018 sched_fork_kse(struct kse *ke, struct kse *child)
1019 {
1020 
1021 	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
1022 	child->ke_cpu = ke->ke_cpu;
1023 	child->ke_runq = NULL;
1024 
1025 	/* Grab our parents cpu estimation information. */
1026 	child->ke_ticks = ke->ke_ticks;
1027 	child->ke_ltick = ke->ke_ltick;
1028 	child->ke_ftick = ke->ke_ftick;
1029 }
1030 
1031 void
1032 sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
1033 {
1034 	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
1035 
1036 	child->kg_slptime = kg->kg_slptime;
1037 	child->kg_runtime = kg->kg_runtime;
1038 	child->kg_user_pri = kg->kg_user_pri;
1039 	child->kg_nice = kg->kg_nice;
1040 	sched_interact_fork(child);
1041 	kg->kg_runtime += tickincr << 10;
1042 	sched_interact_update(kg);
1043 
1044 	CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)",
1045 	    kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime,
1046 	    child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime);
1047 }
1048 
1049 void
1050 sched_fork_thread(struct thread *td, struct thread *child)
1051 {
1052 }
1053 
1054 void
1055 sched_class(struct ksegrp *kg, int class)
1056 {
1057 	struct kseq *kseq;
1058 	struct kse *ke;
1059 	int nclass;
1060 	int oclass;
1061 
1062 	mtx_assert(&sched_lock, MA_OWNED);
1063 	if (kg->kg_pri_class == class)
1064 		return;
1065 
1066 	nclass = PRI_BASE(class);
1067 	oclass = PRI_BASE(kg->kg_pri_class);
1068 	FOREACH_KSE_IN_GROUP(kg, ke) {
1069 		if (ke->ke_state != KES_ONRUNQ &&
1070 		    ke->ke_state != KES_THREAD)
1071 			continue;
1072 		kseq = KSEQ_CPU(ke->ke_cpu);
1073 
1074 #ifdef SMP
1075 		/*
1076 		 * On SMP if we're on the RUNQ we must adjust the transferable
1077 		 * count because we could be changing to or from an interrupt
1078 		 * class.
1079 		 */
1080 		if (ke->ke_state == KES_ONRUNQ) {
1081 			if (KSE_CAN_MIGRATE(ke, oclass))
1082 				kseq->ksq_load_transferable--;
1083 			if (KSE_CAN_MIGRATE(ke, nclass))
1084 				kseq->ksq_load_transferable++;
1085 		}
1086 #endif
1087 		if (oclass == PRI_TIMESHARE) {
1088 			kseq->ksq_load_timeshare--;
1089 			kseq_nice_rem(kseq, kg->kg_nice);
1090 		}
1091 		if (nclass == PRI_TIMESHARE) {
1092 			kseq->ksq_load_timeshare++;
1093 			kseq_nice_add(kseq, kg->kg_nice);
1094 		}
1095 	}
1096 
1097 	kg->kg_pri_class = class;
1098 }
1099 
1100 /*
1101  * Return some of the child's priority and interactivity to the parent.
1102  */
1103 void
1104 sched_exit(struct proc *p, struct proc *child)
1105 {
1106 	mtx_assert(&sched_lock, MA_OWNED);
1107 	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
1108 	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
1109 }
1110 
1111 void
1112 sched_exit_kse(struct kse *ke, struct kse *child)
1113 {
1114 	kseq_load_rem(KSEQ_CPU(child->ke_cpu), child);
1115 }
1116 
1117 void
1118 sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
1119 {
1120 	/* kg->kg_slptime += child->kg_slptime; */
1121 	kg->kg_runtime += child->kg_runtime;
1122 	sched_interact_update(kg);
1123 }
1124 
1125 void
1126 sched_exit_thread(struct thread *td, struct thread *child)
1127 {
1128 }
1129 
1130 void
1131 sched_clock(struct thread *td)
1132 {
1133 	struct kseq *kseq;
1134 	struct ksegrp *kg;
1135 	struct kse *ke;
1136 
1137 	/*
1138 	 * sched_setup() apparently happens prior to stathz being set.  We
1139 	 * need to resolve the timers earlier in the boot so we can avoid
1140 	 * calculating this here.
1141 	 */
1142 	if (realstathz == 0) {
1143 		realstathz = stathz ? stathz : hz;
1144 		tickincr = hz / realstathz;
1145 		/*
1146 		 * XXX This does not work for values of stathz that are much
1147 		 * larger than hz.
1148 		 */
1149 		if (tickincr == 0)
1150 			tickincr = 1;
1151 	}
1152 
1153 	mtx_assert(&sched_lock, MA_OWNED);
1154 	KASSERT((td != NULL), ("sched_clock: null thread pointer"));
1155 
1156 	ke = td->td_kse;
1157 	kg = ke->ke_ksegrp;
1158 
1159 	/* Adjust ticks for pctcpu */
1160 	ke->ke_ticks++;
1161 	ke->ke_ltick = ticks;
1162 
1163 	/* Go up to one second beyond our max and then trim back down */
1164 	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
1165 		sched_pctcpu_update(ke);
1166 
1167 	if (td->td_flags & TDF_IDLETD)
1168 		return;
1169 
1170 	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
1171 	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
1172 	/*
1173 	 * We only do slicing code for TIMESHARE ksegrps.
1174 	 */
1175 	if (kg->kg_pri_class != PRI_TIMESHARE)
1176 		return;
1177 	/*
1178 	 * We used a tick; charge it to the ksegrp so that we can
1179 	 * compute our interactivity.
1180 	 */
1181 	kg->kg_runtime += tickincr << 10;
1182 	sched_interact_update(kg);
1183 
1184 	/*
1185 	 * We used up one time slice.
1186 	 */
1187 	if (--ke->ke_slice > 0)
1188 		return;
1189 	/*
1190 	 * We're out of time, recompute priorities and requeue.
1191 	 */
1192 	kseq = KSEQ_SELF();
1193 	kseq_load_rem(kseq, ke);
1194 	sched_priority(kg);
1195 	sched_slice(ke);
1196 	if (SCHED_CURR(kg, ke))
1197 		ke->ke_runq = kseq->ksq_curr;
1198 	else
1199 		ke->ke_runq = kseq->ksq_next;
1200 	kseq_load_add(kseq, ke);
1201 	td->td_flags |= TDF_NEEDRESCHED;
1202 }
1203 
1204 int
1205 sched_runnable(void)
1206 {
1207 	struct kseq *kseq;
1208 	int load;
1209 
1210 	load = 1;
1211 
1212 	kseq = KSEQ_SELF();
1213 #ifdef SMP
1214 	if (kseq->ksq_assigned) {
1215 		mtx_lock_spin(&sched_lock);
1216 		kseq_assign(kseq);
1217 		mtx_unlock_spin(&sched_lock);
1218 	}
1219 #endif
1220 	if ((curthread->td_flags & TDF_IDLETD) != 0) {
1221 		if (kseq->ksq_load > 0)
1222 			goto out;
1223 	} else
1224 		if (kseq->ksq_load - 1 > 0)
1225 			goto out;
1226 	load = 0;
1227 out:
1228 	return (load);
1229 }
1230 
1231 void
1232 sched_userret(struct thread *td)
1233 {
1234 	struct ksegrp *kg;
1235 
1236 	kg = td->td_ksegrp;
1237 
1238 	if (td->td_priority != kg->kg_user_pri) {
1239 		mtx_lock_spin(&sched_lock);
1240 		td->td_priority = kg->kg_user_pri;
1241 		mtx_unlock_spin(&sched_lock);
1242 	}
1243 }
1244 
1245 struct kse *
1246 sched_choose(void)
1247 {
1248 	struct kseq *kseq;
1249 	struct kse *ke;
1250 
1251 	mtx_assert(&sched_lock, MA_OWNED);
1252 	kseq = KSEQ_SELF();
1253 #ifdef SMP
1254 	if (kseq->ksq_assigned)
1255 		kseq_assign(kseq);
1256 #endif
1257 	ke = kseq_choose(kseq);
1258 	if (ke) {
1259 #ifdef SMP
1260 		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
1261 			kseq_setidle(kseq);
1262 #endif
1263 		kseq_runq_rem(kseq, ke);
1264 		ke->ke_state = KES_THREAD;
1265 
1266 		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
1267 			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
1268 			    ke, ke->ke_runq, ke->ke_slice,
1269 			    ke->ke_thread->td_priority);
1270 		}
1271 		return (ke);
1272 	}
1273 #ifdef SMP
1274 	kseq_setidle(kseq);
1275 #endif
1276 	return (NULL);
1277 }
1278 
1279 void
1280 sched_add(struct thread *td)
1281 {
1282 	struct kseq *kseq;
1283 	struct ksegrp *kg;
1284 	struct kse *ke;
1285 	int class;
1286 
1287 	mtx_assert(&sched_lock, MA_OWNED);
1288 	ke = td->td_kse;
1289 	kg = td->td_ksegrp;
1290 	if (ke->ke_flags & KEF_ASSIGNED)
1291 		return;
1292 	kseq = KSEQ_SELF();
1293 	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
1294 	KASSERT((ke->ke_thread->td_kse != NULL),
1295 	    ("sched_add: No KSE on thread"));
1296 	KASSERT(ke->ke_state != KES_ONRUNQ,
1297 	    ("sched_add: kse %p (%s) already in run queue", ke,
1298 	    ke->ke_proc->p_comm));
1299 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1300 	    ("sched_add: process swapped out"));
1301 	KASSERT(ke->ke_runq == NULL,
1302 	    ("sched_add: KSE %p is still assigned to a run queue", ke));
1303 
1304 	class = PRI_BASE(kg->kg_pri_class);
1305 	switch (class) {
1306 	case PRI_ITHD:
1307 	case PRI_REALTIME:
1308 		ke->ke_runq = kseq->ksq_curr;
1309 		ke->ke_slice = SCHED_SLICE_MAX;
1310 		ke->ke_cpu = PCPU_GET(cpuid);
1311 		break;
1312 	case PRI_TIMESHARE:
1313 #ifdef SMP
1314 		if (ke->ke_cpu != PCPU_GET(cpuid)) {
1315 			kseq_notify(ke, ke->ke_cpu);
1316 			return;
1317 		}
1318 #endif
1319 		if (SCHED_CURR(kg, ke))
1320 			ke->ke_runq = kseq->ksq_curr;
1321 		else
1322 			ke->ke_runq = kseq->ksq_next;
1323 		break;
1324 	case PRI_IDLE:
1325 #ifdef SMP
1326 		if (ke->ke_cpu != PCPU_GET(cpuid)) {
1327 			kseq_notify(ke, ke->ke_cpu);
1328 			return;
1329 		}
1330 #endif
1331 		/*
1332 		 * This is for priority prop.
1333 		 */
1334 		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
1335 			ke->ke_runq = kseq->ksq_curr;
1336 		else
1337 			ke->ke_runq = &kseq->ksq_idle;
1338 		ke->ke_slice = SCHED_SLICE_MIN;
1339 		break;
1340 	default:
1341 		panic("Unknown pri class.");
1342 		break;
1343 	}
1344 #ifdef SMP
1345 	/*
1346 	 * If there are any idle processors, give them our extra load.  The
1347 	 * threshold at which we start to reassign kses has a large impact
1348 	 * on the overall performance of the system.  Tuned too high and
1349 	 * some CPUs may idle.  Too low and there will be excess migration
1350 	 * and context switches.
1351 	 */
1352 	if (kseq->ksq_load_transferable > kseq->ksq_cpus &&
1353 	    KSE_CAN_MIGRATE(ke, class) && kseq_idle) {
1354 		int cpu;
1355 
1356 		/*
1357 		 * Multiple cpus could find this bit simultaneously but the
1358 		 * race shouldn't be terrible.
1359 		 */
1360 		cpu = ffs(kseq_idle);
1361 		if (cpu) {
1362 			cpu--;
1363 			atomic_clear_int(&kseq_idle, 1 << cpu);
1364 			ke->ke_cpu = cpu;
1365 			ke->ke_runq = NULL;
1366 			kseq_notify(ke, cpu);
1367 			return;
1368 		}
1369 	}
1370 	if (kseq->ksq_idled &&
1371 	    (class == PRI_TIMESHARE || class == PRI_REALTIME)) {
1372 		atomic_clear_int(&kseq_idle, PCPU_GET(cpumask));
1373 		kseq->ksq_idled = 0;
1374 	}
1375 #endif
1376 	if (td->td_priority < curthread->td_priority)
1377 		curthread->td_flags |= TDF_NEEDRESCHED;
1378 
1379 	ke->ke_ksegrp->kg_runq_kses++;
1380 	ke->ke_state = KES_ONRUNQ;
1381 
1382 	kseq_runq_add(kseq, ke);
1383 	kseq_load_add(kseq, ke);
1384 }
1385 
1386 void
1387 sched_rem(struct thread *td)
1388 {
1389 	struct kseq *kseq;
1390 	struct kse *ke;
1391 
1392 	ke = td->td_kse;
1393 	/*
1394 	 * It is safe to just return here because sched_rem() is only ever
1395 	 * used in places where we're immediately going to add the
1396 	 * kse back on again.  In that case it'll be added with the correct
1397 	 * thread and priority when the caller drops the sched_lock.
1398 	 */
1399 	if (ke->ke_flags & KEF_ASSIGNED)
1400 		return;
1401 	mtx_assert(&sched_lock, MA_OWNED);
1402 	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
1403 
1404 	ke->ke_state = KES_THREAD;
1405 	ke->ke_ksegrp->kg_runq_kses--;
1406 	kseq = KSEQ_CPU(ke->ke_cpu);
1407 	kseq_runq_rem(kseq, ke);
1408 	kseq_load_rem(kseq, ke);
1409 }
1410 
1411 fixpt_t
1412 sched_pctcpu(struct thread *td)
1413 {
1414 	fixpt_t pctcpu;
1415 	struct kse *ke;
1416 
1417 	pctcpu = 0;
1418 	ke = td->td_kse;
1419 	if (ke == NULL)
1420 		return (0);
1421 
1422 	mtx_lock_spin(&sched_lock);
1423 	if (ke->ke_ticks) {
1424 		int rtick;
1425 
1426 		/*
1427 		 * Don't update more frequently than twice a second.  Updating
1428 		 * more often causes the cpu usage to decay away too quickly
1429 		 * due to rounding errors.
1430 		 */
1431 		if (ke->ke_ltick < (ticks - (hz / 2)))
1432 			sched_pctcpu_update(ke);
1433 		/* How many rticks per second? */
1434 		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
1435 		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
1436 	}
1437 
1438 	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
1439 	mtx_unlock_spin(&sched_lock);
1440 
1441 	return (pctcpu);
1442 }
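
/*
 * For example, assuming realstathz = 1000 (stathz unset, hz = 1000):
 * a kse with ke_ticks = 5000 over the ten second window averages
 * rtick = 5000 / SCHED_CPU_TIME = 500 ticks per second, so pctcpu =
 * (FSCALE * 500) / 1000 = FSCALE / 2, which ps(1) reports as 50%.
 */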
1443 
1444 void
1445 sched_bind(struct thread *td, int cpu)
1446 {
1447 	struct kse *ke;
1448 
1449 	mtx_assert(&sched_lock, MA_OWNED);
1450 	ke = td->td_kse;
1451 #ifndef SMP
1452 	ke->ke_flags |= KEF_BOUND;
1453 #else
1454 	if (PCPU_GET(cpuid) == cpu) {
1455 		ke->ke_flags |= KEF_BOUND;
1456 		return;
1457 	}
1458 	/* sched_rem without the runq_remove */
1459 	ke->ke_state = KES_THREAD;
1460 	ke->ke_ksegrp->kg_runq_kses--;
1461 	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1462 	ke->ke_cpu = cpu;
1463 	kseq_notify(ke, cpu);
1464 	/* When we return from mi_switch we'll be on the correct cpu. */
1465 	td->td_proc->p_stats->p_ru.ru_nvcsw++;
1466 	mi_switch();
1467 #endif
1468 }
1469 
1470 void
1471 sched_unbind(struct thread *td)
1472 {
1473 	mtx_assert(&sched_lock, MA_OWNED);
1474 	td->td_kse->ke_flags &= ~KEF_BOUND;
1475 }
1476 
1477 int
1478 sched_sizeof_kse(void)
1479 {
1480 	return (sizeof(struct kse) + sizeof(struct ke_sched));
1481 }
1482 
1483 int
1484 sched_sizeof_ksegrp(void)
1485 {
1486 	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
1487 }
1488 
1489 int
1490 sched_sizeof_proc(void)
1491 {
1492 	return (sizeof(struct proc));
1493 }
1494 
1495 int
1496 sched_sizeof_thread(void)
1497 {
1498 	return (sizeof(struct thread) + sizeof(struct td_sched));
1499 }
1500