xref: /freebsd/sys/kern/sched_ule.c (revision 21381d1b9ed390b0b5e711af8bca90fc8b9cb4aa)
1 /*-
2  * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_sched.h"
31 
32 #define kse td_sched
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kdb.h>
37 #include <sys/kernel.h>
38 #include <sys/ktr.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/proc.h>
42 #include <sys/resource.h>
43 #include <sys/resourcevar.h>
44 #include <sys/sched.h>
45 #include <sys/smp.h>
46 #include <sys/sx.h>
47 #include <sys/sysctl.h>
48 #include <sys/sysproto.h>
49 #include <sys/turnstile.h>
50 #include <sys/vmmeter.h>
51 #ifdef KTRACE
52 #include <sys/uio.h>
53 #include <sys/ktrace.h>
54 #endif
55 
56 #ifdef HWPMC_HOOKS
57 #include <sys/pmckern.h>
58 #endif
59 
60 #include <machine/cpu.h>
61 #include <machine/smp.h>
62 
63 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
64  * XXX This is bogus compatibility crap for ps
65 static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
66 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
67 
68 static void sched_setup(void *dummy);
69 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
70 
71 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
72 
73 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
74     "Scheduler name");
75 
76 static int slice_min = 1;
77 SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");
78 
79 static int slice_max = 10;
80 SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");
81 
82 int realstathz;
83 int tickincr = 1;
84 
85 /*
86  * The following data structures are allocated within their parent structure
87  * but are scheduler specific.
88  */
89 /*
90  * The schedulable entity that can be given a context to run.  A process may
91  * have several of these.
92  */
93 struct kse {
94 	TAILQ_ENTRY(kse) ke_procq;	/* (j/z) Run queue. */
95 	int		ke_flags;	/* (j) KEF_* flags. */
96 	struct thread	*ke_thread;	/* (*) Active associated thread. */
97 	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
98 	char		ke_rqindex;	/* (j) Run queue index. */
99 	enum {
100 		KES_THREAD = 0x0,	/* slaved to thread state */
101 		KES_ONRUNQ
102 	} ke_state;			/* (j) thread sched specific status. */
103 	int		ke_slptime;
104 	int		ke_slice;
105 	struct runq	*ke_runq;
106 	u_char		ke_cpu;		/* CPU that we have affinity for. */
107 	/* The following variables are only used for pctcpu calculation */
108 	int		ke_ltick;	/* Last tick that we were running on */
109 	int		ke_ftick;	/* First tick that we were running on */
110 	int		ke_ticks;	/* Tick count */
111 
112 };
113 #define	td_kse			td_sched
114 #define	td_slptime		td_kse->ke_slptime
115 #define ke_proc			ke_thread->td_proc
116 #define ke_ksegrp		ke_thread->td_ksegrp
117 #define	ke_assign		ke_procq.tqe_next
118 /* flags kept in ke_flags */
119 #define	KEF_ASSIGNED	0x0001		/* Thread is being migrated. */
120 #define	KEF_BOUND	0x0002		/* Thread can not migrate. */
121 #define	KEF_XFERABLE	0x0004		/* Thread was added as transferable. */
122 #define	KEF_HOLD	0x0008		/* Thread is temporarily bound. */
123 #define	KEF_REMOVED	0x0010		/* Thread was removed while ASSIGNED */
124 #define	KEF_INTERNAL	0x0020		/* Thread added due to migration. */
125 #define	KEF_DIDRUN	0x02000		/* Thread actually ran. */
126 #define	KEF_EXIT	0x04000		/* Thread is being killed. */
127 
128 struct kg_sched {
129 	struct thread	*skg_last_assigned; /* (j) Last thread assigned to */
130 					   /* the system scheduler */
131 	int	skg_slptime;		/* Number of ticks we vol. slept */
132 	int	skg_runtime;		/* Number of ticks we were running */
133 	int	skg_avail_opennings;	/* (j) Num unfilled slots in group.*/
134 	int	skg_concurrency;	/* (j) Num threads requested in group.*/
135 };
136 #define kg_last_assigned	kg_sched->skg_last_assigned
137 #define kg_avail_opennings	kg_sched->skg_avail_opennings
138 #define kg_concurrency		kg_sched->skg_concurrency
139 #define kg_runtime		kg_sched->skg_runtime
140 #define kg_slptime		kg_sched->skg_slptime
141 
142 #define SLOT_RELEASE(kg)	(kg)->kg_avail_opennings++
143 #define	SLOT_USE(kg)		(kg)->kg_avail_opennings--
144 
145 static struct kse kse0;
146 static struct kg_sched kg_sched0;
147 
148 /*
149  * The priority is primarily determined by the interactivity score.  Thus, we
150  * give lower(better) priorities to kse groups that use less CPU.  The nice
151  * value is then directly added to this to allow nice to have some effect
152  * on latency.
153  *
154  * PRI_RANGE:	Total priority range for timeshare threads.
155  * PRI_NRESV:	Number of nice values.
156  * PRI_BASE:	The start of the dynamic range.
157  */
158 #define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
159 #define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
160 #define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
161 #define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
162 #define	SCHED_PRI_INTERACT(score)					\
163     ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)
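/*
 * Worked example (an illustrative sketch; the arithmetic assumes the
 * stock priority layout in which the timeshare range spans 64
 * priorities): an interactivity score of 30 gives
 * SCHED_PRI_INTERACT(30) = 30 * 64 / 100 = 19, so a nice-0 ksegrp with
 * that score runs at PRI_MIN_TIMESHARE + 19.  Nice is added directly on
 * top of that, so nice +5 pushes the same group five priorities closer
 * to PRI_MAX_TIMESHARE.  The disabled sketch below mirrors the clamping
 * done by sched_priority() further down in this file.
 */
#if 0
static int
example_user_pri(struct ksegrp *kg)
{
	int pri;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;		/* Start of the timeshare range. */
	pri += kg->kg_proc->p_nice;	/* Nice contributes directly. */
	if (pri > PRI_MAX_TIMESHARE)	/* Clamp into the timeshare range. */
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;
	return (pri);
}
#endif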
164 
165 /*
166  * These determine the interactivity of a process.
167  *
168  * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
169  *		before throttling back.
170  * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
171  * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
172  * INTERACT_THRESH:	Threshold for placement on the current runq.
173  */
174 #define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
175 #define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
176 #define	SCHED_INTERACT_MAX	(100)
177 #define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
178 #define	SCHED_INTERACT_THRESH	(30)
179 
180 /*
181  * These parameters and macros determine the size of the time slice that is
182  * granted to each thread.
183  *
184  * SLICE_MIN:	Minimum time slice granted, in units of ticks.
185  * SLICE_MAX:	Maximum time slice granted.
186  * SLICE_RANGE:	Range of available time slices scaled by hz.
187  * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
188  * SLICE_NICE:  Determine the amount of slice granted to a scaled nice.
189  * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
190  */
191 #define	SCHED_SLICE_MIN			(slice_min)
192 #define	SCHED_SLICE_MAX			(slice_max)
193 #define	SCHED_SLICE_INTERACTIVE		(slice_max)
194 #define	SCHED_SLICE_NTHRESH	(SCHED_PRI_NHALF - 1)
195 #define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
196 #define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
197 #define	SCHED_SLICE_NICE(nice)						\
198     (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))
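/*
 * Illustrative numbers (a sketch assuming hz = 1000, for which
 * sched_setup() below picks slice_min = 10 and slice_max = 142 ticks):
 * SCHED_SLICE_RANGE is then 133 and SCHED_SLICE_NTHRESH is 19, so each
 * point of nice distance from the least nice group on the queue costs
 * 133 / 19 = 7 ticks of slice:
 *
 *	distance  0 -> SCHED_SLICE_NICE(0)  = 142 ticks
 *	distance  5 -> SCHED_SLICE_NICE(5)  = 107 ticks
 *	distance 19 -> SCHED_SLICE_NICE(19) =   9 ticks
 *
 * Groups further than SCHED_SLICE_NTHRESH points from the least nice
 * group receive no slice at all; see sched_slice().
 */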
199 
200 /*
201  * This macro determines whether or not the thread belongs on the current or
202  * next run queue.
203  */
204 #define	SCHED_INTERACTIVE(kg)						\
205     (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
206 #define	SCHED_CURR(kg, ke)						\
207     ((ke->ke_thread->td_flags & TDF_BORROWING) || SCHED_INTERACTIVE(kg))
208 
209 /*
210  * Cpu percentage computation macros and defines.
211  *
212  * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
213  * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
214  */
215 
216 #define	SCHED_CPU_TIME	10
217 #define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)
218 
219 /*
220  * kseq - per processor runqs and statistics.
221  */
222 struct kseq {
223 	struct runq	ksq_idle;		/* Queue of IDLE threads. */
224 	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
225 	struct runq	*ksq_next;		/* Next timeshare queue. */
226 	struct runq	*ksq_curr;		/* Current queue. */
227 	int		ksq_load_timeshare;	/* Load for timeshare. */
228 	int		ksq_load;		/* Aggregate load. */
229 	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
230 	short		ksq_nicemin;		/* Least nice. */
231 #ifdef SMP
232 	int			ksq_transferable;
233 	LIST_ENTRY(kseq)	ksq_siblings;	/* Next in kseq group. */
234 	struct kseq_group	*ksq_group;	/* Our processor group. */
235 	volatile struct kse	*ksq_assigned;	/* assigned by another CPU. */
236 #else
237 	int		ksq_sysload;		/* For loadavg, !ITHD load. */
238 #endif
239 };
240 
241 #ifdef SMP
242 /*
243  * kseq groups are groups of processors which can cheaply share threads.  When
244  * one processor in the group goes idle it will check the runqs of the other
245  * processors in its group prior to halting and waiting for an interrupt.
246  * These groups are suitable for SMT (Simultaneous Multi-Threading) and not NUMA.
247  * In a NUMA environment we'd want an idle bitmap per group and a two-tiered
248  * load balancer.
249  */
250 struct kseq_group {
251 	int	ksg_cpus;		/* Count of CPUs in this kseq group. */
252 	cpumask_t ksg_cpumask;		/* Mask of cpus in this group. */
253 	cpumask_t ksg_idlemask;		/* Idle cpus in this group. */
254 	cpumask_t ksg_mask;		/* Bit mask for first cpu. */
255 	int	ksg_load;		/* Total load of this group. */
256 	int	ksg_transferable;	/* Transferable load of this group. */
257 	LIST_HEAD(, kseq)	ksg_members; /* Linked list of all members. */
258 };
259 #endif
260 
261 /*
262  * One kse queue per processor.
263  */
264 #ifdef SMP
265 static cpumask_t kseq_idle;
266 static int ksg_maxid;
267 static struct kseq	kseq_cpu[MAXCPU];
268 static struct kseq_group kseq_groups[MAXCPU];
269 static int bal_tick;
270 static int gbal_tick;
271 static int balance_groups;
272 
273 #define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
274 #define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
275 #define	KSEQ_ID(x)	((x) - kseq_cpu)
276 #define	KSEQ_GROUP(x)	(&kseq_groups[(x)])
277 #else	/* !SMP */
278 static struct kseq	kseq_cpu;
279 
280 #define	KSEQ_SELF()	(&kseq_cpu)
281 #define	KSEQ_CPU(x)	(&kseq_cpu)
282 #endif
283 
284 static void slot_fill(struct ksegrp *);
285 static struct kse *sched_choose(void);		/* XXX Should be thread * */
286 static void sched_slice(struct kse *);
287 static void sched_priority(struct ksegrp *);
288 static void sched_thread_priority(struct thread *, u_char);
289 static int sched_interact_score(struct ksegrp *);
290 static void sched_interact_update(struct ksegrp *);
291 static void sched_interact_fork(struct ksegrp *);
292 static void sched_pctcpu_update(struct kse *);
293 
294 /* Operations on per processor queues */
295 static struct kse * kseq_choose(struct kseq *);
296 static void kseq_setup(struct kseq *);
297 static void kseq_load_add(struct kseq *, struct kse *);
298 static void kseq_load_rem(struct kseq *, struct kse *);
299 static __inline void kseq_runq_add(struct kseq *, struct kse *, int);
300 static __inline void kseq_runq_rem(struct kseq *, struct kse *);
301 static void kseq_nice_add(struct kseq *, int);
302 static void kseq_nice_rem(struct kseq *, int);
303 void kseq_print(int cpu);
304 #ifdef SMP
305 static int kseq_transfer(struct kseq *, struct kse *, int);
306 static struct kse *runq_steal(struct runq *);
307 static void sched_balance(void);
308 static void sched_balance_groups(void);
309 static void sched_balance_group(struct kseq_group *);
310 static void sched_balance_pair(struct kseq *, struct kseq *);
311 static void kseq_move(struct kseq *, int);
312 static int kseq_idled(struct kseq *);
313 static void kseq_notify(struct kse *, int);
314 static void kseq_assign(struct kseq *);
315 static struct kse *kseq_steal(struct kseq *, int);
316 #define	KSE_CAN_MIGRATE(ke)						\
317     ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
318 #endif
319 
320 void
321 kseq_print(int cpu)
322 {
323 	struct kseq *kseq;
324 	int i;
325 
326 	kseq = KSEQ_CPU(cpu);
327 
328 	printf("kseq:\n");
329 	printf("\tload:           %d\n", kseq->ksq_load);
330 	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
331 #ifdef SMP
332 	printf("\tload transferable: %d\n", kseq->ksq_transferable);
333 #endif
334 	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
335 	printf("\tnice counts:\n");
336 	for (i = 0; i < SCHED_PRI_NRESV; i++)
337 		if (kseq->ksq_nice[i])
338 			printf("\t\t%d = %d\n",
339 			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
340 }
341 
342 static __inline void
343 kseq_runq_add(struct kseq *kseq, struct kse *ke, int flags)
344 {
345 #ifdef SMP
346 	if (KSE_CAN_MIGRATE(ke)) {
347 		kseq->ksq_transferable++;
348 		kseq->ksq_group->ksg_transferable++;
349 		ke->ke_flags |= KEF_XFERABLE;
350 	}
351 #endif
352 	runq_add(ke->ke_runq, ke, flags);
353 }
354 
355 static __inline void
356 kseq_runq_rem(struct kseq *kseq, struct kse *ke)
357 {
358 #ifdef SMP
359 	if (ke->ke_flags & KEF_XFERABLE) {
360 		kseq->ksq_transferable--;
361 		kseq->ksq_group->ksg_transferable--;
362 		ke->ke_flags &= ~KEF_XFERABLE;
363 	}
364 #endif
365 	runq_remove(ke->ke_runq, ke);
366 }
367 
368 static void
369 kseq_load_add(struct kseq *kseq, struct kse *ke)
370 {
371 	int class;
372 	mtx_assert(&sched_lock, MA_OWNED);
373 	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
374 	if (class == PRI_TIMESHARE)
375 		kseq->ksq_load_timeshare++;
376 	kseq->ksq_load++;
377 	CTR1(KTR_SCHED, "load: %d", kseq->ksq_load);
378 	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
379 #ifdef SMP
380 		kseq->ksq_group->ksg_load++;
381 #else
382 		kseq->ksq_sysload++;
383 #endif
384 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
385 		kseq_nice_add(kseq, ke->ke_proc->p_nice);
386 }
387 
388 static void
389 kseq_load_rem(struct kseq *kseq, struct kse *ke)
390 {
391 	int class;
392 	mtx_assert(&sched_lock, MA_OWNED);
393 	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
394 	if (class == PRI_TIMESHARE)
395 		kseq->ksq_load_timeshare--;
396 	if (class != PRI_ITHD  && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
397 #ifdef SMP
398 		kseq->ksq_group->ksg_load--;
399 #else
400 		kseq->ksq_sysload--;
401 #endif
402 	kseq->ksq_load--;
403 	CTR1(KTR_SCHED, "load: %d", kseq->ksq_load);
404 	ke->ke_runq = NULL;
405 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
406 		kseq_nice_rem(kseq, ke->ke_proc->p_nice);
407 }
408 
409 static void
410 kseq_nice_add(struct kseq *kseq, int nice)
411 {
412 	mtx_assert(&sched_lock, MA_OWNED);
413 	/* Normalize to zero. */
414 	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
415 	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
416 		kseq->ksq_nicemin = nice;
417 }
418 
419 static void
420 kseq_nice_rem(struct kseq *kseq, int nice)
421 {
422 	int n;
423 
424 	mtx_assert(&sched_lock, MA_OWNED);
425 	/* Normalize to zero. */
426 	n = nice + SCHED_PRI_NHALF;
427 	kseq->ksq_nice[n]--;
428 	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));
429 
430 	/*
431 	 * If this wasn't the smallest nice value or there are more in
432 	 * this bucket we can just return.  Otherwise we have to recalculate
433 	 * the smallest nice.
434 	 */
435 	if (nice != kseq->ksq_nicemin ||
436 	    kseq->ksq_nice[n] != 0 ||
437 	    kseq->ksq_load_timeshare == 0)
438 		return;
439 
440 	for (; n < SCHED_PRI_NRESV; n++)
441 		if (kseq->ksq_nice[n]) {
442 			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
443 			return;
444 		}
445 }
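/*
 * Example of the bin bookkeeping above (illustrative): nice values span
 * PRIO_MIN..PRIO_MAX (-20..20), so adding SCHED_PRI_NHALF (20) maps
 * them onto ksq_nice[0..40].  A kse with nice -5 is counted in
 * ksq_nice[15], and ksq_nicemin tracks the smallest nice value present
 * so that sched_slice() can size every other group's slice relative to
 * the least nice group on this cpu.
 */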
446 
447 #ifdef SMP
448 /*
449  * sched_balance is a simple CPU load balancing algorithm.  It operates by
450  * finding the least loaded and most loaded cpu and equalizing their load
451  * by migrating some processes.
452  *
453  * Dealing only with two CPUs at a time has two advantages.  Firstly, most
454  * installations will only have 2 cpus.  Secondly, load balancing too much at
455  * once can have an unpleasant effect on the system.  The scheduler rarely has
456  * enough information to make perfect decisions.  So this algorithm chooses
457  * simplicity and more gradual effects on load in larger systems.
458  *
459  * It could be improved by considering the priorities and slices assigned to
460  * each task prior to balancing them.  There are many pathological cases with
461  * any approach and so the semi random algorithm below may work as well as any.
462  *
463  */
464 static void
465 sched_balance(void)
466 {
467 	struct kseq_group *high;
468 	struct kseq_group *low;
469 	struct kseq_group *ksg;
470 	int cnt;
471 	int i;
472 
473 	bal_tick = ticks + (random() % (hz * 2));
474 	if (smp_started == 0)
475 		return;
476 	low = high = NULL;
477 	i = random() % (ksg_maxid + 1);
478 	for (cnt = 0; cnt <= ksg_maxid; cnt++) {
479 		ksg = KSEQ_GROUP(i);
480 		/*
481 		 * Find the CPU with the highest load that has some
482 		 * threads to transfer.
483 		 */
484 		if ((high == NULL || ksg->ksg_load > high->ksg_load)
485 		    && ksg->ksg_transferable)
486 			high = ksg;
487 		if (low == NULL || ksg->ksg_load < low->ksg_load)
488 			low = ksg;
489 		if (++i > ksg_maxid)
490 			i = 0;
491 	}
492 	if (low != NULL && high != NULL && high != low)
493 		sched_balance_pair(LIST_FIRST(&high->ksg_members),
494 		    LIST_FIRST(&low->ksg_members));
495 }
496 
497 static void
498 sched_balance_groups(void)
499 {
500 	int i;
501 
502 	gbal_tick = ticks + (random() % (hz * 2));
503 	mtx_assert(&sched_lock, MA_OWNED);
504 	if (smp_started)
505 		for (i = 0; i <= ksg_maxid; i++)
506 			sched_balance_group(KSEQ_GROUP(i));
507 }
508 
509 static void
510 sched_balance_group(struct kseq_group *ksg)
511 {
512 	struct kseq *kseq;
513 	struct kseq *high;
514 	struct kseq *low;
515 	int load;
516 
517 	if (ksg->ksg_transferable == 0)
518 		return;
519 	low = NULL;
520 	high = NULL;
521 	LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
522 		load = kseq->ksq_load;
523 		if (high == NULL || load > high->ksq_load)
524 			high = kseq;
525 		if (low == NULL || load < low->ksq_load)
526 			low = kseq;
527 	}
528 	if (high != NULL && low != NULL && high != low)
529 		sched_balance_pair(high, low);
530 }
531 
532 static void
533 sched_balance_pair(struct kseq *high, struct kseq *low)
534 {
535 	int transferable;
536 	int high_load;
537 	int low_load;
538 	int move;
539 	int diff;
540 	int i;
541 
542 	/*
543 	 * If we're transferring within a group we have to use this specific
544 	 * kseq's transferable count, otherwise we can steal from other members
545 	 * of the group.
546 	 */
547 	if (high->ksq_group == low->ksq_group) {
548 		transferable = high->ksq_transferable;
549 		high_load = high->ksq_load;
550 		low_load = low->ksq_load;
551 	} else {
552 		transferable = high->ksq_group->ksg_transferable;
553 		high_load = high->ksq_group->ksg_load;
554 		low_load = low->ksq_group->ksg_load;
555 	}
556 	if (transferable == 0)
557 		return;
558 	/*
559 	 * Determine what the imbalance is and then adjust that to how many
560 	 * kses we actually have to give up (transferable).
561 	 */
562 	diff = high_load - low_load;
563 	move = diff / 2;
564 	if (diff & 0x1)
565 		move++;
566 	move = min(move, transferable);
567 	for (i = 0; i < move; i++)
568 		kseq_move(high, KSEQ_ID(low));
569 	return;
570 }
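/*
 * For example, with a high group load of 7 and a low group load of 2
 * the code above computes diff = 5 and rounds the odd difference up to
 * move = 3; assuming at least three kses are transferable, three calls
 * to kseq_move() leave the pair balanced at loads of 4 and 5.
 */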
571 
572 static void
573 kseq_move(struct kseq *from, int cpu)
574 {
575 	struct kseq *kseq;
576 	struct kseq *to;
577 	struct kse *ke;
578 
579 	kseq = from;
580 	to = KSEQ_CPU(cpu);
581 	ke = kseq_steal(kseq, 1);
582 	if (ke == NULL) {
583 		struct kseq_group *ksg;
584 
585 		ksg = kseq->ksq_group;
586 		LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
587 			if (kseq == from || kseq->ksq_transferable == 0)
588 				continue;
589 			ke = kseq_steal(kseq, 1);
590 			break;
591 		}
592 		if (ke == NULL)
593 			panic("kseq_move: No KSEs available with a "
594 			    "transferable count of %d\n",
595 			    ksg->ksg_transferable);
596 	}
597 	if (kseq == to)
598 		return;
599 	ke->ke_state = KES_THREAD;
600 	kseq_runq_rem(kseq, ke);
601 	kseq_load_rem(kseq, ke);
602 	kseq_notify(ke, cpu);
603 }
604 
605 static int
606 kseq_idled(struct kseq *kseq)
607 {
608 	struct kseq_group *ksg;
609 	struct kseq *steal;
610 	struct kse *ke;
611 
612 	ksg = kseq->ksq_group;
613 	/*
614 	 * If we're in a cpu group, try and steal kses from another cpu in
615 	 * the group before idling.
616 	 */
617 	if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
618 		LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
619 			if (steal == kseq || steal->ksq_transferable == 0)
620 				continue;
621 			ke = kseq_steal(steal, 0);
622 			if (ke == NULL)
623 				continue;
624 			ke->ke_state = KES_THREAD;
625 			kseq_runq_rem(steal, ke);
626 			kseq_load_rem(steal, ke);
627 			ke->ke_cpu = PCPU_GET(cpuid);
628 			ke->ke_flags |= KEF_INTERNAL | KEF_HOLD;
629 			sched_add(ke->ke_thread, SRQ_YIELDING);
630 			return (0);
631 		}
632 	}
633 	/*
634 	 * We only set the idled bit when all of the cpus in the group are
635 	 * idle.  Otherwise we could get into a situation where a KSE bounces
636 	 * back and forth between two idle cores on separate physical CPUs.
637 	 */
638 	ksg->ksg_idlemask |= PCPU_GET(cpumask);
639 	if (ksg->ksg_idlemask != ksg->ksg_cpumask)
640 		return (1);
641 	atomic_set_int(&kseq_idle, ksg->ksg_mask);
642 	return (1);
643 }
644 
645 static void
646 kseq_assign(struct kseq *kseq)
647 {
648 	struct kse *nke;
649 	struct kse *ke;
650 
651 	do {
652 		*(volatile struct kse **)&ke = kseq->ksq_assigned;
653 	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
654 	for (; ke != NULL; ke = nke) {
655 		nke = ke->ke_assign;
656 		kseq->ksq_group->ksg_load--;
657 		kseq->ksq_load--;
658 		ke->ke_flags &= ~KEF_ASSIGNED;
659 		ke->ke_flags |= KEF_INTERNAL | KEF_HOLD;
660 		sched_add(ke->ke_thread, SRQ_YIELDING);
661 	}
662 }
663 
664 static void
665 kseq_notify(struct kse *ke, int cpu)
666 {
667 	struct kseq *kseq;
668 	struct thread *td;
669 	struct pcpu *pcpu;
670 	int class;
671 	int prio;
672 
673 	kseq = KSEQ_CPU(cpu);
674 	/* XXX */
675 	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
676 	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
677 	    (kseq_idle & kseq->ksq_group->ksg_mask))
678 		atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
679 	kseq->ksq_group->ksg_load++;
680 	kseq->ksq_load++;
681 	ke->ke_cpu = cpu;
682 	ke->ke_flags |= KEF_ASSIGNED;
683 	prio = ke->ke_thread->td_priority;
684 
685 	/*
686 	 * Place a KSE on another cpu's queue and force a resched.
687 	 */
688 	do {
689 		*(volatile struct kse **)&ke->ke_assign = kseq->ksq_assigned;
690 	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
691 	/*
692 	 * Without sched_lock we could lose a race where we set NEEDRESCHED
693 	 * on a thread that is switched out before the IPI is delivered.  This
694 	 * would lead us to miss the resched.  This will be a problem once
695 	 * sched_lock is pushed down.
696 	 */
697 	pcpu = pcpu_find(cpu);
698 	td = pcpu->pc_curthread;
699 	if (ke->ke_thread->td_priority < td->td_priority ||
700 	    td == pcpu->pc_idlethread) {
701 		td->td_flags |= TDF_NEEDRESCHED;
702 		ipi_selected(1 << cpu, IPI_AST);
703 	}
704 }
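/*
 * Taken together, kseq_notify() and kseq_assign() form a lock-free
 * hand-off list: each sender pushes its kse onto ksq_assigned with the
 * compare-and-swap loop above (ke_assign doubles as the link field),
 * and the receiving cpu atomically exchanges the entire list for NULL
 * before walking it and calling sched_add() on every entry.
 */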
705 
706 static struct kse *
707 runq_steal(struct runq *rq)
708 {
709 	struct rqhead *rqh;
710 	struct rqbits *rqb;
711 	struct kse *ke;
712 	int word;
713 	int bit;
714 
715 	mtx_assert(&sched_lock, MA_OWNED);
716 	rqb = &rq->rq_status;
717 	for (word = 0; word < RQB_LEN; word++) {
718 		if (rqb->rqb_bits[word] == 0)
719 			continue;
720 		for (bit = 0; bit < RQB_BPW; bit++) {
721 			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
722 				continue;
723 			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
724 			TAILQ_FOREACH(ke, rqh, ke_procq) {
725 				if (KSE_CAN_MIGRATE(ke))
726 					return (ke);
727 			}
728 		}
729 	}
730 	return (NULL);
731 }
732 
733 static struct kse *
734 kseq_steal(struct kseq *kseq, int stealidle)
735 {
736 	struct kse *ke;
737 
738 	/*
739 	 * Steal from next first to try to get a non-interactive task that
740 	 * may not have run for a while.
741 	 */
742 	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
743 		return (ke);
744 	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
745 		return (ke);
746 	if (stealidle)
747 		return (runq_steal(&kseq->ksq_idle));
748 	return (NULL);
749 }
750 
751 int
752 kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
753 {
754 	struct kseq_group *nksg;
755 	struct kseq_group *ksg;
756 	struct kseq *old;
757 	int cpu;
758 	int idx;
759 
760 	if (smp_started == 0)
761 		return (0);
762 	cpu = 0;
763 	/*
764 	 * If our load exceeds a certain threshold we should attempt to
765 	 * reassign this thread.  The first candidate is the cpu that
766 	 * originally ran the thread.  If it is idle, assign it there,
767 	 * otherwise, pick an idle cpu.
768 	 *
769 	 * The threshold at which we start to reassign kses has a large impact
770 	 * on the overall performance of the system.  Tuned too high and
771 	 * some CPUs may idle.  Too low and there will be excess migration
772 	 * and context switches.
773 	 */
774 	old = KSEQ_CPU(ke->ke_cpu);
775 	nksg = old->ksq_group;
776 	ksg = kseq->ksq_group;
777 	if (kseq_idle) {
778 		if (kseq_idle & nksg->ksg_mask) {
779 			cpu = ffs(nksg->ksg_idlemask);
780 			if (cpu) {
781 				CTR2(KTR_SCHED,
782 				    "kseq_transfer: %p found old cpu %X "
783 				    "in idlemask.", ke, cpu);
784 				goto migrate;
785 			}
786 		}
787 		/*
788 		 * Multiple cpus could find this bit simultaneously
789 		 * but the race shouldn't be terrible.
790 		 */
791 		cpu = ffs(kseq_idle);
792 		if (cpu) {
793 			CTR2(KTR_SCHED, "kseq_transfer: %p found %X "
794 			    "in idlemask.", ke, cpu);
795 			goto migrate;
796 		}
797 	}
798 	idx = 0;
799 #if 0
800 	if (old->ksq_load < kseq->ksq_load) {
801 		cpu = ke->ke_cpu + 1;
802 		CTR2(KTR_SCHED, "kseq_transfer: %p old cpu %X "
803 		    "load less than ours.", ke, cpu);
804 		goto migrate;
805 	}
806 	/*
807 	 * No new CPU was found, look for one with less load.
808 	 */
809 	for (idx = 0; idx <= ksg_maxid; idx++) {
810 		nksg = KSEQ_GROUP(idx);
811 		if (nksg->ksg_load /*+ (nksg->ksg_cpus  * 2)*/ < ksg->ksg_load) {
812 			cpu = ffs(nksg->ksg_cpumask);
813 			CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X load less "
814 			    "than ours.", ke, cpu);
815 			goto migrate;
816 		}
817 	}
818 #endif
819 	/*
820 	 * If another cpu in this group has idled, assign a thread over
821 	 * to them after checking to see if there are idled groups.
822 	 */
823 	if (ksg->ksg_idlemask) {
824 		cpu = ffs(ksg->ksg_idlemask);
825 		if (cpu) {
826 			CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X idle in "
827 			    "group.", ke, cpu);
828 			goto migrate;
829 		}
830 	}
831 	return (0);
832 migrate:
833 	/*
834 	 * Now that we've found an idle CPU, migrate the thread.
835 	 */
836 	cpu--;
837 	ke->ke_runq = NULL;
838 	kseq_notify(ke, cpu);
839 
840 	return (1);
841 }
842 
843 #endif	/* SMP */
844 
845 /*
846  * Pick the highest priority task we have and return it.
847  */
848 
849 static struct kse *
850 kseq_choose(struct kseq *kseq)
851 {
852 	struct runq *swap;
853 	struct kse *ke;
854 	int nice;
855 
856 	mtx_assert(&sched_lock, MA_OWNED);
857 	swap = NULL;
858 
859 	for (;;) {
860 		ke = runq_choose(kseq->ksq_curr);
861 		if (ke == NULL) {
862 			/*
863 			 * We already swapped once and didn't get anywhere.
864 			 */
865 			if (swap)
866 				break;
867 			swap = kseq->ksq_curr;
868 			kseq->ksq_curr = kseq->ksq_next;
869 			kseq->ksq_next = swap;
870 			continue;
871 		}
872 		/*
873 		 * If we encounter a slice of 0 the kse is in a
874 		 * TIMESHARE kse group and its nice was too far out
875 		 * of the range that receives slices.
876 		 */
877 		nice = ke->ke_proc->p_nice + (0 - kseq->ksq_nicemin);
878 		if (ke->ke_slice == 0 || (nice > SCHED_SLICE_NTHRESH &&
879 		    ke->ke_proc->p_nice != 0)) {
880 			runq_remove(ke->ke_runq, ke);
881 			sched_slice(ke);
882 			ke->ke_runq = kseq->ksq_next;
883 			runq_add(ke->ke_runq, ke, 0);
884 			continue;
885 		}
886 		return (ke);
887 	}
888 
889 	return (runq_choose(&kseq->ksq_idle));
890 }
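/*
 * The queue swap above is what gives the timeshare queues their
 * round-robin epochs: threads whose slice expires in sched_clock() are
 * requeued on ksq_next, and only after ksq_curr drains do the two
 * queues trade places, so a cpu-bound thread cannot jump back in front
 * of the remaining threads on the current queue.
 */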
891 
892 static void
893 kseq_setup(struct kseq *kseq)
894 {
895 	runq_init(&kseq->ksq_timeshare[0]);
896 	runq_init(&kseq->ksq_timeshare[1]);
897 	runq_init(&kseq->ksq_idle);
898 	kseq->ksq_curr = &kseq->ksq_timeshare[0];
899 	kseq->ksq_next = &kseq->ksq_timeshare[1];
900 	kseq->ksq_load = 0;
901 	kseq->ksq_load_timeshare = 0;
902 }
903 
904 static void
905 sched_setup(void *dummy)
906 {
907 #ifdef SMP
908 	int i;
909 #endif
910 
911 	slice_min = (hz/100);	/* 10ms */
912 	slice_max = (hz/7);	/* ~140ms */
913 
914 #ifdef SMP
915 	balance_groups = 0;
916 	/*
917 	 * Initialize the kseqs.
918 	 */
919 	for (i = 0; i < MAXCPU; i++) {
920 		struct kseq *ksq;
921 
922 		ksq = &kseq_cpu[i];
923 		ksq->ksq_assigned = NULL;
924 		kseq_setup(&kseq_cpu[i]);
925 	}
926 	if (smp_topology == NULL) {
927 		struct kseq_group *ksg;
928 		struct kseq *ksq;
929 		int cpus;
930 
931 		for (cpus = 0, i = 0; i < MAXCPU; i++) {
932 			if (CPU_ABSENT(i))
933 				continue;
934 			ksq = &kseq_cpu[cpus];
935 			ksg = &kseq_groups[cpus];
936 			/*
937 			 * Setup a kseq group with one member.
938 			 */
939 			ksq->ksq_transferable = 0;
940 			ksq->ksq_group = ksg;
941 			ksg->ksg_cpus = 1;
942 			ksg->ksg_idlemask = 0;
943 			ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
944 			ksg->ksg_load = 0;
945 			ksg->ksg_transferable = 0;
946 			LIST_INIT(&ksg->ksg_members);
947 			LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
948 			cpus++;
949 		}
950 		ksg_maxid = cpus - 1;
951 	} else {
952 		struct kseq_group *ksg;
953 		struct cpu_group *cg;
954 		int j;
955 
956 		for (i = 0; i < smp_topology->ct_count; i++) {
957 			cg = &smp_topology->ct_group[i];
958 			ksg = &kseq_groups[i];
959 			/*
960 			 * Initialize the group.
961 			 */
962 			ksg->ksg_idlemask = 0;
963 			ksg->ksg_load = 0;
964 			ksg->ksg_transferable = 0;
965 			ksg->ksg_cpus = cg->cg_count;
966 			ksg->ksg_cpumask = cg->cg_mask;
967 			LIST_INIT(&ksg->ksg_members);
968 			/*
969 			 * Find all of the group members and add them.
970 			 */
971 			for (j = 0; j < MAXCPU; j++) {
972 				if ((cg->cg_mask & (1 << j)) != 0) {
973 					if (ksg->ksg_mask == 0)
974 						ksg->ksg_mask = 1 << j;
975 					kseq_cpu[j].ksq_transferable = 0;
976 					kseq_cpu[j].ksq_group = ksg;
977 					LIST_INSERT_HEAD(&ksg->ksg_members,
978 					    &kseq_cpu[j], ksq_siblings);
979 				}
980 			}
981 			if (ksg->ksg_cpus > 1)
982 				balance_groups = 1;
983 		}
984 		ksg_maxid = smp_topology->ct_count - 1;
985 	}
986 	/*
987 	 * Stagger the group and global load balancer so they do not
988 	 * interfere with each other.
989 	 */
990 	bal_tick = ticks + hz;
991 	if (balance_groups)
992 		gbal_tick = ticks + (hz / 2);
993 #else
994 	kseq_setup(KSEQ_SELF());
995 #endif
996 	mtx_lock_spin(&sched_lock);
997 	kseq_load_add(KSEQ_SELF(), &kse0);
998 	mtx_unlock_spin(&sched_lock);
999 }
1000 
1001 /*
1002  * Scale the scheduling priority according to the "interactivity" of this
1003  * process.
1004  */
1005 static void
1006 sched_priority(struct ksegrp *kg)
1007 {
1008 	int pri;
1009 
1010 	if (kg->kg_pri_class != PRI_TIMESHARE)
1011 		return;
1012 
1013 	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
1014 	pri += SCHED_PRI_BASE;
1015 	pri += kg->kg_proc->p_nice;
1016 
1017 	if (pri > PRI_MAX_TIMESHARE)
1018 		pri = PRI_MAX_TIMESHARE;
1019 	else if (pri < PRI_MIN_TIMESHARE)
1020 		pri = PRI_MIN_TIMESHARE;
1021 
1022 	kg->kg_user_pri = pri;
1023 
1024 	return;
1025 }
1026 
1027 /*
1028  * Calculate a time slice based on the properties of the kseg and the runq
1029  * that we're on.  This is only for PRI_TIMESHARE ksegrps.
1030  */
1031 static void
1032 sched_slice(struct kse *ke)
1033 {
1034 	struct kseq *kseq;
1035 	struct ksegrp *kg;
1036 
1037 	kg = ke->ke_ksegrp;
1038 	kseq = KSEQ_CPU(ke->ke_cpu);
1039 
1040 	if (ke->ke_thread->td_flags & TDF_BORROWING) {
1041 		ke->ke_slice = SCHED_SLICE_MIN;
1042 		return;
1043 	}
1044 
1045 	/*
1046 	 * Rationale:
1047 	 * KSEs in interactive ksegs get a minimal slice so that we
1048 	 * quickly notice if it abuses its advantage.
1049 	 *
1050 	 * KSEs in non-interactive ksegs are assigned a slice that is
1051 	 * based on the ksegs nice value relative to the least nice kseg
1052 	 * on the run queue for this cpu.
1053 	 *
1054 	 * If the KSE is less nice than all others it gets the maximum
1055 	 * slice and other KSEs will adjust their slice relative to
1056 	 * this when they first expire.
1057 	 *
1058 	 * There is a 20 point window that starts relative to the least
1059 	 * nice kse on the run queue.  Slice size is determined by
1060 	 * the kse's distance from the least nice ksegrp.
1061 	 *
1062 	 * If the kse is outside of the window it will get no slice
1063 	 * and will be reevaluated each time it is selected on the
1064 	 * run queue.  The exception to this is nice 0 ksegs when
1065 	 * a nice -20 is running.  They are always granted a minimum
1066 	 * slice.
1067 	 */
1068 	if (!SCHED_INTERACTIVE(kg)) {
1069 		int nice;
1070 
1071 		nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
1072 		if (kseq->ksq_load_timeshare == 0 ||
1073 		    kg->kg_proc->p_nice < kseq->ksq_nicemin)
1074 			ke->ke_slice = SCHED_SLICE_MAX;
1075 		else if (nice <= SCHED_SLICE_NTHRESH)
1076 			ke->ke_slice = SCHED_SLICE_NICE(nice);
1077 		else if (kg->kg_proc->p_nice == 0)
1078 			ke->ke_slice = SCHED_SLICE_MIN;
1079 		else
1080 			ke->ke_slice = 0;
1081 	} else
1082 		ke->ke_slice = SCHED_SLICE_INTERACTIVE;
1083 
1084 	return;
1085 }
1086 
1087 /*
1088  * This routine enforces a maximum limit on the amount of scheduling history
1089  * kept.  It is called after either the slptime or runtime is adjusted.
1090  * This routine will not operate correctly when slp or run times have been
1091  * adjusted to more than double their maximum.
1092  */
1093 static void
1094 sched_interact_update(struct ksegrp *kg)
1095 {
1096 	int sum;
1097 
1098 	sum = kg->kg_runtime + kg->kg_slptime;
1099 	if (sum < SCHED_SLP_RUN_MAX)
1100 		return;
1101 	/*
1102 	 * If we have exceeded by more than 1/5th then the algorithm below
1103 	 * will not bring us back into range.  Dividing by two here forces
1104 	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
1105 	 */
1106 	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1107 		kg->kg_runtime /= 2;
1108 		kg->kg_slptime /= 2;
1109 		return;
1110 	}
1111 	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
1112 	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
1113 }
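/*
 * Worked example (illustrative, assuming hz = 1000 so SCHED_SLP_RUN_MAX
 * is (1000 * 5) << 10 = 5120000): with kg_runtime = 4000000 and
 * kg_slptime = 1500000 the sum is 5500000, above the limit but below
 * 6/5ths of it, so both values are scaled by 4/5 to 3200000 and
 * 1200000.  The ratio, and therefore the interactivity score, is
 * preserved while the history is trimmed back toward five seconds'
 * worth of scaled ticks.
 */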
1114 
1115 static void
1116 sched_interact_fork(struct ksegrp *kg)
1117 {
1118 	int ratio;
1119 	int sum;
1120 
1121 	sum = kg->kg_runtime + kg->kg_slptime;
1122 	if (sum > SCHED_SLP_RUN_FORK) {
1123 		ratio = sum / SCHED_SLP_RUN_FORK;
1124 		kg->kg_runtime /= ratio;
1125 		kg->kg_slptime /= ratio;
1126 	}
1127 }
1128 
1129 static int
1130 sched_interact_score(struct ksegrp *kg)
1131 {
1132 	int div;
1133 
1134 	if (kg->kg_runtime > kg->kg_slptime) {
1135 		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
1136 		return (SCHED_INTERACT_HALF +
1137 		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
1138 	} else if (kg->kg_slptime > kg->kg_runtime) {
1139 		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
1140 		return (kg->kg_runtime / div);
1141 	}
1142 
1143 	/*
1144 	 * This can happen if slptime and runtime are 0.
1145 	 */
1146 	return (0);
1147 
1148 }
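/*
 * Illustrative scores from the formula above: a ksegrp that sleeps
 * three times as long as it runs gets div = kg_slptime / 50 and a score
 * of roughly 50 * kg_runtime / kg_slptime, about 16, comfortably under
 * SCHED_INTERACT_THRESH (30) and therefore interactive.  Reversing the
 * ratio mirrors the result around SCHED_INTERACT_HALF to roughly 84.
 * Equal sleep and run time, or no history at all, falls through and
 * scores 0.
 */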
1149 
1150 /*
1151  * Very early in the boot some setup of scheduler-specific
1152  * parts of proc0 and of some scheduler resources needs to be done.
1153  * Called from:
1154  *  proc0_init()
1155  */
1156 void
1157 schedinit(void)
1158 {
1159 	/*
1160 	 * Set up the scheduler specific parts of proc0.
1161 	 */
1162 	proc0.p_sched = NULL; /* XXX */
1163 	ksegrp0.kg_sched = &kg_sched0;
1164 	thread0.td_sched = &kse0;
1165 	kse0.ke_thread = &thread0;
1166 	kse0.ke_state = KES_THREAD;
1167 	kg_sched0.skg_concurrency = 1;
1168 	kg_sched0.skg_avail_opennings = 0; /* we are already running */
1169 }
1170 
1171 /*
1172  * This is only somewhat accurate since, given many processes of the same
1173  * priority, they will switch when their slices run out, which will be
1174  * at most SCHED_SLICE_MAX.
1175  */
1176 int
1177 sched_rr_interval(void)
1178 {
1179 	return (SCHED_SLICE_MAX);
1180 }
1181 
1182 static void
1183 sched_pctcpu_update(struct kse *ke)
1184 {
1185 	/*
1186 	 * Adjust counters and watermark for pctcpu calc.
1187 	 */
1188 	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
1189 		/*
1190 		 * Shift the tick count out so that the divide doesn't
1191 		 * round away our results.
1192 		 */
1193 		ke->ke_ticks <<= 10;
1194 		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
1195 			    SCHED_CPU_TICKS;
1196 		ke->ke_ticks >>= 10;
1197 	} else
1198 		ke->ke_ticks = 0;
1199 	ke->ke_ltick = ticks;
1200 	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
1201 }
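/*
 * For instance, if 15 seconds worth of ticks have elapsed since
 * ke_ftick and the kse accumulated 900 ticks in that window, the
 * scaling above trims the count to 900 * 10 / 15 = 600 and slides the
 * window start forward so that future samples are again measured
 * against the nominal ten-second SCHED_CPU_TICKS window.  The << 10 and
 * >> 10 pair only protects the intermediate division from discarding
 * precision.
 */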
1202 
1203 void
1204 sched_thread_priority(struct thread *td, u_char prio)
1205 {
1206 	struct kse *ke;
1207 
1208 	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
1209 	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
1210 	    curthread->td_proc->p_comm);
1211 	ke = td->td_kse;
1212 	mtx_assert(&sched_lock, MA_OWNED);
1213 	if (td->td_priority == prio)
1214 		return;
1215 	if (TD_ON_RUNQ(td)) {
1216 		/*
1217 		 * If the priority has been elevated due to priority
1218 		 * propagation, we may have to move ourselves to a new
1219 		 * queue.  We still call adjustrunqueue below in case the kse
1220 		 * needs to fix things up.
1221 		 */
1222 		if (prio < td->td_priority && ke->ke_runq != NULL &&
1223 		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
1224 		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
1225 			runq_remove(ke->ke_runq, ke);
1226 			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
1227 			runq_add(ke->ke_runq, ke, 0);
1228 		}
1229 		/*
1230 		 * Hold this kse on this cpu so that sched_prio() doesn't
1231 		 * cause excessive migration.  We only want migration to
1232 		 * happen as the result of a wakeup.
1233 		 */
1234 		ke->ke_flags |= KEF_HOLD;
1235 		adjustrunqueue(td, prio);
1236 		ke->ke_flags &= ~KEF_HOLD;
1237 	} else
1238 		td->td_priority = prio;
1239 }
1240 
1241 /*
1242  * Update a thread's priority when it is lent another thread's
1243  * priority.
1244  */
1245 void
1246 sched_lend_prio(struct thread *td, u_char prio)
1247 {
1248 
1249 	td->td_flags |= TDF_BORROWING;
1250 	sched_thread_priority(td, prio);
1251 }
1252 
1253 /*
1254  * Restore a thread's priority when priority propagation is
1255  * over.  The prio argument is the minimum priority the thread
1256  * needs to have to satisfy other possible priority lending
1257  * requests.  If the thread's regular priority is less
1258  * important than prio, the thread will keep a priority boost
1259  * of prio.
1260  */
1261 void
1262 sched_unlend_prio(struct thread *td, u_char prio)
1263 {
1264 	u_char base_pri;
1265 
1266 	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1267 	    td->td_base_pri <= PRI_MAX_TIMESHARE)
1268 		base_pri = td->td_ksegrp->kg_user_pri;
1269 	else
1270 		base_pri = td->td_base_pri;
1271 	if (prio >= base_pri) {
1272 		td->td_flags &= ~TDF_BORROWING;
1273 		sched_thread_priority(td, base_pri);
1274 	} else
1275 		sched_lend_prio(td, prio);
1276 }
1277 
1278 void
1279 sched_prio(struct thread *td, u_char prio)
1280 {
1281 	u_char oldprio;
1282 
1283 	/* First, update the base priority. */
1284 	td->td_base_pri = prio;
1285 
1286 	/*
1287 	 * If the thread is borrowing another thread's priority, don't
1288 	 * ever lower the priority.
1289 	 */
1290 	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1291 		return;
1292 
1293 	/* Change the real priority. */
1294 	oldprio = td->td_priority;
1295 	sched_thread_priority(td, prio);
1296 
1297 	/*
1298 	 * If the thread is on a turnstile, then let the turnstile update
1299 	 * its state.
1300 	 */
1301 	if (TD_ON_LOCK(td) && oldprio != prio)
1302 		turnstile_adjust(td, oldprio);
1303 }
1304 
1305 void
1306 sched_switch(struct thread *td, struct thread *newtd, int flags)
1307 {
1308 	struct kseq *ksq;
1309 	struct kse *ke;
1310 
1311 	mtx_assert(&sched_lock, MA_OWNED);
1312 
1313 	ke = td->td_kse;
1314 	ksq = KSEQ_SELF();
1315 
1316 	td->td_lastcpu = td->td_oncpu;
1317 	td->td_oncpu = NOCPU;
1318 	td->td_flags &= ~TDF_NEEDRESCHED;
1319 	td->td_owepreempt = 0;
1320 
1321 	/*
1322 	 * If the KSE has been assigned it may be in the process of switching
1323 	 * to the new cpu.  This is the case in sched_bind().
1324 	 */
1325 	if (td == PCPU_GET(idlethread)) {
1326 		TD_SET_CAN_RUN(td);
1327 	} else if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
1328 		/* We are ending our run so make our slot available again */
1329 		SLOT_RELEASE(td->td_ksegrp);
1330 		kseq_load_rem(ksq, ke);
1331 		if (TD_IS_RUNNING(td)) {
1332 			/*
1333 			 * Don't allow the thread to migrate
1334 			 * from a preemption.
1335 			 */
1336 			ke->ke_flags |= KEF_HOLD;
1337 			setrunqueue(td, (flags & SW_PREEMPT) ?
1338 			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1339 			    SRQ_OURSELF|SRQ_YIELDING);
1340 			ke->ke_flags &= ~KEF_HOLD;
1341 		} else if ((td->td_proc->p_flag & P_HADTHREADS) &&
1342 		    (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp))
1343 			/*
1344 			 * We will not be on the run queue.
1345 			 * So we must be sleeping or similar.
1346 			 * Don't use the slot if we will need it
1347 			 * for newtd.
1348 			 */
1349 			slot_fill(td->td_ksegrp);
1350 	}
1351 	if (newtd != NULL) {
1352 		/*
1353 		 * If we bring in a thread,
1354 		 * then account for it as if it had been added to the
1355 		 * run queue and then chosen.
1356 		 */
1357 		newtd->td_kse->ke_flags |= KEF_DIDRUN;
1358 		newtd->td_kse->ke_runq = ksq->ksq_curr;
1359 		TD_SET_RUNNING(newtd);
1360 		kseq_load_add(KSEQ_SELF(), newtd->td_kse);
1361 	} else
1362 		newtd = choosethread();
1363 	if (td != newtd) {
1364 #ifdef	HWPMC_HOOKS
1365 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1366 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1367 #endif
1368 		cpu_switch(td, newtd);
1369 #ifdef	HWPMC_HOOKS
1370 		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1371 			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1372 #endif
1373 	}
1374 
1375 	sched_lock.mtx_lock = (uintptr_t)td;
1376 
1377 	td->td_oncpu = PCPU_GET(cpuid);
1378 }
1379 
1380 void
1381 sched_nice(struct proc *p, int nice)
1382 {
1383 	struct ksegrp *kg;
1384 	struct kse *ke;
1385 	struct thread *td;
1386 	struct kseq *kseq;
1387 
1388 	PROC_LOCK_ASSERT(p, MA_OWNED);
1389 	mtx_assert(&sched_lock, MA_OWNED);
1390 	/*
1391 	 * We need to adjust the nice counts for running KSEs.
1392 	 */
1393 	FOREACH_KSEGRP_IN_PROC(p, kg) {
1394 		if (kg->kg_pri_class == PRI_TIMESHARE) {
1395 			FOREACH_THREAD_IN_GROUP(kg, td) {
1396 				ke = td->td_kse;
1397 				if (ke->ke_runq == NULL)
1398 					continue;
1399 				kseq = KSEQ_CPU(ke->ke_cpu);
1400 				kseq_nice_rem(kseq, p->p_nice);
1401 				kseq_nice_add(kseq, nice);
1402 			}
1403 		}
1404 	}
1405 	p->p_nice = nice;
1406 	FOREACH_KSEGRP_IN_PROC(p, kg) {
1407 		sched_priority(kg);
1408 		FOREACH_THREAD_IN_GROUP(kg, td)
1409 			td->td_flags |= TDF_NEEDRESCHED;
1410 	}
1411 }
1412 
1413 void
1414 sched_sleep(struct thread *td)
1415 {
1416 	mtx_assert(&sched_lock, MA_OWNED);
1417 
1418 	td->td_slptime = ticks;
1419 }
1420 
1421 void
1422 sched_wakeup(struct thread *td)
1423 {
1424 	mtx_assert(&sched_lock, MA_OWNED);
1425 
1426 	/*
1427 	 * Let the kseg know how long we slept for.  This is because process
1428 	 * interactivity behavior is modeled in the kseg.
1429 	 */
1430 	if (td->td_slptime) {
1431 		struct ksegrp *kg;
1432 		int hzticks;
1433 
1434 		kg = td->td_ksegrp;
1435 		hzticks = (ticks - td->td_slptime) << 10;
1436 		if (hzticks >= SCHED_SLP_RUN_MAX) {
1437 			kg->kg_slptime = SCHED_SLP_RUN_MAX;
1438 			kg->kg_runtime = 1;
1439 		} else {
1440 			kg->kg_slptime += hzticks;
1441 			sched_interact_update(kg);
1442 		}
1443 		sched_priority(kg);
1444 		sched_slice(td->td_kse);
1445 		td->td_slptime = 0;
1446 	}
1447 	setrunqueue(td, SRQ_BORING);
1448 }
1449 
1450 /*
1451  * Penalize the parent for creating a new child and initialize the child's
1452  * priority.
1453  */
1454 void
1455 sched_fork(struct thread *td, struct thread *childtd)
1456 {
1457 
1458 	mtx_assert(&sched_lock, MA_OWNED);
1459 
1460 	sched_fork_ksegrp(td, childtd->td_ksegrp);
1461 	sched_fork_thread(td, childtd);
1462 }
1463 
1464 void
1465 sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
1466 {
1467 	struct ksegrp *kg = td->td_ksegrp;
1468 	mtx_assert(&sched_lock, MA_OWNED);
1469 
1470 	child->kg_slptime = kg->kg_slptime;
1471 	child->kg_runtime = kg->kg_runtime;
1472 	child->kg_user_pri = kg->kg_user_pri;
1473 	sched_interact_fork(child);
1474 	kg->kg_runtime += tickincr << 10;
1475 	sched_interact_update(kg);
1476 }
1477 
1478 void
1479 sched_fork_thread(struct thread *td, struct thread *child)
1480 {
1481 	struct kse *ke;
1482 	struct kse *ke2;
1483 
1484 	sched_newthread(child);
1485 	ke = td->td_kse;
1486 	ke2 = child->td_kse;
1487 	ke2->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
1488 	ke2->ke_cpu = ke->ke_cpu;
1489 	ke2->ke_runq = NULL;
1490 
1491 	/* Grab our parent's cpu estimation information. */
1492 	ke2->ke_ticks = ke->ke_ticks;
1493 	ke2->ke_ltick = ke->ke_ltick;
1494 	ke2->ke_ftick = ke->ke_ftick;
1495 }
1496 
1497 void
1498 sched_class(struct ksegrp *kg, int class)
1499 {
1500 	struct kseq *kseq;
1501 	struct kse *ke;
1502 	struct thread *td;
1503 	int nclass;
1504 	int oclass;
1505 
1506 	mtx_assert(&sched_lock, MA_OWNED);
1507 	if (kg->kg_pri_class == class)
1508 		return;
1509 
1510 	nclass = PRI_BASE(class);
1511 	oclass = PRI_BASE(kg->kg_pri_class);
1512 	FOREACH_THREAD_IN_GROUP(kg, td) {
1513 		ke = td->td_kse;
1514 		if ((ke->ke_state != KES_ONRUNQ &&
1515 		    ke->ke_state != KES_THREAD) || ke->ke_runq == NULL)
1516 			continue;
1517 		kseq = KSEQ_CPU(ke->ke_cpu);
1518 
1519 #ifdef SMP
1520 		/*
1521 		 * On SMP if we're on the RUNQ we must adjust the transferable
1522 		 * count because we could be changing to or from an interrupt
1523 		 * class.
1524 		 */
1525 		if (ke->ke_state == KES_ONRUNQ) {
1526 			if (KSE_CAN_MIGRATE(ke)) {
1527 				kseq->ksq_transferable--;
1528 				kseq->ksq_group->ksg_transferable--;
1529 			}
1530 			if (KSE_CAN_MIGRATE(ke)) {
1531 				kseq->ksq_transferable++;
1532 				kseq->ksq_group->ksg_transferable++;
1533 			}
1534 		}
1535 #endif
1536 		if (oclass == PRI_TIMESHARE) {
1537 			kseq->ksq_load_timeshare--;
1538 			kseq_nice_rem(kseq, kg->kg_proc->p_nice);
1539 		}
1540 		if (nclass == PRI_TIMESHARE) {
1541 			kseq->ksq_load_timeshare++;
1542 			kseq_nice_add(kseq, kg->kg_proc->p_nice);
1543 		}
1544 	}
1545 
1546 	kg->kg_pri_class = class;
1547 }
1548 
1549 /*
1550  * Return some of the child's priority and interactivity to the parent.
1551  */
1552 void
1553 sched_exit(struct proc *p, struct thread *childtd)
1554 {
1555 	mtx_assert(&sched_lock, MA_OWNED);
1556 	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), childtd);
1557 	sched_exit_thread(NULL, childtd);
1558 }
1559 
1560 void
1561 sched_exit_ksegrp(struct ksegrp *kg, struct thread *td)
1562 {
1563 	/* kg->kg_slptime += td->td_ksegrp->kg_slptime; */
1564 	kg->kg_runtime += td->td_ksegrp->kg_runtime;
1565 	sched_interact_update(kg);
1566 }
1567 
1568 void
1569 sched_exit_thread(struct thread *td, struct thread *childtd)
1570 {
1571 	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
1572 	    childtd, childtd->td_proc->p_comm, childtd->td_priority);
1573 	kseq_load_rem(KSEQ_CPU(childtd->td_kse->ke_cpu), childtd->td_kse);
1574 }
1575 
1576 void
1577 sched_clock(struct thread *td)
1578 {
1579 	struct kseq *kseq;
1580 	struct ksegrp *kg;
1581 	struct kse *ke;
1582 
1583 	mtx_assert(&sched_lock, MA_OWNED);
1584 	kseq = KSEQ_SELF();
1585 #ifdef SMP
1586 	if (ticks >= bal_tick)
1587 		sched_balance();
1588 	if (ticks >= gbal_tick && balance_groups)
1589 		sched_balance_groups();
1590 	/*
1591 	 * We could have been assigned a non real-time thread without an
1592 	 * IPI.
1593 	 */
1594 	if (kseq->ksq_assigned)
1595 		kseq_assign(kseq);	/* Potentially sets NEEDRESCHED */
1596 #endif
1597 	/*
1598 	 * sched_setup() apparently happens prior to stathz being set.  We
1599 	 * need to resolve the timers earlier in the boot so we can avoid
1600 	 * calculating this here.
1601 	 */
1602 	if (realstathz == 0) {
1603 		realstathz = stathz ? stathz : hz;
1604 		tickincr = hz / realstathz;
1605 		/*
1606 		 * XXX This does not work for values of stathz that are much
1607 		 * larger than hz.
1608 		 */
1609 		if (tickincr == 0)
1610 			tickincr = 1;
1611 	}
1612 
1613 	ke = td->td_kse;
1614 	kg = ke->ke_ksegrp;
1615 
1616 	/* Adjust ticks for pctcpu */
1617 	ke->ke_ticks++;
1618 	ke->ke_ltick = ticks;
1619 
1620 	/* Go up to one second beyond our max and then trim back down */
1621 	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
1622 		sched_pctcpu_update(ke);
1623 
1624 	if (td->td_flags & TDF_IDLETD)
1625 		return;
1626 	/*
1627 	 * We only do slicing code for TIMESHARE ksegrps.
1628 	 */
1629 	if (kg->kg_pri_class != PRI_TIMESHARE)
1630 		return;
1631 	/*
1632 	 * We used a tick; charge it to the ksegrp so that we can compute our
1633 	 * interactivity.
1634 	 */
1635 	kg->kg_runtime += tickincr << 10;
1636 	sched_interact_update(kg);
1637 
1638 	/*
1639 	 * We used up one time slice.
1640 	 */
1641 	if (--ke->ke_slice > 0)
1642 		return;
1643 	/*
1644 	 * We're out of time, recompute priorities and requeue.
1645 	 */
1646 	kseq_load_rem(kseq, ke);
1647 	sched_priority(kg);
1648 	sched_slice(ke);
1649 	if (SCHED_CURR(kg, ke))
1650 		ke->ke_runq = kseq->ksq_curr;
1651 	else
1652 		ke->ke_runq = kseq->ksq_next;
1653 	kseq_load_add(kseq, ke);
1654 	td->td_flags |= TDF_NEEDRESCHED;
1655 }
1656 
1657 int
1658 sched_runnable(void)
1659 {
1660 	struct kseq *kseq;
1661 	int load;
1662 
1663 	load = 1;
1664 
1665 	kseq = KSEQ_SELF();
1666 #ifdef SMP
1667 	if (kseq->ksq_assigned) {
1668 		mtx_lock_spin(&sched_lock);
1669 		kseq_assign(kseq);
1670 		mtx_unlock_spin(&sched_lock);
1671 	}
1672 #endif
1673 	if ((curthread->td_flags & TDF_IDLETD) != 0) {
1674 		if (kseq->ksq_load > 0)
1675 			goto out;
1676 	} else
1677 		if (kseq->ksq_load - 1 > 0)
1678 			goto out;
1679 	load = 0;
1680 out:
1681 	return (load);
1682 }
1683 
1684 void
1685 sched_userret(struct thread *td)
1686 {
1687 	struct ksegrp *kg;
1688 
1689 	KASSERT((td->td_flags & TDF_BORROWING) == 0,
1690 	    ("thread with borrowed priority returning to userland"));
1691 	kg = td->td_ksegrp;
1692 	if (td->td_priority != kg->kg_user_pri) {
1693 		mtx_lock_spin(&sched_lock);
1694 		td->td_priority = kg->kg_user_pri;
1695 		td->td_base_pri = kg->kg_user_pri;
1696 		mtx_unlock_spin(&sched_lock);
1697 	}
1698 }
1699 
1700 struct kse *
1701 sched_choose(void)
1702 {
1703 	struct kseq *kseq;
1704 	struct kse *ke;
1705 
1706 	mtx_assert(&sched_lock, MA_OWNED);
1707 	kseq = KSEQ_SELF();
1708 #ifdef SMP
1709 restart:
1710 	if (kseq->ksq_assigned)
1711 		kseq_assign(kseq);
1712 #endif
1713 	ke = kseq_choose(kseq);
1714 	if (ke) {
1715 #ifdef SMP
1716 		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
1717 			if (kseq_idled(kseq) == 0)
1718 				goto restart;
1719 #endif
1720 		kseq_runq_rem(kseq, ke);
1721 		ke->ke_state = KES_THREAD;
1722 		return (ke);
1723 	}
1724 #ifdef SMP
1725 	if (kseq_idled(kseq) == 0)
1726 		goto restart;
1727 #endif
1728 	return (NULL);
1729 }
1730 
1731 void
1732 sched_add(struct thread *td, int flags)
1733 {
1734 	struct kseq *kseq;
1735 	struct ksegrp *kg;
1736 	struct kse *ke;
1737 	int preemptive;
1738 	int canmigrate;
1739 	int class;
1740 
1741 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
1742 	    td, td->td_proc->p_comm, td->td_priority, curthread,
1743 	    curthread->td_proc->p_comm);
1744 	mtx_assert(&sched_lock, MA_OWNED);
1745 	ke = td->td_kse;
1746 	kg = td->td_ksegrp;
1747 	canmigrate = 1;
1748 	preemptive = !(flags & SRQ_YIELDING);
1749 	class = PRI_BASE(kg->kg_pri_class);
1750 	kseq = KSEQ_SELF();
1751 	if ((ke->ke_flags & KEF_INTERNAL) == 0)
1752 		SLOT_USE(td->td_ksegrp);
1753 	ke->ke_flags &= ~KEF_INTERNAL;
1754 #ifdef SMP
1755 	if (ke->ke_flags & KEF_ASSIGNED) {
1756 		if (ke->ke_flags & KEF_REMOVED)
1757 			ke->ke_flags &= ~KEF_REMOVED;
1758 		return;
1759 	}
1760 	canmigrate = KSE_CAN_MIGRATE(ke);
1761 #endif
1762 	KASSERT(ke->ke_state != KES_ONRUNQ,
1763 	    ("sched_add: kse %p (%s) already in run queue", ke,
1764 	    ke->ke_proc->p_comm));
1765 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1766 	    ("sched_add: process swapped out"));
1767 	KASSERT(ke->ke_runq == NULL,
1768 	    ("sched_add: KSE %p is still assigned to a run queue", ke));
1769 	switch (class) {
1770 	case PRI_ITHD:
1771 	case PRI_REALTIME:
1772 		ke->ke_runq = kseq->ksq_curr;
1773 		ke->ke_slice = SCHED_SLICE_MAX;
1774 		if (canmigrate)
1775 			ke->ke_cpu = PCPU_GET(cpuid);
1776 		break;
1777 	case PRI_TIMESHARE:
1778 		if (SCHED_CURR(kg, ke))
1779 			ke->ke_runq = kseq->ksq_curr;
1780 		else
1781 			ke->ke_runq = kseq->ksq_next;
1782 		break;
1783 	case PRI_IDLE:
1784 		/*
1785 		 * This is for priority prop.
1786 		 */
1787 		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
1788 			ke->ke_runq = kseq->ksq_curr;
1789 		else
1790 			ke->ke_runq = &kseq->ksq_idle;
1791 		ke->ke_slice = SCHED_SLICE_MIN;
1792 		break;
1793 	default:
1794 		panic("Unknown pri class.");
1795 		break;
1796 	}
1797 #ifdef SMP
1798 	/*
1799 	 * Don't migrate running threads here.  Force the long term balancer
1800 	 * to do it.
1801 	 */
1802 	if (ke->ke_flags & KEF_HOLD) {
1803 		ke->ke_flags &= ~KEF_HOLD;
1804 		canmigrate = 0;
1805 	}
1806 	/*
1807 	 * If this thread is pinned or bound, notify the target cpu.
1808 	 */
1809 	if (!canmigrate && ke->ke_cpu != PCPU_GET(cpuid)) {
1810 		ke->ke_runq = NULL;
1811 		kseq_notify(ke, ke->ke_cpu);
1812 		return;
1813 	}
1814 	/*
1815 	 * If we had been idle, clear our bit in the group and potentially
1816 	 * the global bitmap.  If not, see if we should transfer this thread.
1817 	 */
1818 	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
1819 	    (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
1820 		/*
1821 		 * Check to see if our group is unidling, and if so, remove it
1822 		 * from the global idle mask.
1823 		 */
1824 		if (kseq->ksq_group->ksg_idlemask ==
1825 		    kseq->ksq_group->ksg_cpumask)
1826 			atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
1827 		/*
1828 		 * Now remove ourselves from the group specific idle mask.
1829 		 */
1830 		kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
1831 	} else if (canmigrate && kseq->ksq_load > 1 && class != PRI_ITHD)
1832 		if (kseq_transfer(kseq, ke, class))
1833 			return;
1834 	ke->ke_cpu = PCPU_GET(cpuid);
1835 #endif
1836 	if (td->td_priority < curthread->td_priority &&
1837 	    ke->ke_runq == kseq->ksq_curr)
1838 		curthread->td_flags |= TDF_NEEDRESCHED;
1839 	if (preemptive && maybe_preempt(td))
1840 		return;
1841 	ke->ke_state = KES_ONRUNQ;
1842 
1843 	kseq_runq_add(kseq, ke, flags);
1844 	kseq_load_add(kseq, ke);
1845 }
1846 
1847 void
1848 sched_rem(struct thread *td)
1849 {
1850 	struct kseq *kseq;
1851 	struct kse *ke;
1852 
1853 	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
1854 	    td, td->td_proc->p_comm, td->td_priority, curthread,
1855 	    curthread->td_proc->p_comm);
1856 	mtx_assert(&sched_lock, MA_OWNED);
1857 	ke = td->td_kse;
1858 	SLOT_RELEASE(td->td_ksegrp);
1859 	if (ke->ke_flags & KEF_ASSIGNED) {
1860 		ke->ke_flags |= KEF_REMOVED;
1861 		return;
1862 	}
1863 	KASSERT((ke->ke_state == KES_ONRUNQ),
1864 	    ("sched_rem: KSE not on run queue"));
1865 
1866 	ke->ke_state = KES_THREAD;
1867 	kseq = KSEQ_CPU(ke->ke_cpu);
1868 	kseq_runq_rem(kseq, ke);
1869 	kseq_load_rem(kseq, ke);
1870 }
1871 
1872 fixpt_t
1873 sched_pctcpu(struct thread *td)
1874 {
1875 	fixpt_t pctcpu;
1876 	struct kse *ke;
1877 
1878 	pctcpu = 0;
1879 	ke = td->td_kse;
1880 	if (ke == NULL)
1881 		return (0);
1882 
1883 	mtx_lock_spin(&sched_lock);
1884 	if (ke->ke_ticks) {
1885 		int rtick;
1886 
1887 		/*
1888 		 * Don't update more frequently than twice a second.  Allowing
1889 		 * this causes the cpu usage to decay away too quickly due to
1890 		 * rounding errors.
1891 		 */
1892 		if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
1893 		    ke->ke_ltick < (ticks - (hz / 2)))
1894 			sched_pctcpu_update(ke);
1895 		/* How many rtick per second ? */
1896 		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
1897 		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
1898 	}
1899 
1900 	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
1901 	mtx_unlock_spin(&sched_lock);
1902 
1903 	return (pctcpu);
1904 }
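/*
 * Here rtick is the kse's average tick count per second over the
 * ten-second window, and the expression above reduces to
 * pctcpu = (FSCALE * rtick) / realstathz: a thread that received every
 * tick reports FSCALE (100%), one that received half of them reports
 * FSCALE / 2.
 */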
1905 
1906 void
1907 sched_bind(struct thread *td, int cpu)
1908 {
1909 	struct kse *ke;
1910 
1911 	mtx_assert(&sched_lock, MA_OWNED);
1912 	ke = td->td_kse;
1913 	ke->ke_flags |= KEF_BOUND;
1914 #ifdef SMP
1915 	if (PCPU_GET(cpuid) == cpu)
1916 		return;
1917 	/* sched_rem without the runq_remove */
1918 	ke->ke_state = KES_THREAD;
1919 	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1920 	kseq_notify(ke, cpu);
1921 	/* When we return from mi_switch we'll be on the correct cpu. */
1922 	mi_switch(SW_VOL, NULL);
1923 #endif
1924 }
1925 
1926 void
1927 sched_unbind(struct thread *td)
1928 {
1929 	mtx_assert(&sched_lock, MA_OWNED);
1930 	td->td_kse->ke_flags &= ~KEF_BOUND;
1931 }
1932 
1933 int
1934 sched_is_bound(struct thread *td)
1935 {
1936 	mtx_assert(&sched_lock, MA_OWNED);
1937 	return (td->td_kse->ke_flags & KEF_BOUND);
1938 }
1939 
1940 int
1941 sched_load(void)
1942 {
1943 #ifdef SMP
1944 	int total;
1945 	int i;
1946 
1947 	total = 0;
1948 	for (i = 0; i <= ksg_maxid; i++)
1949 		total += KSEQ_GROUP(i)->ksg_load;
1950 	return (total);
1951 #else
1952 	return (KSEQ_SELF()->ksq_sysload);
1953 #endif
1954 }
1955 
1956 int
1957 sched_sizeof_ksegrp(void)
1958 {
1959 	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
1960 }
1961 
1962 int
1963 sched_sizeof_proc(void)
1964 {
1965 	return (sizeof(struct proc));
1966 }
1967 
1968 int
1969 sched_sizeof_thread(void)
1970 {
1971 	return (sizeof(struct thread) + sizeof(struct td_sched));
1972 }
1973 #define KERN_SWITCH_INCLUDE 1
1974 #include "kern/kern_switch.c"
1975