xref: /freebsd/sys/kern/sched_4bsd.c (revision 181fd12f6c811dcbea933624cf738c04e97a394b)
1 /*-
2  * Copyright (c) 1982, 1986, 1990, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/ktr.h>
42 #include <sys/lock.h>
43 #include <sys/kthread.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/resourcevar.h>
47 #include <sys/sched.h>
48 #include <sys/smp.h>
49 #include <sys/sysctl.h>
50 #include <sys/sx.h>
51 
52 /*
53  * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
54  * the range 100-256 Hz (approximately).
55  */
56 #define	ESTCPULIM(e) \
57     min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
58     RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
59 #ifdef SMP
60 #define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
61 #else
62 #define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
63 #endif
64 #define	NICE_WEIGHT		1	/* Priorities per nice level. */
65 
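/*
 * Worked example for ESTCPULIM() (a sketch assuming the stock values
 * PRIO_MIN == -20, PRIO_MAX == 20 and RQ_PPQ == 4 from <sys/resource.h>
 * and <sys/runq.h>): on a UP kernel INVERSE_ESTCPU_WEIGHT is 8, so
 *	ESTCPULIM(e) = min(e, 8 * (1 * 40 - 4) + 8 - 1) = min(e, 295),
 * i.e. kg_estcpu saturates at 295, bounding the estcpu term that
 * resetpriority() folds into the user priority.
 */
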
66 struct ke_sched {
67 	int		ske_cpticks;	/* (j) Ticks of cpu time. */
68 	struct runq	*ske_runq;	/* runq the kse is currently on */
69 };
70 #define ke_runq 	ke_sched->ske_runq
71 #define KEF_BOUND	KEF_SCHED1
72 
73 #define SKE_RUNQ_PCPU(ke)						\
74     ((ke)->ke_runq != 0 && (ke)->ke_runq != &runq)
75 
76 /*
77  * KSE_CAN_MIGRATE macro returns true if the kse can migrate between
78  * cpus.
79  */
80 #define KSE_CAN_MIGRATE(ke)						\
81     ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
82 static struct ke_sched ke_sched;
83 
84 struct ke_sched *kse0_sched = &ke_sched;
85 struct kg_sched *ksegrp0_sched = NULL;
86 struct p_sched *proc0_sched = NULL;
87 struct td_sched *thread0_sched = NULL;
88 
89 static int	sched_tdcnt;	/* Total runnable threads in the system. */
90 static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
91 #define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */
92 
93 static struct callout roundrobin_callout;
94 
95 static void	setup_runqs(void);
96 static void	roundrobin(void *arg);
97 static void	schedcpu(void);
98 static void	schedcpu_thread(void);
99 static void	sched_setup(void *dummy);
100 static void	maybe_resched(struct thread *td);
101 static void	updatepri(struct ksegrp *kg);
102 static void	resetpriority(struct ksegrp *kg);
103 
104 static struct kproc_desc sched_kp = {
105 	"schedcpu",
106 	schedcpu_thread,
107 	NULL
108 };
109 SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start, &sched_kp)
110 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
111 
112 /*
113  * Global run queue.
114  */
115 static struct runq runq;
116 
117 #ifdef SMP
118 /*
119  * Per-CPU run queues
120  */
121 static struct runq runq_pcpu[MAXCPU];
122 #endif
123 
124 static void
125 setup_runqs(void)
126 {
127 #ifdef SMP
128 	int i;
129 
130 	for (i = 0; i < MAXCPU; ++i)
131 		runq_init(&runq_pcpu[i]);
132 #endif
133 
134 	runq_init(&runq);
135 }
136 
137 static int
138 sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
139 {
140 	int error, new_val;
141 
142 	new_val = sched_quantum * tick;
143 	error = sysctl_handle_int(oidp, &new_val, 0, req);
144 	if (error != 0 || req->newptr == NULL)
145 		return (error);
146 	if (new_val < tick)
147 		return (EINVAL);
148 	sched_quantum = new_val / tick;
149 	hogticks = 2 * sched_quantum;
150 	return (0);
151 }
152 
153 SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");
154 
155 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
156     "Scheduler name");
157 
158 SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
159     0, sizeof sched_quantum, sysctl_kern_quantum, "I",
160     "Roundrobin scheduling quantum in microseconds");
161 
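/*
 * Unit-conversion example for the kern.sched.quantum sysctl above (a
 * sketch assuming the common hz == 1000, so tick == 1000 microseconds):
 * the default quantum is SCHED_QUANTUM == hz / 10 == 100 ticks, reported
 * by the handler as 100 * 1000 == 100000 microseconds; writing 50000
 * sets sched_quantum to 50000 / 1000 == 50 ticks and hogticks to 100.
 */
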
162 /*
163  * Arrange to reschedule if necessary, taking the priorities and
164  * schedulers into account.
165  */
166 static void
167 maybe_resched(struct thread *td)
168 {
169 
170 	mtx_assert(&sched_lock, MA_OWNED);
171 	if (td->td_priority < curthread->td_priority && curthread->td_kse)
172 		curthread->td_flags |= TDF_NEEDRESCHED;
173 }
174 
175 /*
176  * Force switch among equal priority processes every 100ms.
177  * We don't actually need to force a context switch of the current process.
178  * The act of firing the event triggers a context switch to softclock() and
179  * then switching back out again, which is equivalent to a preemption; thus
180  * no further work is needed on the local CPU.
181  */
182 /* ARGSUSED */
183 static void
184 roundrobin(void *arg)
185 {
186 
187 #ifdef SMP
188 	mtx_lock_spin(&sched_lock);
189 	forward_roundrobin();
190 	mtx_unlock_spin(&sched_lock);
191 #endif
192 
193 	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
194 }
195 
196 /*
197  * Constants for digital decay and forget:
198  *	90% of (kg_estcpu) usage in 5 * loadav time
199  *	95% of (ke_pctcpu) usage in 60 seconds (load insensitive)
200  *          Note that, as ps(1) mentions, this can let percentages
201  *          total over 100% (I've seen 137.9% for 3 processes).
202  *
203  * Note that sched_clock() updates kg_estcpu and ske_cpticks asynchronously.
204  *
205  * We wish to decay away 90% of kg_estcpu in (5 * loadavg) seconds.
206  * That is, the system wants to compute a value of decay such
207  * that the following for loop:
208  * 	for (i = 0; i < (5 * loadavg); i++)
209  * 		kg_estcpu *= decay;
210  * will compute
211  * 	kg_estcpu *= 0.1;
212  * for all values of loadavg:
213  *
214  * Mathematically this loop can be expressed by saying:
215  * 	decay ** (5 * loadavg) ~= .1
216  *
217  * The system computes decay as:
218  * 	decay = (2 * loadavg) / (2 * loadavg + 1)
219  *
220  * We wish to prove that the system's computation of decay
221  * will always fulfill the equation:
222  * 	decay ** (5 * loadavg) ~= .1
223  *
224  * If we compute b as:
225  * 	b = 2 * loadavg
226  * then
227  * 	decay = b / (b + 1)
228  *
229  * We now need to prove two things:
230  *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
231  *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
232  *
233  * Facts:
234  *         For x close to zero, exp(x) =~ 1 + x, since
235  *              exp(x) = x**0/0! + x**1/1! + x**2/2! + ... .
236  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
237  *         For x close to zero, ln(1+x) =~ x, since
238  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
239  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
240  *         ln(.1) =~ -2.30
241  *
242  * Proof of (1):
243  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
244  *	solving for factor,
245  *      ln(factor) =~ -2.30 / (5 * loadav), or
246  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
247  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
248  *
249  * Proof of (2):
250  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
251  *	solving for power,
252  *      power*ln(b/(b+1)) =~ -2.30, or
253  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
254  *
255  * Actual power values for the implemented algorithm are as follows:
256  *      loadav: 1       2       3       4
257  *      power:  5.68    10.32   14.94   19.55
258  */
259 
260 /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
261 #define	loadfactor(loadav)	(2 * (loadav))
262 #define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
263 
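/*
 * Numerical check of the derivation above: averunnable.ldavg[] is itself
 * scaled by FSCALE, so decay_cpu() computes cpu * (2 * loadavg) /
 * (2 * loadavg + 1) in fixed point.  For loadav == 1 the decay factor is
 * 2/3 and solving (2/3)**power == .1 gives power = ln(.1) / ln(2/3) =~
 * 5.68; for loadav == 2 the factor is 4/5 and power =~ 10.32, matching
 * the first two entries of the power table.
 */
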
264 /* decay 95% of `ke_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
265 static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
266 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
267 
268 /*
269  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
270  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
271  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
272  *
273  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
274  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
275  *
276  * If you don't want to bother with the faster/more-accurate formula, you
277  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
278  * (more general) method of calculating the %age of CPU used by a process.
279  */
280 #define	CCPU_SHIFT	11
281 
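/*
 * Illustration of the 95%-in-60-seconds claim: schedcpu() multiplies
 * ke_pctcpu by ccpu == exp(-1/20) once per second, so after 60 seconds
 * the old contribution has shrunk by a factor of exp(-60/20) == exp(-3)
 * =~ 0.0498, i.e. about 95% of it has been forgotten.
 */
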
282 /*
283  * Recompute process priorities once per second (every hz ticks).
284  * MP-safe, called without the Giant mutex.
285  */
286 /* ARGSUSED */
287 static void
288 schedcpu(void)
289 {
290 	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
291 	struct thread *td;
292 	struct proc *p;
293 	struct kse *ke;
294 	struct ksegrp *kg;
295 	int awake, realstathz;
296 
297 	realstathz = stathz ? stathz : hz;
298 	sx_slock(&allproc_lock);
299 	FOREACH_PROC_IN_SYSTEM(p) {
300 		/*
301 		 * Prevent state changes and protect run queue.
302 		 */
303 		mtx_lock_spin(&sched_lock);
304 		/*
305 		 * Increment time in/out of memory.  We ignore overflow; with
306 		 * 16-bit int's (remember them?) overflow takes 45 days.
307 		 */
308 		p->p_swtime++;
309 		FOREACH_KSEGRP_IN_PROC(p, kg) {
310 			awake = 0;
311 			FOREACH_KSE_IN_GROUP(kg, ke) {
312 				/*
313 				 * Increment sleep time (if sleeping).  We
314 				 * ignore overflow, as above.
315 				 */
316 				/*
317 				 * The kse slptimes are not touched in wakeup
318 				 * because the thread may not HAVE a KSE.
319 				 */
320 				if (ke->ke_state == KES_ONRUNQ) {
321 					awake = 1;
322 					ke->ke_flags &= ~KEF_DIDRUN;
323 				} else if ((ke->ke_state == KES_THREAD) &&
324 				    (TD_IS_RUNNING(ke->ke_thread))) {
325 					awake = 1;
326 					/* Do not clear KEF_DIDRUN */
327 				} else if (ke->ke_flags & KEF_DIDRUN) {
328 					awake = 1;
329 					ke->ke_flags &= ~KEF_DIDRUN;
330 				}
331 
332 				/*
333 				 * ke_pctcpu is only for ps and ttyinfo().
334 				 * Do it per kse, and add them up at the end?
335 				 * XXXKSE
336 				 */
337 				ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
338 				    FSHIFT;
339 				/*
340 				 * If the kse has been idle the entire second,
341 				 * stop recalculating its priority until
342 				 * it wakes up.
343 				 */
344 				if (ke->ke_sched->ske_cpticks == 0)
345 					continue;
346 #if	(FSHIFT >= CCPU_SHIFT)
347 				ke->ke_pctcpu += (realstathz == 100)
348 				    ? ((fixpt_t) ke->ke_sched->ske_cpticks) <<
349 				    (FSHIFT - CCPU_SHIFT) :
350 				    100 * (((fixpt_t) ke->ke_sched->ske_cpticks)
351 				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
352 #else
353 				ke->ke_pctcpu += ((FSCALE - ccpu) *
354 				    (ke->ke_sched->ske_cpticks *
355 				    FSCALE / realstathz)) >> FSHIFT;
356 #endif
357 				ke->ke_sched->ske_cpticks = 0;
358 			} /* end of kse loop */
359 			/*
360 			 * If there are ANY running threads in this KSEGRP,
361 			 * then don't count it as sleeping.
362 			 */
363 			if (awake) {
364 				if (kg->kg_slptime > 1) {
365 					/*
366 					 * In an ideal world, this should not
367 					 * happen, because whoever woke us
368 					 * up from the long sleep should have
369 					 * unwound the slptime and reset our
370 					 * priority before we run at the stale
371 					 * priority.  Should KASSERT at some
372 					 * point when all the cases are fixed.
373 					 */
374 					updatepri(kg);
375 				}
376 				kg->kg_slptime = 0;
377 			} else
378 				kg->kg_slptime++;
379 			if (kg->kg_slptime > 1)
380 				continue;
381 			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
382 			resetpriority(kg);
383 			FOREACH_THREAD_IN_GROUP(kg, td) {
384 				if (td->td_priority >= PUSER) {
385 					sched_prio(td, kg->kg_user_pri);
386 				}
387 			}
388 		} /* end of ksegrp loop */
389 		mtx_unlock_spin(&sched_lock);
390 	} /* end of process loop */
391 	sx_sunlock(&allproc_lock);
392 }
393 
394 /*
395  * Main loop for a kthread that executes schedcpu once a second.
396  */
397 static void
398 schedcpu_thread(void)
399 {
400 	int nowake;	/* dummy; its address is a unique, never-signalled sleep channel */
401 
402 	for (;;) {
403 		schedcpu();
404 		tsleep(&nowake, curthread->td_priority, "-", hz);
405 	}
406 }
407 
408 /*
409  * Recalculate the priority of a process after it has slept for a while.
410  * For all load averages >= 1 and max kg_estcpu of 255, sleeping for at
411  * least six times the loadfactor will decay kg_estcpu to zero.
412  */
413 static void
414 updatepri(struct ksegrp *kg)
415 {
416 	register fixpt_t loadfac;
417 	register unsigned int newcpu;
418 
419 	loadfac = loadfactor(averunnable.ldavg[0]);
420 	if (kg->kg_slptime > 5 * loadfac)
421 		kg->kg_estcpu = 0;
422 	else {
423 		newcpu = kg->kg_estcpu;
424 		kg->kg_slptime--;	/* was incremented in schedcpu() */
425 		while (newcpu && --kg->kg_slptime)
426 			newcpu = decay_cpu(loadfac, newcpu);
427 		kg->kg_estcpu = newcpu;
428 	}
429 	resetpriority(kg);
430 }
431 
432 /*
433  * Compute the priority of a process when running in user mode.
434  * Arrange to reschedule if the resulting priority is better
435  * than that of the current process.
436  */
437 static void
438 resetpriority(struct ksegrp *kg)
439 {
440 	register unsigned int newpriority;
441 	struct thread *td;
442 
443 	if (kg->kg_pri_class == PRI_TIMESHARE) {
444 		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
445 		    NICE_WEIGHT * (kg->kg_proc->p_nice - PRIO_MIN);
446 		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
447 		    PRI_MAX_TIMESHARE);
448 		kg->kg_user_pri = newpriority;
449 	}
450 	FOREACH_THREAD_IN_GROUP(kg, td) {
451 		maybe_resched(td);			/* XXXKSE silly */
452 	}
453 }
454 
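/*
 * Worked example for resetpriority() (a sketch assuming the stock
 * priority layout where PUSER == PRI_MIN_TIMESHARE == 160,
 * PRI_MAX_TIMESHARE == 223 and the UP INVERSE_ESTCPU_WEIGHT == 8): a
 * nice-0 ksegrp with kg_estcpu == 40 gets
 *	newpriority = 160 + 40 / 8 + 1 * (0 - (-20)) = 185,
 * clamped into [160, 223].  Each additional 8 points of estcpu, or each
 * additional nice level, costs one priority step.
 */
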
455 /* ARGSUSED */
456 static void
457 sched_setup(void *dummy)
458 {
459 	setup_runqs();
460 
461 	if (sched_quantum == 0)
462 		sched_quantum = SCHED_QUANTUM;
463 	hogticks = 2 * sched_quantum;
464 
465 	callout_init(&roundrobin_callout, CALLOUT_MPSAFE);
466 
467 	/* Kick off timeout driven events by calling first time. */
468 	roundrobin(NULL);
469 
470 	/* Account for thread0. */
471 	sched_tdcnt++;
472 }
473 
474 /* External interfaces start here */
475 int
476 sched_runnable(void)
477 {
478 #ifdef SMP
479 	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
480 #else
481 	return runq_check(&runq);
482 #endif
483 }
484 
485 int
486 sched_rr_interval(void)
487 {
488 	if (sched_quantum == 0)
489 		sched_quantum = SCHED_QUANTUM;
490 	return (sched_quantum);
491 }
492 
493 /*
494  * We adjust the priority of the current process.  The priority of
495  * a process gets worse as it accumulates CPU time.  The cpu usage
496  * estimator (kg_estcpu) is increased here.  resetpriority() will
497  * compute a different priority each time kg_estcpu increases by
498  * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).  The cpu usage
499  * estimator ramps up
500  * quite quickly when the process is running (linearly), and decays
501  * away exponentially, at a rate which is proportionally slower when
502  * the system is busy.  The basic principle is that the system will
503  * 90% forget that the process used a lot of CPU time in 5 * loadav
504  * seconds.  This causes the system to favor processes which haven't
505  * run much recently, and to round-robin among other processes.
506  */
507 void
508 sched_clock(struct thread *td)
509 {
510 	struct ksegrp *kg;
511 	struct kse *ke;
512 
513 	mtx_assert(&sched_lock, MA_OWNED);
514 	kg = td->td_ksegrp;
515 	ke = td->td_kse;
516 
517 	ke->ke_sched->ske_cpticks++;
518 	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
519 	if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
520 		resetpriority(kg);
521 		if (td->td_priority >= PUSER)
522 			td->td_priority = kg->kg_user_pri;
523 	}
524 }
525 
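/*
 * Rough rate example for sched_clock() (a sketch assuming stathz == 128,
 * a common statclock frequency): a thread that monopolizes a CPU gains
 * one kg_estcpu point per statclock tick, so resetpriority() runs and
 * its user priority gets one step worse roughly every
 * INVERSE_ESTCPU_WEIGHT ticks (8/128 of a second on UP), while schedcpu()
 * decays the accumulated estcpu once per second at the loadav-dependent
 * rate derived above.
 */
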
526 /*
527  * Charge the child's scheduling CPU usage to the parent.
528  *
529  * XXXKSE: assume only one thread, kse and ksegrp; estcpu is kept in each
530  * ksegrp.  Charge it to the ksegrp that did the wait; since process estcpu
531  * is the sum over all ksegrps, this is strictly as expected.  Assume that
532  * the child process aggregated all of its estcpu into the 'built-in' ksegrp.
533  */
534 void
535 sched_exit(struct proc *p, struct thread *td)
536 {
537 	sched_exit_kse(FIRST_KSE_IN_PROC(p), td);
538 	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
539 	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
540 }
541 
542 void
543 sched_exit_kse(struct kse *ke, struct thread *child)
544 {
545 }
546 
547 void
548 sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd)
549 {
550 
551 	mtx_assert(&sched_lock, MA_OWNED);
552 	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + childtd->td_ksegrp->kg_estcpu);
553 }
554 
555 void
556 sched_exit_thread(struct thread *td, struct thread *child)
557 {
558 	if ((child->td_proc->p_flag & P_NOLOAD) == 0)
559 		sched_tdcnt--;
560 }
561 
562 void
563 sched_fork(struct thread *td, struct proc *p1)
564 {
565 	sched_fork_kse(td, FIRST_KSE_IN_PROC(p1));
566 	sched_fork_ksegrp(td, FIRST_KSEGRP_IN_PROC(p1));
567 	sched_fork_thread(td, FIRST_THREAD_IN_PROC(p1));
568 }
569 
570 void
571 sched_fork_kse(struct thread *td, struct kse *child)
572 {
573 	child->ke_sched->ske_cpticks = 0;
574 }
575 
576 void
577 sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
578 {
579 	mtx_assert(&sched_lock, MA_OWNED);
580 	child->kg_estcpu = td->td_ksegrp->kg_estcpu;
581 }
582 
583 void
584 sched_fork_thread(struct thread *td, struct thread *child)
585 {
586 }
587 
588 void
589 sched_nice(struct proc *p, int nice)
590 {
591 	struct ksegrp *kg;
592 
593 	PROC_LOCK_ASSERT(p, MA_OWNED);
594 	mtx_assert(&sched_lock, MA_OWNED);
595 	p->p_nice = nice;
596 	FOREACH_KSEGRP_IN_PROC(p, kg) {
597 		resetpriority(kg);
598 	}
599 }
600 
601 void
602 sched_class(struct ksegrp *kg, int class)
603 {
604 	mtx_assert(&sched_lock, MA_OWNED);
605 	kg->kg_pri_class = class;
606 }
607 
608 /*
609  * Adjust the priority of a thread.
610  * This may include moving the thread within the KSEGRP,
611  * changing the assignment of a kse to the thread,
612  * and moving a KSE in the system run queue.
613  */
614 void
615 sched_prio(struct thread *td, u_char prio)
616 {
617 
618 	mtx_assert(&sched_lock, MA_OWNED);
619 	if (TD_ON_RUNQ(td)) {
620 		adjustrunqueue(td, prio);
621 	} else {
622 		td->td_priority = prio;
623 	}
624 }
625 
626 void
627 sched_sleep(struct thread *td)
628 {
629 
630 	mtx_assert(&sched_lock, MA_OWNED);
631 	td->td_ksegrp->kg_slptime = 0;
632 	td->td_base_pri = td->td_priority;
633 }
634 
635 void
636 sched_switch(struct thread *td, struct thread *newtd)
637 {
638 	struct kse *ke;
639 	struct proc *p;
640 
641 	ke = td->td_kse;
642 	p = td->td_proc;
643 
644 	mtx_assert(&sched_lock, MA_OWNED);
645 	KASSERT((ke->ke_state == KES_THREAD), ("sched_switch: kse state?"));
646 
647 	if ((p->p_flag & P_NOLOAD) == 0)
648 		sched_tdcnt--;
649 	if (newtd != NULL && (newtd->td_proc->p_flag & P_NOLOAD) == 0)
650 		sched_tdcnt++;
651 	td->td_lastcpu = td->td_oncpu;
652 	td->td_last_kse = ke;
653 	td->td_flags &= ~TDF_NEEDRESCHED;
654 	td->td_pflags &= ~TDP_OWEPREEMPT;
655 	td->td_oncpu = NOCPU;
656 	/*
657 	 * At the last moment, if this thread is still marked RUNNING,
658 	 * then put it back on the run queue as it has not been suspended
659 	 * or stopped or any thing else similar.  We never put the idle
660 	 * threads on the run queue, however.
661 	 */
662 	if (td == PCPU_GET(idlethread))
663 		TD_SET_CAN_RUN(td);
664 	else if (TD_IS_RUNNING(td)) {
665 		/* Put us back on the run queue (kse and all). */
666 		setrunqueue(td);
667 	} else if (p->p_flag & P_SA) {
668 		/*
669 		 * We will not be on the run queue. So we must be
670 		 * sleeping or similar. As it's available,
671 		 * someone else can use the KSE if they need it.
672 		 */
673 		kse_reassign(ke);
674 	}
675 	if (newtd == NULL)
676 		newtd = choosethread();
677 	if (td != newtd)
678 		cpu_switch(td, newtd);
679 	sched_lock.mtx_lock = (uintptr_t)td;
680 	td->td_oncpu = PCPU_GET(cpuid);
681 }
682 
683 void
684 sched_wakeup(struct thread *td)
685 {
686 	struct ksegrp *kg;
687 
688 	mtx_assert(&sched_lock, MA_OWNED);
689 	kg = td->td_ksegrp;
690 	if (kg->kg_slptime > 1)
691 		updatepri(kg);
692 	kg->kg_slptime = 0;
693 	setrunqueue(td);
694 }
695 
696 void
697 sched_add(struct thread *td)
698 {
699 	struct kse *ke;
700 
701 	ke = td->td_kse;
702 	mtx_assert(&sched_lock, MA_OWNED);
703 	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
704 	KASSERT((ke->ke_thread->td_kse != NULL),
705 	    ("sched_add: No KSE on thread"));
706 	KASSERT(ke->ke_state != KES_ONRUNQ,
707 	    ("sched_add: kse %p (%s) already in run queue", ke,
708 	    ke->ke_proc->p_comm));
709 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
710 	    ("sched_add: process swapped out"));
711 
712 #ifdef SMP
713 	/*
714 	 * Only try to preempt if the thread is unpinned or pinned to the
715 	 * current CPU.
716 	 */
717 	if (KSE_CAN_MIGRATE(ke) || ke->ke_runq == &runq_pcpu[PCPU_GET(cpuid)])
718 #endif
719 	if (maybe_preempt(td))
720 		return;
721 
722 #ifdef SMP
723 	if (KSE_CAN_MIGRATE(ke)) {
724 		CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to gbl runq", ke, td);
725 		ke->ke_runq = &runq;
726 	} else {
727 		CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to pcpu runq", ke, td);
728 		if (!SKE_RUNQ_PCPU(ke))
729 			ke->ke_runq = &runq_pcpu[PCPU_GET(cpuid)];
730 	}
731 #else
732 	CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to runq", ke, td);
733 	ke->ke_runq = &runq;
734 #endif
735 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
736 		sched_tdcnt++;
737 	runq_add(ke->ke_runq, ke);
738 	ke->ke_ksegrp->kg_runq_kses++;
739 	ke->ke_state = KES_ONRUNQ;
740 	maybe_resched(td);
741 }
742 
743 void
744 sched_rem(struct thread *td)
745 {
746 	struct kse *ke;
747 
748 	ke = td->td_kse;
749 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
750 	    ("sched_rem: process swapped out"));
751 	KASSERT((ke->ke_state == KES_ONRUNQ),
752 	    ("sched_rem: KSE not on run queue"));
753 	mtx_assert(&sched_lock, MA_OWNED);
754 
755 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
756 		sched_tdcnt--;
757 	runq_remove(ke->ke_sched->ske_runq, ke);
758 
759 	ke->ke_state = KES_THREAD;
760 	ke->ke_ksegrp->kg_runq_kses--;
761 }
762 
763 struct kse *
764 sched_choose(void)
765 {
766 	struct kse *ke;
767 	struct runq *rq;
768 
769 #ifdef SMP
770 	struct kse *kecpu;
771 
772 	rq = &runq;
773 	ke = runq_choose(&runq);
774 	kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
775 
776 	if (ke == NULL ||
777 	    (kecpu != NULL &&
778 	     kecpu->ke_thread->td_priority < ke->ke_thread->td_priority)) {
779 		CTR2(KTR_RUNQ, "choosing kse %p from pcpu runq %d", kecpu,
780 		     PCPU_GET(cpuid));
781 		ke = kecpu;
782 		rq = &runq_pcpu[PCPU_GET(cpuid)];
783 	} else {
784 		CTR1(KTR_RUNQ, "choosing kse %p from main runq", ke);
785 	}
786 
787 #else
788 	rq = &runq;
789 	ke = runq_choose(&runq);
790 #endif
791 
792 	if (ke != NULL) {
793 		runq_remove(rq, ke);
794 		ke->ke_state = KES_THREAD;
795 		ke->ke_ksegrp->kg_runq_kses--;
796 
797 		KASSERT((ke->ke_thread != NULL),
798 		    ("sched_choose: No thread on KSE"));
799 		KASSERT((ke->ke_thread->td_kse != NULL),
800 		    ("sched_choose: No KSE on thread"));
801 		KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
802 		    ("sched_choose: process swapped out"));
803 	}
804 	return (ke);
805 }
806 
807 void
808 sched_userret(struct thread *td)
809 {
810 	struct ksegrp *kg;
811 	/*
812 	 * XXX we cheat slightly on the locking here to avoid locking in
813 	 * the usual case.  Setting td_priority here is essentially an
814 	 * incomplete workaround for not setting it properly elsewhere.
815 	 * Now that some interrupt handlers are threads, not setting it
816 	 * properly elsewhere can clobber it in the window between setting
817 	 * it here and returning to user mode, so don't waste time setting
818 	 * it perfectly here.
819 	 */
820 	kg = td->td_ksegrp;
821 	if (td->td_priority != kg->kg_user_pri) {
822 		mtx_lock_spin(&sched_lock);
823 		td->td_priority = kg->kg_user_pri;
824 		mtx_unlock_spin(&sched_lock);
825 	}
826 }
827 
828 void
829 sched_bind(struct thread *td, int cpu)
830 {
831 	struct kse *ke;
832 
833 	mtx_assert(&sched_lock, MA_OWNED);
834 	KASSERT(TD_IS_RUNNING(td),
835 	    ("sched_bind: cannot bind non-running thread"));
836 
837 	ke = td->td_kse;
838 
839 	ke->ke_flags |= KEF_BOUND;
840 #ifdef SMP
841 	ke->ke_runq = &runq_pcpu[cpu];
842 	if (PCPU_GET(cpuid) == cpu)
843 		return;
844 
845 	ke->ke_state = KES_THREAD;
846 
847 	mi_switch(SW_VOL, NULL);
848 #endif
849 }
850 
851 void
852 sched_unbind(struct thread* td)
853 {
854 	mtx_assert(&sched_lock, MA_OWNED);
855 	td->td_kse->ke_flags &= ~KEF_BOUND;
856 }
857 
858 int
859 sched_load(void)
860 {
861 	return (sched_tdcnt);
862 }
863 
864 int
865 sched_sizeof_kse(void)
866 {
867 	return (sizeof(struct kse) + sizeof(struct ke_sched));
868 }
869 int
870 sched_sizeof_ksegrp(void)
871 {
872 	return (sizeof(struct ksegrp));
873 }
874 int
875 sched_sizeof_proc(void)
876 {
877 	return (sizeof(struct proc));
878 }
879 int
880 sched_sizeof_thread(void)
881 {
882 	return (sizeof(struct thread));
883 }
884 
885 fixpt_t
886 sched_pctcpu(struct thread *td)
887 {
888 	struct kse *ke;
889 
890 	ke = td->td_kse;
891 	if (ke == NULL)
892 		ke = td->td_last_kse;
893 	if (ke)
894 		return (ke->ke_pctcpu);
895 
896 	return (0);
897 }
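
/*
 * ke_pctcpu is a fixed-point fraction of a CPU scaled by FSCALE; a
 * consumer such as ps(1) could convert it to hundredths of a percent
 * along these lines (an illustrative sketch, not the exact userland
 * code):
 *	pct = (sched_pctcpu(td) * 10000 + FSCALE / 2) >> FSHIFT;
 */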
898