/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>

/* Change this to "#if 1" to enable logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

/*
 * kern.sched.preemption allows user space to determine whether preemption
 * support is compiled in.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");
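
/*
 * Example (illustrative, not part of the original file): the read-only knob
 * above can be queried from user space with sysctl(3), e.g.:
 *
 *	int enabled;
 *	size_t len = sizeof(enabled);
 *
 *	if (sysctlbyname("kern.sched.preemption", &enabled, &len,
 *	    NULL, 0) == 0)
 *		printf("kernel preemption: %s\n", enabled ? "yes" : "no");
 *
 * or from the shell: sysctl kern.sched.preemption
 */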

/*
 * Support for scheduler stats exported via kern.sched.stats.  All stats may
 * be reset with kern.sched.stats.reset = 1.  Stats may be defined elsewhere
 * with SCHED_STAT_DEFINE().
 */
#ifdef SCHED_STATS
SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0, "switch stats");

/* Switch reasons from mi_switch(). */
DPCPU_DEFINE(long, sched_switch_stats[SWT_COUNT]);
SCHED_STAT_DEFINE_VAR(uncategorized,
    &DPCPU_NAME(sched_switch_stats[SWT_NONE]), "");
SCHED_STAT_DEFINE_VAR(preempt,
    &DPCPU_NAME(sched_switch_stats[SWT_PREEMPT]), "");
SCHED_STAT_DEFINE_VAR(owepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_OWEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(turnstile,
    &DPCPU_NAME(sched_switch_stats[SWT_TURNSTILE]), "");
SCHED_STAT_DEFINE_VAR(sleepq,
    &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQ]), "");
SCHED_STAT_DEFINE_VAR(sleepqtimo,
    &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQTIMO]), "");
SCHED_STAT_DEFINE_VAR(relinquish,
    &DPCPU_NAME(sched_switch_stats[SWT_RELINQUISH]), "");
SCHED_STAT_DEFINE_VAR(needresched,
    &DPCPU_NAME(sched_switch_stats[SWT_NEEDRESCHED]), "");
SCHED_STAT_DEFINE_VAR(idle,
    &DPCPU_NAME(sched_switch_stats[SWT_IDLE]), "");
SCHED_STAT_DEFINE_VAR(iwait,
    &DPCPU_NAME(sched_switch_stats[SWT_IWAIT]), "");
SCHED_STAT_DEFINE_VAR(suspend,
    &DPCPU_NAME(sched_switch_stats[SWT_SUSPEND]), "");
SCHED_STAT_DEFINE_VAR(remotepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(remotewakeidle,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEWAKEIDLE]), "");

static int
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *p;
	uintptr_t counter;
	int error;
	int val;
	int i;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val == 0)
		return (0);
	/*
	 * Traverse the list of children of _kern_sched_stats and reset each
	 * to 0.  Skip the reset entry.
	 */
	SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
		if (p == oidp || p->oid_arg1 == NULL)
			continue;
		counter = (uintptr_t)p->oid_arg1;
		CPU_FOREACH(i) {
			*(long *)(dpcpu_off[i] + counter) = 0;
		}
	}
	return (0);
}

SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_WR, NULL,
    0, sysctl_stats_reset, "I", "Reset scheduler statistics");
#endif
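
/*
 * Example (illustrative): with SCHED_STATS compiled in, the per-CPU switch
 * counters can be inspected and cleared from the shell:
 *
 *	sysctl kern.sched.stats
 *	sysctl kern.sched.stats.reset=1
 */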

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */

static __noinline struct thread *
choosethread_panic(struct thread *td)
{

	/*
	 * If we are in a panic, only allow system threads, plus the one we
	 * are running in, to be run.
	 */
retry:
	if ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		td = sched_choose();
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

struct thread *
choosethread(void)
{
	struct thread *td;

	td = sched_choose();

	if (KERNEL_PANICKED())
		return (choosethread_panic(td));

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemption is not allowed.
 *
 * It might seem a good idea to inline critical_enter(), but in order
 * to prevent instruction reordering by the compiler, a __compiler_membar()
 * would have to be used here (the same as in sched_pin()).  The performance
 * penalty imposed by the membar could then produce slower code than the
 * function call itself, in most cases.
 */
void
critical_enter_KBI(void)
{
#ifdef KTR
	struct thread *td = curthread;
#endif
	critical_enter();
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}

void __noinline
critical_exit_preempt(void)
{
	struct thread *td;
	int flags;

	/*
	 * If td_critnest is 0, it is possible that we are going to get
	 * preempted again before reaching the code below.  This happens
	 * rarely and is harmless.  However, this means td_owepreempt may
	 * now be unset.
	 */
	td = curthread;
	if (td->td_critnest != 0)
		return;
	if (kdb_active)
		return;

	/*
	 * Microoptimization: since we have committed to switching, disable
	 * preemption in interrupt handlers while spinning for the thread
	 * lock.
	 */
	td->td_critnest = 1;
	thread_lock(td);
	td->td_critnest--;
	flags = SW_INVOL | SW_PREEMPT;
	if (TD_IS_IDLETHREAD(td))
		flags |= SWT_IDLE;
	else
		flags |= SWT_OWEPREEMPT;
	mi_switch(flags);
}

void
critical_exit_KBI(void)
{
#ifdef KTR
	struct thread *td = curthread;
#endif
	critical_exit();
	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}
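
/*
 * Usage sketch (illustrative): code that must not be preempted, e.g. while
 * manipulating per-CPU state, brackets the region with a critical section.
 * Sections nest via td_critnest:
 *
 *	critical_enter();
 *	... access PCPU data safely ...
 *	critical_exit();
 */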

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
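
/*
 * Worked example (assuming RQB_BPW == 32, so RQB_L2BPW == 5, and that
 * RQB_FFS() returns the zero-based index of the lowest set bit): if
 * rqb_bits[0] == 0 and rqb_bits[1] == 0x10, the scan stops at word 1,
 * RQB_FFS() yields 4, and the first non-empty queue is 4 + (1 << 5) == 36.
 */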

static __inline int
runq_findbit_from(struct runq *rq, u_char pri)
{
	struct rqbits *rqb;
	rqb_word_t mask;
	int i;

	/*
	 * Set the mask for the first word so we ignore priorities before 'pri'.
	 */
	mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1));
	rqb = &rq->rq_status;
again:
	for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) {
		mask = rqb->rqb_bits[i] & mask;
		if (mask == 0)
			continue;
		pri = RQB_FFS(mask) + (i << RQB_L2BPW);
		CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
		    mask, i, pri);
		return (pri);
	}
	if (pri == 0)
		return (-1);
	/*
	 * Wrap back around to the beginning of the list just once so we
	 * scan the whole thing.
	 */
	pri = 0;
	goto again;
}
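
/*
 * Mask example (assuming RQB_BPW == 32): for pri == 36, the scan starts at
 * word RQB_WORD(36) == 1 with mask == ~0 << 4, hiding bits 0..3 of that
 * word (priorities 32..35); every later word is scanned with a full mask,
 * and on wrap-around the scan restarts once from priority 0.
 */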

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct thread *td, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = td->td_priority / RQ_PPQ;
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}
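
/*
 * Mapping example (assuming RQ_PPQ == 4): a thread with td_priority 100 is
 * placed on queue 100 / 4 == 25, so priorities 100..103 all share queue 25.
 * SRQ_PREEMPTED threads are queued at the head so they resume ahead of
 * later arrivals at the same priority.
 */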

void
runq_add_pri(struct runq *rq, struct thread *td, u_char pri, int flags)
{
	struct rqhead *rqh;

	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add_pri: td=%p pri=%d idx=%d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects and does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find the highest priority process on the run queue.
 */
struct thread *
runq_choose_fuzz(struct runq *rq, int fuzz)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		/* fuzz == 1 is normal; 0 or less are ignored. */
		if (fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = fuzz;
			int cpu = PCPU_GET(cpuid);
			struct thread *td2;
			td2 = td = TAILQ_FIRST(rqh);

			while (count-- && td2) {
				if (td2->td_lastcpu == cpu) {
					td = td2;
					break;
				}
				td2 = TAILQ_NEXT(td2, td_runq);
			}
		} else
			td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose_fuzz: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose_fuzz: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_fuzz: idleproc pri=%d", pri);

	return (NULL);
}
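
/*
 * Example (illustrative): with fuzz == 2, the first two threads on the
 * highest-priority queue are inspected and one whose td_lastcpu matches the
 * current CPU is preferred, trading strict FIFO order within the queue for
 * cache affinity.
 */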

/*
 * Find the highest priority process on the run queue.
 */
struct thread *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose: idlethread pri=%d", pri);

	return (NULL);
}

struct thread *
runq_choose_from(struct runq *rq, u_char idx)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	if ((pri = runq_findbit_from(rq, idx)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL,
		    ("runq_choose_from: no thread on busy queue"));
		CTR4(KTR_RUNQ,
		    "runq_choose_from: pri=%d thread=%p idx=%d rqh=%p",
		    pri, td, td->td_rqindex, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_from: idlethread pri=%d", pri);

	return (NULL);
}

/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * The caller must set the thread's state afterwards.
 */
void
runq_remove(struct runq *rq, struct thread *td)
{

	runq_remove_idx(rq, td, NULL);
}

void
runq_remove_idx(struct runq *rq, struct thread *td, u_char *idx)
{
	struct rqhead *rqh;
	u_char pri;

	KASSERT(td->td_flags & TDF_INMEM,
	    ("runq_remove_idx: thread swapped out"));
	pri = td->td_rqindex;
	KASSERT(pri < RQ_NQS, ("runq_remove_idx: invalid index %d", pri));
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove_idx: td=%p, pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, td, td_runq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
		runq_clrbit(rq, pri);
		if (idx != NULL && *idx == pri)
			*idx = (pri + 1) % RQ_NQS;
	}
}
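
/*
 * Caller pattern (illustrative): the thread's state must be set by the
 * caller after removal, as a scheduler's sched_rem() implementation
 * might do:
 *
 *	runq_remove(rq, td);
 *	TD_SET_CAN_RUN(td);
 */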