xref: /freebsd/sys/kern/kern_switch.c (revision b28624fde638caadd4a89f50c9b7e7da0f98c4d2)
/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#include <machine/cpu.h>

/* Uncomment this to enable logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

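/*
 * The run queue status bitmap must provide exactly one bit per queue:
 * RQB_LEN words of RQB_BPW bits each have to cover all RQ_NQS queues.
 */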
CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

/*
 * kern.sched.preemption allows user space to determine whether preemption
 * support is compiled in.  It is read-only; it cannot be changed at boot
 * or at run time.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");

#ifdef SCHED_STATS
long switch_preempt;
long switch_owepreempt;
long switch_turnstile;
long switch_sleepq;
long switch_sleepqtimo;
long switch_relinquish;
long switch_needresched;
static SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0, "switch stats");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, preempt, CTLFLAG_RD, &switch_preempt, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, owepreempt, CTLFLAG_RD, &switch_owepreempt, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, turnstile, CTLFLAG_RD, &switch_turnstile, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, sleepq, CTLFLAG_RD, &switch_sleepq, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, sleepqtimo, CTLFLAG_RD, &switch_sleepqtimo, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, relinquish, CTLFLAG_RD, &switch_relinquish, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, needresched, CTLFLAG_RD, &switch_needresched, 0, "");
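/*
 * Writing any non-zero value to kern.sched.stats.reset clears all of the
 * counters above; writes of zero are accepted but ignored.
 */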
static int
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
{
	int error;
	int val;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val == 0)
		return (0);
	switch_preempt = 0;
	switch_owepreempt = 0;
	switch_turnstile = 0;
	switch_sleepq = 0;
	switch_sleepqtimo = 0;
	switch_relinquish = 0;
	switch_needresched = 0;

	return (0);
}

SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_WR, NULL,
    0, sysctl_stats_reset, "I", "Reset scheduler statistics");
#endif

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */
struct thread *
choosethread(void)
{
	struct thread *td;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run the idle thread on the APs. */
		td = PCPU_GET(idlethread);
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	td = sched_choose();

	/*
	 * When in a panic, only allow system threads, plus the thread that
	 * panicked, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* Note that it is no longer on the run queue. */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
#ifdef PREEMPTION
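	/*
	 * On leaving the outermost critical section with a preemption
	 * pending, briefly raise td_critnest back to 1 so that taking the
	 * thread lock below cannot itself trigger a recursive deferred
	 * switch, then perform the preemption.
	 */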
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		if (td->td_owepreempt) {
			td->td_critnest = 1;
			thread_lock(td);
			td->td_critnest--;
			SCHED_STAT_INC(switch_owepreempt);
			mi_switch(SW_INVOL|SW_PREEMPT, NULL);
			thread_unlock(td);
		}
	} else
#endif
		td->td_critnest--;

	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or had its priority adjusted.  It
 * determines whether the current thread should be preempted in favor of
 * the new thread.  If so, it switches to the new thread and eventually
 * returns true.  If not, it returns false so that the caller may place
 * the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed either, so
	 *    just avoid that whole landmine.
	 *  - The new thread's priority is not a realtime priority, the
	 *    current thread's priority is not an idle priority, and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(ctd->td_sched != NULL && ctd->td_sched->ts_thread == ctd,
	    ("thread has no (or wrong) sched-private part."));
	KASSERT(td->td_inhibitors == 0,
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd))
		return (0);
#ifndef FULL_PREEMPTION
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
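		/*
		 * Record the deferred preemption; critical_exit() will
		 * perform the switch once the outermost section is exited.
		 */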
		ctd->td_owepreempt = 1;
		return (0);
	}
	/*
	 * The thread is runnable but not yet on a system run queue.
	 */
	MPASS(ctd->td_lock == td->td_lock);
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	SCHED_STAT_INC(switch_preempt);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	/*
	 * td's lock pointer may have changed.  We have to return with it
	 * locked.
	 */
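	/*
	 * spinlock_enter() keeps this CPU pinned while we trade ctd's lock
	 * for td's, since td may now be protected by a different lock.
	 */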
	spinlock_enter();
	thread_unlock(ctd);
	thread_lock(td);
	spinlock_exit();
	return (1);
#else
	return (0);
#endif
}

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning);
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof(*rq));
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

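/*
 * Find the index of the first non-empty run queue at priority level 'pri'
 * or above, wrapping around to the start at most once.  Returns -1 if all
 * queues are empty.
 */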
static __inline int
runq_findbit_from(struct runq *rq, u_char pri)
{
	struct rqbits *rqb;
	rqb_word_t mask;
	int i;

	/*
	 * Set the mask for the first word so we ignore priorities before 'pri'.
	 */
	mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1));
	rqb = &rq->rq_status;
again:
	for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) {
		mask = rqb->rqb_bits[i] & mask;
		if (mask == 0)
			continue;
		pri = RQB_FFS(mask) + (i << RQB_L2BPW);
		CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
		    mask, i, pri);
		return (pri);
	}
	if (pri == 0)
		return (-1);
	/*
	 * Wrap back around to the beginning of the list just once so we
	 * scan the whole thing.
	 */
	pri = 0;
	goto again;
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct td_sched *ts, int flags)
{
	struct rqhead *rqh;
	int pri;

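	/* RQ_PPQ adjacent priority levels share a queue, so scale down. */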
	pri = ts->ts_thread->td_priority / RQ_PPQ;
	ts->ts_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ts=%p pri=%d %d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
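	/*
	 * A preempted thread goes to the head of its queue so that it runs
	 * again as soon as its level is selected; everything else is added
	 * at the tail, in FIFO order.
	 */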
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
	}
}

void
runq_add_pri(struct runq *rq, struct td_sched *ts, u_char pri, int flags)
{
	struct rqhead *rqh;

	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
	ts->ts_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add_pri: td=%p ts=%p pri=%d idx=%d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
	}
}

/*
 * Return true if there are runnable threads of any priority on the run
 * queue, false otherwise.  Has no side effects and does not modify the
 * run queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority thread on the run queue.
 */
struct td_sched *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct td_sched *ts;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal; 0 or less is ignored. */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct td_sched *ts2;
			ts2 = ts = TAILQ_FIRST(rqh);

			while (count-- && ts2) {
				if (ts2->ts_thread->td_lastcpu == cpu) {
					ts = ts2;
					break;
				}
				ts2 = TAILQ_NEXT(ts2, ts_procq);
			}
		} else
#endif
			ts = TAILQ_FIRST(rqh);
		KASSERT(ts != NULL, ("runq_choose: no thread on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d td_sched=%p rqh=%p", pri, ts, rqh);
		return (ts);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

struct td_sched *
runq_choose_from(struct runq *rq, u_char idx)
{
	struct rqhead *rqh;
	struct td_sched *ts;
	int pri;

	if ((pri = runq_findbit_from(rq, idx)) != -1) {
		rqh = &rq->rq_queues[pri];
		ts = TAILQ_FIRST(rqh);
		KASSERT(ts != NULL, ("runq_choose_from: no thread on busy queue"));
		CTR4(KTR_RUNQ,
		    "runq_choose_from: pri=%d ts=%p idx=%d rqh=%p",
		    pri, ts, ts->ts_rqindex, rqh);
		return (ts);
	}
	CTR1(KTR_RUNQ, "runq_choose_from: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * The caller must set the thread's state afterwards.
 */
void
runq_remove(struct runq *rq, struct td_sched *ts)
{

	runq_remove_idx(rq, ts, NULL);
}

void
runq_remove_idx(struct runq *rq, struct td_sched *ts, u_char *idx)
{
	struct rqhead *rqh;
	u_char pri;

	KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
	    ("runq_remove_idx: process swapped out"));
	pri = ts->ts_rqindex;
	KASSERT(pri < RQ_NQS, ("runq_remove_idx: invalid index %d", pri));
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove_idx: td=%p, ts=%p pri=%d %d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
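	/* Sanity check: verify that ts is really on the queue its rqindex claims. */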
	{
		struct td_sched *nts;

		TAILQ_FOREACH(nts, rqh, ts_procq)
			if (nts == ts)
				break;
		if (ts != nts)
			panic("runq_remove_idx: ts %p not on rqindex %d",
			    ts, pri);
	}
	TAILQ_REMOVE(rqh, ts, ts_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
		runq_clrbit(rq, pri);
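		/*
		 * If the caller's circular scan hint pointed at the queue
		 * we just emptied, advance it to the next queue.
		 */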
		if (idx != NULL && *idx == pri)
			*idx = (pri + 1) % RQ_NQS;
	}
}

/****** Functions that are temporarily here. ***********/
#include <vm/uma.h>
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler-specific per-process resources.
 * The thread and proc have already been linked in.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct thread *td)
{
}

/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ts;

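	/*
	 * The scheduler-private data lives immediately after the thread
	 * structure, in the same allocation.
	 */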
	ts = (struct td_sched *)(td + 1);
	bzero(ts, sizeof(*ts));
	td->td_sched = ts;
	ts->ts_thread = td;
}

#endif /* KERN_SWITCH_INCLUDE */