xref: /freebsd/sys/kern/kern_switch.c (revision 262e143bd46171a6415a5b28af260a5efa2a3db8)
1 /*-
2  * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /***
28 Here is the logic..
29 
30 If there are N processors, then there are at most N KSEs (kernel
31 schedulable entities) working to process threads that belong to a
32 KSEGROUP (kg). If there are X of these KSEs actually running at the
33 moment in question, then there are at most M (N-X) of these KSEs on
34 the run queue, as running KSEs are not on the queue.
35 
36 Runnable threads are queued off the KSEGROUP in priority order.
37 If there are M or more threads runnable, the top M threads
38 (by priority) are 'preassigned' to the M KSEs not running. The KSEs take
39 their priority from those threads and are put on the run queue.
40 
41 The last thread that had a priority high enough to have a KSE associated
42 with it, AND IS ON THE RUN QUEUE is pointed to by
43 kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
44 assigned, either because all the available KSEs are actively running or
45 because there are no threads queued, that pointer is NULL.
46 
47 When a KSE is removed from the run queue to run, we know
48 it was associated with the highest priority thread in the queue (at the head
49 of the queue). If it is also the last assigned we know M was 1 and must
50 now be 0. Since the thread is no longer queued, that pointer must be
51 cleared. Since we know there were no more KSEs available
52 (M was 1 and is now 0), and since we are not FREEING our KSE
53 but using it, there are STILL no more KSEs available, so we can prove
54 that the next thread in the ksegrp list will not have a KSE to assign to
55 it, and hence that the pointer must be made 'invalid' (NULL).
56 
57 The pointer exists so that when a new thread is made runnable, it can
58 have its priority compared with the last assigned thread to see if
59 it should 'steal' its KSE or not.. i.e. is it 'earlier'
60 on the list than that thread or later.. If it's earlier, then the KSE is
61 removed from the last assigned (which is now not assigned a KSE)
62 and reassigned to the new thread, which is placed earlier in the list.
63 The pointer is then backed up to the previous thread (which may or may not
64 be the new thread).
65 
66 When a thread sleeps or is removed, the KSE becomes available and if there
67 are queued threads that are not assigned KSEs, the highest priority one of
68 them is assigned the KSE, which is then placed back on the run queue at
69 the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
70 to point to it.
71 
72 The following diagram shows 2 KSEs and 3 threads from a single process.
73 
74  RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
75               \    \____
76                \        \
77     KSEGROUP---thread--thread--thread    (queued in priority order)
78         \                 /
79          \_______________/
80           (last_assigned)
81 
82 The result of this scheme is that the M available KSEs are always
83 queued at the priorities they have inherited from the M highest priority
84 threads for that KSEGROUP. If this situation changes, the KSEs are
85 reassigned to keep this true.
86 ***/
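/*
 * Worked example of the diagram above (illustrative only): with M = 2
 * KSEs available and 3 runnable threads queued on the KSEGROUP, the two
 * highest priority threads are preassigned to the two KSEs, which sit on
 * the system run queue at those threads' priorities.  kg_last_assigned
 * points at the second (lower priority) of those two threads, and the
 * third thread waits on the ksegrp queue with no KSE until a slot frees up.
 */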
87 
88 #include <sys/cdefs.h>
89 __FBSDID("$FreeBSD$");
90 
91 #include "opt_sched.h"
92 
93 #ifndef KERN_SWITCH_INCLUDE
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/kdb.h>
97 #include <sys/kernel.h>
98 #include <sys/ktr.h>
99 #include <sys/lock.h>
100 #include <sys/mutex.h>
101 #include <sys/proc.h>
102 #include <sys/queue.h>
103 #include <sys/sched.h>
104 #else  /* KERN_SWITCH_INCLUDE */
105 #if defined(SMP) && (defined(__i386__) || defined(__amd64__))
106 #include <sys/smp.h>
107 #endif
108 #if defined(SMP) && defined(SCHED_4BSD)
109 #include <sys/sysctl.h>
110 #endif
111 
112 #ifdef FULL_PREEMPTION
113 #ifndef PREEMPTION
114 #error "The FULL_PREEMPTION option requires the PREEMPTION option"
115 #endif
116 #endif
117 
118 CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
119 
120 #define td_kse td_sched
121 
122 /*
123  * kern.sched.preemption allows user space to determine if preemption support
124  * is compiled in or not.  It is not currently a boot or runtime flag that
125  * can be changed.
126  */
127 #ifdef PREEMPTION
128 static int kern_sched_preemption = 1;
129 #else
130 static int kern_sched_preemption = 0;
131 #endif
132 SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
133     &kern_sched_preemption, 0, "Kernel preemption enabled");
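/*
 * Illustrative usage only (not part of this file): user space can read the
 * knob with "sysctl kern.sched.preemption", or from C with something like
 *	int v; size_t len = sizeof(v);
 *	sysctlbyname("kern.sched.preemption", &v, &len, NULL, 0);
 */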
134 
135 /************************************************************************
136  * Functions that manipulate runnability from a thread perspective.	*
137  ************************************************************************/
138 /*
139  * Select the KSE that will be run next.  From that find the thread, and
140  * remove it from the KSEGRP's run queue.  If there is thread clustering,
141  * this will be what does it.
142  */
143 struct thread *
144 choosethread(void)
145 {
146 	struct kse *ke;
147 	struct thread *td;
148 	struct ksegrp *kg;
149 
150 #if defined(SMP) && (defined(__i386__) || defined(__amd64__))
151 	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
152 		/* Shutting down, run the idlethread on APs */
153 		td = PCPU_GET(idlethread);
154 		ke = td->td_kse;
155 		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
156 		ke->ke_flags |= KEF_DIDRUN;
157 		TD_SET_RUNNING(td);
158 		return (td);
159 	}
160 #endif
161 
162 retry:
163 	ke = sched_choose();
164 	if (ke) {
165 		td = ke->ke_thread;
166 		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
167 		kg = ke->ke_ksegrp;
168 		if (td->td_proc->p_flag & P_HADTHREADS) {
169 			if (kg->kg_last_assigned == td) {
170 				kg->kg_last_assigned = TAILQ_PREV(td,
171 				    threadqueue, td_runq);
172 			}
173 			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
174 		}
175 		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
176 		    td, td->td_priority);
177 	} else {
178 		/* Simulate runq_choose() having returned the idle thread */
179 		td = PCPU_GET(idlethread);
180 		ke = td->td_kse;
181 		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
182 	}
183 	ke->ke_flags |= KEF_DIDRUN;
184 
185 	/*
186 	 * If we are in panic, only allow system threads,
187 	 * plus the one we are running in, to be run.
188 	 */
189 	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
190 	    (td->td_flags & TDF_INPANIC) == 0)) {
191 		/* note that it is no longer on the run queue */
192 		TD_SET_CAN_RUN(td);
193 		goto retry;
194 	}
195 
196 	TD_SET_RUNNING(td);
197 	return (td);
198 }
199 
200 /*
201  * Given a surplus system slot, try to assign a new runnable thread to it.
202  * Called from:
203  *  sched_thread_exit()  (local)
204  *  sched_switch()  (local)
206  *  remrunqueue()  (local)  (not at the moment)
207  */
208 static void
209 slot_fill(struct ksegrp *kg)
210 {
211 	struct thread *td;
212 
213 	mtx_assert(&sched_lock, MA_OWNED);
214 	while (kg->kg_avail_opennings > 0) {
215 		/*
216 		 * Find the first unassigned thread
217 		 */
218 		if ((td = kg->kg_last_assigned) != NULL)
219 			td = TAILQ_NEXT(td, td_runq);
220 		else
221 			td = TAILQ_FIRST(&kg->kg_runq);
222 
223 		/*
224 		 * If we found one, send it to the system scheduler.
225 		 */
226 		if (td) {
227 			kg->kg_last_assigned = td;
228 			sched_add(td, SRQ_YIELDING);
229 			CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
230 		} else {
231 			/* no threads to use up the slots. quit now */
232 			break;
233 		}
234 	}
235 }
236 
237 #ifdef	SCHED_4BSD
238 /*
239  * Remove a thread from its KSEGRP's run queue.
240  * This in turn may remove it from a KSE if it was already assigned
241  * to one, possibly causing a new thread to be assigned to the KSE
242  * and the KSE getting a new priority.
243  */
244 static void
245 remrunqueue(struct thread *td)
246 {
247 	struct thread *td2, *td3;
248 	struct ksegrp *kg;
249 	struct kse *ke;
250 
251 	mtx_assert(&sched_lock, MA_OWNED);
252 	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
253 	kg = td->td_ksegrp;
254 	ke = td->td_kse;
255 	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
256 	TD_SET_CAN_RUN(td);
257 	/*
258 	 * If it is not a threaded process, take the shortcut.
259 	 */
260 	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
261 		/* remove from sys run queue and free up a slot */
262 		sched_rem(td);
263 		ke->ke_state = KES_THREAD;
264 		return;
265 	}
266    	td3 = TAILQ_PREV(td, threadqueue, td_runq);
267 	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
268 	if (ke->ke_state == KES_ONRUNQ) {
269 		/*
270 		 * This thread has been assigned to the system run queue.
271 		 * We need to dissociate it and try to assign the
272 		 * KSE to the next available thread. Then, we should
273 		 * see if we need to move the KSE in the run queues.
274 		 */
275 		sched_rem(td);
276 		ke->ke_state = KES_THREAD;
277 		td2 = kg->kg_last_assigned;
278 		KASSERT((td2 != NULL), ("last assigned has wrong value"));
279 		if (td2 == td)
280 			kg->kg_last_assigned = td3;
281 		/* slot_fill(kg); */ /* will replace it with another */
282 	}
283 }
284 #endif
285 
286 /*
287  * Change the priority of a thread that is on the run queue.
288  */
289 void
290 adjustrunqueue(struct thread *td, int newpri)
291 {
292 	struct ksegrp *kg;
293 	struct kse *ke;
294 
295 	mtx_assert(&sched_lock, MA_OWNED);
296 	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
297 
298 	ke = td->td_kse;
299 	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
300 	/*
301 	 * If it is not a threaded process, take the shortcut.
302 	 */
303 	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
304 		/* We only care about the kse in the run queue. */
305 		td->td_priority = newpri;
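		/*
		 * The system run queues are hashed by priority band, RQ_PPQ
		 * priorities per queue, so only requeue the kse when the new
		 * priority lands in a different band.
		 */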
306 		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
307 			sched_rem(td);
308 			sched_add(td, SRQ_BORING);
309 		}
310 		return;
311 	}
312 
313 	/* It is a threaded process */
314 	kg = td->td_ksegrp;
315 	if (ke->ke_state == KES_ONRUNQ
316 #ifdef SCHED_ULE
317 	 || ((ke->ke_flags & KEF_ASSIGNED) != 0 &&
318 	     (ke->ke_flags & KEF_REMOVED) == 0)
319 #endif
320 	   ) {
321 		if (kg->kg_last_assigned == td) {
322 			kg->kg_last_assigned =
323 			    TAILQ_PREV(td, threadqueue, td_runq);
324 		}
325 		sched_rem(td);
326 	}
327 	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
328 	TD_SET_CAN_RUN(td);
329 	td->td_priority = newpri;
330 	setrunqueue(td, SRQ_BORING);
331 }
332 
333 /*
334  * This function is called when a thread is about to be put on a
335  * ksegrp run queue because it has been made runnable or its
336  * priority has been adjusted and the ksegrp does not have a
337  * free kse slot.  It determines if a thread from the same ksegrp
338  * should be preempted.  If so, it tries to switch threads
339  * if the thread is on the same cpu or notifies another cpu that
340  * it should switch threads.
341  */
342 
343 static void
344 maybe_preempt_in_ksegrp(struct thread *td)
345 #if  !defined(SMP)
346 {
347 	struct thread *running_thread;
348 
349 	mtx_assert(&sched_lock, MA_OWNED);
350 	running_thread = curthread;
351 
352 	if (running_thread->td_ksegrp != td->td_ksegrp)
353 		return;
354 
355 	if (td->td_priority >= running_thread->td_priority)
356 		return;
357 #ifdef PREEMPTION
358 #ifndef FULL_PREEMPTION
359 	if (td->td_priority > PRI_MAX_ITHD) {
360 		running_thread->td_flags |= TDF_NEEDRESCHED;
361 		return;
362 	}
363 #endif /* FULL_PREEMPTION */
364 
365 	if (running_thread->td_critnest > 1)
366 		running_thread->td_owepreempt = 1;
367 	else
368 		mi_switch(SW_INVOL, NULL);
369 
370 #else /* PREEMPTION */
371 	running_thread->td_flags |= TDF_NEEDRESCHED;
372 #endif /* PREEMPTION */
373 	return;
374 }
375 
376 #else /* SMP */
377 {
378 	struct thread *running_thread;
379 	int worst_pri;
380 	struct ksegrp *kg;
381 	cpumask_t cpumask, dontuse;
382 	struct pcpu *pc;
383 	struct pcpu *best_pcpu;
384 	struct thread *cputhread;
385 
386 	mtx_assert(&sched_lock, MA_OWNED);
387 
388 	running_thread = curthread;
389 
390 #if !defined(KSEG_PEEMPT_BEST_CPU)
391 	if (running_thread->td_ksegrp != td->td_ksegrp) {
392 #endif
393 		kg = td->td_ksegrp;
394 
395 		/* if someone is ahead of this thread, wait our turn */
396 		if (td != TAILQ_FIRST(&kg->kg_runq))
397 			return;
398 
399 		worst_pri = td->td_priority;
400 		best_pcpu = NULL;
401 		dontuse   = stopped_cpus | idle_cpus_mask;
402 
403 		/*
404 		 * Find the cpu whose current thread, from the same ksegrp,
405 		 * has the worst (numerically highest) priority - if several are
406 		 * tied, prefer the thread's last-run cpu, then the current cpu.
407 		 */
408 
409 		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
410 			cpumask   = pc->pc_cpumask;
411 			cputhread = pc->pc_curthread;
412 
413 			if ((cpumask & dontuse)  ||
414 			    cputhread->td_ksegrp != kg)
415 				continue;
416 
417 			if (cputhread->td_priority > worst_pri) {
418 				worst_pri = cputhread->td_priority;
419 				best_pcpu = pc;
420 				continue;
421 			}
422 
423 			if (cputhread->td_priority == worst_pri &&
424 			    best_pcpu != NULL &&
425 			    (td->td_lastcpu == pc->pc_cpuid ||
426 				(PCPU_GET(cpumask) == cpumask &&
427 				    td->td_lastcpu != best_pcpu->pc_cpuid)))
428 			    best_pcpu = pc;
429 		}
430 
431 		/* Check if we need to preempt someone */
432 		if (best_pcpu == NULL)
433 			return;
434 
435 #if defined(IPI_PREEMPTION) && defined(PREEMPTION)
436 #if !defined(FULL_PREEMPTION)
437 		if (td->td_priority <= PRI_MAX_ITHD)
438 #endif /* ! FULL_PREEMPTION */
439 			{
440 				ipi_selected(best_pcpu->pc_cpumask, IPI_PREEMPT);
441 				return;
442 			}
443 #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
444 
445 		if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) {
446 			best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
447 			ipi_selected(best_pcpu->pc_cpumask, IPI_AST);
448 			return;
449 		}
450 #if !defined(KSEG_PEEMPT_BEST_CPU)
451 	}
452 #endif
453 
454 	if (td->td_priority >= running_thread->td_priority)
455 		return;
456 #ifdef PREEMPTION
457 
458 #if !defined(FULL_PREEMPTION)
459 	if (td->td_priority > PRI_MAX_ITHD) {
460 		running_thread->td_flags |= TDF_NEEDRESCHED;
		return;
461 	}
462 #endif /* ! FULL_PREEMPTION */
463 
464 	if (running_thread->td_critnest > 1)
465 		running_thread->td_owepreempt = 1;
466 	else
467 		mi_switch(SW_INVOL, NULL);
468 
469 #else /* PREEMPTION */
470 	running_thread->td_flags |= TDF_NEEDRESCHED;
471 #endif /* PREEMPTION */
472 	return;
473 }
474 #endif /* !SMP */
475 
476 
477 int limitcount;
478 void
479 setrunqueue(struct thread *td, int flags)
480 {
481 	struct ksegrp *kg;
482 	struct thread *td2;
483 	struct thread *tda;
484 
485 	CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
486 	    td, td->td_ksegrp, td->td_proc->p_pid);
487 	CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)",
488             td, td->td_proc->p_comm, td->td_priority, curthread,
489             curthread->td_proc->p_comm);
490 	mtx_assert(&sched_lock, MA_OWNED);
491 	KASSERT((td->td_inhibitors == 0),
492 			("setrunqueue: trying to run inhibited thread"));
493 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
494 	    ("setrunqueue: bad thread state"));
495 	TD_SET_RUNQ(td);
496 	kg = td->td_ksegrp;
497 	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
498 		/*
499 		 * Common path optimisation: Only one of everything
500 		 * and the KSE is always already attached.
501 		 * Totally ignore the ksegrp run queue.
502 		 */
503 		if (kg->kg_avail_opennings != 1) {
504 			if (limitcount < 1) {
505 				limitcount++;
506 				printf("pid %d: corrected slot count (%d->1)\n",
507 				    td->td_proc->p_pid, kg->kg_avail_opennings);
508 
509 			}
510 			kg->kg_avail_opennings = 1;
511 		}
512 		sched_add(td, flags);
513 		return;
514 	}
515 
516 	/*
517 	 * If the concurrency has been reduced and we would land in the
518 	 * assigned section, then keep removing entries from the
519 	 * system run queue until we are no longer in that section
520 	 * or there is room for us to be put in that section.
521 	 * What we MUST avoid is the case where threads of lower priority
522 	 * than the new one are scheduled, but the new one can not
523 	 * be scheduled itself. That would lead to a non-contiguous set
524 	 * of scheduled threads, and everything would break.
525 	 */
526 	tda = kg->kg_last_assigned;
527 	while ((kg->kg_avail_opennings <= 0) &&
528 	    (tda && (tda->td_priority > td->td_priority))) {
529 		/*
530 		 * None free, but there is one we can commandeer.
531 		 */
532 		CTR2(KTR_RUNQ,
533 		    "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
534 		sched_rem(tda);
535 		tda = kg->kg_last_assigned =
536 		    TAILQ_PREV(tda, threadqueue, td_runq);
537 	}
538 
539 	/*
540 	 * Add the thread to the ksegrp's run queue at
541 	 * the appropriate place.
542 	 */
543 	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
544 		if (td2->td_priority > td->td_priority) {
545 			TAILQ_INSERT_BEFORE(td2, td, td_runq);
546 			break;
547 		}
548 	}
549 	if (td2 == NULL) {
550 		/* We ran off the end of the TAILQ or it was empty. */
551 		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
552 	}
553 
554 	/*
555 	 * If we have a slot to use, then put the thread on the system
556 	 * run queue and, if needed, readjust the last_assigned pointer.
557 	 * It may be that we need to schedule something anyhow,
558 	 * even if the available slots are negative, so that
559 	 * all the items < last_assigned are scheduled.
560 	 */
561 	if (kg->kg_avail_opennings > 0) {
562 		if (tda == NULL) {
563 			/*
564 			 * No pre-existing last assigned so whoever is first
565 			 * gets the slot.. (maybe us)
566 			 */
567 			td2 = TAILQ_FIRST(&kg->kg_runq);
568 			kg->kg_last_assigned = td2;
569 		} else if (tda->td_priority > td->td_priority) {
570 			td2 = td;
571 		} else {
572 			/*
573 			 * We are past last_assigned, so
574 			 * give the next slot to whatever is next,
575 			 * which may or may not be us.
576 			 */
577 			td2 = TAILQ_NEXT(tda, td_runq);
578 			kg->kg_last_assigned = td2;
579 		}
580 		sched_add(td2, flags);
581 	} else {
582 		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
583 			td, td->td_ksegrp, td->td_proc->p_pid);
584 		if ((flags & SRQ_YIELDING) == 0)
585 			maybe_preempt_in_ksegrp(td);
586 	}
587 }
588 
589 /*
590  * Kernel thread preemption implementation.  Critical sections mark
591  * regions of code in which preemptions are not allowed.
592  */
593 void
594 critical_enter(void)
595 {
596 	struct thread *td;
597 
598 	td = curthread;
599 	td->td_critnest++;
600 	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
601 	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
602 }
603 
604 void
605 critical_exit(void)
606 {
607 	struct thread *td;
608 
609 	td = curthread;
610 	KASSERT(td->td_critnest != 0,
611 	    ("critical_exit: td_critnest == 0"));
612 #ifdef PREEMPTION
613 	if (td->td_critnest == 1) {
614 		td->td_critnest = 0;
615 		mtx_assert(&sched_lock, MA_NOTOWNED);
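		/*
		 * If a preemption was deferred while we were inside the
		 * critical section, take it now.  td_critnest is bumped back
		 * to 1 around the sched_lock acquisition so that an interrupt
		 * cannot preempt us again before mi_switch() below performs
		 * the switch itself.
		 */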
616 		if (td->td_owepreempt) {
617 			td->td_critnest = 1;
618 			mtx_lock_spin(&sched_lock);
619 			td->td_critnest--;
620 			mi_switch(SW_INVOL, NULL);
621 			mtx_unlock_spin(&sched_lock);
622 		}
623 	} else
624 #endif
625 		td->td_critnest--;
626 
627 
628 	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
629 	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
630 }
631 
632 /*
633  * This function is called when a thread is about to be put on run queue
634  * because it has been made runnable or its priority has been adjusted.  It
635  * determines whether we should immediately preempt to the new thread.  If
636  * so, it switches to it and eventually returns true.  If not, it returns
637  * false so that the caller may place the thread on an appropriate run queue.
638  */
639 int
640 maybe_preempt(struct thread *td)
641 {
642 #ifdef PREEMPTION
643 	struct thread *ctd;
644 	int cpri, pri;
645 #endif
646 
647 	mtx_assert(&sched_lock, MA_OWNED);
648 #ifdef PREEMPTION
649 	/*
650 	 * The new thread should not preempt the current thread if any of the
651 	 * following conditions are true:
652 	 *
653 	 *  - The kernel is in the throes of crashing (panicstr).
654 	 *  - The current thread has a higher (numerically lower) or
655 	 *    equivalent priority.  Note that this prevents curthread from
656 	 *    trying to preempt to itself.
657 	 *  - It is too early in the boot for context switches (cold is set).
658 	 *  - The current thread has an inhibitor set or is in the process of
659 	 *    exiting.  In this case, the current thread is about to switch
660 	 *    out anyways, so there's no point in preempting.  If we did,
661 	 *    the current thread would not be properly resumed as well, so
662 	 *    just avoid that whole landmine.
663 	 *  - If the new thread's priority is not a realtime priority and
664 	 *    the current thread's priority is not an idle priority and
665 	 *    FULL_PREEMPTION is disabled.
666 	 *
667 	 * If all of these conditions are false, but the current thread is in
668 	 * a nested critical section, then we have to defer the preemption
669 	 * until we exit the critical section.  Otherwise, switch immediately
670 	 * to the new thread.
671 	 */
672 	ctd = curthread;
673 	KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
674 	  ("thread has no (or wrong) sched-private part."));
675 	KASSERT((td->td_inhibitors == 0),
676 			("maybe_preempt: trying to run inhibited thread"));
677 	pri = td->td_priority;
678 	cpri = ctd->td_priority;
679 	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
680 	    TD_IS_INHIBITED(ctd) || td->td_kse->ke_state != KES_THREAD)
681 		return (0);
682 #ifndef FULL_PREEMPTION
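	/*
	 * Without FULL_PREEMPTION only threads at interrupt priorities may
	 * preempt, except that anything may preempt an idle priority thread.
	 */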
683 	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
684 		return (0);
685 #endif
686 
687 	if (ctd->td_critnest > 1) {
688 		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
689 		    ctd->td_critnest);
690 		ctd->td_owepreempt = 1;
691 		return (0);
692 	}
693 
694 	/*
695 	 * Thread is runnable but not yet put on system run queue.
696 	 */
697 	MPASS(TD_ON_RUNQ(td));
698 	MPASS(td->td_sched->ke_state != KES_ONRUNQ);
699 	if (td->td_proc->p_flag & P_HADTHREADS) {
700 		/*
701 		 * If this is a threaded process we actually ARE on the
702 		 * ksegrp run queue so take it off that first.
703 		 * Also undo any damage done to the last_assigned pointer.
704 		 * XXX Fix setrunqueue so this isn't needed
705 		 */
706 		struct ksegrp *kg;
707 
708 		kg = td->td_ksegrp;
709 		if (kg->kg_last_assigned == td)
710 			kg->kg_last_assigned =
711 			    TAILQ_PREV(td, threadqueue, td_runq);
712 		TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
713 	}
714 
715 	TD_SET_RUNNING(td);
716 	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
717 	    td->td_proc->p_pid, td->td_proc->p_comm);
718 	mi_switch(SW_INVOL|SW_PREEMPT, td);
719 	return (1);
720 #else
721 	return (0);
722 #endif
723 }
724 
725 #if 0
726 #ifndef PREEMPTION
727 /* XXX: There should be a non-static version of this. */
728 static void
729 printf_caddr_t(void *data)
730 {
731 	printf("%s", (char *)data);
732 }
733 static char preempt_warning[] =
734     "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
735 SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
736     preempt_warning)
737 #endif
738 #endif
739 
740 /************************************************************************
741  * SYSTEM RUN QUEUE manipulations and tests				*
742  ************************************************************************/
743 /*
744  * Initialize a run structure.
745  */
746 void
747 runq_init(struct runq *rq)
748 {
749 	int i;
750 
751 	bzero(rq, sizeof *rq);
752 	for (i = 0; i < RQ_NQS; i++)
753 		TAILQ_INIT(&rq->rq_queues[i]);
754 }
755 
756 /*
757  * Clear the status bit of the queue corresponding to priority level pri,
758  * indicating that it is empty.
759  */
760 static __inline void
761 runq_clrbit(struct runq *rq, int pri)
762 {
763 	struct rqbits *rqb;
764 
765 	rqb = &rq->rq_status;
766 	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
767 	    rqb->rqb_bits[RQB_WORD(pri)],
768 	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
769 	    RQB_BIT(pri), RQB_WORD(pri));
770 	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
771 }
772 
773 /*
774  * Find the index of the first non-empty run queue.  This is done by
775  * scanning the status bits, a set bit indicates a non-empty queue.
776  */
777 static __inline int
778 runq_findbit(struct runq *rq)
779 {
780 	struct rqbits *rqb;
781 	int pri;
782 	int i;
783 
784 	rqb = &rq->rq_status;
785 	for (i = 0; i < RQB_LEN; i++)
786 		if (rqb->rqb_bits[i]) {
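			/*
			 * Queue index = position of the first set bit in this
			 * status word plus the word's offset in bits.
			 */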
787 			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
788 			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
789 			    rqb->rqb_bits[i], i, pri);
790 			return (pri);
791 		}
792 
793 	return (-1);
794 }
795 
796 /*
797  * Set the status bit of the queue corresponding to priority level pri,
798  * indicating that it is non-empty.
799  */
800 static __inline void
801 runq_setbit(struct runq *rq, int pri)
802 {
803 	struct rqbits *rqb;
804 
805 	rqb = &rq->rq_status;
806 	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
807 	    rqb->rqb_bits[RQB_WORD(pri)],
808 	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
809 	    RQB_BIT(pri), RQB_WORD(pri));
810 	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
811 }
812 
813 /*
814  * Add the KSE to the queue specified by its priority, and set the
815  * corresponding status bit.
816  */
817 void
818 runq_add(struct runq *rq, struct kse *ke, int flags)
819 {
820 	struct rqhead *rqh;
821 	int pri;
822 
823 	pri = ke->ke_thread->td_priority / RQ_PPQ;
824 	ke->ke_rqindex = pri;
825 	runq_setbit(rq, pri);
826 	rqh = &rq->rq_queues[pri];
827 	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
828 	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
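	/*
	 * A preempted thread goes back on the head of its queue so it runs
	 * again ahead of others at the same priority; normal additions go on
	 * the tail for round-robin ordering within the priority band.
	 */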
829 	if (flags & SRQ_PREEMPTED) {
830 		TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
831 	} else {
832 		TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
833 	}
834 }
835 
836 /*
837  * Return true if there are runnable processes of any priority on the run
838  * queue, false otherwise.  Has no side effects, does not modify the run
839  * queue structure.
840  */
841 int
842 runq_check(struct runq *rq)
843 {
844 	struct rqbits *rqb;
845 	int i;
846 
847 	rqb = &rq->rq_status;
848 	for (i = 0; i < RQB_LEN; i++)
849 		if (rqb->rqb_bits[i]) {
850 			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
851 			    rqb->rqb_bits[i], i);
852 			return (1);
853 		}
854 	CTR0(KTR_RUNQ, "runq_check: empty");
855 
856 	return (0);
857 }
858 
859 #if defined(SMP) && defined(SCHED_4BSD)
860 int runq_fuzz = 1;
861 SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
862 #endif
863 
864 /*
865  * Find the highest priority process on the run queue.
866  */
867 struct kse *
868 runq_choose(struct runq *rq)
869 {
870 	struct rqhead *rqh;
871 	struct kse *ke;
872 	int pri;
873 
874 	mtx_assert(&sched_lock, MA_OWNED);
875 	while ((pri = runq_findbit(rq)) != -1) {
876 		rqh = &rq->rq_queues[pri];
877 #if defined(SMP) && defined(SCHED_4BSD)
878 		/* fuzz == 1 is normal.. 0 or less are ignored */
879 		if (runq_fuzz > 1) {
880 			/*
881 			 * In the first couple of entries, check if
882 			 * there is one for our CPU as a preference.
883 			 */
884 			int count = runq_fuzz;
885 			int cpu = PCPU_GET(cpuid);
886 			struct kse *ke2;
887 			ke2 = ke = TAILQ_FIRST(rqh);
888 
889 			while (count-- && ke2) {
890 				if (ke->ke_thread->td_lastcpu == cpu) {
891 				if (ke2->ke_thread->td_lastcpu == cpu) {
892 					break;
893 				}
894 				ke2 = TAILQ_NEXT(ke2, ke_procq);
895 			}
896 		} else
897 #endif
898 			ke = TAILQ_FIRST(rqh);
899 		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
900 		CTR3(KTR_RUNQ,
901 		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
902 		return (ke);
903 	}
904 	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
905 
906 	return (NULL);
907 }
908 
909 /*
910  * Remove the KSE from the queue specified by its priority, and clear the
911  * corresponding status bit if the queue becomes empty.
912  * Caller must set ke->ke_state afterwards.
913  */
914 void
915 runq_remove(struct runq *rq, struct kse *ke)
916 {
917 	struct rqhead *rqh;
918 	int pri;
919 
920 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
921 		("runq_remove: process swapped out"));
922 	pri = ke->ke_rqindex;
923 	rqh = &rq->rq_queues[pri];
924 	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
925 	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
926 	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
927 	TAILQ_REMOVE(rqh, ke, ke_procq);
928 	if (TAILQ_EMPTY(rqh)) {
929 		CTR0(KTR_RUNQ, "runq_remove: empty");
930 		runq_clrbit(rq, pri);
931 	}
932 }
933 
934 /****** functions that are temporarily here ***********/
935 #include <vm/uma.h>
936 extern struct mtx kse_zombie_lock;
937 
938 /*
939  * Allocate scheduler-specific per-process resources.
940  * The thread and ksegrp have already been linked in.
941  * In this case just set the default concurrency value.
942  *
943  * Called from:
944  *  proc_init() (UMA init method)
945  */
946 void
947 sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
948 {
949 
950 	/* This can go in sched_fork */
951 	sched_init_concurrency(kg);
952 }
953 
954 /*
955  * A thread is being either created or recycled.
956  * Fix up the per-scheduler resources associated with it.
957  * Called from:
958  *  sched_fork_thread()
959  *  thread_dtor()  (*may go away)
960  *  thread_init()  (*may go away)
961  */
962 void
963 sched_newthread(struct thread *td)
964 {
965 	struct td_sched *ke;
966 
967 	ke = (struct td_sched *) (td + 1);
968 	bzero(ke, sizeof(*ke));
969 	td->td_sched     = ke;
970 	ke->ke_thread	= td;
971 	ke->ke_state	= KES_THREAD;
972 }
973 
974 /*
975  * Set up an initial concurrency of 1
976  * and set the given thread (if given) to be using that
977  * concurrency slot.
978  * May be used "offline", before the ksegrp is attached to the world,
979  * and thus wouldn't need schedlock in that case.
980  * Called from:
981  *  thr_create()
982  *  proc_init() (UMA) via sched_newproc()
983  */
984 void
985 sched_init_concurrency(struct ksegrp *kg)
986 {
987 
988 	CTR1(KTR_RUNQ,"kg %p init slots and concurrency to 1", kg);
989 	kg->kg_concurrency = 1;
990 	kg->kg_avail_opennings = 1;
991 }
992 
993 /*
994  * Change the concurrency of an existing ksegrp to N
995  * Called from:
996  *  kse_create()
997  *  kse_exit()
998  *  thread_exit()
999  *  thread_single()
1000  */
1001 void
1002 sched_set_concurrency(struct ksegrp *kg, int concurrency)
1003 {
1004 
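	/*
	 * Adjust the available openings by the change in concurrency so that
	 * slots already in use stay accounted for: e.g. (illustrative numbers
	 * only) going from concurrency 2 with 1 opening free to concurrency 4
	 * leaves 3 openings free.
	 */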
1005 	CTR4(KTR_RUNQ,"kg %p set concurrency to %d, slots %d -> %d",
1006 	    kg,
1007 	    concurrency,
1008 	    kg->kg_avail_opennings,
1009 	    kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
1010 	kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
1011 	kg->kg_concurrency = concurrency;
1012 }
1013 
1014 /*
1015  * Called from thread_exit() for all exiting threads.
1016  *
1017  * Not to be confused with sched_exit_thread(), which thread_exit() only
1018  * calls for threads exiting without the rest of the process exiting,
1019  * because sched_exit() also calls it and we wouldn't want to call it
1020  * twice.
1021  * XXX This can probably be fixed.
1022  */
1023 void
1024 sched_thread_exit(struct thread *td)
1025 {
1026 
1027 	SLOT_RELEASE(td->td_ksegrp);
1028 	slot_fill(td->td_ksegrp);
1029 }
1030 
1031 #endif /* KERN_SWITCH_INCLUDE */
1032