xref: /freebsd/sys/kern/kern_switch.c (revision 93a7aa79d628bf2b11ed75cba6fc1d904e3a4dd4)
1dba6c5a6SPeter Wemm /*
2d5a08a60SJake Burkholder  * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
3d5a08a60SJake Burkholder  * All rights reserved.
4dba6c5a6SPeter Wemm  *
5dba6c5a6SPeter Wemm  * Redistribution and use in source and binary forms, with or without
6dba6c5a6SPeter Wemm  * modification, are permitted provided that the following conditions
7dba6c5a6SPeter Wemm  * are met:
8dba6c5a6SPeter Wemm  * 1. Redistributions of source code must retain the above copyright
9dba6c5a6SPeter Wemm  *    notice, this list of conditions and the following disclaimer.
10dba6c5a6SPeter Wemm  * 2. Redistributions in binary form must reproduce the above copyright
11dba6c5a6SPeter Wemm  *    notice, this list of conditions and the following disclaimer in the
12dba6c5a6SPeter Wemm  *    documentation and/or other materials provided with the distribution.
13dba6c5a6SPeter Wemm  *
14dba6c5a6SPeter Wemm  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15dba6c5a6SPeter Wemm  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16dba6c5a6SPeter Wemm  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17dba6c5a6SPeter Wemm  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18dba6c5a6SPeter Wemm  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19dba6c5a6SPeter Wemm  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20dba6c5a6SPeter Wemm  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21dba6c5a6SPeter Wemm  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22dba6c5a6SPeter Wemm  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23dba6c5a6SPeter Wemm  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24dba6c5a6SPeter Wemm  * SUCH DAMAGE.
25dba6c5a6SPeter Wemm  *
26dba6c5a6SPeter Wemm  * $FreeBSD$
27dba6c5a6SPeter Wemm  */
28dba6c5a6SPeter Wemm 
29e602ba25SJulian Elischer /***
30e602ba25SJulian Elischer 
31e602ba25SJulian Elischer Here is the logic..
32e602ba25SJulian Elischer 
33e602ba25SJulian Elischer If there are N processors, then there are at most N KSEs (kernel
34e602ba25SJulian Elischer schedulable entities) working to process threads that belong to a
35e602ba25SJulian Elischer KSEGROUP (kg). If there are X of these KSEs actually running at the
36e602ba25SJulian Elischer moment in question, then there are at most M (N-X) of these KSEs on
37e602ba25SJulian Elischer the run queue, as running KSEs are not on the queue.
38e602ba25SJulian Elischer 
39e602ba25SJulian Elischer Runnable threads are queued off the KSEGROUP in priority order.
40e602ba25SJulian Elischer If there are M or more threads runnable, the top M threads
41e602ba25SJulian Elischer (by priority) are 'preassigned' to the M KSEs not running. The KSEs take
42e602ba25SJulian Elischer their priority from those threads and are put on the run queue.
43e602ba25SJulian Elischer 
44e602ba25SJulian Elischer The last thread that had a priority high enough to have a KSE associated
45e602ba25SJulian Elischer with it, AND IS ON THE RUN QUEUE is pointed to by
46e602ba25SJulian Elischer kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
47e602ba25SJulian Elischer assigned as all the available KSEs are actively running, or because there
48e602ba25SJulian Elischer are no threads queued, that pointer is NULL.
49e602ba25SJulian Elischer 
50e602ba25SJulian Elischer When a KSE is removed from the run queue to become runnable, we know
51e602ba25SJulian Elischer it was associated with the highest priority thread in the queue (at the head
52e602ba25SJulian Elischer of the queue). If it is also the last assigned we know M was 1 and must
53e602ba25SJulian Elischer now be 0. Since the thread is no longer queued that pointer must be
54e602ba25SJulian Elischer removed from it. Since we know there were no more KSEs available,
55e602ba25SJulian Elischer (M was 1 and is now 0) and since we are not FREEING our KSE
56e602ba25SJulian Elischer but using it, we know there are STILL no more KSEs available, we can prove
57e602ba25SJulian Elischer that the next thread in the ksegrp list will not have a KSE to assign to
58e602ba25SJulian Elischer it, so we can show that the pointer must be made 'invalid' (NULL).
59e602ba25SJulian Elischer 
60e602ba25SJulian Elischer The pointer exists so that when a new thread is made runnable, it can
61e602ba25SJulian Elischer have its priority compared with the last assigned thread to see if
62e602ba25SJulian Elischer it should 'steal' its KSE or not.. i.e. is it 'earlier'
63e602ba25SJulian Elischer on the list than that thread or later.. If it's earlier, then the KSE is
64e602ba25SJulian Elischer removed from the last assigned (which is now not assigned a KSE)
65e602ba25SJulian Elischer and reassigned to the new thread, which is placed earlier in the list.
66e602ba25SJulian Elischer The pointer is then backed up to the previous thread (which may or may not
67e602ba25SJulian Elischer be the new thread).
68e602ba25SJulian Elischer 
69e602ba25SJulian Elischer When a thread sleeps or is removed, the KSE becomes available and if there
70e602ba25SJulian Elischer are queued threads that are not assigned KSEs, the highest priority one of
71e602ba25SJulian Elischer them is assigned the KSE, which is then placed back on the run queue at
72e602ba25SJulian Elischer the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
73e602ba25SJulian Elischer to point to it.
74e602ba25SJulian Elischer 
75e602ba25SJulian Elischer The following diagram shows 2 KSEs and 3 threads from a single process.
76e602ba25SJulian Elischer 
77e602ba25SJulian Elischer  RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
78e602ba25SJulian Elischer               \    \____
79e602ba25SJulian Elischer                \        \
80e602ba25SJulian Elischer     KSEGROUP---thread--thread--thread    (queued in priority order)
81e602ba25SJulian Elischer         \                 /
82e602ba25SJulian Elischer          \_______________/
83e602ba25SJulian Elischer           (last_assigned)
84e602ba25SJulian Elischer 
85e602ba25SJulian Elischer The result of this scheme is that the M available KSEs are always
86e602ba25SJulian Elischer queued at the priorities they have inherited from the M highest priority
87e602ba25SJulian Elischer threads for that KSEGROUP. If this situation changes, the KSEs are
88e602ba25SJulian Elischer reassigned to keep this true.
89e602ba25SJulian Elischer 
90e602ba25SJulian Elischer */
91e602ba25SJulian Elischer 
92dba6c5a6SPeter Wemm #include <sys/param.h>
93dba6c5a6SPeter Wemm #include <sys/systm.h>
94dba6c5a6SPeter Wemm #include <sys/kernel.h>
950384fff8SJason Evans #include <sys/ktr.h>
96f34fa851SJohn Baldwin #include <sys/lock.h>
9735e0e5b3SJohn Baldwin #include <sys/mutex.h>
98dba6c5a6SPeter Wemm #include <sys/proc.h>
99dba6c5a6SPeter Wemm #include <sys/queue.h>
100b43179fbSJeff Roberson #include <sys/sched.h>
101182da820SMatthew Dillon #include <machine/critical.h>
102dba6c5a6SPeter Wemm 
103d2ac2316SJake Burkholder CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
104d2ac2316SJake Burkholder 
10548bfcdddSJulian Elischer void panc(char *string1, char *string2);
10648bfcdddSJulian Elischer 
10748bfcdddSJulian Elischer #if 0
108e602ba25SJulian Elischer static void runq_readjust(struct runq *rq, struct kse *ke);
10948bfcdddSJulian Elischer #endif
110e602ba25SJulian Elischer /************************************************************************
111e602ba25SJulian Elischer  * Functions that manipulate runnability from a thread perspective.	*
112e602ba25SJulian Elischer  ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 *
 * Never returns NULL: if sched_choose() has nothing runnable, the
 * per-CPU idle thread is returned instead.  The chosen thread is
 * marked running (TD_SET_RUNNING) before being returned.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

retry:
	if ((ke = sched_choose())) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (TD_IS_UNBOUND(td)) {
			/*
			 * An unbound thread leaves the ksegrp's priority
			 * queue now that it is actually going to run.  If it
			 * was the last thread with an assigned KSE, back
			 * kg_last_assigned up to its predecessor (or NULL) --
			 * see the scheme described at the top of this file.
			 */
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
		}
		kg->kg_runnable--;
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * Only allow non system threads to run in panic
	 * if they are the one we are tracing.  (I think.. [JRE])
	 * Otherwise loop back and pick again.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0))
		goto retry;

	TD_SET_RUNNING(td);
	return (td);
}
159e602ba25SJulian Elischer 
/*
 * Given a KSE (now surplus or at least loanable), either assign a new
 * runnable thread to it (and put it in the run queue) or put it in
 * the ksegrp's idle KSE list.
 * Or maybe give it back to its owner if it's been loaned.
 * Assumes that the original thread is either not runnable or
 * already on the run queue.
 *
 * Called with sched_lock held (asserted below).
 */
void
kse_reassign(struct kse *ke)
{
	struct ksegrp *kg;
	struct thread *td;
	struct thread *owner;
	struct thread *original;
	int loaned;		/* was the owner lending this KSE out? */

	KASSERT((ke->ke_owner), ("reassigning KSE with no owner"));
	KASSERT((ke->ke_thread && TD_IS_INHIBITED(ke->ke_thread)),
    	    ("reassigning KSE with no or runnable  thread"));
	mtx_assert(&sched_lock, MA_OWNED);
	kg = ke->ke_ksegrp;
	owner = ke->ke_owner;
	loaned = TD_LENDER(owner);
	original = ke->ke_thread;

	if (TD_CAN_UNBIND(original) && (original->td_standin)) {
		KASSERT((owner == original),
		    ("Early thread borrowing?"));
		/*
		 * The outgoing thread is "threaded" and has never
		 * scheduled an upcall.
		 * Decide whether this is a short or long term event
		 * and thus whether or not to schedule an upcall.
		 * If it is a short term event, just suspend it in
		 * a way that takes its KSE with it.
		 * Select the events for which we want to schedule upcalls.
		 * For now it's just sleep.
		 * Other threads that still have not fired an upcall
		 * are held to their KSE using the temporary Binding.
		 */
		if (TD_ON_SLEEPQ(original)) {
			/*
			 * A bound thread that can still unbind itself
			 * has been scheduled out.
			 * If it is sleeping, then we need to schedule an
			 * upcall.
			 * XXXKSE eventually almost any inhibition could do.
			 */
			original->td_flags &= ~TDF_CAN_UNBIND;
			original->td_flags |= TDF_UNBOUND;
			/* This may change ke_owner; re-read it below. */
			thread_schedule_upcall(original, ke);
			owner = ke->ke_owner;
			loaned = 1;
		}
	}

	/*
	 * If the current thread was borrowing, then make things consistent
	 * by giving it back to the owner for the moment. The original thread
	 * must be unbound and have already used its chance for
	 * firing off an upcall. Threads that have not yet made an upcall
	 * can not borrow KSEs.
	 */
	if (loaned) {
		KASSERT((original->td_standin == NULL),
		    ("kse_reassign: borrower still has standin thread"));
		TD_CLR_LOAN(owner);
		ke->ke_thread = owner;
		original->td_kse = NULL; /* give it amnesia */
		/*
		 * Upcalling threads have lower priority than all
		 * in-kernel threads, However threads that have loaned out
		 * their KSE and are NOT upcalling have the priority that
		 * they have. In other words, only look for other work if
		 * the owner is not runnable, OR is upcalling.
		 */
		if (TD_CAN_RUN(owner) &&
		    ((owner->td_flags & TDF_UPCALLING) == 0)) {
			setrunnable(owner);
			CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)",
			    ke, owner);
			return;
		}
	}

	/*
	 * Either the owner is not runnable, or is an upcall.
	 * Find the first unassigned thread:
	 * if there is a 'last assigned' then see what's next,
	 * otherwise look at what is first.
	 */
	if ((td = kg->kg_last_assigned)) {
		td = TAILQ_NEXT(td, td_runq);
	} else {
		td = TAILQ_FIRST(&kg->kg_runq);
	}

	/*
	 * If we found one assign it the kse, otherwise idle the kse.
	 */
	if (td) {
		/*
		 * Assign the new thread to the KSE
		 * and make the KSE runnable again.
		 */
		if (TD_IS_BOUND(owner)) {
			/*
			 * If there is a reason to keep the previous
			 * owner, do so (mark it as lending instead of
			 * transferring ownership).
			 */
			TD_SET_LOAN(owner);
		} else {
			/* otherwise, cut it free */
			ke->ke_owner = td;
			owner->td_kse = NULL;
		}
		kg->kg_last_assigned = td;
		td->td_kse = ke;
		ke->ke_thread = td;
		sched_add(ke);
		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
		return;
	}

	/*
	 * Now handle any waiting upcall,
	 * since we didn't make them runnable before.
	 */
	if (TD_CAN_RUN(owner)) {
		setrunnable(owner);
		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)",
		    ke, owner);
		return;
	}

	/*
	 * It is possible that this is the last thread in the group
	 * because the KSE is being shut down or the process
	 * is exiting.  In that case detach and destroy the KSE.
	 */
	if (TD_IS_EXITING(owner) || (ke->ke_flags & KEF_EXIT)) {
		ke->ke_thread = NULL;
		owner->td_kse = NULL;
		kse_unlink(ke);
		return;
	}

	/*
	 * At this stage all we know is that the owner
	 * is the same as the 'active' thread in the KSE
	 * and that it is
	 * presently NOT loaned out.
	 * Put it on the loanable queue. Make it fifo
	 * so that long term sleepers donate their KSE's first.
	 */
	KASSERT((TD_IS_BOUND(owner)), ("kse_reassign: UNBOUND lender"));
	ke->ke_state = KES_THREAD;
	ke->ke_flags |= KEF_ONLOANQ;
	TAILQ_INSERT_TAIL(&kg->kg_lq, ke, ke_kgrlist);
	kg->kg_loan_kses++;
	CTR1(KTR_RUNQ, "kse_reassign: ke%p on loan queue", ke);
	return;
}
324d5a08a60SJake Burkholder 
#if 0
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority (unless it's a BOUND thread/KSE pair).
 *
 * NOTE: currently compiled out (#if 0); adjustrunqueue() is the live
 * code path for re-queueing at a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT ((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	/*
	 * If it's a bound thread/KSE pair, take the shortcut. All non-KSE
	 * threads are BOUND.
	 */
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	if (TD_IS_BOUND(td))  {
		/* Bring its kse with it, leave the thread attached */
		sched_rem(ke);
		ke->ke_state = KES_THREAD;
		return;
	}
	/* Remember the predecessor: needed if we must back up last_assigned. */
   	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	if (ke) {
		/*
		 * This thread has been assigned to a KSE.
		 * We need to dissociate it and try assign the
		 * KSE to the next available thread. Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(ke);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value "));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		kse_reassign(ke);
	}
}
#endif
3751f955e2dSJulian Elischer 
/*
 * Change the priority of a thread that is on the run queue.
 *
 * For a bound thread/KSE pair only the system run queue position matters:
 * move the KSE between priority queues if its queue index changes.
 * For an unbound thread, dequeue it and re-enter it via setrunqueue(),
 * which redoes the KSE assignment and last_assigned bookkeeping.
 */
void
adjustrunqueue( struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT ((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
	/*
	 * If it's a bound thread/KSE pair, take the shortcut. All non-KSE
	 * threads are BOUND.
	 */
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	if (TD_IS_BOUND(td))  {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		/* Requeue only if the new priority lands in another queue. */
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(ke);
			sched_add(ke);
		}
		return;
	}
	/*
	 * An unbound thread. This is not optimised yet.
	 */
	kg = td->td_ksegrp;
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	if (ke) {
		/* It had an assigned KSE; detach it before requeueing. */
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(ke);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	td->td_priority = newpri;
	setrunqueue(td);
}
419e602ba25SJulian Elischer 
/*
 * Make a thread runnable.
 *
 * Unthreaded process (no P_KSES): the thread's KSE is permanently attached,
 * so just hand it to the scheduler.  Bound thread in a threaded process:
 * reclaim its KSE from the loan queue if it is there, then queue it.
 * Unbound thread: insert it into the ksegrp's priority-ordered run queue,
 * try to obtain a KSE for it (a loanable one first, else commandeer the
 * one assigned to the lowest-priority assigned thread if we beat it), and
 * keep kg_last_assigned consistent with the scheme described at the top
 * of this file.
 *
 * Called with sched_lock held (asserted below).
 */
void
setrunqueue(struct thread *td)
{
	struct kse *ke;
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;	/* ksegrp's last-assigned thread, if any */

	CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	kg->kg_runnable++;
	if ((td->td_proc->p_flag & P_KSES) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		sched_add(td->td_kse);
		return;
	}
	/*
	 * If the process is threaded but the thread is bound then
	 * there is still a little extra to do re. KSE loaning.
	 */
	if (TD_IS_BOUND(td)) {
		KASSERT((td->td_kse != NULL),
		    ("queueing BAD thread to run queue"));
		ke = td->td_kse;
		KASSERT((ke->ke_owner == ke->ke_thread),
		    ("setrunqueue: Hey KSE loaned out"));
		if (ke->ke_flags & KEF_ONLOANQ) {
			/* Take our KSE back off the loanable queue first. */
			ke->ke_flags &= ~KEF_ONLOANQ;
			TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist);
			kg->kg_loan_kses--;
		}
		sched_add(td->td_kse);
		return;
	}

	/*
	 * Ok, so we are threading with this thread.
	 * We don't have a KSE, see if we can get one..
	 */
	tda = kg->kg_last_assigned;
	if ((ke = td->td_kse) == NULL) {
		/*
		 * We will need a KSE, see if there is one..
		 * First look for a free one, before getting desperate.
		 * If we can't get one, our priority is not high enough..
		 * that's ok..
		 */
		if (kg->kg_loan_kses) {
			/*
			 * Failing that see if we can borrow one.
			 * Take the first KSE off the loan queue (FIFO)
			 * and mark its owner as a lender.
			 */
			ke = TAILQ_FIRST(&kg->kg_lq);
			TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist);
			ke->ke_flags &= ~KEF_ONLOANQ;
			ke->ke_state = KES_THREAD;
			TD_SET_LOAN(ke->ke_owner);
			ke->ke_thread  = NULL;
			kg->kg_loan_kses--;
		} else if (tda && (tda->td_priority > td->td_priority)) {
			/*
			 * None free, but there is one we can commandeer.
			 * Strip it from the lowest-priority assigned thread
			 * and back last_assigned up one place.
			 */
			ke = tda->td_kse;
			tda->td_kse = NULL;
			ke->ke_thread = NULL;
			tda = kg->kg_last_assigned =
		    	    TAILQ_PREV(tda, threadqueue, td_runq);
			sched_rem(ke);
		}
	} else {
		/*
		 * Temporarily disassociate so it looks like the other cases.
		 * If the owner wasn't lending before, then it is now..
		 */
		if (!TD_LENDER(ke->ke_owner)) {
			TD_SET_LOAN(ke->ke_owner);
		}
		ke->ke_thread = NULL;
		td->td_kse = NULL;
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place (sorted by ascending priority value).
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a ke to use, then put it on the run queue and
	 * If needed, readjust the last_assigned pointer.
	 */
	if (ke) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned so whoever is first
			 * gets the KSE we brought in.. (maybe us)
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			KASSERT((td2->td_kse == NULL),
			    ("unexpected ke present"));
			td2->td_kse = ke;
			ke->ke_thread = td2;
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			/*
			 * It's ours, grab it, but last_assigned is past us
			 * so don't change it.
			 */
			td->td_kse = ke;
			ke->ke_thread = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * put the new kse on whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
			td2->td_kse = ke;
			ke->ke_thread = td2;
		}
		sched_add(ke);
	}
}
561e602ba25SJulian Elischer 
562e602ba25SJulian Elischer /************************************************************************
563e602ba25SJulian Elischer  * Critical section marker functions					*
564e602ba25SJulian Elischer  ************************************************************************/
5657e1f6dfeSJohn Baldwin /* Critical sections that prevent preemption. */
5667e1f6dfeSJohn Baldwin void
5677e1f6dfeSJohn Baldwin critical_enter(void)
5687e1f6dfeSJohn Baldwin {
5697e1f6dfeSJohn Baldwin 	struct thread *td;
5707e1f6dfeSJohn Baldwin 
5717e1f6dfeSJohn Baldwin 	td = curthread;
5727e1f6dfeSJohn Baldwin 	if (td->td_critnest == 0)
573d74ac681SMatthew Dillon 		cpu_critical_enter();
5747e1f6dfeSJohn Baldwin 	td->td_critnest++;
5757e1f6dfeSJohn Baldwin }
5767e1f6dfeSJohn Baldwin 
5777e1f6dfeSJohn Baldwin void
5787e1f6dfeSJohn Baldwin critical_exit(void)
5797e1f6dfeSJohn Baldwin {
5807e1f6dfeSJohn Baldwin 	struct thread *td;
5817e1f6dfeSJohn Baldwin 
5827e1f6dfeSJohn Baldwin 	td = curthread;
5837e1f6dfeSJohn Baldwin 	if (td->td_critnest == 1) {
5847e1f6dfeSJohn Baldwin 		td->td_critnest = 0;
585d74ac681SMatthew Dillon 		cpu_critical_exit();
586d74ac681SMatthew Dillon 	} else {
5877e1f6dfeSJohn Baldwin 		td->td_critnest--;
5887e1f6dfeSJohn Baldwin 	}
589d74ac681SMatthew Dillon }
5907e1f6dfeSJohn Baldwin 
591e602ba25SJulian Elischer 
592e602ba25SJulian Elischer /************************************************************************
593e602ba25SJulian Elischer  * SYSTEM RUN QUEUE manipulations and tests				*
594e602ba25SJulian Elischer  ************************************************************************/
595e602ba25SJulian Elischer /*
596e602ba25SJulian Elischer  * Initialize a run structure.
597e602ba25SJulian Elischer  */
598e602ba25SJulian Elischer void
599e602ba25SJulian Elischer runq_init(struct runq *rq)
600e602ba25SJulian Elischer {
601e602ba25SJulian Elischer 	int i;
602e602ba25SJulian Elischer 
603e602ba25SJulian Elischer 	bzero(rq, sizeof *rq);
604e602ba25SJulian Elischer 	for (i = 0; i < RQ_NQS; i++)
605e602ba25SJulian Elischer 		TAILQ_INIT(&rq->rq_queues[i]);
606e602ba25SJulian Elischer }
607e602ba25SJulian Elischer 
608d5a08a60SJake Burkholder /*
609d5a08a60SJake Burkholder  * Clear the status bit of the queue corresponding to priority level pri,
610d5a08a60SJake Burkholder  * indicating that it is empty.
611d5a08a60SJake Burkholder  */
612d5a08a60SJake Burkholder static __inline void
613d5a08a60SJake Burkholder runq_clrbit(struct runq *rq, int pri)
614d5a08a60SJake Burkholder {
615d5a08a60SJake Burkholder 	struct rqbits *rqb;
616d5a08a60SJake Burkholder 
617d5a08a60SJake Burkholder 	rqb = &rq->rq_status;
618d5a08a60SJake Burkholder 	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
619d5a08a60SJake Burkholder 	    rqb->rqb_bits[RQB_WORD(pri)],
620d5a08a60SJake Burkholder 	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
621d5a08a60SJake Burkholder 	    RQB_BIT(pri), RQB_WORD(pri));
622d5a08a60SJake Burkholder 	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
623d5a08a60SJake Burkholder }
624d5a08a60SJake Burkholder 
625d5a08a60SJake Burkholder /*
626d5a08a60SJake Burkholder  * Find the index of the first non-empty run queue.  This is done by
627d5a08a60SJake Burkholder  * scanning the status bits, a set bit indicates a non-empty queue.
628d5a08a60SJake Burkholder  */
629d5a08a60SJake Burkholder static __inline int
630d5a08a60SJake Burkholder runq_findbit(struct runq *rq)
631d5a08a60SJake Burkholder {
632d5a08a60SJake Burkholder 	struct rqbits *rqb;
633d5a08a60SJake Burkholder 	int pri;
634d5a08a60SJake Burkholder 	int i;
635d5a08a60SJake Burkholder 
636d5a08a60SJake Burkholder 	rqb = &rq->rq_status;
637d5a08a60SJake Burkholder 	for (i = 0; i < RQB_LEN; i++)
638d5a08a60SJake Burkholder 		if (rqb->rqb_bits[i]) {
6392f9267ecSPeter Wemm 			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
640d5a08a60SJake Burkholder 			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
641d5a08a60SJake Burkholder 			    rqb->rqb_bits[i], i, pri);
642d5a08a60SJake Burkholder 			return (pri);
643d5a08a60SJake Burkholder 		}
644d5a08a60SJake Burkholder 
645d5a08a60SJake Burkholder 	return (-1);
646d5a08a60SJake Burkholder }
647d5a08a60SJake Burkholder 
648d5a08a60SJake Burkholder /*
649d5a08a60SJake Burkholder  * Set the status bit of the queue corresponding to priority level pri,
650d5a08a60SJake Burkholder  * indicating that it is non-empty.
651d5a08a60SJake Burkholder  */
652d5a08a60SJake Burkholder static __inline void
653d5a08a60SJake Burkholder runq_setbit(struct runq *rq, int pri)
654d5a08a60SJake Burkholder {
655d5a08a60SJake Burkholder 	struct rqbits *rqb;
656d5a08a60SJake Burkholder 
657d5a08a60SJake Burkholder 	rqb = &rq->rq_status;
658d5a08a60SJake Burkholder 	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
659d5a08a60SJake Burkholder 	    rqb->rqb_bits[RQB_WORD(pri)],
660d5a08a60SJake Burkholder 	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
661d5a08a60SJake Burkholder 	    RQB_BIT(pri), RQB_WORD(pri));
662d5a08a60SJake Burkholder 	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
663d5a08a60SJake Burkholder }
664d5a08a60SJake Burkholder 
665d5a08a60SJake Burkholder /*
666e602ba25SJulian Elischer  * Add the KSE to the queue specified by its priority, and set the
667d5a08a60SJake Burkholder  * corresponding status bit.
668d5a08a60SJake Burkholder  */
669d5a08a60SJake Burkholder void
670b40ce416SJulian Elischer runq_add(struct runq *rq, struct kse *ke)
671d5a08a60SJake Burkholder {
672d5a08a60SJake Burkholder 	struct rqhead *rqh;
673d5a08a60SJake Burkholder 	int pri;
674dba6c5a6SPeter Wemm 
6752c100766SJulian Elischer 	pri = ke->ke_thread->td_priority / RQ_PPQ;
676b40ce416SJulian Elischer 	ke->ke_rqindex = pri;
677d5a08a60SJake Burkholder 	runq_setbit(rq, pri);
678d5a08a60SJake Burkholder 	rqh = &rq->rq_queues[pri];
679d5a08a60SJake Burkholder 	CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
6802c100766SJulian Elischer 	    ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
681b40ce416SJulian Elischer 	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
682dba6c5a6SPeter Wemm }
683d5a08a60SJake Burkholder 
684d5a08a60SJake Burkholder /*
685d5a08a60SJake Burkholder  * Return true if there are runnable processes of any priority on the run
686d5a08a60SJake Burkholder  * queue, false otherwise.  Has no side effects, does not modify the run
687d5a08a60SJake Burkholder  * queue structure.
688d5a08a60SJake Burkholder  */
689d5a08a60SJake Burkholder int
690d5a08a60SJake Burkholder runq_check(struct runq *rq)
691d5a08a60SJake Burkholder {
692d5a08a60SJake Burkholder 	struct rqbits *rqb;
693d5a08a60SJake Burkholder 	int i;
694d5a08a60SJake Burkholder 
695d5a08a60SJake Burkholder 	rqb = &rq->rq_status;
696d5a08a60SJake Burkholder 	for (i = 0; i < RQB_LEN; i++)
697d5a08a60SJake Burkholder 		if (rqb->rqb_bits[i]) {
698d5a08a60SJake Burkholder 			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
699d5a08a60SJake Burkholder 			    rqb->rqb_bits[i], i);
700d5a08a60SJake Burkholder 			return (1);
701dba6c5a6SPeter Wemm 		}
702d5a08a60SJake Burkholder 	CTR0(KTR_RUNQ, "runq_check: empty");
703d5a08a60SJake Burkholder 
704d5a08a60SJake Burkholder 	return (0);
705dba6c5a6SPeter Wemm }
706d5a08a60SJake Burkholder 
707d5a08a60SJake Burkholder /*
708b43179fbSJeff Roberson  * Find the highest priority process on the run queue.
709d5a08a60SJake Burkholder  */
710b40ce416SJulian Elischer struct kse *
711d5a08a60SJake Burkholder runq_choose(struct runq *rq)
712d5a08a60SJake Burkholder {
713d5a08a60SJake Burkholder 	struct rqhead *rqh;
714b40ce416SJulian Elischer 	struct kse *ke;
715d5a08a60SJake Burkholder 	int pri;
716d5a08a60SJake Burkholder 
717d5a08a60SJake Burkholder 	mtx_assert(&sched_lock, MA_OWNED);
718e602ba25SJulian Elischer 	while ((pri = runq_findbit(rq)) != -1) {
719d5a08a60SJake Burkholder 		rqh = &rq->rq_queues[pri];
720b40ce416SJulian Elischer 		ke = TAILQ_FIRST(rqh);
721b40ce416SJulian Elischer 		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
722e602ba25SJulian Elischer 		CTR3(KTR_RUNQ,
723e602ba25SJulian Elischer 		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
724b40ce416SJulian Elischer 		return (ke);
725d5a08a60SJake Burkholder 	}
726d5a08a60SJake Burkholder 	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
727d5a08a60SJake Burkholder 
728e602ba25SJulian Elischer 	return (NULL);
729d5a08a60SJake Burkholder }
730d5a08a60SJake Burkholder 
731d5a08a60SJake Burkholder /*
732e602ba25SJulian Elischer  * Remove the KSE from the queue specified by its priority, and clear the
733d5a08a60SJake Burkholder  * corresponding status bit if the queue becomes empty.
734e602ba25SJulian Elischer  * Caller must set ke->ke_state afterwards.
735d5a08a60SJake Burkholder  */
736d5a08a60SJake Burkholder void
737b40ce416SJulian Elischer runq_remove(struct runq *rq, struct kse *ke)
738d5a08a60SJake Burkholder {
739d5a08a60SJake Burkholder 	struct rqhead *rqh;
740d5a08a60SJake Burkholder 	int pri;
741d5a08a60SJake Burkholder 
7429eb881f8SSeigo Tanimura 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
7439eb881f8SSeigo Tanimura 		("runq_remove: process swapped out"));
744b40ce416SJulian Elischer 	pri = ke->ke_rqindex;
745d5a08a60SJake Burkholder 	rqh = &rq->rq_queues[pri];
746d5a08a60SJake Burkholder 	CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
7472c100766SJulian Elischer 	    ke, ke->ke_thread->td_priority, pri, rqh);
748b40ce416SJulian Elischer 	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
749b40ce416SJulian Elischer 	TAILQ_REMOVE(rqh, ke, ke_procq);
750d5a08a60SJake Burkholder 	if (TAILQ_EMPTY(rqh)) {
751d5a08a60SJake Burkholder 		CTR0(KTR_RUNQ, "runq_remove: empty");
752d5a08a60SJake Burkholder 		runq_clrbit(rq, pri);
753d5a08a60SJake Burkholder 	}
754dba6c5a6SPeter Wemm }
755e602ba25SJulian Elischer 
75648bfcdddSJulian Elischer #if 0
757e602ba25SJulian Elischer void
75848bfcdddSJulian Elischer panc(char *string1, char *string2)
75948bfcdddSJulian Elischer {
76048bfcdddSJulian Elischer 	printf("%s", string1);
76148bfcdddSJulian Elischer 	Debugger(string2);
76248bfcdddSJulian Elischer }
76348bfcdddSJulian Elischer 
76448bfcdddSJulian Elischer void
76548bfcdddSJulian Elischer thread_sanity_check(struct thread *td, char *string)
766e602ba25SJulian Elischer {
767e602ba25SJulian Elischer 	struct proc *p;
768e602ba25SJulian Elischer 	struct ksegrp *kg;
769e602ba25SJulian Elischer 	struct kse *ke;
77048bfcdddSJulian Elischer 	struct thread *td2 = NULL;
771e602ba25SJulian Elischer 	unsigned int prevpri;
77248bfcdddSJulian Elischer 	int	saw_lastassigned = 0;
77348bfcdddSJulian Elischer 	int unassigned = 0;
77448bfcdddSJulian Elischer 	int assigned = 0;
775e602ba25SJulian Elischer 
776e602ba25SJulian Elischer 	p = td->td_proc;
777e602ba25SJulian Elischer 	kg = td->td_ksegrp;
778e602ba25SJulian Elischer 	ke = td->td_kse;
779e602ba25SJulian Elischer 
780e602ba25SJulian Elischer 
781e602ba25SJulian Elischer 	if (ke) {
7824f0db5e0SJulian Elischer 		if (p != ke->ke_proc) {
78348bfcdddSJulian Elischer 			panc(string, "wrong proc");
784e602ba25SJulian Elischer 		}
785e602ba25SJulian Elischer 		if (ke->ke_thread != td) {
78648bfcdddSJulian Elischer 			panc(string, "wrong thread");
787e602ba25SJulian Elischer 		}
788e602ba25SJulian Elischer 	}
789e602ba25SJulian Elischer 
790e602ba25SJulian Elischer 	if ((p->p_flag & P_KSES) == 0) {
791e602ba25SJulian Elischer 		if (ke == NULL) {
79248bfcdddSJulian Elischer 			panc(string, "non KSE thread lost kse");
793e602ba25SJulian Elischer 		}
794e602ba25SJulian Elischer 	} else {
795e602ba25SJulian Elischer 		prevpri = 0;
796e602ba25SJulian Elischer 		saw_lastassigned = 0;
797e602ba25SJulian Elischer 		unassigned = 0;
798e602ba25SJulian Elischer 		assigned = 0;
799e602ba25SJulian Elischer 		TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
800e602ba25SJulian Elischer 			if (td2->td_priority < prevpri) {
80148bfcdddSJulian Elischer 				panc(string, "thread runqueue unosorted");
80248bfcdddSJulian Elischer 			}
80348bfcdddSJulian Elischer 			if ((td2->td_state == TDS_RUNQ) &&
80448bfcdddSJulian Elischer 			    td2->td_kse &&
80548bfcdddSJulian Elischer 			    (td2->td_kse->ke_state != KES_ONRUNQ)) {
80648bfcdddSJulian Elischer 				panc(string, "KSE wrong state");
807e602ba25SJulian Elischer 			}
808e602ba25SJulian Elischer 			prevpri = td2->td_priority;
809e602ba25SJulian Elischer 			if (td2->td_kse) {
810e602ba25SJulian Elischer 				assigned++;
811e602ba25SJulian Elischer 				if (unassigned) {
81248bfcdddSJulian Elischer 					panc(string, "unassigned before assigned");
813e602ba25SJulian Elischer 				}
814e602ba25SJulian Elischer  				if  (kg->kg_last_assigned == NULL) {
81548bfcdddSJulian Elischer 					panc(string, "lastassigned corrupt");
816e602ba25SJulian Elischer 				}
817e602ba25SJulian Elischer 				if (saw_lastassigned) {
81848bfcdddSJulian Elischer 					panc(string, "last assigned not last");
819e602ba25SJulian Elischer 				}
820e602ba25SJulian Elischer 				if (td2->td_kse->ke_thread != td2) {
82148bfcdddSJulian Elischer 					panc(string, "mismatched kse/thread");
822e602ba25SJulian Elischer 				}
823e602ba25SJulian Elischer 			} else {
824e602ba25SJulian Elischer 				unassigned++;
825e602ba25SJulian Elischer 			}
826e602ba25SJulian Elischer 			if (td2 == kg->kg_last_assigned) {
827e602ba25SJulian Elischer 				saw_lastassigned = 1;
828e602ba25SJulian Elischer 				if (td2->td_kse == NULL) {
82948bfcdddSJulian Elischer 					panc(string, "last assigned not assigned");
830e602ba25SJulian Elischer 				}
831e602ba25SJulian Elischer 			}
832e602ba25SJulian Elischer 		}
833e602ba25SJulian Elischer 		if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
83448bfcdddSJulian Elischer 			panc(string, "where on earth does lastassigned point?");
835e602ba25SJulian Elischer 		}
836e602ba25SJulian Elischer 		FOREACH_THREAD_IN_GROUP(kg, td2) {
837e602ba25SJulian Elischer 			if (((td2->td_flags & TDF_UNBOUND) == 0) &&
83871fad9fdSJulian Elischer 			    (TD_ON_RUNQ(td2))) {
839e602ba25SJulian Elischer 				assigned++;
840e602ba25SJulian Elischer 				if (td2->td_kse == NULL) {
84148bfcdddSJulian Elischer 					panc(string, "BOUND thread with no KSE");
842e602ba25SJulian Elischer 				}
843e602ba25SJulian Elischer 			}
844e602ba25SJulian Elischer 		}
845e602ba25SJulian Elischer #if 0
846e602ba25SJulian Elischer 		if ((unassigned + assigned) != kg->kg_runnable) {
84748bfcdddSJulian Elischer 			panc(string, "wrong number in runnable");
848e602ba25SJulian Elischer 		}
849e602ba25SJulian Elischer #endif
850e602ba25SJulian Elischer 	}
85148bfcdddSJulian Elischer 	if (assigned == 12345) {
85248bfcdddSJulian Elischer 		printf("%p %p %p %p %p %d, %d",
85348bfcdddSJulian Elischer 		    td, td2, ke, kg, p, assigned, saw_lastassigned);
85448bfcdddSJulian Elischer 	}
855e602ba25SJulian Elischer }
8565e3da64eSJulian Elischer #endif
857e602ba25SJulian Elischer 
858