xref: /freebsd/sys/sys/sched.h (revision a11926f2a5f00b57ebff5bd548c9904b7f6e5800)
1 /*-
2  * SPDX-License-Identifier: (BSD-4-Clause AND BSD-2-Clause)
3  *
4  * Copyright (c) 1996, 1997
5  *      HD Associates, Inc.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *      This product includes software developed by HD Associates, Inc
18  *      and Jukka Antero Ukkonen.
19  * 4. Neither the name of the author nor the names of any co-contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY HD ASSOCIATES AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL HD ASSOCIATES OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 /*-
37  * Copyright (c) 2002-2008, Jeffrey Roberson <jeff@freebsd.org>
38  * All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice unmodified, this list of conditions, and the following
45  *    disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in the
48  *    documentation and/or other materials provided with the distribution.
49  *
50  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
51  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
52  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
53  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
54  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
55  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
56  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
57  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
58  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
59  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60  */
61 
62 #ifndef _SCHED_H_
63 #define	_SCHED_H_
64 
65 #ifdef _KERNEL
66 
67 #include <sys/types.h>
68 #ifdef SCHED_STATS
69 #include <sys/pcpu.h>
70 #endif
71 
72 struct proc;
73 struct thread;
74 
75 /*
76  * General scheduling info.
77  *
78  * sched_load:
79  *	Total runnable non-ithread threads in the system.
80  *
81  * sched_runnable:
82  *	Runnable threads for this processor.
83  */
84 int	sched_load(void);
85 int	sched_rr_interval(void);
86 bool	sched_runnable(void);
87 
88 /*
89  * Proc related scheduling hooks.
90  */
91 void	sched_exit(struct proc *p, struct thread *childtd);
92 void	sched_fork(struct thread *td, struct thread *childtd);
93 void	sched_fork_exit(struct thread *td);
94 void	sched_class(struct thread *td, int class);
95 void	sched_nice(struct proc *p, int nice);
96 
97 /*
98  * Threads are switched in and out, block on resources, have temporary
99  * priorities inherited from their procs, and use up cpu time.
100  */
101 void	sched_ap_entry(void);
102 void	sched_exit_thread(struct thread *td, struct thread *child);
103 u_int	sched_estcpu(struct thread *td);
104 void	sched_fork_thread(struct thread *td, struct thread *child);
105 void	sched_ithread_prio(struct thread *td, u_char prio);
106 void	sched_lend_prio(struct thread *td, u_char prio);
107 void	sched_lend_user_prio(struct thread *td, u_char pri);
108 void	sched_lend_user_prio_cond(struct thread *td, u_char pri);
109 fixpt_t	sched_pctcpu(struct thread *td);
110 void	sched_prio(struct thread *td, u_char prio);
111 void	sched_sleep(struct thread *td, int prio);
112 void	sched_switch(struct thread *td, int flags);
113 void	sched_throw(struct thread *td);
114 void	sched_unlend_prio(struct thread *td, u_char prio);
115 void	sched_user_prio(struct thread *td, u_char prio);
116 void	sched_userret_slowpath(struct thread *td);
117 #ifdef	RACCT
118 #ifdef	SCHED_4BSD
119 fixpt_t	sched_pctcpu_delta(struct thread *td);
120 #endif
121 #endif
122 
123 static inline void
sched_userret(struct thread * td)124 sched_userret(struct thread *td)
125 {
126 
127 	/*
128 	 * XXX we cheat slightly on the locking here to avoid locking in
129 	 * the usual case.  Setting td_priority here is essentially an
130 	 * incomplete workaround for not setting it properly elsewhere.
131 	 * Now that some interrupt handlers are threads, not setting it
132 	 * properly elsewhere can clobber it in the window between setting
133 	 * it here and returning to user mode, so don't waste time setting
134 	 * it perfectly here.
135 	 */
136 	KASSERT((td->td_flags & TDF_BORROWING) == 0,
137 	    ("thread with borrowed priority returning to userland"));
138 	if (__predict_false(td->td_priority != td->td_user_pri))
139 		sched_userret_slowpath(td);
140 }
141 
/*
 * Threads are moved on and off of run queues
 */
void	sched_add(struct thread *td, int flags);
struct thread *sched_choose(void);
void	sched_clock(struct thread *td, int cnt);
void	sched_idletd(void *);
void	sched_preempt(struct thread *td);
void	sched_relinquish(struct thread *td);
void	sched_rem(struct thread *td);
void	sched_wakeup(struct thread *td, int srqflags);

/*
 * Binding makes cpu affinity permanent while pinning is used to temporarily
 * hold a thread on a particular CPU.
 */
void	sched_bind(struct thread *td, int cpu);
static __inline void sched_pin(void);
void	sched_unbind(struct thread *td);
static __inline void sched_unpin(void);
int	sched_is_bound(struct thread *td);
void	sched_affinity(struct thread *td);
164 
/*
 * These procedures tell the process data structure allocation code how
 * many bytes to actually allocate.
 */
int	sched_sizeof_proc(void);
int	sched_sizeof_thread(void);

/*
 * This routine provides a consistent thread name for use with KTR graphing
 * functions.
 */
char	*sched_tdname(struct thread *td);
#ifdef KTR
void	sched_clear_tdname(struct thread *td);
#endif
180 
181 static __inline void
sched_pin(void)182 sched_pin(void)
183 {
184 	curthread->td_pinned++;
185 	atomic_interrupt_fence();
186 }
187 
188 static __inline void
sched_unpin(void)189 sched_unpin(void)
190 {
191 	atomic_interrupt_fence();
192 	MPASS(curthread->td_pinned > 0);
193 	curthread->td_pinned--;
194 }
195 
/* sched_add arguments (formerly setrunqueue) */
#define	SRQ_BORING	0x0000		/* No special circumstances. */
#define	SRQ_YIELDING	0x0001		/* We are yielding (from mi_switch). */
#define	SRQ_OURSELF	0x0002		/* It is ourself (from mi_switch). */
#define	SRQ_INTR	0x0004		/* It is probably urgent. */
#define	SRQ_PREEMPTED	0x0008		/* has been preempted.. be kind */
#define	SRQ_BORROWING	0x0010		/* Priority updated due to prio_lend */
#define	SRQ_HOLD	0x0020		/* Return holding original td lock */
#define	SRQ_HOLDTD	0x0040		/* Return holding td lock */
205 
/* Scheduler stats. */
#ifdef SCHED_STATS
DPCPU_DECLARE(long, sched_switch_stats[SWT_COUNT]);

/*
 * Register a read-only sysctl under kern.sched.stats that reports the
 * per-CPU counter at 'ptr' via sysctl_dpcpu_long.  Registration runs
 * from a SYSINIT late in boot (SI_SUB_LAST).
 */
#define	SCHED_STAT_DEFINE_VAR(name, ptr, descr)				\
static void name ## _add_proc(void *dummy __unused)			\
{									\
									\
	SYSCTL_ADD_PROC(NULL,						\
	    SYSCTL_STATIC_CHILDREN(_kern_sched_stats), OID_AUTO,	\
	    #name, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,		\
	    ptr, 0, sysctl_dpcpu_long, "LU", descr);			\
}									\
SYSINIT(name, SI_SUB_LAST, SI_ORDER_MIDDLE, name ## _add_proc, NULL);

#define	SCHED_STAT_DEFINE(name, descr)					\
    DPCPU_DEFINE(unsigned long, name);					\
    SCHED_STAT_DEFINE_VAR(name, &DPCPU_NAME(name), descr)
/*
 * Sched stats are always incremented in critical sections so no atomic
 * is necessary to increment them.
 */
#define SCHED_STAT_INC(var)     DPCPU_GET(var)++;
#else
/*
 * Stubs when stats are compiled out.  Parameter order matches the
 * SCHED_STATS definitions above (it was (name, descr, ptr) before,
 * harmless only because the expansion is empty).
 */
#define	SCHED_STAT_DEFINE_VAR(name, ptr, descr)
#define	SCHED_STAT_DEFINE(name, descr)
#define SCHED_STAT_INC(var)			(void)0
#endif
234 
235 /*
236  * Fixup scheduler state for proc0 and thread0
237  */
238 void schedinit(void);
239 
240 /*
241  * Fixup scheduler state for secondary APs
242  */
243 void schedinit_ap(void);
244 #endif /* _KERNEL */
245 
/* POSIX 1003.1b Process Scheduling */

/*
 * POSIX scheduling policies
 */
#define SCHED_FIFO      1	/* First in-first out (realtime). */
#define SCHED_OTHER     2	/* Default timesharing policy. */
#define SCHED_RR        3	/* Round-robin (realtime). */

/*
 * POSIX scheduling parameters; priority is the only parameter
 * POSIX requires.
 */
struct sched_param {
        int     sched_priority;
};
258 
259 /*
260  * POSIX scheduling declarations for userland.
261  */
262 #ifndef _KERNEL
263 #include <sys/cdefs.h>
264 #include <sys/_timespec.h>
265 #include <sys/_types.h>
266 
267 #ifndef _PID_T_DECLARED
268 typedef __pid_t         pid_t;
269 #define _PID_T_DECLARED
270 #endif
271 
272 __BEGIN_DECLS
273 int     sched_get_priority_max(int);
274 int     sched_get_priority_min(int);
275 int     sched_getparam(pid_t, struct sched_param *);
276 int     sched_getscheduler(pid_t);
277 int     sched_rr_get_interval(pid_t, struct timespec *);
278 int     sched_setparam(pid_t, const struct sched_param *);
279 int     sched_setscheduler(pid_t, int, const struct sched_param *);
280 int     sched_yield(void);
281 __END_DECLS
282 
283 #endif
284 #endif /* !_SCHED_H_ */
285