xref: /titanic_41/usr/src/uts/common/sys/disp.h (revision 890e8ff10cfc85bc7d33064a9a30c3e8477b4813)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
28 /*	  All Rights Reserved  	*/
29 
30 
31 #ifndef _SYS_DISP_H
32 #define	_SYS_DISP_H
33 
34 #pragma ident	"%Z%%M%	%I%	%E% SMI"	/* SVr4.0 1.11	*/
35 
36 #include <sys/priocntl.h>
37 #include <sys/thread.h>
38 #include <sys/class.h>
39 
40 #ifdef	__cplusplus
41 extern "C" {
42 #endif
43 
44 /*
45  * The following is the format of a dispatcher queue entry.
46  */
47 typedef struct dispq {
48 	kthread_t	*dq_first;	/* first thread on queue or NULL */
49 	kthread_t	*dq_last;	/* last thread on queue or NULL */
50 	int		dq_sruncnt;	/* number of loaded, runnable */
51 					/*    threads on queue */
52 } dispq_t;
53 
54 /*
55  * Dispatch queue structure.
56  */
57 typedef struct _disp {
58 	disp_lock_t	disp_lock;	/* protects dispatching fields */
59 	pri_t		disp_npri;	/* # of priority levels in queue */
60 	dispq_t		*disp_q;		/* the dispatch queue */
61 	dispq_t		*disp_q_limit;	/* ptr past end of dispatch queue */
62 	ulong_t		*disp_qactmap;	/* bitmap of active dispatch queues */
63 
64 	/*
65 	 * Priorities:
66 	 *	disp_maxrunpri is the maximum run priority of runnable threads
67 	 * 	on this queue.  It is -1 if nothing is runnable.
68 	 *
69 	 *	disp_max_unbound_pri is the maximum run priority of threads on
70 	 *	this dispatch queue but runnable by any CPU.  This may be left
71 	 * 	artificially high, then corrected when some CPU tries to take
72 	 *	an unbound thread.  It is -1 if nothing is runnable.
73 	 */
74 	pri_t		disp_maxrunpri;	/* maximum run priority */
75 	pri_t		disp_max_unbound_pri;	/* max pri of unbound threads */
76 
77 	volatile int	disp_nrunnable;	/* runnable threads in cpu dispq */
78 
79 	struct cpu	*disp_cpu;	/* cpu owning this queue or NULL */
80 	hrtime_t	disp_steal;	/* time when threads become stealable */
81 } disp_t;
82 
83 #if defined(_KERNEL)
84 
/*
 * Compile-time bounds of the system-class priority range; the runtime
 * values are exported as minclsyspri/maxclsyspri below.
 */
#define	MAXCLSYSPRI	99
#define	MINCLSYSPRI	60


/*
 * Global scheduling variables.
 *	- See sys/cpuvar.h for CPU-local variables.
 */
extern int	nswapped;	/* number of swapped threads */
				/* nswapped protected by swap_lock */

extern	pri_t	minclsyspri;	/* minimum level of any system class */
extern	pri_t	maxclsyspri;	/* maximum level of any system class */
extern	pri_t	intr_pri;	/* interrupt thread priority base level */

/*
 * Amount of time that may elapse before a thread is considered to have
 * lost its cache investment.
 */
#define	RECHOOSE_INTERVAL	3
extern int	rechoose_interval;

/*
 * Minimum amount of time that a thread can remain runnable before it can
 * be stolen by another CPU (in nanoseconds).
 */
extern hrtime_t nosteal_nsec;

/*
 * Kernel preemption occurs if a higher-priority thread is runnable with
 * a priority at or above kpreemptpri.
 *
 * So that other processors can watch for such threads, a separate
 * dispatch queue with unbound work above kpreemptpri is maintained.
 * This is part of the CPU partition structure (cpupart_t).
 */
extern	pri_t	kpreemptpri;	/* level above which preemption takes place */

extern void		disp_kp_alloc(disp_t *, pri_t);	/* allocate kp queue */
extern void		disp_kp_free(disp_t *);		/* free kp queue */
125 
126 /*
127  * Macro for use by scheduling classes to decide whether the thread is about
128  * to be scheduled or not.  This returns the maximum run priority.
129  */
130 #define	DISP_MAXRUNPRI(t)	((t)->t_disp_queue->disp_maxrunpri)
131 
132 /*
133  * Platform callbacks for various dispatcher operations
134  *
135  * idle_cpu() is invoked when a cpu goes idle, and has nothing to do.
136  * disp_enq_thread() is invoked when a thread is placed on a run queue.
137  */
138 extern void	(*idle_cpu)();
139 extern void	(*disp_enq_thread)(struct cpu *, int);
140 
141 
142 extern int		dispdeq(kthread_t *);
143 extern void		dispinit(void);
144 extern void		disp_add(sclass_t *);
145 extern int		intr_active(struct cpu *, int);
146 extern int		servicing_interrupt(void);
147 extern void		preempt(void);
148 extern void		setbackdq(kthread_t *);
149 extern void		setfrontdq(kthread_t *);
150 extern void		swtch(void);
151 extern void		swtch_to(kthread_t *);
152 extern void		swtch_from_zombie(void)
153 				__NORETURN;
154 extern void		dq_sruninc(kthread_t *);
155 extern void		dq_srundec(kthread_t *);
156 extern void		cpu_rechoose(kthread_t *);
157 extern void		cpu_surrender(kthread_t *);
158 extern void		kpreempt(int);
159 extern struct cpu	*disp_lowpri_cpu(struct cpu *, struct lgrp_ld *, pri_t,
160 			    struct cpu *);
161 extern int		disp_bound_threads(struct cpu *, int);
162 extern int		disp_bound_anythreads(struct cpu *, int);
163 extern int		disp_bound_partition(struct cpu *, int);
164 extern void		disp_cpu_init(struct cpu *);
165 extern void		disp_cpu_fini(struct cpu *);
166 extern void		disp_cpu_inactive(struct cpu *);
167 extern void		disp_adjust_unbound_pri(kthread_t *);
168 extern void		resume(kthread_t *);
169 extern void		resume_from_intr(kthread_t *);
170 extern void		resume_from_zombie(kthread_t *)
171 				__NORETURN;
172 extern void		disp_swapped_enq(kthread_t *);
173 extern int		disp_anywork(void);
174 
/* Argument passed to kpreempt() by kpreempt_enable() below. */
#define	KPREEMPT_SYNC		(-1)

/*
 * Disable kernel preemption of the calling thread by bumping its
 * t_preempt count.  Calls may nest; preemption is possible again only
 * once kpreempt_enable() brings the count back to zero.
 *
 * Both macros are wrapped in do { ... } while (0) rather than a bare
 * { ... } block so each invocation expands to exactly one statement
 * and remains correct inside an unbraced if/else body (CERT PRE10-C).
 */
#define	kpreempt_disable()				\
	do {						\
		curthread->t_preempt++;			\
		ASSERT(curthread->t_preempt >= 1);	\
	} while (0)

/*
 * Re-enable kernel preemption.  When this drops the nesting count to
 * zero and a preemption is pending on this CPU (cpu_kprunrun set),
 * call kpreempt(KPREEMPT_SYNC) immediately.
 */
#define	kpreempt_enable()				\
	do {						\
		ASSERT(curthread->t_preempt >= 1);	\
		if (--curthread->t_preempt == 0 &&	\
		    CPU->cpu_kprunrun)			\
			kpreempt(KPREEMPT_SYNC);	\
	} while (0)
188 
189 #endif	/* _KERNEL */
190 
191 #ifdef	__cplusplus
192 }
193 #endif
194 
195 #endif	/* _SYS_DISP_H */
196