/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


#ifndef _SYS_DISP_H
#define	_SYS_DISP_H

#include <sys/priocntl.h>
#include <sys/thread.h>
#include <sys/class.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * The following is the format of a dispatcher queue entry.
 */
typedef struct dispq {
	kthread_t	*dq_first;	/* first thread on queue or NULL */
	kthread_t	*dq_last;	/* last thread on queue or NULL */
	int		dq_sruncnt;	/* number of loaded, runnable */
					/*    threads on queue */
} dispq_t;
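
/*
 * Illustrative sketch only: assuming threads on a dispq are linked
 * through their t_link field, walking one queue entry would look
 * roughly like:
 *
 *	kthread_t	*tp;
 *
 *	for (tp = dqp->dq_first; tp != NULL; tp = tp->t_link)
 *		... examine tp ...
 */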

/*
 * Dispatch queue structure.
 */
typedef struct _disp {
	disp_lock_t	disp_lock;	/* protects dispatching fields */
	pri_t		disp_npri;	/* # of priority levels in queue */
	dispq_t		*disp_q;	/* the dispatch queue */
	dispq_t		*disp_q_limit;	/* ptr past end of dispatch queue */
	ulong_t		*disp_qactmap;	/* bitmap of active dispatch queues */

	/*
	 * Priorities:
	 *	disp_maxrunpri is the maximum run priority of runnable
	 *	threads on this queue.  It is -1 if nothing is runnable.
	 *
	 *	disp_max_unbound_pri is the maximum run priority of threads
	 *	on this dispatch queue that are runnable by any CPU (i.e.,
	 *	unbound threads).  This may be left artificially high, then
	 *	corrected when some CPU tries to take an unbound thread.  It
	 *	is -1 if nothing is runnable.
	 */
	pri_t		disp_maxrunpri;	/* maximum run priority */
	pri_t		disp_max_unbound_pri;	/* max pri of unbound threads */

	volatile int	disp_nrunnable;	/* runnable threads in cpu dispq */

	struct cpu	*disp_cpu;	/* cpu owning this queue or NULL */
	hrtime_t	disp_steal;	/* time when threads become stealable */
} disp_t;
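
/*
 * Illustrative sketch only: assuming disp_q is an array of dispq_t
 * indexed by priority (one entry per level, with disp_qactmap tracking
 * the non-empty levels), picking the best runnable thread would look
 * roughly like:
 *
 *	pri_t		pri = dp->disp_maxrunpri;
 *	kthread_t	*tp = NULL;
 *
 *	if (pri >= 0)
 *		tp = dp->disp_q[pri].dq_first;
 */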

#if defined(_KERNEL) || defined(_FAKE_KERNEL)

#define	MAXCLSYSPRI	99	/* maximum level of any system class */
#define	MINCLSYSPRI	60	/* minimum level of any system class */


/*
 * Global scheduling variables.
 *	- See sys/cpuvar.h for CPU-local variables.
 */
extern int	nswapped;	/* number of swapped threads */
				/* nswapped protected by swap_lock */

extern	pri_t	minclsyspri;	/* minimum level of any system class */
extern	pri_t	maxclsyspri;	/* maximum level of any system class */
extern	pri_t	intr_pri;	/* interrupt thread priority base level */

#endif	/* _KERNEL || _FAKE_KERNEL */
#if defined(_KERNEL)

/*
 * Minimum amount of time (in nanoseconds) that a thread must remain
 * runnable before it can be stolen by another CPU.
 */
extern hrtime_t nosteal_nsec;
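
/*
 * Illustrative sketch only: assuming disp_steal records the time after
 * which a queue's threads may be stolen, a stealing CPU would skip a
 * queue roughly as follows:
 *
 *	if (dp->disp_steal != 0 && gethrtime() < dp->disp_steal)
 *		continue;	too early, leave threads to their home CPU
 */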

/*
 * Kernel preemption occurs when a thread becomes runnable with a
 * priority at or above kpreemptpri.
 *
 * So that other processors can watch for such threads, a separate
 * dispatch queue holding unbound work above kpreemptpri is maintained
 * as part of the CPU partition structure (cpupart_t).
 */
extern	pri_t	kpreemptpri;	/* level above which preemption takes place */
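
/*
 * Illustrative sketch only (tpri and cp are hypothetical locals): when a
 * newly enqueued thread should trigger kernel preemption on CPU cp, the
 * check would look roughly like:
 *
 *	if (tpri >= kpreemptpri && cp->cpu_kprunrun == 0) {
 *		cp->cpu_kprunrun = 1;
 *		if (cp != CPU)
 *			poke_cpu(cp->cpu_id);
 *	}
 */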

extern void		disp_kp_alloc(disp_t *, pri_t);	/* allocate kp queue */
extern void		disp_kp_free(disp_t *);		/* free kp queue */

/*
 * Macro for use by scheduling classes to decide whether a thread is
 * about to be scheduled.  It evaluates to the maximum run priority on
 * the thread's dispatch queue.
 */
#define	DISP_MAXRUNPRI(t)	((t)->t_disp_queue->disp_maxrunpri)
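
/*
 * Usage sketch only: a scheduling class could compare a thread's own
 * priority (tpri here, a hypothetical local) against the best priority
 * waiting on its dispatch queue:
 *
 *	if (tpri < DISP_MAXRUNPRI(t))
 *		... a higher-priority thread is waiting to run ...
 */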

/*
 * Platform callbacks for various dispatcher operations.
 *
 * idle_cpu() is invoked when a CPU goes idle and has nothing to do.
 * disp_enq_thread() is invoked when a thread is placed on a run queue.
 */
extern void	(*idle_cpu)();
extern void	(*disp_enq_thread)(struct cpu *, int);
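
/*
 * Illustrative sketch only: a platform could install its own idle hook
 * at startup (myplat_idle_cpu is a hypothetical function):
 *
 *	static void
 *	myplat_idle_cpu(void)
 *	{
 *		... enter a low-power idle state until work arrives ...
 *	}
 *
 *	idle_cpu = myplat_idle_cpu;
 */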

extern int		dispdeq(kthread_t *);
extern void		dispinit(void);
extern void		disp_add(sclass_t *);
extern int		intr_active(struct cpu *, int);
extern int		servicing_interrupt(void);
extern void		preempt(void);
extern void		setbackdq(kthread_t *);
extern void		setfrontdq(kthread_t *);
extern void		swtch(void);
extern void		swtch_to(kthread_t *);
extern void		swtch_from_zombie(void)
				__NORETURN;
extern void		dq_sruninc(kthread_t *);
extern void		dq_srundec(kthread_t *);
extern void		cpu_rechoose(kthread_t *);
extern void		cpu_surrender(kthread_t *);
extern void		kpreempt(int);
extern struct cpu	*disp_lowpri_cpu(struct cpu *, struct lgrp_ld *, pri_t,
			    struct cpu *);
extern int		disp_bound_threads(struct cpu *, int);
extern int		disp_bound_anythreads(struct cpu *, int);
extern int		disp_bound_partition(struct cpu *, int);
extern void		disp_cpu_init(struct cpu *);
extern void		disp_cpu_fini(struct cpu *);
extern void		disp_cpu_inactive(struct cpu *);
extern void		disp_adjust_unbound_pri(kthread_t *);
extern void		resume(kthread_t *);
extern void		resume_from_intr(kthread_t *);
extern void		resume_from_zombie(kthread_t *)
				__NORETURN;
extern void		disp_swapped_enq(kthread_t *);
extern int		disp_anywork(void);

#define	KPREEMPT_SYNC		(-1)
#define	kpreempt_disable()				\
	{						\
		curthread->t_preempt++;			\
		ASSERT(curthread->t_preempt >= 1);	\
	}
#define	kpreempt_enable()				\
	{						\
		ASSERT(curthread->t_preempt >= 1);	\
		if (--curthread->t_preempt == 0 &&	\
		    CPU->cpu_kprunrun)			\
			kpreempt(KPREEMPT_SYNC);	\
	}
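
/*
 * Usage sketch only: code that touches per-CPU state and must not be
 * preempted in the kernel typically brackets the critical section:
 *
 *	kpreempt_disable();
 *	... work that must not be interrupted by kernel preemption ...
 *	kpreempt_enable();
 */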

#endif	/* _KERNEL */

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_DISP_H */