xref: /titanic_52/usr/src/uts/common/inet/squeue.c (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1*7c478bd9Sstevel@tonic-gate /*
2*7c478bd9Sstevel@tonic-gate  * CDDL HEADER START
3*7c478bd9Sstevel@tonic-gate  *
4*7c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*7c478bd9Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*7c478bd9Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*7c478bd9Sstevel@tonic-gate  * with the License.
8*7c478bd9Sstevel@tonic-gate  *
9*7c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*7c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*7c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*7c478bd9Sstevel@tonic-gate  * and limitations under the License.
13*7c478bd9Sstevel@tonic-gate  *
14*7c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*7c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*7c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*7c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*7c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*7c478bd9Sstevel@tonic-gate  *
20*7c478bd9Sstevel@tonic-gate  * CDDL HEADER END
21*7c478bd9Sstevel@tonic-gate  */
22*7c478bd9Sstevel@tonic-gate /*
23*7c478bd9Sstevel@tonic-gate  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24*7c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
25*7c478bd9Sstevel@tonic-gate  */
26*7c478bd9Sstevel@tonic-gate 
27*7c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*7c478bd9Sstevel@tonic-gate 
29*7c478bd9Sstevel@tonic-gate /*
30*7c478bd9Sstevel@tonic-gate  * Squeues - TCP/IP serialization mechanism.
31*7c478bd9Sstevel@tonic-gate  *
32*7c478bd9Sstevel@tonic-gate  * This is a general purpose high-performance serialization mechanism. It is
33*7c478bd9Sstevel@tonic-gate  * similar to a taskq with a single worker thread, the difference is that it
34*7c478bd9Sstevel@tonic-gate  * does not imply a context switch - the thread placing a request may actually
35*7c478bd9Sstevel@tonic-gate  * process it. It is also biased for processing requests in interrupt context.
36*7c478bd9Sstevel@tonic-gate  *
37*7c478bd9Sstevel@tonic-gate  * Each squeue has a worker thread which may optionally be bound to a CPU.
38*7c478bd9Sstevel@tonic-gate  *
39*7c478bd9Sstevel@tonic-gate  * Only one thread may process requests from a given squeue at any time. This is
40*7c478bd9Sstevel@tonic-gate  * called "entering" squeue.
41*7c478bd9Sstevel@tonic-gate  *
42*7c478bd9Sstevel@tonic-gate  * Each dispatched request is processed either by
43*7c478bd9Sstevel@tonic-gate  *
44*7c478bd9Sstevel@tonic-gate  *	a) Dispatching thread or
45*7c478bd9Sstevel@tonic-gate  *	b) Some other thread that is currently processing squeue at the time of
46*7c478bd9Sstevel@tonic-gate  *		request or
47*7c478bd9Sstevel@tonic-gate  *	c) worker thread.
48*7c478bd9Sstevel@tonic-gate  *
49*7c478bd9Sstevel@tonic-gate  * INTERFACES:
50*7c478bd9Sstevel@tonic-gate  *
51*7c478bd9Sstevel@tonic-gate  * squeue_t *squeue_create(name, bind, wait, pri)
52*7c478bd9Sstevel@tonic-gate  *
53*7c478bd9Sstevel@tonic-gate  *	name: symbolic name for squeue.
 54*7c478bd9Sstevel@tonic-gate  *	wait: time to wait before waking the worker thread after queueing
55*7c478bd9Sstevel@tonic-gate  *		request.
56*7c478bd9Sstevel@tonic-gate  *	bind: preferred CPU binding for the worker thread.
57*7c478bd9Sstevel@tonic-gate  *	pri:  thread priority for the worker thread.
58*7c478bd9Sstevel@tonic-gate  *
59*7c478bd9Sstevel@tonic-gate  *   This function never fails and may sleep. It returns a transparent pointer
60*7c478bd9Sstevel@tonic-gate  *   to the squeue_t structure that is passed to all other squeue operations.
61*7c478bd9Sstevel@tonic-gate  *
62*7c478bd9Sstevel@tonic-gate  * void squeue_bind(sqp, bind)
63*7c478bd9Sstevel@tonic-gate  *
64*7c478bd9Sstevel@tonic-gate  *   Bind squeue worker thread to a CPU specified by the 'bind' argument. The
65*7c478bd9Sstevel@tonic-gate  *   'bind' value of -1 binds to the preferred thread specified for
66*7c478bd9Sstevel@tonic-gate  *   squeue_create.
67*7c478bd9Sstevel@tonic-gate  *
 68*7c478bd9Sstevel@tonic-gate  *   NOTE: Any value of 'bind' other than -1 is not supported currently, but the
69*7c478bd9Sstevel@tonic-gate  *	 API is present - in the future it may be useful to specify different
70*7c478bd9Sstevel@tonic-gate  *	 binding.
71*7c478bd9Sstevel@tonic-gate  *
72*7c478bd9Sstevel@tonic-gate  * void squeue_unbind(sqp)
73*7c478bd9Sstevel@tonic-gate  *
74*7c478bd9Sstevel@tonic-gate  *   Unbind the worker thread from its preferred CPU.
75*7c478bd9Sstevel@tonic-gate  *
76*7c478bd9Sstevel@tonic-gate  * void squeue_enter(*sqp, *mp, proc, arg, tag)
77*7c478bd9Sstevel@tonic-gate  *
78*7c478bd9Sstevel@tonic-gate  *   Post a single request for processing. Each request consists of mblock 'mp',
79*7c478bd9Sstevel@tonic-gate  *   function 'proc' to execute and an argument 'arg' to pass to this
80*7c478bd9Sstevel@tonic-gate  *   function. The function is called as (*proc)(arg, mp, sqp); The tag is an
81*7c478bd9Sstevel@tonic-gate  *   arbitrary number from 0 to 255 which will be stored in mp to track exact
82*7c478bd9Sstevel@tonic-gate  *   caller of squeue_enter. The combination of function name and the tag should
83*7c478bd9Sstevel@tonic-gate  *   provide enough information to identify the caller.
84*7c478bd9Sstevel@tonic-gate  *
85*7c478bd9Sstevel@tonic-gate  *   If no one is processing the squeue, squeue_enter() will call the function
86*7c478bd9Sstevel@tonic-gate  *   immediately. Otherwise it will add the request to the queue for later
87*7c478bd9Sstevel@tonic-gate  *   processing. Once the function is executed, the thread may continue
88*7c478bd9Sstevel@tonic-gate  *   executing all other requests pending on the queue.
89*7c478bd9Sstevel@tonic-gate  *
90*7c478bd9Sstevel@tonic-gate  *   NOTE: The tagging information is only used when SQUEUE_DEBUG is set to 1.
91*7c478bd9Sstevel@tonic-gate  *   NOTE: The argument can be conn_t only. Ideally we'd like to have generic
92*7c478bd9Sstevel@tonic-gate  *	   argument, but we want to drop connection reference count here - this
93*7c478bd9Sstevel@tonic-gate  *	   improves tail-call optimizations.
94*7c478bd9Sstevel@tonic-gate  *	   XXX: The arg should have type conn_t.
95*7c478bd9Sstevel@tonic-gate  *
96*7c478bd9Sstevel@tonic-gate  * void squeue_enter_nodrain(*sqp, *mp, proc, arg, tag)
97*7c478bd9Sstevel@tonic-gate  *
98*7c478bd9Sstevel@tonic-gate  *   Same as squeue_enter(), but the entering thread will only try to execute a
99*7c478bd9Sstevel@tonic-gate  *   single request. It will not continue executing any pending requests.
100*7c478bd9Sstevel@tonic-gate  *
101*7c478bd9Sstevel@tonic-gate  * void squeue_fill(*sqp, *mp, proc, arg, tag)
102*7c478bd9Sstevel@tonic-gate  *
103*7c478bd9Sstevel@tonic-gate  *   Just place the request on the queue without trying to execute it. Arrange
104*7c478bd9Sstevel@tonic-gate  *   for the worker thread to process the request.
105*7c478bd9Sstevel@tonic-gate  *
106*7c478bd9Sstevel@tonic-gate  * void squeue_profile_enable(sqp)
107*7c478bd9Sstevel@tonic-gate  * void squeue_profile_disable(sqp)
108*7c478bd9Sstevel@tonic-gate  *
109*7c478bd9Sstevel@tonic-gate  *    Enable or disable profiling for specified 'sqp'. Profiling is only
110*7c478bd9Sstevel@tonic-gate  *    available when SQUEUE_PROFILE is set.
111*7c478bd9Sstevel@tonic-gate  *
112*7c478bd9Sstevel@tonic-gate  * void squeue_profile_reset(sqp)
113*7c478bd9Sstevel@tonic-gate  *
114*7c478bd9Sstevel@tonic-gate  *    Reset all profiling information to zero. Profiling is only
115*7c478bd9Sstevel@tonic-gate  *    available when SQUEUE_PROFILE is set.
116*7c478bd9Sstevel@tonic-gate  *
117*7c478bd9Sstevel@tonic-gate  * void squeue_profile_start()
118*7c478bd9Sstevel@tonic-gate  * void squeue_profile_stop()
119*7c478bd9Sstevel@tonic-gate  *
 120*7c478bd9Sstevel@tonic-gate  *    Globally enable or disable profiling for all squeues.
121*7c478bd9Sstevel@tonic-gate  *
122*7c478bd9Sstevel@tonic-gate  * uintptr_t *squeue_getprivate(sqp, p)
123*7c478bd9Sstevel@tonic-gate  *
124*7c478bd9Sstevel@tonic-gate  *    Each squeue keeps small amount of private data space available for various
125*7c478bd9Sstevel@tonic-gate  *    consumers. Current consumers include TCP and NCA. Other consumers need to
126*7c478bd9Sstevel@tonic-gate  *    add their private tag to the sqprivate_t enum. The private information is
127*7c478bd9Sstevel@tonic-gate  *    limited to an uintptr_t value. The squeue has no knowledge of its content
128*7c478bd9Sstevel@tonic-gate  *    and does not manage it in any way.
129*7c478bd9Sstevel@tonic-gate  *
130*7c478bd9Sstevel@tonic-gate  *    The typical use may be a breakdown of data structures per CPU (since
131*7c478bd9Sstevel@tonic-gate  *    squeues are usually per CPU). See NCA for examples of use.
132*7c478bd9Sstevel@tonic-gate  *    Currently 'p' may have one legal value SQPRIVATE_TCP.
133*7c478bd9Sstevel@tonic-gate  *
134*7c478bd9Sstevel@tonic-gate  * processorid_t squeue_binding(sqp)
135*7c478bd9Sstevel@tonic-gate  *
136*7c478bd9Sstevel@tonic-gate  *    Returns the CPU binding for a given squeue.
137*7c478bd9Sstevel@tonic-gate  *
 138*7c478bd9Sstevel@tonic-gate  * TUNABLES:
139*7c478bd9Sstevel@tonic-gate  *
140*7c478bd9Sstevel@tonic-gate  * squeue_intrdrain_ms: Maximum time in ms interrupts spend draining any
141*7c478bd9Sstevel@tonic-gate  *	squeue. Note that this is approximation - squeues have no control on the
142*7c478bd9Sstevel@tonic-gate  *	time it takes to process each request. This limit is only checked
143*7c478bd9Sstevel@tonic-gate  *	between processing individual messages.
144*7c478bd9Sstevel@tonic-gate  *    Default: 20 ms.
145*7c478bd9Sstevel@tonic-gate  *
146*7c478bd9Sstevel@tonic-gate  * squeue_writerdrain_ms: Maximum time in ms non-interrupts spend draining any
147*7c478bd9Sstevel@tonic-gate  *	squeue. Note that this is approximation - squeues have no control on the
148*7c478bd9Sstevel@tonic-gate  *	time it takes to process each request. This limit is only checked
149*7c478bd9Sstevel@tonic-gate  *	between processing individual messages.
150*7c478bd9Sstevel@tonic-gate  *    Default: 10 ms.
151*7c478bd9Sstevel@tonic-gate  *
152*7c478bd9Sstevel@tonic-gate  * squeue_workerdrain_ms: Maximum time in ms worker thread spends draining any
153*7c478bd9Sstevel@tonic-gate  *	squeue. Note that this is approximation - squeues have no control on the
154*7c478bd9Sstevel@tonic-gate  *	time it takes to process each request. This limit is only checked
155*7c478bd9Sstevel@tonic-gate  *	between processing individual messages.
156*7c478bd9Sstevel@tonic-gate  *    Default: 10 ms.
157*7c478bd9Sstevel@tonic-gate  *
158*7c478bd9Sstevel@tonic-gate  * squeue_workerwait_ms: When worker thread is interrupted because workerdrain
159*7c478bd9Sstevel@tonic-gate  *	expired, how much time to wait before waking worker thread again.
160*7c478bd9Sstevel@tonic-gate  *    Default: 10 ms.
161*7c478bd9Sstevel@tonic-gate  *
162*7c478bd9Sstevel@tonic-gate  * DEFINES:
163*7c478bd9Sstevel@tonic-gate  *
 164*7c478bd9Sstevel@tonic-gate  * SQUEUE_DEBUG: If defined as 1, special code is compiled in which records
 165*7c478bd9Sstevel@tonic-gate  *	additional information in the squeue to aid debugging.
166*7c478bd9Sstevel@tonic-gate  *
167*7c478bd9Sstevel@tonic-gate  * SQUEUE_PROFILE: If defined as 1, special code is compiled in which collects
168*7c478bd9Sstevel@tonic-gate  *	various squeue statistics and exports them as kstats.
169*7c478bd9Sstevel@tonic-gate  *
170*7c478bd9Sstevel@tonic-gate  * Ideally we would like both SQUEUE_DEBUG and SQUEUE_PROFILE to be always set,
171*7c478bd9Sstevel@tonic-gate  * but it affects performance, so they are enabled on DEBUG kernels and disabled
172*7c478bd9Sstevel@tonic-gate  * on non-DEBUG by default.
173*7c478bd9Sstevel@tonic-gate  */
174*7c478bd9Sstevel@tonic-gate 
175*7c478bd9Sstevel@tonic-gate #include <sys/types.h>
176*7c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
177*7c478bd9Sstevel@tonic-gate #include <sys/debug.h>
178*7c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
179*7c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h>
180*7c478bd9Sstevel@tonic-gate #include <sys/condvar_impl.h>
181*7c478bd9Sstevel@tonic-gate #include <sys/systm.h>
182*7c478bd9Sstevel@tonic-gate #include <sys/callb.h>
183*7c478bd9Sstevel@tonic-gate #include <sys/sdt.h>
184*7c478bd9Sstevel@tonic-gate #include <sys/ddi.h>
185*7c478bd9Sstevel@tonic-gate 
186*7c478bd9Sstevel@tonic-gate #include <inet/ipclassifier.h>
187*7c478bd9Sstevel@tonic-gate 
/*
 * State flags (kept in sq_state).
 * Note: The MDB IP module depends on the values of these flags.
 */
#define	SQS_PROC	0x0001	/* being processed */
#define	SQS_WORKER	0x0002	/* worker thread */
#define	SQS_ENTER	0x0004	/* enter thread */
#define	SQS_FAST	0x0008	/* enter-fast thread */
#define	SQS_USER	0x0010	/* A non interrupt user */
#define	SQS_BOUND	0x0020	/* Worker thread is bound */
#define	SQS_PROFILE	0x0040	/* Enable profiling */
#define	SQS_REENTER	0x0080	/* Re entered thread */
#define	SQS_TMO_PROG	0x0100	/* Timeout is being set */

/*
 * Debugging and profiling support is compiled in on DEBUG kernels only;
 * both affect performance (see the file header comment).
 */
#ifdef DEBUG
#define	SQUEUE_DEBUG 1
#define	SQUEUE_PROFILE 1
#else
#define	SQUEUE_DEBUG 0
#define	SQUEUE_PROFILE 0
#endif
209*7c478bd9Sstevel@tonic-gate 
210*7c478bd9Sstevel@tonic-gate #include <sys/squeue_impl.h>
211*7c478bd9Sstevel@tonic-gate 
/* Internal entry points; see definitions below for details. */
static void squeue_fire(void *);
static void squeue_drain(squeue_t *, uint_t, clock_t);
static void squeue_worker(squeue_t *sqp);

#if SQUEUE_PROFILE
static kmutex_t squeue_kstat_lock;
static int  squeue_kstat_update(kstat_t *, int);
#endif

/* Cache from which all squeue_t structures are allocated. */
kmem_cache_t *squeue_cache;

/* Tunables (milliseconds); documented in the file header comment. */
int squeue_intrdrain_ms = 20;
int squeue_writerdrain_ms = 10;
int squeue_workerdrain_ms = 10;
int squeue_workerwait_ms = 10;

/* The values above converted to ticks by squeue_init(). */
static int squeue_intrdrain_tick = 0;
static int squeue_writerdrain_tick = 0;
static int squeue_workerdrain_tick = 0;
static int squeue_workerwait_tick = 0;

/*
 * The minimum number of packets queued when the worker thread doing the
 * drain triggers polling (if the squeue allows it). The choice of 3 is
 * arbitrary. You definitely don't want it to be 1 since that will
 * trigger polling on very low loads as well (ssh seems to be one such
 * example where packet flow was very low yet somehow 1 packet ended up
 * getting queued, so the worker thread fires every 10ms and blanking
 * also gets triggered).
 */
int squeue_worker_poll_min = 3;
244*7c478bd9Sstevel@tonic-gate 
#if SQUEUE_PROFILE
/*
 * Set to B_TRUE to enable profiling.
 */
static int squeue_profile = B_FALSE;
/* True when profiling is enabled both globally and for this squeue. */
#define	SQ_PROFILING(sqp) (squeue_profile && ((sqp)->sq_state & SQS_PROFILE))

/* Increment / add-to a per-squeue statistic kept in sq_stats. */
#define	SQSTAT(sqp, x) ((sqp)->sq_stats.x++)
#define	SQDELTA(sqp, x, d) ((sqp)->sq_stats.x += (d))

/*
 * Single static kstat data template: every squeue's kstat points its
 * ks_data here (see squeue_create()); presumably squeue_kstat_update()
 * copies the per-squeue values (via ks_private) into this template
 * under squeue_kstat_lock — verify against its definition.
 */
struct squeue_kstat {
	kstat_named_t	sq_count;
	kstat_named_t	sq_max_qlen;
	kstat_named_t	sq_npackets_worker;
	kstat_named_t	sq_npackets_intr;
	kstat_named_t	sq_npackets_other;
	kstat_named_t	sq_nqueued_intr;
	kstat_named_t	sq_nqueued_other;
	kstat_named_t	sq_ndrains_worker;
	kstat_named_t	sq_ndrains_intr;
	kstat_named_t	sq_ndrains_other;
	kstat_named_t	sq_time_worker;
	kstat_named_t	sq_time_intr;
	kstat_named_t	sq_time_other;
} squeue_kstat = {
	{ "count",		KSTAT_DATA_UINT64 },
	{ "max_qlen",		KSTAT_DATA_UINT64 },
	{ "packets_worker",	KSTAT_DATA_UINT64 },
	{ "packets_intr",	KSTAT_DATA_UINT64 },
	{ "packets_other",	KSTAT_DATA_UINT64 },
	{ "queued_intr",	KSTAT_DATA_UINT64 },
	{ "queued_other",	KSTAT_DATA_UINT64 },
	{ "ndrains_worker",	KSTAT_DATA_UINT64 },
	{ "ndrains_intr",	KSTAT_DATA_UINT64 },
	{ "ndrains_other",	KSTAT_DATA_UINT64 },
	{ "time_worker",	KSTAT_DATA_UINT64 },
	{ "time_intr",		KSTAT_DATA_UINT64 },
	{ "time_other",		KSTAT_DATA_UINT64 },
};
#endif
285*7c478bd9Sstevel@tonic-gate 
/*
 * Take post-enqueue action for a queue that nobody is processing:
 * either let a pending wakeup timeout stand, arm a new one, or signal
 * the worker thread directly.
 *
 * NOTE: this macro expands inside a void function and several paths
 * below 'return' from the *caller*.  It must be entered with sq_lock
 * held; every path drops the lock before returning or falling through
 * (asserted at the bottom).
 */
#define	SQUEUE_WORKER_WAKEUP(sqp) {					\
	timeout_id_t tid = (sqp)->sq_tid;				\
									\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));				\
	/*								\
	 * Queue isn't being processed, so take				\
	 * any post enqueue actions needed before leaving.		\
	 */								\
	if (tid != 0) {							\
		/*							\
		 * A wakeup timeout is already pending: we are waiting	\
		 * for an enter() to process the queued mblk(s).	\
		 */							\
		clock_t	waited = lbolt - (sqp)->sq_awaken;		\
									\
		if (TICK_TO_MSEC(waited) >= (sqp)->sq_wait) {		\
			/*						\
			 * Time is up and we have a worker thread	\
			 * waiting for work, so schedule it and		\
			 * cancel the now-unneeded timeout (after	\
			 * dropping sq_lock, to avoid contending on	\
			 * the timeout locks with sq_lock held).	\
			 */						\
			(sqp)->sq_tid = 0;				\
			(sqp)->sq_awaken = lbolt;			\
			cv_signal(&(sqp)->sq_async);			\
			mutex_exit(&(sqp)->sq_lock);			\
			(void) untimeout(tid);				\
			return;						\
		}							\
		mutex_exit(&(sqp)->sq_lock);				\
		return;							\
	} else if ((sqp)->sq_state & SQS_TMO_PROG) {			\
		/* Another thread is in the middle of arming one. */	\
		mutex_exit(&(sqp)->sq_lock);				\
		return;							\
	} else if ((sqp)->sq_wait != 0) {				\
		clock_t	wait = (sqp)->sq_wait;				\
		/*							\
		 * Wait up to sqp->sq_wait ms for an			\
		 * enter() to process this queue. We			\
		 * don't want to contend on timeout locks		\
		 * with sq_lock held for performance reasons,		\
		 * so drop the sq_lock before calling timeout		\
		 * but we need to check if timeout is required		\
		 * after re acquiring the sq_lock. Once			\
		 * the sq_lock is dropped, someone else could		\
		 * have processed the packet or the timeout could	\
		 * have already fired.					\
		 */							\
		(sqp)->sq_state |= SQS_TMO_PROG;			\
		mutex_exit(&(sqp)->sq_lock);				\
		tid = timeout(squeue_fire, (sqp), wait);		\
		mutex_enter(&(sqp)->sq_lock);				\
		/* Check again if we still need the timeout */		\
		if ((((sqp)->sq_state & (SQS_PROC|SQS_TMO_PROG)) ==	\
			SQS_TMO_PROG) && ((sqp)->sq_tid == 0) &&	\
			((sqp)->sq_first != NULL)) {			\
				(sqp)->sq_state &= ~SQS_TMO_PROG;	\
				(sqp)->sq_awaken = lbolt;		\
				(sqp)->sq_tid = tid;			\
				mutex_exit(&(sqp)->sq_lock);		\
				return;					\
		} else {						\
			if ((sqp)->sq_state & SQS_TMO_PROG) {		\
				/* No longer needed; cancel it. */	\
				(sqp)->sq_state &= ~SQS_TMO_PROG;	\
				mutex_exit(&(sqp)->sq_lock);		\
				(void) untimeout(tid);			\
			} else {					\
				/*					\
				 * The timer fired before we could 	\
				 * reacquire the sq_lock. squeue_fire	\
				 * removes the SQS_TMO_PROG flag	\
				 * and we don't need to	do anything	\
				 * else.				\
				 */					\
				mutex_exit(&(sqp)->sq_lock);		\
			}						\
		}							\
	} else {							\
		/*							\
		 * No wait configured: schedule the worker thread	\
		 * immediately.						\
		 */							\
		(sqp)->sq_awaken = lbolt;				\
		cv_signal(&(sqp)->sq_async);				\
		mutex_exit(&(sqp)->sq_lock);				\
	}								\
	ASSERT(MUTEX_NOT_HELD(&(sqp)->sq_lock)); 			\
}
370*7c478bd9Sstevel@tonic-gate 
/*
 * Append a single mblk to the tail of the squeue's list.  The function
 * to run and its argument ride in the mblk itself: b_queue holds the
 * 'proc' to call and b_prev holds 'arg'.  Caller must hold sq_lock.
 *
 * Fix: dropped the original leading "(mp)->b_queue = NULL;" — it was a
 * dead store, unconditionally overwritten by the (queue_t *)(proc)
 * assignment below with no intervening read (the ASSERTs in between
 * examine only b_prev/b_next).
 */
#define	ENQUEUE_MP(sqp, mp, proc, arg) {			\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
	ASSERT((mp)->b_prev == NULL && (mp)->b_next == NULL); 	\
	(mp)->b_queue = (queue_t *)(proc);			\
	(mp)->b_prev = (mblk_t *)(arg);				\
								\
	/* Link onto the tail (sq_last), or start the list. */	\
	if ((sqp)->sq_last != NULL)				\
		(sqp)->sq_last->b_next = (mp);			\
	else							\
		(sqp)->sq_first = (mp);				\
	(sqp)->sq_last = (mp);					\
	(sqp)->sq_count++;					\
	ASSERT((sqp)->sq_count > 0);				\
	DTRACE_PROBE2(squeue__enqueue, squeue_t *, sqp,		\
	    mblk_t *, mp);					\
}
391*7c478bd9Sstevel@tonic-gate 
392*7c478bd9Sstevel@tonic-gate 
/*
 * Append a pre-built chain of (cnt) mblks — head (mp), tail (tail) —
 * to the squeue.  Unlike ENQUEUE_MP, each mblk in the chain is assumed
 * to already carry its proc/arg in b_queue/b_prev (see
 * squeue_enter_chain()).  Caller must hold sq_lock.
 */
#define	ENQUEUE_CHAIN(sqp, mp, tail, cnt) {			\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
								\
	if ((sqp)->sq_last == NULL)				\
		(sqp)->sq_first = (mp);				\
	else							\
		(sqp)->sq_last->b_next = (mp);			\
	(sqp)->sq_last = (tail);				\
	(sqp)->sq_count += (cnt);				\
	ASSERT((sqp)->sq_count > 0);				\
	DTRACE_PROBE4(squeue__enqueuechain, squeue_t *, sqp,	\
		mblk_t *, mp, mblk_t *, tail, int, cnt);	\
}
410*7c478bd9Sstevel@tonic-gate 
/*
 * Enter polling mode on the given rx ring: ask the driver (rr_blank)
 * to hold off rx interrupts for roughly the time we expect to need to
 * drain the current backlog (avg drain time per packet * queue count),
 * capped at the ring's maximum blank time / packet count.  Caller must
 * hold sq_lock.
 */
#define	SQS_POLLING_ON(sqp, rx_ring) {				\
	ASSERT(rx_ring != NULL);				\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
	rx_ring->rr_blank(rx_ring->rr_handle,			\
	    MIN(sqp->sq_avg_drain_time * sqp->sq_count,		\
	    rx_ring->rr_max_blank_time),			\
	    rx_ring->rr_max_pkt_cnt);				\
	rx_ring->rr_poll_state |= ILL_POLLING;			\
	rx_ring->rr_poll_time = lbolt;				\
}
421*7c478bd9Sstevel@tonic-gate 
422*7c478bd9Sstevel@tonic-gate 
/*
 * Leave polling mode: restore the driver's minimum interrupt-blanking
 * parameters.  Caller must hold sq_lock.
 *
 * NOTE(review): ILL_POLLING is not cleared here; presumably the caller
 * clears rr_poll_state — verify at the call sites.
 */
#define	SQS_POLLING_OFF(sqp, rx_ring) {				\
	ASSERT(rx_ring != NULL);				\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
	rx_ring->rr_blank(rx_ring->rr_handle,			\
	    rx_ring->rr_min_blank_time,				\
	    rx_ring->rr_min_pkt_cnt);				\
}
430*7c478bd9Sstevel@tonic-gate 
431*7c478bd9Sstevel@tonic-gate void
432*7c478bd9Sstevel@tonic-gate squeue_init(void)
433*7c478bd9Sstevel@tonic-gate {
434*7c478bd9Sstevel@tonic-gate 	squeue_cache = kmem_cache_create("squeue_cache",
435*7c478bd9Sstevel@tonic-gate 	    sizeof (squeue_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
436*7c478bd9Sstevel@tonic-gate 
437*7c478bd9Sstevel@tonic-gate 	squeue_intrdrain_tick = MSEC_TO_TICK_ROUNDUP(squeue_intrdrain_ms);
438*7c478bd9Sstevel@tonic-gate 	squeue_writerdrain_tick = MSEC_TO_TICK_ROUNDUP(squeue_writerdrain_ms);
439*7c478bd9Sstevel@tonic-gate 	squeue_workerdrain_tick = MSEC_TO_TICK_ROUNDUP(squeue_workerdrain_ms);
440*7c478bd9Sstevel@tonic-gate 	squeue_workerwait_tick = MSEC_TO_TICK_ROUNDUP(squeue_workerwait_ms);
441*7c478bd9Sstevel@tonic-gate }
442*7c478bd9Sstevel@tonic-gate 
443*7c478bd9Sstevel@tonic-gate /* ARGSUSED */
444*7c478bd9Sstevel@tonic-gate squeue_t *
445*7c478bd9Sstevel@tonic-gate squeue_create(char *name, processorid_t bind, clock_t wait, pri_t pri)
446*7c478bd9Sstevel@tonic-gate {
447*7c478bd9Sstevel@tonic-gate 	squeue_t *sqp = kmem_cache_alloc(squeue_cache, KM_SLEEP);
448*7c478bd9Sstevel@tonic-gate 
449*7c478bd9Sstevel@tonic-gate 	bzero(sqp, sizeof (squeue_t));
450*7c478bd9Sstevel@tonic-gate 	(void) strncpy(sqp->sq_name, name, SQ_NAMELEN + 1);
451*7c478bd9Sstevel@tonic-gate 	sqp->sq_name[SQ_NAMELEN] = '\0';
452*7c478bd9Sstevel@tonic-gate 
453*7c478bd9Sstevel@tonic-gate 	sqp->sq_bind = bind;
454*7c478bd9Sstevel@tonic-gate 	sqp->sq_wait = MSEC_TO_TICK(wait);
455*7c478bd9Sstevel@tonic-gate 	sqp->sq_avg_drain_time =
456*7c478bd9Sstevel@tonic-gate 	    drv_hztousec(squeue_intrdrain_tick)/squeue_intrdrain_tick;
457*7c478bd9Sstevel@tonic-gate 
458*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
459*7c478bd9Sstevel@tonic-gate 	if ((sqp->sq_kstat = kstat_create("ip", bind, name,
460*7c478bd9Sstevel@tonic-gate 		"net", KSTAT_TYPE_NAMED,
461*7c478bd9Sstevel@tonic-gate 		sizeof (squeue_kstat) / sizeof (kstat_named_t),
462*7c478bd9Sstevel@tonic-gate 		KSTAT_FLAG_VIRTUAL)) != NULL) {
463*7c478bd9Sstevel@tonic-gate 		sqp->sq_kstat->ks_lock = &squeue_kstat_lock;
464*7c478bd9Sstevel@tonic-gate 		sqp->sq_kstat->ks_data = &squeue_kstat;
465*7c478bd9Sstevel@tonic-gate 		sqp->sq_kstat->ks_update = squeue_kstat_update;
466*7c478bd9Sstevel@tonic-gate 		sqp->sq_kstat->ks_private = sqp;
467*7c478bd9Sstevel@tonic-gate 		kstat_install(sqp->sq_kstat);
468*7c478bd9Sstevel@tonic-gate 	}
469*7c478bd9Sstevel@tonic-gate #endif
470*7c478bd9Sstevel@tonic-gate 
471*7c478bd9Sstevel@tonic-gate 	sqp->sq_worker = thread_create(NULL, 0, squeue_worker,
472*7c478bd9Sstevel@tonic-gate 	    sqp, 0, &p0, TS_RUN, pri);
473*7c478bd9Sstevel@tonic-gate 
474*7c478bd9Sstevel@tonic-gate 	return (sqp);
475*7c478bd9Sstevel@tonic-gate }
476*7c478bd9Sstevel@tonic-gate 
477*7c478bd9Sstevel@tonic-gate /* ARGSUSED */
478*7c478bd9Sstevel@tonic-gate void
479*7c478bd9Sstevel@tonic-gate squeue_bind(squeue_t *sqp, processorid_t bind)
480*7c478bd9Sstevel@tonic-gate {
481*7c478bd9Sstevel@tonic-gate 	ASSERT(bind == -1);
482*7c478bd9Sstevel@tonic-gate 
483*7c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
484*7c478bd9Sstevel@tonic-gate 	if (sqp->sq_state & SQS_BOUND) {
485*7c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
486*7c478bd9Sstevel@tonic-gate 		return;
487*7c478bd9Sstevel@tonic-gate 	}
488*7c478bd9Sstevel@tonic-gate 
489*7c478bd9Sstevel@tonic-gate 	sqp->sq_state |= SQS_BOUND;
490*7c478bd9Sstevel@tonic-gate 	mutex_exit(&sqp->sq_lock);
491*7c478bd9Sstevel@tonic-gate 
492*7c478bd9Sstevel@tonic-gate 	thread_affinity_set(sqp->sq_worker, sqp->sq_bind);
493*7c478bd9Sstevel@tonic-gate }
494*7c478bd9Sstevel@tonic-gate 
495*7c478bd9Sstevel@tonic-gate void
496*7c478bd9Sstevel@tonic-gate squeue_unbind(squeue_t *sqp)
497*7c478bd9Sstevel@tonic-gate {
498*7c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
499*7c478bd9Sstevel@tonic-gate 	if (!(sqp->sq_state & SQS_BOUND)) {
500*7c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
501*7c478bd9Sstevel@tonic-gate 		return;
502*7c478bd9Sstevel@tonic-gate 	}
503*7c478bd9Sstevel@tonic-gate 
504*7c478bd9Sstevel@tonic-gate 	sqp->sq_state &= ~SQS_BOUND;
505*7c478bd9Sstevel@tonic-gate 	mutex_exit(&sqp->sq_lock);
506*7c478bd9Sstevel@tonic-gate 
507*7c478bd9Sstevel@tonic-gate 	thread_affinity_clear(sqp->sq_worker);
508*7c478bd9Sstevel@tonic-gate }
509*7c478bd9Sstevel@tonic-gate 
510*7c478bd9Sstevel@tonic-gate /*
511*7c478bd9Sstevel@tonic-gate  * squeue_enter() - enter squeue sqp with mblk mp (which can be
512*7c478bd9Sstevel@tonic-gate  * a chain), while tail points to the end and cnt in number of
513*7c478bd9Sstevel@tonic-gate  * mblks in the chain.
514*7c478bd9Sstevel@tonic-gate  *
515*7c478bd9Sstevel@tonic-gate  * For a chain of single packet (i.e. mp == tail), go through the
516*7c478bd9Sstevel@tonic-gate  * fast path if no one is processing the squeue and nothing is queued.
517*7c478bd9Sstevel@tonic-gate  *
518*7c478bd9Sstevel@tonic-gate  * The proc and arg for each mblk is already stored in the mblk in
519*7c478bd9Sstevel@tonic-gate  * appropriate places.
520*7c478bd9Sstevel@tonic-gate  */
521*7c478bd9Sstevel@tonic-gate void
522*7c478bd9Sstevel@tonic-gate squeue_enter_chain(squeue_t *sqp, mblk_t *mp, mblk_t *tail,
523*7c478bd9Sstevel@tonic-gate     uint32_t cnt, uint8_t tag)
524*7c478bd9Sstevel@tonic-gate {
525*7c478bd9Sstevel@tonic-gate 	int		interrupt = servicing_interrupt();
526*7c478bd9Sstevel@tonic-gate 	void 		*arg;
527*7c478bd9Sstevel@tonic-gate 	sqproc_t	proc;
528*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
529*7c478bd9Sstevel@tonic-gate 	hrtime_t 	start, delta;
530*7c478bd9Sstevel@tonic-gate #endif
531*7c478bd9Sstevel@tonic-gate 
532*7c478bd9Sstevel@tonic-gate 	ASSERT(sqp != NULL);
533*7c478bd9Sstevel@tonic-gate 	ASSERT(mp != NULL);
534*7c478bd9Sstevel@tonic-gate 	ASSERT(tail != NULL);
535*7c478bd9Sstevel@tonic-gate 	ASSERT(cnt > 0);
536*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
537*7c478bd9Sstevel@tonic-gate 
538*7c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
539*7c478bd9Sstevel@tonic-gate 	if (!(sqp->sq_state & SQS_PROC)) {
540*7c478bd9Sstevel@tonic-gate 		/*
541*7c478bd9Sstevel@tonic-gate 		 * See if anything is already queued. If we are the
542*7c478bd9Sstevel@tonic-gate 		 * first packet, do inline processing else queue the
543*7c478bd9Sstevel@tonic-gate 		 * packet and do the drain.
544*7c478bd9Sstevel@tonic-gate 		 */
545*7c478bd9Sstevel@tonic-gate 		sqp->sq_run = curthread;
546*7c478bd9Sstevel@tonic-gate 		if (sqp->sq_first == NULL && cnt == 1) {
547*7c478bd9Sstevel@tonic-gate 			/*
548*7c478bd9Sstevel@tonic-gate 			 * Fast-path, ok to process and nothing queued.
549*7c478bd9Sstevel@tonic-gate 			 */
550*7c478bd9Sstevel@tonic-gate 			sqp->sq_state |= (SQS_PROC|SQS_FAST);
551*7c478bd9Sstevel@tonic-gate 			mutex_exit(&sqp->sq_lock);
552*7c478bd9Sstevel@tonic-gate 
553*7c478bd9Sstevel@tonic-gate 			/*
554*7c478bd9Sstevel@tonic-gate 			 * We are the chain of 1 packet so
555*7c478bd9Sstevel@tonic-gate 			 * go through this fast path.
556*7c478bd9Sstevel@tonic-gate 			 */
557*7c478bd9Sstevel@tonic-gate 			arg = mp->b_prev;
558*7c478bd9Sstevel@tonic-gate 			mp->b_prev = NULL;
559*7c478bd9Sstevel@tonic-gate 			proc = (sqproc_t)mp->b_queue;
560*7c478bd9Sstevel@tonic-gate 			mp->b_queue = NULL;
561*7c478bd9Sstevel@tonic-gate 
562*7c478bd9Sstevel@tonic-gate 			ASSERT(proc != NULL);
563*7c478bd9Sstevel@tonic-gate 			ASSERT(arg != NULL);
564*7c478bd9Sstevel@tonic-gate 			ASSERT(mp->b_next == NULL);
565*7c478bd9Sstevel@tonic-gate 
566*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
567*7c478bd9Sstevel@tonic-gate 			sqp->sq_isintr = interrupt;
568*7c478bd9Sstevel@tonic-gate 			sqp->sq_curmp = mp;
569*7c478bd9Sstevel@tonic-gate 			sqp->sq_curproc = proc;
570*7c478bd9Sstevel@tonic-gate 			sqp->sq_connp = arg;
571*7c478bd9Sstevel@tonic-gate 			mp->b_tag = sqp->sq_tag = tag;
572*7c478bd9Sstevel@tonic-gate #endif
573*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
574*7c478bd9Sstevel@tonic-gate 			if (SQ_PROFILING(sqp)) {
575*7c478bd9Sstevel@tonic-gate 				if (interrupt)
576*7c478bd9Sstevel@tonic-gate 					SQSTAT(sqp, sq_npackets_intr);
577*7c478bd9Sstevel@tonic-gate 				else
578*7c478bd9Sstevel@tonic-gate 					SQSTAT(sqp, sq_npackets_other);
579*7c478bd9Sstevel@tonic-gate 				start = gethrtime();
580*7c478bd9Sstevel@tonic-gate 			}
581*7c478bd9Sstevel@tonic-gate #endif
582*7c478bd9Sstevel@tonic-gate 			((conn_t *)arg)->conn_on_sqp = B_TRUE;
583*7c478bd9Sstevel@tonic-gate 			DTRACE_PROBE3(squeue__proc__start, squeue_t *,
584*7c478bd9Sstevel@tonic-gate 			    sqp, mblk_t *, mp, conn_t *, arg);
585*7c478bd9Sstevel@tonic-gate 			(*proc)(arg, mp, sqp);
586*7c478bd9Sstevel@tonic-gate 			DTRACE_PROBE2(squeue__proc__end, squeue_t *,
587*7c478bd9Sstevel@tonic-gate 			    sqp, conn_t *, arg);
588*7c478bd9Sstevel@tonic-gate 			((conn_t *)arg)->conn_on_sqp = B_FALSE;
589*7c478bd9Sstevel@tonic-gate 
590*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
591*7c478bd9Sstevel@tonic-gate 			if (SQ_PROFILING(sqp)) {
592*7c478bd9Sstevel@tonic-gate 				delta = gethrtime() - start;
593*7c478bd9Sstevel@tonic-gate 				if (interrupt)
594*7c478bd9Sstevel@tonic-gate 					SQDELTA(sqp, sq_time_intr, delta);
595*7c478bd9Sstevel@tonic-gate 				else
596*7c478bd9Sstevel@tonic-gate 					SQDELTA(sqp, sq_time_other, delta);
597*7c478bd9Sstevel@tonic-gate 			}
598*7c478bd9Sstevel@tonic-gate #endif
599*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
600*7c478bd9Sstevel@tonic-gate 			sqp->sq_curmp = NULL;
601*7c478bd9Sstevel@tonic-gate 			sqp->sq_curproc = NULL;
602*7c478bd9Sstevel@tonic-gate 			sqp->sq_connp = NULL;
603*7c478bd9Sstevel@tonic-gate 			sqp->sq_isintr = 0;
604*7c478bd9Sstevel@tonic-gate #endif
605*7c478bd9Sstevel@tonic-gate 
606*7c478bd9Sstevel@tonic-gate 			CONN_DEC_REF((conn_t *)arg);
607*7c478bd9Sstevel@tonic-gate 			ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
608*7c478bd9Sstevel@tonic-gate 			mutex_enter(&sqp->sq_lock);
609*7c478bd9Sstevel@tonic-gate 			sqp->sq_state &= ~(SQS_PROC|SQS_FAST);
610*7c478bd9Sstevel@tonic-gate 			if (sqp->sq_first == NULL) {
611*7c478bd9Sstevel@tonic-gate 				/*
612*7c478bd9Sstevel@tonic-gate 				 * We processed inline our packet and
613*7c478bd9Sstevel@tonic-gate 				 * nothing new has arrived. We are done.
614*7c478bd9Sstevel@tonic-gate 				 */
615*7c478bd9Sstevel@tonic-gate 				sqp->sq_run = NULL;
616*7c478bd9Sstevel@tonic-gate 				mutex_exit(&sqp->sq_lock);
617*7c478bd9Sstevel@tonic-gate 				return;
618*7c478bd9Sstevel@tonic-gate 			} else if (sqp->sq_bind != CPU->cpu_id) {
619*7c478bd9Sstevel@tonic-gate 				/*
620*7c478bd9Sstevel@tonic-gate 				 * If the current thread is not running
621*7c478bd9Sstevel@tonic-gate 				 * on the CPU to which this squeue is bound,
622*7c478bd9Sstevel@tonic-gate 				 * then don't allow it to drain.
623*7c478bd9Sstevel@tonic-gate 				 */
624*7c478bd9Sstevel@tonic-gate 				sqp->sq_run = NULL;
625*7c478bd9Sstevel@tonic-gate 				SQUEUE_WORKER_WAKEUP(sqp);
626*7c478bd9Sstevel@tonic-gate 				return;
627*7c478bd9Sstevel@tonic-gate 			}
628*7c478bd9Sstevel@tonic-gate 		} else {
629*7c478bd9Sstevel@tonic-gate 			ENQUEUE_CHAIN(sqp, mp, tail, cnt);
630*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
631*7c478bd9Sstevel@tonic-gate 			mp->b_tag = tag;
632*7c478bd9Sstevel@tonic-gate #endif
633*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
634*7c478bd9Sstevel@tonic-gate 			if (SQ_PROFILING(sqp)) {
635*7c478bd9Sstevel@tonic-gate 				if (servicing_interrupt())
636*7c478bd9Sstevel@tonic-gate 					SQSTAT(sqp, sq_nqueued_intr);
637*7c478bd9Sstevel@tonic-gate 				else
638*7c478bd9Sstevel@tonic-gate 					SQSTAT(sqp, sq_nqueued_other);
639*7c478bd9Sstevel@tonic-gate 				if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
640*7c478bd9Sstevel@tonic-gate 					sqp->sq_stats.sq_max_qlen =
641*7c478bd9Sstevel@tonic-gate 					    sqp->sq_count;
642*7c478bd9Sstevel@tonic-gate 			}
643*7c478bd9Sstevel@tonic-gate #endif
644*7c478bd9Sstevel@tonic-gate 		}
645*7c478bd9Sstevel@tonic-gate 
646*7c478bd9Sstevel@tonic-gate 		/*
647*7c478bd9Sstevel@tonic-gate 		 * We are here because either we couldn't do inline
648*7c478bd9Sstevel@tonic-gate 		 * processing (because something was already queued),
649*7c478bd9Sstevel@tonic-gate 		 * or we had a chanin of more than one packet,
650*7c478bd9Sstevel@tonic-gate 		 * or something else arrived after we were done with
651*7c478bd9Sstevel@tonic-gate 		 * inline processing.
652*7c478bd9Sstevel@tonic-gate 		 */
653*7c478bd9Sstevel@tonic-gate 		ASSERT(MUTEX_HELD(&sqp->sq_lock));
654*7c478bd9Sstevel@tonic-gate 		ASSERT(sqp->sq_first != NULL);
655*7c478bd9Sstevel@tonic-gate 
656*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
657*7c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
658*7c478bd9Sstevel@tonic-gate 			start = gethrtime();
659*7c478bd9Sstevel@tonic-gate 		}
660*7c478bd9Sstevel@tonic-gate #endif
661*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
662*7c478bd9Sstevel@tonic-gate 		sqp->sq_isintr = interrupt;
663*7c478bd9Sstevel@tonic-gate #endif
664*7c478bd9Sstevel@tonic-gate 
665*7c478bd9Sstevel@tonic-gate 		if (interrupt) {
666*7c478bd9Sstevel@tonic-gate 			squeue_drain(sqp, SQS_ENTER, lbolt +
667*7c478bd9Sstevel@tonic-gate 			    squeue_intrdrain_tick);
668*7c478bd9Sstevel@tonic-gate 		} else {
669*7c478bd9Sstevel@tonic-gate 			squeue_drain(sqp, SQS_USER, lbolt +
670*7c478bd9Sstevel@tonic-gate 			    squeue_writerdrain_tick);
671*7c478bd9Sstevel@tonic-gate 		}
672*7c478bd9Sstevel@tonic-gate 
673*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
674*7c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
675*7c478bd9Sstevel@tonic-gate 			delta = gethrtime() - start;
676*7c478bd9Sstevel@tonic-gate 			if (interrupt)
677*7c478bd9Sstevel@tonic-gate 				SQDELTA(sqp, sq_time_intr, delta);
678*7c478bd9Sstevel@tonic-gate 			else
679*7c478bd9Sstevel@tonic-gate 				SQDELTA(sqp, sq_time_other, delta);
680*7c478bd9Sstevel@tonic-gate 		}
681*7c478bd9Sstevel@tonic-gate #endif
682*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
683*7c478bd9Sstevel@tonic-gate 		sqp->sq_isintr = 0;
684*7c478bd9Sstevel@tonic-gate #endif
685*7c478bd9Sstevel@tonic-gate 
686*7c478bd9Sstevel@tonic-gate 		/*
687*7c478bd9Sstevel@tonic-gate 		 * If we didn't do a complete drain, the worker
688*7c478bd9Sstevel@tonic-gate 		 * thread was already signalled by squeue_drain.
689*7c478bd9Sstevel@tonic-gate 		 */
690*7c478bd9Sstevel@tonic-gate 		sqp->sq_run = NULL;
691*7c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
692*7c478bd9Sstevel@tonic-gate 		return;
693*7c478bd9Sstevel@tonic-gate 	} else {
694*7c478bd9Sstevel@tonic-gate 		ASSERT(sqp->sq_run != NULL);
695*7c478bd9Sstevel@tonic-gate 		/*
696*7c478bd9Sstevel@tonic-gate 		 * Queue is already being processed. Just enqueue
697*7c478bd9Sstevel@tonic-gate 		 * the packet and go away.
698*7c478bd9Sstevel@tonic-gate 		 */
699*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
700*7c478bd9Sstevel@tonic-gate 		mp->b_tag = tag;
701*7c478bd9Sstevel@tonic-gate #endif
702*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
703*7c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
704*7c478bd9Sstevel@tonic-gate 			if (servicing_interrupt())
705*7c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_nqueued_intr);
706*7c478bd9Sstevel@tonic-gate 			else
707*7c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_nqueued_other);
708*7c478bd9Sstevel@tonic-gate 			if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
709*7c478bd9Sstevel@tonic-gate 				sqp->sq_stats.sq_max_qlen = sqp->sq_count;
710*7c478bd9Sstevel@tonic-gate 		}
711*7c478bd9Sstevel@tonic-gate #endif
712*7c478bd9Sstevel@tonic-gate 
713*7c478bd9Sstevel@tonic-gate 		ENQUEUE_CHAIN(sqp, mp, tail, cnt);
714*7c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
715*7c478bd9Sstevel@tonic-gate 		return;
716*7c478bd9Sstevel@tonic-gate 	}
717*7c478bd9Sstevel@tonic-gate }
718*7c478bd9Sstevel@tonic-gate 
719*7c478bd9Sstevel@tonic-gate /*
720*7c478bd9Sstevel@tonic-gate  * squeue_enter() - enter squeue *sqp with mblk *mp with argument of *arg.
721*7c478bd9Sstevel@tonic-gate  */
722*7c478bd9Sstevel@tonic-gate void
723*7c478bd9Sstevel@tonic-gate squeue_enter(squeue_t *sqp, mblk_t *mp, sqproc_t proc, void *arg,
724*7c478bd9Sstevel@tonic-gate     uint8_t tag)
725*7c478bd9Sstevel@tonic-gate {
726*7c478bd9Sstevel@tonic-gate 	int	interrupt = servicing_interrupt();
727*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
728*7c478bd9Sstevel@tonic-gate 	hrtime_t start, delta;
729*7c478bd9Sstevel@tonic-gate #endif
730*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
731*7c478bd9Sstevel@tonic-gate 	conn_t 	*connp = (conn_t *)arg;
732*7c478bd9Sstevel@tonic-gate 	ASSERT(connp->conn_tcp->tcp_connp == connp);
733*7c478bd9Sstevel@tonic-gate #endif
734*7c478bd9Sstevel@tonic-gate 
735*7c478bd9Sstevel@tonic-gate 	ASSERT(proc != NULL);
736*7c478bd9Sstevel@tonic-gate 	ASSERT(sqp != NULL);
737*7c478bd9Sstevel@tonic-gate 	ASSERT(mp != NULL);
738*7c478bd9Sstevel@tonic-gate 	ASSERT(mp->b_next == NULL);
739*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
740*7c478bd9Sstevel@tonic-gate 
741*7c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
742*7c478bd9Sstevel@tonic-gate 	if (!(sqp->sq_state & SQS_PROC)) {
743*7c478bd9Sstevel@tonic-gate 		/*
744*7c478bd9Sstevel@tonic-gate 		 * See if anything is already queued. If we are the
745*7c478bd9Sstevel@tonic-gate 		 * first packet, do inline processing else queue the
746*7c478bd9Sstevel@tonic-gate 		 * packet and do the drain.
747*7c478bd9Sstevel@tonic-gate 		 */
748*7c478bd9Sstevel@tonic-gate 		sqp->sq_run = curthread;
749*7c478bd9Sstevel@tonic-gate 		if (sqp->sq_first == NULL) {
750*7c478bd9Sstevel@tonic-gate 			/*
751*7c478bd9Sstevel@tonic-gate 			 * Fast-path, ok to process and nothing queued.
752*7c478bd9Sstevel@tonic-gate 			 */
753*7c478bd9Sstevel@tonic-gate 			sqp->sq_state |= (SQS_PROC|SQS_FAST);
754*7c478bd9Sstevel@tonic-gate 			mutex_exit(&sqp->sq_lock);
755*7c478bd9Sstevel@tonic-gate 
756*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
757*7c478bd9Sstevel@tonic-gate 			sqp->sq_isintr = interrupt;
758*7c478bd9Sstevel@tonic-gate 			sqp->sq_curmp = mp;
759*7c478bd9Sstevel@tonic-gate 			sqp->sq_curproc = proc;
760*7c478bd9Sstevel@tonic-gate 			sqp->sq_connp = connp;
761*7c478bd9Sstevel@tonic-gate 			mp->b_tag = sqp->sq_tag = tag;
762*7c478bd9Sstevel@tonic-gate #endif
763*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
764*7c478bd9Sstevel@tonic-gate 			if (SQ_PROFILING(sqp)) {
765*7c478bd9Sstevel@tonic-gate 				if (interrupt)
766*7c478bd9Sstevel@tonic-gate 					SQSTAT(sqp, sq_npackets_intr);
767*7c478bd9Sstevel@tonic-gate 				else
768*7c478bd9Sstevel@tonic-gate 					SQSTAT(sqp, sq_npackets_other);
769*7c478bd9Sstevel@tonic-gate 				start = gethrtime();
770*7c478bd9Sstevel@tonic-gate 			}
771*7c478bd9Sstevel@tonic-gate #endif
772*7c478bd9Sstevel@tonic-gate 			((conn_t *)arg)->conn_on_sqp = B_TRUE;
773*7c478bd9Sstevel@tonic-gate 			DTRACE_PROBE3(squeue__proc__start, squeue_t *,
774*7c478bd9Sstevel@tonic-gate 			    sqp, mblk_t *, mp, conn_t *, arg);
775*7c478bd9Sstevel@tonic-gate 			(*proc)(arg, mp, sqp);
776*7c478bd9Sstevel@tonic-gate 			DTRACE_PROBE2(squeue__proc__end, squeue_t *,
777*7c478bd9Sstevel@tonic-gate 			    sqp, conn_t *, arg);
778*7c478bd9Sstevel@tonic-gate 			((conn_t *)arg)->conn_on_sqp = B_FALSE;
779*7c478bd9Sstevel@tonic-gate 
780*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
781*7c478bd9Sstevel@tonic-gate 			if (SQ_PROFILING(sqp)) {
782*7c478bd9Sstevel@tonic-gate 				delta = gethrtime() - start;
783*7c478bd9Sstevel@tonic-gate 				if (interrupt)
784*7c478bd9Sstevel@tonic-gate 					SQDELTA(sqp, sq_time_intr, delta);
785*7c478bd9Sstevel@tonic-gate 				else
786*7c478bd9Sstevel@tonic-gate 					SQDELTA(sqp, sq_time_other, delta);
787*7c478bd9Sstevel@tonic-gate 			}
788*7c478bd9Sstevel@tonic-gate #endif
789*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
790*7c478bd9Sstevel@tonic-gate 			sqp->sq_curmp = NULL;
791*7c478bd9Sstevel@tonic-gate 			sqp->sq_curproc = NULL;
792*7c478bd9Sstevel@tonic-gate 			sqp->sq_connp = NULL;
793*7c478bd9Sstevel@tonic-gate 			sqp->sq_isintr = 0;
794*7c478bd9Sstevel@tonic-gate #endif
795*7c478bd9Sstevel@tonic-gate 
796*7c478bd9Sstevel@tonic-gate 			CONN_DEC_REF((conn_t *)arg);
797*7c478bd9Sstevel@tonic-gate 			ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
798*7c478bd9Sstevel@tonic-gate 			mutex_enter(&sqp->sq_lock);
799*7c478bd9Sstevel@tonic-gate 			sqp->sq_state &= ~(SQS_PROC|SQS_FAST);
800*7c478bd9Sstevel@tonic-gate 			if (sqp->sq_first == NULL) {
801*7c478bd9Sstevel@tonic-gate 				/*
802*7c478bd9Sstevel@tonic-gate 				 * We processed inline our packet and
803*7c478bd9Sstevel@tonic-gate 				 * nothing new has arrived. We are done.
804*7c478bd9Sstevel@tonic-gate 				 */
805*7c478bd9Sstevel@tonic-gate 				sqp->sq_run = NULL;
806*7c478bd9Sstevel@tonic-gate 				mutex_exit(&sqp->sq_lock);
807*7c478bd9Sstevel@tonic-gate 				return;
808*7c478bd9Sstevel@tonic-gate 			} else if (sqp->sq_bind != CPU->cpu_id) {
809*7c478bd9Sstevel@tonic-gate 				/*
810*7c478bd9Sstevel@tonic-gate 				 * If the current thread is not running
811*7c478bd9Sstevel@tonic-gate 				 * on the CPU to which this squeue is bound,
812*7c478bd9Sstevel@tonic-gate 				 * then don't allow it to drain.
813*7c478bd9Sstevel@tonic-gate 				 */
814*7c478bd9Sstevel@tonic-gate 				sqp->sq_run = NULL;
815*7c478bd9Sstevel@tonic-gate 				SQUEUE_WORKER_WAKEUP(sqp);
816*7c478bd9Sstevel@tonic-gate 				return;
817*7c478bd9Sstevel@tonic-gate 			}
818*7c478bd9Sstevel@tonic-gate 		} else {
819*7c478bd9Sstevel@tonic-gate 			ENQUEUE_MP(sqp, mp, proc, arg);
820*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
821*7c478bd9Sstevel@tonic-gate 			mp->b_tag = tag;
822*7c478bd9Sstevel@tonic-gate #endif
823*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
824*7c478bd9Sstevel@tonic-gate 			if (SQ_PROFILING(sqp)) {
825*7c478bd9Sstevel@tonic-gate 				if (servicing_interrupt())
826*7c478bd9Sstevel@tonic-gate 					SQSTAT(sqp, sq_nqueued_intr);
827*7c478bd9Sstevel@tonic-gate 				else
828*7c478bd9Sstevel@tonic-gate 					SQSTAT(sqp, sq_nqueued_other);
829*7c478bd9Sstevel@tonic-gate 				if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
830*7c478bd9Sstevel@tonic-gate 					sqp->sq_stats.sq_max_qlen =
831*7c478bd9Sstevel@tonic-gate 					    sqp->sq_count;
832*7c478bd9Sstevel@tonic-gate 			}
833*7c478bd9Sstevel@tonic-gate #endif
834*7c478bd9Sstevel@tonic-gate 		}
835*7c478bd9Sstevel@tonic-gate 
836*7c478bd9Sstevel@tonic-gate 		/*
837*7c478bd9Sstevel@tonic-gate 		 * We are here because either we couldn't do inline
838*7c478bd9Sstevel@tonic-gate 		 * processing (because something was already queued)
839*7c478bd9Sstevel@tonic-gate 		 * or something else arrived after we were done with
840*7c478bd9Sstevel@tonic-gate 		 * inline processing.
841*7c478bd9Sstevel@tonic-gate 		 */
842*7c478bd9Sstevel@tonic-gate 		ASSERT(MUTEX_HELD(&sqp->sq_lock));
843*7c478bd9Sstevel@tonic-gate 		ASSERT(sqp->sq_first != NULL);
844*7c478bd9Sstevel@tonic-gate 
845*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
846*7c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
847*7c478bd9Sstevel@tonic-gate 			start = gethrtime();
848*7c478bd9Sstevel@tonic-gate 		}
849*7c478bd9Sstevel@tonic-gate #endif
850*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
851*7c478bd9Sstevel@tonic-gate 		sqp->sq_isintr = interrupt;
852*7c478bd9Sstevel@tonic-gate #endif
853*7c478bd9Sstevel@tonic-gate 
854*7c478bd9Sstevel@tonic-gate 		if (interrupt) {
855*7c478bd9Sstevel@tonic-gate 			squeue_drain(sqp, SQS_ENTER, lbolt +
856*7c478bd9Sstevel@tonic-gate 			    squeue_intrdrain_tick);
857*7c478bd9Sstevel@tonic-gate 		} else {
858*7c478bd9Sstevel@tonic-gate 			squeue_drain(sqp, SQS_USER, lbolt +
859*7c478bd9Sstevel@tonic-gate 			    squeue_writerdrain_tick);
860*7c478bd9Sstevel@tonic-gate 		}
861*7c478bd9Sstevel@tonic-gate 
862*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
863*7c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
864*7c478bd9Sstevel@tonic-gate 			delta = gethrtime() - start;
865*7c478bd9Sstevel@tonic-gate 			if (interrupt)
866*7c478bd9Sstevel@tonic-gate 				SQDELTA(sqp, sq_time_intr, delta);
867*7c478bd9Sstevel@tonic-gate 			else
868*7c478bd9Sstevel@tonic-gate 				SQDELTA(sqp, sq_time_other, delta);
869*7c478bd9Sstevel@tonic-gate 		}
870*7c478bd9Sstevel@tonic-gate #endif
871*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
872*7c478bd9Sstevel@tonic-gate 		sqp->sq_isintr = 0;
873*7c478bd9Sstevel@tonic-gate #endif
874*7c478bd9Sstevel@tonic-gate 
875*7c478bd9Sstevel@tonic-gate 		/*
876*7c478bd9Sstevel@tonic-gate 		 * If we didn't do a complete drain, the worker
877*7c478bd9Sstevel@tonic-gate 		 * thread was already signalled by squeue_drain.
878*7c478bd9Sstevel@tonic-gate 		 */
879*7c478bd9Sstevel@tonic-gate 		sqp->sq_run = NULL;
880*7c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
881*7c478bd9Sstevel@tonic-gate 		return;
882*7c478bd9Sstevel@tonic-gate 	} else {
883*7c478bd9Sstevel@tonic-gate 		ASSERT(sqp->sq_run != NULL);
884*7c478bd9Sstevel@tonic-gate 		/*
885*7c478bd9Sstevel@tonic-gate 		 * We let a thread processing a squeue reenter only
886*7c478bd9Sstevel@tonic-gate 		 * once. This helps the case of incoming connection
887*7c478bd9Sstevel@tonic-gate 		 * where a SYN-ACK-ACK that triggers the conn_ind
888*7c478bd9Sstevel@tonic-gate 		 * doesn't have to queue the packet if listener and
889*7c478bd9Sstevel@tonic-gate 		 * eager are on the same squeue. Also helps the
890*7c478bd9Sstevel@tonic-gate 		 * loopback connection where the two ends are bound
891*7c478bd9Sstevel@tonic-gate 		 * to the same squeue (which is typical on single
892*7c478bd9Sstevel@tonic-gate 		 * CPU machines).
893*7c478bd9Sstevel@tonic-gate 		 * We let the thread reenter only once for the fear
894*7c478bd9Sstevel@tonic-gate 		 * of stack getting blown with multiple traversal.
895*7c478bd9Sstevel@tonic-gate 		 */
896*7c478bd9Sstevel@tonic-gate 		if (!(sqp->sq_state & SQS_REENTER) &&
897*7c478bd9Sstevel@tonic-gate 		    (sqp->sq_run == curthread) &&
898*7c478bd9Sstevel@tonic-gate 		    (((conn_t *)arg)->conn_on_sqp == B_FALSE)) {
899*7c478bd9Sstevel@tonic-gate 			sqp->sq_state |= SQS_REENTER;
900*7c478bd9Sstevel@tonic-gate 			mutex_exit(&sqp->sq_lock);
901*7c478bd9Sstevel@tonic-gate 
902*7c478bd9Sstevel@tonic-gate 			((conn_t *)arg)->conn_on_sqp = B_TRUE;
903*7c478bd9Sstevel@tonic-gate 			DTRACE_PROBE3(squeue__proc__start, squeue_t *,
904*7c478bd9Sstevel@tonic-gate 			    sqp, mblk_t *, mp, conn_t *, arg);
905*7c478bd9Sstevel@tonic-gate 			(*proc)(arg, mp, sqp);
906*7c478bd9Sstevel@tonic-gate 			DTRACE_PROBE2(squeue__proc__end, squeue_t *,
907*7c478bd9Sstevel@tonic-gate 			    sqp, conn_t *, arg);
908*7c478bd9Sstevel@tonic-gate 			((conn_t *)arg)->conn_on_sqp = B_FALSE;
909*7c478bd9Sstevel@tonic-gate 			CONN_DEC_REF((conn_t *)arg);
910*7c478bd9Sstevel@tonic-gate 
911*7c478bd9Sstevel@tonic-gate 			mutex_enter(&sqp->sq_lock);
912*7c478bd9Sstevel@tonic-gate 			sqp->sq_state &= ~SQS_REENTER;
913*7c478bd9Sstevel@tonic-gate 			mutex_exit(&sqp->sq_lock);
914*7c478bd9Sstevel@tonic-gate 			return;
915*7c478bd9Sstevel@tonic-gate 		}
916*7c478bd9Sstevel@tonic-gate 		/*
917*7c478bd9Sstevel@tonic-gate 		 * Queue is already being processed. Just enqueue
918*7c478bd9Sstevel@tonic-gate 		 * the packet and go away.
919*7c478bd9Sstevel@tonic-gate 		 */
920*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
921*7c478bd9Sstevel@tonic-gate 		mp->b_tag = tag;
922*7c478bd9Sstevel@tonic-gate #endif
923*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
924*7c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
925*7c478bd9Sstevel@tonic-gate 			if (servicing_interrupt())
926*7c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_nqueued_intr);
927*7c478bd9Sstevel@tonic-gate 			else
928*7c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_nqueued_other);
929*7c478bd9Sstevel@tonic-gate 			if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
930*7c478bd9Sstevel@tonic-gate 				sqp->sq_stats.sq_max_qlen = sqp->sq_count;
931*7c478bd9Sstevel@tonic-gate 		}
932*7c478bd9Sstevel@tonic-gate #endif
933*7c478bd9Sstevel@tonic-gate 
934*7c478bd9Sstevel@tonic-gate 		ENQUEUE_MP(sqp, mp, proc, arg);
935*7c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
936*7c478bd9Sstevel@tonic-gate 		return;
937*7c478bd9Sstevel@tonic-gate 	}
938*7c478bd9Sstevel@tonic-gate }
939*7c478bd9Sstevel@tonic-gate 
940*7c478bd9Sstevel@tonic-gate void
941*7c478bd9Sstevel@tonic-gate squeue_enter_nodrain(squeue_t *sqp, mblk_t *mp, sqproc_t proc, void *arg,
942*7c478bd9Sstevel@tonic-gate     uint8_t tag)
943*7c478bd9Sstevel@tonic-gate {
944*7c478bd9Sstevel@tonic-gate 	int		interrupt = servicing_interrupt();
945*7c478bd9Sstevel@tonic-gate 	boolean_t	being_processed;
946*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
947*7c478bd9Sstevel@tonic-gate 	conn_t 		*connp = (conn_t *)arg;
948*7c478bd9Sstevel@tonic-gate #endif
949*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
950*7c478bd9Sstevel@tonic-gate 	hrtime_t 	start, delta;
951*7c478bd9Sstevel@tonic-gate #endif
952*7c478bd9Sstevel@tonic-gate 
953*7c478bd9Sstevel@tonic-gate 	ASSERT(proc != NULL);
954*7c478bd9Sstevel@tonic-gate 	ASSERT(sqp != NULL);
955*7c478bd9Sstevel@tonic-gate 	ASSERT(mp != NULL);
956*7c478bd9Sstevel@tonic-gate 	ASSERT(mp->b_next == NULL);
957*7c478bd9Sstevel@tonic-gate 	ASSERT(connp->conn_tcp->tcp_connp == connp);
958*7c478bd9Sstevel@tonic-gate 
959*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
960*7c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
961*7c478bd9Sstevel@tonic-gate 
962*7c478bd9Sstevel@tonic-gate 	being_processed = (sqp->sq_state & SQS_PROC);
963*7c478bd9Sstevel@tonic-gate 	if (!being_processed && (sqp->sq_first == NULL)) {
964*7c478bd9Sstevel@tonic-gate 		/*
965*7c478bd9Sstevel@tonic-gate 		 * Fast-path, ok to process and nothing queued.
966*7c478bd9Sstevel@tonic-gate 		 */
967*7c478bd9Sstevel@tonic-gate 		sqp->sq_state |= (SQS_PROC|SQS_FAST);
968*7c478bd9Sstevel@tonic-gate 		sqp->sq_run = curthread;
969*7c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
970*7c478bd9Sstevel@tonic-gate 
971*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
972*7c478bd9Sstevel@tonic-gate 		sqp->sq_isintr = interrupt;
973*7c478bd9Sstevel@tonic-gate 		sqp->sq_curmp = mp;
974*7c478bd9Sstevel@tonic-gate 		sqp->sq_curproc = proc;
975*7c478bd9Sstevel@tonic-gate 		sqp->sq_connp = connp;
976*7c478bd9Sstevel@tonic-gate 		mp->b_tag = sqp->sq_tag = tag;
977*7c478bd9Sstevel@tonic-gate #endif
978*7c478bd9Sstevel@tonic-gate 
979*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
980*7c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
981*7c478bd9Sstevel@tonic-gate 			if (interrupt)
982*7c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_npackets_intr);
983*7c478bd9Sstevel@tonic-gate 			else
984*7c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_npackets_other);
985*7c478bd9Sstevel@tonic-gate 			start = gethrtime();
986*7c478bd9Sstevel@tonic-gate 		}
987*7c478bd9Sstevel@tonic-gate #endif
988*7c478bd9Sstevel@tonic-gate 
989*7c478bd9Sstevel@tonic-gate 		((conn_t *)arg)->conn_on_sqp = B_TRUE;
990*7c478bd9Sstevel@tonic-gate 		DTRACE_PROBE3(squeue__proc__start, squeue_t *,
991*7c478bd9Sstevel@tonic-gate 		    sqp, mblk_t *, mp, conn_t *, arg);
992*7c478bd9Sstevel@tonic-gate 		(*proc)(arg, mp, sqp);
993*7c478bd9Sstevel@tonic-gate 		DTRACE_PROBE2(squeue__proc__end, squeue_t *,
994*7c478bd9Sstevel@tonic-gate 		    sqp, conn_t *, arg);
995*7c478bd9Sstevel@tonic-gate 		((conn_t *)arg)->conn_on_sqp = B_FALSE;
996*7c478bd9Sstevel@tonic-gate 
997*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
998*7c478bd9Sstevel@tonic-gate 		sqp->sq_curmp = NULL;
999*7c478bd9Sstevel@tonic-gate 		sqp->sq_curproc = NULL;
1000*7c478bd9Sstevel@tonic-gate 		sqp->sq_connp = NULL;
1001*7c478bd9Sstevel@tonic-gate 		sqp->sq_isintr = 0;
1002*7c478bd9Sstevel@tonic-gate #endif
1003*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
1004*7c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
1005*7c478bd9Sstevel@tonic-gate 			delta = gethrtime() - start;
1006*7c478bd9Sstevel@tonic-gate 			if (interrupt)
1007*7c478bd9Sstevel@tonic-gate 				SQDELTA(sqp, sq_time_intr, delta);
1008*7c478bd9Sstevel@tonic-gate 			else
1009*7c478bd9Sstevel@tonic-gate 				SQDELTA(sqp, sq_time_other, delta);
1010*7c478bd9Sstevel@tonic-gate 		}
1011*7c478bd9Sstevel@tonic-gate #endif
1012*7c478bd9Sstevel@tonic-gate 
1013*7c478bd9Sstevel@tonic-gate 		CONN_DEC_REF((conn_t *)arg);
1014*7c478bd9Sstevel@tonic-gate 		mutex_enter(&sqp->sq_lock);
1015*7c478bd9Sstevel@tonic-gate 		sqp->sq_state &= ~(SQS_PROC|SQS_FAST);
1016*7c478bd9Sstevel@tonic-gate 		sqp->sq_run = NULL;
1017*7c478bd9Sstevel@tonic-gate 		if (sqp->sq_first == NULL) {
1018*7c478bd9Sstevel@tonic-gate 			/*
1019*7c478bd9Sstevel@tonic-gate 			 * We processed inline our packet and
1020*7c478bd9Sstevel@tonic-gate 			 * nothing new has arrived. We are done.
1021*7c478bd9Sstevel@tonic-gate 			 */
1022*7c478bd9Sstevel@tonic-gate 			mutex_exit(&sqp->sq_lock);
1023*7c478bd9Sstevel@tonic-gate 		} else {
1024*7c478bd9Sstevel@tonic-gate 			SQUEUE_WORKER_WAKEUP(sqp);
1025*7c478bd9Sstevel@tonic-gate 		}
1026*7c478bd9Sstevel@tonic-gate 		return;
1027*7c478bd9Sstevel@tonic-gate 	} else {
1028*7c478bd9Sstevel@tonic-gate 		/*
1029*7c478bd9Sstevel@tonic-gate 		 * We let a thread processing a squeue reenter only
1030*7c478bd9Sstevel@tonic-gate 		 * once. This helps the case of incoming connection
1031*7c478bd9Sstevel@tonic-gate 		 * where a SYN-ACK-ACK that triggers the conn_ind
1032*7c478bd9Sstevel@tonic-gate 		 * doesn't have to queue the packet if listener and
1033*7c478bd9Sstevel@tonic-gate 		 * eager are on the same squeue. Also helps the
1034*7c478bd9Sstevel@tonic-gate 		 * loopback connection where the two ends are bound
1035*7c478bd9Sstevel@tonic-gate 		 * to the same squeue (which is typical on single
1036*7c478bd9Sstevel@tonic-gate 		 * CPU machines).
1037*7c478bd9Sstevel@tonic-gate 		 * We let the thread reenter only once for the fear
1038*7c478bd9Sstevel@tonic-gate 		 * of stack getting blown with multiple traversal.
1039*7c478bd9Sstevel@tonic-gate 		 */
1040*7c478bd9Sstevel@tonic-gate 		if (being_processed && !(sqp->sq_state & SQS_REENTER) &&
1041*7c478bd9Sstevel@tonic-gate 		    (sqp->sq_run == curthread) &&
1042*7c478bd9Sstevel@tonic-gate 		    (((conn_t *)arg)->conn_on_sqp == B_FALSE)) {
1043*7c478bd9Sstevel@tonic-gate 			sqp->sq_state |= SQS_REENTER;
1044*7c478bd9Sstevel@tonic-gate 			mutex_exit(&sqp->sq_lock);
1045*7c478bd9Sstevel@tonic-gate 
1046*7c478bd9Sstevel@tonic-gate 			((conn_t *)arg)->conn_on_sqp = B_TRUE;
1047*7c478bd9Sstevel@tonic-gate 			DTRACE_PROBE3(squeue__proc__start, squeue_t *,
1048*7c478bd9Sstevel@tonic-gate 			    sqp, mblk_t *, mp, conn_t *, arg);
1049*7c478bd9Sstevel@tonic-gate 			(*proc)(arg, mp, sqp);
1050*7c478bd9Sstevel@tonic-gate 			DTRACE_PROBE2(squeue__proc__end, squeue_t *,
1051*7c478bd9Sstevel@tonic-gate 			    sqp, conn_t *, arg);
1052*7c478bd9Sstevel@tonic-gate 			((conn_t *)arg)->conn_on_sqp = B_FALSE;
1053*7c478bd9Sstevel@tonic-gate 			CONN_DEC_REF((conn_t *)arg);
1054*7c478bd9Sstevel@tonic-gate 
1055*7c478bd9Sstevel@tonic-gate 			mutex_enter(&sqp->sq_lock);
1056*7c478bd9Sstevel@tonic-gate 			sqp->sq_state &= ~SQS_REENTER;
1057*7c478bd9Sstevel@tonic-gate 			mutex_exit(&sqp->sq_lock);
1058*7c478bd9Sstevel@tonic-gate 			return;
1059*7c478bd9Sstevel@tonic-gate 		}
1060*7c478bd9Sstevel@tonic-gate 
1061*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
1062*7c478bd9Sstevel@tonic-gate 		mp->b_tag = tag;
1063*7c478bd9Sstevel@tonic-gate #endif
1064*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
1065*7c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
1066*7c478bd9Sstevel@tonic-gate 			if (servicing_interrupt())
1067*7c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_nqueued_intr);
1068*7c478bd9Sstevel@tonic-gate 			else
1069*7c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_nqueued_other);
1070*7c478bd9Sstevel@tonic-gate 			if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
1071*7c478bd9Sstevel@tonic-gate 				sqp->sq_stats.sq_max_qlen = sqp->sq_count;
1072*7c478bd9Sstevel@tonic-gate 		}
1073*7c478bd9Sstevel@tonic-gate #endif
1074*7c478bd9Sstevel@tonic-gate 		ENQUEUE_MP(sqp, mp, proc, arg);
1075*7c478bd9Sstevel@tonic-gate 		if (being_processed) {
1076*7c478bd9Sstevel@tonic-gate 			/*
1077*7c478bd9Sstevel@tonic-gate 			 * Queue is already being processed.
1078*7c478bd9Sstevel@tonic-gate 			 * No need to do anything.
1079*7c478bd9Sstevel@tonic-gate 			 */
1080*7c478bd9Sstevel@tonic-gate 			mutex_exit(&sqp->sq_lock);
1081*7c478bd9Sstevel@tonic-gate 			return;
1082*7c478bd9Sstevel@tonic-gate 		}
1083*7c478bd9Sstevel@tonic-gate 		SQUEUE_WORKER_WAKEUP(sqp);
1084*7c478bd9Sstevel@tonic-gate 	}
1085*7c478bd9Sstevel@tonic-gate }
1086*7c478bd9Sstevel@tonic-gate 
1087*7c478bd9Sstevel@tonic-gate /*
1088*7c478bd9Sstevel@tonic-gate  * squeue_fill() - fill squeue *sqp with mblk *mp with argument of *arg
1089*7c478bd9Sstevel@tonic-gate  * without processing the squeue.
1090*7c478bd9Sstevel@tonic-gate  */
1091*7c478bd9Sstevel@tonic-gate /* ARGSUSED */
1092*7c478bd9Sstevel@tonic-gate void
1093*7c478bd9Sstevel@tonic-gate squeue_fill(squeue_t *sqp, mblk_t *mp, sqproc_t proc, void * arg,
1094*7c478bd9Sstevel@tonic-gate     uint8_t tag)
1095*7c478bd9Sstevel@tonic-gate {
1096*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
1097*7c478bd9Sstevel@tonic-gate 	conn_t *connp = (conn_t *)arg;
1098*7c478bd9Sstevel@tonic-gate #endif
1099*7c478bd9Sstevel@tonic-gate 	ASSERT(proc != NULL);
1100*7c478bd9Sstevel@tonic-gate 	ASSERT(sqp != NULL);
1101*7c478bd9Sstevel@tonic-gate 	ASSERT(mp != NULL);
1102*7c478bd9Sstevel@tonic-gate 	ASSERT(mp->b_next == NULL);
1103*7c478bd9Sstevel@tonic-gate 	ASSERT(connp->conn_tcp->tcp_connp == connp);
1104*7c478bd9Sstevel@tonic-gate 
1105*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
1106*7c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
1107*7c478bd9Sstevel@tonic-gate 	ENQUEUE_MP(sqp, mp, proc, arg);
1108*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
1109*7c478bd9Sstevel@tonic-gate 	mp->b_tag = tag;
1110*7c478bd9Sstevel@tonic-gate #endif
1111*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
1112*7c478bd9Sstevel@tonic-gate 	if (SQ_PROFILING(sqp)) {
1113*7c478bd9Sstevel@tonic-gate 		if (servicing_interrupt())
1114*7c478bd9Sstevel@tonic-gate 			SQSTAT(sqp, sq_nqueued_intr);
1115*7c478bd9Sstevel@tonic-gate 		else
1116*7c478bd9Sstevel@tonic-gate 			SQSTAT(sqp, sq_nqueued_other);
1117*7c478bd9Sstevel@tonic-gate 		if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
1118*7c478bd9Sstevel@tonic-gate 			sqp->sq_stats.sq_max_qlen = sqp->sq_count;
1119*7c478bd9Sstevel@tonic-gate 	}
1120*7c478bd9Sstevel@tonic-gate #endif
1121*7c478bd9Sstevel@tonic-gate 
1122*7c478bd9Sstevel@tonic-gate 	/*
1123*7c478bd9Sstevel@tonic-gate 	 * If queue is already being processed. No need to do anything.
1124*7c478bd9Sstevel@tonic-gate 	 */
1125*7c478bd9Sstevel@tonic-gate 	if (sqp->sq_state & SQS_PROC) {
1126*7c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
1127*7c478bd9Sstevel@tonic-gate 		return;
1128*7c478bd9Sstevel@tonic-gate 	}
1129*7c478bd9Sstevel@tonic-gate 
1130*7c478bd9Sstevel@tonic-gate 	SQUEUE_WORKER_WAKEUP(sqp);
1131*7c478bd9Sstevel@tonic-gate }
1132*7c478bd9Sstevel@tonic-gate 
1133*7c478bd9Sstevel@tonic-gate 
1134*7c478bd9Sstevel@tonic-gate /*
1135*7c478bd9Sstevel@tonic-gate  * PRIVATE FUNCTIONS
1136*7c478bd9Sstevel@tonic-gate  */
1137*7c478bd9Sstevel@tonic-gate 
1138*7c478bd9Sstevel@tonic-gate static void
1139*7c478bd9Sstevel@tonic-gate squeue_fire(void *arg)
1140*7c478bd9Sstevel@tonic-gate {
1141*7c478bd9Sstevel@tonic-gate 	squeue_t	*sqp = arg;
1142*7c478bd9Sstevel@tonic-gate 	uint_t		state;
1143*7c478bd9Sstevel@tonic-gate 
1144*7c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
1145*7c478bd9Sstevel@tonic-gate 
1146*7c478bd9Sstevel@tonic-gate 	state = sqp->sq_state;
1147*7c478bd9Sstevel@tonic-gate 	if (sqp->sq_tid == 0 && !(state & SQS_TMO_PROG)) {
1148*7c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
1149*7c478bd9Sstevel@tonic-gate 		return;
1150*7c478bd9Sstevel@tonic-gate 	}
1151*7c478bd9Sstevel@tonic-gate 
1152*7c478bd9Sstevel@tonic-gate 	sqp->sq_tid = 0;
1153*7c478bd9Sstevel@tonic-gate 	/*
1154*7c478bd9Sstevel@tonic-gate 	 * The timeout fired before we got a chance to set it.
1155*7c478bd9Sstevel@tonic-gate 	 * Process it anyway but remove the SQS_TMO_PROG so that
1156*7c478bd9Sstevel@tonic-gate 	 * the guy trying to set the timeout knows that it has
1157*7c478bd9Sstevel@tonic-gate 	 * already been processed.
1158*7c478bd9Sstevel@tonic-gate 	 */
1159*7c478bd9Sstevel@tonic-gate 	if (state & SQS_TMO_PROG)
1160*7c478bd9Sstevel@tonic-gate 		sqp->sq_state &= ~SQS_TMO_PROG;
1161*7c478bd9Sstevel@tonic-gate 
1162*7c478bd9Sstevel@tonic-gate 	if (!(state & SQS_PROC)) {
1163*7c478bd9Sstevel@tonic-gate 		sqp->sq_awaken = lbolt;
1164*7c478bd9Sstevel@tonic-gate 		cv_signal(&sqp->sq_async);
1165*7c478bd9Sstevel@tonic-gate 	}
1166*7c478bd9Sstevel@tonic-gate 	mutex_exit(&sqp->sq_lock);
1167*7c478bd9Sstevel@tonic-gate }
1168*7c478bd9Sstevel@tonic-gate 
1169*7c478bd9Sstevel@tonic-gate static void
1170*7c478bd9Sstevel@tonic-gate squeue_drain(squeue_t *sqp, uint_t proc_type, clock_t expire)
1171*7c478bd9Sstevel@tonic-gate {
1172*7c478bd9Sstevel@tonic-gate 	mblk_t	*mp;
1173*7c478bd9Sstevel@tonic-gate 	mblk_t 	*head;
1174*7c478bd9Sstevel@tonic-gate 	sqproc_t proc;
1175*7c478bd9Sstevel@tonic-gate 	conn_t	*connp;
1176*7c478bd9Sstevel@tonic-gate 	clock_t	start = lbolt;
1177*7c478bd9Sstevel@tonic-gate 	clock_t	drain_time;
1178*7c478bd9Sstevel@tonic-gate 	timeout_id_t tid;
1179*7c478bd9Sstevel@tonic-gate 	uint_t	cnt;
1180*7c478bd9Sstevel@tonic-gate 	uint_t	total_cnt = 0;
1181*7c478bd9Sstevel@tonic-gate 	ill_rx_ring_t	*sq_rx_ring = sqp->sq_rx_ring;
1182*7c478bd9Sstevel@tonic-gate 	int	interrupt = servicing_interrupt();
1183*7c478bd9Sstevel@tonic-gate 	boolean_t poll_on = B_FALSE;
1184*7c478bd9Sstevel@tonic-gate 
1185*7c478bd9Sstevel@tonic-gate 	ASSERT(mutex_owned(&sqp->sq_lock));
1186*7c478bd9Sstevel@tonic-gate 	ASSERT(!(sqp->sq_state & SQS_PROC));
1187*7c478bd9Sstevel@tonic-gate 
1188*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
1189*7c478bd9Sstevel@tonic-gate 	if (SQ_PROFILING(sqp)) {
1190*7c478bd9Sstevel@tonic-gate 		if (interrupt)
1191*7c478bd9Sstevel@tonic-gate 			SQSTAT(sqp, sq_ndrains_intr);
1192*7c478bd9Sstevel@tonic-gate 		else if (!(proc_type & SQS_WORKER))
1193*7c478bd9Sstevel@tonic-gate 			SQSTAT(sqp, sq_ndrains_other);
1194*7c478bd9Sstevel@tonic-gate 		else
1195*7c478bd9Sstevel@tonic-gate 			SQSTAT(sqp, sq_ndrains_worker);
1196*7c478bd9Sstevel@tonic-gate 	}
1197*7c478bd9Sstevel@tonic-gate #endif
1198*7c478bd9Sstevel@tonic-gate 
1199*7c478bd9Sstevel@tonic-gate 	if ((tid = sqp->sq_tid) != 0)
1200*7c478bd9Sstevel@tonic-gate 		sqp->sq_tid = 0;
1201*7c478bd9Sstevel@tonic-gate 
1202*7c478bd9Sstevel@tonic-gate 	sqp->sq_state |= SQS_PROC | proc_type;
1203*7c478bd9Sstevel@tonic-gate 	head = sqp->sq_first;
1204*7c478bd9Sstevel@tonic-gate 	sqp->sq_first = NULL;
1205*7c478bd9Sstevel@tonic-gate 	sqp->sq_last = NULL;
1206*7c478bd9Sstevel@tonic-gate 	cnt = sqp->sq_count;
1207*7c478bd9Sstevel@tonic-gate 
1208*7c478bd9Sstevel@tonic-gate 	/*
1209*7c478bd9Sstevel@tonic-gate 	 * We have backlog built up. Switch to polling mode if the
1210*7c478bd9Sstevel@tonic-gate 	 * device underneath allows it. Need to do it only for
1211*7c478bd9Sstevel@tonic-gate 	 * drain by non-interrupt thread so interrupts don't
1212*7c478bd9Sstevel@tonic-gate 	 * come and disrupt us in between. If its a interrupt thread,
1213*7c478bd9Sstevel@tonic-gate 	 * no need because most devices will not issue another
1214*7c478bd9Sstevel@tonic-gate 	 * interrupt till this one returns.
1215*7c478bd9Sstevel@tonic-gate 	 */
1216*7c478bd9Sstevel@tonic-gate 	if ((sqp->sq_state & SQS_POLL_CAPAB) && !(proc_type & SQS_ENTER) &&
1217*7c478bd9Sstevel@tonic-gate 		(sqp->sq_count > squeue_worker_poll_min)) {
1218*7c478bd9Sstevel@tonic-gate 		ASSERT(sq_rx_ring != NULL);
1219*7c478bd9Sstevel@tonic-gate 		SQS_POLLING_ON(sqp, sq_rx_ring);
1220*7c478bd9Sstevel@tonic-gate 		poll_on = B_TRUE;
1221*7c478bd9Sstevel@tonic-gate 	}
1222*7c478bd9Sstevel@tonic-gate 
1223*7c478bd9Sstevel@tonic-gate 	mutex_exit(&sqp->sq_lock);
1224*7c478bd9Sstevel@tonic-gate 
1225*7c478bd9Sstevel@tonic-gate 	if (tid != 0)
1226*7c478bd9Sstevel@tonic-gate 		(void) untimeout(tid);
1227*7c478bd9Sstevel@tonic-gate again:
1228*7c478bd9Sstevel@tonic-gate 	while ((mp = head) != NULL) {
1229*7c478bd9Sstevel@tonic-gate 		head = mp->b_next;
1230*7c478bd9Sstevel@tonic-gate 		mp->b_next = NULL;
1231*7c478bd9Sstevel@tonic-gate 
1232*7c478bd9Sstevel@tonic-gate 		proc = (sqproc_t)mp->b_queue;
1233*7c478bd9Sstevel@tonic-gate 		mp->b_queue = NULL;
1234*7c478bd9Sstevel@tonic-gate 		connp = (conn_t *)mp->b_prev;
1235*7c478bd9Sstevel@tonic-gate 		mp->b_prev = NULL;
1236*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
1237*7c478bd9Sstevel@tonic-gate 		sqp->sq_curmp = mp;
1238*7c478bd9Sstevel@tonic-gate 		sqp->sq_curproc = proc;
1239*7c478bd9Sstevel@tonic-gate 		sqp->sq_connp = connp;
1240*7c478bd9Sstevel@tonic-gate 		sqp->sq_tag = mp->b_tag;
1241*7c478bd9Sstevel@tonic-gate #endif
1242*7c478bd9Sstevel@tonic-gate 
1243*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
1244*7c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
1245*7c478bd9Sstevel@tonic-gate 			if (interrupt)
1246*7c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_npackets_intr);
1247*7c478bd9Sstevel@tonic-gate 			else if (!(proc_type & SQS_WORKER))
1248*7c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_npackets_other);
1249*7c478bd9Sstevel@tonic-gate 			else
1250*7c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_npackets_worker);
1251*7c478bd9Sstevel@tonic-gate 		}
1252*7c478bd9Sstevel@tonic-gate #endif
1253*7c478bd9Sstevel@tonic-gate 
1254*7c478bd9Sstevel@tonic-gate 		connp->conn_on_sqp = B_TRUE;
1255*7c478bd9Sstevel@tonic-gate 		DTRACE_PROBE3(squeue__proc__start, squeue_t *,
1256*7c478bd9Sstevel@tonic-gate 		    sqp, mblk_t *, mp, conn_t *, connp);
1257*7c478bd9Sstevel@tonic-gate 		(*proc)(connp, mp, sqp);
1258*7c478bd9Sstevel@tonic-gate 		DTRACE_PROBE2(squeue__proc__end, squeue_t *,
1259*7c478bd9Sstevel@tonic-gate 		    sqp, conn_t *, connp);
1260*7c478bd9Sstevel@tonic-gate 		connp->conn_on_sqp = B_FALSE;
1261*7c478bd9Sstevel@tonic-gate 		CONN_DEC_REF(connp);
1262*7c478bd9Sstevel@tonic-gate 	}
1263*7c478bd9Sstevel@tonic-gate 
1264*7c478bd9Sstevel@tonic-gate 
1265*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
1266*7c478bd9Sstevel@tonic-gate 	sqp->sq_curmp = NULL;
1267*7c478bd9Sstevel@tonic-gate 	sqp->sq_curproc = NULL;
1268*7c478bd9Sstevel@tonic-gate 	sqp->sq_connp = NULL;
1269*7c478bd9Sstevel@tonic-gate #endif
1270*7c478bd9Sstevel@tonic-gate 
1271*7c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
1272*7c478bd9Sstevel@tonic-gate 	sqp->sq_count -= cnt;
1273*7c478bd9Sstevel@tonic-gate 	total_cnt += cnt;
1274*7c478bd9Sstevel@tonic-gate 
1275*7c478bd9Sstevel@tonic-gate 	if (sqp->sq_first != NULL) {
1276*7c478bd9Sstevel@tonic-gate 		if (!expire || (lbolt < expire)) {
1277*7c478bd9Sstevel@tonic-gate 			/* More arrived and time not expired */
1278*7c478bd9Sstevel@tonic-gate 			head = sqp->sq_first;
1279*7c478bd9Sstevel@tonic-gate 			sqp->sq_first = NULL;
1280*7c478bd9Sstevel@tonic-gate 			sqp->sq_last = NULL;
1281*7c478bd9Sstevel@tonic-gate 			cnt = sqp->sq_count;
1282*7c478bd9Sstevel@tonic-gate 			mutex_exit(&sqp->sq_lock);
1283*7c478bd9Sstevel@tonic-gate 			goto again;
1284*7c478bd9Sstevel@tonic-gate 		}
1285*7c478bd9Sstevel@tonic-gate 
1286*7c478bd9Sstevel@tonic-gate 		/*
1287*7c478bd9Sstevel@tonic-gate 		 * If we are not worker thread and we
1288*7c478bd9Sstevel@tonic-gate 		 * reached our time limit to do drain,
1289*7c478bd9Sstevel@tonic-gate 		 * signal the worker thread to pick
1290*7c478bd9Sstevel@tonic-gate 		 * up the work.
1291*7c478bd9Sstevel@tonic-gate 		 * If we were the worker thread, then
1292*7c478bd9Sstevel@tonic-gate 		 * we take a break to allow an interrupt
1293*7c478bd9Sstevel@tonic-gate 		 * or writer to pick up the load.
1294*7c478bd9Sstevel@tonic-gate 		 */
1295*7c478bd9Sstevel@tonic-gate 		if (proc_type != SQS_WORKER) {
1296*7c478bd9Sstevel@tonic-gate 			sqp->sq_awaken = lbolt;
1297*7c478bd9Sstevel@tonic-gate 			cv_signal(&sqp->sq_async);
1298*7c478bd9Sstevel@tonic-gate 		}
1299*7c478bd9Sstevel@tonic-gate 	}
1300*7c478bd9Sstevel@tonic-gate 
1301*7c478bd9Sstevel@tonic-gate 	/*
1302*7c478bd9Sstevel@tonic-gate 	 * Try to see if we can get a time estimate to process a packet.
1303*7c478bd9Sstevel@tonic-gate 	 * Do it only in interrupt context since less chance of context
1304*7c478bd9Sstevel@tonic-gate 	 * switch or pinning etc. to get a better estimate.
1305*7c478bd9Sstevel@tonic-gate 	 */
1306*7c478bd9Sstevel@tonic-gate 	if (interrupt && ((drain_time = (lbolt - start)) > 0))
1307*7c478bd9Sstevel@tonic-gate 		sqp->sq_avg_drain_time = ((80 * sqp->sq_avg_drain_time) +
1308*7c478bd9Sstevel@tonic-gate 		    (20 * (drv_hztousec(drain_time)/total_cnt)))/100;
1309*7c478bd9Sstevel@tonic-gate 
1310*7c478bd9Sstevel@tonic-gate 	sqp->sq_state &= ~(SQS_PROC | proc_type);
1311*7c478bd9Sstevel@tonic-gate 
1312*7c478bd9Sstevel@tonic-gate 	/*
1313*7c478bd9Sstevel@tonic-gate 	 * If polling was turned on, turn it off and reduce the default
1314*7c478bd9Sstevel@tonic-gate 	 * interrupt blank interval as well to bring new packets in faster
1315*7c478bd9Sstevel@tonic-gate 	 * (reduces the latency when there is no backlog).
1316*7c478bd9Sstevel@tonic-gate 	 */
1317*7c478bd9Sstevel@tonic-gate 	if (poll_on && (sqp->sq_state & SQS_POLL_CAPAB)) {
1318*7c478bd9Sstevel@tonic-gate 		ASSERT(sq_rx_ring != NULL);
1319*7c478bd9Sstevel@tonic-gate 		SQS_POLLING_OFF(sqp, sq_rx_ring);
1320*7c478bd9Sstevel@tonic-gate 	}
1321*7c478bd9Sstevel@tonic-gate }
1322*7c478bd9Sstevel@tonic-gate 
1323*7c478bd9Sstevel@tonic-gate static void
1324*7c478bd9Sstevel@tonic-gate squeue_worker(squeue_t *sqp)
1325*7c478bd9Sstevel@tonic-gate {
1326*7c478bd9Sstevel@tonic-gate 	kmutex_t *lock = &sqp->sq_lock;
1327*7c478bd9Sstevel@tonic-gate 	kcondvar_t *async = &sqp->sq_async;
1328*7c478bd9Sstevel@tonic-gate 	callb_cpr_t cprinfo;
1329*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
1330*7c478bd9Sstevel@tonic-gate 	hrtime_t start;
1331*7c478bd9Sstevel@tonic-gate #endif
1332*7c478bd9Sstevel@tonic-gate 
1333*7c478bd9Sstevel@tonic-gate 	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, "nca");
1334*7c478bd9Sstevel@tonic-gate 	mutex_enter(lock);
1335*7c478bd9Sstevel@tonic-gate 
1336*7c478bd9Sstevel@tonic-gate 	for (;;) {
1337*7c478bd9Sstevel@tonic-gate 		while (sqp->sq_first == NULL || (sqp->sq_state & SQS_PROC)) {
1338*7c478bd9Sstevel@tonic-gate 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
1339*7c478bd9Sstevel@tonic-gate still_wait:
1340*7c478bd9Sstevel@tonic-gate 			cv_wait(async, lock);
1341*7c478bd9Sstevel@tonic-gate 			if (sqp->sq_state & SQS_PROC) {
1342*7c478bd9Sstevel@tonic-gate 				goto still_wait;
1343*7c478bd9Sstevel@tonic-gate 			}
1344*7c478bd9Sstevel@tonic-gate 			CALLB_CPR_SAFE_END(&cprinfo, lock);
1345*7c478bd9Sstevel@tonic-gate 		}
1346*7c478bd9Sstevel@tonic-gate 
1347*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
1348*7c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
1349*7c478bd9Sstevel@tonic-gate 			start = gethrtime();
1350*7c478bd9Sstevel@tonic-gate 		}
1351*7c478bd9Sstevel@tonic-gate #endif
1352*7c478bd9Sstevel@tonic-gate 
1353*7c478bd9Sstevel@tonic-gate 		ASSERT(squeue_workerdrain_tick != 0);
1354*7c478bd9Sstevel@tonic-gate 		sqp->sq_run = curthread;
1355*7c478bd9Sstevel@tonic-gate 		squeue_drain(sqp, SQS_WORKER, lbolt +  squeue_workerdrain_tick);
1356*7c478bd9Sstevel@tonic-gate 		sqp->sq_run = NULL;
1357*7c478bd9Sstevel@tonic-gate 
1358*7c478bd9Sstevel@tonic-gate 		if (sqp->sq_first != NULL) {
1359*7c478bd9Sstevel@tonic-gate 			/*
1360*7c478bd9Sstevel@tonic-gate 			 * Doing too much processing by worker thread
1361*7c478bd9Sstevel@tonic-gate 			 * in presense of interrupts can be sub optimal.
1362*7c478bd9Sstevel@tonic-gate 			 * Instead, once a drain is done by worker thread
1363*7c478bd9Sstevel@tonic-gate 			 * for squeue_writerdrain_ms (the reason we are
1364*7c478bd9Sstevel@tonic-gate 			 * here), we force wait for squeue_workerwait_tick
1365*7c478bd9Sstevel@tonic-gate 			 * before doing more processing even if sq_wait is
1366*7c478bd9Sstevel@tonic-gate 			 * set to 0.
1367*7c478bd9Sstevel@tonic-gate 			 *
1368*7c478bd9Sstevel@tonic-gate 			 * This can be counterproductive for performance
1369*7c478bd9Sstevel@tonic-gate 			 * if worker thread is the only means to process
1370*7c478bd9Sstevel@tonic-gate 			 * the packets (interrupts or writers are not
1371*7c478bd9Sstevel@tonic-gate 			 * allowed inside the squeue).
1372*7c478bd9Sstevel@tonic-gate 			 */
1373*7c478bd9Sstevel@tonic-gate 			if (sqp->sq_tid == 0 &&
1374*7c478bd9Sstevel@tonic-gate 			    !(sqp->sq_state & SQS_TMO_PROG)) {
1375*7c478bd9Sstevel@tonic-gate 				timeout_id_t	tid;
1376*7c478bd9Sstevel@tonic-gate 
1377*7c478bd9Sstevel@tonic-gate 				sqp->sq_state |= SQS_TMO_PROG;
1378*7c478bd9Sstevel@tonic-gate 				mutex_exit(&sqp->sq_lock);
1379*7c478bd9Sstevel@tonic-gate 				tid = timeout(squeue_fire, sqp,
1380*7c478bd9Sstevel@tonic-gate 				    squeue_workerwait_tick);
1381*7c478bd9Sstevel@tonic-gate 				mutex_enter(&sqp->sq_lock);
1382*7c478bd9Sstevel@tonic-gate 				/*
1383*7c478bd9Sstevel@tonic-gate 				 * Check again if we still need
1384*7c478bd9Sstevel@tonic-gate 				 * the timeout
1385*7c478bd9Sstevel@tonic-gate 				 */
1386*7c478bd9Sstevel@tonic-gate 				if (((sqp->sq_state & (SQS_TMO_PROG|SQS_PROC))
1387*7c478bd9Sstevel@tonic-gate 				    == SQS_TMO_PROG) && (sqp->sq_tid == 0) &&
1388*7c478bd9Sstevel@tonic-gate 				    (sqp->sq_first != NULL)) {
1389*7c478bd9Sstevel@tonic-gate 					sqp->sq_state &= ~SQS_TMO_PROG;
1390*7c478bd9Sstevel@tonic-gate 					sqp->sq_awaken = lbolt;
1391*7c478bd9Sstevel@tonic-gate 					sqp->sq_tid = tid;
1392*7c478bd9Sstevel@tonic-gate 				} else if (sqp->sq_state & SQS_TMO_PROG) {
1393*7c478bd9Sstevel@tonic-gate 					/* timeout not needed */
1394*7c478bd9Sstevel@tonic-gate 					sqp->sq_state &= ~SQS_TMO_PROG;
1395*7c478bd9Sstevel@tonic-gate 					mutex_exit(&(sqp)->sq_lock);
1396*7c478bd9Sstevel@tonic-gate 					(void) untimeout(tid);
1397*7c478bd9Sstevel@tonic-gate 					mutex_enter(&sqp->sq_lock);
1398*7c478bd9Sstevel@tonic-gate 				}
1399*7c478bd9Sstevel@tonic-gate 			}
1400*7c478bd9Sstevel@tonic-gate 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
1401*7c478bd9Sstevel@tonic-gate 			cv_wait(async, lock);
1402*7c478bd9Sstevel@tonic-gate 			CALLB_CPR_SAFE_END(&cprinfo, lock);
1403*7c478bd9Sstevel@tonic-gate 		}
1404*7c478bd9Sstevel@tonic-gate 
1405*7c478bd9Sstevel@tonic-gate 
1406*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
1407*7c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
1408*7c478bd9Sstevel@tonic-gate 			SQDELTA(sqp, sq_time_worker, gethrtime() - start);
1409*7c478bd9Sstevel@tonic-gate 		}
1410*7c478bd9Sstevel@tonic-gate #endif
1411*7c478bd9Sstevel@tonic-gate 	}
1412*7c478bd9Sstevel@tonic-gate }
1413*7c478bd9Sstevel@tonic-gate 
1414*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
1415*7c478bd9Sstevel@tonic-gate static int
1416*7c478bd9Sstevel@tonic-gate squeue_kstat_update(kstat_t *ksp, int rw)
1417*7c478bd9Sstevel@tonic-gate {
1418*7c478bd9Sstevel@tonic-gate 	struct squeue_kstat *sqsp = &squeue_kstat;
1419*7c478bd9Sstevel@tonic-gate 	squeue_t *sqp = ksp->ks_private;
1420*7c478bd9Sstevel@tonic-gate 
1421*7c478bd9Sstevel@tonic-gate 	if (rw == KSTAT_WRITE)
1422*7c478bd9Sstevel@tonic-gate 		return (EACCES);
1423*7c478bd9Sstevel@tonic-gate 
1424*7c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
1425*7c478bd9Sstevel@tonic-gate 	sqsp->sq_count.value.ui64 = sqp->sq_count;
1426*7c478bd9Sstevel@tonic-gate 	sqsp->sq_max_qlen.value.ui64 = sqp->sq_stats.sq_max_qlen;
1427*7c478bd9Sstevel@tonic-gate #endif
1428*7c478bd9Sstevel@tonic-gate 	sqsp->sq_npackets_worker.value.ui64 = sqp->sq_stats.sq_npackets_worker;
1429*7c478bd9Sstevel@tonic-gate 	sqsp->sq_npackets_intr.value.ui64 = sqp->sq_stats.sq_npackets_intr;
1430*7c478bd9Sstevel@tonic-gate 	sqsp->sq_npackets_other.value.ui64 = sqp->sq_stats.sq_npackets_other;
1431*7c478bd9Sstevel@tonic-gate 	sqsp->sq_nqueued_intr.value.ui64 = sqp->sq_stats.sq_nqueued_intr;
1432*7c478bd9Sstevel@tonic-gate 	sqsp->sq_nqueued_other.value.ui64 = sqp->sq_stats.sq_nqueued_other;
1433*7c478bd9Sstevel@tonic-gate 	sqsp->sq_ndrains_worker.value.ui64 = sqp->sq_stats.sq_ndrains_worker;
1434*7c478bd9Sstevel@tonic-gate 	sqsp->sq_ndrains_intr.value.ui64 = sqp->sq_stats.sq_ndrains_intr;
1435*7c478bd9Sstevel@tonic-gate 	sqsp->sq_ndrains_other.value.ui64 = sqp->sq_stats.sq_ndrains_other;
1436*7c478bd9Sstevel@tonic-gate 	sqsp->sq_time_worker.value.ui64 = sqp->sq_stats.sq_time_worker;
1437*7c478bd9Sstevel@tonic-gate 	sqsp->sq_time_intr.value.ui64 = sqp->sq_stats.sq_time_intr;
1438*7c478bd9Sstevel@tonic-gate 	sqsp->sq_time_other.value.ui64 = sqp->sq_stats.sq_time_other;
1439*7c478bd9Sstevel@tonic-gate 	return (0);
1440*7c478bd9Sstevel@tonic-gate }
1441*7c478bd9Sstevel@tonic-gate #endif
1442*7c478bd9Sstevel@tonic-gate 
1443*7c478bd9Sstevel@tonic-gate void
1444*7c478bd9Sstevel@tonic-gate squeue_profile_enable(squeue_t *sqp)
1445*7c478bd9Sstevel@tonic-gate {
1446*7c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
1447*7c478bd9Sstevel@tonic-gate 	sqp->sq_state |= SQS_PROFILE;
1448*7c478bd9Sstevel@tonic-gate 	mutex_exit(&sqp->sq_lock);
1449*7c478bd9Sstevel@tonic-gate }
1450*7c478bd9Sstevel@tonic-gate 
1451*7c478bd9Sstevel@tonic-gate void
1452*7c478bd9Sstevel@tonic-gate squeue_profile_disable(squeue_t *sqp)
1453*7c478bd9Sstevel@tonic-gate {
1454*7c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
1455*7c478bd9Sstevel@tonic-gate 	sqp->sq_state &= ~SQS_PROFILE;
1456*7c478bd9Sstevel@tonic-gate 	mutex_exit(&sqp->sq_lock);
1457*7c478bd9Sstevel@tonic-gate }
1458*7c478bd9Sstevel@tonic-gate 
1459*7c478bd9Sstevel@tonic-gate void
1460*7c478bd9Sstevel@tonic-gate squeue_profile_reset(squeue_t *sqp)
1461*7c478bd9Sstevel@tonic-gate {
1462*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
1463*7c478bd9Sstevel@tonic-gate 	bzero(&sqp->sq_stats, sizeof (sqstat_t));
1464*7c478bd9Sstevel@tonic-gate #endif
1465*7c478bd9Sstevel@tonic-gate }
1466*7c478bd9Sstevel@tonic-gate 
1467*7c478bd9Sstevel@tonic-gate void
1468*7c478bd9Sstevel@tonic-gate squeue_profile_start(void)
1469*7c478bd9Sstevel@tonic-gate {
1470*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
1471*7c478bd9Sstevel@tonic-gate 	squeue_profile = B_TRUE;
1472*7c478bd9Sstevel@tonic-gate #endif
1473*7c478bd9Sstevel@tonic-gate }
1474*7c478bd9Sstevel@tonic-gate 
1475*7c478bd9Sstevel@tonic-gate void
1476*7c478bd9Sstevel@tonic-gate squeue_profile_stop(void)
1477*7c478bd9Sstevel@tonic-gate {
1478*7c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
1479*7c478bd9Sstevel@tonic-gate 	squeue_profile = B_FALSE;
1480*7c478bd9Sstevel@tonic-gate #endif
1481*7c478bd9Sstevel@tonic-gate }
1482*7c478bd9Sstevel@tonic-gate 
1483*7c478bd9Sstevel@tonic-gate uintptr_t *
1484*7c478bd9Sstevel@tonic-gate squeue_getprivate(squeue_t *sqp, sqprivate_t p)
1485*7c478bd9Sstevel@tonic-gate {
1486*7c478bd9Sstevel@tonic-gate 	ASSERT(p < SQPRIVATE_MAX);
1487*7c478bd9Sstevel@tonic-gate 
1488*7c478bd9Sstevel@tonic-gate 	return (&sqp->sq_private[p]);
1489*7c478bd9Sstevel@tonic-gate }
1490*7c478bd9Sstevel@tonic-gate 
1491*7c478bd9Sstevel@tonic-gate processorid_t
1492*7c478bd9Sstevel@tonic-gate squeue_binding(squeue_t *sqp)
1493*7c478bd9Sstevel@tonic-gate {
1494*7c478bd9Sstevel@tonic-gate 	return (sqp->sq_bind);
1495*7c478bd9Sstevel@tonic-gate }
1496