xref: /titanic_52/usr/src/uts/common/inet/squeue.c (revision d045b9872121ef87817d5d01968d80cc01574bc8)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
57c478bd9Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
67c478bd9Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
77c478bd9Sstevel@tonic-gate  * with the License.
87c478bd9Sstevel@tonic-gate  *
97c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
107c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
117c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
127c478bd9Sstevel@tonic-gate  * and limitations under the License.
137c478bd9Sstevel@tonic-gate  *
147c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
157c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
167c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
177c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
187c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
197c478bd9Sstevel@tonic-gate  *
207c478bd9Sstevel@tonic-gate  * CDDL HEADER END
217c478bd9Sstevel@tonic-gate  */
227c478bd9Sstevel@tonic-gate /*
23*d045b987Smasputra  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
247c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
257c478bd9Sstevel@tonic-gate  */
267c478bd9Sstevel@tonic-gate 
277c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
287c478bd9Sstevel@tonic-gate 
297c478bd9Sstevel@tonic-gate /*
307c478bd9Sstevel@tonic-gate  * Squeues - TCP/IP serialization mechanism.
317c478bd9Sstevel@tonic-gate  *
327c478bd9Sstevel@tonic-gate  * This is a general purpose high-performance serialization mechanism. It is
337c478bd9Sstevel@tonic-gate  * similar to a taskq with a single worker thread, the difference is that it
347c478bd9Sstevel@tonic-gate  * does not imply a context switch - the thread placing a request may actually
357c478bd9Sstevel@tonic-gate  * process it. It is also biased for processing requests in interrupt context.
367c478bd9Sstevel@tonic-gate  *
377c478bd9Sstevel@tonic-gate  * Each squeue has a worker thread which may optionally be bound to a CPU.
387c478bd9Sstevel@tonic-gate  *
397c478bd9Sstevel@tonic-gate  * Only one thread may process requests from a given squeue at any time. This is
407c478bd9Sstevel@tonic-gate  * called "entering" squeue.
417c478bd9Sstevel@tonic-gate  *
427c478bd9Sstevel@tonic-gate  * Each dispatched request is processed either by
437c478bd9Sstevel@tonic-gate  *
447c478bd9Sstevel@tonic-gate  *	a) Dispatching thread or
457c478bd9Sstevel@tonic-gate  *	b) Some other thread that is currently processing squeue at the time of
467c478bd9Sstevel@tonic-gate  *		request or
477c478bd9Sstevel@tonic-gate  *	c) worker thread.
487c478bd9Sstevel@tonic-gate  *
497c478bd9Sstevel@tonic-gate  * INTERFACES:
507c478bd9Sstevel@tonic-gate  *
517c478bd9Sstevel@tonic-gate  * squeue_t *squeue_create(name, bind, wait, pri)
527c478bd9Sstevel@tonic-gate  *
537c478bd9Sstevel@tonic-gate  *	name: symbolic name for squeue.
 *	wait: time to wait before waking the worker thread after queueing
 *		request.
567c478bd9Sstevel@tonic-gate  *	bind: preferred CPU binding for the worker thread.
577c478bd9Sstevel@tonic-gate  *	pri:  thread priority for the worker thread.
587c478bd9Sstevel@tonic-gate  *
597c478bd9Sstevel@tonic-gate  *   This function never fails and may sleep. It returns a transparent pointer
607c478bd9Sstevel@tonic-gate  *   to the squeue_t structure that is passed to all other squeue operations.
617c478bd9Sstevel@tonic-gate  *
627c478bd9Sstevel@tonic-gate  * void squeue_bind(sqp, bind)
637c478bd9Sstevel@tonic-gate  *
647c478bd9Sstevel@tonic-gate  *   Bind squeue worker thread to a CPU specified by the 'bind' argument. The
657c478bd9Sstevel@tonic-gate  *   'bind' value of -1 binds to the preferred thread specified for
667c478bd9Sstevel@tonic-gate  *   squeue_create.
677c478bd9Sstevel@tonic-gate  *
 *   NOTE: Any value of 'bind' other than -1 is not supported currently, but the
697c478bd9Sstevel@tonic-gate  *	 API is present - in the future it may be useful to specify different
707c478bd9Sstevel@tonic-gate  *	 binding.
717c478bd9Sstevel@tonic-gate  *
727c478bd9Sstevel@tonic-gate  * void squeue_unbind(sqp)
737c478bd9Sstevel@tonic-gate  *
747c478bd9Sstevel@tonic-gate  *   Unbind the worker thread from its preferred CPU.
757c478bd9Sstevel@tonic-gate  *
767c478bd9Sstevel@tonic-gate  * void squeue_enter(*sqp, *mp, proc, arg, tag)
777c478bd9Sstevel@tonic-gate  *
787c478bd9Sstevel@tonic-gate  *   Post a single request for processing. Each request consists of mblock 'mp',
797c478bd9Sstevel@tonic-gate  *   function 'proc' to execute and an argument 'arg' to pass to this
807c478bd9Sstevel@tonic-gate  *   function. The function is called as (*proc)(arg, mp, sqp); The tag is an
817c478bd9Sstevel@tonic-gate  *   arbitrary number from 0 to 255 which will be stored in mp to track exact
827c478bd9Sstevel@tonic-gate  *   caller of squeue_enter. The combination of function name and the tag should
837c478bd9Sstevel@tonic-gate  *   provide enough information to identify the caller.
847c478bd9Sstevel@tonic-gate  *
857c478bd9Sstevel@tonic-gate  *   If no one is processing the squeue, squeue_enter() will call the function
867c478bd9Sstevel@tonic-gate  *   immediately. Otherwise it will add the request to the queue for later
877c478bd9Sstevel@tonic-gate  *   processing. Once the function is executed, the thread may continue
887c478bd9Sstevel@tonic-gate  *   executing all other requests pending on the queue.
897c478bd9Sstevel@tonic-gate  *
907c478bd9Sstevel@tonic-gate  *   NOTE: The tagging information is only used when SQUEUE_DEBUG is set to 1.
917c478bd9Sstevel@tonic-gate  *   NOTE: The argument can be conn_t only. Ideally we'd like to have generic
927c478bd9Sstevel@tonic-gate  *	   argument, but we want to drop connection reference count here - this
937c478bd9Sstevel@tonic-gate  *	   improves tail-call optimizations.
947c478bd9Sstevel@tonic-gate  *	   XXX: The arg should have type conn_t.
957c478bd9Sstevel@tonic-gate  *
967c478bd9Sstevel@tonic-gate  * void squeue_enter_nodrain(*sqp, *mp, proc, arg, tag)
977c478bd9Sstevel@tonic-gate  *
987c478bd9Sstevel@tonic-gate  *   Same as squeue_enter(), but the entering thread will only try to execute a
997c478bd9Sstevel@tonic-gate  *   single request. It will not continue executing any pending requests.
1007c478bd9Sstevel@tonic-gate  *
1017c478bd9Sstevel@tonic-gate  * void squeue_fill(*sqp, *mp, proc, arg, tag)
1027c478bd9Sstevel@tonic-gate  *
1037c478bd9Sstevel@tonic-gate  *   Just place the request on the queue without trying to execute it. Arrange
1047c478bd9Sstevel@tonic-gate  *   for the worker thread to process the request.
1057c478bd9Sstevel@tonic-gate  *
1067c478bd9Sstevel@tonic-gate  * void squeue_profile_enable(sqp)
1077c478bd9Sstevel@tonic-gate  * void squeue_profile_disable(sqp)
1087c478bd9Sstevel@tonic-gate  *
1097c478bd9Sstevel@tonic-gate  *    Enable or disable profiling for specified 'sqp'. Profiling is only
1107c478bd9Sstevel@tonic-gate  *    available when SQUEUE_PROFILE is set.
1117c478bd9Sstevel@tonic-gate  *
1127c478bd9Sstevel@tonic-gate  * void squeue_profile_reset(sqp)
1137c478bd9Sstevel@tonic-gate  *
1147c478bd9Sstevel@tonic-gate  *    Reset all profiling information to zero. Profiling is only
1157c478bd9Sstevel@tonic-gate  *    available when SQUEUE_PROFILE is set.
1167c478bd9Sstevel@tonic-gate  *
1177c478bd9Sstevel@tonic-gate  * void squeue_profile_start()
1187c478bd9Sstevel@tonic-gate  * void squeue_profile_stop()
1197c478bd9Sstevel@tonic-gate  *
 *    Globally enable or disable profiling for all squeues.
1217c478bd9Sstevel@tonic-gate  *
1227c478bd9Sstevel@tonic-gate  * uintptr_t *squeue_getprivate(sqp, p)
1237c478bd9Sstevel@tonic-gate  *
1247c478bd9Sstevel@tonic-gate  *    Each squeue keeps small amount of private data space available for various
1257c478bd9Sstevel@tonic-gate  *    consumers. Current consumers include TCP and NCA. Other consumers need to
1267c478bd9Sstevel@tonic-gate  *    add their private tag to the sqprivate_t enum. The private information is
1277c478bd9Sstevel@tonic-gate  *    limited to an uintptr_t value. The squeue has no knowledge of its content
1287c478bd9Sstevel@tonic-gate  *    and does not manage it in any way.
1297c478bd9Sstevel@tonic-gate  *
1307c478bd9Sstevel@tonic-gate  *    The typical use may be a breakdown of data structures per CPU (since
1317c478bd9Sstevel@tonic-gate  *    squeues are usually per CPU). See NCA for examples of use.
1327c478bd9Sstevel@tonic-gate  *    Currently 'p' may have one legal value SQPRIVATE_TCP.
1337c478bd9Sstevel@tonic-gate  *
1347c478bd9Sstevel@tonic-gate  * processorid_t squeue_binding(sqp)
1357c478bd9Sstevel@tonic-gate  *
1367c478bd9Sstevel@tonic-gate  *    Returns the CPU binding for a given squeue.
1377c478bd9Sstevel@tonic-gate  *
 * TUNABLES:
1397c478bd9Sstevel@tonic-gate  *
1407c478bd9Sstevel@tonic-gate  * squeue_intrdrain_ms: Maximum time in ms interrupts spend draining any
1417c478bd9Sstevel@tonic-gate  *	squeue. Note that this is approximation - squeues have no control on the
1427c478bd9Sstevel@tonic-gate  *	time it takes to process each request. This limit is only checked
1437c478bd9Sstevel@tonic-gate  *	between processing individual messages.
1447c478bd9Sstevel@tonic-gate  *    Default: 20 ms.
1457c478bd9Sstevel@tonic-gate  *
1467c478bd9Sstevel@tonic-gate  * squeue_writerdrain_ms: Maximum time in ms non-interrupts spend draining any
1477c478bd9Sstevel@tonic-gate  *	squeue. Note that this is approximation - squeues have no control on the
1487c478bd9Sstevel@tonic-gate  *	time it takes to process each request. This limit is only checked
1497c478bd9Sstevel@tonic-gate  *	between processing individual messages.
1507c478bd9Sstevel@tonic-gate  *    Default: 10 ms.
1517c478bd9Sstevel@tonic-gate  *
1527c478bd9Sstevel@tonic-gate  * squeue_workerdrain_ms: Maximum time in ms worker thread spends draining any
1537c478bd9Sstevel@tonic-gate  *	squeue. Note that this is approximation - squeues have no control on the
1547c478bd9Sstevel@tonic-gate  *	time it takes to process each request. This limit is only checked
1557c478bd9Sstevel@tonic-gate  *	between processing individual messages.
1567c478bd9Sstevel@tonic-gate  *    Default: 10 ms.
1577c478bd9Sstevel@tonic-gate  *
1587c478bd9Sstevel@tonic-gate  * squeue_workerwait_ms: When worker thread is interrupted because workerdrain
1597c478bd9Sstevel@tonic-gate  *	expired, how much time to wait before waking worker thread again.
1607c478bd9Sstevel@tonic-gate  *    Default: 10 ms.
1617c478bd9Sstevel@tonic-gate  *
1627c478bd9Sstevel@tonic-gate  * DEFINES:
1637c478bd9Sstevel@tonic-gate  *
 * SQUEUE_DEBUG: If defined as 1, special code is compiled in which records
 *	additional information in the squeue to aid debugging.
1667c478bd9Sstevel@tonic-gate  *
1677c478bd9Sstevel@tonic-gate  * SQUEUE_PROFILE: If defined as 1, special code is compiled in which collects
1687c478bd9Sstevel@tonic-gate  *	various squeue statistics and exports them as kstats.
1697c478bd9Sstevel@tonic-gate  *
1707c478bd9Sstevel@tonic-gate  * Ideally we would like both SQUEUE_DEBUG and SQUEUE_PROFILE to be always set,
1717c478bd9Sstevel@tonic-gate  * but it affects performance, so they are enabled on DEBUG kernels and disabled
1727c478bd9Sstevel@tonic-gate  * on non-DEBUG by default.
1737c478bd9Sstevel@tonic-gate  */
1747c478bd9Sstevel@tonic-gate 
1757c478bd9Sstevel@tonic-gate #include <sys/types.h>
1767c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
1777c478bd9Sstevel@tonic-gate #include <sys/debug.h>
1787c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
1797c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h>
1807c478bd9Sstevel@tonic-gate #include <sys/condvar_impl.h>
1817c478bd9Sstevel@tonic-gate #include <sys/systm.h>
1827c478bd9Sstevel@tonic-gate #include <sys/callb.h>
1837c478bd9Sstevel@tonic-gate #include <sys/sdt.h>
1847c478bd9Sstevel@tonic-gate #include <sys/ddi.h>
1857c478bd9Sstevel@tonic-gate 
1867c478bd9Sstevel@tonic-gate #include <inet/ipclassifier.h>
187*d045b987Smasputra #include <inet/udp_impl.h>
1887c478bd9Sstevel@tonic-gate 
1897c478bd9Sstevel@tonic-gate /*
1907c478bd9Sstevel@tonic-gate  * State flags.
1917c478bd9Sstevel@tonic-gate  * Note: The MDB IP module depends on the values of these flags.
1927c478bd9Sstevel@tonic-gate  */
1937c478bd9Sstevel@tonic-gate #define	SQS_PROC	0x0001	/* being processed */
1947c478bd9Sstevel@tonic-gate #define	SQS_WORKER	0x0002	/* worker thread */
1957c478bd9Sstevel@tonic-gate #define	SQS_ENTER	0x0004	/* enter thread */
1967c478bd9Sstevel@tonic-gate #define	SQS_FAST	0x0008	/* enter-fast thread */
1977c478bd9Sstevel@tonic-gate #define	SQS_USER	0x0010	/* A non interrupt user */
1987c478bd9Sstevel@tonic-gate #define	SQS_BOUND	0x0020	/* Worker thread is bound */
1997c478bd9Sstevel@tonic-gate #define	SQS_PROFILE	0x0040	/* Enable profiling */
2007c478bd9Sstevel@tonic-gate #define	SQS_REENTER	0x0080	/* Re entered thread */
2017c478bd9Sstevel@tonic-gate #define	SQS_TMO_PROG	0x0100	/* Timeout is being set */
2027c478bd9Sstevel@tonic-gate 
2037c478bd9Sstevel@tonic-gate #ifdef DEBUG
2047c478bd9Sstevel@tonic-gate #define	SQUEUE_DEBUG 1
2057c478bd9Sstevel@tonic-gate #define	SQUEUE_PROFILE 1
2067c478bd9Sstevel@tonic-gate #else
2077c478bd9Sstevel@tonic-gate #define	SQUEUE_DEBUG 0
2087c478bd9Sstevel@tonic-gate #define	SQUEUE_PROFILE 0
2097c478bd9Sstevel@tonic-gate #endif
2107c478bd9Sstevel@tonic-gate 
2117c478bd9Sstevel@tonic-gate #include <sys/squeue_impl.h>
2127c478bd9Sstevel@tonic-gate 
2137c478bd9Sstevel@tonic-gate static void squeue_fire(void *);
214d19d6468Sbw static void squeue_drain(squeue_t *, uint_t, hrtime_t);
2157c478bd9Sstevel@tonic-gate static void squeue_worker(squeue_t *sqp);
2167c478bd9Sstevel@tonic-gate 
2177c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
2187c478bd9Sstevel@tonic-gate static kmutex_t squeue_kstat_lock;
2197c478bd9Sstevel@tonic-gate static int  squeue_kstat_update(kstat_t *, int);
2207c478bd9Sstevel@tonic-gate #endif
2217c478bd9Sstevel@tonic-gate 
2227c478bd9Sstevel@tonic-gate kmem_cache_t *squeue_cache;
2237c478bd9Sstevel@tonic-gate 
224d19d6468Sbw #define	SQUEUE_MSEC_TO_NSEC 1000000
225d19d6468Sbw 
2267c478bd9Sstevel@tonic-gate int squeue_intrdrain_ms = 20;
2277c478bd9Sstevel@tonic-gate int squeue_writerdrain_ms = 10;
2287c478bd9Sstevel@tonic-gate int squeue_workerdrain_ms = 10;
2297c478bd9Sstevel@tonic-gate int squeue_workerwait_ms = 10;
2307c478bd9Sstevel@tonic-gate 
/* The values above converted to ticks or nanoseconds */
232d19d6468Sbw static int squeue_intrdrain_ns = 0;
233d19d6468Sbw static int squeue_writerdrain_ns = 0;
234d19d6468Sbw static int squeue_workerdrain_ns = 0;
2357c478bd9Sstevel@tonic-gate static int squeue_workerwait_tick = 0;
2367c478bd9Sstevel@tonic-gate 
2377c478bd9Sstevel@tonic-gate /*
2387c478bd9Sstevel@tonic-gate  * The minimum packet queued when worker thread doing the drain triggers
2397c478bd9Sstevel@tonic-gate  * polling (if squeue allows it). The choice of 3 is arbitrary. You
2407c478bd9Sstevel@tonic-gate  * definitely don't want it to be 1 since that will trigger polling
 * on very low loads as well (ssh seems to be one such example
 * where packet flow was very low yet somehow 1 packet ended up getting
 * queued and worker thread fires every 10ms and blanking also gets
 * triggered).
2457c478bd9Sstevel@tonic-gate  */
2467c478bd9Sstevel@tonic-gate int squeue_worker_poll_min = 3;
2477c478bd9Sstevel@tonic-gate 
2487c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
2497c478bd9Sstevel@tonic-gate /*
2507c478bd9Sstevel@tonic-gate  * Set to B_TRUE to enable profiling.
2517c478bd9Sstevel@tonic-gate  */
2527c478bd9Sstevel@tonic-gate static int squeue_profile = B_FALSE;
2537c478bd9Sstevel@tonic-gate #define	SQ_PROFILING(sqp) (squeue_profile && ((sqp)->sq_state & SQS_PROFILE))
2547c478bd9Sstevel@tonic-gate 
2557c478bd9Sstevel@tonic-gate #define	SQSTAT(sqp, x) ((sqp)->sq_stats.x++)
2567c478bd9Sstevel@tonic-gate #define	SQDELTA(sqp, x, d) ((sqp)->sq_stats.x += (d))
2577c478bd9Sstevel@tonic-gate 
/*
 * Template for the per-squeue kstats exported as "ip:<bind>:<name>:net"
 * (see squeue_create).  The kstat is created KSTAT_FLAG_VIRTUAL and all
 * squeues share this single data buffer: ks_data points here, ks_lock is
 * squeue_kstat_lock, and ks_update is squeue_kstat_update, which
 * presumably copies the selected squeue's sq_stats into this buffer on
 * each read — confirm against squeue_kstat_update.
 */
struct squeue_kstat {
	kstat_named_t	sq_count;
	kstat_named_t	sq_max_qlen;
	kstat_named_t	sq_npackets_worker;
	kstat_named_t	sq_npackets_intr;
	kstat_named_t	sq_npackets_other;
	kstat_named_t	sq_nqueued_intr;
	kstat_named_t	sq_nqueued_other;
	kstat_named_t	sq_ndrains_worker;
	kstat_named_t	sq_ndrains_intr;
	kstat_named_t	sq_ndrains_other;
	kstat_named_t	sq_time_worker;
	kstat_named_t	sq_time_intr;
	kstat_named_t	sq_time_other;
} squeue_kstat = {
	{ "count",		KSTAT_DATA_UINT64 },
	{ "max_qlen",		KSTAT_DATA_UINT64 },
	{ "packets_worker",	KSTAT_DATA_UINT64 },
	{ "packets_intr",	KSTAT_DATA_UINT64 },
	{ "packets_other",	KSTAT_DATA_UINT64 },
	{ "queued_intr",	KSTAT_DATA_UINT64 },
	{ "queued_other",	KSTAT_DATA_UINT64 },
	{ "ndrains_worker",	KSTAT_DATA_UINT64 },
	{ "ndrains_intr",	KSTAT_DATA_UINT64 },
	{ "ndrains_other",	KSTAT_DATA_UINT64 },
	{ "time_worker",	KSTAT_DATA_UINT64 },
	{ "time_intr",		KSTAT_DATA_UINT64 },
	{ "time_other",		KSTAT_DATA_UINT64 },
};
2877c478bd9Sstevel@tonic-gate #endif
2887c478bd9Sstevel@tonic-gate 
/*
 * SQUEUE_WORKER_WAKEUP(sqp) - ensure the worker thread will eventually
 * process requests just queued.  Entered with sq_lock held; every path
 * drops the lock, and several paths 'return' directly from the calling
 * function, so this macro may only be used as the tail of a void function.
 *
 * Cases, in order:
 *  - sq_tid != 0: a wakeup timeout is already pending.  If we have
 *    already waited long enough, signal the worker now and cancel the
 *    timeout; otherwise leave the pending timeout to do the wakeup.
 *  - SQS_TMO_PROG set: another thread is mid-way through setting up a
 *    timeout; nothing to do.
 *  - sq_wait != 0: schedule squeue_fire() via timeout(), dropping sq_lock
 *    around the timeout() call to avoid lock contention, then re-check
 *    whether the timeout is still wanted (someone may have started
 *    processing, or squeue_fire may already have run).
 *  - otherwise: wake the worker immediately.
 *
 * NOTE(review): sq_wait is stored in ticks (MSEC_TO_TICK in
 * squeue_create) yet is compared against TICK_TO_MSEC(waited) below and
 * passed as a tick count to timeout() — the units look inconsistent;
 * confirm which unit sq_wait is meant to carry.
 */
#define	SQUEUE_WORKER_WAKEUP(sqp) {					\
	timeout_id_t tid = (sqp)->sq_tid;				\
									\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));				\
	/*								\
	 * Queue isn't being processed, so take				\
	 * any post enqueue actions needed before leaving.		\
	 */								\
	if (tid != 0) {							\
		/*							\
		 * Waiting for an enter() to process mblk(s).		\
		 */							\
		clock_t	waited = lbolt - (sqp)->sq_awaken;		\
									\
		if (TICK_TO_MSEC(waited) >= (sqp)->sq_wait) {		\
			/*						\
			 * Times up and have a worker thread		\
			 * waiting for work, so schedule it.		\
			 */						\
			(sqp)->sq_tid = 0;				\
			(sqp)->sq_awaken = lbolt;			\
			cv_signal(&(sqp)->sq_async);			\
			mutex_exit(&(sqp)->sq_lock);			\
			(void) untimeout(tid);				\
			return;						\
		}							\
		mutex_exit(&(sqp)->sq_lock);				\
		return;							\
	} else if ((sqp)->sq_state & SQS_TMO_PROG) {			\
		mutex_exit(&(sqp)->sq_lock);				\
		return;							\
	} else if ((sqp)->sq_wait != 0) {				\
		clock_t	wait = (sqp)->sq_wait;				\
		/*							\
		 * Wait up to sqp->sq_wait ms for an			\
		 * enter() to process this queue. We			\
		 * don't want to contend on timeout locks		\
		 * with sq_lock held for performance reasons,		\
		 * so drop the sq_lock before calling timeout		\
		 * but we need to check if timeout is required		\
		 * after re acquiring the sq_lock. Once			\
		 * the sq_lock is dropped, someone else could		\
		 * have processed the packet or the timeout could	\
		 * have already fired.					\
		 */							\
		(sqp)->sq_state |= SQS_TMO_PROG;			\
		mutex_exit(&(sqp)->sq_lock);				\
		tid = timeout(squeue_fire, (sqp), wait);		\
		mutex_enter(&(sqp)->sq_lock);				\
		/* Check again if we still need the timeout */		\
		if ((((sqp)->sq_state & (SQS_PROC|SQS_TMO_PROG)) ==	\
			SQS_TMO_PROG) && ((sqp)->sq_tid == 0) &&	\
			((sqp)->sq_first != NULL)) {			\
				(sqp)->sq_state &= ~SQS_TMO_PROG;	\
				(sqp)->sq_awaken = lbolt;		\
				(sqp)->sq_tid = tid;			\
				mutex_exit(&(sqp)->sq_lock);		\
				return;					\
		} else {						\
			if ((sqp)->sq_state & SQS_TMO_PROG) {		\
				(sqp)->sq_state &= ~SQS_TMO_PROG;	\
				mutex_exit(&(sqp)->sq_lock);		\
				(void) untimeout(tid);			\
			} else {					\
				/*					\
				 * The timer fired before we could 	\
				 * reacquire the sq_lock. squeue_fire	\
				 * removes the SQS_TMO_PROG flag	\
				 * and we don't need to	do anything	\
				 * else.				\
				 */					\
				mutex_exit(&(sqp)->sq_lock);		\
			}						\
		}							\
	} else {							\
		/*							\
		 * Schedule the worker thread.				\
		 */							\
		(sqp)->sq_awaken = lbolt;				\
		cv_signal(&(sqp)->sq_async);				\
		mutex_exit(&(sqp)->sq_lock);				\
	}								\
	ASSERT(MUTEX_NOT_HELD(&(sqp)->sq_lock)); 			\
}
3737c478bd9Sstevel@tonic-gate 
/*
 * ENQUEUE_MP(sqp, mp, proc, arg) - append a single request to the squeue.
 * The mblk itself carries the work: 'proc' is stashed in mp->b_queue and
 * 'arg' in mp->b_prev until the drain loop extracts them.  Caller holds
 * sq_lock.
 *
 * Fix: dropped the original leading "(mp)->b_queue = NULL;" — it was a
 * dead store, unconditionally overwritten by the (queue_t *)(proc)
 * assignment two statements later and not examined by the ASSERT between
 * them.
 */
#define	ENQUEUE_MP(sqp, mp, proc, arg) {			\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
	ASSERT((mp)->b_prev == NULL && (mp)->b_next == NULL); 	\
	(mp)->b_queue = (queue_t *)(proc);			\
	(mp)->b_prev = (mblk_t *)(arg);				\
								\
	/* Link onto the tail of the sq_first/sq_last list. */	\
	if ((sqp)->sq_last != NULL)				\
		(sqp)->sq_last->b_next = (mp);			\
	else							\
		(sqp)->sq_first = (mp);				\
	(sqp)->sq_last = (mp);					\
	(sqp)->sq_count++;					\
	ASSERT((sqp)->sq_count > 0);				\
	DTRACE_PROBE2(squeue__enqueue, squeue_t *, sqp,		\
	    mblk_t *, mp);					\
}
3947c478bd9Sstevel@tonic-gate 
3957c478bd9Sstevel@tonic-gate 
/*
 * ENQUEUE_CHAIN(sqp, mp, tail, cnt) - append a pre-linked chain of 'cnt'
 * requests to the squeue.  'mp' is the head and 'tail' the last mblk of a
 * chain already connected through b_next; each mblk must already carry its
 * proc/arg (see squeue_enter_chain: "The proc and arg for each mblk is
 * already stored in the mblk in appropriate places").  Caller holds
 * sq_lock.
 */
#define	ENQUEUE_CHAIN(sqp, mp, tail, cnt) {			\
	/*							\
	 * Enqueue our mblk chain.				\
	 */							\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
								\
	if ((sqp)->sq_last != NULL)				\
		(sqp)->sq_last->b_next = (mp);			\
	else							\
		(sqp)->sq_first = (mp);				\
	(sqp)->sq_last = (tail);				\
	(sqp)->sq_count += (cnt);				\
	ASSERT((sqp)->sq_count > 0);				\
	DTRACE_PROBE4(squeue__enqueuechain, squeue_t *, sqp,	\
		mblk_t *, mp, mblk_t *, tail, int, cnt);	\
								\
}
4137c478bd9Sstevel@tonic-gate 
/*
 * SQS_POLLING_ON(sqp, rx_ring) - put the rx ring that feeds this squeue
 * into polling mode.  rr_blank() is programmed with a blanking interval
 * estimated from sq_avg_drain_time * sq_count (capped at
 * rr_max_blank_time) and rr_max_pkt_cnt; then the ring is marked
 * ILL_POLLING and the start time recorded in lbolt ticks.  Caller holds
 * sq_lock.
 */
#define	SQS_POLLING_ON(sqp, rx_ring) {				\
	ASSERT(rx_ring != NULL);				\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
	rx_ring->rr_blank(rx_ring->rr_handle,			\
	    MIN((sqp->sq_avg_drain_time * sqp->sq_count),	\
		rx_ring->rr_max_blank_time),			\
		rx_ring->rr_max_pkt_cnt);			\
	rx_ring->rr_poll_state |= ILL_POLLING;			\
	rx_ring->rr_poll_time = lbolt;				\
}
4247c478bd9Sstevel@tonic-gate 
4257c478bd9Sstevel@tonic-gate 
/*
 * SQS_POLLING_OFF(sqp, rx_ring) - restore the rx ring's minimum blanking
 * parameters via rr_blank().  Note this macro does not clear ILL_POLLING
 * from rr_poll_state; presumably the caller handles that — confirm at the
 * call sites.  Caller holds sq_lock.
 */
#define	SQS_POLLING_OFF(sqp, rx_ring) {				\
	ASSERT(rx_ring != NULL);				\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
	rx_ring->rr_blank(rx_ring->rr_handle,			\
	    rx_ring->rr_min_blank_time,				\
	    rx_ring->rr_min_pkt_cnt);				\
}
4337c478bd9Sstevel@tonic-gate 
4347c478bd9Sstevel@tonic-gate void
4357c478bd9Sstevel@tonic-gate squeue_init(void)
4367c478bd9Sstevel@tonic-gate {
4377c478bd9Sstevel@tonic-gate 	squeue_cache = kmem_cache_create("squeue_cache",
4387c478bd9Sstevel@tonic-gate 	    sizeof (squeue_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
4397c478bd9Sstevel@tonic-gate 
440d19d6468Sbw 	squeue_intrdrain_ns = squeue_intrdrain_ms * SQUEUE_MSEC_TO_NSEC;
441d19d6468Sbw 	squeue_writerdrain_ns = squeue_writerdrain_ms * SQUEUE_MSEC_TO_NSEC;
442d19d6468Sbw 	squeue_workerdrain_ns = squeue_workerdrain_ms * SQUEUE_MSEC_TO_NSEC;
4437c478bd9Sstevel@tonic-gate 	squeue_workerwait_tick = MSEC_TO_TICK_ROUNDUP(squeue_workerwait_ms);
4447c478bd9Sstevel@tonic-gate }
4457c478bd9Sstevel@tonic-gate 
4467c478bd9Sstevel@tonic-gate /* ARGSUSED */
4477c478bd9Sstevel@tonic-gate squeue_t *
4487c478bd9Sstevel@tonic-gate squeue_create(char *name, processorid_t bind, clock_t wait, pri_t pri)
4497c478bd9Sstevel@tonic-gate {
4507c478bd9Sstevel@tonic-gate 	squeue_t *sqp = kmem_cache_alloc(squeue_cache, KM_SLEEP);
4517c478bd9Sstevel@tonic-gate 
4527c478bd9Sstevel@tonic-gate 	bzero(sqp, sizeof (squeue_t));
4537c478bd9Sstevel@tonic-gate 	(void) strncpy(sqp->sq_name, name, SQ_NAMELEN + 1);
4547c478bd9Sstevel@tonic-gate 	sqp->sq_name[SQ_NAMELEN] = '\0';
4557c478bd9Sstevel@tonic-gate 
4567c478bd9Sstevel@tonic-gate 	sqp->sq_bind = bind;
4577c478bd9Sstevel@tonic-gate 	sqp->sq_wait = MSEC_TO_TICK(wait);
4587c478bd9Sstevel@tonic-gate 	sqp->sq_avg_drain_time =
459d19d6468Sbw 	    drv_hztousec(NSEC_TO_TICK_ROUNDUP(squeue_intrdrain_ns)) /
460d19d6468Sbw 	    NSEC_TO_TICK_ROUNDUP(squeue_intrdrain_ns);
4617c478bd9Sstevel@tonic-gate 
4627c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
4637c478bd9Sstevel@tonic-gate 	if ((sqp->sq_kstat = kstat_create("ip", bind, name,
4647c478bd9Sstevel@tonic-gate 		"net", KSTAT_TYPE_NAMED,
4657c478bd9Sstevel@tonic-gate 		sizeof (squeue_kstat) / sizeof (kstat_named_t),
4667c478bd9Sstevel@tonic-gate 		KSTAT_FLAG_VIRTUAL)) != NULL) {
4677c478bd9Sstevel@tonic-gate 		sqp->sq_kstat->ks_lock = &squeue_kstat_lock;
4687c478bd9Sstevel@tonic-gate 		sqp->sq_kstat->ks_data = &squeue_kstat;
4697c478bd9Sstevel@tonic-gate 		sqp->sq_kstat->ks_update = squeue_kstat_update;
4707c478bd9Sstevel@tonic-gate 		sqp->sq_kstat->ks_private = sqp;
4717c478bd9Sstevel@tonic-gate 		kstat_install(sqp->sq_kstat);
4727c478bd9Sstevel@tonic-gate 	}
4737c478bd9Sstevel@tonic-gate #endif
4747c478bd9Sstevel@tonic-gate 
4757c478bd9Sstevel@tonic-gate 	sqp->sq_worker = thread_create(NULL, 0, squeue_worker,
4767c478bd9Sstevel@tonic-gate 	    sqp, 0, &p0, TS_RUN, pri);
4777c478bd9Sstevel@tonic-gate 
4787c478bd9Sstevel@tonic-gate 	return (sqp);
4797c478bd9Sstevel@tonic-gate }
4807c478bd9Sstevel@tonic-gate 
4817c478bd9Sstevel@tonic-gate /* ARGSUSED */
4827c478bd9Sstevel@tonic-gate void
4837c478bd9Sstevel@tonic-gate squeue_bind(squeue_t *sqp, processorid_t bind)
4847c478bd9Sstevel@tonic-gate {
4857c478bd9Sstevel@tonic-gate 	ASSERT(bind == -1);
4867c478bd9Sstevel@tonic-gate 
4877c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
4887c478bd9Sstevel@tonic-gate 	if (sqp->sq_state & SQS_BOUND) {
4897c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
4907c478bd9Sstevel@tonic-gate 		return;
4917c478bd9Sstevel@tonic-gate 	}
4927c478bd9Sstevel@tonic-gate 
4937c478bd9Sstevel@tonic-gate 	sqp->sq_state |= SQS_BOUND;
4947c478bd9Sstevel@tonic-gate 	mutex_exit(&sqp->sq_lock);
4957c478bd9Sstevel@tonic-gate 
4967c478bd9Sstevel@tonic-gate 	thread_affinity_set(sqp->sq_worker, sqp->sq_bind);
4977c478bd9Sstevel@tonic-gate }
4987c478bd9Sstevel@tonic-gate 
4997c478bd9Sstevel@tonic-gate void
5007c478bd9Sstevel@tonic-gate squeue_unbind(squeue_t *sqp)
5017c478bd9Sstevel@tonic-gate {
5027c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
5037c478bd9Sstevel@tonic-gate 	if (!(sqp->sq_state & SQS_BOUND)) {
5047c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
5057c478bd9Sstevel@tonic-gate 		return;
5067c478bd9Sstevel@tonic-gate 	}
5077c478bd9Sstevel@tonic-gate 
5087c478bd9Sstevel@tonic-gate 	sqp->sq_state &= ~SQS_BOUND;
5097c478bd9Sstevel@tonic-gate 	mutex_exit(&sqp->sq_lock);
5107c478bd9Sstevel@tonic-gate 
5117c478bd9Sstevel@tonic-gate 	thread_affinity_clear(sqp->sq_worker);
5127c478bd9Sstevel@tonic-gate }
5137c478bd9Sstevel@tonic-gate 
5147c478bd9Sstevel@tonic-gate /*
5157c478bd9Sstevel@tonic-gate  * squeue_enter() - enter squeue sqp with mblk mp (which can be
5167c478bd9Sstevel@tonic-gate  * a chain), while tail points to the end and cnt in number of
5177c478bd9Sstevel@tonic-gate  * mblks in the chain.
5187c478bd9Sstevel@tonic-gate  *
5197c478bd9Sstevel@tonic-gate  * For a chain of single packet (i.e. mp == tail), go through the
5207c478bd9Sstevel@tonic-gate  * fast path if no one is processing the squeue and nothing is queued.
5217c478bd9Sstevel@tonic-gate  *
5227c478bd9Sstevel@tonic-gate  * The proc and arg for each mblk is already stored in the mblk in
5237c478bd9Sstevel@tonic-gate  * appropriate places.
5247c478bd9Sstevel@tonic-gate  */
5257c478bd9Sstevel@tonic-gate void
5267c478bd9Sstevel@tonic-gate squeue_enter_chain(squeue_t *sqp, mblk_t *mp, mblk_t *tail,
5277c478bd9Sstevel@tonic-gate     uint32_t cnt, uint8_t tag)
5287c478bd9Sstevel@tonic-gate {
5297c478bd9Sstevel@tonic-gate 	int		interrupt = servicing_interrupt();
5307c478bd9Sstevel@tonic-gate 	void 		*arg;
5317c478bd9Sstevel@tonic-gate 	sqproc_t	proc;
532d19d6468Sbw 	hrtime_t	now;
5337c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
5347c478bd9Sstevel@tonic-gate 	hrtime_t 	start, delta;
5357c478bd9Sstevel@tonic-gate #endif
5367c478bd9Sstevel@tonic-gate 
5377c478bd9Sstevel@tonic-gate 	ASSERT(sqp != NULL);
5387c478bd9Sstevel@tonic-gate 	ASSERT(mp != NULL);
5397c478bd9Sstevel@tonic-gate 	ASSERT(tail != NULL);
5407c478bd9Sstevel@tonic-gate 	ASSERT(cnt > 0);
5417c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
5427c478bd9Sstevel@tonic-gate 
5437c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
5447c478bd9Sstevel@tonic-gate 	if (!(sqp->sq_state & SQS_PROC)) {
5457c478bd9Sstevel@tonic-gate 		/*
5467c478bd9Sstevel@tonic-gate 		 * See if anything is already queued. If we are the
5477c478bd9Sstevel@tonic-gate 		 * first packet, do inline processing else queue the
5487c478bd9Sstevel@tonic-gate 		 * packet and do the drain.
5497c478bd9Sstevel@tonic-gate 		 */
5507c478bd9Sstevel@tonic-gate 		sqp->sq_run = curthread;
5517c478bd9Sstevel@tonic-gate 		if (sqp->sq_first == NULL && cnt == 1) {
5527c478bd9Sstevel@tonic-gate 			/*
5537c478bd9Sstevel@tonic-gate 			 * Fast-path, ok to process and nothing queued.
5547c478bd9Sstevel@tonic-gate 			 */
5557c478bd9Sstevel@tonic-gate 			sqp->sq_state |= (SQS_PROC|SQS_FAST);
5567c478bd9Sstevel@tonic-gate 			mutex_exit(&sqp->sq_lock);
5577c478bd9Sstevel@tonic-gate 
5587c478bd9Sstevel@tonic-gate 			/*
5597c478bd9Sstevel@tonic-gate 			 * We are the chain of 1 packet so
5607c478bd9Sstevel@tonic-gate 			 * go through this fast path.
5617c478bd9Sstevel@tonic-gate 			 */
5627c478bd9Sstevel@tonic-gate 			arg = mp->b_prev;
5637c478bd9Sstevel@tonic-gate 			mp->b_prev = NULL;
5647c478bd9Sstevel@tonic-gate 			proc = (sqproc_t)mp->b_queue;
5657c478bd9Sstevel@tonic-gate 			mp->b_queue = NULL;
5667c478bd9Sstevel@tonic-gate 
5677c478bd9Sstevel@tonic-gate 			ASSERT(proc != NULL);
5687c478bd9Sstevel@tonic-gate 			ASSERT(arg != NULL);
5697c478bd9Sstevel@tonic-gate 			ASSERT(mp->b_next == NULL);
5707c478bd9Sstevel@tonic-gate 
5717c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
5727c478bd9Sstevel@tonic-gate 			sqp->sq_isintr = interrupt;
5737c478bd9Sstevel@tonic-gate 			sqp->sq_curmp = mp;
5747c478bd9Sstevel@tonic-gate 			sqp->sq_curproc = proc;
5757c478bd9Sstevel@tonic-gate 			sqp->sq_connp = arg;
5767c478bd9Sstevel@tonic-gate 			mp->b_tag = sqp->sq_tag = tag;
5777c478bd9Sstevel@tonic-gate #endif
5787c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
5797c478bd9Sstevel@tonic-gate 			if (SQ_PROFILING(sqp)) {
5807c478bd9Sstevel@tonic-gate 				if (interrupt)
5817c478bd9Sstevel@tonic-gate 					SQSTAT(sqp, sq_npackets_intr);
5827c478bd9Sstevel@tonic-gate 				else
5837c478bd9Sstevel@tonic-gate 					SQSTAT(sqp, sq_npackets_other);
5847c478bd9Sstevel@tonic-gate 				start = gethrtime();
5857c478bd9Sstevel@tonic-gate 			}
5867c478bd9Sstevel@tonic-gate #endif
5877c478bd9Sstevel@tonic-gate 			((conn_t *)arg)->conn_on_sqp = B_TRUE;
5887c478bd9Sstevel@tonic-gate 			DTRACE_PROBE3(squeue__proc__start, squeue_t *,
5897c478bd9Sstevel@tonic-gate 			    sqp, mblk_t *, mp, conn_t *, arg);
5907c478bd9Sstevel@tonic-gate 			(*proc)(arg, mp, sqp);
5917c478bd9Sstevel@tonic-gate 			DTRACE_PROBE2(squeue__proc__end, squeue_t *,
5927c478bd9Sstevel@tonic-gate 			    sqp, conn_t *, arg);
5937c478bd9Sstevel@tonic-gate 			((conn_t *)arg)->conn_on_sqp = B_FALSE;
5947c478bd9Sstevel@tonic-gate 
5957c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
5967c478bd9Sstevel@tonic-gate 			if (SQ_PROFILING(sqp)) {
5977c478bd9Sstevel@tonic-gate 				delta = gethrtime() - start;
5987c478bd9Sstevel@tonic-gate 				if (interrupt)
5997c478bd9Sstevel@tonic-gate 					SQDELTA(sqp, sq_time_intr, delta);
6007c478bd9Sstevel@tonic-gate 				else
6017c478bd9Sstevel@tonic-gate 					SQDELTA(sqp, sq_time_other, delta);
6027c478bd9Sstevel@tonic-gate 			}
6037c478bd9Sstevel@tonic-gate #endif
6047c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
6057c478bd9Sstevel@tonic-gate 			sqp->sq_curmp = NULL;
6067c478bd9Sstevel@tonic-gate 			sqp->sq_curproc = NULL;
6077c478bd9Sstevel@tonic-gate 			sqp->sq_connp = NULL;
6087c478bd9Sstevel@tonic-gate 			sqp->sq_isintr = 0;
6097c478bd9Sstevel@tonic-gate #endif
6107c478bd9Sstevel@tonic-gate 
6117c478bd9Sstevel@tonic-gate 			CONN_DEC_REF((conn_t *)arg);
6127c478bd9Sstevel@tonic-gate 			ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
6137c478bd9Sstevel@tonic-gate 			mutex_enter(&sqp->sq_lock);
6147c478bd9Sstevel@tonic-gate 			sqp->sq_state &= ~(SQS_PROC|SQS_FAST);
6157c478bd9Sstevel@tonic-gate 			if (sqp->sq_first == NULL) {
6167c478bd9Sstevel@tonic-gate 				/*
6177c478bd9Sstevel@tonic-gate 				 * We processed inline our packet and
6187c478bd9Sstevel@tonic-gate 				 * nothing new has arrived. We are done.
6197c478bd9Sstevel@tonic-gate 				 */
6207c478bd9Sstevel@tonic-gate 				sqp->sq_run = NULL;
6217c478bd9Sstevel@tonic-gate 				mutex_exit(&sqp->sq_lock);
6227c478bd9Sstevel@tonic-gate 				return;
6237c478bd9Sstevel@tonic-gate 			} else if (sqp->sq_bind != CPU->cpu_id) {
6247c478bd9Sstevel@tonic-gate 				/*
6257c478bd9Sstevel@tonic-gate 				 * If the current thread is not running
6267c478bd9Sstevel@tonic-gate 				 * on the CPU to which this squeue is bound,
6277c478bd9Sstevel@tonic-gate 				 * then don't allow it to drain.
6287c478bd9Sstevel@tonic-gate 				 */
6297c478bd9Sstevel@tonic-gate 				sqp->sq_run = NULL;
6307c478bd9Sstevel@tonic-gate 				SQUEUE_WORKER_WAKEUP(sqp);
6317c478bd9Sstevel@tonic-gate 				return;
6327c478bd9Sstevel@tonic-gate 			}
6337c478bd9Sstevel@tonic-gate 		} else {
6347c478bd9Sstevel@tonic-gate 			ENQUEUE_CHAIN(sqp, mp, tail, cnt);
6357c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
6367c478bd9Sstevel@tonic-gate 			mp->b_tag = tag;
6377c478bd9Sstevel@tonic-gate #endif
6387c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
6397c478bd9Sstevel@tonic-gate 			if (SQ_PROFILING(sqp)) {
6407c478bd9Sstevel@tonic-gate 				if (servicing_interrupt())
6417c478bd9Sstevel@tonic-gate 					SQSTAT(sqp, sq_nqueued_intr);
6427c478bd9Sstevel@tonic-gate 				else
6437c478bd9Sstevel@tonic-gate 					SQSTAT(sqp, sq_nqueued_other);
6447c478bd9Sstevel@tonic-gate 				if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
6457c478bd9Sstevel@tonic-gate 					sqp->sq_stats.sq_max_qlen =
6467c478bd9Sstevel@tonic-gate 					    sqp->sq_count;
6477c478bd9Sstevel@tonic-gate 			}
6487c478bd9Sstevel@tonic-gate #endif
6497c478bd9Sstevel@tonic-gate 		}
6507c478bd9Sstevel@tonic-gate 
6517c478bd9Sstevel@tonic-gate 		/*
6527c478bd9Sstevel@tonic-gate 		 * We are here because either we couldn't do inline
6537c478bd9Sstevel@tonic-gate 		 * processing (because something was already queued),
6547c478bd9Sstevel@tonic-gate 		 * or we had a chanin of more than one packet,
6557c478bd9Sstevel@tonic-gate 		 * or something else arrived after we were done with
6567c478bd9Sstevel@tonic-gate 		 * inline processing.
6577c478bd9Sstevel@tonic-gate 		 */
6587c478bd9Sstevel@tonic-gate 		ASSERT(MUTEX_HELD(&sqp->sq_lock));
6597c478bd9Sstevel@tonic-gate 		ASSERT(sqp->sq_first != NULL);
6607c478bd9Sstevel@tonic-gate 
6617c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
6627c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
6637c478bd9Sstevel@tonic-gate 			start = gethrtime();
6647c478bd9Sstevel@tonic-gate 		}
6657c478bd9Sstevel@tonic-gate #endif
6667c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
6677c478bd9Sstevel@tonic-gate 		sqp->sq_isintr = interrupt;
6687c478bd9Sstevel@tonic-gate #endif
6697c478bd9Sstevel@tonic-gate 
670d19d6468Sbw 		now = gethrtime();
6717c478bd9Sstevel@tonic-gate 		if (interrupt) {
672d19d6468Sbw 			squeue_drain(sqp, SQS_ENTER, now +
673d19d6468Sbw 			    squeue_intrdrain_ns);
6747c478bd9Sstevel@tonic-gate 		} else {
675d19d6468Sbw 			squeue_drain(sqp, SQS_USER, now +
676d19d6468Sbw 			    squeue_writerdrain_ns);
6777c478bd9Sstevel@tonic-gate 		}
6787c478bd9Sstevel@tonic-gate 
6797c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
6807c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
6817c478bd9Sstevel@tonic-gate 			delta = gethrtime() - start;
6827c478bd9Sstevel@tonic-gate 			if (interrupt)
6837c478bd9Sstevel@tonic-gate 				SQDELTA(sqp, sq_time_intr, delta);
6847c478bd9Sstevel@tonic-gate 			else
6857c478bd9Sstevel@tonic-gate 				SQDELTA(sqp, sq_time_other, delta);
6867c478bd9Sstevel@tonic-gate 		}
6877c478bd9Sstevel@tonic-gate #endif
6887c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
6897c478bd9Sstevel@tonic-gate 		sqp->sq_isintr = 0;
6907c478bd9Sstevel@tonic-gate #endif
6917c478bd9Sstevel@tonic-gate 
6927c478bd9Sstevel@tonic-gate 		/*
6937c478bd9Sstevel@tonic-gate 		 * If we didn't do a complete drain, the worker
6947c478bd9Sstevel@tonic-gate 		 * thread was already signalled by squeue_drain.
6957c478bd9Sstevel@tonic-gate 		 */
6967c478bd9Sstevel@tonic-gate 		sqp->sq_run = NULL;
6977c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
6987c478bd9Sstevel@tonic-gate 		return;
6997c478bd9Sstevel@tonic-gate 	} else {
7007c478bd9Sstevel@tonic-gate 		ASSERT(sqp->sq_run != NULL);
7017c478bd9Sstevel@tonic-gate 		/*
7027c478bd9Sstevel@tonic-gate 		 * Queue is already being processed. Just enqueue
7037c478bd9Sstevel@tonic-gate 		 * the packet and go away.
7047c478bd9Sstevel@tonic-gate 		 */
7057c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
7067c478bd9Sstevel@tonic-gate 		mp->b_tag = tag;
7077c478bd9Sstevel@tonic-gate #endif
7087c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
7097c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
7107c478bd9Sstevel@tonic-gate 			if (servicing_interrupt())
7117c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_nqueued_intr);
7127c478bd9Sstevel@tonic-gate 			else
7137c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_nqueued_other);
7147c478bd9Sstevel@tonic-gate 			if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
7157c478bd9Sstevel@tonic-gate 				sqp->sq_stats.sq_max_qlen = sqp->sq_count;
7167c478bd9Sstevel@tonic-gate 		}
7177c478bd9Sstevel@tonic-gate #endif
7187c478bd9Sstevel@tonic-gate 
7197c478bd9Sstevel@tonic-gate 		ENQUEUE_CHAIN(sqp, mp, tail, cnt);
7207c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
7217c478bd9Sstevel@tonic-gate 		return;
7227c478bd9Sstevel@tonic-gate 	}
7237c478bd9Sstevel@tonic-gate }
7247c478bd9Sstevel@tonic-gate 
7257c478bd9Sstevel@tonic-gate /*
7267c478bd9Sstevel@tonic-gate  * squeue_enter() - enter squeue *sqp with mblk *mp with argument of *arg.
7277c478bd9Sstevel@tonic-gate  */
7287c478bd9Sstevel@tonic-gate void
7297c478bd9Sstevel@tonic-gate squeue_enter(squeue_t *sqp, mblk_t *mp, sqproc_t proc, void *arg,
7307c478bd9Sstevel@tonic-gate     uint8_t tag)
7317c478bd9Sstevel@tonic-gate {
7327c478bd9Sstevel@tonic-gate 	int	interrupt = servicing_interrupt();
733d19d6468Sbw 	hrtime_t now;
7347c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
7357c478bd9Sstevel@tonic-gate 	hrtime_t start, delta;
7367c478bd9Sstevel@tonic-gate #endif
7377c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
7387c478bd9Sstevel@tonic-gate 	conn_t 	*connp = (conn_t *)arg;
739ff550d0eSmasputra 	ASSERT(!IPCL_IS_TCP(connp) || connp->conn_tcp->tcp_connp == connp);
740ff550d0eSmasputra 	ASSERT(!IPCL_IS_UDP(connp) || connp->conn_udp->udp_connp == connp);
7417c478bd9Sstevel@tonic-gate #endif
7427c478bd9Sstevel@tonic-gate 
7437c478bd9Sstevel@tonic-gate 	ASSERT(proc != NULL);
7447c478bd9Sstevel@tonic-gate 	ASSERT(sqp != NULL);
7457c478bd9Sstevel@tonic-gate 	ASSERT(mp != NULL);
7467c478bd9Sstevel@tonic-gate 	ASSERT(mp->b_next == NULL);
7477c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
7487c478bd9Sstevel@tonic-gate 
7497c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
7507c478bd9Sstevel@tonic-gate 	if (!(sqp->sq_state & SQS_PROC)) {
7517c478bd9Sstevel@tonic-gate 		/*
7527c478bd9Sstevel@tonic-gate 		 * See if anything is already queued. If we are the
7537c478bd9Sstevel@tonic-gate 		 * first packet, do inline processing else queue the
7547c478bd9Sstevel@tonic-gate 		 * packet and do the drain.
7557c478bd9Sstevel@tonic-gate 		 */
7567c478bd9Sstevel@tonic-gate 		sqp->sq_run = curthread;
7577c478bd9Sstevel@tonic-gate 		if (sqp->sq_first == NULL) {
7587c478bd9Sstevel@tonic-gate 			/*
7597c478bd9Sstevel@tonic-gate 			 * Fast-path, ok to process and nothing queued.
7607c478bd9Sstevel@tonic-gate 			 */
7617c478bd9Sstevel@tonic-gate 			sqp->sq_state |= (SQS_PROC|SQS_FAST);
7627c478bd9Sstevel@tonic-gate 			mutex_exit(&sqp->sq_lock);
7637c478bd9Sstevel@tonic-gate 
7647c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
7657c478bd9Sstevel@tonic-gate 			sqp->sq_isintr = interrupt;
7667c478bd9Sstevel@tonic-gate 			sqp->sq_curmp = mp;
7677c478bd9Sstevel@tonic-gate 			sqp->sq_curproc = proc;
7687c478bd9Sstevel@tonic-gate 			sqp->sq_connp = connp;
7697c478bd9Sstevel@tonic-gate 			mp->b_tag = sqp->sq_tag = tag;
7707c478bd9Sstevel@tonic-gate #endif
7717c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
7727c478bd9Sstevel@tonic-gate 			if (SQ_PROFILING(sqp)) {
7737c478bd9Sstevel@tonic-gate 				if (interrupt)
7747c478bd9Sstevel@tonic-gate 					SQSTAT(sqp, sq_npackets_intr);
7757c478bd9Sstevel@tonic-gate 				else
7767c478bd9Sstevel@tonic-gate 					SQSTAT(sqp, sq_npackets_other);
7777c478bd9Sstevel@tonic-gate 				start = gethrtime();
7787c478bd9Sstevel@tonic-gate 			}
7797c478bd9Sstevel@tonic-gate #endif
7807c478bd9Sstevel@tonic-gate 			((conn_t *)arg)->conn_on_sqp = B_TRUE;
7817c478bd9Sstevel@tonic-gate 			DTRACE_PROBE3(squeue__proc__start, squeue_t *,
7827c478bd9Sstevel@tonic-gate 			    sqp, mblk_t *, mp, conn_t *, arg);
7837c478bd9Sstevel@tonic-gate 			(*proc)(arg, mp, sqp);
7847c478bd9Sstevel@tonic-gate 			DTRACE_PROBE2(squeue__proc__end, squeue_t *,
7857c478bd9Sstevel@tonic-gate 			    sqp, conn_t *, arg);
7867c478bd9Sstevel@tonic-gate 			((conn_t *)arg)->conn_on_sqp = B_FALSE;
7877c478bd9Sstevel@tonic-gate 
7887c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
7897c478bd9Sstevel@tonic-gate 			if (SQ_PROFILING(sqp)) {
7907c478bd9Sstevel@tonic-gate 				delta = gethrtime() - start;
7917c478bd9Sstevel@tonic-gate 				if (interrupt)
7927c478bd9Sstevel@tonic-gate 					SQDELTA(sqp, sq_time_intr, delta);
7937c478bd9Sstevel@tonic-gate 				else
7947c478bd9Sstevel@tonic-gate 					SQDELTA(sqp, sq_time_other, delta);
7957c478bd9Sstevel@tonic-gate 			}
7967c478bd9Sstevel@tonic-gate #endif
7977c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
7987c478bd9Sstevel@tonic-gate 			sqp->sq_curmp = NULL;
7997c478bd9Sstevel@tonic-gate 			sqp->sq_curproc = NULL;
8007c478bd9Sstevel@tonic-gate 			sqp->sq_connp = NULL;
8017c478bd9Sstevel@tonic-gate 			sqp->sq_isintr = 0;
8027c478bd9Sstevel@tonic-gate #endif
8037c478bd9Sstevel@tonic-gate 
8047c478bd9Sstevel@tonic-gate 			CONN_DEC_REF((conn_t *)arg);
8057c478bd9Sstevel@tonic-gate 			ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
8067c478bd9Sstevel@tonic-gate 			mutex_enter(&sqp->sq_lock);
8077c478bd9Sstevel@tonic-gate 			sqp->sq_state &= ~(SQS_PROC|SQS_FAST);
8087c478bd9Sstevel@tonic-gate 			if (sqp->sq_first == NULL) {
8097c478bd9Sstevel@tonic-gate 				/*
8107c478bd9Sstevel@tonic-gate 				 * We processed inline our packet and
8117c478bd9Sstevel@tonic-gate 				 * nothing new has arrived. We are done.
8127c478bd9Sstevel@tonic-gate 				 */
8137c478bd9Sstevel@tonic-gate 				sqp->sq_run = NULL;
8147c478bd9Sstevel@tonic-gate 				mutex_exit(&sqp->sq_lock);
8157c478bd9Sstevel@tonic-gate 				return;
8167c478bd9Sstevel@tonic-gate 			} else if (sqp->sq_bind != CPU->cpu_id) {
8177c478bd9Sstevel@tonic-gate 				/*
8187c478bd9Sstevel@tonic-gate 				 * If the current thread is not running
8197c478bd9Sstevel@tonic-gate 				 * on the CPU to which this squeue is bound,
8207c478bd9Sstevel@tonic-gate 				 * then don't allow it to drain.
8217c478bd9Sstevel@tonic-gate 				 */
8227c478bd9Sstevel@tonic-gate 				sqp->sq_run = NULL;
8237c478bd9Sstevel@tonic-gate 				SQUEUE_WORKER_WAKEUP(sqp);
8247c478bd9Sstevel@tonic-gate 				return;
8257c478bd9Sstevel@tonic-gate 			}
8267c478bd9Sstevel@tonic-gate 		} else {
8277c478bd9Sstevel@tonic-gate 			ENQUEUE_MP(sqp, mp, proc, arg);
8287c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
8297c478bd9Sstevel@tonic-gate 			mp->b_tag = tag;
8307c478bd9Sstevel@tonic-gate #endif
8317c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
8327c478bd9Sstevel@tonic-gate 			if (SQ_PROFILING(sqp)) {
8337c478bd9Sstevel@tonic-gate 				if (servicing_interrupt())
8347c478bd9Sstevel@tonic-gate 					SQSTAT(sqp, sq_nqueued_intr);
8357c478bd9Sstevel@tonic-gate 				else
8367c478bd9Sstevel@tonic-gate 					SQSTAT(sqp, sq_nqueued_other);
8377c478bd9Sstevel@tonic-gate 				if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
8387c478bd9Sstevel@tonic-gate 					sqp->sq_stats.sq_max_qlen =
8397c478bd9Sstevel@tonic-gate 					    sqp->sq_count;
8407c478bd9Sstevel@tonic-gate 			}
8417c478bd9Sstevel@tonic-gate #endif
8427c478bd9Sstevel@tonic-gate 		}
8437c478bd9Sstevel@tonic-gate 
8447c478bd9Sstevel@tonic-gate 		/*
8457c478bd9Sstevel@tonic-gate 		 * We are here because either we couldn't do inline
8467c478bd9Sstevel@tonic-gate 		 * processing (because something was already queued)
8477c478bd9Sstevel@tonic-gate 		 * or something else arrived after we were done with
8487c478bd9Sstevel@tonic-gate 		 * inline processing.
8497c478bd9Sstevel@tonic-gate 		 */
8507c478bd9Sstevel@tonic-gate 		ASSERT(MUTEX_HELD(&sqp->sq_lock));
8517c478bd9Sstevel@tonic-gate 		ASSERT(sqp->sq_first != NULL);
8527c478bd9Sstevel@tonic-gate 
8537c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
8547c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
8557c478bd9Sstevel@tonic-gate 			start = gethrtime();
8567c478bd9Sstevel@tonic-gate 		}
8577c478bd9Sstevel@tonic-gate #endif
8587c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
8597c478bd9Sstevel@tonic-gate 		sqp->sq_isintr = interrupt;
8607c478bd9Sstevel@tonic-gate #endif
8617c478bd9Sstevel@tonic-gate 
862d19d6468Sbw 		now = gethrtime();
8637c478bd9Sstevel@tonic-gate 		if (interrupt) {
864d19d6468Sbw 			squeue_drain(sqp, SQS_ENTER, now +
865d19d6468Sbw 			    squeue_intrdrain_ns);
8667c478bd9Sstevel@tonic-gate 		} else {
867d19d6468Sbw 			squeue_drain(sqp, SQS_USER, now +
868d19d6468Sbw 			    squeue_writerdrain_ns);
8697c478bd9Sstevel@tonic-gate 		}
8707c478bd9Sstevel@tonic-gate 
8717c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
8727c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
8737c478bd9Sstevel@tonic-gate 			delta = gethrtime() - start;
8747c478bd9Sstevel@tonic-gate 			if (interrupt)
8757c478bd9Sstevel@tonic-gate 				SQDELTA(sqp, sq_time_intr, delta);
8767c478bd9Sstevel@tonic-gate 			else
8777c478bd9Sstevel@tonic-gate 				SQDELTA(sqp, sq_time_other, delta);
8787c478bd9Sstevel@tonic-gate 		}
8797c478bd9Sstevel@tonic-gate #endif
8807c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
8817c478bd9Sstevel@tonic-gate 		sqp->sq_isintr = 0;
8827c478bd9Sstevel@tonic-gate #endif
8837c478bd9Sstevel@tonic-gate 
8847c478bd9Sstevel@tonic-gate 		/*
8857c478bd9Sstevel@tonic-gate 		 * If we didn't do a complete drain, the worker
8867c478bd9Sstevel@tonic-gate 		 * thread was already signalled by squeue_drain.
8877c478bd9Sstevel@tonic-gate 		 */
8887c478bd9Sstevel@tonic-gate 		sqp->sq_run = NULL;
8897c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
8907c478bd9Sstevel@tonic-gate 		return;
8917c478bd9Sstevel@tonic-gate 	} else {
8927c478bd9Sstevel@tonic-gate 		ASSERT(sqp->sq_run != NULL);
8937c478bd9Sstevel@tonic-gate 		/*
8947c478bd9Sstevel@tonic-gate 		 * We let a thread processing a squeue reenter only
8957c478bd9Sstevel@tonic-gate 		 * once. This helps the case of incoming connection
8967c478bd9Sstevel@tonic-gate 		 * where a SYN-ACK-ACK that triggers the conn_ind
8977c478bd9Sstevel@tonic-gate 		 * doesn't have to queue the packet if listener and
8987c478bd9Sstevel@tonic-gate 		 * eager are on the same squeue. Also helps the
8997c478bd9Sstevel@tonic-gate 		 * loopback connection where the two ends are bound
9007c478bd9Sstevel@tonic-gate 		 * to the same squeue (which is typical on single
9017c478bd9Sstevel@tonic-gate 		 * CPU machines).
9027c478bd9Sstevel@tonic-gate 		 * We let the thread reenter only once for the fear
9037c478bd9Sstevel@tonic-gate 		 * of stack getting blown with multiple traversal.
9047c478bd9Sstevel@tonic-gate 		 */
9057c478bd9Sstevel@tonic-gate 		if (!(sqp->sq_state & SQS_REENTER) &&
9067c478bd9Sstevel@tonic-gate 		    (sqp->sq_run == curthread) &&
9077c478bd9Sstevel@tonic-gate 		    (((conn_t *)arg)->conn_on_sqp == B_FALSE)) {
9087c478bd9Sstevel@tonic-gate 			sqp->sq_state |= SQS_REENTER;
9097c478bd9Sstevel@tonic-gate 			mutex_exit(&sqp->sq_lock);
9107c478bd9Sstevel@tonic-gate 
9117c478bd9Sstevel@tonic-gate 			((conn_t *)arg)->conn_on_sqp = B_TRUE;
9127c478bd9Sstevel@tonic-gate 			DTRACE_PROBE3(squeue__proc__start, squeue_t *,
9137c478bd9Sstevel@tonic-gate 			    sqp, mblk_t *, mp, conn_t *, arg);
9147c478bd9Sstevel@tonic-gate 			(*proc)(arg, mp, sqp);
9157c478bd9Sstevel@tonic-gate 			DTRACE_PROBE2(squeue__proc__end, squeue_t *,
9167c478bd9Sstevel@tonic-gate 			    sqp, conn_t *, arg);
9177c478bd9Sstevel@tonic-gate 			((conn_t *)arg)->conn_on_sqp = B_FALSE;
9187c478bd9Sstevel@tonic-gate 			CONN_DEC_REF((conn_t *)arg);
9197c478bd9Sstevel@tonic-gate 
9207c478bd9Sstevel@tonic-gate 			mutex_enter(&sqp->sq_lock);
9217c478bd9Sstevel@tonic-gate 			sqp->sq_state &= ~SQS_REENTER;
9227c478bd9Sstevel@tonic-gate 			mutex_exit(&sqp->sq_lock);
9237c478bd9Sstevel@tonic-gate 			return;
9247c478bd9Sstevel@tonic-gate 		}
9257c478bd9Sstevel@tonic-gate 		/*
9267c478bd9Sstevel@tonic-gate 		 * Queue is already being processed. Just enqueue
9277c478bd9Sstevel@tonic-gate 		 * the packet and go away.
9287c478bd9Sstevel@tonic-gate 		 */
9297c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
9307c478bd9Sstevel@tonic-gate 		mp->b_tag = tag;
9317c478bd9Sstevel@tonic-gate #endif
9327c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
9337c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
9347c478bd9Sstevel@tonic-gate 			if (servicing_interrupt())
9357c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_nqueued_intr);
9367c478bd9Sstevel@tonic-gate 			else
9377c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_nqueued_other);
9387c478bd9Sstevel@tonic-gate 			if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
9397c478bd9Sstevel@tonic-gate 				sqp->sq_stats.sq_max_qlen = sqp->sq_count;
9407c478bd9Sstevel@tonic-gate 		}
9417c478bd9Sstevel@tonic-gate #endif
9427c478bd9Sstevel@tonic-gate 
9437c478bd9Sstevel@tonic-gate 		ENQUEUE_MP(sqp, mp, proc, arg);
9447c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
9457c478bd9Sstevel@tonic-gate 		return;
9467c478bd9Sstevel@tonic-gate 	}
9477c478bd9Sstevel@tonic-gate }
9487c478bd9Sstevel@tonic-gate 
9497c478bd9Sstevel@tonic-gate void
9507c478bd9Sstevel@tonic-gate squeue_enter_nodrain(squeue_t *sqp, mblk_t *mp, sqproc_t proc, void *arg,
9517c478bd9Sstevel@tonic-gate     uint8_t tag)
9527c478bd9Sstevel@tonic-gate {
9537c478bd9Sstevel@tonic-gate 	int		interrupt = servicing_interrupt();
9547c478bd9Sstevel@tonic-gate 	boolean_t	being_processed;
9557c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
9567c478bd9Sstevel@tonic-gate 	conn_t 		*connp = (conn_t *)arg;
9577c478bd9Sstevel@tonic-gate #endif
9587c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
9597c478bd9Sstevel@tonic-gate 	hrtime_t 	start, delta;
9607c478bd9Sstevel@tonic-gate #endif
9617c478bd9Sstevel@tonic-gate 
9627c478bd9Sstevel@tonic-gate 	ASSERT(proc != NULL);
9637c478bd9Sstevel@tonic-gate 	ASSERT(sqp != NULL);
9647c478bd9Sstevel@tonic-gate 	ASSERT(mp != NULL);
9657c478bd9Sstevel@tonic-gate 	ASSERT(mp->b_next == NULL);
966ff550d0eSmasputra 	ASSERT(!IPCL_IS_TCP(connp) || connp->conn_tcp->tcp_connp == connp);
967ff550d0eSmasputra 	ASSERT(!IPCL_IS_UDP(connp) || connp->conn_udp->udp_connp == connp);
9687c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
969ff550d0eSmasputra 
9707c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
9717c478bd9Sstevel@tonic-gate 
9727c478bd9Sstevel@tonic-gate 	being_processed = (sqp->sq_state & SQS_PROC);
9737c478bd9Sstevel@tonic-gate 	if (!being_processed && (sqp->sq_first == NULL)) {
9747c478bd9Sstevel@tonic-gate 		/*
9757c478bd9Sstevel@tonic-gate 		 * Fast-path, ok to process and nothing queued.
9767c478bd9Sstevel@tonic-gate 		 */
9777c478bd9Sstevel@tonic-gate 		sqp->sq_state |= (SQS_PROC|SQS_FAST);
9787c478bd9Sstevel@tonic-gate 		sqp->sq_run = curthread;
9797c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
9807c478bd9Sstevel@tonic-gate 
9817c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
9827c478bd9Sstevel@tonic-gate 		sqp->sq_isintr = interrupt;
9837c478bd9Sstevel@tonic-gate 		sqp->sq_curmp = mp;
9847c478bd9Sstevel@tonic-gate 		sqp->sq_curproc = proc;
9857c478bd9Sstevel@tonic-gate 		sqp->sq_connp = connp;
9867c478bd9Sstevel@tonic-gate 		mp->b_tag = sqp->sq_tag = tag;
9877c478bd9Sstevel@tonic-gate #endif
9887c478bd9Sstevel@tonic-gate 
9897c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
9907c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
9917c478bd9Sstevel@tonic-gate 			if (interrupt)
9927c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_npackets_intr);
9937c478bd9Sstevel@tonic-gate 			else
9947c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_npackets_other);
9957c478bd9Sstevel@tonic-gate 			start = gethrtime();
9967c478bd9Sstevel@tonic-gate 		}
9977c478bd9Sstevel@tonic-gate #endif
9987c478bd9Sstevel@tonic-gate 
9997c478bd9Sstevel@tonic-gate 		((conn_t *)arg)->conn_on_sqp = B_TRUE;
10007c478bd9Sstevel@tonic-gate 		DTRACE_PROBE3(squeue__proc__start, squeue_t *,
10017c478bd9Sstevel@tonic-gate 		    sqp, mblk_t *, mp, conn_t *, arg);
10027c478bd9Sstevel@tonic-gate 		(*proc)(arg, mp, sqp);
10037c478bd9Sstevel@tonic-gate 		DTRACE_PROBE2(squeue__proc__end, squeue_t *,
10047c478bd9Sstevel@tonic-gate 		    sqp, conn_t *, arg);
10057c478bd9Sstevel@tonic-gate 		((conn_t *)arg)->conn_on_sqp = B_FALSE;
10067c478bd9Sstevel@tonic-gate 
10077c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
10087c478bd9Sstevel@tonic-gate 		sqp->sq_curmp = NULL;
10097c478bd9Sstevel@tonic-gate 		sqp->sq_curproc = NULL;
10107c478bd9Sstevel@tonic-gate 		sqp->sq_connp = NULL;
10117c478bd9Sstevel@tonic-gate 		sqp->sq_isintr = 0;
10127c478bd9Sstevel@tonic-gate #endif
10137c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
10147c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
10157c478bd9Sstevel@tonic-gate 			delta = gethrtime() - start;
10167c478bd9Sstevel@tonic-gate 			if (interrupt)
10177c478bd9Sstevel@tonic-gate 				SQDELTA(sqp, sq_time_intr, delta);
10187c478bd9Sstevel@tonic-gate 			else
10197c478bd9Sstevel@tonic-gate 				SQDELTA(sqp, sq_time_other, delta);
10207c478bd9Sstevel@tonic-gate 		}
10217c478bd9Sstevel@tonic-gate #endif
10227c478bd9Sstevel@tonic-gate 
10237c478bd9Sstevel@tonic-gate 		CONN_DEC_REF((conn_t *)arg);
10247c478bd9Sstevel@tonic-gate 		mutex_enter(&sqp->sq_lock);
10257c478bd9Sstevel@tonic-gate 		sqp->sq_state &= ~(SQS_PROC|SQS_FAST);
10267c478bd9Sstevel@tonic-gate 		sqp->sq_run = NULL;
10277c478bd9Sstevel@tonic-gate 		if (sqp->sq_first == NULL) {
10287c478bd9Sstevel@tonic-gate 			/*
10297c478bd9Sstevel@tonic-gate 			 * We processed inline our packet and
10307c478bd9Sstevel@tonic-gate 			 * nothing new has arrived. We are done.
10317c478bd9Sstevel@tonic-gate 			 */
10327c478bd9Sstevel@tonic-gate 			mutex_exit(&sqp->sq_lock);
10337c478bd9Sstevel@tonic-gate 		} else {
10347c478bd9Sstevel@tonic-gate 			SQUEUE_WORKER_WAKEUP(sqp);
10357c478bd9Sstevel@tonic-gate 		}
10367c478bd9Sstevel@tonic-gate 		return;
10377c478bd9Sstevel@tonic-gate 	} else {
10387c478bd9Sstevel@tonic-gate 		/*
10397c478bd9Sstevel@tonic-gate 		 * We let a thread processing a squeue reenter only
10407c478bd9Sstevel@tonic-gate 		 * once. This helps the case of incoming connection
10417c478bd9Sstevel@tonic-gate 		 * where a SYN-ACK-ACK that triggers the conn_ind
10427c478bd9Sstevel@tonic-gate 		 * doesn't have to queue the packet if listener and
10437c478bd9Sstevel@tonic-gate 		 * eager are on the same squeue. Also helps the
10447c478bd9Sstevel@tonic-gate 		 * loopback connection where the two ends are bound
10457c478bd9Sstevel@tonic-gate 		 * to the same squeue (which is typical on single
10467c478bd9Sstevel@tonic-gate 		 * CPU machines).
10477c478bd9Sstevel@tonic-gate 		 * We let the thread reenter only once for the fear
10487c478bd9Sstevel@tonic-gate 		 * of stack getting blown with multiple traversal.
10497c478bd9Sstevel@tonic-gate 		 */
10507c478bd9Sstevel@tonic-gate 		if (being_processed && !(sqp->sq_state & SQS_REENTER) &&
10517c478bd9Sstevel@tonic-gate 		    (sqp->sq_run == curthread) &&
10527c478bd9Sstevel@tonic-gate 		    (((conn_t *)arg)->conn_on_sqp == B_FALSE)) {
10537c478bd9Sstevel@tonic-gate 			sqp->sq_state |= SQS_REENTER;
10547c478bd9Sstevel@tonic-gate 			mutex_exit(&sqp->sq_lock);
10557c478bd9Sstevel@tonic-gate 
10567c478bd9Sstevel@tonic-gate 			((conn_t *)arg)->conn_on_sqp = B_TRUE;
10577c478bd9Sstevel@tonic-gate 			DTRACE_PROBE3(squeue__proc__start, squeue_t *,
10587c478bd9Sstevel@tonic-gate 			    sqp, mblk_t *, mp, conn_t *, arg);
10597c478bd9Sstevel@tonic-gate 			(*proc)(arg, mp, sqp);
10607c478bd9Sstevel@tonic-gate 			DTRACE_PROBE2(squeue__proc__end, squeue_t *,
10617c478bd9Sstevel@tonic-gate 			    sqp, conn_t *, arg);
10627c478bd9Sstevel@tonic-gate 			((conn_t *)arg)->conn_on_sqp = B_FALSE;
10637c478bd9Sstevel@tonic-gate 			CONN_DEC_REF((conn_t *)arg);
10647c478bd9Sstevel@tonic-gate 
10657c478bd9Sstevel@tonic-gate 			mutex_enter(&sqp->sq_lock);
10667c478bd9Sstevel@tonic-gate 			sqp->sq_state &= ~SQS_REENTER;
10677c478bd9Sstevel@tonic-gate 			mutex_exit(&sqp->sq_lock);
10687c478bd9Sstevel@tonic-gate 			return;
10697c478bd9Sstevel@tonic-gate 		}
10707c478bd9Sstevel@tonic-gate 
10717c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
10727c478bd9Sstevel@tonic-gate 		mp->b_tag = tag;
10737c478bd9Sstevel@tonic-gate #endif
10747c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
10757c478bd9Sstevel@tonic-gate 		if (SQ_PROFILING(sqp)) {
10767c478bd9Sstevel@tonic-gate 			if (servicing_interrupt())
10777c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_nqueued_intr);
10787c478bd9Sstevel@tonic-gate 			else
10797c478bd9Sstevel@tonic-gate 				SQSTAT(sqp, sq_nqueued_other);
10807c478bd9Sstevel@tonic-gate 			if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
10817c478bd9Sstevel@tonic-gate 				sqp->sq_stats.sq_max_qlen = sqp->sq_count;
10827c478bd9Sstevel@tonic-gate 		}
10837c478bd9Sstevel@tonic-gate #endif
10847c478bd9Sstevel@tonic-gate 		ENQUEUE_MP(sqp, mp, proc, arg);
10857c478bd9Sstevel@tonic-gate 		if (being_processed) {
10867c478bd9Sstevel@tonic-gate 			/*
10877c478bd9Sstevel@tonic-gate 			 * Queue is already being processed.
10887c478bd9Sstevel@tonic-gate 			 * No need to do anything.
10897c478bd9Sstevel@tonic-gate 			 */
10907c478bd9Sstevel@tonic-gate 			mutex_exit(&sqp->sq_lock);
10917c478bd9Sstevel@tonic-gate 			return;
10927c478bd9Sstevel@tonic-gate 		}
10937c478bd9Sstevel@tonic-gate 		SQUEUE_WORKER_WAKEUP(sqp);
10947c478bd9Sstevel@tonic-gate 	}
10957c478bd9Sstevel@tonic-gate }
10967c478bd9Sstevel@tonic-gate 
10977c478bd9Sstevel@tonic-gate /*
10987c478bd9Sstevel@tonic-gate  * squeue_fill() - fill squeue *sqp with mblk *mp with argument of *arg
10997c478bd9Sstevel@tonic-gate  * without processing the squeue.
11007c478bd9Sstevel@tonic-gate  */
11017c478bd9Sstevel@tonic-gate /* ARGSUSED */
11027c478bd9Sstevel@tonic-gate void
11037c478bd9Sstevel@tonic-gate squeue_fill(squeue_t *sqp, mblk_t *mp, sqproc_t proc, void * arg,
11047c478bd9Sstevel@tonic-gate     uint8_t tag)
11057c478bd9Sstevel@tonic-gate {
11067c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
11077c478bd9Sstevel@tonic-gate 	conn_t *connp = (conn_t *)arg;
11087c478bd9Sstevel@tonic-gate #endif
11097c478bd9Sstevel@tonic-gate 	ASSERT(proc != NULL);
11107c478bd9Sstevel@tonic-gate 	ASSERT(sqp != NULL);
11117c478bd9Sstevel@tonic-gate 	ASSERT(mp != NULL);
11127c478bd9Sstevel@tonic-gate 	ASSERT(mp->b_next == NULL);
1113ff550d0eSmasputra 	ASSERT(!IPCL_IS_TCP(connp) || connp->conn_tcp->tcp_connp == connp);
1114ff550d0eSmasputra 	ASSERT(!IPCL_IS_UDP(connp) || connp->conn_udp->udp_connp == connp);
11157c478bd9Sstevel@tonic-gate 
11167c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
11177c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
11187c478bd9Sstevel@tonic-gate 	ENQUEUE_MP(sqp, mp, proc, arg);
11197c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
11207c478bd9Sstevel@tonic-gate 	mp->b_tag = tag;
11217c478bd9Sstevel@tonic-gate #endif
11227c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
11237c478bd9Sstevel@tonic-gate 	if (SQ_PROFILING(sqp)) {
11247c478bd9Sstevel@tonic-gate 		if (servicing_interrupt())
11257c478bd9Sstevel@tonic-gate 			SQSTAT(sqp, sq_nqueued_intr);
11267c478bd9Sstevel@tonic-gate 		else
11277c478bd9Sstevel@tonic-gate 			SQSTAT(sqp, sq_nqueued_other);
11287c478bd9Sstevel@tonic-gate 		if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
11297c478bd9Sstevel@tonic-gate 			sqp->sq_stats.sq_max_qlen = sqp->sq_count;
11307c478bd9Sstevel@tonic-gate 	}
11317c478bd9Sstevel@tonic-gate #endif
11327c478bd9Sstevel@tonic-gate 
11337c478bd9Sstevel@tonic-gate 	/*
11347c478bd9Sstevel@tonic-gate 	 * If queue is already being processed. No need to do anything.
11357c478bd9Sstevel@tonic-gate 	 */
11367c478bd9Sstevel@tonic-gate 	if (sqp->sq_state & SQS_PROC) {
11377c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
11387c478bd9Sstevel@tonic-gate 		return;
11397c478bd9Sstevel@tonic-gate 	}
11407c478bd9Sstevel@tonic-gate 
11417c478bd9Sstevel@tonic-gate 	SQUEUE_WORKER_WAKEUP(sqp);
11427c478bd9Sstevel@tonic-gate }
11437c478bd9Sstevel@tonic-gate 
11447c478bd9Sstevel@tonic-gate 
11457c478bd9Sstevel@tonic-gate /*
11467c478bd9Sstevel@tonic-gate  * PRIVATE FUNCTIONS
11477c478bd9Sstevel@tonic-gate  */
11487c478bd9Sstevel@tonic-gate 
11497c478bd9Sstevel@tonic-gate static void
11507c478bd9Sstevel@tonic-gate squeue_fire(void *arg)
11517c478bd9Sstevel@tonic-gate {
11527c478bd9Sstevel@tonic-gate 	squeue_t	*sqp = arg;
11537c478bd9Sstevel@tonic-gate 	uint_t		state;
11547c478bd9Sstevel@tonic-gate 
11557c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
11567c478bd9Sstevel@tonic-gate 
11577c478bd9Sstevel@tonic-gate 	state = sqp->sq_state;
11587c478bd9Sstevel@tonic-gate 	if (sqp->sq_tid == 0 && !(state & SQS_TMO_PROG)) {
11597c478bd9Sstevel@tonic-gate 		mutex_exit(&sqp->sq_lock);
11607c478bd9Sstevel@tonic-gate 		return;
11617c478bd9Sstevel@tonic-gate 	}
11627c478bd9Sstevel@tonic-gate 
11637c478bd9Sstevel@tonic-gate 	sqp->sq_tid = 0;
11647c478bd9Sstevel@tonic-gate 	/*
11657c478bd9Sstevel@tonic-gate 	 * The timeout fired before we got a chance to set it.
11667c478bd9Sstevel@tonic-gate 	 * Process it anyway but remove the SQS_TMO_PROG so that
11677c478bd9Sstevel@tonic-gate 	 * the guy trying to set the timeout knows that it has
11687c478bd9Sstevel@tonic-gate 	 * already been processed.
11697c478bd9Sstevel@tonic-gate 	 */
11707c478bd9Sstevel@tonic-gate 	if (state & SQS_TMO_PROG)
11717c478bd9Sstevel@tonic-gate 		sqp->sq_state &= ~SQS_TMO_PROG;
11727c478bd9Sstevel@tonic-gate 
11737c478bd9Sstevel@tonic-gate 	if (!(state & SQS_PROC)) {
11747c478bd9Sstevel@tonic-gate 		sqp->sq_awaken = lbolt;
11757c478bd9Sstevel@tonic-gate 		cv_signal(&sqp->sq_async);
11767c478bd9Sstevel@tonic-gate 	}
11777c478bd9Sstevel@tonic-gate 	mutex_exit(&sqp->sq_lock);
11787c478bd9Sstevel@tonic-gate }
11797c478bd9Sstevel@tonic-gate 
/*
 * squeue_drain() - dequeue and process everything on *sqp until the
 * queue is empty or the 'expire' deadline (a gethrtime() value; 0 means
 * no deadline) has passed.  'proc_type' identifies the drainer (e.g.
 * SQS_WORKER for the worker thread, SQS_ENTER for direct entry) and is
 * or-ed into sq_state along with SQS_PROC for the duration.  Entered
 * with sq_lock held; the lock is dropped while packets are processed
 * and reacquired before the final bookkeeping.
 */
static void
squeue_drain(squeue_t *sqp, uint_t proc_type, hrtime_t expire)
{
	mblk_t	*mp;
	mblk_t 	*head;
	sqproc_t proc;
	conn_t	*connp;
	clock_t	start = lbolt;
	clock_t	drain_time;
	timeout_id_t tid;
	uint_t	cnt;
	uint_t	total_cnt = 0;
	ill_rx_ring_t	*sq_rx_ring = sqp->sq_rx_ring;
	int	interrupt = servicing_interrupt();
	boolean_t poll_on = B_FALSE;
	hrtime_t now;

	ASSERT(mutex_owned(&sqp->sq_lock));
	ASSERT(!(sqp->sq_state & SQS_PROC));

#if SQUEUE_PROFILE
	/* Count this drain against the context performing it. */
	if (SQ_PROFILING(sqp)) {
		if (interrupt)
			SQSTAT(sqp, sq_ndrains_intr);
		else if (!(proc_type & SQS_WORKER))
			SQSTAT(sqp, sq_ndrains_other);
		else
			SQSTAT(sqp, sq_ndrains_worker);
	}
#endif

	/* A pending worker-wakeup timeout is now superfluous; grab its id. */
	if ((tid = sqp->sq_tid) != 0)
		sqp->sq_tid = 0;

	/* Mark the squeue busy and take the whole chain off the queue. */
	sqp->sq_state |= SQS_PROC | proc_type;
	head = sqp->sq_first;
	sqp->sq_first = NULL;
	sqp->sq_last = NULL;
	cnt = sqp->sq_count;

	/*
	 * We have backlog built up. Switch to polling mode if the
	 * device underneath allows it. Need to do it only for
	 * drain by non-interrupt thread so interrupts don't
	 * come and disrupt us in between. If its a interrupt thread,
	 * no need because most devices will not issue another
	 * interrupt till this one returns.
	 */
	if ((sqp->sq_state & SQS_POLL_CAPAB) && !(proc_type & SQS_ENTER) &&
		(sqp->sq_count > squeue_worker_poll_min)) {
		ASSERT(sq_rx_ring != NULL);
		SQS_POLLING_ON(sqp, sq_rx_ring);
		poll_on = B_TRUE;
	}

	mutex_exit(&sqp->sq_lock);

	/* Cancel the wakeup timeout after dropping sq_lock. */
	if (tid != 0)
		(void) untimeout(tid);
again:
	while ((mp = head) != NULL) {
		head = mp->b_next;
		mp->b_next = NULL;

		/*
		 * The processing function and conn were stashed in
		 * b_queue and b_prev at enqueue time (presumably by
		 * ENQUEUE_MP — confirm against the macro); recover and
		 * clear them before handing the mblk off.
		 */
		proc = (sqproc_t)mp->b_queue;
		mp->b_queue = NULL;
		connp = (conn_t *)mp->b_prev;
		mp->b_prev = NULL;
#if SQUEUE_DEBUG
		/* Expose the packet currently being processed for debug. */
		sqp->sq_curmp = mp;
		sqp->sq_curproc = proc;
		sqp->sq_connp = connp;
		sqp->sq_tag = mp->b_tag;
#endif

#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			if (interrupt)
				SQSTAT(sqp, sq_npackets_intr);
			else if (!(proc_type & SQS_WORKER))
				SQSTAT(sqp, sq_npackets_other);
			else
				SQSTAT(sqp, sq_npackets_worker);
		}
#endif

		/* Run the handler with the conn marked as on-squeue. */
		connp->conn_on_sqp = B_TRUE;
		DTRACE_PROBE3(squeue__proc__start, squeue_t *,
		    sqp, mblk_t *, mp, conn_t *, connp);
		(*proc)(connp, mp, sqp);
		DTRACE_PROBE2(squeue__proc__end, squeue_t *,
		    sqp, conn_t *, connp);
		connp->conn_on_sqp = B_FALSE;
		/* Drop the conn reference held for this queued mblk. */
		CONN_DEC_REF(connp);
	}


#if SQUEUE_DEBUG
	sqp->sq_curmp = NULL;
	sqp->sq_curproc = NULL;
	sqp->sq_connp = NULL;
#endif

	mutex_enter(&sqp->sq_lock);
	sqp->sq_count -= cnt;
	total_cnt += cnt;

	if (sqp->sq_first != NULL) {

		now = gethrtime();
		if (!expire || (now < expire)) {
			/* More arrived and time not expired */
			head = sqp->sq_first;
			sqp->sq_first = NULL;
			sqp->sq_last = NULL;
			cnt = sqp->sq_count;
			mutex_exit(&sqp->sq_lock);
			goto again;
		}

		/*
		 * If we are not worker thread and we
		 * reached our time limit to do drain,
		 * signal the worker thread to pick
		 * up the work.
		 * If we were the worker thread, then
		 * we take a break to allow an interrupt
		 * or writer to pick up the load.
		 */
		if (proc_type != SQS_WORKER) {
			sqp->sq_awaken = lbolt;
			cv_signal(&sqp->sq_async);
		}
	}

	/*
	 * Try to see if we can get a time estimate to process a packet.
	 * Do it only in interrupt context since less chance of context
	 * switch or pinning etc. to get a better estimate.
	 */
	if (interrupt && ((drain_time = (lbolt - start)) > 0))
		/* Exponential average: 80% history, 20% this drain. */
		sqp->sq_avg_drain_time = ((80 * sqp->sq_avg_drain_time) +
		    (20 * (drv_hztousec(drain_time)/total_cnt)))/100;

	sqp->sq_state &= ~(SQS_PROC | proc_type);

	/*
	 * If polling was turned on, turn it off and reduce the default
	 * interrupt blank interval as well to bring new packets in faster
	 * (reduces the latency when there is no backlog).
	 */
	if (poll_on && (sqp->sq_state & SQS_POLL_CAPAB)) {
		ASSERT(sq_rx_ring != NULL);
		SQS_POLLING_OFF(sqp, sq_rx_ring);
	}
}
13367c478bd9Sstevel@tonic-gate 
/*
 * squeue_worker() - body of the per-squeue worker thread.  Sleeps on
 * sq_async until work appears (and nobody else is draining), then
 * drains the squeue for at most squeue_workerdrain_ns at a time.  If
 * work remains after a drain, it arms a squeue_fire() timeout so the
 * thread is re-awakened after squeue_workerwait_tick even if no
 * interrupt or writer comes along.  Never returns.
 */
static void
squeue_worker(squeue_t *sqp)
{
	kmutex_t *lock = &sqp->sq_lock;
	kcondvar_t *async = &sqp->sq_async;
	callb_cpr_t cprinfo;
	hrtime_t now;
#if SQUEUE_PROFILE
	hrtime_t start;
#endif

	/*
	 * NOTE(review): CPR callback is registered under the name "nca";
	 * looks like a holdover from the NCA code — confirm intended.
	 */
	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, "nca");
	mutex_enter(lock);

	for (;;) {
		/* Wait until there is work and no other active drainer. */
		while (sqp->sq_first == NULL || (sqp->sq_state & SQS_PROC)) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
still_wait:
			cv_wait(async, lock);
			/* Spurious/competing wakeup: keep waiting. */
			if (sqp->sq_state & SQS_PROC) {
				goto still_wait;
			}
			CALLB_CPR_SAFE_END(&cprinfo, lock);
		}

#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			start = gethrtime();
		}
#endif

		ASSERT(squeue_workerdrain_ns != 0);
		now = gethrtime();
		/* Record ourselves as the drainer; drain with a deadline. */
		sqp->sq_run = curthread;
		squeue_drain(sqp, SQS_WORKER, now +  squeue_workerdrain_ns);
		sqp->sq_run = NULL;

		if (sqp->sq_first != NULL) {
			/*
			 * Doing too much processing by worker thread
			 * in presence of interrupts can be sub optimal.
			 * Instead, once a drain is done by worker thread
			 * for squeue_writerdrain_ns (the reason we are
			 * here), we force wait for squeue_workerwait_tick
			 * before doing more processing even if sq_wait is
			 * set to 0.
			 *
			 * This can be counterproductive for performance
			 * if worker thread is the only means to process
			 * the packets (interrupts or writers are not
			 * allowed inside the squeue).
			 */
			if (sqp->sq_tid == 0 &&
			    !(sqp->sq_state & SQS_TMO_PROG)) {
				timeout_id_t	tid;

				/*
				 * SQS_TMO_PROG marks a timeout being set
				 * up; squeue_fire() clears it if it fires
				 * before we record the id below.
				 */
				sqp->sq_state |= SQS_TMO_PROG;
				mutex_exit(&sqp->sq_lock);
				tid = timeout(squeue_fire, sqp,
				    squeue_workerwait_tick);
				mutex_enter(&sqp->sq_lock);
				/*
				 * Check again if we still need
				 * the timeout
				 */
				if (((sqp->sq_state & (SQS_TMO_PROG|SQS_PROC))
				    == SQS_TMO_PROG) && (sqp->sq_tid == 0) &&
				    (sqp->sq_first != NULL)) {
					sqp->sq_state &= ~SQS_TMO_PROG;
					sqp->sq_awaken = lbolt;
					sqp->sq_tid = tid;
				} else if (sqp->sq_state & SQS_TMO_PROG) {
					/* timeout not needed */
					sqp->sq_state &= ~SQS_TMO_PROG;
					mutex_exit(&(sqp)->sq_lock);
					(void) untimeout(tid);
					mutex_enter(&sqp->sq_lock);
				}
			}
			/* Sleep until the timeout fires or someone signals. */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(async, lock);
			CALLB_CPR_SAFE_END(&cprinfo, lock);
		}


#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			SQDELTA(sqp, sq_time_worker, gethrtime() - start);
		}
#endif
	}
}
14297c478bd9Sstevel@tonic-gate 
14307c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
14317c478bd9Sstevel@tonic-gate static int
14327c478bd9Sstevel@tonic-gate squeue_kstat_update(kstat_t *ksp, int rw)
14337c478bd9Sstevel@tonic-gate {
14347c478bd9Sstevel@tonic-gate 	struct squeue_kstat *sqsp = &squeue_kstat;
14357c478bd9Sstevel@tonic-gate 	squeue_t *sqp = ksp->ks_private;
14367c478bd9Sstevel@tonic-gate 
14377c478bd9Sstevel@tonic-gate 	if (rw == KSTAT_WRITE)
14387c478bd9Sstevel@tonic-gate 		return (EACCES);
14397c478bd9Sstevel@tonic-gate 
14407c478bd9Sstevel@tonic-gate #if SQUEUE_DEBUG
14417c478bd9Sstevel@tonic-gate 	sqsp->sq_count.value.ui64 = sqp->sq_count;
14427c478bd9Sstevel@tonic-gate 	sqsp->sq_max_qlen.value.ui64 = sqp->sq_stats.sq_max_qlen;
14437c478bd9Sstevel@tonic-gate #endif
14447c478bd9Sstevel@tonic-gate 	sqsp->sq_npackets_worker.value.ui64 = sqp->sq_stats.sq_npackets_worker;
14457c478bd9Sstevel@tonic-gate 	sqsp->sq_npackets_intr.value.ui64 = sqp->sq_stats.sq_npackets_intr;
14467c478bd9Sstevel@tonic-gate 	sqsp->sq_npackets_other.value.ui64 = sqp->sq_stats.sq_npackets_other;
14477c478bd9Sstevel@tonic-gate 	sqsp->sq_nqueued_intr.value.ui64 = sqp->sq_stats.sq_nqueued_intr;
14487c478bd9Sstevel@tonic-gate 	sqsp->sq_nqueued_other.value.ui64 = sqp->sq_stats.sq_nqueued_other;
14497c478bd9Sstevel@tonic-gate 	sqsp->sq_ndrains_worker.value.ui64 = sqp->sq_stats.sq_ndrains_worker;
14507c478bd9Sstevel@tonic-gate 	sqsp->sq_ndrains_intr.value.ui64 = sqp->sq_stats.sq_ndrains_intr;
14517c478bd9Sstevel@tonic-gate 	sqsp->sq_ndrains_other.value.ui64 = sqp->sq_stats.sq_ndrains_other;
14527c478bd9Sstevel@tonic-gate 	sqsp->sq_time_worker.value.ui64 = sqp->sq_stats.sq_time_worker;
14537c478bd9Sstevel@tonic-gate 	sqsp->sq_time_intr.value.ui64 = sqp->sq_stats.sq_time_intr;
14547c478bd9Sstevel@tonic-gate 	sqsp->sq_time_other.value.ui64 = sqp->sq_stats.sq_time_other;
14557c478bd9Sstevel@tonic-gate 	return (0);
14567c478bd9Sstevel@tonic-gate }
14577c478bd9Sstevel@tonic-gate #endif
14587c478bd9Sstevel@tonic-gate 
14597c478bd9Sstevel@tonic-gate void
14607c478bd9Sstevel@tonic-gate squeue_profile_enable(squeue_t *sqp)
14617c478bd9Sstevel@tonic-gate {
14627c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
14637c478bd9Sstevel@tonic-gate 	sqp->sq_state |= SQS_PROFILE;
14647c478bd9Sstevel@tonic-gate 	mutex_exit(&sqp->sq_lock);
14657c478bd9Sstevel@tonic-gate }
14667c478bd9Sstevel@tonic-gate 
14677c478bd9Sstevel@tonic-gate void
14687c478bd9Sstevel@tonic-gate squeue_profile_disable(squeue_t *sqp)
14697c478bd9Sstevel@tonic-gate {
14707c478bd9Sstevel@tonic-gate 	mutex_enter(&sqp->sq_lock);
14717c478bd9Sstevel@tonic-gate 	sqp->sq_state &= ~SQS_PROFILE;
14727c478bd9Sstevel@tonic-gate 	mutex_exit(&sqp->sq_lock);
14737c478bd9Sstevel@tonic-gate }
14747c478bd9Sstevel@tonic-gate 
14757c478bd9Sstevel@tonic-gate void
14767c478bd9Sstevel@tonic-gate squeue_profile_reset(squeue_t *sqp)
14777c478bd9Sstevel@tonic-gate {
14787c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE
14797c478bd9Sstevel@tonic-gate 	bzero(&sqp->sq_stats, sizeof (sqstat_t));
14807c478bd9Sstevel@tonic-gate #endif
14817c478bd9Sstevel@tonic-gate }
14827c478bd9Sstevel@tonic-gate 
/*
 * Globally enable squeue profiling (sets the squeue_profile flag).
 * No-op when the kernel is built without SQUEUE_PROFILE.
 */
void
squeue_profile_start(void)
{
#if SQUEUE_PROFILE
	squeue_profile = B_TRUE;
#endif
}
14907c478bd9Sstevel@tonic-gate 
/*
 * Globally disable squeue profiling (clears the squeue_profile flag).
 * No-op when the kernel is built without SQUEUE_PROFILE.
 */
void
squeue_profile_stop(void)
{
#if SQUEUE_PROFILE
	squeue_profile = B_FALSE;
#endif
}
14987c478bd9Sstevel@tonic-gate 
14997c478bd9Sstevel@tonic-gate uintptr_t *
15007c478bd9Sstevel@tonic-gate squeue_getprivate(squeue_t *sqp, sqprivate_t p)
15017c478bd9Sstevel@tonic-gate {
15027c478bd9Sstevel@tonic-gate 	ASSERT(p < SQPRIVATE_MAX);
15037c478bd9Sstevel@tonic-gate 
15047c478bd9Sstevel@tonic-gate 	return (&sqp->sq_private[p]);
15057c478bd9Sstevel@tonic-gate }
15067c478bd9Sstevel@tonic-gate 
15077c478bd9Sstevel@tonic-gate processorid_t
15087c478bd9Sstevel@tonic-gate squeue_binding(squeue_t *sqp)
15097c478bd9Sstevel@tonic-gate {
15107c478bd9Sstevel@tonic-gate 	return (sqp->sq_bind);
15117c478bd9Sstevel@tonic-gate }
1512