17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 57c478bd9Sstevel@tonic-gate * Common Development and Distribution License, Version 1.0 only 67c478bd9Sstevel@tonic-gate * (the "License"). You may not use this file except in compliance 77c478bd9Sstevel@tonic-gate * with the License. 87c478bd9Sstevel@tonic-gate * 97c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 107c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 117c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 127c478bd9Sstevel@tonic-gate * and limitations under the License. 137c478bd9Sstevel@tonic-gate * 147c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 157c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 167c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 177c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 187c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 197c478bd9Sstevel@tonic-gate * 207c478bd9Sstevel@tonic-gate * CDDL HEADER END 217c478bd9Sstevel@tonic-gate */ 227c478bd9Sstevel@tonic-gate /* 237c478bd9Sstevel@tonic-gate * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 247c478bd9Sstevel@tonic-gate * Use is subject to license terms. 257c478bd9Sstevel@tonic-gate */ 267c478bd9Sstevel@tonic-gate 277c478bd9Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 287c478bd9Sstevel@tonic-gate 297c478bd9Sstevel@tonic-gate /* 307c478bd9Sstevel@tonic-gate * Squeues - TCP/IP serialization mechanism. 317c478bd9Sstevel@tonic-gate * 327c478bd9Sstevel@tonic-gate * This is a general purpose high-performance serialization mechanism. 
It is 337c478bd9Sstevel@tonic-gate * similar to a taskq with a single worker thread, the difference is that it 347c478bd9Sstevel@tonic-gate * does not imply a context switch - the thread placing a request may actually 357c478bd9Sstevel@tonic-gate * process it. It is also biased for processing requests in interrupt context. 367c478bd9Sstevel@tonic-gate * 377c478bd9Sstevel@tonic-gate * Each squeue has a worker thread which may optionally be bound to a CPU. 387c478bd9Sstevel@tonic-gate * 397c478bd9Sstevel@tonic-gate * Only one thread may process requests from a given squeue at any time. This is 407c478bd9Sstevel@tonic-gate * called "entering" squeue. 417c478bd9Sstevel@tonic-gate * 427c478bd9Sstevel@tonic-gate * Each dispatched request is processed either by 437c478bd9Sstevel@tonic-gate * 447c478bd9Sstevel@tonic-gate * a) Dispatching thread or 457c478bd9Sstevel@tonic-gate * b) Some other thread that is currently processing squeue at the time of 467c478bd9Sstevel@tonic-gate * request or 477c478bd9Sstevel@tonic-gate * c) worker thread. 487c478bd9Sstevel@tonic-gate * 497c478bd9Sstevel@tonic-gate * INTERFACES: 507c478bd9Sstevel@tonic-gate * 517c478bd9Sstevel@tonic-gate * squeue_t *squeue_create(name, bind, wait, pri) 527c478bd9Sstevel@tonic-gate * 537c478bd9Sstevel@tonic-gate * name: symbolic name for squeue. 547c478bd9Sstevel@tonic-gate * wait: time to wait before waiking the worker thread after queueing 557c478bd9Sstevel@tonic-gate * request. 567c478bd9Sstevel@tonic-gate * bind: preferred CPU binding for the worker thread. 577c478bd9Sstevel@tonic-gate * pri: thread priority for the worker thread. 587c478bd9Sstevel@tonic-gate * 597c478bd9Sstevel@tonic-gate * This function never fails and may sleep. It returns a transparent pointer 607c478bd9Sstevel@tonic-gate * to the squeue_t structure that is passed to all other squeue operations. 
617c478bd9Sstevel@tonic-gate * 627c478bd9Sstevel@tonic-gate * void squeue_bind(sqp, bind) 637c478bd9Sstevel@tonic-gate * 647c478bd9Sstevel@tonic-gate * Bind squeue worker thread to a CPU specified by the 'bind' argument. The 657c478bd9Sstevel@tonic-gate * 'bind' value of -1 binds to the preferred thread specified for 667c478bd9Sstevel@tonic-gate * squeue_create. 677c478bd9Sstevel@tonic-gate * 687c478bd9Sstevel@tonic-gate * NOTE: Any value of 'bind' other then -1 is not supported currently, but the 697c478bd9Sstevel@tonic-gate * API is present - in the future it may be useful to specify different 707c478bd9Sstevel@tonic-gate * binding. 717c478bd9Sstevel@tonic-gate * 727c478bd9Sstevel@tonic-gate * void squeue_unbind(sqp) 737c478bd9Sstevel@tonic-gate * 747c478bd9Sstevel@tonic-gate * Unbind the worker thread from its preferred CPU. 757c478bd9Sstevel@tonic-gate * 767c478bd9Sstevel@tonic-gate * void squeue_enter(*sqp, *mp, proc, arg, tag) 777c478bd9Sstevel@tonic-gate * 787c478bd9Sstevel@tonic-gate * Post a single request for processing. Each request consists of mblock 'mp', 797c478bd9Sstevel@tonic-gate * function 'proc' to execute and an argument 'arg' to pass to this 807c478bd9Sstevel@tonic-gate * function. The function is called as (*proc)(arg, mp, sqp); The tag is an 817c478bd9Sstevel@tonic-gate * arbitrary number from 0 to 255 which will be stored in mp to track exact 827c478bd9Sstevel@tonic-gate * caller of squeue_enter. The combination of function name and the tag should 837c478bd9Sstevel@tonic-gate * provide enough information to identify the caller. 847c478bd9Sstevel@tonic-gate * 857c478bd9Sstevel@tonic-gate * If no one is processing the squeue, squeue_enter() will call the function 867c478bd9Sstevel@tonic-gate * immediately. Otherwise it will add the request to the queue for later 877c478bd9Sstevel@tonic-gate * processing. Once the function is executed, the thread may continue 887c478bd9Sstevel@tonic-gate * executing all other requests pending on the queue. 
897c478bd9Sstevel@tonic-gate * 907c478bd9Sstevel@tonic-gate * NOTE: The tagging information is only used when SQUEUE_DEBUG is set to 1. 917c478bd9Sstevel@tonic-gate * NOTE: The argument can be conn_t only. Ideally we'd like to have generic 927c478bd9Sstevel@tonic-gate * argument, but we want to drop connection reference count here - this 937c478bd9Sstevel@tonic-gate * improves tail-call optimizations. 947c478bd9Sstevel@tonic-gate * XXX: The arg should have type conn_t. 957c478bd9Sstevel@tonic-gate * 967c478bd9Sstevel@tonic-gate * void squeue_enter_nodrain(*sqp, *mp, proc, arg, tag) 977c478bd9Sstevel@tonic-gate * 987c478bd9Sstevel@tonic-gate * Same as squeue_enter(), but the entering thread will only try to execute a 997c478bd9Sstevel@tonic-gate * single request. It will not continue executing any pending requests. 1007c478bd9Sstevel@tonic-gate * 1017c478bd9Sstevel@tonic-gate * void squeue_fill(*sqp, *mp, proc, arg, tag) 1027c478bd9Sstevel@tonic-gate * 1037c478bd9Sstevel@tonic-gate * Just place the request on the queue without trying to execute it. Arrange 1047c478bd9Sstevel@tonic-gate * for the worker thread to process the request. 1057c478bd9Sstevel@tonic-gate * 1067c478bd9Sstevel@tonic-gate * void squeue_profile_enable(sqp) 1077c478bd9Sstevel@tonic-gate * void squeue_profile_disable(sqp) 1087c478bd9Sstevel@tonic-gate * 1097c478bd9Sstevel@tonic-gate * Enable or disable profiling for specified 'sqp'. Profiling is only 1107c478bd9Sstevel@tonic-gate * available when SQUEUE_PROFILE is set. 1117c478bd9Sstevel@tonic-gate * 1127c478bd9Sstevel@tonic-gate * void squeue_profile_reset(sqp) 1137c478bd9Sstevel@tonic-gate * 1147c478bd9Sstevel@tonic-gate * Reset all profiling information to zero. Profiling is only 1157c478bd9Sstevel@tonic-gate * available when SQUEUE_PROFILE is set. 
1167c478bd9Sstevel@tonic-gate * 1177c478bd9Sstevel@tonic-gate * void squeue_profile_start() 1187c478bd9Sstevel@tonic-gate * void squeue_profile_stop() 1197c478bd9Sstevel@tonic-gate * 1207c478bd9Sstevel@tonic-gate * Globally enable or disabled profiling for all squeues. 1217c478bd9Sstevel@tonic-gate * 1227c478bd9Sstevel@tonic-gate * uintptr_t *squeue_getprivate(sqp, p) 1237c478bd9Sstevel@tonic-gate * 1247c478bd9Sstevel@tonic-gate * Each squeue keeps small amount of private data space available for various 1257c478bd9Sstevel@tonic-gate * consumers. Current consumers include TCP and NCA. Other consumers need to 1267c478bd9Sstevel@tonic-gate * add their private tag to the sqprivate_t enum. The private information is 1277c478bd9Sstevel@tonic-gate * limited to an uintptr_t value. The squeue has no knowledge of its content 1287c478bd9Sstevel@tonic-gate * and does not manage it in any way. 1297c478bd9Sstevel@tonic-gate * 1307c478bd9Sstevel@tonic-gate * The typical use may be a breakdown of data structures per CPU (since 1317c478bd9Sstevel@tonic-gate * squeues are usually per CPU). See NCA for examples of use. 1327c478bd9Sstevel@tonic-gate * Currently 'p' may have one legal value SQPRIVATE_TCP. 1337c478bd9Sstevel@tonic-gate * 1347c478bd9Sstevel@tonic-gate * processorid_t squeue_binding(sqp) 1357c478bd9Sstevel@tonic-gate * 1367c478bd9Sstevel@tonic-gate * Returns the CPU binding for a given squeue. 1377c478bd9Sstevel@tonic-gate * 1387c478bd9Sstevel@tonic-gate * TUNABALES: 1397c478bd9Sstevel@tonic-gate * 1407c478bd9Sstevel@tonic-gate * squeue_intrdrain_ms: Maximum time in ms interrupts spend draining any 1417c478bd9Sstevel@tonic-gate * squeue. Note that this is approximation - squeues have no control on the 1427c478bd9Sstevel@tonic-gate * time it takes to process each request. This limit is only checked 1437c478bd9Sstevel@tonic-gate * between processing individual messages. 1447c478bd9Sstevel@tonic-gate * Default: 20 ms. 
1457c478bd9Sstevel@tonic-gate * 1467c478bd9Sstevel@tonic-gate * squeue_writerdrain_ms: Maximum time in ms non-interrupts spend draining any 1477c478bd9Sstevel@tonic-gate * squeue. Note that this is approximation - squeues have no control on the 1487c478bd9Sstevel@tonic-gate * time it takes to process each request. This limit is only checked 1497c478bd9Sstevel@tonic-gate * between processing individual messages. 1507c478bd9Sstevel@tonic-gate * Default: 10 ms. 1517c478bd9Sstevel@tonic-gate * 1527c478bd9Sstevel@tonic-gate * squeue_workerdrain_ms: Maximum time in ms worker thread spends draining any 1537c478bd9Sstevel@tonic-gate * squeue. Note that this is approximation - squeues have no control on the 1547c478bd9Sstevel@tonic-gate * time it takes to process each request. This limit is only checked 1557c478bd9Sstevel@tonic-gate * between processing individual messages. 1567c478bd9Sstevel@tonic-gate * Default: 10 ms. 1577c478bd9Sstevel@tonic-gate * 1587c478bd9Sstevel@tonic-gate * squeue_workerwait_ms: When worker thread is interrupted because workerdrain 1597c478bd9Sstevel@tonic-gate * expired, how much time to wait before waking worker thread again. 1607c478bd9Sstevel@tonic-gate * Default: 10 ms. 1617c478bd9Sstevel@tonic-gate * 1627c478bd9Sstevel@tonic-gate * DEFINES: 1637c478bd9Sstevel@tonic-gate * 1647c478bd9Sstevel@tonic-gate * SQUEUE_DEBUG: If defined as 1, special code is compiled in which records 1657c478bd9Sstevel@tonic-gate * additional information aiding debugging is recorded in squeue. 1667c478bd9Sstevel@tonic-gate * 1677c478bd9Sstevel@tonic-gate * SQUEUE_PROFILE: If defined as 1, special code is compiled in which collects 1687c478bd9Sstevel@tonic-gate * various squeue statistics and exports them as kstats. 
1697c478bd9Sstevel@tonic-gate * 1707c478bd9Sstevel@tonic-gate * Ideally we would like both SQUEUE_DEBUG and SQUEUE_PROFILE to be always set, 1717c478bd9Sstevel@tonic-gate * but it affects performance, so they are enabled on DEBUG kernels and disabled 1727c478bd9Sstevel@tonic-gate * on non-DEBUG by default. 1737c478bd9Sstevel@tonic-gate */ 1747c478bd9Sstevel@tonic-gate 1757c478bd9Sstevel@tonic-gate #include <sys/types.h> 1767c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 1777c478bd9Sstevel@tonic-gate #include <sys/debug.h> 1787c478bd9Sstevel@tonic-gate #include <sys/kmem.h> 1797c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h> 1807c478bd9Sstevel@tonic-gate #include <sys/condvar_impl.h> 1817c478bd9Sstevel@tonic-gate #include <sys/systm.h> 1827c478bd9Sstevel@tonic-gate #include <sys/callb.h> 1837c478bd9Sstevel@tonic-gate #include <sys/sdt.h> 1847c478bd9Sstevel@tonic-gate #include <sys/ddi.h> 1857c478bd9Sstevel@tonic-gate 1867c478bd9Sstevel@tonic-gate #include <inet/ipclassifier.h> 1877c478bd9Sstevel@tonic-gate 1887c478bd9Sstevel@tonic-gate /* 1897c478bd9Sstevel@tonic-gate * State flags. 1907c478bd9Sstevel@tonic-gate * Note: The MDB IP module depends on the values of these flags. 
/*
 * State flags.
 * Note: The MDB IP module depends on the values of these flags.
 */
#define	SQS_PROC	0x0001	/* being processed */
#define	SQS_WORKER	0x0002	/* worker thread */
#define	SQS_ENTER	0x0004	/* enter thread */
#define	SQS_FAST	0x0008	/* enter-fast thread */
#define	SQS_USER	0x0010	/* A non interrupt user */
#define	SQS_BOUND	0x0020	/* Worker thread is bound */
#define	SQS_PROFILE	0x0040	/* Enable profiling */
#define	SQS_REENTER	0x0080	/* Re entered thread */
#define	SQS_TMO_PROG	0x0100	/* Timeout is being set */

/*
 * Debugging (SQUEUE_DEBUG) and profiling (SQUEUE_PROFILE) support is
 * enabled on DEBUG kernels only, since both affect performance (see the
 * block comment at the top of this file).
 */
#ifdef DEBUG
#define	SQUEUE_DEBUG 1
#define	SQUEUE_PROFILE 1
#else
#define	SQUEUE_DEBUG 0
#define	SQUEUE_PROFILE 0
#endif

#include <sys/squeue_impl.h>

/* Timeout callback, drain engine and worker-thread body, defined later. */
static void squeue_fire(void *);
static void squeue_drain(squeue_t *, uint_t, clock_t);
static void squeue_worker(squeue_t *sqp);

#if SQUEUE_PROFILE
/* Serializes access to the single shared squeue_kstat buffer below. */
static kmutex_t squeue_kstat_lock;
static int  squeue_kstat_update(kstat_t *, int);
#endif

/* Cache from which all squeue_t structures are allocated (squeue_init()). */
kmem_cache_t *squeue_cache;

/*
 * Drain/wait tunables, in milliseconds; documented under TUNABLES in the
 * block comment at the top of this file.
 */
int squeue_intrdrain_ms = 20;
int squeue_writerdrain_ms = 10;
int squeue_workerdrain_ms = 10;
int squeue_workerwait_ms = 10;

/* The values above converted to ticks, computed once in squeue_init(). */
static int squeue_intrdrain_tick = 0;
static int squeue_writerdrain_tick = 0;
static int squeue_workerdrain_tick = 0;
static int squeue_workerwait_tick = 0;

/*
 * The minimum packets queued when the worker thread doing the drain triggers
 * polling (if squeue allows it). The choice of 3 is arbitrary. You
 * definitely don't want it to be 1 since that will trigger polling
 * on very low loads as well (ssh seems to be one such example
 * where packet flow was very low yet somehow 1 packet ended up getting
 * queued and the worker thread fires every 10ms and blanking also gets
 * triggered).
 */
int squeue_worker_poll_min = 3;

#if SQUEUE_PROFILE
/*
 * Set to B_TRUE to enable profiling.
 */
static int squeue_profile = B_FALSE;
#define	SQ_PROFILING(sqp)	(squeue_profile && ((sqp)->sq_state & SQS_PROFILE))

/* Bump / add-to a per-squeue statistic counter. */
#define	SQSTAT(sqp, x)		((sqp)->sq_stats.x++)
#define	SQDELTA(sqp, x, d)	((sqp)->sq_stats.x += (d))

/*
 * Layout of the named kstats exported per squeue.  A single static buffer
 * is shared by all squeues (KSTAT_FLAG_VIRTUAL in squeue_create());
 * presumably squeue_kstat_update() refreshes it from ks_private under
 * squeue_kstat_lock — its definition is not in this chunk, so verify there.
 */
struct squeue_kstat {
	kstat_named_t	sq_count;
	kstat_named_t	sq_max_qlen;
	kstat_named_t	sq_npackets_worker;
	kstat_named_t	sq_npackets_intr;
	kstat_named_t	sq_npackets_other;
	kstat_named_t	sq_nqueued_intr;
	kstat_named_t	sq_nqueued_other;
	kstat_named_t	sq_ndrains_worker;
	kstat_named_t	sq_ndrains_intr;
	kstat_named_t	sq_ndrains_other;
	kstat_named_t	sq_time_worker;
	kstat_named_t	sq_time_intr;
	kstat_named_t	sq_time_other;
} squeue_kstat = {
	{ "count",		KSTAT_DATA_UINT64 },
	{ "max_qlen",		KSTAT_DATA_UINT64 },
	{ "packets_worker",	KSTAT_DATA_UINT64 },
	{ "packets_intr",	KSTAT_DATA_UINT64 },
	{ "packets_other",	KSTAT_DATA_UINT64 },
	{ "queued_intr",	KSTAT_DATA_UINT64 },
	{ "queued_other",	KSTAT_DATA_UINT64 },
	{ "ndrains_worker",	KSTAT_DATA_UINT64 },
	{ "ndrains_intr",	KSTAT_DATA_UINT64 },
	{ "ndrains_other",	KSTAT_DATA_UINT64 },
	{ "time_worker",	KSTAT_DATA_UINT64 },
	{ "time_intr",		KSTAT_DATA_UINT64 },
	{ "time_other",		KSTAT_DATA_UINT64 },
};
#endif

/*
 * SQUEUE_WORKER_WAKEUP(sqp) - invoked with sq_lock held after a request has
 * been enqueued while nobody holds SQS_PROC.  Decides between three actions:
 * an already-armed timeout that has waited at least sq_wait ms is cancelled
 * and the worker signalled now; if sq_wait is non-zero and no timeout is
 * pending (or in progress), one is armed so an enter()ing thread gets a
 * chance to pick up the work first; otherwise the worker is signalled
 * immediately.  Every path drops sq_lock, and the macro may `return' from
 * the calling function — callers must be written with that in mind.
 */
#define	SQUEUE_WORKER_WAKEUP(sqp) {					\
	timeout_id_t tid = (sqp)->sq_tid;				\
									\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));				\
	/*								\
	 * Queue isn't being processed, so take				\
	 * any post enqueue actions needed before leaving.		\
	 */								\
	if (tid != 0) {							\
		/*							\
		 * Waiting for an enter() to process mblk(s).		\
		 */							\
		clock_t	waited = lbolt - (sqp)->sq_awaken;		\
									\
		if (TICK_TO_MSEC(waited) >= (sqp)->sq_wait) {		\
			/*						\
			 * Times up and have a worker thread		\
			 * waiting for work, so schedule it.		\
			 */						\
			(sqp)->sq_tid = 0;				\
			(sqp)->sq_awaken = lbolt;			\
			cv_signal(&(sqp)->sq_async);			\
			mutex_exit(&(sqp)->sq_lock);			\
			(void) untimeout(tid);				\
			return;						\
		}							\
		mutex_exit(&(sqp)->sq_lock);				\
		return;							\
	} else if ((sqp)->sq_state & SQS_TMO_PROG) {			\
		/* Another thread is mid-way through arming a timeout. */ \
		mutex_exit(&(sqp)->sq_lock);				\
		return;							\
	} else if ((sqp)->sq_wait != 0) {				\
		clock_t	wait = (sqp)->sq_wait;				\
		/*							\
		 * Wait up to sqp->sq_wait ms for an			\
		 * enter() to process this queue. We			\
		 * don't want to contend on timeout locks		\
		 * with sq_lock held for performance reasons,		\
		 * so drop the sq_lock before calling timeout		\
		 * but we need to check if timeout is required		\
		 * after re acquiring the sq_lock. Once			\
		 * the sq_lock is dropped, someone else could		\
		 * have processed the packet or the timeout could	\
		 * have already fired.					\
		 */							\
		(sqp)->sq_state |= SQS_TMO_PROG;			\
		mutex_exit(&(sqp)->sq_lock);				\
		tid = timeout(squeue_fire, (sqp), wait);		\
		mutex_enter(&(sqp)->sq_lock);				\
		/* Check again if we still need the timeout */		\
		if ((((sqp)->sq_state & (SQS_PROC|SQS_TMO_PROG)) ==	\
			SQS_TMO_PROG) && ((sqp)->sq_tid == 0) &&	\
			((sqp)->sq_first != NULL)) {			\
				(sqp)->sq_state &= ~SQS_TMO_PROG;	\
				(sqp)->sq_awaken = lbolt;		\
				(sqp)->sq_tid = tid;			\
				mutex_exit(&(sqp)->sq_lock);		\
				return;					\
		} else {						\
			if ((sqp)->sq_state & SQS_TMO_PROG) {		\
				/* Timeout no longer needed; cancel it. */ \
				(sqp)->sq_state &= ~SQS_TMO_PROG;	\
				mutex_exit(&(sqp)->sq_lock);		\
				(void) untimeout(tid);			\
			} else {					\
				/*					\
				 * The timer fired before we could	\
				 * reacquire the sq_lock. squeue_fire	\
				 * removes the SQS_TMO_PROG flag	\
				 * and we don't need to do anything	\
				 * else.				\
				 */					\
				mutex_exit(&(sqp)->sq_lock);		\
			}						\
		}							\
	} else {							\
		/*							\
		 * Schedule the worker thread.				\
		 */							\
		(sqp)->sq_awaken = lbolt;				\
		cv_signal(&(sqp)->sq_async);				\
		mutex_exit(&(sqp)->sq_lock);				\
	}								\
	ASSERT(MUTEX_NOT_HELD(&(sqp)->sq_lock));			\
}

/*
 * ENQUEUE_MP(sqp, mp, proc, arg) - append a single mblk to the squeue's
 * tail, stashing the handler in b_queue and its argument in b_prev
 * (both reclaimed by the drain code before dispatch).  Caller holds sq_lock.
 */
#define	ENQUEUE_MP(sqp, mp, proc, arg) {			\
	/*							\
	 * Enqueue our mblk.					\
	 * NOTE(review): the initial b_queue = NULL store is	\
	 * immediately overwritten below; redundant but harmless. \
	 */							\
	(mp)->b_queue = NULL;					\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
	ASSERT((mp)->b_prev == NULL && (mp)->b_next == NULL);	\
	(mp)->b_queue = (queue_t *)(proc);			\
	(mp)->b_prev = (mblk_t *)(arg);				\
								\
	if ((sqp)->sq_last != NULL)				\
		(sqp)->sq_last->b_next = (mp);			\
	else							\
		(sqp)->sq_first = (mp);				\
	(sqp)->sq_last = (mp);					\
	(sqp)->sq_count++;					\
	ASSERT((sqp)->sq_count > 0);				\
	DTRACE_PROBE2(squeue__enqueue, squeue_t *, sqp,		\
	    mblk_t *, mp);					\
}
/*
 * ENQUEUE_CHAIN(sqp, mp, tail, cnt) - append a pre-built chain of cnt
 * mblks (head mp, last mblk tail) to the squeue.  Unlike ENQUEUE_MP, the
 * per-mblk b_queue/b_prev handler fields are assumed to be already set
 * by the caller.  Caller holds sq_lock.
 */
#define	ENQUEUE_CHAIN(sqp, mp, tail, cnt) {			\
	/*							\
	 * Enqueue our mblk chain.				\
	 */							\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
								\
	if ((sqp)->sq_last != NULL)				\
		(sqp)->sq_last->b_next = (mp);			\
	else							\
		(sqp)->sq_first = (mp);				\
	(sqp)->sq_last = (tail);				\
	(sqp)->sq_count += (cnt);				\
	ASSERT((sqp)->sq_count > 0);				\
	DTRACE_PROBE4(squeue__enqueuechain, squeue_t *, sqp,	\
		mblk_t *, mp, mblk_t *, tail, int, cnt);	\
								\
}

/*
 * SQS_POLLING_ON(sqp, rx_ring) - switch the rx ring into polling mode:
 * program interrupt blanking proportional to the current backlog
 * (sq_avg_drain_time * sq_count), capped at rr_max_blank_time, and mark
 * the ring ILL_POLLING, recording when polling began.  Caller holds
 * sq_lock.
 */
#define	SQS_POLLING_ON(sqp, rx_ring) {				\
	ASSERT(rx_ring != NULL);				\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
	rx_ring->rr_blank(rx_ring->rr_handle,			\
	    MIN((sqp->sq_avg_drain_time * sqp->sq_count),	\
		rx_ring->rr_max_blank_time),			\
		rx_ring->rr_max_pkt_cnt);			\
	rx_ring->rr_poll_state |= ILL_POLLING;			\
	rx_ring->rr_poll_time = lbolt;				\
}

/*
 * SQS_POLLING_OFF(sqp, rx_ring) - restore the ring's minimum interrupt
 * blanking parameters.  Note this does not clear ILL_POLLING here; that
 * presumably happens elsewhere — verify against the drain code.  Caller
 * holds sq_lock.
 */
#define	SQS_POLLING_OFF(sqp, rx_ring) {				\
	ASSERT(rx_ring != NULL);				\
	ASSERT(MUTEX_HELD(&(sqp)->sq_lock));			\
	rx_ring->rr_blank(rx_ring->rr_handle,			\
	    rx_ring->rr_min_blank_time,				\
	    rx_ring->rr_min_pkt_cnt);				\
}

/*
 * squeue_init() - one-time module initialization.  Creates the kmem cache
 * backing all squeue_t allocations and converts the millisecond drain/wait
 * tunables to ticks.  Must run before any squeue_create() call, which
 * divides by squeue_intrdrain_tick.
 */
void
squeue_init(void)
{
	squeue_cache = kmem_cache_create("squeue_cache",
	    sizeof (squeue_t), 64, NULL, NULL, NULL, NULL, NULL, 0);

	squeue_intrdrain_tick = MSEC_TO_TICK_ROUNDUP(squeue_intrdrain_ms);
	squeue_writerdrain_tick = MSEC_TO_TICK_ROUNDUP(squeue_writerdrain_ms);
	squeue_workerdrain_tick = MSEC_TO_TICK_ROUNDUP(squeue_workerdrain_ms);
	squeue_workerwait_tick = MSEC_TO_TICK_ROUNDUP(squeue_workerwait_ms);
}

/*
 * squeue_create() - allocate and initialize a new squeue.
 *
 *	name:	symbolic name, copied into sq_name (truncated to SQ_NAMELEN;
 *		assumes sq_name is SQ_NAMELEN + 1 bytes — verify in
 *		squeue_impl.h).
 *	bind:	preferred CPU binding for the worker thread; note the worker
 *		is not actually bound here, only by a later squeue_bind().
 *	wait:	ms to wait before waking the worker after queueing a request.
 *	pri:	priority for the worker thread.
 *
 * Never fails; may sleep (KM_SLEEP allocation, thread_create).  On
 * SQUEUE_PROFILE kernels a per-squeue kstat is also registered, backed by
 * the shared squeue_kstat buffer.
 */
/* ARGSUSED */
squeue_t *
squeue_create(char *name, processorid_t bind, clock_t wait, pri_t pri)
{
	squeue_t *sqp = kmem_cache_alloc(squeue_cache, KM_SLEEP);

	bzero(sqp, sizeof (squeue_t));
	(void) strncpy(sqp->sq_name, name, SQ_NAMELEN + 1);
	sqp->sq_name[SQ_NAMELEN] = '\0';	/* force NUL-termination */

	sqp->sq_bind = bind;
	sqp->sq_wait = MSEC_TO_TICK(wait);
	/*
	 * Seed the average per-packet drain time (us) from the intrdrain
	 * tunable; requires squeue_init() to have run (non-zero tick value).
	 */
	sqp->sq_avg_drain_time =
	    drv_hztousec(squeue_intrdrain_tick)/squeue_intrdrain_tick;

#if SQUEUE_PROFILE
	if ((sqp->sq_kstat = kstat_create("ip", bind, name,
		"net", KSTAT_TYPE_NAMED,
		sizeof (squeue_kstat) / sizeof (kstat_named_t),
		KSTAT_FLAG_VIRTUAL)) != NULL) {
		sqp->sq_kstat->ks_lock = &squeue_kstat_lock;
		sqp->sq_kstat->ks_data = &squeue_kstat;
		sqp->sq_kstat->ks_update = squeue_kstat_update;
		sqp->sq_kstat->ks_private = sqp;
		kstat_install(sqp->sq_kstat);
	}
#endif

	/* Worker is created last, once the squeue is fully initialized. */
	sqp->sq_worker = thread_create(NULL, 0, squeue_worker,
	    sqp, 0, &p0, TS_RUN, pri);

	return (sqp);
}

/*
 * squeue_bind() - bind the worker thread to the CPU chosen at
 * squeue_create() time (sq_bind).  Only bind == -1 is currently supported
 * (see the API note in the block comment at the top of this file).
 * No-op if already bound.
 */
/* ARGSUSED */
void
squeue_bind(squeue_t *sqp, processorid_t bind)
{
	ASSERT(bind == -1);

	mutex_enter(&sqp->sq_lock);
	if (sqp->sq_state & SQS_BOUND) {
		mutex_exit(&sqp->sq_lock);
		return;
	}

	/* Set the flag before dropping the lock so a racing bind bails. */
	sqp->sq_state |= SQS_BOUND;
	mutex_exit(&sqp->sq_lock);

	thread_affinity_set(sqp->sq_worker, sqp->sq_bind);
}

/*
 * squeue_unbind() - clear the worker thread's CPU affinity.
 * No-op if not currently bound.
 */
void
squeue_unbind(squeue_t *sqp)
{
	mutex_enter(&sqp->sq_lock);
	if (!(sqp->sq_state & SQS_BOUND)) {
		mutex_exit(&sqp->sq_lock);
		return;
	}

	sqp->sq_state &= ~SQS_BOUND;
	mutex_exit(&sqp->sq_lock);

	thread_affinity_clear(sqp->sq_worker);
}

	thread_affinity_clear(sqp->sq_worker);
}

/*
 * squeue_enter_chain() - enter squeue sqp with mblk mp (which can be
 * a chain), while tail points to the end and cnt is the number of
 * mblks in the chain.
 *
 * For a chain of a single packet (i.e. mp == tail), go through the
 * fast path if no one is processing the squeue and nothing is queued.
 *
 * The proc and arg for each mblk are already stored in the mblk in
 * appropriate places (b_queue holds the proc, b_prev the arg).
 */
void
squeue_enter_chain(squeue_t *sqp, mblk_t *mp, mblk_t *tail,
    uint32_t cnt, uint8_t tag)
{
	int interrupt = servicing_interrupt();
	void *arg;
	sqproc_t proc;
#if SQUEUE_PROFILE
	hrtime_t start, delta;
#endif

	ASSERT(sqp != NULL);
	ASSERT(mp != NULL);
	ASSERT(tail != NULL);
	ASSERT(cnt > 0);
	ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));

	mutex_enter(&sqp->sq_lock);
	if (!(sqp->sq_state & SQS_PROC)) {
		/*
		 * See if anything is already queued. If we are the
		 * first packet, do inline processing else queue the
		 * packet and do the drain.
		 */
		sqp->sq_run = curthread;
		if (sqp->sq_first == NULL && cnt == 1) {
			/*
			 * Fast-path, ok to process and nothing queued.
			 */
			sqp->sq_state |= (SQS_PROC|SQS_FAST);
			mutex_exit(&sqp->sq_lock);

			/*
			 * We are the chain of 1 packet so
			 * go through this fast path.
			 */
			arg = mp->b_prev;
			mp->b_prev = NULL;
			proc = (sqproc_t)mp->b_queue;
			mp->b_queue = NULL;

			ASSERT(proc != NULL);
			ASSERT(arg != NULL);
			ASSERT(mp->b_next == NULL);

#if SQUEUE_DEBUG
			sqp->sq_isintr = interrupt;
			sqp->sq_curmp = mp;
			sqp->sq_curproc = proc;
			sqp->sq_connp = arg;
			mp->b_tag = sqp->sq_tag = tag;
#endif
#if SQUEUE_PROFILE
			if (SQ_PROFILING(sqp)) {
				if (interrupt)
					SQSTAT(sqp, sq_npackets_intr);
				else
					SQSTAT(sqp, sq_npackets_other);
				start = gethrtime();
			}
#endif
			/*
			 * conn_on_sqp is set for the duration of the
			 * handler so a recursive enter on the same conn
			 * can be detected and queued instead.
			 */
			((conn_t *)arg)->conn_on_sqp = B_TRUE;
			DTRACE_PROBE3(squeue__proc__start, squeue_t *,
			    sqp, mblk_t *, mp, conn_t *, arg);
			(*proc)(arg, mp, sqp);
			DTRACE_PROBE2(squeue__proc__end, squeue_t *,
			    sqp, conn_t *, arg);
			((conn_t *)arg)->conn_on_sqp = B_FALSE;

#if SQUEUE_PROFILE
			if (SQ_PROFILING(sqp)) {
				delta = gethrtime() - start;
				if (interrupt)
					SQDELTA(sqp, sq_time_intr, delta);
				else
					SQDELTA(sqp, sq_time_other, delta);
			}
#endif
#if SQUEUE_DEBUG
			sqp->sq_curmp = NULL;
			sqp->sq_curproc = NULL;
			sqp->sq_connp = NULL;
			sqp->sq_isintr = 0;
#endif

			/* Drop the reference the caller passed in. */
			CONN_DEC_REF((conn_t *)arg);
			ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
			mutex_enter(&sqp->sq_lock);
			sqp->sq_state &= ~(SQS_PROC|SQS_FAST);
			if (sqp->sq_first == NULL) {
				/*
				 * We processed inline our packet and
				 * nothing new has arrived. We are done.
				 */
				sqp->sq_run = NULL;
				mutex_exit(&sqp->sq_lock);
				return;
			} else if (sqp->sq_bind != CPU->cpu_id) {
				/*
				 * If the current thread is not running
				 * on the CPU to which this squeue is bound,
				 * then don't allow it to drain.
				 * NOTE(review): SQUEUE_WORKER_WAKEUP appears
				 * to release sq_lock (no mutex_exit before
				 * return here) - confirm against the macro.
				 */
				sqp->sq_run = NULL;
				SQUEUE_WORKER_WAKEUP(sqp);
				return;
			}
		} else {
			/* Multi-packet chain, or backlog exists: queue it. */
			ENQUEUE_CHAIN(sqp, mp, tail, cnt);
#if SQUEUE_DEBUG
			mp->b_tag = tag;
#endif
#if SQUEUE_PROFILE
			if (SQ_PROFILING(sqp)) {
				if (servicing_interrupt())
					SQSTAT(sqp, sq_nqueued_intr);
				else
					SQSTAT(sqp, sq_nqueued_other);
				if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
					sqp->sq_stats.sq_max_qlen =
					    sqp->sq_count;
			}
#endif
		}

		/*
		 * We are here because either we couldn't do inline
		 * processing (because something was already queued),
		 * or we had a chain of more than one packet,
		 * or something else arrived after we were done with
		 * inline processing.
		 */
		ASSERT(MUTEX_HELD(&sqp->sq_lock));
		ASSERT(sqp->sq_first != NULL);

#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			start = gethrtime();
		}
#endif
#if SQUEUE_DEBUG
		sqp->sq_isintr = interrupt;
#endif

		/* Drain with a tick deadline so we don't hog the CPU. */
		if (interrupt) {
			squeue_drain(sqp, SQS_ENTER, lbolt +
			    squeue_intrdrain_tick);
		} else {
			squeue_drain(sqp, SQS_USER, lbolt +
			    squeue_writerdrain_tick);
		}

#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			delta = gethrtime() - start;
			if (interrupt)
				SQDELTA(sqp, sq_time_intr, delta);
			else
				SQDELTA(sqp, sq_time_other, delta);
		}
#endif
#if SQUEUE_DEBUG
		sqp->sq_isintr = 0;
#endif

		/*
		 * If we didn't do a complete drain, the worker
		 * thread was already signalled by squeue_drain.
		 */
		sqp->sq_run = NULL;
		mutex_exit(&sqp->sq_lock);
		return;
	} else {
		ASSERT(sqp->sq_run != NULL);
		/*
		 * Queue is already being processed. Just enqueue
		 * the packet and go away.
		 */
#if SQUEUE_DEBUG
		mp->b_tag = tag;
#endif
#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			if (servicing_interrupt())
				SQSTAT(sqp, sq_nqueued_intr);
			else
				SQSTAT(sqp, sq_nqueued_other);
			if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
				sqp->sq_stats.sq_max_qlen = sqp->sq_count;
		}
#endif

		ENQUEUE_CHAIN(sqp, mp, tail, cnt);
		mutex_exit(&sqp->sq_lock);
		return;
	}
}

/*
 * squeue_enter() - enter squeue *sqp with mblk *mp with argument of *arg.
 */
/*
 * Single-packet variant of squeue_enter_chain(): proc and arg are
 * passed explicitly instead of being carried in the mblk.  Unlike the
 * chain variant, a thread already processing this squeue is allowed to
 * reenter once (see the SQS_REENTER logic below).
 */
void
squeue_enter(squeue_t *sqp, mblk_t *mp, sqproc_t proc, void *arg,
    uint8_t tag)
{
	int interrupt = servicing_interrupt();
#if SQUEUE_PROFILE
	hrtime_t start, delta;
#endif
#if SQUEUE_DEBUG
	conn_t *connp = (conn_t *)arg;
	/* Sanity: TCP/UDP conns must point back at themselves. */
	ASSERT(!IPCL_IS_TCP(connp) || connp->conn_tcp->tcp_connp == connp);
	ASSERT(!IPCL_IS_UDP(connp) || connp->conn_udp->udp_connp == connp);
#endif

	ASSERT(proc != NULL);
	ASSERT(sqp != NULL);
	ASSERT(mp != NULL);
	ASSERT(mp->b_next == NULL);
	ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));

	mutex_enter(&sqp->sq_lock);
	if (!(sqp->sq_state & SQS_PROC)) {
		/*
		 * See if anything is already queued. If we are the
		 * first packet, do inline processing else queue the
		 * packet and do the drain.
		 */
		sqp->sq_run = curthread;
		if (sqp->sq_first == NULL) {
			/*
			 * Fast-path, ok to process and nothing queued.
			 */
			sqp->sq_state |= (SQS_PROC|SQS_FAST);
			mutex_exit(&sqp->sq_lock);

#if SQUEUE_DEBUG
			sqp->sq_isintr = interrupt;
			sqp->sq_curmp = mp;
			sqp->sq_curproc = proc;
			sqp->sq_connp = connp;
			mp->b_tag = sqp->sq_tag = tag;
#endif
#if SQUEUE_PROFILE
			if (SQ_PROFILING(sqp)) {
				if (interrupt)
					SQSTAT(sqp, sq_npackets_intr);
				else
					SQSTAT(sqp, sq_npackets_other);
				start = gethrtime();
			}
#endif
			/*
			 * Mark the conn while the handler runs so a
			 * recursive enter on the same conn queues
			 * instead of reentering.
			 */
			((conn_t *)arg)->conn_on_sqp = B_TRUE;
			DTRACE_PROBE3(squeue__proc__start, squeue_t *,
			    sqp, mblk_t *, mp, conn_t *, arg);
			(*proc)(arg, mp, sqp);
			DTRACE_PROBE2(squeue__proc__end, squeue_t *,
			    sqp, conn_t *, arg);
			((conn_t *)arg)->conn_on_sqp = B_FALSE;

#if SQUEUE_PROFILE
			if (SQ_PROFILING(sqp)) {
				delta = gethrtime() - start;
				if (interrupt)
					SQDELTA(sqp, sq_time_intr, delta);
				else
					SQDELTA(sqp, sq_time_other, delta);
			}
#endif
#if SQUEUE_DEBUG
			sqp->sq_curmp = NULL;
			sqp->sq_curproc = NULL;
			sqp->sq_connp = NULL;
			sqp->sq_isintr = 0;
#endif

			/* Drop the reference the caller passed in. */
			CONN_DEC_REF((conn_t *)arg);
			ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
			mutex_enter(&sqp->sq_lock);
			sqp->sq_state &= ~(SQS_PROC|SQS_FAST);
			if (sqp->sq_first == NULL) {
				/*
				 * We processed inline our packet and
				 * nothing new has arrived. We are done.
				 */
				sqp->sq_run = NULL;
				mutex_exit(&sqp->sq_lock);
				return;
			} else if (sqp->sq_bind != CPU->cpu_id) {
				/*
				 * If the current thread is not running
				 * on the CPU to which this squeue is bound,
				 * then don't allow it to drain.
				 * NOTE(review): SQUEUE_WORKER_WAKEUP appears
				 * to release sq_lock - confirm against the
				 * macro definition.
				 */
				sqp->sq_run = NULL;
				SQUEUE_WORKER_WAKEUP(sqp);
				return;
			}
		} else {
			/* Backlog exists: queue behind it and drain. */
			ENQUEUE_MP(sqp, mp, proc, arg);
#if SQUEUE_DEBUG
			mp->b_tag = tag;
#endif
#if SQUEUE_PROFILE
			if (SQ_PROFILING(sqp)) {
				if (servicing_interrupt())
					SQSTAT(sqp, sq_nqueued_intr);
				else
					SQSTAT(sqp, sq_nqueued_other);
				if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
					sqp->sq_stats.sq_max_qlen =
					    sqp->sq_count;
			}
#endif
		}

		/*
		 * We are here because either we couldn't do inline
		 * processing (because something was already queued)
		 * or something else arrived after we were done with
		 * inline processing.
		 */
		ASSERT(MUTEX_HELD(&sqp->sq_lock));
		ASSERT(sqp->sq_first != NULL);

#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			start = gethrtime();
		}
#endif
#if SQUEUE_DEBUG
		sqp->sq_isintr = interrupt;
#endif

		/* Drain with a tick deadline so we don't hog the CPU. */
		if (interrupt) {
			squeue_drain(sqp, SQS_ENTER, lbolt +
			    squeue_intrdrain_tick);
		} else {
			squeue_drain(sqp, SQS_USER, lbolt +
			    squeue_writerdrain_tick);
		}

#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			delta = gethrtime() - start;
			if (interrupt)
				SQDELTA(sqp, sq_time_intr, delta);
			else
				SQDELTA(sqp, sq_time_other, delta);
		}
#endif
#if SQUEUE_DEBUG
		sqp->sq_isintr = 0;
#endif

		/*
		 * If we didn't do a complete drain, the worker
		 * thread was already signalled by squeue_drain.
		 */
		sqp->sq_run = NULL;
		mutex_exit(&sqp->sq_lock);
		return;
	} else {
		ASSERT(sqp->sq_run != NULL);
		/*
		 * We let a thread processing a squeue reenter only
		 * once. This helps the case of incoming connection
		 * where a SYN-ACK-ACK that triggers the conn_ind
		 * doesn't have to queue the packet if listener and
		 * eager are on the same squeue. Also helps the
		 * loopback connection where the two ends are bound
		 * to the same squeue (which is typical on single
		 * CPU machines).
		 * We let the thread reenter only once for the fear
		 * of stack getting blown with multiple traversal.
		 */
		if (!(sqp->sq_state & SQS_REENTER) &&
		    (sqp->sq_run == curthread) &&
		    (((conn_t *)arg)->conn_on_sqp == B_FALSE)) {
			sqp->sq_state |= SQS_REENTER;
			mutex_exit(&sqp->sq_lock);

			((conn_t *)arg)->conn_on_sqp = B_TRUE;
			DTRACE_PROBE3(squeue__proc__start, squeue_t *,
			    sqp, mblk_t *, mp, conn_t *, arg);
			(*proc)(arg, mp, sqp);
			DTRACE_PROBE2(squeue__proc__end, squeue_t *,
			    sqp, conn_t *, arg);
			((conn_t *)arg)->conn_on_sqp = B_FALSE;
			CONN_DEC_REF((conn_t *)arg);

			mutex_enter(&sqp->sq_lock);
			sqp->sq_state &= ~SQS_REENTER;
			mutex_exit(&sqp->sq_lock);
			return;
		}
		/*
		 * Queue is already being processed. Just enqueue
		 * the packet and go away.
		 */
#if SQUEUE_DEBUG
		mp->b_tag = tag;
#endif
#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			if (servicing_interrupt())
				SQSTAT(sqp, sq_nqueued_intr);
			else
				SQSTAT(sqp, sq_nqueued_other);
			if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
				sqp->sq_stats.sq_max_qlen = sqp->sq_count;
		}
#endif

		ENQUEUE_MP(sqp, mp, proc, arg);
		mutex_exit(&sqp->sq_lock);
		return;
	}
}

/*
 * squeue_enter_nodrain() - like squeue_enter(), except the entering
 * thread never drains the backlog itself: it either processes inline
 * (fast path or one reenter) or queues the packet and, when nobody is
 * processing, wakes the worker thread.
 */
void
squeue_enter_nodrain(squeue_t *sqp, mblk_t *mp, sqproc_t proc, void *arg,
    uint8_t tag)
{
	int interrupt = servicing_interrupt();
	boolean_t being_processed;
#if SQUEUE_DEBUG
	conn_t *connp = (conn_t *)arg;
#endif
#if SQUEUE_PROFILE
	hrtime_t start, delta;
#endif

	ASSERT(proc != NULL);
	ASSERT(sqp != NULL);
	ASSERT(mp != NULL);
	ASSERT(mp->b_next == NULL);
	ASSERT(!IPCL_IS_TCP(connp) || connp->conn_tcp->tcp_connp == connp);
	ASSERT(!IPCL_IS_UDP(connp) ||
	    connp->conn_udp->udp_connp == connp);
	ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));

	mutex_enter(&sqp->sq_lock);

	/* Snapshot whether another thread currently owns the squeue. */
	being_processed = (sqp->sq_state & SQS_PROC);
	if (!being_processed && (sqp->sq_first == NULL)) {
		/*
		 * Fast-path, ok to process and nothing queued.
		 */
		sqp->sq_state |= (SQS_PROC|SQS_FAST);
		sqp->sq_run = curthread;
		mutex_exit(&sqp->sq_lock);

#if SQUEUE_DEBUG
		sqp->sq_isintr = interrupt;
		sqp->sq_curmp = mp;
		sqp->sq_curproc = proc;
		sqp->sq_connp = connp;
		mp->b_tag = sqp->sq_tag = tag;
#endif

#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			if (interrupt)
				SQSTAT(sqp, sq_npackets_intr);
			else
				SQSTAT(sqp, sq_npackets_other);
			start = gethrtime();
		}
#endif

		/* Mark the conn busy so a recursive enter queues instead. */
		((conn_t *)arg)->conn_on_sqp = B_TRUE;
		DTRACE_PROBE3(squeue__proc__start, squeue_t *,
		    sqp, mblk_t *, mp, conn_t *, arg);
		(*proc)(arg, mp, sqp);
		DTRACE_PROBE2(squeue__proc__end, squeue_t *,
		    sqp, conn_t *, arg);
		((conn_t *)arg)->conn_on_sqp = B_FALSE;

#if SQUEUE_DEBUG
		sqp->sq_curmp = NULL;
		sqp->sq_curproc = NULL;
		sqp->sq_connp = NULL;
		sqp->sq_isintr = 0;
#endif
#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			delta = gethrtime() - start;
			if (interrupt)
				SQDELTA(sqp, sq_time_intr, delta);
			else
				SQDELTA(sqp, sq_time_other, delta);
		}
#endif

		CONN_DEC_REF((conn_t *)arg);
		mutex_enter(&sqp->sq_lock);
		sqp->sq_state &= ~(SQS_PROC|SQS_FAST);
		sqp->sq_run = NULL;
		if (sqp->sq_first == NULL) {
			/*
			 * We processed inline our packet and
			 * nothing new has arrived. We are done.
			 */
			mutex_exit(&sqp->sq_lock);
		} else {
			/*
			 * More work arrived while we ran inline; since we
			 * never drain here, hand it to the worker thread.
			 * NOTE(review): SQUEUE_WORKER_WAKEUP appears to
			 * release sq_lock - confirm against the macro.
			 */
			SQUEUE_WORKER_WAKEUP(sqp);
		}
		return;
	} else {
		/*
		 * We let a thread processing a squeue reenter only
		 * once. This helps the case of incoming connection
		 * where a SYN-ACK-ACK that triggers the conn_ind
		 * doesn't have to queue the packet if listener and
		 * eager are on the same squeue. Also helps the
		 * loopback connection where the two ends are bound
		 * to the same squeue (which is typical on single
		 * CPU machines).
		 * We let the thread reenter only once for the fear
		 * of stack getting blown with multiple traversal.
		 */
		if (being_processed && !(sqp->sq_state & SQS_REENTER) &&
		    (sqp->sq_run == curthread) &&
		    (((conn_t *)arg)->conn_on_sqp == B_FALSE)) {
			sqp->sq_state |= SQS_REENTER;
			mutex_exit(&sqp->sq_lock);

			((conn_t *)arg)->conn_on_sqp = B_TRUE;
			DTRACE_PROBE3(squeue__proc__start, squeue_t *,
			    sqp, mblk_t *, mp, conn_t *, arg);
			(*proc)(arg, mp, sqp);
			DTRACE_PROBE2(squeue__proc__end, squeue_t *,
			    sqp, conn_t *, arg);
			((conn_t *)arg)->conn_on_sqp = B_FALSE;
			CONN_DEC_REF((conn_t *)arg);

			mutex_enter(&sqp->sq_lock);
			sqp->sq_state &= ~SQS_REENTER;
			mutex_exit(&sqp->sq_lock);
			return;
		}

#if SQUEUE_DEBUG
		mp->b_tag = tag;
#endif
#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			if (servicing_interrupt())
				SQSTAT(sqp, sq_nqueued_intr);
			else
				SQSTAT(sqp, sq_nqueued_other);
			if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
				sqp->sq_stats.sq_max_qlen = sqp->sq_count;
		}
#endif
		ENQUEUE_MP(sqp, mp, proc, arg);
		if (being_processed) {
			/*
			 * Queue is already being processed.
			 * No need to do anything.
			 */
			mutex_exit(&sqp->sq_lock);
			return;
		}
		SQUEUE_WORKER_WAKEUP(sqp);
	}
}

/*
 * squeue_fill() - fill squeue *sqp with mblk *mp with argument of *arg
 * without processing the squeue.
 */
/* ARGSUSED */
void
squeue_fill(squeue_t *sqp, mblk_t *mp, sqproc_t proc, void * arg,
    uint8_t tag)
{
#if SQUEUE_DEBUG
	conn_t *connp = (conn_t *)arg;
#endif
	ASSERT(proc != NULL);
	ASSERT(sqp != NULL);
	ASSERT(mp != NULL);
	ASSERT(mp->b_next == NULL);
	/* Sanity: TCP/UDP conns must point back at themselves. */
	ASSERT(!IPCL_IS_TCP(connp) || connp->conn_tcp->tcp_connp == connp);
	ASSERT(!IPCL_IS_UDP(connp) || connp->conn_udp->udp_connp == connp);

	ASSERT(MUTEX_NOT_HELD(&sqp->sq_lock));
	mutex_enter(&sqp->sq_lock);
	/* Always queue; this entry point never processes inline. */
	ENQUEUE_MP(sqp, mp, proc, arg);
#if SQUEUE_DEBUG
	mp->b_tag = tag;
#endif
#if SQUEUE_PROFILE
	if (SQ_PROFILING(sqp)) {
		if (servicing_interrupt())
			SQSTAT(sqp, sq_nqueued_intr);
		else
			SQSTAT(sqp, sq_nqueued_other);
		if (sqp->sq_stats.sq_max_qlen < sqp->sq_count)
			sqp->sq_stats.sq_max_qlen = sqp->sq_count;
	}
#endif

	/*
	 * If queue is already being processed. No need to do anything.
	 */
	if (sqp->sq_state & SQS_PROC) {
		mutex_exit(&sqp->sq_lock);
		return;
	}

	/*
	 * NOTE(review): SQUEUE_WORKER_WAKEUP appears to release sq_lock
	 * (there is no mutex_exit on this path) - confirm against the macro.
	 */
	SQUEUE_WORKER_WAKEUP(sqp);
}


/*
 * PRIVATE FUNCTIONS
 */

/*
 * squeue_fire() - timeout callback for a deferred squeue wakeup.  If
 * the squeue is not being processed when the timer fires, signal the
 * worker thread to drain it.
 */
static void
squeue_fire(void *arg)
{
	squeue_t *sqp = arg;
	uint_t state;

	mutex_enter(&sqp->sq_lock);

	state = sqp->sq_state;
	if (sqp->sq_tid == 0 && !(state & SQS_TMO_PROG)) {
		/* Timeout was cancelled and no new one is being set up. */
		mutex_exit(&sqp->sq_lock);
		return;
	}

	sqp->sq_tid = 0;
	/*
	 * The timeout fired before we got a chance to set it.
	 * Process it anyway but remove the SQS_TMO_PROG so that
	 * the guy trying to set the timeout knows that it has
	 * already been processed.
11617c478bd9Sstevel@tonic-gate */ 11627c478bd9Sstevel@tonic-gate if (state & SQS_TMO_PROG) 11637c478bd9Sstevel@tonic-gate sqp->sq_state &= ~SQS_TMO_PROG; 11647c478bd9Sstevel@tonic-gate 11657c478bd9Sstevel@tonic-gate if (!(state & SQS_PROC)) { 11667c478bd9Sstevel@tonic-gate sqp->sq_awaken = lbolt; 11677c478bd9Sstevel@tonic-gate cv_signal(&sqp->sq_async); 11687c478bd9Sstevel@tonic-gate } 11697c478bd9Sstevel@tonic-gate mutex_exit(&sqp->sq_lock); 11707c478bd9Sstevel@tonic-gate } 11717c478bd9Sstevel@tonic-gate 11727c478bd9Sstevel@tonic-gate static void 11737c478bd9Sstevel@tonic-gate squeue_drain(squeue_t *sqp, uint_t proc_type, clock_t expire) 11747c478bd9Sstevel@tonic-gate { 11757c478bd9Sstevel@tonic-gate mblk_t *mp; 11767c478bd9Sstevel@tonic-gate mblk_t *head; 11777c478bd9Sstevel@tonic-gate sqproc_t proc; 11787c478bd9Sstevel@tonic-gate conn_t *connp; 11797c478bd9Sstevel@tonic-gate clock_t start = lbolt; 11807c478bd9Sstevel@tonic-gate clock_t drain_time; 11817c478bd9Sstevel@tonic-gate timeout_id_t tid; 11827c478bd9Sstevel@tonic-gate uint_t cnt; 11837c478bd9Sstevel@tonic-gate uint_t total_cnt = 0; 11847c478bd9Sstevel@tonic-gate ill_rx_ring_t *sq_rx_ring = sqp->sq_rx_ring; 11857c478bd9Sstevel@tonic-gate int interrupt = servicing_interrupt(); 11867c478bd9Sstevel@tonic-gate boolean_t poll_on = B_FALSE; 11877c478bd9Sstevel@tonic-gate 11887c478bd9Sstevel@tonic-gate ASSERT(mutex_owned(&sqp->sq_lock)); 11897c478bd9Sstevel@tonic-gate ASSERT(!(sqp->sq_state & SQS_PROC)); 11907c478bd9Sstevel@tonic-gate 11917c478bd9Sstevel@tonic-gate #if SQUEUE_PROFILE 11927c478bd9Sstevel@tonic-gate if (SQ_PROFILING(sqp)) { 11937c478bd9Sstevel@tonic-gate if (interrupt) 11947c478bd9Sstevel@tonic-gate SQSTAT(sqp, sq_ndrains_intr); 11957c478bd9Sstevel@tonic-gate else if (!(proc_type & SQS_WORKER)) 11967c478bd9Sstevel@tonic-gate SQSTAT(sqp, sq_ndrains_other); 11977c478bd9Sstevel@tonic-gate else 11987c478bd9Sstevel@tonic-gate SQSTAT(sqp, sq_ndrains_worker); 11997c478bd9Sstevel@tonic-gate } 
#endif

	/* A pending wakeup timeout is no longer needed; cancel it below. */
	if ((tid = sqp->sq_tid) != 0)
		sqp->sq_tid = 0;

	/* Claim the squeue and detach the entire pending chain. */
	sqp->sq_state |= SQS_PROC | proc_type;
	head = sqp->sq_first;
	sqp->sq_first = NULL;
	sqp->sq_last = NULL;
	cnt = sqp->sq_count;

	/*
	 * We have backlog built up. Switch to polling mode if the
	 * device underneath allows it. Need to do it only for
	 * drain by non-interrupt thread so interrupts don't
	 * come and disrupt us in between. If its a interrupt thread,
	 * no need because most devices will not issue another
	 * interrupt till this one returns.
	 */
	if ((sqp->sq_state & SQS_POLL_CAPAB) && !(proc_type & SQS_ENTER) &&
	    (sqp->sq_count > squeue_worker_poll_min)) {
		ASSERT(sq_rx_ring != NULL);
		SQS_POLLING_ON(sqp, sq_rx_ring);
		poll_on = B_TRUE;
	}

	mutex_exit(&sqp->sq_lock);

	if (tid != 0)
		(void) untimeout(tid);
again:
	/* Process the detached chain; requests are run without sq_lock. */
	while ((mp = head) != NULL) {
		head = mp->b_next;
		mp->b_next = NULL;

		/* proc and arg were stashed in the mblk by ENQUEUE_MP. */
		proc = (sqproc_t)mp->b_queue;
		mp->b_queue = NULL;
		connp = (conn_t *)mp->b_prev;
		mp->b_prev = NULL;
#if SQUEUE_DEBUG
		sqp->sq_curmp = mp;
		sqp->sq_curproc = proc;
		sqp->sq_connp = connp;
		sqp->sq_tag = mp->b_tag;
#endif

#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			if (interrupt)
				SQSTAT(sqp, sq_npackets_intr);
			else if (!(proc_type & SQS_WORKER))
				SQSTAT(sqp, sq_npackets_other);
			else
				SQSTAT(sqp, sq_npackets_worker);
		}
#endif


		connp->conn_on_sqp = B_TRUE;
		DTRACE_PROBE3(squeue__proc__start, squeue_t *,
		    sqp, mblk_t *, mp, conn_t *, connp);
		(*proc)(connp, mp, sqp);
		DTRACE_PROBE2(squeue__proc__end, squeue_t *,
		    sqp, conn_t *, connp);
		connp->conn_on_sqp = B_FALSE;
		/* Drop the per-request conn reference taken at enqueue time. */
		CONN_DEC_REF(connp);
	}


#if SQUEUE_DEBUG
	sqp->sq_curmp = NULL;
	sqp->sq_curproc = NULL;
	sqp->sq_connp = NULL;
#endif

	mutex_enter(&sqp->sq_lock);
	sqp->sq_count -= cnt;
	total_cnt += cnt;

	if (sqp->sq_first != NULL) {
		if (!expire || (lbolt < expire)) {
			/* More arrived and time not expired */
			head = sqp->sq_first;
			sqp->sq_first = NULL;
			sqp->sq_last = NULL;
			cnt = sqp->sq_count;
			mutex_exit(&sqp->sq_lock);
			goto again;
		}

		/*
		 * If we are not worker thread and we
		 * reached our time limit to do drain,
		 * signal the worker thread to pick
		 * up the work.
		 * If we were the worker thread, then
		 * we take a break to allow an interrupt
		 * or writer to pick up the load.
		 */
		if (proc_type != SQS_WORKER) {
			sqp->sq_awaken = lbolt;
			cv_signal(&sqp->sq_async);
		}
	}

	/*
	 * Try to see if we can get a time estimate to process a packet.
	 * Do it only in interrupt context since less chance of context
	 * switch or pinning etc. to get a better estimate.
	 * sq_avg_drain_time is an exponential moving average (80/20 mix)
	 * in microseconds per request.
	 */
	if (interrupt && ((drain_time = (lbolt - start)) > 0))
		sqp->sq_avg_drain_time = ((80 * sqp->sq_avg_drain_time) +
		    (20 * (drv_hztousec(drain_time)/total_cnt)))/100;

	/* Release the squeue for the next drainer. */
	sqp->sq_state &= ~(SQS_PROC | proc_type);

	/*
	 * If polling was turned on, turn it off and reduce the default
	 * interrupt blank interval as well to bring new packets in faster
	 * (reduces the latency when there is no backlog).
	 */
	if (poll_on && (sqp->sq_state & SQS_POLL_CAPAB)) {
		ASSERT(sq_rx_ring != NULL);
		SQS_POLLING_OFF(sqp, sq_rx_ring);
	}
}

/*
 * Worker thread main loop.  Sleeps on sq_async until there is queued
 * work and no other thread is processing the squeue, then drains for at
 * most squeue_workerdrain_tick ticks before taking a break.  The loop is
 * CPR-safe (callb_generic_cpr) so the thread can be suspended for
 * checkpoint/resume while waiting.
 */
static void
squeue_worker(squeue_t *sqp)
{
	kmutex_t *lock = &sqp->sq_lock;
	kcondvar_t *async = &sqp->sq_async;
	callb_cpr_t cprinfo;
#if SQUEUE_PROFILE
	hrtime_t start;
#endif

	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, "nca");
	mutex_enter(lock);

	for (;;) {
		while (sqp->sq_first == NULL || (sqp->sq_state & SQS_PROC)) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
still_wait:
			cv_wait(async, lock);
			/*
			 * Another thread grabbed the squeue while we slept;
			 * keep waiting (still inside the CPR-safe section).
			 */
			if (sqp->sq_state & SQS_PROC) {
				goto still_wait;
			}
			CALLB_CPR_SAFE_END(&cprinfo, lock);
		}

#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			start = gethrtime();
		}
#endif

		ASSERT(squeue_workerdrain_tick != 0);
		sqp->sq_run = curthread;
		squeue_drain(sqp,
		    SQS_WORKER, lbolt + squeue_workerdrain_tick);
		sqp->sq_run = NULL;

		if (sqp->sq_first != NULL) {
			/*
			 * Doing too much processing by worker thread
			 * in presense of interrupts can be sub optimal.
			 * Instead, once a drain is done by worker thread
			 * for squeue_writerdrain_ms (the reason we are
			 * here), we force wait for squeue_workerwait_tick
			 * before doing more processing even if sq_wait is
			 * set to 0.
			 *
			 * This can be counterproductive for performance
			 * if worker thread is the only means to process
			 * the packets (interrupts or writers are not
			 * allowed inside the squeue).
			 */
			if (sqp->sq_tid == 0 &&
			    !(sqp->sq_state & SQS_TMO_PROG)) {
				timeout_id_t	tid;

				/*
				 * SQS_TMO_PROG marks the timeout as being
				 * armed while we drop sq_lock to call
				 * timeout(9F); squeue_fire() clears it if
				 * it fires before we finish arming.
				 */
				sqp->sq_state |= SQS_TMO_PROG;
				mutex_exit(&sqp->sq_lock);
				tid = timeout(squeue_fire, sqp,
				    squeue_workerwait_tick);
				mutex_enter(&sqp->sq_lock);
				/*
				 * Check again if we still need
				 * the timeout
				 */
				if (((sqp->sq_state & (SQS_TMO_PROG|SQS_PROC))
				    == SQS_TMO_PROG) && (sqp->sq_tid == 0) &&
				    (sqp->sq_first != NULL)) {
					/* Timeout stands: record its id. */
					sqp->sq_state &= ~SQS_TMO_PROG;
					sqp->sq_awaken = lbolt;
					sqp->sq_tid = tid;
				} else if (sqp->sq_state & SQS_TMO_PROG) {
					/* timeout not needed */
					sqp->sq_state &= ~SQS_TMO_PROG;
					mutex_exit(&(sqp)->sq_lock);
					(void) untimeout(tid);
					mutex_enter(&sqp->sq_lock);
				}
			}
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(async, lock);
			CALLB_CPR_SAFE_END(&cprinfo, lock);
		}


#if SQUEUE_PROFILE
		if (SQ_PROFILING(sqp)) {
			SQDELTA(sqp, sq_time_worker, gethrtime() - start);
		}
#endif
	}
}

#if SQUEUE_PROFILE
/*
 * kstat update callback: snapshot this squeue's profiling counters into
 * the global squeue_kstat structure.  Read-only; writes return EACCES.
 */
static int
squeue_kstat_update(kstat_t *ksp, int rw)
{
	struct squeue_kstat *sqsp = &squeue_kstat;
	squeue_t *sqp = ksp->ks_private;

	if (rw == KSTAT_WRITE)
		return (EACCES);

#if SQUEUE_DEBUG
	sqsp->sq_count.value.ui64 = sqp->sq_count;
	sqsp->sq_max_qlen.value.ui64 = sqp->sq_stats.sq_max_qlen;
#endif
	sqsp->sq_npackets_worker.value.ui64 = sqp->sq_stats.sq_npackets_worker;
	sqsp->sq_npackets_intr.value.ui64 = sqp->sq_stats.sq_npackets_intr;
	sqsp->sq_npackets_other.value.ui64 = sqp->sq_stats.sq_npackets_other;
	sqsp->sq_nqueued_intr.value.ui64 = sqp->sq_stats.sq_nqueued_intr;
	sqsp->sq_nqueued_other.value.ui64 = sqp->sq_stats.sq_nqueued_other;
	sqsp->sq_ndrains_worker.value.ui64 = sqp->sq_stats.sq_ndrains_worker;
	sqsp->sq_ndrains_intr.value.ui64 = sqp->sq_stats.sq_ndrains_intr;
	sqsp->sq_ndrains_other.value.ui64 = sqp->sq_stats.sq_ndrains_other;
	sqsp->sq_time_worker.value.ui64 = sqp->sq_stats.sq_time_worker;
	sqsp->sq_time_intr.value.ui64 = sqp->sq_stats.sq_time_intr;
	sqsp->sq_time_other.value.ui64 = sqp->sq_stats.sq_time_other;
	return (0);
}
#endif

/* Enable per-squeue profiling statistics collection (sets SQS_PROFILE). */
void
squeue_profile_enable(squeue_t *sqp)
{
	mutex_enter(&sqp->sq_lock);
	sqp->sq_state |= SQS_PROFILE;
	mutex_exit(&sqp->sq_lock);
}

/* Disable per-squeue profiling statistics collection. */
void
squeue_profile_disable(squeue_t *sqp)
{
	mutex_enter(&sqp->sq_lock);
	sqp->sq_state &= ~SQS_PROFILE;
	mutex_exit(&sqp->sq_lock);
}

/*
 * Zero this squeue's accumulated statistics.  No-op unless compiled
 * with SQUEUE_PROFILE.  Note: sq_stats is cleared without sq_lock held.
 */
void
squeue_profile_reset(squeue_t *sqp)
{
#if SQUEUE_PROFILE
	bzero(&sqp->sq_stats, sizeof (sqstat_t));
#endif
}

/* Globally turn squeue profiling on (no-op unless SQUEUE_PROFILE). */
void
squeue_profile_start(void)
{
#if SQUEUE_PROFILE
	squeue_profile = B_TRUE;
#endif
}

/* Globally turn squeue profiling off (no-op unless SQUEUE_PROFILE). */
void
squeue_profile_stop(void)
{
#if SQUEUE_PROFILE
	squeue_profile = B_FALSE;
#endif
}

/*
 * Return the address of the caller's private slot 'p' in this squeue.
 * Each squeue carries SQPRIVATE_MAX per-consumer uintptr_t slots.
 */
uintptr_t *
squeue_getprivate(squeue_t *sqp, sqprivate_t p)
{
	ASSERT(p < SQPRIVATE_MAX);

	return (&sqp->sq_private[p]);
}

/* Return the CPU this squeue's worker thread is (to be) bound to. */
processorid_t
squeue_binding(squeue_t *sqp)
{
	return (sqp->sq_bind);
}