/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * UNIX Device Driver Interface functions
 * This file contains the C-versions of putnext() and put().
 * Assembly language versions exist for some architectures.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/debug.h>
#include <sys/t_lock.h>
#include <sys/stream.h>
#include <sys/thread.h>
#include <sys/strsubr.h>
#include <sys/ddi.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/strft.h>
#include <sys/stack.h>
#include <sys/archsystm.h>

/*
 * Streams with many modules may create long chains of calls via putnext() which
 * may exhaust stack space. When putnext detects that the stack space left is
 * too small (less than PUT_STACK_NEEDED), the call chain is broken and
 * further processing is delegated to the background thread via a call to
 * putnext_tail(). Unfortunately there is no generic solution with a fixed stack
 * size, and putnext() is a recursive function, so this hack is a necessary evil.
 *
 * The redzone value is chosen dependent on the default stack size which is 8K
 * on 32-bit kernels and on x86 and 16K on 64-bit kernels. The values are chosen
 * empirically: for 64-bit kernels it is 5000 and for 32-bit kernels it is 3000.
 * Experiments showed that 2500 is not enough for either 32-bit or 64-bit
 * kernels.
 *
 * The redzone value is a tunable rather than a constant to allow adjustments
 * in the field.
 *
 * The check in PUT_STACK_NOTENOUGH is taken from the segkp_map_red() function.
 * It is possible to define it as a generic function exported by seg_kp, but
 *
 * a) It may sound like an open invitation to use the facility indiscriminately.
 * b) It adds an extra function call in the putnext path.
 *
 * We keep a global counter `put_stack_notenough' which keeps track of how many
 * times the stack-switching hack was used.
 */

static ulong_t put_stack_notenough;

#ifdef	_LP64
#define	PUT_STACK_NEEDED	5000
#else
#define	PUT_STACK_NEEDED	3000
#endif

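/*
 * Like other global kernel tunables, put_stack_needed can also be overridden
 * in the field, e.g. with a "set put_stack_needed = ..." line in /etc/system.
 */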
int put_stack_needed = PUT_STACK_NEEDED;

#if defined(STACK_GROWTH_DOWN)
#define	PUT_STACK_NOTENOUGH() \
	(((STACK_BIAS + (uintptr_t)getfp() - \
	    (uintptr_t)curthread->t_stkbase) < put_stack_needed) && \
	    ++put_stack_notenough)
#else
#error	"STACK_GROWTH_DOWN undefined"
#endif
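/*
 * Illustration (not code): with the 16K stack of a 64-bit kernel and the
 * default put_stack_needed of 5000, the hand-off to the background thread
 * triggers once fewer than 5000 bytes remain between the current frame
 * pointer and t_stkbase, i.e. after roughly 11K of stack has been consumed
 * by the putnext() call chain. STACK_BIAS accounts for architectures (such
 * as SPARC V9) whose frame pointer is offset from the true stack address;
 * it is zero elsewhere.
 */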

boolean_t UseFastlocks = B_FALSE;

/*
 * function: putnext()
 * purpose:  call the put routine of the queue linked to qp
 *
 * Note: this function is written to perform well on modern computer
 * architectures by e.g. preloading values into registers and "smearing" out
 * code.
 *
 * A note on the fastput mechanism. The most significant bit of a
 * putcount is considered the "FASTPUT" bit. If set, then there is
 * nothing stopping a concurrent put from occurring (note that putcounts
 * are only allowed on CIPUT perimeters). If, however, it is cleared,
 * then we need to take the normal lock path by acquiring the SQLOCK.
 * This is a slowlock. When a thread starts exclusiveness, e.g. wants
 * writer access, it will clear the FASTPUT bit, causing new threads
 * to take the slowlock path. This assures that putcounts will not
 * increase in value, so the want-writer does not need to constantly
 * acquire the putlocks to sum the putcounts. This does have the
 * possibility of having the count drop right after reading, but that
 * is no different than acquiring, reading and then releasing. However,
 * in this mode, it cannot go up, so eventually they will drop to zero
 * and the want-writer can proceed.
 *
 * If the FASTPUT bit is set, or in the slowlock path we see that there
 * are no writers or want-writers, we make the choice of calling the
 * putproc, or a "fast-fill_syncq". The fast-fill is a fill with
 * immediate intention to drain. This is done because there are
 * messages already at the queue waiting to drain. To preserve message
 * ordering, we need to put this message at the end, and pick up the
 * messages at the beginning. We call the macro that actually
 * enqueues the message on the queue, and then call qdrain_syncq. If
 * there is already a drainer, we just return. We could make that
 * check before calling qdrain_syncq, but it is a little more clear
 * to have qdrain_syncq do this (we might try the above optimization
 * as this behavior evolves). qdrain_syncq assumes that SQ_EXCL is set
 * already if this is a non-CIPUT perimeter, and that an appropriate
 * claim has been made. So we do all that work before dropping the
 * SQLOCK with our claim.
 *
 * If we cannot proceed with the putproc/fast-fill, we just fall
 * through to the qfill_syncq, and then tail processing. If state
 * has changed in that cycle, or wakeups are needed, it will occur
 * there.
 */
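/*
 * For reference, a typical caller looks roughly like this sketch (not taken
 * from this file; "xxmod" is a hypothetical module). A put procedure that
 * simply forwards each message downstream invokes the next queue's put
 * procedure through putnext():
 *
 *	static int
 *	xxmod_rput(queue_t *q, mblk_t *mp)
 *	{
 *		putnext(q, mp);
 *		return (0);
 *	}
 */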
void
putnext(queue_t *qp, mblk_t *mp)
{
	queue_t		*fqp = qp;	/* For strft tracing */
	syncq_t		*sq;
	uint16_t	flags;
	uint16_t	drain_mask;
	struct qinit	*qi;
	int		(*putproc)();
	struct stdata	*stp;
	int		ix;
	boolean_t	queued = B_FALSE;
	kmutex_t	*sdlock = NULL;
	kmutex_t	*sqciplock = NULL;
	ushort_t	*sqcipcount = NULL;

	TRACE_2(TR_FAC_STREAMS_FR, TR_PUTNEXT_START,
	    "putnext_start:(%p, %p)", qp, mp);

	ASSERT(mp->b_datap->db_ref != 0);
	ASSERT(mp->b_next == NULL && mp->b_prev == NULL);
	stp = STREAM(qp);
	ASSERT(stp != NULL);
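	/*
	 * Take a stream-head putlock (a per-CPU ciputctrl lock if the stream
	 * head has them, otherwise sd_lock) to keep q_next stable while we
	 * look at the next queue and claim its syncq.
	 */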
	if (stp->sd_ciputctrl != NULL) {
		ix = CPU->cpu_seqid & stp->sd_nciputctrl;
		sdlock = &stp->sd_ciputctrl[ix].ciputctrl_lock;
		mutex_enter(sdlock);
	} else {
		mutex_enter(sdlock = &stp->sd_lock);
	}
	qp = qp->q_next;
	sq = qp->q_syncq;
	ASSERT(sq != NULL);
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
	qi = qp->q_qinfo;

	if (sq->sq_ciputctrl != NULL) {
		/* fastlock: */
		ASSERT(sq->sq_flags & SQ_CIPUT);
		ix = CPU->cpu_seqid & sq->sq_nciputctrl;
		sqciplock = &sq->sq_ciputctrl[ix].ciputctrl_lock;
		sqcipcount = &sq->sq_ciputctrl[ix].ciputctrl_count;
		mutex_enter(sqciplock);
		if (!((*sqcipcount) & SQ_FASTPUT) ||
		    (sq->sq_flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS))) {
			mutex_exit(sqciplock);
			sqciplock = NULL;
			goto slowlock;
		}
		mutex_exit(sdlock);
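		/*
		 * Register a claim on the syncq by bumping this CPU's put
		 * count; the claim keeps the syncq from closing while we run
		 * the put procedure without holding SQLOCK.
		 */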
		(*sqcipcount)++;
		ASSERT(*sqcipcount != 0);
		queued = qp->q_sqflags & Q_SQQUEUED;
		mutex_exit(sqciplock);
	} else {
	slowlock:
		ASSERT(sqciplock == NULL);
		mutex_enter(SQLOCK(sq));
		mutex_exit(sdlock);
		flags = sq->sq_flags;
		/*
		 * We are going to drop SQLOCK, so make a claim to prevent syncq
		 * from closing.
		 */
		sq->sq_count++;
		ASSERT(sq->sq_count != 0);		/* Wraparound */
		/*
		 * If there are writers or exclusive waiters, there is not much
		 * we can do. Place the message on the syncq and schedule a
		 * background thread to drain it.
		 *
		 * Also if we are approaching the end of the stack, fill the
		 * syncq and switch processing to a background thread - see
		 * comments on top.
		 */
		if ((flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS)) ||
		    (sq->sq_needexcl != 0) || PUT_STACK_NOTENOUGH()) {

			TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
			    "putnext_end:(%p, %p, %p) SQ_EXCL fill",
			    qp, mp, sq);

			/*
			 * NOTE: qfill_syncq will need QLOCK. It is safe to drop
			 * SQLOCK because positive sq_count keeps the syncq from
			 * closing.
			 */
			mutex_exit(SQLOCK(sq));

			qfill_syncq(sq, qp, mp);
			/*
			 * NOTE: after the call to qfill_syncq() qp may be
			 * closed, so neither qp nor sq may be referenced at
			 * this point.
			 *
			 * This ASSERT is located here to prevent stack frame
			 * consumption in the DEBUG code.
			 */
			ASSERT(sqciplock == NULL);
			return;
		}

		queued = qp->q_sqflags & Q_SQQUEUED;
		/*
		 * If not a concurrent perimeter, we need to acquire
		 * it exclusively. It could not have been previously
		 * set since we held the SQLOCK before testing
		 * SQ_GOAWAY above (which includes SQ_EXCL).
		 * We do this here because we hold the SQLOCK, and need
		 * to make this state change BEFORE dropping it.
		 */
		if (!(flags & SQ_CIPUT)) {
			ASSERT((sq->sq_flags & SQ_EXCL) == 0);
			ASSERT(!(sq->sq_type & SQ_CIPUT));
			sq->sq_flags |= SQ_EXCL;
		}
		mutex_exit(SQLOCK(sq));
	}

	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));

	/*
	 * We now have a claim on the syncq, we are either going to
	 * put the message on the syncq and then drain it, or we are
	 * going to call the putproc().
	 */
	putproc = qi->qi_putp;
	if (!queued) {
		STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
		    mp->b_datap->db_base);
		(*putproc)(qp, mp);
		ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
	} else {
		mutex_enter(QLOCK(qp));
		/*
		 * If there are no messages in front of us, just call putproc(),
		 * otherwise enqueue the message and drain the queue.
		 */
		if (qp->q_syncqmsgs == 0) {
			mutex_exit(QLOCK(qp));
			STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
			    mp->b_datap->db_base);
			(*putproc)(qp, mp);
			ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		} else {
			/*
			 * We are doing a fill with the intent to
			 * drain (meaning we are filling because
			 * there are messages in front of us and we
			 * need to preserve message ordering).
			 * Therefore, put the message on the queue
			 * and call qdrain_syncq (must be done with
			 * the QLOCK held).
			 */
			STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT,
			    mp->b_rptr - mp->b_datap->db_base);

#ifdef DEBUG
			/*
			 * These two values were in the original code for
			 * all syncq messages. This is unnecessary in
			 * the current implementation, but was retained
			 * in debug mode as it is useful to know where
			 * problems occur.
			 */
			mp->b_queue = qp;
			mp->b_prev = (mblk_t *)putproc;
#endif
			SQPUT_MP(qp, mp);
			qdrain_syncq(sq, qp);
			ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
		}
	}
	/*
	 * Before we release our claim, we need to see if any
	 * events were posted. If the syncq is SQ_EXCL && SQ_QUEUED,
	 * we were responsible for going exclusive and, therefore,
	 * are responsible for draining.
	 */
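	/*
	 * The drain mask computed here is cleared from the flags passed to
	 * putnext_tail() below: if we did not go exclusive, any messages
	 * queued on the syncq are another claimant's responsibility, so
	 * putnext_tail() must not be told to drain them.
	 */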
	if (sq->sq_flags & (SQ_EXCL)) {
		drain_mask = 0;
	} else {
		drain_mask = SQ_QUEUED;
	}

	if (sqciplock != NULL) {
		mutex_enter(sqciplock);
		flags = sq->sq_flags;
		ASSERT(flags & SQ_CIPUT);
		/* SQ_EXCL could have been set by qwriter_inner */
		if ((flags & (SQ_EXCL|SQ_TAIL)) || sq->sq_needexcl) {
			/*
			 * We need SQLOCK to handle
			 * wakeups/drains/flags change. sqciplock
			 * is needed to decrement sqcipcount.
			 * SQLOCK has to be grabbed before sqciplock
			 * for lock ordering purposes.
			 * After sqcipcount is decremented some lock
			 * still needs to be held to make sure
			 * syncq won't get freed on us.
			 *
			 * To prevent deadlocks we try to grab SQLOCK and if it
			 * is held already we drop sqciplock, acquire SQLOCK and
			 * reacquire sqciplock again.
			 */
			if (mutex_tryenter(SQLOCK(sq)) == 0) {
				mutex_exit(sqciplock);
				mutex_enter(SQLOCK(sq));
				mutex_enter(sqciplock);
			}
			flags = sq->sq_flags;
			ASSERT(*sqcipcount != 0);
			(*sqcipcount)--;
			mutex_exit(sqciplock);
		} else {
			ASSERT(*sqcipcount != 0);
			(*sqcipcount)--;
			mutex_exit(sqciplock);
			TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
			    "putnext_end:(%p, %p, %p) done", qp, mp, sq);
			return;
		}
	} else {
		mutex_enter(SQLOCK(sq));
		flags = sq->sq_flags;
		ASSERT(sq->sq_count != 0);
		sq->sq_count--;
	}
	if ((flags & (SQ_TAIL)) || sq->sq_needexcl) {
		putnext_tail(sq, qp, (flags & ~drain_mask));
		/*
		 * The only purpose of this ASSERT is to preserve calling stack
		 * in DEBUG kernel.
		 */
		ASSERT(sq != NULL);
		return;
	}
	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)) || queued);
	ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) || queued);
	/*
	 * Safe to always drop SQ_EXCL:
	 * Not SQ_CIPUT means we set SQ_EXCL above
	 * For SQ_CIPUT SQ_EXCL will only be set if the put
	 * procedure did a qwriter(INNER) in which case
	 * nobody else is in the inner perimeter and we
	 * are exiting.
	 *
	 * I would like to make the following assertion:
	 *
	 * ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) != (SQ_EXCL|SQ_CIPUT) ||
	 *	sq->sq_count == 0);
	 *
	 * which indicates that if we are both putshared and exclusive,
	 * we became exclusive while executing the putproc, and the only
	 * claim on the syncq was the one we dropped a few lines above.
	 * But other threads that enter putnext while the syncq is exclusive
	 * need to make a claim as they may need to drop SQLOCK in the
	 * has_writers case to avoid deadlocks. If these threads are
	 * delayed or preempted, it is possible that the writer thread can
	 * find out that there are other claims making the (sq_count == 0)
	 * test invalid.
	 */

	sq->sq_flags = flags & ~SQ_EXCL;
	mutex_exit(SQLOCK(sq));
	TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
	    "putnext_end:(%p, %p, %p) done", qp, mp, sq);
}


/*
 * Wrapper for the qi_putp entry in the module ops vector.
 * Implements an asynchronous putnext().
 * Note that, unlike putnext(), this routine is NOT optimized for the
 * fastpath. Calling this routine will grab whatever locks are necessary
 * to protect the stream head, q_next, and syncq's.
 * And since it is in the normal locks path, we do not use putlocks if
 * they exist (though this can be changed by swapping the value of
 * UseFastlocks).
 */
void
put(queue_t *qp, mblk_t *mp)
{
	queue_t		*fqp = qp;	/* For strft tracing */
	syncq_t		*sq;
	uint16_t	flags;
	uint16_t	drain_mask;
	struct qinit	*qi;
	int		(*putproc)();
	int		ix;
	boolean_t	queued = B_FALSE;
	kmutex_t	*sqciplock = NULL;
	ushort_t	*sqcipcount = NULL;

	TRACE_2(TR_FAC_STREAMS_FR, TR_PUT_START,
	    "put:(%X, %X)", qp, mp);
	ASSERT(mp->b_datap->db_ref != 0);
	ASSERT(mp->b_next == NULL && mp->b_prev == NULL);

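	/*
	 * Unlike putnext(), put() operates on qp itself rather than on
	 * qp->q_next, so there is no q_next pointer to stabilize and no
	 * stream-head lock is taken here.
	 */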
	sq = qp->q_syncq;
	ASSERT(sq != NULL);
	qi = qp->q_qinfo;

	if (UseFastlocks && sq->sq_ciputctrl != NULL) {
		/* fastlock: */
		ASSERT(sq->sq_flags & SQ_CIPUT);
		ix = CPU->cpu_seqid & sq->sq_nciputctrl;
		sqciplock = &sq->sq_ciputctrl[ix].ciputctrl_lock;
		sqcipcount = &sq->sq_ciputctrl[ix].ciputctrl_count;
		mutex_enter(sqciplock);
		if (!((*sqcipcount) & SQ_FASTPUT) ||
		    (sq->sq_flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS))) {
			mutex_exit(sqciplock);
			sqciplock = NULL;
			goto slowlock;
		}
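		/*
		 * As in putnext(), bump this CPU's put count to claim the
		 * syncq so it cannot close while we run the put procedure.
		 */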
		(*sqcipcount)++;
		ASSERT(*sqcipcount != 0);
		queued = qp->q_sqflags & Q_SQQUEUED;
		mutex_exit(sqciplock);
	} else {
	slowlock:
		ASSERT(sqciplock == NULL);
		mutex_enter(SQLOCK(sq));
		flags = sq->sq_flags;
		/*
		 * We are going to drop SQLOCK, so make a claim to prevent syncq
		 * from closing.
		 */
		sq->sq_count++;
		ASSERT(sq->sq_count != 0);		/* Wraparound */
		/*
		 * If there are writers or exclusive waiters, there is not much
		 * we can do. Place the message on the syncq and schedule a
		 * background thread to drain it.
		 *
		 * Also if we are approaching the end of the stack, fill the
		 * syncq and switch processing to a background thread - see
		 * comments on top.
		 */
		if ((flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS)) ||
		    (sq->sq_needexcl != 0) || PUT_STACK_NOTENOUGH()) {

			TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
			    "putnext_end:(%p, %p, %p) SQ_EXCL fill",
			    qp, mp, sq);

			/*
			 * NOTE: qfill_syncq will need QLOCK. It is safe to drop
			 * SQLOCK because positive sq_count keeps the syncq from
			 * closing.
			 */
			mutex_exit(SQLOCK(sq));

			qfill_syncq(sq, qp, mp);
			/*
			 * NOTE: after the call to qfill_syncq() qp may be
			 * closed, so neither qp nor sq may be referenced at
			 * this point.
			 *
			 * This ASSERT is located here to prevent stack frame
			 * consumption in the DEBUG code.
			 */
			ASSERT(sqciplock == NULL);
			return;
		}

		queued = qp->q_sqflags & Q_SQQUEUED;
		/*
		 * If not a concurrent perimeter, we need to acquire
		 * it exclusively. It could not have been previously
		 * set since we held the SQLOCK before testing
		 * SQ_GOAWAY above (which includes SQ_EXCL).
		 * We do this here because we hold the SQLOCK, and need
		 * to make this state change BEFORE dropping it.
		 */
		if (!(flags & SQ_CIPUT)) {
			ASSERT((sq->sq_flags & SQ_EXCL) == 0);
			ASSERT(!(sq->sq_type & SQ_CIPUT));
			sq->sq_flags |= SQ_EXCL;
		}
		mutex_exit(SQLOCK(sq));
	}

	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));

	/*
	 * We now have a claim on the syncq, we are either going to
	 * put the message on the syncq and then drain it, or we are
	 * going to call the putproc().
	 */
	putproc = qi->qi_putp;
	if (!queued) {
		STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
		    mp->b_datap->db_base);
		(*putproc)(qp, mp);
		ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
	} else {
		mutex_enter(QLOCK(qp));
		/*
		 * If there are no messages in front of us, just call putproc(),
		 * otherwise enqueue the message and drain the queue.
		 */
		if (qp->q_syncqmsgs == 0) {
			mutex_exit(QLOCK(qp));
			STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
			    mp->b_datap->db_base);
			(*putproc)(qp, mp);
			ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		} else {
			/*
			 * We are doing a fill with the intent to
			 * drain (meaning we are filling because
			 * there are messages in front of us and we
			 * need to preserve message ordering).
			 * Therefore, put the message on the queue
			 * and call qdrain_syncq (must be done with
			 * the QLOCK held).
			 */
			STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT,
			    mp->b_rptr - mp->b_datap->db_base);

#ifdef DEBUG
			/*
			 * These two values were in the original code for
			 * all syncq messages. This is unnecessary in
			 * the current implementation, but was retained
			 * in debug mode as it is useful to know where
			 * problems occur.
			 */
			mp->b_queue = qp;
			mp->b_prev = (mblk_t *)putproc;
#endif
			SQPUT_MP(qp, mp);
			qdrain_syncq(sq, qp);
			ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
		}
	}
	/*
	 * Before we release our claim, we need to see if any
	 * events were posted. If the syncq is SQ_EXCL && SQ_QUEUED,
	 * we were responsible for going exclusive and, therefore,
	 * are responsible for draining.
	 */
	if (sq->sq_flags & (SQ_EXCL)) {
		drain_mask = 0;
	} else {
		drain_mask = SQ_QUEUED;
	}

	if (sqciplock != NULL) {
		mutex_enter(sqciplock);
		flags = sq->sq_flags;
		ASSERT(flags & SQ_CIPUT);
		/* SQ_EXCL could have been set by qwriter_inner */
		if ((flags & (SQ_EXCL|SQ_TAIL)) || sq->sq_needexcl) {
			/*
			 * We need SQLOCK to handle
			 * wakeups/drains/flags change. sqciplock
			 * is needed to decrement sqcipcount.
			 * SQLOCK has to be grabbed before sqciplock
			 * for lock ordering purposes.
			 * After sqcipcount is decremented some lock
			 * still needs to be held to make sure
			 * syncq won't get freed on us.
			 *
			 * To prevent deadlocks we try to grab SQLOCK and if it
			 * is held already we drop sqciplock, acquire SQLOCK and
			 * reacquire sqciplock again.
			 */
			if (mutex_tryenter(SQLOCK(sq)) == 0) {
				mutex_exit(sqciplock);
				mutex_enter(SQLOCK(sq));
				mutex_enter(sqciplock);
			}
			flags = sq->sq_flags;
			ASSERT(*sqcipcount != 0);
			(*sqcipcount)--;
			mutex_exit(sqciplock);
		} else {
			ASSERT(*sqcipcount != 0);
			(*sqcipcount)--;
			mutex_exit(sqciplock);
			TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
			    "putnext_end:(%p, %p, %p) done", qp, mp, sq);
			return;
		}
	} else {
		mutex_enter(SQLOCK(sq));
		flags = sq->sq_flags;
		ASSERT(sq->sq_count != 0);
		sq->sq_count--;
	}
	if ((flags & (SQ_TAIL)) || sq->sq_needexcl) {
		putnext_tail(sq, qp, (flags & ~drain_mask));
		/*
		 * The only purpose of this ASSERT is to preserve calling stack
		 * in DEBUG kernel.
		 */
		ASSERT(sq != NULL);
		return;
	}
	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)) || queued);
	ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) || queued);
	/*
	 * Safe to always drop SQ_EXCL:
	 * Not SQ_CIPUT means we set SQ_EXCL above
	 * For SQ_CIPUT SQ_EXCL will only be set if the put
	 * procedure did a qwriter(INNER) in which case
	 * nobody else is in the inner perimeter and we
	 * are exiting.
	 *
	 * I would like to make the following assertion:
	 *
	 * ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) != (SQ_EXCL|SQ_CIPUT) ||
	 *	sq->sq_count == 0);
	 *
	 * which indicates that if we are both putshared and exclusive,
	 * we became exclusive while executing the putproc, and the only
	 * claim on the syncq was the one we dropped a few lines above.
	 * But other threads that enter putnext while the syncq is exclusive
	 * need to make a claim as they may need to drop SQLOCK in the
	 * has_writers case to avoid deadlocks. If these threads are
	 * delayed or preempted, it is possible that the writer thread can
	 * find out that there are other claims making the (sq_count == 0)
	 * test invalid.
	 */

	sq->sq_flags = flags & ~SQ_EXCL;
	mutex_exit(SQLOCK(sq));
	TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
	    "putnext_end:(%p, %p, %p) done", qp, mp, sq);
}