xref: /titanic_54/usr/src/uts/common/io/tl.c (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1*7c478bd9Sstevel@tonic-gate /*
2*7c478bd9Sstevel@tonic-gate  * CDDL HEADER START
3*7c478bd9Sstevel@tonic-gate  *
4*7c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*7c478bd9Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*7c478bd9Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*7c478bd9Sstevel@tonic-gate  * with the License.
8*7c478bd9Sstevel@tonic-gate  *
9*7c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*7c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*7c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*7c478bd9Sstevel@tonic-gate  * and limitations under the License.
13*7c478bd9Sstevel@tonic-gate  *
14*7c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*7c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*7c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*7c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*7c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*7c478bd9Sstevel@tonic-gate  *
20*7c478bd9Sstevel@tonic-gate  * CDDL HEADER END
21*7c478bd9Sstevel@tonic-gate  */
22*7c478bd9Sstevel@tonic-gate /*
23*7c478bd9Sstevel@tonic-gate  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24*7c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
25*7c478bd9Sstevel@tonic-gate  */
26*7c478bd9Sstevel@tonic-gate 
27*7c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*7c478bd9Sstevel@tonic-gate 
29*7c478bd9Sstevel@tonic-gate /*
30*7c478bd9Sstevel@tonic-gate  * Multithreaded STREAMS Local Transport Provider.
31*7c478bd9Sstevel@tonic-gate  *
32*7c478bd9Sstevel@tonic-gate  * OVERVIEW
33*7c478bd9Sstevel@tonic-gate  * ========
34*7c478bd9Sstevel@tonic-gate  *
35*7c478bd9Sstevel@tonic-gate  * This driver provides TLI as well as socket semantics.  It provides
36*7c478bd9Sstevel@tonic-gate  * connectionless, connection-oriented, and connection-oriented with orderly
37*7c478bd9Sstevel@tonic-gate  * release transports for TLI and sockets. Each transport type has a separate
38*7c478bd9Sstevel@tonic-gate  * name space (i.e. it is not possible to connect from a socket to a TLI
39*7c478bd9Sstevel@tonic-gate  * endpoint) - this removes any name space conflicts when binding to
40*7c478bd9Sstevel@tonic-gate  * socket-style transport addresses.
41*7c478bd9Sstevel@tonic-gate  *
42*7c478bd9Sstevel@tonic-gate  * NOTE: There is one exception: Socket ticots and ticotsord transports share
43*7c478bd9Sstevel@tonic-gate  * the same namespace. In fact, sockets always use ticotsord type transport.
44*7c478bd9Sstevel@tonic-gate  *
45*7c478bd9Sstevel@tonic-gate  * The driver mode is specified during open() by the minor number used for
46*7c478bd9Sstevel@tonic-gate  * open.
47*7c478bd9Sstevel@tonic-gate  *
48*7c478bd9Sstevel@tonic-gate  * In addition, sockets have the following semantic differences:
49*7c478bd9Sstevel@tonic-gate  *	No support for passing up credentials (TL_SET[U]CRED).
50*7c478bd9Sstevel@tonic-gate  *
51*7c478bd9Sstevel@tonic-gate  *	Options are passed through transparently on T_CONN_REQ to T_CONN_IND,
52*7c478bd9Sstevel@tonic-gate  *	from T_UNITDATA_REQ to T_UNITDATA_IND, and from T_OPTDATA_REQ to
53*7c478bd9Sstevel@tonic-gate  *	T_OPTDATA_IND.
54*7c478bd9Sstevel@tonic-gate  *
55*7c478bd9Sstevel@tonic-gate  *	The T_CONN_CON is generated when processing the T_CONN_REQ i.e. before
56*7c478bd9Sstevel@tonic-gate  *	a T_CONN_RES is received from the acceptor. This means that a socket
57*7c478bd9Sstevel@tonic-gate  *	connect will complete before the peer has called accept.
58*7c478bd9Sstevel@tonic-gate  *
59*7c478bd9Sstevel@tonic-gate  *
60*7c478bd9Sstevel@tonic-gate  * MULTITHREADING
61*7c478bd9Sstevel@tonic-gate  * ==============
62*7c478bd9Sstevel@tonic-gate  *
63*7c478bd9Sstevel@tonic-gate  * The driver does not use STREAMS protection mechanisms. Instead it uses a
64*7c478bd9Sstevel@tonic-gate  * generic "serializer" abstraction. Most of the operations are executed behind
65*7c478bd9Sstevel@tonic-gate  * the serializer and are essentially single-threaded. All functions executed
66*7c478bd9Sstevel@tonic-gate  * behind the same serializer are strictly serialized. So if one thread calls
67*7c478bd9Sstevel@tonic-gate  * serializer_enter(serializer, foo, mp1, arg1); and another thread calls
68*7c478bd9Sstevel@tonic-gate  * serializer_enter(serializer, bar, mp2, arg1); then (depending on which one
69*7c478bd9Sstevel@tonic-gate  * was called first) the actual sequence will be foo(mp1, arg1); bar(mp2, arg1)
70*7c478bd9Sstevel@tonic-gate  * or bar(mp2, arg1); foo(mp1, arg1). But foo() and bar() will never run at the
71*7c478bd9Sstevel@tonic-gate  * same time.
72*7c478bd9Sstevel@tonic-gate  *
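 * As an illustration, the usual way a driver entry point hands work to the
 * serializer looks roughly like this (a simplified sketch; tl_refhold(),
 * tl_serializer_enter() and the tl_*_ser() callbacks are declared later in
 * this file, "tep" and "mp" stand for the endpoint and the message):
 *
 *	tl_refhold(tep);
 *	tl_serializer_enter(tep, tl_wput_ser, mp);
 *
 * The callback is later invoked as tl_wput_ser(mp, tep), strictly serialized
 * with every other callback queued on the same serializer.
 *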
73*7c478bd9Sstevel@tonic-gate  * Connectionless transports use a single serializer per transport type (one for
74*7c478bd9Sstevel@tonic-gate  * TLI and one for sockets). Connection-oriented transports use finer-grained
75*7c478bd9Sstevel@tonic-gate  * serializers.
76*7c478bd9Sstevel@tonic-gate  *
77*7c478bd9Sstevel@tonic-gate  * All COTS-type endpoints start their life with private serializers. During
78*7c478bd9Sstevel@tonic-gate  * connection request processing the endpoint serializer is switched to the
79*7c478bd9Sstevel@tonic-gate  * listener's serializer and the rest of T_CONN_REQ processing is done on the
80*7c478bd9Sstevel@tonic-gate  * listener's serializer. During T_CONN_RES processing the eager's serializer is
81*7c478bd9Sstevel@tonic-gate  * switched from the listener's to the acceptor's serializer and after that point
82*7c478bd9Sstevel@tonic-gate  * all processing for the eager and the acceptor happens on this serializer. To
83*7c478bd9Sstevel@tonic-gate  * avoid races between an endpoint close and a change of its serializer, closes
84*7c478bd9Sstevel@tonic-gate  * are blocked while serializers are manipulated.
85*7c478bd9Sstevel@tonic-gate  *
86*7c478bd9Sstevel@tonic-gate  * References accounting
87*7c478bd9Sstevel@tonic-gate  * ---------------------
88*7c478bd9Sstevel@tonic-gate  *
89*7c478bd9Sstevel@tonic-gate  * Endpoints are reference counted and freed when the last reference is
90*7c478bd9Sstevel@tonic-gate  * dropped. Functions within the serializer may access endpoint state even
91*7c478bd9Sstevel@tonic-gate  * after the endpoint has closed. The te_closing flag being set on the endpoint
92*7c478bd9Sstevel@tonic-gate  * indicates that the endpoint has entered its close routine.
93*7c478bd9Sstevel@tonic-gate  *
94*7c478bd9Sstevel@tonic-gate  * One reference is held for each opened endpoint instance. The reference
95*7c478bd9Sstevel@tonic-gate  * counter is incremented when the endpoint is linked to another endpoint and
96*7c478bd9Sstevel@tonic-gate  * decremented when the link disappears. It is also incremented when the
97*7c478bd9Sstevel@tonic-gate  * endpoint is found by the hash table lookup. This increment is atomic with the
98*7c478bd9Sstevel@tonic-gate  * lookup itself and happens while the hash table read lock is held.
99*7c478bd9Sstevel@tonic-gate  *
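 * A minimal sketch of the resulting usage pattern (tl_find_peer() and
 * tl_refrele() are declared later in this file):
 *
 *	peer = tl_find_peer(tep, &addr);	returned held, or NULL
 *	if (peer != NULL) {
 *		... peer state may be examined safely here ...
 *		tl_refrele(peer);		last release frees the endpoint
 *	}
 *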
100*7c478bd9Sstevel@tonic-gate  * Close synchronization
101*7c478bd9Sstevel@tonic-gate  * ---------------------
102*7c478bd9Sstevel@tonic-gate  *
103*7c478bd9Sstevel@tonic-gate  * During close the endpoint is marked as closing using the te_closing flag. It
104*7c478bd9Sstevel@tonic-gate  * is usually enough to check for the te_closing flag since all other state
105*7c478bd9Sstevel@tonic-gate  * changes happen after this flag is set and close has entered the serializer.
106*7c478bd9Sstevel@tonic-gate  * Immediately after setting the te_closing flag, tl_close() enters the
107*7c478bd9Sstevel@tonic-gate  * serializer and waits until the callback finishes. This allows all functions
108*7c478bd9Sstevel@tonic-gate  * called within the serializer to simply check te_closing without any locks.
109*7c478bd9Sstevel@tonic-gate  *
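 * For example, a function running behind the serializer can bail out of a
 * closing endpoint with a simple lockless test (sketch only):
 *
 *	if (tep->te_closing) {
 *		freemsg(mp);
 *		return;
 *	}
 *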
110*7c478bd9Sstevel@tonic-gate  * Serializer management
111*7c478bd9Sstevel@tonic-gate  * ---------------------
112*7c478bd9Sstevel@tonic-gate  *
113*7c478bd9Sstevel@tonic-gate  * For COTS transports serializers are created when the endpoint is constructed
114*7c478bd9Sstevel@tonic-gate  * and destroyed when the endpoint is destructed. CLTS transports use global
115*7c478bd9Sstevel@tonic-gate  * serializers - one for sockets and one for TLI.
116*7c478bd9Sstevel@tonic-gate  *
117*7c478bd9Sstevel@tonic-gate  * COTS serializers have separate reference counts to deal with several
118*7c478bd9Sstevel@tonic-gate  * endpoints sharing the same serializer. There is a subtle problem related to
119*7c478bd9Sstevel@tonic-gate  * the serializer destruction. The serializer should never be destroyed by any
120*7c478bd9Sstevel@tonic-gate  * function executed inside the serializer. This means that close has to wait
121*7c478bd9Sstevel@tonic-gate  * until all serializer activity for this endpoint is finished before it can
122*7c478bd9Sstevel@tonic-gate  * drop the last reference on the endpoint (which may also free the serializer).
123*7c478bd9Sstevel@tonic-gate  * This is only relevant for COTS transports which manage serializers
124*7c478bd9Sstevel@tonic-gate  * dynamically. For CLTS transports close may complete without waiting for all
125*7c478bd9Sstevel@tonic-gate  * serializer activity to finish since the serializer is only destroyed at
126*7c478bd9Sstevel@tonic-gate  * driver detach time.
127*7c478bd9Sstevel@tonic-gate  *
128*7c478bd9Sstevel@tonic-gate  * COTS endpoints keep track of the number of outstanding requests on the
129*7c478bd9Sstevel@tonic-gate  * serializer for the endpoint. The code handling accept() avoids changing the
130*7c478bd9Sstevel@tonic-gate  * client's serializer if it has any pending messages on the serializer and
131*7c478bd9Sstevel@tonic-gate  * instead moves the acceptor to the listener's serializer.
132*7c478bd9Sstevel@tonic-gate  *
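 * The decision made while processing T_CONN_RES is therefore roughly
 * (a sketch; te_ser_count is the per-endpoint count mentioned above):
 *
 *	if (eager->te_ser_count > 0)
 *		move the acceptor onto the listener's serializer instead
 *	else
 *		move the eager onto the acceptor's serializer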
133*7c478bd9Sstevel@tonic-gate  *
134*7c478bd9Sstevel@tonic-gate  * Use of hash tables
135*7c478bd9Sstevel@tonic-gate  * ------------------
136*7c478bd9Sstevel@tonic-gate  *
137*7c478bd9Sstevel@tonic-gate  * The driver uses the modhash hash table implementation. Each transport uses two
138*7c478bd9Sstevel@tonic-gate  * hash tables - one for finding endpoints by acceptor ID and another one for
139*7c478bd9Sstevel@tonic-gate  * finding endpoints by address. For sockets TICOTS and TICOTSORD share the same
140*7c478bd9Sstevel@tonic-gate  * pair of hash tables since sockets only use TICOTSORD.
141*7c478bd9Sstevel@tonic-gate  *
142*7c478bd9Sstevel@tonic-gate  * All hash table lookups increment a reference count for returned endpoints,
143*7c478bd9Sstevel@tonic-gate  * so we may safely check the endpoint state even when the endpoint is removed
144*7c478bd9Sstevel@tonic-gate  * from the hash by another thread immediately after it is found.
145*7c478bd9Sstevel@tonic-gate  *
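 * A sketch of such a lookup (mod_hash_find_cb() is part of the modhash API;
 * tl_find_callback(), declared below, performs the refhold while the hash
 * read lock is still held):
 *
 *	tl_endpt_t *peer = NULL;
 *
 *	if (mod_hash_find_cb(tep->te_addrhash, (mod_hash_key_t)&addr,
 *	    (mod_hash_val_t *)&peer, tl_find_callback) == 0) {
 *		... peer is returned held; drop it with tl_refrele(peer) ...
 *	}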
146*7c478bd9Sstevel@tonic-gate  *
147*7c478bd9Sstevel@tonic-gate  * CLOSE processing
148*7c478bd9Sstevel@tonic-gate  * ================
149*7c478bd9Sstevel@tonic-gate  *
150*7c478bd9Sstevel@tonic-gate  * The driver enters the serializer twice on close(). The close sequence is the
151*7c478bd9Sstevel@tonic-gate  * following:
152*7c478bd9Sstevel@tonic-gate  *
153*7c478bd9Sstevel@tonic-gate  * 1) Wait until closing is safe (te_closewait becomes zero)
154*7c478bd9Sstevel@tonic-gate  *	This step is needed to prevent close during serializer switches. In most
155*7c478bd9Sstevel@tonic-gate  *	cases (close happening after connection establishment) te_closewait is
156*7c478bd9Sstevel@tonic-gate  *	zero.
157*7c478bd9Sstevel@tonic-gate  * 2) Set te_closing.
158*7c478bd9Sstevel@tonic-gate  * 3) Call tl_close_ser() within the serializer and wait for it to complete.
159*7c478bd9Sstevel@tonic-gate  *
160*7c478bd9Sstevel@tonic-gate  *      tl_close_ser() simply marks the endpoint and wakes up waiting tl_close().
161*7c478bd9Sstevel@tonic-gate  *	It also needs to clear write-side q_next pointers - this should be done
162*7c478bd9Sstevel@tonic-gate  *	before qprocsoff().
163*7c478bd9Sstevel@tonic-gate  *
164*7c478bd9Sstevel@tonic-gate  *    This synchronous serializer entry during close is needed to ensure that
165*7c478bd9Sstevel@tonic-gate  *    the queue is valid everywhere inside the serializer.
166*7c478bd9Sstevel@tonic-gate  *
167*7c478bd9Sstevel@tonic-gate  *    Note that in many cases close will execute tl_close_ser() synchronously,
168*7c478bd9Sstevel@tonic-gate  *    so it will not wait at all.
169*7c478bd9Sstevel@tonic-gate  *
170*7c478bd9Sstevel@tonic-gate  * 4) Call qprocsoff().
171*7c478bd9Sstevel@tonic-gate  * 5) Call tl_close_finish_ser() within the serializer and wait for it to
172*7c478bd9Sstevel@tonic-gate  *	complete (for COTS transports). For CLTS transports there is no wait.
173*7c478bd9Sstevel@tonic-gate  *
174*7c478bd9Sstevel@tonic-gate  *	tl_close_finish_ser() finishes the close process and wakes up the waiting
175*7c478bd9Sstevel@tonic-gate  *	close if there is one.
176*7c478bd9Sstevel@tonic-gate  *
177*7c478bd9Sstevel@tonic-gate  *    Note that in most cases close will enter tl_close_finish_ser()
178*7c478bd9Sstevel@tonic-gate  *    synchronously and will not wait at all.
179*7c478bd9Sstevel@tonic-gate  *
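 * Putting the steps above together, the close path looks roughly like this
 * (a sketch only; the synchronization details live in tl_close() below, and
 * "rq" and "mp" are placeholders for the read queue and the message used to
 * enter the serializer):
 *
 *	wait until tep->te_closewait drops to zero
 *	tep->te_closing = B_TRUE;
 *	tl_serializer_enter(tep, tl_close_ser, &tep->te_closemp);
 *	wait for tl_close_ser() to run
 *	qprocsoff(rq);
 *	tl_serializer_enter(tep, tl_close_finish_ser, mp);
 *	for COTS transports, wait for tl_close_finish_ser() as well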
180*7c478bd9Sstevel@tonic-gate  *
181*7c478bd9Sstevel@tonic-gate  * Flow Control
182*7c478bd9Sstevel@tonic-gate  * ============
183*7c478bd9Sstevel@tonic-gate  *
184*7c478bd9Sstevel@tonic-gate  * The driver implements both read and write side service routines. No one calls
185*7c478bd9Sstevel@tonic-gate  * putq() on the read queue. The read side service routine tl_rsrv() is called
186*7c478bd9Sstevel@tonic-gate  * when the read side stream is back-enabled. It enters the serializer
187*7c478bd9Sstevel@tonic-gate  * synchronously (waits until serializer processing is complete). Within the
188*7c478bd9Sstevel@tonic-gate  * serializer it back-enables all endpoints blocked by the queue for
189*7c478bd9Sstevel@tonic-gate  * connectionless transports and enables write side service processing for the
190*7c478bd9Sstevel@tonic-gate  * peer for connection-oriented transports.
191*7c478bd9Sstevel@tonic-gate  *
192*7c478bd9Sstevel@tonic-gate  * Read and write side service routines use special mblk-sized space in the
193*7c478bd9Sstevel@tonic-gate  * endpoint structure to enter the serializer.
194*7c478bd9Sstevel@tonic-gate  *
195*7c478bd9Sstevel@tonic-gate  * Write-side flow control
196*7c478bd9Sstevel@tonic-gate  * -----------------------
197*7c478bd9Sstevel@tonic-gate  *
198*7c478bd9Sstevel@tonic-gate  * Write side flow control is a bit tricky. The driver needs to deal with two
199*7c478bd9Sstevel@tonic-gate  * message queues - the explicit STREAMS message queue maintained by
200*7c478bd9Sstevel@tonic-gate  * putq()/getq()/putbq() and the implicit queue within the serializer. These two
201*7c478bd9Sstevel@tonic-gate  * queues should be synchronized to preserve message ordering and should
202*7c478bd9Sstevel@tonic-gate  * maintain a single order determined by the order in which messages enter
203*7c478bd9Sstevel@tonic-gate  * tl_wput(). In order to maintain the ordering between these two queues the
204*7c478bd9Sstevel@tonic-gate  * STREAMS queue is only manipulated within the serializer, so the ordering is
205*7c478bd9Sstevel@tonic-gate  * provided by the serializer.
206*7c478bd9Sstevel@tonic-gate  *
207*7c478bd9Sstevel@tonic-gate  * Functions called from tl_wsrv() may sometimes call putbq(). To
208*7c478bd9Sstevel@tonic-gate  * immediately stop any further processing of the STREAMS message queues the
209*7c478bd9Sstevel@tonic-gate  * code calling putbq() also sets the te_nowsrv flag in the endpoint. The write
210*7c478bd9Sstevel@tonic-gate  * side service processing stops when the flag is set.
211*7c478bd9Sstevel@tonic-gate  *
212*7c478bd9Sstevel@tonic-gate  * The tl_wsrv() function enters serializer synchronously and waits for it to
213*7c478bd9Sstevel@tonic-gate  * complete. The serializer call-back tl_wsrv_ser() either drains all messages
214*7c478bd9Sstevel@tonic-gate  * on the STREAMS queue or terminates when it notices the te_nowsrv flag
215*7c478bd9Sstevel@tonic-gate  * set. Note that the maximum number of messages processed by tl_wsrv_ser() is
216*7c478bd9Sstevel@tonic-gate  * always bounded by the number of messages on the STREAMS queue at the time
217*7c478bd9Sstevel@tonic-gate  * tl_wsrv_ser() is entered. Any new messages may only appear on the STREAMS
218*7c478bd9Sstevel@tonic-gate  * queue from another serialized entry which can't happen in parallel. This
219*7c478bd9Sstevel@tonic-gate  * guarantees that tl_wsrv_ser() completes in bounded time (there is no risk
220*7c478bd9Sstevel@tonic-gate  * of it draining forever while a writer places new messages on the STREAMS
221*7c478bd9Sstevel@tonic-gate  * queue).
222*7c478bd9Sstevel@tonic-gate  *
223*7c478bd9Sstevel@tonic-gate  * Note that a closing endpoint never sets te_nowsrv and never calls putbq().
224*7c478bd9Sstevel@tonic-gate  *
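 * The per-message pattern inside the serialized write service is therefore
 * roughly (a sketch; TL_PUTBQ() is defined below, "peer_rq" stands for the
 * peer's read queue):
 *
 *	if (!canputnext(peer_rq)) {
 *		TL_PUTBQ(tep, mp);	requeue mp and set te_nowsrv
 *		return;			tl_wsrv_ser() stops draining
 *	}
 *	putnext(peer_rq, mp);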
225*7c478bd9Sstevel@tonic-gate  *
226*7c478bd9Sstevel@tonic-gate  * Unix Domain Sockets
227*7c478bd9Sstevel@tonic-gate  * ===================
228*7c478bd9Sstevel@tonic-gate  *
229*7c478bd9Sstevel@tonic-gate  * The driver knows the structure of Unix Domain socket addresses and treats
230*7c478bd9Sstevel@tonic-gate  * them differently from generic TLI addresses. For sockets, implicit binds are
231*7c478bd9Sstevel@tonic-gate  * requested by setting SOU_MAGIC_IMPLICIT in the soua_magic part of the address
232*7c478bd9Sstevel@tonic-gate  * instead of using address length of zero. Explicit binds specify
233*7c478bd9Sstevel@tonic-gate  * SOU_MAGIC_EXPLICIT as magic.
234*7c478bd9Sstevel@tonic-gate  *
235*7c478bd9Sstevel@tonic-gate  * For implicit binds we always use the minor number as the soua_vp part of the
236*7c478bd9Sstevel@tonic-gate  * address and avoid any hash table lookups. This saves two hash table lookups
237*7c478bd9Sstevel@tonic-gate  * per anonymous bind.
238*7c478bd9Sstevel@tonic-gate  *
239*7c478bd9Sstevel@tonic-gate  * For explicit addresses we hash the vnode pointer instead of hashing the
240*7c478bd9Sstevel@tonic-gate  * full-scale address+zone+length. Hashing by pointer is more efficient than
241*7c478bd9Sstevel@tonic-gate  * hashing by the full address.
242*7c478bd9Sstevel@tonic-gate  *
243*7c478bd9Sstevel@tonic-gate  * For Unix domain sockets te_ap always points to the te_uxaddr part of the
244*7c478bd9Sstevel@tonic-gate  * tep structure, so it should never be freed.
245*7c478bd9Sstevel@tonic-gate  *
246*7c478bd9Sstevel@tonic-gate  * Also, for sockets the driver always uses the minor number as the acceptor id.
247*7c478bd9Sstevel@tonic-gate  *
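 * For illustration, the way the driver looks at a socket address (a sketch;
 * soux_addr_t and the SOU_MAGIC_* values come from the socket headers,
 * "bind_addr" is a placeholder for the address buffer from the request):
 *
 *	soux_addr_t ux;
 *
 *	bcopy(bind_addr, &ux, sizeof (ux));
 *	if (ux.soua_magic == SOU_MAGIC_IMPLICIT)
 *		ux.soua_vp = (void *)(uintptr_t)tep->te_minor;	anonymous bind
 *	else if (ux.soua_magic == SOU_MAGIC_EXPLICIT)
 *		hash the endpoint by ux.soua_vp (the vnode supplied by sockfs)
 *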
248*7c478bd9Sstevel@tonic-gate  * TPI VIOLATIONS
249*7c478bd9Sstevel@tonic-gate  * --------------
250*7c478bd9Sstevel@tonic-gate  *
251*7c478bd9Sstevel@tonic-gate  * This driver violates TPI in several respects for Unix Domain Sockets:
252*7c478bd9Sstevel@tonic-gate  *
253*7c478bd9Sstevel@tonic-gate  * 1) It treats O_T_BIND_REQ as T_BIND_REQ and refuses bind if an explicit bind
254*7c478bd9Sstevel@tonic-gate  *	is requested and the endpoint is already in use. There is no point in
255*7c478bd9Sstevel@tonic-gate  *	generating an unused address since this address will be rejected by
256*7c478bd9Sstevel@tonic-gate  *	sockfs anyway. For implicit binds it always generates a new address
257*7c478bd9Sstevel@tonic-gate  *	(sets soua_vp to its minor number).
258*7c478bd9Sstevel@tonic-gate  *
259*7c478bd9Sstevel@tonic-gate  * 2) It always uses the minor number as the acceptor ID and never uses the
260*7c478bd9Sstevel@tonic-gate  *	queue pointer. This is OK since sockets get the acceptor ID from the
261*7c478bd9Sstevel@tonic-gate  *	T_CAPABILITY_REQ message and do not use the queue pointer.
262*7c478bd9Sstevel@tonic-gate  *
263*7c478bd9Sstevel@tonic-gate  * 3) For listener sockets the usual sequence is to issue bind() with zero
264*7c478bd9Sstevel@tonic-gate  *	backlog followed by listen(). The listen() should be issued with a non-zero
265*7c478bd9Sstevel@tonic-gate  *	backlog, so sotpi_listen() issues unbind request followed by bind
266*7c478bd9Sstevel@tonic-gate  *	request to the same address but with a non-zero qlen value. Both
267*7c478bd9Sstevel@tonic-gate  *	tl_bind() and tl_unbind() require write lock on the hash table to
268*7c478bd9Sstevel@tonic-gate  *	insert/remove the address. The driver does not remove the address from
269*7c478bd9Sstevel@tonic-gate  *	the hash for endpoints that are bound to the explicit address and have
270*7c478bd9Sstevel@tonic-gate  *	backlog of zero. During T_BIND_REQ processing if the address requested
271*7c478bd9Sstevel@tonic-gate  *	is equal to the address the endpoint already has it updates the backlog
272*7c478bd9Sstevel@tonic-gate  *	without reinserting the address in the hash table. This optimization
273*7c478bd9Sstevel@tonic-gate  *	avoids two hash table updates for each listener created. It also avoids
274*7c478bd9Sstevel@tonic-gate  *	the problem of a "stolen" address, where another listener could take the
275*7c478bd9Sstevel@tonic-gate  *	same address between the unbind and the bind so that listen() suddenly
276*7c478bd9Sstevel@tonic-gate  *	fails with "address in use" even though the bind() succeeded (see below).
277*7c478bd9Sstevel@tonic-gate  *
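 * A sketch of the backlog-update shortcut described in item 3 above
 * (tl_eqaddr(), TL_ADDRHASHED and te_qlen appear later in this file; "addr"
 * and "bind" are placeholders for the decoded address and the T_bind_req):
 *
 *	if ((tep->te_flag & TL_ADDRHASHED) &&
 *	    tl_eqaddr(&tep->te_ap, &addr)) {
 *		tep->te_qlen = bind->CONIND_number;	update backlog only,
 *							no hash remove/insert
 *	}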
278*7c478bd9Sstevel@tonic-gate  *
279*7c478bd9Sstevel@tonic-gate  * CONNECTIONLESS TRANSPORTS
280*7c478bd9Sstevel@tonic-gate  * =========================
281*7c478bd9Sstevel@tonic-gate  *
282*7c478bd9Sstevel@tonic-gate  * Connectionless transports share a common serializer per type (one for TLI and
283*7c478bd9Sstevel@tonic-gate  * one for sockets). Functions executing behind the serializer can check or
284*7c478bd9Sstevel@tonic-gate  * modify the state of any endpoint.
285*7c478bd9Sstevel@tonic-gate  *
286*7c478bd9Sstevel@tonic-gate  * When endpoint X talks to another endpoint Y it caches the pointer to Y in the
287*7c478bd9Sstevel@tonic-gate  * te_lastep field. The next time X talks to some address A it checks whether A
288*7c478bd9Sstevel@tonic-gate  * is the same as Y's address and if it is, there is no need to look up Y. If
289*7c478bd9Sstevel@tonic-gate  * the address is different or the state of Y is not appropriate (e.g. closed or
290*7c478bd9Sstevel@tonic-gate  * not idle) X does a lookup using tl_find_peer() and caches the new peer.
291*7c478bd9Sstevel@tonic-gate  * NOTE: tl_find_peer() never returns a closing endpoint and it places a refhold
292*7c478bd9Sstevel@tonic-gate  * on the endpoint found.
293*7c478bd9Sstevel@tonic-gate  *
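 * A sketch of the caching logic ("dst" is a placeholder for the destination
 * address; tl_eqaddr() and TL_REMOVE_PEER() appear later in this file):
 *
 *	peer = tep->te_lastep;
 *	if (peer == NULL || peer->te_closing || peer is not idle ||
 *	    !tl_eqaddr(&peer->te_ap, &dst)) {
 *		TL_REMOVE_PEER(tep->te_lastep);
 *		tep->te_lastep = peer = tl_find_peer(tep, &dst);
 *	}
 *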
294*7c478bd9Sstevel@tonic-gate  * During close, endpoint Y doesn't try to remove itself from other
295*7c478bd9Sstevel@tonic-gate  * endpoints' caches. They will detect that Y is gone and will search for the
296*7c478bd9Sstevel@tonic-gate  * peer endpoint again.
297*7c478bd9Sstevel@tonic-gate  *
298*7c478bd9Sstevel@tonic-gate  * Flow Control Handling
299*7c478bd9Sstevel@tonic-gate  * ----------------------
300*7c478bd9Sstevel@tonic-gate  *
301*7c478bd9Sstevel@tonic-gate  * Each connectionless endpoint keeps a list of endpoints which are
302*7c478bd9Sstevel@tonic-gate  * flow-controlled by its queue. It also keeps a pointer to the queue which
303*7c478bd9Sstevel@tonic-gate  * flow-controls itself.  Whenever flow control is relieved for endpoint X it
304*7c478bd9Sstevel@tonic-gate  * enables all queues from the list. During close it also back-enables everyone
305*7c478bd9Sstevel@tonic-gate  * in the list. If X is flow-controlled when it is closing, it removes itself
306*7c478bd9Sstevel@tonic-gate  * from its peer's list.
307*7c478bd9Sstevel@tonic-gate  *
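 * A simplified sketch of the back-enable step (TL_QENABLE() and te_flowlist
 * appear later in this file; list_head()/list_next() are the <sys/list.h>
 * iterators):
 *
 *	for (peer = list_head(&tep->te_flowlist); peer != NULL;
 *	    peer = list_next(&tep->te_flowlist, peer))
 *		TL_QENABLE(peer);
 *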
308*7c478bd9Sstevel@tonic-gate  * DATA STRUCTURES
309*7c478bd9Sstevel@tonic-gate  * ===============
310*7c478bd9Sstevel@tonic-gate  *
311*7c478bd9Sstevel@tonic-gate  * Each endpoint is represented by the tl_endpt_t structure which keeps all the
312*7c478bd9Sstevel@tonic-gate  * endpoint state. For connection-oriented transports it keeps a list
313*7c478bd9Sstevel@tonic-gate  * of pending connections (tl_icon_t). For connectionless transports it keeps a
314*7c478bd9Sstevel@tonic-gate  * list of endpoints flow-controlled by this one.
315*7c478bd9Sstevel@tonic-gate  *
316*7c478bd9Sstevel@tonic-gate  * Each transport type is represented by a per-transport data structure
317*7c478bd9Sstevel@tonic-gate  * tl_transport_state_t. It contains pointers to the acceptor ID hash and the
318*7c478bd9Sstevel@tonic-gate  * endpoint address hash tables for each transport. It also contains a pointer
319*7c478bd9Sstevel@tonic-gate  * to the transport serializer for connectionless transports.
320*7c478bd9Sstevel@tonic-gate  *
321*7c478bd9Sstevel@tonic-gate  * Each endpoint keeps a link to its transport structure, so the code can find
322*7c478bd9Sstevel@tonic-gate  * all per-transport information quickly.
323*7c478bd9Sstevel@tonic-gate  */
324*7c478bd9Sstevel@tonic-gate 
325*7c478bd9Sstevel@tonic-gate #include	<sys/types.h>
326*7c478bd9Sstevel@tonic-gate #include	<sys/inttypes.h>
327*7c478bd9Sstevel@tonic-gate #include	<sys/stream.h>
328*7c478bd9Sstevel@tonic-gate #include	<sys/stropts.h>
329*7c478bd9Sstevel@tonic-gate #define	_SUN_TPI_VERSION 2
330*7c478bd9Sstevel@tonic-gate #include	<sys/tihdr.h>
331*7c478bd9Sstevel@tonic-gate #include	<sys/strlog.h>
332*7c478bd9Sstevel@tonic-gate #include	<sys/debug.h>
333*7c478bd9Sstevel@tonic-gate #include	<sys/cred.h>
334*7c478bd9Sstevel@tonic-gate #include	<sys/errno.h>
335*7c478bd9Sstevel@tonic-gate #include	<sys/kmem.h>
336*7c478bd9Sstevel@tonic-gate #include	<sys/id_space.h>
337*7c478bd9Sstevel@tonic-gate #include	<sys/modhash.h>
338*7c478bd9Sstevel@tonic-gate #include	<sys/mkdev.h>
339*7c478bd9Sstevel@tonic-gate #include	<sys/tl.h>
340*7c478bd9Sstevel@tonic-gate #include	<sys/stat.h>
341*7c478bd9Sstevel@tonic-gate #include	<sys/conf.h>
342*7c478bd9Sstevel@tonic-gate #include	<sys/modctl.h>
343*7c478bd9Sstevel@tonic-gate #include	<sys/strsun.h>
344*7c478bd9Sstevel@tonic-gate #include	<sys/socket.h>
345*7c478bd9Sstevel@tonic-gate #include	<sys/socketvar.h>
346*7c478bd9Sstevel@tonic-gate #include	<sys/sysmacros.h>
347*7c478bd9Sstevel@tonic-gate #include	<sys/xti_xtiopt.h>
348*7c478bd9Sstevel@tonic-gate #include	<sys/ddi.h>
349*7c478bd9Sstevel@tonic-gate #include	<sys/sunddi.h>
350*7c478bd9Sstevel@tonic-gate #include	<sys/zone.h>
351*7c478bd9Sstevel@tonic-gate #include	<inet/common.h>	/* typedef int (*pfi_t)() for inet/optcom.h */
352*7c478bd9Sstevel@tonic-gate #include	<inet/optcom.h>
353*7c478bd9Sstevel@tonic-gate #include	<sys/strsubr.h>
354*7c478bd9Sstevel@tonic-gate #include	<sys/ucred.h>
355*7c478bd9Sstevel@tonic-gate #include	<sys/suntpi.h>
356*7c478bd9Sstevel@tonic-gate #include	<sys/list.h>
357*7c478bd9Sstevel@tonic-gate #include	<sys/serializer.h>
358*7c478bd9Sstevel@tonic-gate 
359*7c478bd9Sstevel@tonic-gate /*
360*7c478bd9Sstevel@tonic-gate  * TBD List
361*7c478bd9Sstevel@tonic-gate  * 14. Eliminate state changes through table
362*7c478bd9Sstevel@tonic-gate  * 16. AF_UNIX socket options
363*7c478bd9Sstevel@tonic-gate  * 17. connect() for ticlts
364*7c478bd9Sstevel@tonic-gate  * 18. support for "netstat" to show AF_UNIX plus TLI local
365*7c478bd9Sstevel@tonic-gate  *	transport connections
366*7c478bd9Sstevel@tonic-gate  * 21. sanity check to flushing on sending M_ERROR
367*7c478bd9Sstevel@tonic-gate  */
368*7c478bd9Sstevel@tonic-gate 
369*7c478bd9Sstevel@tonic-gate /*
370*7c478bd9Sstevel@tonic-gate  * CONSTANT DECLARATIONS
371*7c478bd9Sstevel@tonic-gate  * --------------------
372*7c478bd9Sstevel@tonic-gate  */
373*7c478bd9Sstevel@tonic-gate 
374*7c478bd9Sstevel@tonic-gate /*
375*7c478bd9Sstevel@tonic-gate  * Local declarations
376*7c478bd9Sstevel@tonic-gate  */
377*7c478bd9Sstevel@tonic-gate #define	NEXTSTATE(EV, ST)	ti_statetbl[EV][ST]
378*7c478bd9Sstevel@tonic-gate 
379*7c478bd9Sstevel@tonic-gate #define	TL_MAXQLEN	128	/* Max conn indications allowed. */
380*7c478bd9Sstevel@tonic-gate #define	BADSEQNUM	(-1)	/* initial seq number used by T_DISCON_IND */
381*7c478bd9Sstevel@tonic-gate #define	TL_BUFWAIT	(10000)	/* usecs to wait for allocb buffer timeout */
382*7c478bd9Sstevel@tonic-gate #define	TL_TIDUSZ (64*1024)	/* tidu size when "strmsgsz" is unlimited (0) */
383*7c478bd9Sstevel@tonic-gate /*
384*7c478bd9Sstevel@tonic-gate  * Hash tables size.
385*7c478bd9Sstevel@tonic-gate  */
386*7c478bd9Sstevel@tonic-gate #define	TL_HASH_SIZE 311
387*7c478bd9Sstevel@tonic-gate 
388*7c478bd9Sstevel@tonic-gate /*
389*7c478bd9Sstevel@tonic-gate  * Definitions for module_info
390*7c478bd9Sstevel@tonic-gate  */
391*7c478bd9Sstevel@tonic-gate #define		TL_ID		(104)		/* module ID number */
392*7c478bd9Sstevel@tonic-gate #define		TL_NAME		"tl"		/* module name */
393*7c478bd9Sstevel@tonic-gate #define		TL_MINPSZ	(0)		/* min packet size */
394*7c478bd9Sstevel@tonic-gate #define		TL_MAXPSZ	INFPSZ 		/* max packet size ZZZ */
395*7c478bd9Sstevel@tonic-gate #define		TL_HIWAT	(16*1024)	/* hi water mark */
396*7c478bd9Sstevel@tonic-gate #define		TL_LOWAT	(256)		/* lo water mark */
397*7c478bd9Sstevel@tonic-gate /*
398*7c478bd9Sstevel@tonic-gate  * Definition of minor numbers/modes for new transport provider modes.
399*7c478bd9Sstevel@tonic-gate  * We view the socket use as a separate mode to get a separate name space.
400*7c478bd9Sstevel@tonic-gate  */
401*7c478bd9Sstevel@tonic-gate #define		TL_TICOTS	0	/* connection oriented transport */
402*7c478bd9Sstevel@tonic-gate #define		TL_TICOTSORD 	1	/* COTS w/ orderly release */
403*7c478bd9Sstevel@tonic-gate #define		TL_TICLTS 	2	/* connectionless transport */
404*7c478bd9Sstevel@tonic-gate #define		TL_UNUSED	3
405*7c478bd9Sstevel@tonic-gate #define		TL_SOCKET	4	/* Socket */
406*7c478bd9Sstevel@tonic-gate #define		TL_SOCK_COTS	(TL_SOCKET|TL_TICOTS)
407*7c478bd9Sstevel@tonic-gate #define		TL_SOCK_COTSORD	(TL_SOCKET|TL_TICOTSORD)
408*7c478bd9Sstevel@tonic-gate #define		TL_SOCK_CLTS	(TL_SOCKET|TL_TICLTS)
409*7c478bd9Sstevel@tonic-gate 
410*7c478bd9Sstevel@tonic-gate #define		TL_MINOR_MASK	0x7
411*7c478bd9Sstevel@tonic-gate #define		TL_MINOR_START	(TL_TICLTS + 1)
412*7c478bd9Sstevel@tonic-gate 
413*7c478bd9Sstevel@tonic-gate /*
414*7c478bd9Sstevel@tonic-gate  * LOCAL MACROS
415*7c478bd9Sstevel@tonic-gate  */
416*7c478bd9Sstevel@tonic-gate #define	T_ALIGN(p)	P2ROUNDUP((p), sizeof (t_scalar_t))
417*7c478bd9Sstevel@tonic-gate 
418*7c478bd9Sstevel@tonic-gate /*
419*7c478bd9Sstevel@tonic-gate  * EXTERNAL VARIABLE DECLARATIONS
420*7c478bd9Sstevel@tonic-gate  * -----------------------------
421*7c478bd9Sstevel@tonic-gate  */
422*7c478bd9Sstevel@tonic-gate /*
423*7c478bd9Sstevel@tonic-gate  * state table defined in the OS space.c
424*7c478bd9Sstevel@tonic-gate  */
425*7c478bd9Sstevel@tonic-gate extern	char	ti_statetbl[TE_NOEVENTS][TS_NOSTATES];
426*7c478bd9Sstevel@tonic-gate 
427*7c478bd9Sstevel@tonic-gate /*
428*7c478bd9Sstevel@tonic-gate  * STREAMS DRIVER ENTRY POINTS PROTOTYPES
429*7c478bd9Sstevel@tonic-gate  */
430*7c478bd9Sstevel@tonic-gate static int tl_open(queue_t *, dev_t *, int, int, cred_t *);
431*7c478bd9Sstevel@tonic-gate static int tl_close(queue_t *, int, cred_t *);
432*7c478bd9Sstevel@tonic-gate static void tl_wput(queue_t *, mblk_t *);
433*7c478bd9Sstevel@tonic-gate static void tl_wsrv(queue_t *);
434*7c478bd9Sstevel@tonic-gate static void tl_rsrv(queue_t *);
435*7c478bd9Sstevel@tonic-gate 
436*7c478bd9Sstevel@tonic-gate static int tl_attach(dev_info_t *, ddi_attach_cmd_t);
437*7c478bd9Sstevel@tonic-gate static int tl_detach(dev_info_t *, ddi_detach_cmd_t);
438*7c478bd9Sstevel@tonic-gate static int tl_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
439*7c478bd9Sstevel@tonic-gate 
440*7c478bd9Sstevel@tonic-gate 
441*7c478bd9Sstevel@tonic-gate /*
442*7c478bd9Sstevel@tonic-gate  * GLOBAL DATA STRUCTURES AND VARIABLES
443*7c478bd9Sstevel@tonic-gate  * -----------------------------------
444*7c478bd9Sstevel@tonic-gate  */
445*7c478bd9Sstevel@tonic-gate 
446*7c478bd9Sstevel@tonic-gate /*
447*7c478bd9Sstevel@tonic-gate  * Table representing database of all options managed by T_SVR4_OPTMGMT_REQ
448*7c478bd9Sstevel@tonic-gate  * For now, we only manage the SO_RECVUCRED option but we also have
449*7c478bd9Sstevel@tonic-gate  * harmless dummy options to make things work with some common code we access.
450*7c478bd9Sstevel@tonic-gate  */
451*7c478bd9Sstevel@tonic-gate opdes_t	tl_opt_arr[] = {
452*7c478bd9Sstevel@tonic-gate 	/* The SO_TYPE is needed for the hack below */
453*7c478bd9Sstevel@tonic-gate 	{
454*7c478bd9Sstevel@tonic-gate 		SO_TYPE,
455*7c478bd9Sstevel@tonic-gate 		SOL_SOCKET,
456*7c478bd9Sstevel@tonic-gate 		OA_R,
457*7c478bd9Sstevel@tonic-gate 		OA_R,
458*7c478bd9Sstevel@tonic-gate 		OP_NP,
459*7c478bd9Sstevel@tonic-gate 		OP_PASSNEXT,
460*7c478bd9Sstevel@tonic-gate 		sizeof (t_scalar_t),
461*7c478bd9Sstevel@tonic-gate 		0
462*7c478bd9Sstevel@tonic-gate 	},
463*7c478bd9Sstevel@tonic-gate 	{
464*7c478bd9Sstevel@tonic-gate 		SO_RECVUCRED,
465*7c478bd9Sstevel@tonic-gate 		SOL_SOCKET,
466*7c478bd9Sstevel@tonic-gate 		OA_RW,
467*7c478bd9Sstevel@tonic-gate 		OA_RW,
468*7c478bd9Sstevel@tonic-gate 		OP_NP,
469*7c478bd9Sstevel@tonic-gate 		OP_PASSNEXT,
470*7c478bd9Sstevel@tonic-gate 		sizeof (int),
471*7c478bd9Sstevel@tonic-gate 		0
472*7c478bd9Sstevel@tonic-gate 	}
473*7c478bd9Sstevel@tonic-gate };
474*7c478bd9Sstevel@tonic-gate 
475*7c478bd9Sstevel@tonic-gate /*
476*7c478bd9Sstevel@tonic-gate  * Table of all supported levels
477*7c478bd9Sstevel@tonic-gate  * Note: Some levels (e.g. XTI_GENERIC) may be valid but may not have
478*7c478bd9Sstevel@tonic-gate  * any supported options so we need this info separately.
479*7c478bd9Sstevel@tonic-gate  *
480*7c478bd9Sstevel@tonic-gate  * This is needed only for topmost tpi providers.
481*7c478bd9Sstevel@tonic-gate  */
482*7c478bd9Sstevel@tonic-gate optlevel_t	tl_valid_levels_arr[] = {
483*7c478bd9Sstevel@tonic-gate 	XTI_GENERIC,
484*7c478bd9Sstevel@tonic-gate 	SOL_SOCKET,
485*7c478bd9Sstevel@tonic-gate 	TL_PROT_LEVEL
486*7c478bd9Sstevel@tonic-gate };
487*7c478bd9Sstevel@tonic-gate 
488*7c478bd9Sstevel@tonic-gate #define	TL_VALID_LEVELS_CNT	A_CNT(tl_valid_levels_arr)
489*7c478bd9Sstevel@tonic-gate /*
490*7c478bd9Sstevel@tonic-gate  * Current upper bound on the amount of space needed to return all options.
491*7c478bd9Sstevel@tonic-gate  * Additional options with data size of sizeof(long) are handled automatically.
492*7c478bd9Sstevel@tonic-gate  * Others need to be handled by hand.
493*7c478bd9Sstevel@tonic-gate  */
494*7c478bd9Sstevel@tonic-gate #define	TL_MAX_OPT_BUF_LEN						\
495*7c478bd9Sstevel@tonic-gate 		((A_CNT(tl_opt_arr) << 2) +				\
496*7c478bd9Sstevel@tonic-gate 		(A_CNT(tl_opt_arr) * sizeof (struct opthdr)) +		\
497*7c478bd9Sstevel@tonic-gate 		64 + sizeof (struct T_optmgmt_ack))
498*7c478bd9Sstevel@tonic-gate 
499*7c478bd9Sstevel@tonic-gate #define	TL_OPT_ARR_CNT	A_CNT(tl_opt_arr)
500*7c478bd9Sstevel@tonic-gate 
501*7c478bd9Sstevel@tonic-gate /*
502*7c478bd9Sstevel@tonic-gate  *	transport addr structure
503*7c478bd9Sstevel@tonic-gate  */
504*7c478bd9Sstevel@tonic-gate typedef struct tl_addr {
505*7c478bd9Sstevel@tonic-gate 	zoneid_t	ta_zoneid;		/* Zone scope of address */
506*7c478bd9Sstevel@tonic-gate 	t_scalar_t	ta_alen;		/* length of abuf */
507*7c478bd9Sstevel@tonic-gate 	void		*ta_abuf;		/* the addr itself */
508*7c478bd9Sstevel@tonic-gate } tl_addr_t;
509*7c478bd9Sstevel@tonic-gate 
510*7c478bd9Sstevel@tonic-gate /*
511*7c478bd9Sstevel@tonic-gate  * Refcounted version of serializer.
512*7c478bd9Sstevel@tonic-gate  */
513*7c478bd9Sstevel@tonic-gate typedef struct tl_serializer {
514*7c478bd9Sstevel@tonic-gate 	uint_t		ts_refcnt;
515*7c478bd9Sstevel@tonic-gate 	serializer_t	*ts_serializer;
516*7c478bd9Sstevel@tonic-gate } tl_serializer_t;
517*7c478bd9Sstevel@tonic-gate 
518*7c478bd9Sstevel@tonic-gate /*
519*7c478bd9Sstevel@tonic-gate  * Per-transport state:
520*7c478bd9Sstevel@tonic-gate  * each transport type has a separate copy of this structure.
521*7c478bd9Sstevel@tonic-gate  */
522*7c478bd9Sstevel@tonic-gate typedef struct tl_transport_state {
523*7c478bd9Sstevel@tonic-gate 	char		*tr_name;
524*7c478bd9Sstevel@tonic-gate 	minor_t		tr_minor;
525*7c478bd9Sstevel@tonic-gate 	uint32_t	tr_defaddr;
526*7c478bd9Sstevel@tonic-gate 	mod_hash_t	*tr_ai_hash;
527*7c478bd9Sstevel@tonic-gate 	mod_hash_t	*tr_addr_hash;
528*7c478bd9Sstevel@tonic-gate 	tl_serializer_t	*tr_serializer;
529*7c478bd9Sstevel@tonic-gate } tl_transport_state_t;
530*7c478bd9Sstevel@tonic-gate 
531*7c478bd9Sstevel@tonic-gate #define	TL_DFADDR 0x1000
532*7c478bd9Sstevel@tonic-gate 
533*7c478bd9Sstevel@tonic-gate static tl_transport_state_t tl_transports[] = {
534*7c478bd9Sstevel@tonic-gate 	{ "ticots", TL_TICOTS, TL_DFADDR, NULL, NULL, NULL },
535*7c478bd9Sstevel@tonic-gate 	{ "ticotsord", TL_TICOTSORD, TL_DFADDR, NULL, NULL, NULL },
536*7c478bd9Sstevel@tonic-gate 	{ "ticlts", TL_TICLTS, TL_DFADDR, NULL, NULL, NULL },
537*7c478bd9Sstevel@tonic-gate 	{ "undefined", TL_UNUSED, TL_DFADDR, NULL, NULL, NULL },
538*7c478bd9Sstevel@tonic-gate 	{ "sticots", TL_SOCK_COTS, TL_DFADDR, NULL, NULL, NULL },
539*7c478bd9Sstevel@tonic-gate 	{ "sticotsord", TL_SOCK_COTSORD, TL_DFADDR, NULL, NULL, NULL },
540*7c478bd9Sstevel@tonic-gate 	{ "sticlts", TL_SOCK_CLTS, TL_DFADDR, NULL, NULL, NULL }
541*7c478bd9Sstevel@tonic-gate };
542*7c478bd9Sstevel@tonic-gate 
543*7c478bd9Sstevel@tonic-gate #define	TL_MAXTRANSPORT A_CNT(tl_transports)
544*7c478bd9Sstevel@tonic-gate 
545*7c478bd9Sstevel@tonic-gate struct tl_endpt;
546*7c478bd9Sstevel@tonic-gate typedef struct tl_endpt tl_endpt_t;
547*7c478bd9Sstevel@tonic-gate 
548*7c478bd9Sstevel@tonic-gate typedef void (tlproc_t)(mblk_t *, tl_endpt_t *);
549*7c478bd9Sstevel@tonic-gate 
550*7c478bd9Sstevel@tonic-gate /*
551*7c478bd9Sstevel@tonic-gate  * Data structure used to represent pending connects.
552*7c478bd9Sstevel@tonic-gate  * Records enough information so that the connecting peer can close
553*7c478bd9Sstevel@tonic-gate  * before the connection gets accepted.
554*7c478bd9Sstevel@tonic-gate  */
555*7c478bd9Sstevel@tonic-gate typedef struct tl_icon {
556*7c478bd9Sstevel@tonic-gate 	list_node_t	ti_node;
557*7c478bd9Sstevel@tonic-gate 	struct tl_endpt *ti_tep;	/* NULL if peer has already closed */
558*7c478bd9Sstevel@tonic-gate 	mblk_t		*ti_mp;		/* b_next list of data + ordrel_ind */
559*7c478bd9Sstevel@tonic-gate 	t_scalar_t	ti_seqno;	/* Sequence number */
560*7c478bd9Sstevel@tonic-gate } tl_icon_t;
561*7c478bd9Sstevel@tonic-gate 
562*7c478bd9Sstevel@tonic-gate typedef struct so_ux_addr soux_addr_t;
563*7c478bd9Sstevel@tonic-gate #define	TL_SOUX_ADDRLEN sizeof (soux_addr_t)
564*7c478bd9Sstevel@tonic-gate 
565*7c478bd9Sstevel@tonic-gate /*
566*7c478bd9Sstevel@tonic-gate  *	transport endpoint structure
567*7c478bd9Sstevel@tonic-gate  */
568*7c478bd9Sstevel@tonic-gate struct tl_endpt {
569*7c478bd9Sstevel@tonic-gate 	queue_t		*te_rq;		/* stream read queue */
570*7c478bd9Sstevel@tonic-gate 	queue_t		*te_wq;		/* stream write queue */
571*7c478bd9Sstevel@tonic-gate 	uint32_t	te_refcnt;
572*7c478bd9Sstevel@tonic-gate 	int32_t 	te_state;	/* TPI state of endpoint */
573*7c478bd9Sstevel@tonic-gate 	minor_t		te_minor;	/* minor number */
574*7c478bd9Sstevel@tonic-gate #define	te_seqno	te_minor
575*7c478bd9Sstevel@tonic-gate 	uint_t		te_flag;	/* flag field */
576*7c478bd9Sstevel@tonic-gate 	boolean_t	te_nowsrv;
577*7c478bd9Sstevel@tonic-gate 	tl_serializer_t	*te_ser;	/* Serializer to use */
578*7c478bd9Sstevel@tonic-gate #define	te_serializer	te_ser->ts_serializer
579*7c478bd9Sstevel@tonic-gate 
580*7c478bd9Sstevel@tonic-gate 	soux_addr_t	te_uxaddr;	/* Socket address */
581*7c478bd9Sstevel@tonic-gate #define	te_magic	te_uxaddr.soua_magic
582*7c478bd9Sstevel@tonic-gate #define	te_vp		te_uxaddr.soua_vp
583*7c478bd9Sstevel@tonic-gate 	tl_addr_t	te_ap;		/* addr bound to this endpt */
584*7c478bd9Sstevel@tonic-gate #define	te_zoneid te_ap.ta_zoneid
585*7c478bd9Sstevel@tonic-gate #define	te_alen	te_ap.ta_alen
586*7c478bd9Sstevel@tonic-gate #define	te_abuf	te_ap.ta_abuf
587*7c478bd9Sstevel@tonic-gate 
588*7c478bd9Sstevel@tonic-gate 	tl_transport_state_t *te_transport;
589*7c478bd9Sstevel@tonic-gate #define	te_addrhash	te_transport->tr_addr_hash
590*7c478bd9Sstevel@tonic-gate #define	te_aihash	te_transport->tr_ai_hash
591*7c478bd9Sstevel@tonic-gate #define	te_defaddr	te_transport->tr_defaddr
592*7c478bd9Sstevel@tonic-gate 	cred_t		*te_credp;	/* endpoint user credentials */
593*7c478bd9Sstevel@tonic-gate 	mod_hash_hndl_t	te_hash_hndl;	/* Handle for address hash */
594*7c478bd9Sstevel@tonic-gate 
595*7c478bd9Sstevel@tonic-gate 	/*
596*7c478bd9Sstevel@tonic-gate 	 * State specific for connection-oriented and connectionless transports.
597*7c478bd9Sstevel@tonic-gate 	 */
598*7c478bd9Sstevel@tonic-gate 	union {
599*7c478bd9Sstevel@tonic-gate 		/* Connection-oriented state. */
600*7c478bd9Sstevel@tonic-gate 		struct {
601*7c478bd9Sstevel@tonic-gate 			t_uscalar_t _te_nicon;	/* count of conn requests */
602*7c478bd9Sstevel@tonic-gate 			t_uscalar_t _te_qlen;	/* max conn requests */
603*7c478bd9Sstevel@tonic-gate 			tl_endpt_t  *_te_oconp;	/* conn request pending */
604*7c478bd9Sstevel@tonic-gate 			tl_endpt_t  *_te_conp;	/* connected endpt */
605*7c478bd9Sstevel@tonic-gate #ifndef _ILP32
606*7c478bd9Sstevel@tonic-gate 			void	    *_te_pad;
607*7c478bd9Sstevel@tonic-gate #endif
608*7c478bd9Sstevel@tonic-gate 			list_t	_te_iconp;	/* list of conn ind. pending */
609*7c478bd9Sstevel@tonic-gate 		} _te_cots_state;
610*7c478bd9Sstevel@tonic-gate 		/* Connection-less state. */
611*7c478bd9Sstevel@tonic-gate 		struct {
612*7c478bd9Sstevel@tonic-gate 			tl_endpt_t *_te_lastep;	/* last dest. endpoint */
613*7c478bd9Sstevel@tonic-gate 			tl_endpt_t *_te_flowq;	/* flow controlled on whom */
614*7c478bd9Sstevel@tonic-gate 			list_node_t _te_flows;	/* lists of connections */
615*7c478bd9Sstevel@tonic-gate 			list_t  _te_flowlist;	/* Who flowcontrols on me */
616*7c478bd9Sstevel@tonic-gate 		} _te_clts_state;
617*7c478bd9Sstevel@tonic-gate 	} _te_transport_state;
618*7c478bd9Sstevel@tonic-gate #define	te_nicon	_te_transport_state._te_cots_state._te_nicon
619*7c478bd9Sstevel@tonic-gate #define	te_qlen		_te_transport_state._te_cots_state._te_qlen
620*7c478bd9Sstevel@tonic-gate #define	te_oconp	_te_transport_state._te_cots_state._te_oconp
621*7c478bd9Sstevel@tonic-gate #define	te_conp		_te_transport_state._te_cots_state._te_conp
622*7c478bd9Sstevel@tonic-gate #define	te_iconp	_te_transport_state._te_cots_state._te_iconp
623*7c478bd9Sstevel@tonic-gate #define	te_lastep	_te_transport_state._te_clts_state._te_lastep
624*7c478bd9Sstevel@tonic-gate #define	te_flowq	_te_transport_state._te_clts_state._te_flowq
625*7c478bd9Sstevel@tonic-gate #define	te_flowlist	_te_transport_state._te_clts_state._te_flowlist
626*7c478bd9Sstevel@tonic-gate #define	te_flows	_te_transport_state._te_clts_state._te_flows
627*7c478bd9Sstevel@tonic-gate 
628*7c478bd9Sstevel@tonic-gate 	bufcall_id_t	te_bufcid;	/* outstanding bufcall id */
629*7c478bd9Sstevel@tonic-gate 	timeout_id_t	te_timoutid;	/* outstanding timeout id */
630*7c478bd9Sstevel@tonic-gate 	pid_t		te_cpid;	/* cached pid of endpoint */
631*7c478bd9Sstevel@tonic-gate 	t_uscalar_t	te_acceptor_id;	/* acceptor id for T_CONN_RES */
632*7c478bd9Sstevel@tonic-gate 	/*
633*7c478bd9Sstevel@tonic-gate 	 * Pieces of the endpoint state needed for closing.
634*7c478bd9Sstevel@tonic-gate 	 */
635*7c478bd9Sstevel@tonic-gate 	kmutex_t	te_closelock;
636*7c478bd9Sstevel@tonic-gate 	kcondvar_t	te_closecv;
637*7c478bd9Sstevel@tonic-gate 	uint8_t		te_closing;	/* The endpoint started closing */
638*7c478bd9Sstevel@tonic-gate 	uint8_t		te_closewait;	/* Wait in close until zero */
639*7c478bd9Sstevel@tonic-gate 	mblk_t		te_closemp;	/* for entering serializer on close */
640*7c478bd9Sstevel@tonic-gate 	mblk_t		te_rsrvmp;	/* for entering serializer on rsrv */
641*7c478bd9Sstevel@tonic-gate 	mblk_t		te_wsrvmp;	/* for entering serializer on wsrv */
642*7c478bd9Sstevel@tonic-gate 	kmutex_t	te_srv_lock;
643*7c478bd9Sstevel@tonic-gate 	kcondvar_t	te_srv_cv;
644*7c478bd9Sstevel@tonic-gate 	uint8_t		te_rsrv_active;	/* Running in tl_rsrv()	*/
645*7c478bd9Sstevel@tonic-gate 	uint8_t		te_wsrv_active;	/* Running in tl_wsrv()	*/
646*7c478bd9Sstevel@tonic-gate 	/*
647*7c478bd9Sstevel@tonic-gate 	 * Pieces of the endpoint state needed for serializer transitions.
648*7c478bd9Sstevel@tonic-gate 	 */
649*7c478bd9Sstevel@tonic-gate 	kmutex_t	te_ser_lock;	/* Protects the count below */
650*7c478bd9Sstevel@tonic-gate 	uint_t		te_ser_count;	/* Number of messages on serializer */
651*7c478bd9Sstevel@tonic-gate };
652*7c478bd9Sstevel@tonic-gate 
653*7c478bd9Sstevel@tonic-gate /*
654*7c478bd9Sstevel@tonic-gate  * Flag values. Lower 4 bits specify the transport used.
655*7c478bd9Sstevel@tonic-gate  * TL_LISTENER, TL_ACCEPTOR, TL_ACCEPTED and TL_EAGER are for debugging only;
656*7c478bd9Sstevel@tonic-gate  * they make it easier to identify the endpoint.
657*7c478bd9Sstevel@tonic-gate  */
658*7c478bd9Sstevel@tonic-gate #define	TL_LISTENER	0x00010	/* the listener endpoint */
659*7c478bd9Sstevel@tonic-gate #define	TL_ACCEPTOR	0x00020	/* the accepting endpoint */
660*7c478bd9Sstevel@tonic-gate #define	TL_EAGER	0x00040	/* connecting endpoint */
661*7c478bd9Sstevel@tonic-gate #define	TL_ACCEPTED	0x00080	/* accepted connection */
662*7c478bd9Sstevel@tonic-gate #define	TL_SETCRED	0x00100	/* flag to indicate sending of credentials */
663*7c478bd9Sstevel@tonic-gate #define	TL_SETUCRED	0x00200	/* flag to indicate sending of ucred */
664*7c478bd9Sstevel@tonic-gate #define	TL_SOCKUCRED	0x00400	/* flag to indicate sending of SCM_UCRED */
665*7c478bd9Sstevel@tonic-gate #define	TL_ADDRHASHED	0x01000	/* Endpoint address is stored in te_addrhash */
666*7c478bd9Sstevel@tonic-gate #define	TL_CLOSE_SER	0x10000	/* Endpoint close has entered the serializer */
667*7c478bd9Sstevel@tonic-gate /*
668*7c478bd9Sstevel@tonic-gate  * Boolean checks for the endpoint type.
669*7c478bd9Sstevel@tonic-gate  */
670*7c478bd9Sstevel@tonic-gate #define		IS_CLTS(x)	(((x)->te_flag & TL_TICLTS) != 0)
671*7c478bd9Sstevel@tonic-gate #define		IS_COTS(x)	(((x)->te_flag & TL_TICLTS) == 0)
672*7c478bd9Sstevel@tonic-gate #define		IS_COTSORD(x)	(((x)->te_flag & TL_TICOTSORD) != 0)
673*7c478bd9Sstevel@tonic-gate #define		IS_SOCKET(x)	(((x)->te_flag & TL_SOCKET) != 0)
674*7c478bd9Sstevel@tonic-gate 
675*7c478bd9Sstevel@tonic-gate #define	TLPID(mp, tep)	(DB_CPID(mp) == -1 ? (tep)->te_cpid : DB_CPID(mp))
676*7c478bd9Sstevel@tonic-gate 
677*7c478bd9Sstevel@tonic-gate /*
678*7c478bd9Sstevel@tonic-gate  * Certain operations are always used together. These macros reduce the chance
679*7c478bd9Sstevel@tonic-gate  * of missing a part of a combination.
680*7c478bd9Sstevel@tonic-gate  */
681*7c478bd9Sstevel@tonic-gate #define	TL_UNCONNECT(x) { tl_refrele(x); x = NULL; }
682*7c478bd9Sstevel@tonic-gate #define	TL_REMOVE_PEER(x) { if ((x) != NULL) TL_UNCONNECT(x) }
683*7c478bd9Sstevel@tonic-gate 
684*7c478bd9Sstevel@tonic-gate #define	TL_PUTBQ(x, mp) {		\
685*7c478bd9Sstevel@tonic-gate 	ASSERT(!((x)->te_flag & TL_CLOSE_SER));	\
686*7c478bd9Sstevel@tonic-gate 	(x)->te_nowsrv = B_TRUE;	\
687*7c478bd9Sstevel@tonic-gate 	(void) putbq((x)->te_wq, mp);	\
688*7c478bd9Sstevel@tonic-gate }
689*7c478bd9Sstevel@tonic-gate 
690*7c478bd9Sstevel@tonic-gate #define	TL_QENABLE(x) { (x)->te_nowsrv = B_FALSE; qenable((x)->te_wq); }
691*7c478bd9Sstevel@tonic-gate #define	TL_PUTQ(x, mp) { (x)->te_nowsrv = B_FALSE; (void)putq((x)->te_wq, mp); }
692*7c478bd9Sstevel@tonic-gate 
693*7c478bd9Sstevel@tonic-gate /*
694*7c478bd9Sstevel@tonic-gate  * STREAMS driver glue data structures.
695*7c478bd9Sstevel@tonic-gate  */
696*7c478bd9Sstevel@tonic-gate static	struct	module_info	tl_minfo = {
697*7c478bd9Sstevel@tonic-gate 	TL_ID,			/* mi_idnum */
698*7c478bd9Sstevel@tonic-gate 	TL_NAME,		/* mi_idname */
699*7c478bd9Sstevel@tonic-gate 	TL_MINPSZ,		/* mi_minpsz */
700*7c478bd9Sstevel@tonic-gate 	TL_MAXPSZ,		/* mi_maxpsz */
701*7c478bd9Sstevel@tonic-gate 	TL_HIWAT,		/* mi_hiwat */
702*7c478bd9Sstevel@tonic-gate 	TL_LOWAT		/* mi_lowat */
703*7c478bd9Sstevel@tonic-gate };
704*7c478bd9Sstevel@tonic-gate 
705*7c478bd9Sstevel@tonic-gate static	struct	qinit	tl_rinit = {
706*7c478bd9Sstevel@tonic-gate 	NULL,			/* qi_putp */
707*7c478bd9Sstevel@tonic-gate 	(int (*)())tl_rsrv,	/* qi_srvp */
708*7c478bd9Sstevel@tonic-gate 	tl_open,		/* qi_qopen */
709*7c478bd9Sstevel@tonic-gate 	tl_close,		/* qi_qclose */
710*7c478bd9Sstevel@tonic-gate 	NULL,			/* qi_qadmin */
711*7c478bd9Sstevel@tonic-gate 	&tl_minfo,		/* qi_minfo */
712*7c478bd9Sstevel@tonic-gate 	NULL			/* qi_mstat */
713*7c478bd9Sstevel@tonic-gate };
714*7c478bd9Sstevel@tonic-gate 
715*7c478bd9Sstevel@tonic-gate static	struct	qinit	tl_winit = {
716*7c478bd9Sstevel@tonic-gate 	(int (*)())tl_wput,	/* qi_putp */
717*7c478bd9Sstevel@tonic-gate 	(int (*)())tl_wsrv,	/* qi_srvp */
718*7c478bd9Sstevel@tonic-gate 	NULL,			/* qi_qopen */
719*7c478bd9Sstevel@tonic-gate 	NULL,			/* qi_qclose */
720*7c478bd9Sstevel@tonic-gate 	NULL,			/* qi_qadmin */
721*7c478bd9Sstevel@tonic-gate 	&tl_minfo,		/* qi_minfo */
722*7c478bd9Sstevel@tonic-gate 	NULL			/* qi_mstat */
723*7c478bd9Sstevel@tonic-gate };
724*7c478bd9Sstevel@tonic-gate 
725*7c478bd9Sstevel@tonic-gate static	struct streamtab	tlinfo = {
726*7c478bd9Sstevel@tonic-gate 	&tl_rinit,		/* st_rdinit */
727*7c478bd9Sstevel@tonic-gate 	&tl_winit,		/* st_wrinit */
728*7c478bd9Sstevel@tonic-gate 	NULL,			/* st_muxrinit */
729*7c478bd9Sstevel@tonic-gate 	NULL			/* st_muxwrinit */
730*7c478bd9Sstevel@tonic-gate };
731*7c478bd9Sstevel@tonic-gate 
732*7c478bd9Sstevel@tonic-gate DDI_DEFINE_STREAM_OPS(tl_devops, nulldev, nulldev, tl_attach, tl_detach,
733*7c478bd9Sstevel@tonic-gate     nulldev, tl_info, D_MP, &tlinfo);
734*7c478bd9Sstevel@tonic-gate 
735*7c478bd9Sstevel@tonic-gate static struct modldrv modldrv = {
736*7c478bd9Sstevel@tonic-gate 	&mod_driverops,		/* Type of module -- pseudo driver here */
737*7c478bd9Sstevel@tonic-gate 	"TPI Local Transport (tl) %I%",
738*7c478bd9Sstevel@tonic-gate 	&tl_devops,		/* driver ops */
739*7c478bd9Sstevel@tonic-gate };
740*7c478bd9Sstevel@tonic-gate 
741*7c478bd9Sstevel@tonic-gate /*
742*7c478bd9Sstevel@tonic-gate  * Module linkage information for the kernel.
743*7c478bd9Sstevel@tonic-gate  */
744*7c478bd9Sstevel@tonic-gate static struct modlinkage modlinkage = {
745*7c478bd9Sstevel@tonic-gate 	MODREV_1,
746*7c478bd9Sstevel@tonic-gate 	&modldrv,
747*7c478bd9Sstevel@tonic-gate 	NULL
748*7c478bd9Sstevel@tonic-gate };
749*7c478bd9Sstevel@tonic-gate 
750*7c478bd9Sstevel@tonic-gate /*
751*7c478bd9Sstevel@tonic-gate  * Templates for response to info request
752*7c478bd9Sstevel@tonic-gate  * Check sanity of unlimited connect data etc.
753*7c478bd9Sstevel@tonic-gate  */
754*7c478bd9Sstevel@tonic-gate 
755*7c478bd9Sstevel@tonic-gate #define		TL_CLTS_PROVIDER_FLAG	(XPG4_1|SENDZERO)
756*7c478bd9Sstevel@tonic-gate #define		TL_COTS_PROVIDER_FLAG	(XPG4_1|SENDZERO)
757*7c478bd9Sstevel@tonic-gate 
758*7c478bd9Sstevel@tonic-gate static struct T_info_ack tl_cots_info_ack =
759*7c478bd9Sstevel@tonic-gate 	{
760*7c478bd9Sstevel@tonic-gate 		T_INFO_ACK,	/* PRIM_type -always T_INFO_ACK */
761*7c478bd9Sstevel@tonic-gate 		T_INFINITE,	/* TSDU size */
762*7c478bd9Sstevel@tonic-gate 		T_INFINITE,	/* ETSDU size */
763*7c478bd9Sstevel@tonic-gate 		T_INFINITE,	/* CDATA_size */
764*7c478bd9Sstevel@tonic-gate 		T_INFINITE,	/* DDATA_size */
765*7c478bd9Sstevel@tonic-gate 		T_INFINITE,	/* ADDR_size  */
766*7c478bd9Sstevel@tonic-gate 		T_INFINITE,	/* OPT_size */
767*7c478bd9Sstevel@tonic-gate 		0,		/* TIDU_size - fill at run time */
768*7c478bd9Sstevel@tonic-gate 		T_COTS,		/* SERV_type */
769*7c478bd9Sstevel@tonic-gate 		-1,		/* CURRENT_state */
770*7c478bd9Sstevel@tonic-gate 		TL_COTS_PROVIDER_FLAG	/* PROVIDER_flag */
771*7c478bd9Sstevel@tonic-gate 	};
772*7c478bd9Sstevel@tonic-gate 
773*7c478bd9Sstevel@tonic-gate static struct T_info_ack tl_clts_info_ack =
774*7c478bd9Sstevel@tonic-gate 	{
775*7c478bd9Sstevel@tonic-gate 		T_INFO_ACK,	/* PRIM_type - always T_INFO_ACK */
776*7c478bd9Sstevel@tonic-gate 		0,		/* TSDU_size - fill at run time */
777*7c478bd9Sstevel@tonic-gate 		-2,		/* ETSDU_size -2 => not supported */
778*7c478bd9Sstevel@tonic-gate 		-2,		/* CDATA_size -2 => not supported */
779*7c478bd9Sstevel@tonic-gate 		-2,		/* DDATA_size  -2 => not supported */
780*7c478bd9Sstevel@tonic-gate 		-1,		/* ADDR_size -1 => unlimited */
781*7c478bd9Sstevel@tonic-gate 		-1,		/* OPT_size */
782*7c478bd9Sstevel@tonic-gate 		0,		/* TIDU_size - fill at run time */
783*7c478bd9Sstevel@tonic-gate 		T_CLTS,		/* SERV_type */
784*7c478bd9Sstevel@tonic-gate 		-1,		/* CURRENT_state */
785*7c478bd9Sstevel@tonic-gate 		TL_CLTS_PROVIDER_FLAG /* PROVIDER_flag */
786*7c478bd9Sstevel@tonic-gate 	};
787*7c478bd9Sstevel@tonic-gate 
788*7c478bd9Sstevel@tonic-gate /*
789*7c478bd9Sstevel@tonic-gate  * private copy of devinfo pointer used in tl_info
790*7c478bd9Sstevel@tonic-gate  */
791*7c478bd9Sstevel@tonic-gate static dev_info_t *tl_dip;
792*7c478bd9Sstevel@tonic-gate 
793*7c478bd9Sstevel@tonic-gate /*
794*7c478bd9Sstevel@tonic-gate  * Endpoints cache.
795*7c478bd9Sstevel@tonic-gate  */
796*7c478bd9Sstevel@tonic-gate static kmem_cache_t *tl_cache;
797*7c478bd9Sstevel@tonic-gate /*
798*7c478bd9Sstevel@tonic-gate  * Minor number space.
799*7c478bd9Sstevel@tonic-gate  */
800*7c478bd9Sstevel@tonic-gate static id_space_t *tl_minors;
801*7c478bd9Sstevel@tonic-gate 
802*7c478bd9Sstevel@tonic-gate /*
803*7c478bd9Sstevel@tonic-gate  * Default Data Unit size.
804*7c478bd9Sstevel@tonic-gate  */
805*7c478bd9Sstevel@tonic-gate static t_scalar_t tl_tidusz;
806*7c478bd9Sstevel@tonic-gate 
807*7c478bd9Sstevel@tonic-gate /*
808*7c478bd9Sstevel@tonic-gate  * Size of hash tables.
809*7c478bd9Sstevel@tonic-gate  */
810*7c478bd9Sstevel@tonic-gate static size_t tl_hash_size = TL_HASH_SIZE;
811*7c478bd9Sstevel@tonic-gate 
812*7c478bd9Sstevel@tonic-gate /*
813*7c478bd9Sstevel@tonic-gate  * Debug and test variable ONLY. Turn off T_CONN_IND queueing
814*7c478bd9Sstevel@tonic-gate  * for sockets.
815*7c478bd9Sstevel@tonic-gate  */
816*7c478bd9Sstevel@tonic-gate static int tl_disable_early_connect = 0;
817*7c478bd9Sstevel@tonic-gate static int tl_client_closing_when_accepting;
818*7c478bd9Sstevel@tonic-gate 
819*7c478bd9Sstevel@tonic-gate static int tl_serializer_noswitch;
820*7c478bd9Sstevel@tonic-gate 
821*7c478bd9Sstevel@tonic-gate /*
822*7c478bd9Sstevel@tonic-gate  * LOCAL FUNCTION PROTOTYPES
823*7c478bd9Sstevel@tonic-gate  * -------------------------
824*7c478bd9Sstevel@tonic-gate  */
825*7c478bd9Sstevel@tonic-gate static boolean_t tl_eqaddr(tl_addr_t *, tl_addr_t *);
826*7c478bd9Sstevel@tonic-gate static void tl_do_proto(mblk_t *, tl_endpt_t *);
827*7c478bd9Sstevel@tonic-gate static void tl_do_ioctl(mblk_t *, tl_endpt_t *);
828*7c478bd9Sstevel@tonic-gate static void tl_do_ioctl_ser(mblk_t *, tl_endpt_t *);
829*7c478bd9Sstevel@tonic-gate static void tl_error_ack(queue_t *, mblk_t *, t_scalar_t, t_scalar_t,
830*7c478bd9Sstevel@tonic-gate 	t_scalar_t);
831*7c478bd9Sstevel@tonic-gate static void tl_bind(mblk_t *, tl_endpt_t *);
832*7c478bd9Sstevel@tonic-gate static void tl_bind_ser(mblk_t *, tl_endpt_t *);
833*7c478bd9Sstevel@tonic-gate static void tl_ok_ack(queue_t *, mblk_t  *mp, t_scalar_t);
834*7c478bd9Sstevel@tonic-gate static void tl_unbind(mblk_t *, tl_endpt_t *);
835*7c478bd9Sstevel@tonic-gate static void tl_optmgmt(queue_t *, mblk_t *);
836*7c478bd9Sstevel@tonic-gate static void tl_conn_req(queue_t *, mblk_t *);
837*7c478bd9Sstevel@tonic-gate static void tl_conn_req_ser(mblk_t *, tl_endpt_t *);
838*7c478bd9Sstevel@tonic-gate static void tl_conn_res(mblk_t *, tl_endpt_t *);
839*7c478bd9Sstevel@tonic-gate static void tl_discon_req(mblk_t *, tl_endpt_t *);
840*7c478bd9Sstevel@tonic-gate static void tl_capability_req(mblk_t *, tl_endpt_t *);
841*7c478bd9Sstevel@tonic-gate static void tl_info_req_ser(mblk_t *, tl_endpt_t *);
842*7c478bd9Sstevel@tonic-gate static void tl_info_req(mblk_t *, tl_endpt_t *);
843*7c478bd9Sstevel@tonic-gate static void tl_addr_req(mblk_t *, tl_endpt_t *);
844*7c478bd9Sstevel@tonic-gate static void tl_connected_cots_addr_req(mblk_t *, tl_endpt_t *);
845*7c478bd9Sstevel@tonic-gate static void tl_data(mblk_t  *, tl_endpt_t *);
846*7c478bd9Sstevel@tonic-gate static void tl_exdata(mblk_t *, tl_endpt_t *);
847*7c478bd9Sstevel@tonic-gate static void tl_ordrel(mblk_t *, tl_endpt_t *);
848*7c478bd9Sstevel@tonic-gate static void tl_unitdata(mblk_t *, tl_endpt_t *);
849*7c478bd9Sstevel@tonic-gate static void tl_unitdata_ser(mblk_t *, tl_endpt_t *);
850*7c478bd9Sstevel@tonic-gate static void tl_uderr(queue_t *, mblk_t *, t_scalar_t);
851*7c478bd9Sstevel@tonic-gate static tl_endpt_t *tl_find_peer(tl_endpt_t *, tl_addr_t *);
852*7c478bd9Sstevel@tonic-gate static tl_endpt_t *tl_sock_find_peer(tl_endpt_t *, struct so_ux_addr *);
853*7c478bd9Sstevel@tonic-gate static boolean_t tl_get_any_addr(tl_endpt_t *, tl_addr_t *);
854*7c478bd9Sstevel@tonic-gate static void tl_cl_backenable(tl_endpt_t *);
855*7c478bd9Sstevel@tonic-gate static void tl_co_unconnect(tl_endpt_t *);
856*7c478bd9Sstevel@tonic-gate static mblk_t *tl_resizemp(mblk_t *, ssize_t);
857*7c478bd9Sstevel@tonic-gate static void tl_discon_ind(tl_endpt_t *, uint32_t);
858*7c478bd9Sstevel@tonic-gate static mblk_t *tl_discon_ind_alloc(uint32_t, t_scalar_t);
859*7c478bd9Sstevel@tonic-gate static mblk_t *tl_ordrel_ind_alloc(void);
860*7c478bd9Sstevel@tonic-gate static tl_icon_t *tl_icon_find(tl_endpt_t *, t_scalar_t);
861*7c478bd9Sstevel@tonic-gate static void tl_icon_queuemsg(tl_endpt_t *, t_scalar_t, mblk_t *);
862*7c478bd9Sstevel@tonic-gate static boolean_t tl_icon_hasprim(tl_endpt_t *, t_scalar_t, t_scalar_t);
863*7c478bd9Sstevel@tonic-gate static void tl_icon_sendmsgs(tl_endpt_t *, mblk_t **);
864*7c478bd9Sstevel@tonic-gate static void tl_icon_freemsgs(mblk_t **);
865*7c478bd9Sstevel@tonic-gate static void tl_merror(queue_t *, mblk_t *, int);
866*7c478bd9Sstevel@tonic-gate static void tl_fill_option(uchar_t *, cred_t *, pid_t, int);
867*7c478bd9Sstevel@tonic-gate static int tl_default_opt(queue_t *, int, int, uchar_t *);
868*7c478bd9Sstevel@tonic-gate static int tl_get_opt(queue_t *, int, int, uchar_t *);
869*7c478bd9Sstevel@tonic-gate static int tl_set_opt(queue_t *, uint_t, int, int, uint_t, uchar_t *, uint_t *,
870*7c478bd9Sstevel@tonic-gate     uchar_t *, void *, cred_t *, mblk_t *);
871*7c478bd9Sstevel@tonic-gate static void tl_memrecover(queue_t *, mblk_t *, size_t);
872*7c478bd9Sstevel@tonic-gate static void tl_freetip(tl_endpt_t *, tl_icon_t *);
873*7c478bd9Sstevel@tonic-gate static void tl_free(tl_endpt_t *);
874*7c478bd9Sstevel@tonic-gate static int  tl_constructor(void *, void *, int);
875*7c478bd9Sstevel@tonic-gate static void tl_destructor(void *, void *);
876*7c478bd9Sstevel@tonic-gate static void tl_find_callback(mod_hash_key_t, mod_hash_val_t);
877*7c478bd9Sstevel@tonic-gate static tl_serializer_t *tl_serializer_alloc(int);
878*7c478bd9Sstevel@tonic-gate static void tl_serializer_refhold(tl_serializer_t *);
879*7c478bd9Sstevel@tonic-gate static void tl_serializer_refrele(tl_serializer_t *);
880*7c478bd9Sstevel@tonic-gate static void tl_serializer_enter(tl_endpt_t *, tlproc_t, mblk_t *);
881*7c478bd9Sstevel@tonic-gate static void tl_serializer_exit(tl_endpt_t *);
882*7c478bd9Sstevel@tonic-gate static boolean_t tl_noclose(tl_endpt_t *);
883*7c478bd9Sstevel@tonic-gate static void tl_closeok(tl_endpt_t *);
884*7c478bd9Sstevel@tonic-gate static void tl_refhold(tl_endpt_t *);
885*7c478bd9Sstevel@tonic-gate static void tl_refrele(tl_endpt_t *);
886*7c478bd9Sstevel@tonic-gate static int tl_hash_cmp_addr(mod_hash_key_t, mod_hash_key_t);
887*7c478bd9Sstevel@tonic-gate static uint_t tl_hash_by_addr(void *, mod_hash_key_t);
888*7c478bd9Sstevel@tonic-gate static void tl_close_ser(mblk_t *, tl_endpt_t *);
889*7c478bd9Sstevel@tonic-gate static void tl_close_finish_ser(mblk_t *, tl_endpt_t *);
890*7c478bd9Sstevel@tonic-gate static void tl_wput_data_ser(mblk_t *, tl_endpt_t *);
891*7c478bd9Sstevel@tonic-gate static void tl_proto_ser(mblk_t *, tl_endpt_t *);
892*7c478bd9Sstevel@tonic-gate static void tl_putq_ser(mblk_t *, tl_endpt_t *);
893*7c478bd9Sstevel@tonic-gate static void tl_wput_common_ser(mblk_t *, tl_endpt_t *);
894*7c478bd9Sstevel@tonic-gate static void tl_wput_ser(mblk_t *, tl_endpt_t *);
895*7c478bd9Sstevel@tonic-gate static void tl_wsrv_ser(mblk_t *, tl_endpt_t *);
896*7c478bd9Sstevel@tonic-gate static void tl_rsrv_ser(mblk_t *, tl_endpt_t *);
897*7c478bd9Sstevel@tonic-gate static void tl_addr_unbind(tl_endpt_t *);
898*7c478bd9Sstevel@tonic-gate 
899*7c478bd9Sstevel@tonic-gate /*
900*7c478bd9Sstevel@tonic-gate  * Initialize option database object for TL.
901*7c478bd9Sstevel@tonic-gate  */
902*7c478bd9Sstevel@tonic-gate 
903*7c478bd9Sstevel@tonic-gate optdb_obj_t tl_opt_obj = {
904*7c478bd9Sstevel@tonic-gate 	tl_default_opt,		/* TL default value function pointer */
905*7c478bd9Sstevel@tonic-gate 	tl_get_opt,		/* TL get function pointer */
906*7c478bd9Sstevel@tonic-gate 	tl_set_opt,		/* TL set function pointer */
907*7c478bd9Sstevel@tonic-gate 	B_TRUE,			/* TL is tpi provider */
908*7c478bd9Sstevel@tonic-gate 	TL_OPT_ARR_CNT,		/* TL option database count of entries */
909*7c478bd9Sstevel@tonic-gate 	tl_opt_arr,		/* TL option database */
910*7c478bd9Sstevel@tonic-gate 	TL_VALID_LEVELS_CNT,	/* TL valid level count of entries */
911*7c478bd9Sstevel@tonic-gate 	tl_valid_levels_arr	/* TL valid level array */
912*7c478bd9Sstevel@tonic-gate };
913*7c478bd9Sstevel@tonic-gate 
914*7c478bd9Sstevel@tonic-gate /*
915*7c478bd9Sstevel@tonic-gate  * Logical operations.
916*7c478bd9Sstevel@tonic-gate  *
917*7c478bd9Sstevel@tonic-gate  * IMPLY(X, Y) means that X implies Y, i.e. when X is true, Y
918*7c478bd9Sstevel@tonic-gate  * should also be true.
919*7c478bd9Sstevel@tonic-gate  *
920*7c478bd9Sstevel@tonic-gate  * EQUIV(X, Y) is logical equivalence. Both X and Y should be true or false
921*7c478bd9Sstevel@tonic-gate  * at the same time.
922*7c478bd9Sstevel@tonic-gate  */
923*7c478bd9Sstevel@tonic-gate #define	IMPLY(X, Y)	(!(X) || (Y))
924*7c478bd9Sstevel@tonic-gate #define	EQUIV(X, Y)	(IMPLY(X, Y) && IMPLY(Y, X))
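
/*
 * For example, these macros are used in assertions below, such as:
 *
 *	ASSERT(EQUIV(i & TL_TICLTS, t->tr_serializer != NULL));
 *	ASSERT(IMPLY(IS_COTS(tep), tep->te_closewait == 1));
 */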
925*7c478bd9Sstevel@tonic-gate 
926*7c478bd9Sstevel@tonic-gate /*
927*7c478bd9Sstevel@tonic-gate  * LOCAL FUNCTIONS AND DRIVER ENTRY POINTS
928*7c478bd9Sstevel@tonic-gate  * ---------------------------------------
929*7c478bd9Sstevel@tonic-gate  */
930*7c478bd9Sstevel@tonic-gate 
931*7c478bd9Sstevel@tonic-gate /*
932*7c478bd9Sstevel@tonic-gate  * Loadable module routines
933*7c478bd9Sstevel@tonic-gate  */
934*7c478bd9Sstevel@tonic-gate int
935*7c478bd9Sstevel@tonic-gate _init(void)
936*7c478bd9Sstevel@tonic-gate {
937*7c478bd9Sstevel@tonic-gate 	return (mod_install(&modlinkage));
938*7c478bd9Sstevel@tonic-gate }
939*7c478bd9Sstevel@tonic-gate 
940*7c478bd9Sstevel@tonic-gate int
941*7c478bd9Sstevel@tonic-gate _fini(void)
942*7c478bd9Sstevel@tonic-gate {
943*7c478bd9Sstevel@tonic-gate 	return (mod_remove(&modlinkage));
944*7c478bd9Sstevel@tonic-gate }
945*7c478bd9Sstevel@tonic-gate 
946*7c478bd9Sstevel@tonic-gate int
947*7c478bd9Sstevel@tonic-gate _info(struct modinfo *modinfop)
948*7c478bd9Sstevel@tonic-gate {
949*7c478bd9Sstevel@tonic-gate 	return (mod_info(&modlinkage, modinfop));
950*7c478bd9Sstevel@tonic-gate }
951*7c478bd9Sstevel@tonic-gate 
952*7c478bd9Sstevel@tonic-gate /*
953*7c478bd9Sstevel@tonic-gate  * Driver Entry Points and Other routines
954*7c478bd9Sstevel@tonic-gate  */
955*7c478bd9Sstevel@tonic-gate static int
956*7c478bd9Sstevel@tonic-gate tl_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
957*7c478bd9Sstevel@tonic-gate {
958*7c478bd9Sstevel@tonic-gate 	int i;
959*7c478bd9Sstevel@tonic-gate 	char name[32];
960*7c478bd9Sstevel@tonic-gate 
961*7c478bd9Sstevel@tonic-gate 	/*
962*7c478bd9Sstevel@tonic-gate 	 * Resume from a checkpoint state.
963*7c478bd9Sstevel@tonic-gate 	 */
964*7c478bd9Sstevel@tonic-gate 	if (cmd == DDI_RESUME)
965*7c478bd9Sstevel@tonic-gate 		return (DDI_SUCCESS);
966*7c478bd9Sstevel@tonic-gate 
967*7c478bd9Sstevel@tonic-gate 	if (cmd != DDI_ATTACH)
968*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
969*7c478bd9Sstevel@tonic-gate 
970*7c478bd9Sstevel@tonic-gate 	/*
971*7c478bd9Sstevel@tonic-gate 	 * Deduce the TIDU size to use.  Note: "strmsgsz" being 0 means that
972*7c478bd9Sstevel@tonic-gate 	 * STREAMS message sizes are unlimited, so we use a defined constant
973*7c478bd9Sstevel@tonic-gate 	 * instead.
974*7c478bd9Sstevel@tonic-gate 	 */
975*7c478bd9Sstevel@tonic-gate 	tl_tidusz = strmsgsz != 0 ? (t_scalar_t)strmsgsz : TL_TIDUSZ;
976*7c478bd9Sstevel@tonic-gate 
977*7c478bd9Sstevel@tonic-gate 	/*
978*7c478bd9Sstevel@tonic-gate 	 * Create subdevices for each transport.
979*7c478bd9Sstevel@tonic-gate 	 */
980*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < TL_UNUSED; i++) {
981*7c478bd9Sstevel@tonic-gate 		if (ddi_create_minor_node(devi,
982*7c478bd9Sstevel@tonic-gate 			tl_transports[i].tr_name,
983*7c478bd9Sstevel@tonic-gate 			S_IFCHR, tl_transports[i].tr_minor,
984*7c478bd9Sstevel@tonic-gate 			DDI_PSEUDO, NULL) == DDI_FAILURE) {
985*7c478bd9Sstevel@tonic-gate 			ddi_remove_minor_node(devi, NULL);
986*7c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
987*7c478bd9Sstevel@tonic-gate 		}
988*7c478bd9Sstevel@tonic-gate 	}
989*7c478bd9Sstevel@tonic-gate 
990*7c478bd9Sstevel@tonic-gate 	tl_cache = kmem_cache_create("tl_cache", sizeof (tl_endpt_t),
991*7c478bd9Sstevel@tonic-gate 	    0, tl_constructor, tl_destructor, NULL, NULL, NULL, 0);
992*7c478bd9Sstevel@tonic-gate 
993*7c478bd9Sstevel@tonic-gate 	if (tl_cache == NULL) {
994*7c478bd9Sstevel@tonic-gate 		ddi_remove_minor_node(devi, NULL);
995*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
996*7c478bd9Sstevel@tonic-gate 	}
997*7c478bd9Sstevel@tonic-gate 
998*7c478bd9Sstevel@tonic-gate 	/*
999*7c478bd9Sstevel@tonic-gate 	 * Create ID space for minor numbers.
1000*7c478bd9Sstevel@tonic-gate 	 */
1001*7c478bd9Sstevel@tonic-gate 	tl_minors = id_space_create("tl_minor_space",
1002*7c478bd9Sstevel@tonic-gate 	    TL_MINOR_START, MAXMIN32 - TL_MINOR_START + 1);
1003*7c478bd9Sstevel@tonic-gate 
1004*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < TL_MAXTRANSPORT; i++) {
1005*7c478bd9Sstevel@tonic-gate 		tl_transport_state_t *t = &tl_transports[i];
1006*7c478bd9Sstevel@tonic-gate 
1007*7c478bd9Sstevel@tonic-gate 		if (i == TL_UNUSED)
1008*7c478bd9Sstevel@tonic-gate 			continue;
1009*7c478bd9Sstevel@tonic-gate 
1010*7c478bd9Sstevel@tonic-gate 		/* Socket COTSORD shares namespace with COTS */
1011*7c478bd9Sstevel@tonic-gate 		if (i == TL_SOCK_COTSORD) {
1012*7c478bd9Sstevel@tonic-gate 			t->tr_ai_hash =
1013*7c478bd9Sstevel@tonic-gate 			    tl_transports[TL_SOCK_COTS].tr_ai_hash;
1014*7c478bd9Sstevel@tonic-gate 			ASSERT(t->tr_ai_hash != NULL);
1015*7c478bd9Sstevel@tonic-gate 			t->tr_addr_hash =
1016*7c478bd9Sstevel@tonic-gate 			    tl_transports[TL_SOCK_COTS].tr_addr_hash;
1017*7c478bd9Sstevel@tonic-gate 			ASSERT(t->tr_addr_hash != NULL);
1018*7c478bd9Sstevel@tonic-gate 			continue;
1019*7c478bd9Sstevel@tonic-gate 		}
1020*7c478bd9Sstevel@tonic-gate 
1021*7c478bd9Sstevel@tonic-gate 		/*
1022*7c478bd9Sstevel@tonic-gate 		 * Create hash tables.
1023*7c478bd9Sstevel@tonic-gate 		 */
1024*7c478bd9Sstevel@tonic-gate 		(void) snprintf(name, sizeof (name), "%s_ai_hash",
1025*7c478bd9Sstevel@tonic-gate 		    t->tr_name);
1026*7c478bd9Sstevel@tonic-gate #ifdef _ILP32
1027*7c478bd9Sstevel@tonic-gate 		if (i & TL_SOCKET)
1028*7c478bd9Sstevel@tonic-gate 			t->tr_ai_hash =
1029*7c478bd9Sstevel@tonic-gate 			    mod_hash_create_idhash(name, tl_hash_size - 1,
1030*7c478bd9Sstevel@tonic-gate 				mod_hash_null_valdtor);
1031*7c478bd9Sstevel@tonic-gate 		else
1032*7c478bd9Sstevel@tonic-gate 			t->tr_ai_hash =
1033*7c478bd9Sstevel@tonic-gate 			    mod_hash_create_ptrhash(name, tl_hash_size,
1034*7c478bd9Sstevel@tonic-gate 				mod_hash_null_valdtor, sizeof (queue_t));
1035*7c478bd9Sstevel@tonic-gate #else
1036*7c478bd9Sstevel@tonic-gate 		t->tr_ai_hash =
1037*7c478bd9Sstevel@tonic-gate 		    mod_hash_create_idhash(name, tl_hash_size - 1,
1038*7c478bd9Sstevel@tonic-gate 			mod_hash_null_valdtor);
1039*7c478bd9Sstevel@tonic-gate #endif /* _ILP32 */
1040*7c478bd9Sstevel@tonic-gate 
1041*7c478bd9Sstevel@tonic-gate 		if (i & TL_SOCKET) {
1042*7c478bd9Sstevel@tonic-gate 			(void) snprintf(name, sizeof (name), "%s_sockaddr_hash",
1043*7c478bd9Sstevel@tonic-gate 			    t->tr_name);
1044*7c478bd9Sstevel@tonic-gate 			t->tr_addr_hash = mod_hash_create_ptrhash(name,
1045*7c478bd9Sstevel@tonic-gate 			    tl_hash_size, mod_hash_null_valdtor,
1046*7c478bd9Sstevel@tonic-gate 			    sizeof (uintptr_t));
1047*7c478bd9Sstevel@tonic-gate 		} else {
1048*7c478bd9Sstevel@tonic-gate 			(void) snprintf(name, sizeof (name), "%s_addr_hash",
1049*7c478bd9Sstevel@tonic-gate 			    t->tr_name);
1050*7c478bd9Sstevel@tonic-gate 			t->tr_addr_hash = mod_hash_create_extended(name,
1051*7c478bd9Sstevel@tonic-gate 			    tl_hash_size, mod_hash_null_keydtor,
1052*7c478bd9Sstevel@tonic-gate 			    mod_hash_null_valdtor,
1053*7c478bd9Sstevel@tonic-gate 			    tl_hash_by_addr, NULL, tl_hash_cmp_addr, KM_SLEEP);
1054*7c478bd9Sstevel@tonic-gate 		}
1055*7c478bd9Sstevel@tonic-gate 
1056*7c478bd9Sstevel@tonic-gate 		/* Create serializer for connectionless transports. */
1057*7c478bd9Sstevel@tonic-gate 		if (i & TL_TICLTS)
1058*7c478bd9Sstevel@tonic-gate 			t->tr_serializer = tl_serializer_alloc(KM_SLEEP);
1059*7c478bd9Sstevel@tonic-gate 	}
1060*7c478bd9Sstevel@tonic-gate 
1061*7c478bd9Sstevel@tonic-gate 	tl_dip = devi;
1062*7c478bd9Sstevel@tonic-gate 
1063*7c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
1064*7c478bd9Sstevel@tonic-gate }
1065*7c478bd9Sstevel@tonic-gate 
1066*7c478bd9Sstevel@tonic-gate static int
1067*7c478bd9Sstevel@tonic-gate tl_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1068*7c478bd9Sstevel@tonic-gate {
1069*7c478bd9Sstevel@tonic-gate 	int i;
1070*7c478bd9Sstevel@tonic-gate 
1071*7c478bd9Sstevel@tonic-gate 	if (cmd == DDI_SUSPEND)
1072*7c478bd9Sstevel@tonic-gate 		return (DDI_SUCCESS);
1073*7c478bd9Sstevel@tonic-gate 
1074*7c478bd9Sstevel@tonic-gate 	if (cmd != DDI_DETACH)
1075*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
1076*7c478bd9Sstevel@tonic-gate 
1077*7c478bd9Sstevel@tonic-gate 	/*
1078*7c478bd9Sstevel@tonic-gate 	 * Destroy serializers, hash tables, the endpoint cache and the ID space.
1079*7c478bd9Sstevel@tonic-gate 	 */
1080*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < TL_MAXTRANSPORT; i++) {
1081*7c478bd9Sstevel@tonic-gate 		tl_transport_state_t *t = &tl_transports[i];
1082*7c478bd9Sstevel@tonic-gate 
1083*7c478bd9Sstevel@tonic-gate 		if ((i == TL_UNUSED) || (i == TL_SOCK_COTSORD))
1084*7c478bd9Sstevel@tonic-gate 			continue;
1085*7c478bd9Sstevel@tonic-gate 
1086*7c478bd9Sstevel@tonic-gate 		ASSERT(EQUIV(i & TL_TICLTS, t->tr_serializer != NULL));
1087*7c478bd9Sstevel@tonic-gate 		if (t->tr_serializer != NULL) {
1088*7c478bd9Sstevel@tonic-gate 			tl_serializer_refrele(t->tr_serializer);
1089*7c478bd9Sstevel@tonic-gate 			t->tr_serializer = NULL;
1090*7c478bd9Sstevel@tonic-gate 		}
1091*7c478bd9Sstevel@tonic-gate 
1092*7c478bd9Sstevel@tonic-gate #ifdef _ILP32
1093*7c478bd9Sstevel@tonic-gate 		if (i & TL_SOCKET)
1094*7c478bd9Sstevel@tonic-gate 			mod_hash_destroy_idhash(t->tr_ai_hash);
1095*7c478bd9Sstevel@tonic-gate 		else
1096*7c478bd9Sstevel@tonic-gate 			mod_hash_destroy_ptrhash(t->tr_ai_hash);
1097*7c478bd9Sstevel@tonic-gate #else
1098*7c478bd9Sstevel@tonic-gate 		mod_hash_destroy_idhash(t->tr_ai_hash);
1099*7c478bd9Sstevel@tonic-gate #endif /* _ILP32 */
1100*7c478bd9Sstevel@tonic-gate 		t->tr_ai_hash = NULL;
1101*7c478bd9Sstevel@tonic-gate 		if (i & TL_SOCKET)
1102*7c478bd9Sstevel@tonic-gate 			mod_hash_destroy_ptrhash(t->tr_addr_hash);
1103*7c478bd9Sstevel@tonic-gate 		else
1104*7c478bd9Sstevel@tonic-gate 			mod_hash_destroy_hash(t->tr_addr_hash);
1105*7c478bd9Sstevel@tonic-gate 		t->tr_addr_hash = NULL;
1106*7c478bd9Sstevel@tonic-gate 	}
1107*7c478bd9Sstevel@tonic-gate 
1108*7c478bd9Sstevel@tonic-gate 	kmem_cache_destroy(tl_cache);
1109*7c478bd9Sstevel@tonic-gate 	tl_cache = NULL;
1110*7c478bd9Sstevel@tonic-gate 	id_space_destroy(tl_minors);
1111*7c478bd9Sstevel@tonic-gate 	tl_minors = NULL;
1112*7c478bd9Sstevel@tonic-gate 	ddi_remove_minor_node(devi, NULL);
1113*7c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
1114*7c478bd9Sstevel@tonic-gate }
1115*7c478bd9Sstevel@tonic-gate 
1116*7c478bd9Sstevel@tonic-gate /* ARGSUSED */
1117*7c478bd9Sstevel@tonic-gate static int
1118*7c478bd9Sstevel@tonic-gate tl_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
1119*7c478bd9Sstevel@tonic-gate {
1121*7c478bd9Sstevel@tonic-gate 	int retcode = DDI_FAILURE;
1122*7c478bd9Sstevel@tonic-gate 
1123*7c478bd9Sstevel@tonic-gate 	switch (infocmd) {
1124*7c478bd9Sstevel@tonic-gate 
1125*7c478bd9Sstevel@tonic-gate 	case DDI_INFO_DEVT2DEVINFO:
1126*7c478bd9Sstevel@tonic-gate 		if (tl_dip != NULL) {
1127*7c478bd9Sstevel@tonic-gate 			*result = (void *)tl_dip;
1128*7c478bd9Sstevel@tonic-gate 			retcode = DDI_SUCCESS;
1129*7c478bd9Sstevel@tonic-gate 		}
1130*7c478bd9Sstevel@tonic-gate 		break;
1131*7c478bd9Sstevel@tonic-gate 
1132*7c478bd9Sstevel@tonic-gate 	case DDI_INFO_DEVT2INSTANCE:
1133*7c478bd9Sstevel@tonic-gate 		*result = (void *)0;
1134*7c478bd9Sstevel@tonic-gate 		retcode = DDI_SUCCESS;
1135*7c478bd9Sstevel@tonic-gate 		break;
1136*7c478bd9Sstevel@tonic-gate 
1137*7c478bd9Sstevel@tonic-gate 	default:
1138*7c478bd9Sstevel@tonic-gate 		break;
1139*7c478bd9Sstevel@tonic-gate 	}
1140*7c478bd9Sstevel@tonic-gate 	return (retcode);
1141*7c478bd9Sstevel@tonic-gate }
1142*7c478bd9Sstevel@tonic-gate 
1143*7c478bd9Sstevel@tonic-gate /*
1144*7c478bd9Sstevel@tonic-gate  * Endpoint reference management.
1145*7c478bd9Sstevel@tonic-gate  */
1146*7c478bd9Sstevel@tonic-gate static void
1147*7c478bd9Sstevel@tonic-gate tl_refhold(tl_endpt_t *tep)
1148*7c478bd9Sstevel@tonic-gate {
1149*7c478bd9Sstevel@tonic-gate 	atomic_add_32(&tep->te_refcnt, 1);
1150*7c478bd9Sstevel@tonic-gate }
1151*7c478bd9Sstevel@tonic-gate 
1152*7c478bd9Sstevel@tonic-gate static void
1153*7c478bd9Sstevel@tonic-gate tl_refrele(tl_endpt_t *tep)
1154*7c478bd9Sstevel@tonic-gate {
1155*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_refcnt != 0);
1156*7c478bd9Sstevel@tonic-gate 
1157*7c478bd9Sstevel@tonic-gate 	if (atomic_add_32_nv(&tep->te_refcnt, -1) == 0)
1158*7c478bd9Sstevel@tonic-gate 		tl_free(tep);
1159*7c478bd9Sstevel@tonic-gate }
1160*7c478bd9Sstevel@tonic-gate 
1161*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
1162*7c478bd9Sstevel@tonic-gate static int
1163*7c478bd9Sstevel@tonic-gate tl_constructor(void *buf, void *cdrarg, int kmflags)
1164*7c478bd9Sstevel@tonic-gate {
1165*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *tep = buf;
1166*7c478bd9Sstevel@tonic-gate 
1167*7c478bd9Sstevel@tonic-gate 	bzero(tep, sizeof (tl_endpt_t));
1168*7c478bd9Sstevel@tonic-gate 	mutex_init(&tep->te_closelock, NULL, MUTEX_DEFAULT, NULL);
1169*7c478bd9Sstevel@tonic-gate 	cv_init(&tep->te_closecv, NULL, CV_DEFAULT, NULL);
1170*7c478bd9Sstevel@tonic-gate 	mutex_init(&tep->te_srv_lock, NULL, MUTEX_DEFAULT, NULL);
1171*7c478bd9Sstevel@tonic-gate 	cv_init(&tep->te_srv_cv, NULL, CV_DEFAULT, NULL);
1172*7c478bd9Sstevel@tonic-gate 	mutex_init(&tep->te_ser_lock, NULL, MUTEX_DEFAULT, NULL);
1173*7c478bd9Sstevel@tonic-gate 
1174*7c478bd9Sstevel@tonic-gate 	return (0);
1175*7c478bd9Sstevel@tonic-gate }
1176*7c478bd9Sstevel@tonic-gate 
1177*7c478bd9Sstevel@tonic-gate /*ARGSUSED*/
1178*7c478bd9Sstevel@tonic-gate static void
1179*7c478bd9Sstevel@tonic-gate tl_destructor(void *buf, void *cdrarg)
1180*7c478bd9Sstevel@tonic-gate {
1181*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *tep = buf;
1182*7c478bd9Sstevel@tonic-gate 
1183*7c478bd9Sstevel@tonic-gate 	mutex_destroy(&tep->te_closelock);
1184*7c478bd9Sstevel@tonic-gate 	cv_destroy(&tep->te_closecv);
1185*7c478bd9Sstevel@tonic-gate 	mutex_destroy(&tep->te_srv_lock);
1186*7c478bd9Sstevel@tonic-gate 	cv_destroy(&tep->te_srv_cv);
1187*7c478bd9Sstevel@tonic-gate 	mutex_destroy(&tep->te_ser_lock);
1188*7c478bd9Sstevel@tonic-gate }
1189*7c478bd9Sstevel@tonic-gate 
1190*7c478bd9Sstevel@tonic-gate static void
1191*7c478bd9Sstevel@tonic-gate tl_free(tl_endpt_t *tep)
1192*7c478bd9Sstevel@tonic-gate {
1193*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_refcnt == 0);
1194*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_transport != NULL);
1195*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_rq == NULL);
1196*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_wq == NULL);
1197*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_ser != NULL);
1198*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_ser_count == 0);
1199*7c478bd9Sstevel@tonic-gate 	ASSERT(! (tep->te_flag & TL_ADDRHASHED));
1200*7c478bd9Sstevel@tonic-gate 
1201*7c478bd9Sstevel@tonic-gate 	if (IS_SOCKET(tep)) {
1202*7c478bd9Sstevel@tonic-gate 		ASSERT(tep->te_alen == TL_SOUX_ADDRLEN);
1203*7c478bd9Sstevel@tonic-gate 		ASSERT(tep->te_abuf == &tep->te_uxaddr);
1204*7c478bd9Sstevel@tonic-gate 		ASSERT(tep->te_vp == (void *)(uintptr_t)tep->te_minor);
1205*7c478bd9Sstevel@tonic-gate 		ASSERT(tep->te_magic == SOU_MAGIC_IMPLICIT);
1206*7c478bd9Sstevel@tonic-gate 	} else if (tep->te_abuf != NULL) {
1207*7c478bd9Sstevel@tonic-gate 		kmem_free(tep->te_abuf, tep->te_alen);
1208*7c478bd9Sstevel@tonic-gate 		tep->te_alen = -1; /* uninitialized */
1209*7c478bd9Sstevel@tonic-gate 		tep->te_abuf = NULL;
1210*7c478bd9Sstevel@tonic-gate 	} else {
1211*7c478bd9Sstevel@tonic-gate 		ASSERT(tep->te_alen == -1);
1212*7c478bd9Sstevel@tonic-gate 	}
1213*7c478bd9Sstevel@tonic-gate 
1214*7c478bd9Sstevel@tonic-gate 	id_free(tl_minors, tep->te_minor);
1215*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_credp == NULL);
1216*7c478bd9Sstevel@tonic-gate 
1217*7c478bd9Sstevel@tonic-gate 	if (tep->te_hash_hndl != NULL)
1218*7c478bd9Sstevel@tonic-gate 		mod_hash_cancel(tep->te_addrhash, &tep->te_hash_hndl);
1219*7c478bd9Sstevel@tonic-gate 
1220*7c478bd9Sstevel@tonic-gate 	if (IS_COTS(tep)) {
1221*7c478bd9Sstevel@tonic-gate 		TL_REMOVE_PEER(tep->te_conp);
1222*7c478bd9Sstevel@tonic-gate 		TL_REMOVE_PEER(tep->te_oconp);
1223*7c478bd9Sstevel@tonic-gate 		tl_serializer_refrele(tep->te_ser);
1224*7c478bd9Sstevel@tonic-gate 		tep->te_ser = NULL;
1225*7c478bd9Sstevel@tonic-gate 		ASSERT(tep->te_nicon == 0);
1226*7c478bd9Sstevel@tonic-gate 		ASSERT(list_head(&tep->te_iconp) == NULL);
1227*7c478bd9Sstevel@tonic-gate 	} else {
1228*7c478bd9Sstevel@tonic-gate 		ASSERT(tep->te_lastep == NULL);
1229*7c478bd9Sstevel@tonic-gate 		ASSERT(list_head(&tep->te_flowlist) == NULL);
1230*7c478bd9Sstevel@tonic-gate 		ASSERT(tep->te_flowq == NULL);
1231*7c478bd9Sstevel@tonic-gate 	}
1232*7c478bd9Sstevel@tonic-gate 
1233*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_bufcid == 0);
1234*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_timoutid == 0);
1235*7c478bd9Sstevel@tonic-gate 	bzero(&tep->te_ap, sizeof (tep->te_ap));
1236*7c478bd9Sstevel@tonic-gate 	tep->te_acceptor_id = 0;
1237*7c478bd9Sstevel@tonic-gate 
1238*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_closewait == 0);
1239*7c478bd9Sstevel@tonic-gate 	ASSERT(!tep->te_rsrv_active);
1240*7c478bd9Sstevel@tonic-gate 	ASSERT(!tep->te_wsrv_active);
1241*7c478bd9Sstevel@tonic-gate 	tep->te_closing = 0;
1242*7c478bd9Sstevel@tonic-gate 	tep->te_nowsrv = B_FALSE;
1243*7c478bd9Sstevel@tonic-gate 	tep->te_flag = 0;
1244*7c478bd9Sstevel@tonic-gate 
1245*7c478bd9Sstevel@tonic-gate 	kmem_cache_free(tl_cache, tep);
1246*7c478bd9Sstevel@tonic-gate }
1247*7c478bd9Sstevel@tonic-gate 
1248*7c478bd9Sstevel@tonic-gate /*
1249*7c478bd9Sstevel@tonic-gate  * Allocate/free reference-counted wrappers for serializers.
1250*7c478bd9Sstevel@tonic-gate  */
1251*7c478bd9Sstevel@tonic-gate static tl_serializer_t *
1252*7c478bd9Sstevel@tonic-gate tl_serializer_alloc(int flags)
1253*7c478bd9Sstevel@tonic-gate {
1254*7c478bd9Sstevel@tonic-gate 	tl_serializer_t *s = kmem_alloc(sizeof (tl_serializer_t), flags);
1255*7c478bd9Sstevel@tonic-gate 	serializer_t *ser;
1256*7c478bd9Sstevel@tonic-gate 
1257*7c478bd9Sstevel@tonic-gate 	if (s == NULL)
1258*7c478bd9Sstevel@tonic-gate 		return (NULL);
1259*7c478bd9Sstevel@tonic-gate 
1260*7c478bd9Sstevel@tonic-gate 	ser = serializer_create(flags);
1261*7c478bd9Sstevel@tonic-gate 
1262*7c478bd9Sstevel@tonic-gate 	if (ser == NULL) {
1263*7c478bd9Sstevel@tonic-gate 		kmem_free(s, sizeof (tl_serializer_t));
1264*7c478bd9Sstevel@tonic-gate 		return (NULL);
1265*7c478bd9Sstevel@tonic-gate 	}
1266*7c478bd9Sstevel@tonic-gate 
1267*7c478bd9Sstevel@tonic-gate 	s->ts_refcnt = 1;
1268*7c478bd9Sstevel@tonic-gate 	s->ts_serializer = ser;
1269*7c478bd9Sstevel@tonic-gate 	return (s);
1270*7c478bd9Sstevel@tonic-gate }
1271*7c478bd9Sstevel@tonic-gate 
1272*7c478bd9Sstevel@tonic-gate static void
1273*7c478bd9Sstevel@tonic-gate tl_serializer_refhold(tl_serializer_t *s)
1274*7c478bd9Sstevel@tonic-gate {
1275*7c478bd9Sstevel@tonic-gate 	atomic_add_32(&s->ts_refcnt, 1);
1276*7c478bd9Sstevel@tonic-gate }
1277*7c478bd9Sstevel@tonic-gate 
1278*7c478bd9Sstevel@tonic-gate static void
1279*7c478bd9Sstevel@tonic-gate tl_serializer_refrele(tl_serializer_t *s)
1280*7c478bd9Sstevel@tonic-gate {
1281*7c478bd9Sstevel@tonic-gate 	if (atomic_add_32_nv(&s->ts_refcnt, -1) == 0) {
1282*7c478bd9Sstevel@tonic-gate 		serializer_destroy(s->ts_serializer);
1283*7c478bd9Sstevel@tonic-gate 		kmem_free(s, sizeof (tl_serializer_t));
1284*7c478bd9Sstevel@tonic-gate 	}
1285*7c478bd9Sstevel@tonic-gate }
1286*7c478bd9Sstevel@tonic-gate 
1287*7c478bd9Sstevel@tonic-gate /*
1288*7c478bd9Sstevel@tonic-gate  * Post a request on the endpoint serializer. For COTS transports, keep track
1289*7c478bd9Sstevel@tonic-gate  * of the number of pending requests.
1290*7c478bd9Sstevel@tonic-gate  */
1291*7c478bd9Sstevel@tonic-gate static void
1292*7c478bd9Sstevel@tonic-gate tl_serializer_enter(tl_endpt_t *tep, tlproc_t tlproc, mblk_t *mp)
1293*7c478bd9Sstevel@tonic-gate {
1294*7c478bd9Sstevel@tonic-gate 	if (IS_COTS(tep)) {
1295*7c478bd9Sstevel@tonic-gate 		mutex_enter(&tep->te_ser_lock);
1296*7c478bd9Sstevel@tonic-gate 		tep->te_ser_count++;
1297*7c478bd9Sstevel@tonic-gate 		mutex_exit(&tep->te_ser_lock);
1298*7c478bd9Sstevel@tonic-gate 	}
1299*7c478bd9Sstevel@tonic-gate 	serializer_enter(tep->te_serializer, (srproc_t *)tlproc, mp, tep);
1300*7c478bd9Sstevel@tonic-gate }
1301*7c478bd9Sstevel@tonic-gate 
1302*7c478bd9Sstevel@tonic-gate /*
1303*7c478bd9Sstevel@tonic-gate  * Complete processing of the request on the serializer. For COTS transports,
1304*7c478bd9Sstevel@tonic-gate  * decrement the counter of pending requests.
1305*7c478bd9Sstevel@tonic-gate  */
1306*7c478bd9Sstevel@tonic-gate static void
1307*7c478bd9Sstevel@tonic-gate tl_serializer_exit(tl_endpt_t *tep)
1308*7c478bd9Sstevel@tonic-gate {
1309*7c478bd9Sstevel@tonic-gate 	if (IS_COTS(tep)) {
1310*7c478bd9Sstevel@tonic-gate 		mutex_enter(&tep->te_ser_lock);
1311*7c478bd9Sstevel@tonic-gate 		ASSERT(tep->te_ser_count != 0);
1312*7c478bd9Sstevel@tonic-gate 		tep->te_ser_count--;
1313*7c478bd9Sstevel@tonic-gate 		mutex_exit(&tep->te_ser_lock);
1314*7c478bd9Sstevel@tonic-gate 	}
1315*7c478bd9Sstevel@tonic-gate }
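
/*
 * The usual pattern, as in tl_wput() below, is for the caller to do
 *
 *	tl_refhold(tep);
 *	tl_serializer_enter(tep, tl_proc, mp);
 *
 * and for the tl_*_ser() handler to finish with
 *
 *	tl_serializer_exit(tep);
 *	tl_refrele(tep);
 *
 * so the endpoint reference is held for the lifetime of the serialized job.
 */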
1316*7c478bd9Sstevel@tonic-gate 
1317*7c478bd9Sstevel@tonic-gate /*
1318*7c478bd9Sstevel@tonic-gate  * Hash management functions.
1319*7c478bd9Sstevel@tonic-gate  */
1320*7c478bd9Sstevel@tonic-gate 
1321*7c478bd9Sstevel@tonic-gate /*
1322*7c478bd9Sstevel@tonic-gate  * Return B_TRUE if two addresses are equal, B_FALSE otherwise.
1323*7c478bd9Sstevel@tonic-gate  */
1324*7c478bd9Sstevel@tonic-gate static boolean_t
1325*7c478bd9Sstevel@tonic-gate tl_eqaddr(tl_addr_t *ap1, tl_addr_t *ap2)
1326*7c478bd9Sstevel@tonic-gate {
1327*7c478bd9Sstevel@tonic-gate 	return ((ap1->ta_alen > 0) &&
1328*7c478bd9Sstevel@tonic-gate 	    (ap1->ta_alen == ap2->ta_alen) &&
1329*7c478bd9Sstevel@tonic-gate 	    (ap1->ta_zoneid == ap2->ta_zoneid) &&
1330*7c478bd9Sstevel@tonic-gate 	    (bcmp(ap1->ta_abuf, ap2->ta_abuf, ap1->ta_alen) == 0));
1331*7c478bd9Sstevel@tonic-gate }
1332*7c478bd9Sstevel@tonic-gate 
1333*7c478bd9Sstevel@tonic-gate /*
1334*7c478bd9Sstevel@tonic-gate  * This function is called whenever an endpoint is found in the hash table.
1335*7c478bd9Sstevel@tonic-gate  */
1336*7c478bd9Sstevel@tonic-gate /* ARGSUSED0 */
1337*7c478bd9Sstevel@tonic-gate static void
1338*7c478bd9Sstevel@tonic-gate tl_find_callback(mod_hash_key_t key, mod_hash_val_t val)
1339*7c478bd9Sstevel@tonic-gate {
1340*7c478bd9Sstevel@tonic-gate 	tl_refhold((tl_endpt_t *)val);
1341*7c478bd9Sstevel@tonic-gate }
1342*7c478bd9Sstevel@tonic-gate 
1343*7c478bd9Sstevel@tonic-gate /*
1344*7c478bd9Sstevel@tonic-gate  * Address hash function.
1345*7c478bd9Sstevel@tonic-gate  */
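/*
 * (This is essentially the classic PJW/ELF string hash, seeded with the
 * zone ID so that identical addresses bound in different zones tend to
 * hash to different buckets.)
 */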
1346*7c478bd9Sstevel@tonic-gate /* ARGSUSED */
1347*7c478bd9Sstevel@tonic-gate static uint_t
1348*7c478bd9Sstevel@tonic-gate tl_hash_by_addr(void *hash_data, mod_hash_key_t key)
1349*7c478bd9Sstevel@tonic-gate {
1350*7c478bd9Sstevel@tonic-gate 	tl_addr_t *ap = (tl_addr_t *)key;
1351*7c478bd9Sstevel@tonic-gate 	size_t	len = ap->ta_alen;
1352*7c478bd9Sstevel@tonic-gate 	uchar_t *p = ap->ta_abuf;
1353*7c478bd9Sstevel@tonic-gate 	uint_t i, g;
1354*7c478bd9Sstevel@tonic-gate 
1355*7c478bd9Sstevel@tonic-gate 	ASSERT((len > 0) && (p != NULL));
1356*7c478bd9Sstevel@tonic-gate 
1357*7c478bd9Sstevel@tonic-gate 	for (i = ap->ta_zoneid; len-- != 0; p++) {
1358*7c478bd9Sstevel@tonic-gate 		i = (i << 4) + (*p);
1359*7c478bd9Sstevel@tonic-gate 		if ((g = (i & 0xf0000000U)) != 0) {
1360*7c478bd9Sstevel@tonic-gate 			i ^= (g >> 24);
1361*7c478bd9Sstevel@tonic-gate 			i ^= g;
1362*7c478bd9Sstevel@tonic-gate 		}
1363*7c478bd9Sstevel@tonic-gate 	}
1364*7c478bd9Sstevel@tonic-gate 	return (i);
1365*7c478bd9Sstevel@tonic-gate }
1366*7c478bd9Sstevel@tonic-gate 
1367*7c478bd9Sstevel@tonic-gate /*
1368*7c478bd9Sstevel@tonic-gate  * This function is used by hash lookups. It compares two generic addresses.
1369*7c478bd9Sstevel@tonic-gate  */
1370*7c478bd9Sstevel@tonic-gate static int
1371*7c478bd9Sstevel@tonic-gate tl_hash_cmp_addr(mod_hash_key_t key1, mod_hash_key_t key2)
1372*7c478bd9Sstevel@tonic-gate {
1373*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
1374*7c478bd9Sstevel@tonic-gate 	tl_addr_t *ap1 = (tl_addr_t *)key1;
1375*7c478bd9Sstevel@tonic-gate 	tl_addr_t *ap2 = (tl_addr_t *)key2;
1376*7c478bd9Sstevel@tonic-gate 
1377*7c478bd9Sstevel@tonic-gate 	ASSERT(key1 != NULL);
1378*7c478bd9Sstevel@tonic-gate 	ASSERT(key2 != NULL);
1379*7c478bd9Sstevel@tonic-gate 
1380*7c478bd9Sstevel@tonic-gate 	ASSERT(ap1->ta_abuf != NULL);
1381*7c478bd9Sstevel@tonic-gate 	ASSERT(ap2->ta_abuf != NULL);
1382*7c478bd9Sstevel@tonic-gate 	ASSERT(ap1->ta_alen > 0);
1383*7c478bd9Sstevel@tonic-gate 	ASSERT(ap2->ta_alen > 0);
1384*7c478bd9Sstevel@tonic-gate #endif
1385*7c478bd9Sstevel@tonic-gate 
1386*7c478bd9Sstevel@tonic-gate 	return (! tl_eqaddr((tl_addr_t *)key1, (tl_addr_t *)key2));
1387*7c478bd9Sstevel@tonic-gate }
1388*7c478bd9Sstevel@tonic-gate 
1389*7c478bd9Sstevel@tonic-gate /*
1390*7c478bd9Sstevel@tonic-gate  * Prevent endpoint from closing if possible.
1391*7c478bd9Sstevel@tonic-gate  * Return B_TRUE on success, B_FALSE on failure.
1392*7c478bd9Sstevel@tonic-gate  */
1393*7c478bd9Sstevel@tonic-gate static boolean_t
1394*7c478bd9Sstevel@tonic-gate tl_noclose(tl_endpt_t *tep)
1395*7c478bd9Sstevel@tonic-gate {
1396*7c478bd9Sstevel@tonic-gate 	boolean_t rc = B_FALSE;
1397*7c478bd9Sstevel@tonic-gate 
1398*7c478bd9Sstevel@tonic-gate 	mutex_enter(&tep->te_closelock);
1399*7c478bd9Sstevel@tonic-gate 	if (! tep->te_closing) {
1400*7c478bd9Sstevel@tonic-gate 		ASSERT(tep->te_closewait == 0);
1401*7c478bd9Sstevel@tonic-gate 		tep->te_closewait++;
1402*7c478bd9Sstevel@tonic-gate 		rc = B_TRUE;
1403*7c478bd9Sstevel@tonic-gate 	}
1404*7c478bd9Sstevel@tonic-gate 	mutex_exit(&tep->te_closelock);
1405*7c478bd9Sstevel@tonic-gate 	return (rc);
1406*7c478bd9Sstevel@tonic-gate }
1407*7c478bd9Sstevel@tonic-gate 
1408*7c478bd9Sstevel@tonic-gate /*
1409*7c478bd9Sstevel@tonic-gate  * Allow endpoint to close if needed.
1410*7c478bd9Sstevel@tonic-gate  */
1411*7c478bd9Sstevel@tonic-gate static void
1412*7c478bd9Sstevel@tonic-gate tl_closeok(tl_endpt_t *tep)
1413*7c478bd9Sstevel@tonic-gate {
1414*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_closewait > 0);
1415*7c478bd9Sstevel@tonic-gate 	mutex_enter(&tep->te_closelock);
1416*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_closewait == 1);
1417*7c478bd9Sstevel@tonic-gate 	tep->te_closewait--;
1418*7c478bd9Sstevel@tonic-gate 	cv_signal(&tep->te_closecv);
1419*7c478bd9Sstevel@tonic-gate 	mutex_exit(&tep->te_closelock);
1420*7c478bd9Sstevel@tonic-gate }
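
/*
 * A sketch of the intended usage: a caller that must keep an endpoint from
 * going away underneath it brackets its work with these two calls, e.g.
 *
 *	if (tl_noclose(tep)) {
 *		... operate on tep ...
 *		tl_closeok(tep);
 *	}
 *
 * tl_close() waits on te_closecv until te_closewait drops back to zero.
 */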
1421*7c478bd9Sstevel@tonic-gate 
1422*7c478bd9Sstevel@tonic-gate /*
1423*7c478bd9Sstevel@tonic-gate  * STREAMS open entry point.
1424*7c478bd9Sstevel@tonic-gate  */
1425*7c478bd9Sstevel@tonic-gate /* ARGSUSED */
1426*7c478bd9Sstevel@tonic-gate static int
1427*7c478bd9Sstevel@tonic-gate tl_open(queue_t *rq, dev_t *devp, int oflag, int sflag, cred_t *credp)
1428*7c478bd9Sstevel@tonic-gate {
1429*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *tep;
1430*7c478bd9Sstevel@tonic-gate 	minor_t	    minor = getminor(*devp);
1431*7c478bd9Sstevel@tonic-gate 
1432*7c478bd9Sstevel@tonic-gate 	/*
1433*7c478bd9Sstevel@tonic-gate 	 * The driver is opened directly, so both CLONEOPEN and MODOPEN
1434*7c478bd9Sstevel@tonic-gate 	 * are illegal.
1435*7c478bd9Sstevel@tonic-gate 	 */
1436*7c478bd9Sstevel@tonic-gate 	if ((sflag == CLONEOPEN) || (sflag == MODOPEN))
1437*7c478bd9Sstevel@tonic-gate 		return (ENXIO);
1438*7c478bd9Sstevel@tonic-gate 
1439*7c478bd9Sstevel@tonic-gate 	if (rq->q_ptr != NULL)
1440*7c478bd9Sstevel@tonic-gate 		return (0);
1441*7c478bd9Sstevel@tonic-gate 
1442*7c478bd9Sstevel@tonic-gate 	/* Minor number should specify the mode used for the driver. */
1443*7c478bd9Sstevel@tonic-gate 	if (minor >= TL_UNUSED)
1444*7c478bd9Sstevel@tonic-gate 		return (ENXIO);
1445*7c478bd9Sstevel@tonic-gate 
1446*7c478bd9Sstevel@tonic-gate 	if (oflag & SO_SOCKSTR) {
1447*7c478bd9Sstevel@tonic-gate 		minor |= TL_SOCKET;
1448*7c478bd9Sstevel@tonic-gate 	}
1449*7c478bd9Sstevel@tonic-gate 
1450*7c478bd9Sstevel@tonic-gate 	tep = kmem_cache_alloc(tl_cache, KM_SLEEP);
1451*7c478bd9Sstevel@tonic-gate 	tep->te_refcnt = 1;
1452*7c478bd9Sstevel@tonic-gate 	tep->te_cpid = curproc->p_pid;
1453*7c478bd9Sstevel@tonic-gate 	rq->q_ptr = WR(rq)->q_ptr = tep;
1454*7c478bd9Sstevel@tonic-gate 	tep->te_state = TS_UNBND;
1455*7c478bd9Sstevel@tonic-gate 	tep->te_credp = credp;
1456*7c478bd9Sstevel@tonic-gate 	crhold(credp);
1457*7c478bd9Sstevel@tonic-gate 	tep->te_zoneid = getzoneid();
1458*7c478bd9Sstevel@tonic-gate 
1459*7c478bd9Sstevel@tonic-gate 	tep->te_flag = minor & TL_MINOR_MASK;
1460*7c478bd9Sstevel@tonic-gate 	tep->te_transport = &tl_transports[minor];
1461*7c478bd9Sstevel@tonic-gate 
1462*7c478bd9Sstevel@tonic-gate 	/* Allocate a unique minor number for this instance. */
1463*7c478bd9Sstevel@tonic-gate 	tep->te_minor = (minor_t)id_alloc(tl_minors);
1464*7c478bd9Sstevel@tonic-gate 
1465*7c478bd9Sstevel@tonic-gate 	/* Reserve hash handle for bind(). */
1466*7c478bd9Sstevel@tonic-gate 	(void) mod_hash_reserve(tep->te_addrhash, &tep->te_hash_hndl);
1467*7c478bd9Sstevel@tonic-gate 
1468*7c478bd9Sstevel@tonic-gate 	/* Transport-specific initialization */
1469*7c478bd9Sstevel@tonic-gate 	if (IS_COTS(tep)) {
1470*7c478bd9Sstevel@tonic-gate 		/* Use private serializer */
1471*7c478bd9Sstevel@tonic-gate 		tep->te_ser = tl_serializer_alloc(KM_SLEEP);
1472*7c478bd9Sstevel@tonic-gate 
1473*7c478bd9Sstevel@tonic-gate 		/* Create list for pending connections */
1474*7c478bd9Sstevel@tonic-gate 		list_create(&tep->te_iconp, sizeof (tl_icon_t),
1475*7c478bd9Sstevel@tonic-gate 		    offsetof(tl_icon_t, ti_node));
1476*7c478bd9Sstevel@tonic-gate 		tep->te_qlen = 0;
1477*7c478bd9Sstevel@tonic-gate 		tep->te_nicon = 0;
1478*7c478bd9Sstevel@tonic-gate 		tep->te_oconp = NULL;
1479*7c478bd9Sstevel@tonic-gate 		tep->te_conp = NULL;
1480*7c478bd9Sstevel@tonic-gate 	} else {
1481*7c478bd9Sstevel@tonic-gate 		/* Use shared serializer */
1482*7c478bd9Sstevel@tonic-gate 		tep->te_ser = tep->te_transport->tr_serializer;
1483*7c478bd9Sstevel@tonic-gate 		bzero(&tep->te_flows, sizeof (list_node_t));
1484*7c478bd9Sstevel@tonic-gate 		/* Create list for flow control */
1485*7c478bd9Sstevel@tonic-gate 		list_create(&tep->te_flowlist, sizeof (tl_endpt_t),
1486*7c478bd9Sstevel@tonic-gate 		    offsetof(tl_endpt_t, te_flows));
1487*7c478bd9Sstevel@tonic-gate 		tep->te_flowq = NULL;
1488*7c478bd9Sstevel@tonic-gate 		tep->te_lastep = NULL;
1490*7c478bd9Sstevel@tonic-gate 	}
1491*7c478bd9Sstevel@tonic-gate 
1492*7c478bd9Sstevel@tonic-gate 	/* Initialize endpoint address */
1493*7c478bd9Sstevel@tonic-gate 	if (IS_SOCKET(tep)) {
1494*7c478bd9Sstevel@tonic-gate 		/* Socket-specific address handling. */
1495*7c478bd9Sstevel@tonic-gate 		tep->te_alen = TL_SOUX_ADDRLEN;
1496*7c478bd9Sstevel@tonic-gate 		tep->te_abuf = &tep->te_uxaddr;
1497*7c478bd9Sstevel@tonic-gate 		tep->te_vp = (void *)(uintptr_t)tep->te_minor;
1498*7c478bd9Sstevel@tonic-gate 		tep->te_magic = SOU_MAGIC_IMPLICIT;
1499*7c478bd9Sstevel@tonic-gate 	} else {
1500*7c478bd9Sstevel@tonic-gate 		tep->te_alen = -1;
1501*7c478bd9Sstevel@tonic-gate 		tep->te_abuf = NULL;
1502*7c478bd9Sstevel@tonic-gate 	}
1503*7c478bd9Sstevel@tonic-gate 
1504*7c478bd9Sstevel@tonic-gate 	/* clone the driver */
1505*7c478bd9Sstevel@tonic-gate 	*devp = makedevice(getmajor(*devp), tep->te_minor);
1506*7c478bd9Sstevel@tonic-gate 
1507*7c478bd9Sstevel@tonic-gate 	tep->te_rq = rq;
1508*7c478bd9Sstevel@tonic-gate 	tep->te_wq = WR(rq);
1509*7c478bd9Sstevel@tonic-gate 
1510*7c478bd9Sstevel@tonic-gate #ifdef	_ILP32
1511*7c478bd9Sstevel@tonic-gate 	if (IS_SOCKET(tep))
1512*7c478bd9Sstevel@tonic-gate 		tep->te_acceptor_id = tep->te_minor;
1513*7c478bd9Sstevel@tonic-gate 	else
1514*7c478bd9Sstevel@tonic-gate 		tep->te_acceptor_id = (t_uscalar_t)rq;
1515*7c478bd9Sstevel@tonic-gate #else
1516*7c478bd9Sstevel@tonic-gate 	tep->te_acceptor_id = tep->te_minor;
1517*7c478bd9Sstevel@tonic-gate #endif	/* _ILP32 */
1518*7c478bd9Sstevel@tonic-gate 
1520*7c478bd9Sstevel@tonic-gate 	qprocson(rq);
1521*7c478bd9Sstevel@tonic-gate 
1522*7c478bd9Sstevel@tonic-gate 	/*
1523*7c478bd9Sstevel@tonic-gate 	 * Insert acceptor ID in the hash. The AI hash always sleeps on
1524*7c478bd9Sstevel@tonic-gate 	 * insertion so insertion can't fail.
1525*7c478bd9Sstevel@tonic-gate 	 */
1526*7c478bd9Sstevel@tonic-gate 	(void) mod_hash_insert(tep->te_transport->tr_ai_hash,
1527*7c478bd9Sstevel@tonic-gate 	    (mod_hash_key_t)(uintptr_t)tep->te_acceptor_id,
1528*7c478bd9Sstevel@tonic-gate 	    (mod_hash_val_t)tep);
1529*7c478bd9Sstevel@tonic-gate 
1530*7c478bd9Sstevel@tonic-gate 	return (0);
1531*7c478bd9Sstevel@tonic-gate }
1532*7c478bd9Sstevel@tonic-gate 
1533*7c478bd9Sstevel@tonic-gate /* ARGSUSED1 */
1534*7c478bd9Sstevel@tonic-gate static int
1535*7c478bd9Sstevel@tonic-gate tl_close(queue_t *rq, int flag, cred_t *credp)
1536*7c478bd9Sstevel@tonic-gate {
1537*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *tep = (tl_endpt_t *)rq->q_ptr;
1538*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *elp = NULL;
1539*7c478bd9Sstevel@tonic-gate 	queue_t *wq = tep->te_wq;
1540*7c478bd9Sstevel@tonic-gate 	int rc;
1541*7c478bd9Sstevel@tonic-gate 
1542*7c478bd9Sstevel@tonic-gate 	ASSERT(wq == WR(rq));
1543*7c478bd9Sstevel@tonic-gate 
1544*7c478bd9Sstevel@tonic-gate 	/*
1545*7c478bd9Sstevel@tonic-gate 	 * Remove the endpoint from the acceptor hash.
1546*7c478bd9Sstevel@tonic-gate 	 */
1547*7c478bd9Sstevel@tonic-gate 	rc = mod_hash_remove(tep->te_transport->tr_ai_hash,
1548*7c478bd9Sstevel@tonic-gate 	    (mod_hash_key_t)(uintptr_t)tep->te_acceptor_id,
1549*7c478bd9Sstevel@tonic-gate 	    (mod_hash_val_t *)&elp);
1550*7c478bd9Sstevel@tonic-gate 	ASSERT(rc == 0 && tep == elp);
1551*7c478bd9Sstevel@tonic-gate 	if ((rc != 0) || (tep != elp)) {
1552*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1,
1553*7c478bd9Sstevel@tonic-gate 			    SL_TRACE|SL_ERROR,
1554*7c478bd9Sstevel@tonic-gate 			    "tl_close:inconsistency in AI hash"));
1555*7c478bd9Sstevel@tonic-gate 	}
1556*7c478bd9Sstevel@tonic-gate 
1557*7c478bd9Sstevel@tonic-gate 	/*
1558*7c478bd9Sstevel@tonic-gate 	 * Wait till close is safe, then mark endpoint as closing.
1559*7c478bd9Sstevel@tonic-gate 	 */
1560*7c478bd9Sstevel@tonic-gate 	mutex_enter(&tep->te_closelock);
1561*7c478bd9Sstevel@tonic-gate 	while (tep->te_closewait)
1562*7c478bd9Sstevel@tonic-gate 		cv_wait(&tep->te_closecv, &tep->te_closelock);
1563*7c478bd9Sstevel@tonic-gate 	tep->te_closing = B_TRUE;
1564*7c478bd9Sstevel@tonic-gate 	/*
1565*7c478bd9Sstevel@tonic-gate 	 * Will wait for the serializer part of the close to finish, so set
1566*7c478bd9Sstevel@tonic-gate 	 * te_closewait now.
1567*7c478bd9Sstevel@tonic-gate 	 */
1568*7c478bd9Sstevel@tonic-gate 	tep->te_closewait = 1;
1569*7c478bd9Sstevel@tonic-gate 	tep->te_nowsrv = B_FALSE;
1570*7c478bd9Sstevel@tonic-gate 	mutex_exit(&tep->te_closelock);
1571*7c478bd9Sstevel@tonic-gate 
1572*7c478bd9Sstevel@tonic-gate 	/*
1573*7c478bd9Sstevel@tonic-gate 	 * tl_close_ser() doesn't drop the reference, so no tl_refhold() is
1574*7c478bd9Sstevel@tonic-gate 	 * needed; it is safe because close waits for tl_close_ser() to finish.
1575*7c478bd9Sstevel@tonic-gate 	 */
1576*7c478bd9Sstevel@tonic-gate 	tl_serializer_enter(tep, tl_close_ser, &tep->te_closemp);
1577*7c478bd9Sstevel@tonic-gate 
1578*7c478bd9Sstevel@tonic-gate 	/*
1579*7c478bd9Sstevel@tonic-gate 	 * Wait for the first phase of close to complete before qprocsoff().
1580*7c478bd9Sstevel@tonic-gate 	 */
1581*7c478bd9Sstevel@tonic-gate 	mutex_enter(&tep->te_closelock);
1582*7c478bd9Sstevel@tonic-gate 	while (tep->te_closewait)
1583*7c478bd9Sstevel@tonic-gate 		cv_wait(&tep->te_closecv, &tep->te_closelock);
1584*7c478bd9Sstevel@tonic-gate 	mutex_exit(&tep->te_closelock);
1585*7c478bd9Sstevel@tonic-gate 
1586*7c478bd9Sstevel@tonic-gate 	qprocsoff(rq);
1587*7c478bd9Sstevel@tonic-gate 
1588*7c478bd9Sstevel@tonic-gate 	if (tep->te_bufcid) {
1589*7c478bd9Sstevel@tonic-gate 		qunbufcall(rq, tep->te_bufcid);
1590*7c478bd9Sstevel@tonic-gate 		tep->te_bufcid = 0;
1591*7c478bd9Sstevel@tonic-gate 	}
1592*7c478bd9Sstevel@tonic-gate 	if (tep->te_timoutid) {
1593*7c478bd9Sstevel@tonic-gate 		(void) quntimeout(rq, tep->te_timoutid);
1594*7c478bd9Sstevel@tonic-gate 		tep->te_timoutid = 0;
1595*7c478bd9Sstevel@tonic-gate 	}
1596*7c478bd9Sstevel@tonic-gate 
1597*7c478bd9Sstevel@tonic-gate 	/*
1598*7c478bd9Sstevel@tonic-gate 	 * Finish close behind serializer.
1599*7c478bd9Sstevel@tonic-gate 	 *
1600*7c478bd9Sstevel@tonic-gate 	 * For a CLTS endpoint, increase the refcount and continue close
1601*7c478bd9Sstevel@tonic-gate 	 * processing under serializer protection. This processing may happen
1602*7c478bd9Sstevel@tonic-gate 	 * asynchronously with the completion of tl_close().
1603*7c478bd9Sstevel@tonic-gate 	 *
1604*7c478bd9Sstevel@tonic-gate 	 * For a COTS endpoint, wait before destroying tep since the serializer
1605*7c478bd9Sstevel@tonic-gate 	 * may go away together with tep and we need to destroy the serializer
1606*7c478bd9Sstevel@tonic-gate 	 * outside of serializer context.
1607*7c478bd9Sstevel@tonic-gate 	 */
1608*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_closewait == 0);
1609*7c478bd9Sstevel@tonic-gate 	if (IS_COTS(tep))
1610*7c478bd9Sstevel@tonic-gate 		tep->te_closewait = 1;
1611*7c478bd9Sstevel@tonic-gate 	else
1612*7c478bd9Sstevel@tonic-gate 		tl_refhold(tep);
1613*7c478bd9Sstevel@tonic-gate 
1614*7c478bd9Sstevel@tonic-gate 	tl_serializer_enter(tep, tl_close_finish_ser, &tep->te_closemp);
1615*7c478bd9Sstevel@tonic-gate 
1616*7c478bd9Sstevel@tonic-gate 	/*
1617*7c478bd9Sstevel@tonic-gate 	 * For connection-oriented transports wait for all serializer activity
1618*7c478bd9Sstevel@tonic-gate 	 * to settle down.
1619*7c478bd9Sstevel@tonic-gate 	 */
1620*7c478bd9Sstevel@tonic-gate 	if (IS_COTS(tep)) {
1621*7c478bd9Sstevel@tonic-gate 		mutex_enter(&tep->te_closelock);
1622*7c478bd9Sstevel@tonic-gate 		while (tep->te_closewait)
1623*7c478bd9Sstevel@tonic-gate 			cv_wait(&tep->te_closecv, &tep->te_closelock);
1624*7c478bd9Sstevel@tonic-gate 		mutex_exit(&tep->te_closelock);
1625*7c478bd9Sstevel@tonic-gate 	}
1626*7c478bd9Sstevel@tonic-gate 
1627*7c478bd9Sstevel@tonic-gate 	crfree(tep->te_credp);
1628*7c478bd9Sstevel@tonic-gate 	tep->te_credp = NULL;
1629*7c478bd9Sstevel@tonic-gate 	tep->te_wq = NULL;
1630*7c478bd9Sstevel@tonic-gate 	tl_refrele(tep);
1631*7c478bd9Sstevel@tonic-gate 	/*
1632*7c478bd9Sstevel@tonic-gate 	 * tep is likely to be destroyed now, so we can't reference it any more.
1633*7c478bd9Sstevel@tonic-gate 	 */
1634*7c478bd9Sstevel@tonic-gate 
1635*7c478bd9Sstevel@tonic-gate 	rq->q_ptr = wq->q_ptr = NULL;
1636*7c478bd9Sstevel@tonic-gate 	return (0);
1637*7c478bd9Sstevel@tonic-gate }
1638*7c478bd9Sstevel@tonic-gate 
1639*7c478bd9Sstevel@tonic-gate /*
1640*7c478bd9Sstevel@tonic-gate  * First phase of close processing done behind the serializer.
1641*7c478bd9Sstevel@tonic-gate  *
1642*7c478bd9Sstevel@tonic-gate  * Do not drop the reference at the end - tl_close() needs this reference to
1643*7c478bd9Sstevel@tonic-gate  * stay.
1644*7c478bd9Sstevel@tonic-gate  */
1645*7c478bd9Sstevel@tonic-gate /* ARGSUSED0 */
1646*7c478bd9Sstevel@tonic-gate static void
1647*7c478bd9Sstevel@tonic-gate tl_close_ser(mblk_t *mp, tl_endpt_t *tep)
1648*7c478bd9Sstevel@tonic-gate {
1649*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_closing);
1650*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_closewait == 1);
1651*7c478bd9Sstevel@tonic-gate 	ASSERT(!(tep->te_flag & TL_CLOSE_SER));
1652*7c478bd9Sstevel@tonic-gate 
1653*7c478bd9Sstevel@tonic-gate 	tep->te_flag |= TL_CLOSE_SER;
1654*7c478bd9Sstevel@tonic-gate 
1655*7c478bd9Sstevel@tonic-gate 	/*
1656*7c478bd9Sstevel@tonic-gate 	 * Drain out all messages on the queue, except for TL_TICOTS where the
1657*7c478bd9Sstevel@tonic-gate 	 * abortive release semantics permit discarding of data on close.
1658*7c478bd9Sstevel@tonic-gate 	 */
1659*7c478bd9Sstevel@tonic-gate 	if (tep->te_wq->q_first && (IS_CLTS(tep) || IS_COTSORD(tep))) {
1660*7c478bd9Sstevel@tonic-gate 		tl_wsrv_ser(NULL, tep);
1661*7c478bd9Sstevel@tonic-gate 	}
1662*7c478bd9Sstevel@tonic-gate 
1663*7c478bd9Sstevel@tonic-gate 	/* Remove address from hash table. */
1664*7c478bd9Sstevel@tonic-gate 	tl_addr_unbind(tep);
1665*7c478bd9Sstevel@tonic-gate 	/*
1666*7c478bd9Sstevel@tonic-gate 	 * qprocsoff() gets confused when q->q_next is not NULL on the write
1667*7c478bd9Sstevel@tonic-gate 	 * queue of the driver, so clear these before qprocsoff() is called.
1668*7c478bd9Sstevel@tonic-gate 	 * Also clear q_next for the peer since this queue is going away.
1669*7c478bd9Sstevel@tonic-gate 	 */
1670*7c478bd9Sstevel@tonic-gate 	if (IS_COTS(tep) && !IS_SOCKET(tep)) {
1671*7c478bd9Sstevel@tonic-gate 		tl_endpt_t *peer_tep = tep->te_conp;
1672*7c478bd9Sstevel@tonic-gate 
1673*7c478bd9Sstevel@tonic-gate 		tep->te_wq->q_next = NULL;
1674*7c478bd9Sstevel@tonic-gate 		if ((peer_tep != NULL) && !peer_tep->te_closing)
1675*7c478bd9Sstevel@tonic-gate 			peer_tep->te_wq->q_next = NULL;
1676*7c478bd9Sstevel@tonic-gate 	}
1677*7c478bd9Sstevel@tonic-gate 
1678*7c478bd9Sstevel@tonic-gate 	tep->te_rq = NULL;
1679*7c478bd9Sstevel@tonic-gate 
1680*7c478bd9Sstevel@tonic-gate 	/* wake up tl_close() */
1681*7c478bd9Sstevel@tonic-gate 	tl_closeok(tep);
1682*7c478bd9Sstevel@tonic-gate 	tl_serializer_exit(tep);
1683*7c478bd9Sstevel@tonic-gate }
1684*7c478bd9Sstevel@tonic-gate 
1685*7c478bd9Sstevel@tonic-gate /*
1686*7c478bd9Sstevel@tonic-gate  * Second phase of tl_close(), called from the serializer.
1687*7c478bd9Sstevel@tonic-gate  *
1688*7c478bd9Sstevel@tonic-gate  * For COTS endpoints it should wake up tl_close(); for CLTS endpoints it
1689*7c478bd9Sstevel@tonic-gate  * should drop the reference instead.
1690*7c478bd9Sstevel@tonic-gate  */
1691*7c478bd9Sstevel@tonic-gate /* ARGSUSED0 */
1692*7c478bd9Sstevel@tonic-gate static void
1693*7c478bd9Sstevel@tonic-gate tl_close_finish_ser(mblk_t *mp, tl_endpt_t *tep)
1694*7c478bd9Sstevel@tonic-gate {
1695*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_closing);
1696*7c478bd9Sstevel@tonic-gate 	ASSERT(IMPLY(IS_CLTS(tep), tep->te_closewait == 0));
1697*7c478bd9Sstevel@tonic-gate 	ASSERT(IMPLY(IS_COTS(tep), tep->te_closewait == 1));
1698*7c478bd9Sstevel@tonic-gate 
1699*7c478bd9Sstevel@tonic-gate 	tep->te_state = -1;	/* Uninitialized */
1700*7c478bd9Sstevel@tonic-gate 	if (IS_COTS(tep)) {
1701*7c478bd9Sstevel@tonic-gate 		tl_co_unconnect(tep);
1702*7c478bd9Sstevel@tonic-gate 	} else {
1703*7c478bd9Sstevel@tonic-gate 		/* Connectionless specific cleanup */
1704*7c478bd9Sstevel@tonic-gate 		TL_REMOVE_PEER(tep->te_lastep);
1705*7c478bd9Sstevel@tonic-gate 		/*
1706*7c478bd9Sstevel@tonic-gate 		 * Backenable anybody that is flow controlled waiting for
1707*7c478bd9Sstevel@tonic-gate 		 * this endpoint.
1708*7c478bd9Sstevel@tonic-gate 		 */
1709*7c478bd9Sstevel@tonic-gate 		tl_cl_backenable(tep);
1710*7c478bd9Sstevel@tonic-gate 		if (tep->te_flowq != NULL) {
1711*7c478bd9Sstevel@tonic-gate 			list_remove(&(tep->te_flowq->te_flowlist), tep);
1712*7c478bd9Sstevel@tonic-gate 			tep->te_flowq = NULL;
1713*7c478bd9Sstevel@tonic-gate 		}
1714*7c478bd9Sstevel@tonic-gate 	}
1715*7c478bd9Sstevel@tonic-gate 
1716*7c478bd9Sstevel@tonic-gate 	tl_serializer_exit(tep);
1717*7c478bd9Sstevel@tonic-gate 	if (IS_COTS(tep))
1718*7c478bd9Sstevel@tonic-gate 		tl_closeok(tep);
1719*7c478bd9Sstevel@tonic-gate 	else
1720*7c478bd9Sstevel@tonic-gate 		tl_refrele(tep);
1721*7c478bd9Sstevel@tonic-gate }
1722*7c478bd9Sstevel@tonic-gate 
1723*7c478bd9Sstevel@tonic-gate /*
1724*7c478bd9Sstevel@tonic-gate  * STREAMS write-side put procedure.
1725*7c478bd9Sstevel@tonic-gate  * Enter serializer for most of the processing.
1726*7c478bd9Sstevel@tonic-gate  *
1727*7c478bd9Sstevel@tonic-gate  * The T_CONN_REQ is processed outside of serializer.
1728*7c478bd9Sstevel@tonic-gate  */
1729*7c478bd9Sstevel@tonic-gate static void
1730*7c478bd9Sstevel@tonic-gate tl_wput(queue_t *wq, mblk_t *mp)
1731*7c478bd9Sstevel@tonic-gate {
1732*7c478bd9Sstevel@tonic-gate 	tl_endpt_t		*tep = (tl_endpt_t *)wq->q_ptr;
1733*7c478bd9Sstevel@tonic-gate 	ssize_t			msz = MBLKL(mp);
1734*7c478bd9Sstevel@tonic-gate 	union T_primitives	*prim = (union T_primitives *)mp->b_rptr;
1735*7c478bd9Sstevel@tonic-gate 	tlproc_t		*tl_proc = NULL;
1736*7c478bd9Sstevel@tonic-gate 
1737*7c478bd9Sstevel@tonic-gate 	switch (DB_TYPE(mp)) {
1738*7c478bd9Sstevel@tonic-gate 	case M_DATA:
1739*7c478bd9Sstevel@tonic-gate 		/* Only valid for connection-oriented transports */
1740*7c478bd9Sstevel@tonic-gate 		if (IS_CLTS(tep)) {
1741*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 1,
1742*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
1743*7c478bd9Sstevel@tonic-gate 				"tl_wput:M_DATA invalid for ticlts driver"));
1744*7c478bd9Sstevel@tonic-gate 			tl_merror(wq, mp, EPROTO);
1745*7c478bd9Sstevel@tonic-gate 			break;
1746*7c478bd9Sstevel@tonic-gate 		}
1747*7c478bd9Sstevel@tonic-gate 		tl_proc = tl_wput_data_ser;
1748*7c478bd9Sstevel@tonic-gate 		break;
1749*7c478bd9Sstevel@tonic-gate 
1750*7c478bd9Sstevel@tonic-gate 	case M_IOCTL:
1751*7c478bd9Sstevel@tonic-gate 		switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) {
1752*7c478bd9Sstevel@tonic-gate 		case TL_IOC_CREDOPT:
1753*7c478bd9Sstevel@tonic-gate 			/* FALLTHROUGH */
1754*7c478bd9Sstevel@tonic-gate 		case TL_IOC_UCREDOPT:
1755*7c478bd9Sstevel@tonic-gate 			/*
1756*7c478bd9Sstevel@tonic-gate 			 * Serialize endpoint state change.
1757*7c478bd9Sstevel@tonic-gate 			 */
1758*7c478bd9Sstevel@tonic-gate 			tl_proc = tl_do_ioctl_ser;
1759*7c478bd9Sstevel@tonic-gate 			break;
1760*7c478bd9Sstevel@tonic-gate 
1761*7c478bd9Sstevel@tonic-gate 		default:
1762*7c478bd9Sstevel@tonic-gate 			miocnak(wq, mp, 0, EINVAL);
1763*7c478bd9Sstevel@tonic-gate 			return;
1764*7c478bd9Sstevel@tonic-gate 		}
1765*7c478bd9Sstevel@tonic-gate 		break;
1766*7c478bd9Sstevel@tonic-gate 
1767*7c478bd9Sstevel@tonic-gate 	case M_FLUSH:
1768*7c478bd9Sstevel@tonic-gate 		/*
1769*7c478bd9Sstevel@tonic-gate 		 * do canonical M_FLUSH processing
1770*7c478bd9Sstevel@tonic-gate 		 */
1771*7c478bd9Sstevel@tonic-gate 		if (*mp->b_rptr & FLUSHW) {
1772*7c478bd9Sstevel@tonic-gate 			flushq(wq, FLUSHALL);
1773*7c478bd9Sstevel@tonic-gate 			*mp->b_rptr &= ~FLUSHW;
1774*7c478bd9Sstevel@tonic-gate 		}
1775*7c478bd9Sstevel@tonic-gate 		if (*mp->b_rptr & FLUSHR) {
1776*7c478bd9Sstevel@tonic-gate 			flushq(RD(wq), FLUSHALL);
1777*7c478bd9Sstevel@tonic-gate 			qreply(wq, mp);
1778*7c478bd9Sstevel@tonic-gate 		} else {
1779*7c478bd9Sstevel@tonic-gate 			freemsg(mp);
1780*7c478bd9Sstevel@tonic-gate 		}
1781*7c478bd9Sstevel@tonic-gate 		return;
1782*7c478bd9Sstevel@tonic-gate 
1783*7c478bd9Sstevel@tonic-gate 	case M_PROTO:
1784*7c478bd9Sstevel@tonic-gate 		if (msz < sizeof (prim->type)) {
1785*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 1,
1786*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
1787*7c478bd9Sstevel@tonic-gate 				"tl_wput:M_PROTO data too short"));
1788*7c478bd9Sstevel@tonic-gate 			tl_merror(wq, mp, EPROTO);
1789*7c478bd9Sstevel@tonic-gate 			return;
1790*7c478bd9Sstevel@tonic-gate 		}
1791*7c478bd9Sstevel@tonic-gate 		switch (prim->type) {
1792*7c478bd9Sstevel@tonic-gate 		case T_OPTMGMT_REQ:
1793*7c478bd9Sstevel@tonic-gate 		case T_SVR4_OPTMGMT_REQ:
1794*7c478bd9Sstevel@tonic-gate 			 * Process TPI option management requests immediately
1795*7c478bd9Sstevel@tonic-gate 			 * in the put procedure regardless of in-order
1796*7c478bd9Sstevel@tonic-gate 			 * processing of already queued messages.
1797*7c478bd9Sstevel@tonic-gate 			 * (Note: This driver supports AF_UNIX socket
1798*7c478bd9Sstevel@tonic-gate 			 * implementation.  Without this immediate processing,
1799*7c478bd9Sstevel@tonic-gate 			 * setsockopt() on a socket endpoint would block on
1800*7c478bd9Sstevel@tonic-gate 			 * flow-controlled endpoints, which it should not.
1801*7c478bd9Sstevel@tonic-gate 			 * That is required for successful execution of VSU
1802*7c478bd9Sstevel@tonic-gate 			 * socket tests and is consistent with BSD socket
1803*7c478bd9Sstevel@tonic-gate 			 * behavior.)
1804*7c478bd9Sstevel@tonic-gate 			 */
1805*7c478bd9Sstevel@tonic-gate 			tl_optmgmt(wq, mp);
1806*7c478bd9Sstevel@tonic-gate 			return;
1807*7c478bd9Sstevel@tonic-gate 		case O_T_BIND_REQ:
1808*7c478bd9Sstevel@tonic-gate 		case T_BIND_REQ:
1809*7c478bd9Sstevel@tonic-gate 			tl_proc = tl_bind_ser;
1810*7c478bd9Sstevel@tonic-gate 			break;
1811*7c478bd9Sstevel@tonic-gate 		case T_CONN_REQ:
1812*7c478bd9Sstevel@tonic-gate 			if (IS_CLTS(tep)) {
1813*7c478bd9Sstevel@tonic-gate 				tl_merror(wq, mp, EPROTO);
1814*7c478bd9Sstevel@tonic-gate 				return;
1815*7c478bd9Sstevel@tonic-gate 			}
1816*7c478bd9Sstevel@tonic-gate 			tl_conn_req(wq, mp);
1817*7c478bd9Sstevel@tonic-gate 			return;
1818*7c478bd9Sstevel@tonic-gate 		case T_DATA_REQ:
1819*7c478bd9Sstevel@tonic-gate 		case T_OPTDATA_REQ:
1820*7c478bd9Sstevel@tonic-gate 		case T_EXDATA_REQ:
1821*7c478bd9Sstevel@tonic-gate 		case T_ORDREL_REQ:
1822*7c478bd9Sstevel@tonic-gate 			tl_proc = tl_putq_ser;
1823*7c478bd9Sstevel@tonic-gate 			break;
1824*7c478bd9Sstevel@tonic-gate 		case T_UNITDATA_REQ:
1825*7c478bd9Sstevel@tonic-gate 			if (IS_COTS(tep) ||
1826*7c478bd9Sstevel@tonic-gate 			    (msz < sizeof (struct T_unitdata_req))) {
1827*7c478bd9Sstevel@tonic-gate 				tl_merror(wq, mp, EPROTO);
1828*7c478bd9Sstevel@tonic-gate 				return;
1829*7c478bd9Sstevel@tonic-gate 			}
1830*7c478bd9Sstevel@tonic-gate 			if ((tep->te_state == TS_IDLE) && !wq->q_first) {
1831*7c478bd9Sstevel@tonic-gate 				tl_proc = tl_unitdata_ser;
1832*7c478bd9Sstevel@tonic-gate 			} else {
1833*7c478bd9Sstevel@tonic-gate 				tl_proc = tl_putq_ser;
1834*7c478bd9Sstevel@tonic-gate 			}
1835*7c478bd9Sstevel@tonic-gate 			break;
1836*7c478bd9Sstevel@tonic-gate 		default:
1837*7c478bd9Sstevel@tonic-gate 			/*
1838*7c478bd9Sstevel@tonic-gate 			 * process in service procedure if message already
1839*7c478bd9Sstevel@tonic-gate 			 * queued (maintain in-order processing)
1840*7c478bd9Sstevel@tonic-gate 			 */
1841*7c478bd9Sstevel@tonic-gate 			if (wq->q_first != NULL) {
1842*7c478bd9Sstevel@tonic-gate 				tl_proc = tl_putq_ser;
1843*7c478bd9Sstevel@tonic-gate 			} else {
1844*7c478bd9Sstevel@tonic-gate 				tl_proc = tl_wput_ser;
1845*7c478bd9Sstevel@tonic-gate 			}
1846*7c478bd9Sstevel@tonic-gate 			break;
1847*7c478bd9Sstevel@tonic-gate 		}
1848*7c478bd9Sstevel@tonic-gate 		break;
1849*7c478bd9Sstevel@tonic-gate 
1850*7c478bd9Sstevel@tonic-gate 	case M_PCPROTO:
1851*7c478bd9Sstevel@tonic-gate 		/*
1852*7c478bd9Sstevel@tonic-gate 		 * Check that the message has enough data to figure out TPI
1853*7c478bd9Sstevel@tonic-gate 		 * primitive.
1854*7c478bd9Sstevel@tonic-gate 		 */
1855*7c478bd9Sstevel@tonic-gate 		if (msz < sizeof (prim->type)) {
1856*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 1,
1857*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
1858*7c478bd9Sstevel@tonic-gate 				"tl_wput:M_PCPROTO data too short"));
1859*7c478bd9Sstevel@tonic-gate 			tl_merror(wq, mp, EPROTO);
1860*7c478bd9Sstevel@tonic-gate 			return;
1861*7c478bd9Sstevel@tonic-gate 		}
1862*7c478bd9Sstevel@tonic-gate 		switch (prim->type) {
1863*7c478bd9Sstevel@tonic-gate 		case T_CAPABILITY_REQ:
1864*7c478bd9Sstevel@tonic-gate 			tl_capability_req(mp, tep);
1865*7c478bd9Sstevel@tonic-gate 			return;
1866*7c478bd9Sstevel@tonic-gate 		case T_INFO_REQ:
1867*7c478bd9Sstevel@tonic-gate 			tl_proc = tl_info_req_ser;
1868*7c478bd9Sstevel@tonic-gate 			break;
1869*7c478bd9Sstevel@tonic-gate 		default:
1870*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 1,
1871*7c478bd9Sstevel@tonic-gate 				    SL_TRACE|SL_ERROR,
1872*7c478bd9Sstevel@tonic-gate 				    "tl_wput:unknown TPI msg primitive"));
1873*7c478bd9Sstevel@tonic-gate 			tl_merror(wq, mp, EPROTO);
1874*7c478bd9Sstevel@tonic-gate 			return;
1875*7c478bd9Sstevel@tonic-gate 		}
1876*7c478bd9Sstevel@tonic-gate 		break;
1877*7c478bd9Sstevel@tonic-gate 	default:
1878*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
1879*7c478bd9Sstevel@tonic-gate 			"tl_wput:default:unexpected Streams message"));
1880*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
1881*7c478bd9Sstevel@tonic-gate 		return;
1882*7c478bd9Sstevel@tonic-gate 	}
1883*7c478bd9Sstevel@tonic-gate 
1884*7c478bd9Sstevel@tonic-gate 	/*
1885*7c478bd9Sstevel@tonic-gate 	 * Continue processing via serializer.
1886*7c478bd9Sstevel@tonic-gate 	 */
1887*7c478bd9Sstevel@tonic-gate 	ASSERT(tl_proc != NULL);
1888*7c478bd9Sstevel@tonic-gate 	tl_refhold(tep);
1889*7c478bd9Sstevel@tonic-gate 	tl_serializer_enter(tep, tl_proc, mp);
1890*7c478bd9Sstevel@tonic-gate }
1891*7c478bd9Sstevel@tonic-gate 
1892*7c478bd9Sstevel@tonic-gate /*
1893*7c478bd9Sstevel@tonic-gate  * Place message on the queue while preserving order.
1894*7c478bd9Sstevel@tonic-gate  */
1895*7c478bd9Sstevel@tonic-gate static void
1896*7c478bd9Sstevel@tonic-gate tl_putq_ser(mblk_t *mp, tl_endpt_t *tep)
1897*7c478bd9Sstevel@tonic-gate {
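	/*
	 * A closing endpoint is processed immediately through tl_wput_ser()
	 * rather than queued; otherwise the message is placed on the write
	 * queue and the serializer is simply released.
	 */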
1898*7c478bd9Sstevel@tonic-gate 	if (tep->te_closing) {
1899*7c478bd9Sstevel@tonic-gate 		tl_wput_ser(mp, tep);
1900*7c478bd9Sstevel@tonic-gate 	} else {
1901*7c478bd9Sstevel@tonic-gate 		TL_PUTQ(tep, mp);
1902*7c478bd9Sstevel@tonic-gate 		tl_serializer_exit(tep);
1903*7c478bd9Sstevel@tonic-gate 		tl_refrele(tep);
1904*7c478bd9Sstevel@tonic-gate 	}
1905*7c478bd9Sstevel@tonic-gate 
1906*7c478bd9Sstevel@tonic-gate }
1907*7c478bd9Sstevel@tonic-gate 
1908*7c478bd9Sstevel@tonic-gate static void
1909*7c478bd9Sstevel@tonic-gate tl_wput_common_ser(mblk_t *mp, tl_endpt_t *tep)
1910*7c478bd9Sstevel@tonic-gate {
1911*7c478bd9Sstevel@tonic-gate 	ASSERT((DB_TYPE(mp) == M_DATA) || (DB_TYPE(mp) == M_PROTO));
1912*7c478bd9Sstevel@tonic-gate 
1913*7c478bd9Sstevel@tonic-gate 	switch (DB_TYPE(mp)) {
1914*7c478bd9Sstevel@tonic-gate 	case M_DATA:
1915*7c478bd9Sstevel@tonic-gate 		tl_data(mp, tep);
1916*7c478bd9Sstevel@tonic-gate 		break;
1917*7c478bd9Sstevel@tonic-gate 	case M_PROTO:
1918*7c478bd9Sstevel@tonic-gate 		tl_do_proto(mp, tep);
1919*7c478bd9Sstevel@tonic-gate 		break;
1920*7c478bd9Sstevel@tonic-gate 	default:
1921*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
1922*7c478bd9Sstevel@tonic-gate 		break;
1923*7c478bd9Sstevel@tonic-gate 	}
1924*7c478bd9Sstevel@tonic-gate }
1925*7c478bd9Sstevel@tonic-gate 
1926*7c478bd9Sstevel@tonic-gate /*
1927*7c478bd9Sstevel@tonic-gate  * Write side put procedure called from serializer.
1928*7c478bd9Sstevel@tonic-gate  */
1929*7c478bd9Sstevel@tonic-gate static void
1930*7c478bd9Sstevel@tonic-gate tl_wput_ser(mblk_t *mp, tl_endpt_t *tep)
1931*7c478bd9Sstevel@tonic-gate {
1932*7c478bd9Sstevel@tonic-gate 	tl_wput_common_ser(mp, tep);
1933*7c478bd9Sstevel@tonic-gate 	tl_serializer_exit(tep);
1934*7c478bd9Sstevel@tonic-gate 	tl_refrele(tep);
1935*7c478bd9Sstevel@tonic-gate }
1936*7c478bd9Sstevel@tonic-gate 
1937*7c478bd9Sstevel@tonic-gate /*
1938*7c478bd9Sstevel@tonic-gate  * M_DATA processing. Called from serializer.
1939*7c478bd9Sstevel@tonic-gate  */
1940*7c478bd9Sstevel@tonic-gate static void
1941*7c478bd9Sstevel@tonic-gate tl_wput_data_ser(mblk_t *mp, tl_endpt_t *tep)
1942*7c478bd9Sstevel@tonic-gate {
1943*7c478bd9Sstevel@tonic-gate 	tl_endpt_t	*peer_tep = tep->te_conp;
1944*7c478bd9Sstevel@tonic-gate 	queue_t		*peer_rq;
1945*7c478bd9Sstevel@tonic-gate 
1946*7c478bd9Sstevel@tonic-gate 	ASSERT(DB_TYPE(mp) == M_DATA);
1947*7c478bd9Sstevel@tonic-gate 	ASSERT(IS_COTS(tep));
1948*7c478bd9Sstevel@tonic-gate 
1949*7c478bd9Sstevel@tonic-gate 	ASSERT(IMPLY(peer_tep, tep->te_serializer == peer_tep->te_serializer));
1950*7c478bd9Sstevel@tonic-gate 
1951*7c478bd9Sstevel@tonic-gate 	/*
1952*7c478bd9Sstevel@tonic-gate 	 * fastpath for data. Ignore flow control if tep is closing.
1953*7c478bd9Sstevel@tonic-gate 	 */
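	/*
	 * The test below requires a connected, non-closing peer, both
	 * endpoints in TS_DATA_XFER or TS_WREQ_ORDREL, an empty local write
	 * queue, and a peer read queue that can accept the message (flow
	 * control being bypassed when tep itself is closing).
	 */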
1954*7c478bd9Sstevel@tonic-gate 	if ((peer_tep != NULL) &&
1955*7c478bd9Sstevel@tonic-gate 	    !peer_tep->te_closing &&
1956*7c478bd9Sstevel@tonic-gate 	    ((tep->te_state == TS_DATA_XFER) ||
1957*7c478bd9Sstevel@tonic-gate 		(tep->te_state == TS_WREQ_ORDREL)) &&
1958*7c478bd9Sstevel@tonic-gate 	    (tep->te_wq != NULL) &&
1959*7c478bd9Sstevel@tonic-gate 	    (tep->te_wq->q_first == NULL) &&
1960*7c478bd9Sstevel@tonic-gate 	    ((peer_tep->te_state == TS_DATA_XFER) ||
1961*7c478bd9Sstevel@tonic-gate 		(peer_tep->te_state == TS_WREQ_ORDREL))	&&
1962*7c478bd9Sstevel@tonic-gate 	    ((peer_rq = peer_tep->te_rq) != NULL) &&
1963*7c478bd9Sstevel@tonic-gate 	    (canputnext(peer_rq) || tep->te_closing)) {
1964*7c478bd9Sstevel@tonic-gate 		putnext(peer_rq, mp);
1965*7c478bd9Sstevel@tonic-gate 	} else if (tep->te_closing) {
1966*7c478bd9Sstevel@tonic-gate 		/*
1967*7c478bd9Sstevel@tonic-gate 		 * It is possible that by the time we got here tep started to
1968*7c478bd9Sstevel@tonic-gate 		 * close. If the write queue is not empty, and the state is
1969*7c478bd9Sstevel@tonic-gate 		 * TS_DATA_XFER the data should be delivered in order, so we
1970*7c478bd9Sstevel@tonic-gate 		 * call putq() instead of freeing the data.
1971*7c478bd9Sstevel@tonic-gate 		 */
1972*7c478bd9Sstevel@tonic-gate 		if ((tep->te_wq != NULL) &&
1973*7c478bd9Sstevel@tonic-gate 		    ((tep->te_state == TS_DATA_XFER) ||
1974*7c478bd9Sstevel@tonic-gate 			(tep->te_state == TS_WREQ_ORDREL))) {
1975*7c478bd9Sstevel@tonic-gate 			TL_PUTQ(tep, mp);
1976*7c478bd9Sstevel@tonic-gate 		} else {
1977*7c478bd9Sstevel@tonic-gate 			freemsg(mp);
1978*7c478bd9Sstevel@tonic-gate 		}
1979*7c478bd9Sstevel@tonic-gate 	} else {
1980*7c478bd9Sstevel@tonic-gate 		TL_PUTQ(tep, mp);
1981*7c478bd9Sstevel@tonic-gate 	}
1982*7c478bd9Sstevel@tonic-gate 
1983*7c478bd9Sstevel@tonic-gate 	tl_serializer_exit(tep);
1984*7c478bd9Sstevel@tonic-gate 	tl_refrele(tep);
1985*7c478bd9Sstevel@tonic-gate }
1986*7c478bd9Sstevel@tonic-gate 
1987*7c478bd9Sstevel@tonic-gate /*
1988*7c478bd9Sstevel@tonic-gate  * Write side service routine.
1989*7c478bd9Sstevel@tonic-gate  *
1990*7c478bd9Sstevel@tonic-gate  * All actual processing happens within serializer which is entered
1991*7c478bd9Sstevel@tonic-gate  * synchronously. It is possible that by the time tl_wsrv() wakes up, some new
1992*7c478bd9Sstevel@tonic-gate  * messages that need processing may have arrived, so tl_wsrv repeats until
1993*7c478bd9Sstevel@tonic-gate  * queue is empty or te_nowsrv is set.
1994*7c478bd9Sstevel@tonic-gate  */
1995*7c478bd9Sstevel@tonic-gate static void
1996*7c478bd9Sstevel@tonic-gate tl_wsrv(queue_t *wq)
1997*7c478bd9Sstevel@tonic-gate {
1998*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
1999*7c478bd9Sstevel@tonic-gate 
2000*7c478bd9Sstevel@tonic-gate 	while ((wq->q_first != NULL) && !tep->te_nowsrv) {
2001*7c478bd9Sstevel@tonic-gate 		mutex_enter(&tep->te_srv_lock);
2002*7c478bd9Sstevel@tonic-gate 		ASSERT(tep->te_wsrv_active == B_FALSE);
2003*7c478bd9Sstevel@tonic-gate 		tep->te_wsrv_active = B_TRUE;
2004*7c478bd9Sstevel@tonic-gate 		mutex_exit(&tep->te_srv_lock);
2005*7c478bd9Sstevel@tonic-gate 
2006*7c478bd9Sstevel@tonic-gate 		tl_serializer_enter(tep, tl_wsrv_ser, &tep->te_wsrvmp);
2007*7c478bd9Sstevel@tonic-gate 
2008*7c478bd9Sstevel@tonic-gate 		/*
2009*7c478bd9Sstevel@tonic-gate 		 * Wait for serializer job to complete.
2010*7c478bd9Sstevel@tonic-gate 		 */
2011*7c478bd9Sstevel@tonic-gate 		mutex_enter(&tep->te_srv_lock);
2012*7c478bd9Sstevel@tonic-gate 		while (tep->te_wsrv_active) {
2013*7c478bd9Sstevel@tonic-gate 			cv_wait(&tep->te_srv_cv, &tep->te_srv_lock);
2014*7c478bd9Sstevel@tonic-gate 		}
2015*7c478bd9Sstevel@tonic-gate 		cv_signal(&tep->te_srv_cv);
2016*7c478bd9Sstevel@tonic-gate 		mutex_exit(&tep->te_srv_lock);
2017*7c478bd9Sstevel@tonic-gate 	}
2018*7c478bd9Sstevel@tonic-gate }
2019*7c478bd9Sstevel@tonic-gate 
2020*7c478bd9Sstevel@tonic-gate /*
2021*7c478bd9Sstevel@tonic-gate  * Serialized write side processing of the STREAMS queue.
2022*7c478bd9Sstevel@tonic-gate  * May be called either from tl_wsrv() or from tl_close() in which case ser_mp
2023*7c478bd9Sstevel@tonic-gate  * is NULL.
2024*7c478bd9Sstevel@tonic-gate  */
2025*7c478bd9Sstevel@tonic-gate static void
2026*7c478bd9Sstevel@tonic-gate tl_wsrv_ser(mblk_t *ser_mp, tl_endpt_t *tep)
2027*7c478bd9Sstevel@tonic-gate {
2028*7c478bd9Sstevel@tonic-gate 	mblk_t *mp;
2029*7c478bd9Sstevel@tonic-gate 	queue_t *wq = tep->te_wq;
2030*7c478bd9Sstevel@tonic-gate 
2031*7c478bd9Sstevel@tonic-gate 	ASSERT(wq != NULL);
2032*7c478bd9Sstevel@tonic-gate 	while (!tep->te_nowsrv && (mp = getq(wq)) != NULL) {
2033*7c478bd9Sstevel@tonic-gate 		tl_wput_common_ser(mp, tep);
2034*7c478bd9Sstevel@tonic-gate 	}
2035*7c478bd9Sstevel@tonic-gate 
2036*7c478bd9Sstevel@tonic-gate 	/*
2037*7c478bd9Sstevel@tonic-gate 	 * Wakeup service routine unless called from close.
2038*7c478bd9Sstevel@tonic-gate 	 * If ser_mp is specified, the caller is tl_wsrv().
2039*7c478bd9Sstevel@tonic-gate 	 * Otherwise, the caller is tl_close_ser(). Since tl_close_ser() doesn't
2040*7c478bd9Sstevel@tonic-gate 	 * call tl_serializer_enter() before calling tl_wsrv_ser(), there should
2041*7c478bd9Sstevel@tonic-gate 	 * be no matching tl_serializer_exit() in this case.
2042*7c478bd9Sstevel@tonic-gate 	 * Also, there is no need to wakeup anyone since tl_close_ser() is not
2043*7c478bd9Sstevel@tonic-gate 	 * waiting on te_srv_cv.
2044*7c478bd9Sstevel@tonic-gate 	 */
2045*7c478bd9Sstevel@tonic-gate 	if (ser_mp != NULL) {
2046*7c478bd9Sstevel@tonic-gate 		/*
2047*7c478bd9Sstevel@tonic-gate 		 * We are called from tl_wsrv.
2048*7c478bd9Sstevel@tonic-gate 		 */
2049*7c478bd9Sstevel@tonic-gate 		mutex_enter(&tep->te_srv_lock);
2050*7c478bd9Sstevel@tonic-gate 		ASSERT(tep->te_wsrv_active);
2051*7c478bd9Sstevel@tonic-gate 		tep->te_wsrv_active = B_FALSE;
2052*7c478bd9Sstevel@tonic-gate 		cv_signal(&tep->te_srv_cv);
2053*7c478bd9Sstevel@tonic-gate 		mutex_exit(&tep->te_srv_lock);
2054*7c478bd9Sstevel@tonic-gate 		tl_serializer_exit(tep);
2055*7c478bd9Sstevel@tonic-gate 	}
2056*7c478bd9Sstevel@tonic-gate }
2057*7c478bd9Sstevel@tonic-gate 
2058*7c478bd9Sstevel@tonic-gate /*
2059*7c478bd9Sstevel@tonic-gate  * Called when the stream is backenabled. Enter serializer and qenable everyone
2060*7c478bd9Sstevel@tonic-gate  * flow controlled by tep.
2061*7c478bd9Sstevel@tonic-gate  *
2062*7c478bd9Sstevel@tonic-gate  * NOTE: The service routine should enter serializer synchronously. Otherwise it
2063*7c478bd9Sstevel@tonic-gate  * is possible that two instances of tl_rsrv will be running reusing the same
2064*7c478bd9Sstevel@tonic-gate  * rsrv mblk.
2065*7c478bd9Sstevel@tonic-gate  */
2066*7c478bd9Sstevel@tonic-gate static void
2067*7c478bd9Sstevel@tonic-gate tl_rsrv(queue_t *rq)
2068*7c478bd9Sstevel@tonic-gate {
2069*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *tep = (tl_endpt_t *)rq->q_ptr;
2070*7c478bd9Sstevel@tonic-gate 
2071*7c478bd9Sstevel@tonic-gate 	ASSERT(rq->q_first == NULL);
2072*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_rsrv_active == 0);
2073*7c478bd9Sstevel@tonic-gate 
2074*7c478bd9Sstevel@tonic-gate 	tep->te_rsrv_active = B_TRUE;
2075*7c478bd9Sstevel@tonic-gate 	tl_serializer_enter(tep, tl_rsrv_ser, &tep->te_rsrvmp);
2076*7c478bd9Sstevel@tonic-gate 	/*
2077*7c478bd9Sstevel@tonic-gate 	 * Wait for serializer job to complete.
2078*7c478bd9Sstevel@tonic-gate 	 */
2079*7c478bd9Sstevel@tonic-gate 	mutex_enter(&tep->te_srv_lock);
2080*7c478bd9Sstevel@tonic-gate 	while (tep->te_rsrv_active) {
2081*7c478bd9Sstevel@tonic-gate 		cv_wait(&tep->te_srv_cv, &tep->te_srv_lock);
2082*7c478bd9Sstevel@tonic-gate 	}
2083*7c478bd9Sstevel@tonic-gate 	cv_signal(&tep->te_srv_cv);
2084*7c478bd9Sstevel@tonic-gate 	mutex_exit(&tep->te_srv_lock);
2085*7c478bd9Sstevel@tonic-gate }
2086*7c478bd9Sstevel@tonic-gate 
2087*7c478bd9Sstevel@tonic-gate /* ARGSUSED */
2088*7c478bd9Sstevel@tonic-gate static void
2089*7c478bd9Sstevel@tonic-gate tl_rsrv_ser(mblk_t *mp, tl_endpt_t *tep)
2090*7c478bd9Sstevel@tonic-gate {
2091*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *peer_tep;
2092*7c478bd9Sstevel@tonic-gate 
2093*7c478bd9Sstevel@tonic-gate 	if (IS_CLTS(tep) && tep->te_state == TS_IDLE) {
2094*7c478bd9Sstevel@tonic-gate 		tl_cl_backenable(tep);
2095*7c478bd9Sstevel@tonic-gate 	} else if (
2096*7c478bd9Sstevel@tonic-gate 		IS_COTS(tep) &&
2097*7c478bd9Sstevel@tonic-gate 		    ((peer_tep = tep->te_conp) != NULL) &&
2098*7c478bd9Sstevel@tonic-gate 		    !peer_tep->te_closing &&
2099*7c478bd9Sstevel@tonic-gate 		    ((tep->te_state == TS_DATA_XFER) ||
2100*7c478bd9Sstevel@tonic-gate 			(tep->te_state == TS_WIND_ORDREL)||
2101*7c478bd9Sstevel@tonic-gate 			(tep->te_state == TS_WREQ_ORDREL))) {
2102*7c478bd9Sstevel@tonic-gate 		TL_QENABLE(peer_tep);
2103*7c478bd9Sstevel@tonic-gate 	}
2104*7c478bd9Sstevel@tonic-gate 
2105*7c478bd9Sstevel@tonic-gate 	/*
2106*7c478bd9Sstevel@tonic-gate 	 * Wakeup read side service routine.
2107*7c478bd9Sstevel@tonic-gate 	 */
2108*7c478bd9Sstevel@tonic-gate 	mutex_enter(&tep->te_srv_lock);
2109*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_rsrv_active);
2110*7c478bd9Sstevel@tonic-gate 	tep->te_rsrv_active = B_FALSE;
2111*7c478bd9Sstevel@tonic-gate 	cv_signal(&tep->te_srv_cv);
2112*7c478bd9Sstevel@tonic-gate 	mutex_exit(&tep->te_srv_lock);
2113*7c478bd9Sstevel@tonic-gate 	tl_serializer_exit(tep);
2114*7c478bd9Sstevel@tonic-gate }
2115*7c478bd9Sstevel@tonic-gate 
2116*7c478bd9Sstevel@tonic-gate /*
2117*7c478bd9Sstevel@tonic-gate  * process M_PROTO messages. Always called from serializer.
2118*7c478bd9Sstevel@tonic-gate  */
2119*7c478bd9Sstevel@tonic-gate static void
2120*7c478bd9Sstevel@tonic-gate tl_do_proto(mblk_t *mp, tl_endpt_t *tep)
2121*7c478bd9Sstevel@tonic-gate {
2122*7c478bd9Sstevel@tonic-gate 	ssize_t			msz = MBLKL(mp);
2123*7c478bd9Sstevel@tonic-gate 	union T_primitives	*prim = (union T_primitives *)mp->b_rptr;
2124*7c478bd9Sstevel@tonic-gate 
2125*7c478bd9Sstevel@tonic-gate 	/* Message size was validated by tl_wput(). */
2126*7c478bd9Sstevel@tonic-gate 	ASSERT(msz >= sizeof (prim->type));
2127*7c478bd9Sstevel@tonic-gate 
2128*7c478bd9Sstevel@tonic-gate 	switch (prim->type) {
2129*7c478bd9Sstevel@tonic-gate 	case T_UNBIND_REQ:
2130*7c478bd9Sstevel@tonic-gate 		tl_unbind(mp, tep);
2131*7c478bd9Sstevel@tonic-gate 		break;
2132*7c478bd9Sstevel@tonic-gate 
2133*7c478bd9Sstevel@tonic-gate 	case T_ADDR_REQ:
2134*7c478bd9Sstevel@tonic-gate 		tl_addr_req(mp, tep);
2135*7c478bd9Sstevel@tonic-gate 		break;
2136*7c478bd9Sstevel@tonic-gate 
2137*7c478bd9Sstevel@tonic-gate 	case O_T_CONN_RES:
2138*7c478bd9Sstevel@tonic-gate 	case T_CONN_RES:
2139*7c478bd9Sstevel@tonic-gate 		if (IS_CLTS(tep)) {
2140*7c478bd9Sstevel@tonic-gate 			tl_merror(tep->te_wq, mp, EPROTO);
2141*7c478bd9Sstevel@tonic-gate 			break;
2142*7c478bd9Sstevel@tonic-gate 		}
2143*7c478bd9Sstevel@tonic-gate 		tl_conn_res(mp, tep);
2144*7c478bd9Sstevel@tonic-gate 		break;
2145*7c478bd9Sstevel@tonic-gate 
2146*7c478bd9Sstevel@tonic-gate 	case T_DISCON_REQ:
2147*7c478bd9Sstevel@tonic-gate 		if (IS_CLTS(tep)) {
2148*7c478bd9Sstevel@tonic-gate 			tl_merror(tep->te_wq, mp, EPROTO);
2149*7c478bd9Sstevel@tonic-gate 			break;
2150*7c478bd9Sstevel@tonic-gate 		}
2151*7c478bd9Sstevel@tonic-gate 		tl_discon_req(mp, tep);
2152*7c478bd9Sstevel@tonic-gate 		break;
2153*7c478bd9Sstevel@tonic-gate 
2154*7c478bd9Sstevel@tonic-gate 	case T_DATA_REQ:
2155*7c478bd9Sstevel@tonic-gate 		if (IS_CLTS(tep)) {
2156*7c478bd9Sstevel@tonic-gate 			tl_merror(tep->te_wq, mp, EPROTO);
2157*7c478bd9Sstevel@tonic-gate 			break;
2158*7c478bd9Sstevel@tonic-gate 		}
2159*7c478bd9Sstevel@tonic-gate 		tl_data(mp, tep);
2160*7c478bd9Sstevel@tonic-gate 		break;
2161*7c478bd9Sstevel@tonic-gate 
2162*7c478bd9Sstevel@tonic-gate 	case T_OPTDATA_REQ:
2163*7c478bd9Sstevel@tonic-gate 		if (IS_CLTS(tep)) {
2164*7c478bd9Sstevel@tonic-gate 			tl_merror(tep->te_wq, mp, EPROTO);
2165*7c478bd9Sstevel@tonic-gate 			break;
2166*7c478bd9Sstevel@tonic-gate 		}
2167*7c478bd9Sstevel@tonic-gate 		tl_data(mp, tep);
2168*7c478bd9Sstevel@tonic-gate 		break;
2169*7c478bd9Sstevel@tonic-gate 
2170*7c478bd9Sstevel@tonic-gate 	case T_EXDATA_REQ:
2171*7c478bd9Sstevel@tonic-gate 		if (IS_CLTS(tep)) {
2172*7c478bd9Sstevel@tonic-gate 			tl_merror(tep->te_wq, mp, EPROTO);
2173*7c478bd9Sstevel@tonic-gate 			break;
2174*7c478bd9Sstevel@tonic-gate 		}
2175*7c478bd9Sstevel@tonic-gate 		tl_exdata(mp, tep);
2176*7c478bd9Sstevel@tonic-gate 		break;
2177*7c478bd9Sstevel@tonic-gate 
2178*7c478bd9Sstevel@tonic-gate 	case T_ORDREL_REQ:
2179*7c478bd9Sstevel@tonic-gate 		if (! IS_COTSORD(tep)) {
2180*7c478bd9Sstevel@tonic-gate 			tl_merror(tep->te_wq, mp, EPROTO);
2181*7c478bd9Sstevel@tonic-gate 			break;
2182*7c478bd9Sstevel@tonic-gate 		}
2183*7c478bd9Sstevel@tonic-gate 		tl_ordrel(mp, tep);
2184*7c478bd9Sstevel@tonic-gate 		break;
2185*7c478bd9Sstevel@tonic-gate 
2186*7c478bd9Sstevel@tonic-gate 	case T_UNITDATA_REQ:
2187*7c478bd9Sstevel@tonic-gate 		if (IS_COTS(tep)) {
2188*7c478bd9Sstevel@tonic-gate 			tl_merror(tep->te_wq, mp, EPROTO);
2189*7c478bd9Sstevel@tonic-gate 			break;
2190*7c478bd9Sstevel@tonic-gate 		}
2191*7c478bd9Sstevel@tonic-gate 		tl_unitdata(mp, tep);
2192*7c478bd9Sstevel@tonic-gate 		break;
2193*7c478bd9Sstevel@tonic-gate 
2194*7c478bd9Sstevel@tonic-gate 	default:
2195*7c478bd9Sstevel@tonic-gate 		tl_merror(tep->te_wq, mp, EPROTO);
2196*7c478bd9Sstevel@tonic-gate 		break;
2197*7c478bd9Sstevel@tonic-gate 	}
2198*7c478bd9Sstevel@tonic-gate }
2199*7c478bd9Sstevel@tonic-gate 
2200*7c478bd9Sstevel@tonic-gate /*
2201*7c478bd9Sstevel@tonic-gate  * Process ioctl from serializer.
2202*7c478bd9Sstevel@tonic-gate  * This is a wrapper around tl_do_ioctl().
2203*7c478bd9Sstevel@tonic-gate  */
2204*7c478bd9Sstevel@tonic-gate static void
2205*7c478bd9Sstevel@tonic-gate tl_do_ioctl_ser(mblk_t *mp, tl_endpt_t *tep)
2206*7c478bd9Sstevel@tonic-gate {
2207*7c478bd9Sstevel@tonic-gate 	if (! tep->te_closing)
2208*7c478bd9Sstevel@tonic-gate 		tl_do_ioctl(mp, tep);
2209*7c478bd9Sstevel@tonic-gate 	else
2210*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
2211*7c478bd9Sstevel@tonic-gate 
2212*7c478bd9Sstevel@tonic-gate 	tl_serializer_exit(tep);
2213*7c478bd9Sstevel@tonic-gate 	tl_refrele(tep);
2214*7c478bd9Sstevel@tonic-gate }
2215*7c478bd9Sstevel@tonic-gate 
2216*7c478bd9Sstevel@tonic-gate static void
2217*7c478bd9Sstevel@tonic-gate tl_do_ioctl(mblk_t *mp, tl_endpt_t *tep)
2218*7c478bd9Sstevel@tonic-gate {
2219*7c478bd9Sstevel@tonic-gate 	struct iocblk *iocbp = (struct iocblk *)mp->b_rptr;
2220*7c478bd9Sstevel@tonic-gate 	int cmd = iocbp->ioc_cmd;
2221*7c478bd9Sstevel@tonic-gate 	queue_t *wq = tep->te_wq;
2222*7c478bd9Sstevel@tonic-gate 	int error;
2223*7c478bd9Sstevel@tonic-gate 	int thisopt, otheropt;
2224*7c478bd9Sstevel@tonic-gate 
2225*7c478bd9Sstevel@tonic-gate 	ASSERT((cmd == TL_IOC_CREDOPT) || (cmd == TL_IOC_UCREDOPT));
2226*7c478bd9Sstevel@tonic-gate 
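	/*
	 * Note the unusual switch layout below: TL_IOC_UCREDOPT jumps into
	 * the else branch of the TL_IOC_CREDOPT case via its case label, so
	 * both commands share the code that follows while selecting opposite
	 * thisopt/otheropt values.
	 */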
2227*7c478bd9Sstevel@tonic-gate 	switch (cmd) {
2228*7c478bd9Sstevel@tonic-gate 	case TL_IOC_CREDOPT:
2229*7c478bd9Sstevel@tonic-gate 		if (cmd == TL_IOC_CREDOPT) {
2230*7c478bd9Sstevel@tonic-gate 			thisopt = TL_SETCRED;
2231*7c478bd9Sstevel@tonic-gate 			otheropt = TL_SETUCRED;
2232*7c478bd9Sstevel@tonic-gate 		} else {
2233*7c478bd9Sstevel@tonic-gate 			/* FALLTHROUGH */
2234*7c478bd9Sstevel@tonic-gate 	case TL_IOC_UCREDOPT:
2235*7c478bd9Sstevel@tonic-gate 			thisopt = TL_SETUCRED;
2236*7c478bd9Sstevel@tonic-gate 			otheropt = TL_SETCRED;
2237*7c478bd9Sstevel@tonic-gate 		}
2238*7c478bd9Sstevel@tonic-gate 		/*
2239*7c478bd9Sstevel@tonic-gate 		 * The credentials passing does not apply to sockets.
2240*7c478bd9Sstevel@tonic-gate 		 * Only one of the cred options can be set at a given time.
2241*7c478bd9Sstevel@tonic-gate 		 */
2242*7c478bd9Sstevel@tonic-gate 		if (IS_SOCKET(tep) || (tep->te_flag & otheropt)) {
2243*7c478bd9Sstevel@tonic-gate 			miocnak(wq, mp, 0, EINVAL);
2244*7c478bd9Sstevel@tonic-gate 			return;
2245*7c478bd9Sstevel@tonic-gate 		}
2246*7c478bd9Sstevel@tonic-gate 
2247*7c478bd9Sstevel@tonic-gate 		/*
2248*7c478bd9Sstevel@tonic-gate 		 * Turn on generation of credential options for
2249*7c478bd9Sstevel@tonic-gate 		 * T_conn_req, T_conn_con, T_unitdata_ind.
2250*7c478bd9Sstevel@tonic-gate 		 */
2251*7c478bd9Sstevel@tonic-gate 		error = miocpullup(mp, sizeof (uint32_t));
2252*7c478bd9Sstevel@tonic-gate 		if (error != 0) {
2253*7c478bd9Sstevel@tonic-gate 			miocnak(wq, mp, 0, error);
2254*7c478bd9Sstevel@tonic-gate 			return;
2255*7c478bd9Sstevel@tonic-gate 		}
2256*7c478bd9Sstevel@tonic-gate 		if (!IS_P2ALIGNED(mp->b_cont->b_rptr, sizeof (uint32_t))) {
2257*7c478bd9Sstevel@tonic-gate 			miocnak(wq, mp, 0, EINVAL);
2258*7c478bd9Sstevel@tonic-gate 			return;
2259*7c478bd9Sstevel@tonic-gate 		}
2260*7c478bd9Sstevel@tonic-gate 
2261*7c478bd9Sstevel@tonic-gate 		if (*(uint32_t *)mp->b_cont->b_rptr)
2262*7c478bd9Sstevel@tonic-gate 			tep->te_flag |= thisopt;
2263*7c478bd9Sstevel@tonic-gate 		else
2264*7c478bd9Sstevel@tonic-gate 			tep->te_flag &= ~thisopt;
2265*7c478bd9Sstevel@tonic-gate 
2266*7c478bd9Sstevel@tonic-gate 		miocack(wq, mp, 0, 0);
2267*7c478bd9Sstevel@tonic-gate 		break;
2268*7c478bd9Sstevel@tonic-gate 
2269*7c478bd9Sstevel@tonic-gate 	default:
2270*7c478bd9Sstevel@tonic-gate 		/* Should not be here */
2271*7c478bd9Sstevel@tonic-gate 		miocnak(wq, mp, 0, EINVAL);
2272*7c478bd9Sstevel@tonic-gate 		break;
2273*7c478bd9Sstevel@tonic-gate 	}
2274*7c478bd9Sstevel@tonic-gate }
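
/*
 * Illustrative sketch only (not part of the driver): a user-level TLI/XTI
 * application could toggle ucred passing with a standard STREAMS I_STR
 * ioctl carrying the uint32_t payload that tl_do_ioctl() expects, assuming
 * "fd" is a descriptor open on one of the tl transports (e.g. ticotsord):
 *
 *	uint32_t on = 1;
 *	struct strioctl ic;
 *
 *	ic.ic_cmd = TL_IOC_UCREDOPT;
 *	ic.ic_timout = 0;
 *	ic.ic_len = sizeof (on);
 *	ic.ic_dp = (char *)&on;
 *	if (ioctl(fd, I_STR, &ic) < 0)
 *		perror("TL_IOC_UCREDOPT");
 *
 * A zero payload clears the option again, matching the flag handling above.
 */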
2275*7c478bd9Sstevel@tonic-gate 
2276*7c478bd9Sstevel@tonic-gate 
2277*7c478bd9Sstevel@tonic-gate /*
2278*7c478bd9Sstevel@tonic-gate  * send T_ERROR_ACK
2279*7c478bd9Sstevel@tonic-gate  * Note: assumes enough memory or caller passed big enough mp
2280*7c478bd9Sstevel@tonic-gate  *	- no recovery from allocb failures
2281*7c478bd9Sstevel@tonic-gate  */
2282*7c478bd9Sstevel@tonic-gate 
2283*7c478bd9Sstevel@tonic-gate static void
2284*7c478bd9Sstevel@tonic-gate tl_error_ack(queue_t *wq, mblk_t *mp, t_scalar_t tli_err,
2285*7c478bd9Sstevel@tonic-gate     t_scalar_t unix_err, t_scalar_t type)
2286*7c478bd9Sstevel@tonic-gate {
2287*7c478bd9Sstevel@tonic-gate 	struct T_error_ack *err_ack;
2288*7c478bd9Sstevel@tonic-gate 	mblk_t *ackmp = tpi_ack_alloc(mp, sizeof (struct T_error_ack),
2289*7c478bd9Sstevel@tonic-gate 	    M_PCPROTO, T_ERROR_ACK);
2290*7c478bd9Sstevel@tonic-gate 
2291*7c478bd9Sstevel@tonic-gate 	if (ackmp == NULL) {
2292*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, 0, 1, SL_TRACE|SL_ERROR,
2293*7c478bd9Sstevel@tonic-gate 			    "tl_error_ack:out of mblk memory"));
2294*7c478bd9Sstevel@tonic-gate 		tl_merror(wq, NULL, ENOSR);
2295*7c478bd9Sstevel@tonic-gate 		return;
2296*7c478bd9Sstevel@tonic-gate 	}
2297*7c478bd9Sstevel@tonic-gate 	err_ack = (struct T_error_ack *)ackmp->b_rptr;
2298*7c478bd9Sstevel@tonic-gate 	err_ack->ERROR_prim = type;
2299*7c478bd9Sstevel@tonic-gate 	err_ack->TLI_error = tli_err;
2300*7c478bd9Sstevel@tonic-gate 	err_ack->UNIX_error = unix_err;
2301*7c478bd9Sstevel@tonic-gate 
2302*7c478bd9Sstevel@tonic-gate 	/*
2303*7c478bd9Sstevel@tonic-gate 	 * send error ack message
2304*7c478bd9Sstevel@tonic-gate 	 */
2305*7c478bd9Sstevel@tonic-gate 	qreply(wq, ackmp);
2306*7c478bd9Sstevel@tonic-gate }
2307*7c478bd9Sstevel@tonic-gate 
2308*7c478bd9Sstevel@tonic-gate 
2309*7c478bd9Sstevel@tonic-gate 
2310*7c478bd9Sstevel@tonic-gate /*
2311*7c478bd9Sstevel@tonic-gate  * send T_OK_ACK
2312*7c478bd9Sstevel@tonic-gate  * Note: assumes enough memory or caller passed big enough mp
2313*7c478bd9Sstevel@tonic-gate  *	- no recovery from allocb failures
2314*7c478bd9Sstevel@tonic-gate  */
2315*7c478bd9Sstevel@tonic-gate static void
2316*7c478bd9Sstevel@tonic-gate tl_ok_ack(queue_t *wq, mblk_t *mp, t_scalar_t type)
2317*7c478bd9Sstevel@tonic-gate {
2318*7c478bd9Sstevel@tonic-gate 	struct T_ok_ack *ok_ack;
2319*7c478bd9Sstevel@tonic-gate 	mblk_t *ackmp = tpi_ack_alloc(mp, sizeof (struct T_ok_ack),
2320*7c478bd9Sstevel@tonic-gate 	    M_PCPROTO, T_OK_ACK);
2321*7c478bd9Sstevel@tonic-gate 
2322*7c478bd9Sstevel@tonic-gate 	if (ackmp == NULL) {
2323*7c478bd9Sstevel@tonic-gate 		tl_merror(wq, NULL, ENOMEM);
2324*7c478bd9Sstevel@tonic-gate 		return;
2325*7c478bd9Sstevel@tonic-gate 	}
2326*7c478bd9Sstevel@tonic-gate 
2327*7c478bd9Sstevel@tonic-gate 	ok_ack = (struct T_ok_ack *)ackmp->b_rptr;
2328*7c478bd9Sstevel@tonic-gate 	ok_ack->CORRECT_prim = type;
2329*7c478bd9Sstevel@tonic-gate 
2330*7c478bd9Sstevel@tonic-gate 	(void) qreply(wq, ackmp);
2331*7c478bd9Sstevel@tonic-gate }
2332*7c478bd9Sstevel@tonic-gate 
2333*7c478bd9Sstevel@tonic-gate /*
2334*7c478bd9Sstevel@tonic-gate  * Process T_BIND_REQ and O_T_BIND_REQ from serializer.
2335*7c478bd9Sstevel@tonic-gate  * This is a wrapper around tl_bind().
2336*7c478bd9Sstevel@tonic-gate  */
2337*7c478bd9Sstevel@tonic-gate static void
2338*7c478bd9Sstevel@tonic-gate tl_bind_ser(mblk_t *mp, tl_endpt_t *tep)
2339*7c478bd9Sstevel@tonic-gate {
2340*7c478bd9Sstevel@tonic-gate 	if (! tep->te_closing)
2341*7c478bd9Sstevel@tonic-gate 		tl_bind(mp, tep);
2342*7c478bd9Sstevel@tonic-gate 	else
2343*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
2344*7c478bd9Sstevel@tonic-gate 
2345*7c478bd9Sstevel@tonic-gate 	tl_serializer_exit(tep);
2346*7c478bd9Sstevel@tonic-gate 	tl_refrele(tep);
2347*7c478bd9Sstevel@tonic-gate }
2348*7c478bd9Sstevel@tonic-gate 
2349*7c478bd9Sstevel@tonic-gate /*
2350*7c478bd9Sstevel@tonic-gate  * Process T_BIND_REQ and O_T_BIND_REQ TPI requests.
2351*7c478bd9Sstevel@tonic-gate  * Assumes that the endpoint is in the unbound state.
2352*7c478bd9Sstevel@tonic-gate  */
2353*7c478bd9Sstevel@tonic-gate static void
2354*7c478bd9Sstevel@tonic-gate tl_bind(mblk_t *mp, tl_endpt_t *tep)
2355*7c478bd9Sstevel@tonic-gate {
2356*7c478bd9Sstevel@tonic-gate 	queue_t			*wq = tep->te_wq;
2357*7c478bd9Sstevel@tonic-gate 	struct T_bind_ack	*b_ack;
2358*7c478bd9Sstevel@tonic-gate 	struct T_bind_req	*bind = (struct T_bind_req *)mp->b_rptr;
2359*7c478bd9Sstevel@tonic-gate 	mblk_t			*ackmp, *bamp;
2360*7c478bd9Sstevel@tonic-gate 	soux_addr_t		ux_addr;
2361*7c478bd9Sstevel@tonic-gate 	t_uscalar_t		qlen = 0;
2362*7c478bd9Sstevel@tonic-gate 	t_scalar_t		alen, aoff;
2363*7c478bd9Sstevel@tonic-gate 	tl_addr_t		addr_req;
2364*7c478bd9Sstevel@tonic-gate 	void			*addr_startp;
2365*7c478bd9Sstevel@tonic-gate 	ssize_t			msz = MBLKL(mp), basize;
2366*7c478bd9Sstevel@tonic-gate 	t_scalar_t		tli_err = 0, unix_err = 0;
2367*7c478bd9Sstevel@tonic-gate 	t_scalar_t		save_prim_type = bind->PRIM_type;
2368*7c478bd9Sstevel@tonic-gate 	t_scalar_t		save_state = tep->te_state;
2369*7c478bd9Sstevel@tonic-gate 
2370*7c478bd9Sstevel@tonic-gate 	if (tep->te_state != TS_UNBND) {
2371*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1,
2372*7c478bd9Sstevel@tonic-gate 			SL_TRACE|SL_ERROR,
2373*7c478bd9Sstevel@tonic-gate 			"tl_wput:bind_request:out of state, state=%d",
2374*7c478bd9Sstevel@tonic-gate 			tep->te_state));
2375*7c478bd9Sstevel@tonic-gate 		tli_err = TOUTSTATE;
2376*7c478bd9Sstevel@tonic-gate 		goto error;
2377*7c478bd9Sstevel@tonic-gate 	}
2378*7c478bd9Sstevel@tonic-gate 
2379*7c478bd9Sstevel@tonic-gate 	if (msz < sizeof (struct T_bind_req)) {
2380*7c478bd9Sstevel@tonic-gate 		tli_err = TSYSERR; unix_err = EINVAL;
2381*7c478bd9Sstevel@tonic-gate 		goto error;
2382*7c478bd9Sstevel@tonic-gate 	}
2383*7c478bd9Sstevel@tonic-gate 
2384*7c478bd9Sstevel@tonic-gate 	tep->te_state = NEXTSTATE(TE_BIND_REQ, tep->te_state);
2385*7c478bd9Sstevel@tonic-gate 
2386*7c478bd9Sstevel@tonic-gate 	ASSERT((bind->PRIM_type == O_T_BIND_REQ) ||
2387*7c478bd9Sstevel@tonic-gate 	    (bind->PRIM_type == T_BIND_REQ));
2388*7c478bd9Sstevel@tonic-gate 
2389*7c478bd9Sstevel@tonic-gate 	alen = bind->ADDR_length;
2390*7c478bd9Sstevel@tonic-gate 	aoff = bind->ADDR_offset;
2391*7c478bd9Sstevel@tonic-gate 
2392*7c478bd9Sstevel@tonic-gate 	/* negotiate max conn req pending */
2393*7c478bd9Sstevel@tonic-gate 	if (IS_COTS(tep)) {
2394*7c478bd9Sstevel@tonic-gate 		qlen = bind->CONIND_number;
2395*7c478bd9Sstevel@tonic-gate 		if (qlen > TL_MAXQLEN)
2396*7c478bd9Sstevel@tonic-gate 			qlen = TL_MAXQLEN;
2397*7c478bd9Sstevel@tonic-gate 	}
2398*7c478bd9Sstevel@tonic-gate 
2399*7c478bd9Sstevel@tonic-gate 	/*
2400*7c478bd9Sstevel@tonic-gate 	 * Reserve hash handle. It can only be NULL if the endpoint is unbound
2401*7c478bd9Sstevel@tonic-gate 	 * and bound again.
2402*7c478bd9Sstevel@tonic-gate 	 */
2403*7c478bd9Sstevel@tonic-gate 	if ((tep->te_hash_hndl == NULL) &&
2404*7c478bd9Sstevel@tonic-gate 	    ((tep->te_flag & TL_ADDRHASHED) == 0) &&
2405*7c478bd9Sstevel@tonic-gate 	    mod_hash_reserve_nosleep(tep->te_addrhash,
2406*7c478bd9Sstevel@tonic-gate 		&tep->te_hash_hndl) != 0) {
2407*7c478bd9Sstevel@tonic-gate 		tli_err = TSYSERR; unix_err = ENOSR;
2408*7c478bd9Sstevel@tonic-gate 		goto error;
2409*7c478bd9Sstevel@tonic-gate 	}
2410*7c478bd9Sstevel@tonic-gate 
2411*7c478bd9Sstevel@tonic-gate 	/*
2412*7c478bd9Sstevel@tonic-gate 	 * Verify address correctness.
2413*7c478bd9Sstevel@tonic-gate 	 */
2414*7c478bd9Sstevel@tonic-gate 	if (IS_SOCKET(tep)) {
2415*7c478bd9Sstevel@tonic-gate 		ASSERT(bind->PRIM_type == O_T_BIND_REQ);
2416*7c478bd9Sstevel@tonic-gate 
2417*7c478bd9Sstevel@tonic-gate 		if ((alen != TL_SOUX_ADDRLEN) ||
2418*7c478bd9Sstevel@tonic-gate 		    (aoff < 0) ||
2419*7c478bd9Sstevel@tonic-gate 		    (aoff + alen > msz)) {
2420*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor,
2421*7c478bd9Sstevel@tonic-gate 				    1, SL_TRACE|SL_ERROR,
2422*7c478bd9Sstevel@tonic-gate 				    "tl_bind: invalid socket addr"));
2423*7c478bd9Sstevel@tonic-gate 			tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2424*7c478bd9Sstevel@tonic-gate 			tli_err = TSYSERR; unix_err = EINVAL;
2425*7c478bd9Sstevel@tonic-gate 			goto error;
2426*7c478bd9Sstevel@tonic-gate 		}
2427*7c478bd9Sstevel@tonic-gate 		/* Copy address from message to local buffer. */
2428*7c478bd9Sstevel@tonic-gate 		bcopy(mp->b_rptr + aoff, &ux_addr, sizeof (ux_addr));
2429*7c478bd9Sstevel@tonic-gate 		/*
2430*7c478bd9Sstevel@tonic-gate 		 * Check that we got correct address from sockets
2431*7c478bd9Sstevel@tonic-gate 		 */
2432*7c478bd9Sstevel@tonic-gate 		if ((ux_addr.soua_magic != SOU_MAGIC_EXPLICIT) &&
2433*7c478bd9Sstevel@tonic-gate 		    (ux_addr.soua_magic != SOU_MAGIC_IMPLICIT)) {
2434*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor,
2435*7c478bd9Sstevel@tonic-gate 				    1, SL_TRACE|SL_ERROR,
2436*7c478bd9Sstevel@tonic-gate 				    "tl_bind: invalid socket magic"));
2437*7c478bd9Sstevel@tonic-gate 			tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2438*7c478bd9Sstevel@tonic-gate 			tli_err = TSYSERR; unix_err = EINVAL;
2439*7c478bd9Sstevel@tonic-gate 			goto error;
2440*7c478bd9Sstevel@tonic-gate 		}
2441*7c478bd9Sstevel@tonic-gate 		if ((ux_addr.soua_magic == SOU_MAGIC_IMPLICIT) &&
2442*7c478bd9Sstevel@tonic-gate 		    (ux_addr.soua_vp != NULL)) {
2443*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor,
2444*7c478bd9Sstevel@tonic-gate 				    1, SL_TRACE|SL_ERROR,
2445*7c478bd9Sstevel@tonic-gate 				    "tl_bind: implicit addr non-empty"));
2446*7c478bd9Sstevel@tonic-gate 			tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2447*7c478bd9Sstevel@tonic-gate 			tli_err = TSYSERR; unix_err = EINVAL;
2448*7c478bd9Sstevel@tonic-gate 			goto error;
2449*7c478bd9Sstevel@tonic-gate 		}
2450*7c478bd9Sstevel@tonic-gate 		if ((ux_addr.soua_magic == SOU_MAGIC_EXPLICIT) &&
2451*7c478bd9Sstevel@tonic-gate 		    (ux_addr.soua_vp == NULL)) {
2452*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor,
2453*7c478bd9Sstevel@tonic-gate 				    1, SL_TRACE|SL_ERROR,
2454*7c478bd9Sstevel@tonic-gate 				    "tl_bind: explicit addr empty"));
2455*7c478bd9Sstevel@tonic-gate 			tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2456*7c478bd9Sstevel@tonic-gate 			tli_err = TSYSERR; unix_err = EINVAL;
2457*7c478bd9Sstevel@tonic-gate 			goto error;
2458*7c478bd9Sstevel@tonic-gate 		}
2459*7c478bd9Sstevel@tonic-gate 	} else {
2460*7c478bd9Sstevel@tonic-gate 		if ((alen > 0) && ((aoff < 0) ||
2461*7c478bd9Sstevel@tonic-gate 			((ssize_t)(aoff + alen) > msz) ||
2462*7c478bd9Sstevel@tonic-gate 			((aoff + alen) < 0))) {
2463*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor,
2464*7c478bd9Sstevel@tonic-gate 				    1, SL_TRACE|SL_ERROR,
2465*7c478bd9Sstevel@tonic-gate 				    "tl_bind: invalid message"));
2466*7c478bd9Sstevel@tonic-gate 			tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2467*7c478bd9Sstevel@tonic-gate 			tli_err = TSYSERR; unix_err = EINVAL;
2468*7c478bd9Sstevel@tonic-gate 			goto error;
2469*7c478bd9Sstevel@tonic-gate 		}
2470*7c478bd9Sstevel@tonic-gate 		if ((alen < 0) || (alen > (msz - sizeof (struct T_bind_req)))) {
2471*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor,
2472*7c478bd9Sstevel@tonic-gate 				    1, SL_TRACE|SL_ERROR,
2473*7c478bd9Sstevel@tonic-gate 				    "tl_bind: bad addr in  message"));
2474*7c478bd9Sstevel@tonic-gate 			tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2475*7c478bd9Sstevel@tonic-gate 			tli_err = TBADADDR;
2476*7c478bd9Sstevel@tonic-gate 			goto error;
2477*7c478bd9Sstevel@tonic-gate 		}
2478*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
2479*7c478bd9Sstevel@tonic-gate 		/*
2480*7c478bd9Sstevel@tonic-gate 		 * Mild form of ASSERT()ion to detect broken TPI apps.
2481*7c478bd9Sstevel@tonic-gate 		 * if (! assertion)
2482*7c478bd9Sstevel@tonic-gate 		 *	log warning;
2483*7c478bd9Sstevel@tonic-gate 		 */
2484*7c478bd9Sstevel@tonic-gate 		if (! ((alen == 0 && aoff == 0) ||
2485*7c478bd9Sstevel@tonic-gate 			(aoff >= (t_scalar_t)(sizeof (struct T_bind_req))))) {
2486*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor,
2487*7c478bd9Sstevel@tonic-gate 				    3, SL_TRACE|SL_ERROR,
2488*7c478bd9Sstevel@tonic-gate 				    "tl_bind: addr overlaps TPI message"));
2489*7c478bd9Sstevel@tonic-gate 		}
2490*7c478bd9Sstevel@tonic-gate #endif
2491*7c478bd9Sstevel@tonic-gate 	}
2492*7c478bd9Sstevel@tonic-gate 
2493*7c478bd9Sstevel@tonic-gate 	/*
2494*7c478bd9Sstevel@tonic-gate 	 * Bind the address provided or allocate one if requested.
2495*7c478bd9Sstevel@tonic-gate 	 * Allow rebinds with a new qlen value.
2496*7c478bd9Sstevel@tonic-gate 	 */
2497*7c478bd9Sstevel@tonic-gate 	if (IS_SOCKET(tep)) {
2498*7c478bd9Sstevel@tonic-gate 		/*
2499*7c478bd9Sstevel@tonic-gate 		 * For anonymous requests the te_ap is already set up properly
2500*7c478bd9Sstevel@tonic-gate 		 * so use minor number as an address.
2501*7c478bd9Sstevel@tonic-gate 		 * For explicit requests we need to check whether the address is
2502*7c478bd9Sstevel@tonic-gate 		 * already in use.
2503*7c478bd9Sstevel@tonic-gate 		 */
2504*7c478bd9Sstevel@tonic-gate 		if (ux_addr.soua_magic == SOU_MAGIC_EXPLICIT) {
2505*7c478bd9Sstevel@tonic-gate 			int rc;
2506*7c478bd9Sstevel@tonic-gate 
2507*7c478bd9Sstevel@tonic-gate 			if (tep->te_flag & TL_ADDRHASHED) {
2508*7c478bd9Sstevel@tonic-gate 				ASSERT(IS_COTS(tep) && tep->te_qlen == 0);
2509*7c478bd9Sstevel@tonic-gate 				if (tep->te_vp == ux_addr.soua_vp)
2510*7c478bd9Sstevel@tonic-gate 					goto skip_addr_bind;
2511*7c478bd9Sstevel@tonic-gate 				else /* Rebind to a new address. */
2512*7c478bd9Sstevel@tonic-gate 					tl_addr_unbind(tep);
2513*7c478bd9Sstevel@tonic-gate 			}
2514*7c478bd9Sstevel@tonic-gate 			/*
2515*7c478bd9Sstevel@tonic-gate 			 * Insert address in the hash if it is not already
2516*7c478bd9Sstevel@tonic-gate 			 * there.  Since we use preallocated handle, the insert
2517*7c478bd9Sstevel@tonic-gate 			 * can fail only if the key is already present.
2518*7c478bd9Sstevel@tonic-gate 			 */
2519*7c478bd9Sstevel@tonic-gate 			rc = mod_hash_insert_reserve(tep->te_addrhash,
2520*7c478bd9Sstevel@tonic-gate 			    (mod_hash_key_t)ux_addr.soua_vp,
2521*7c478bd9Sstevel@tonic-gate 			    (mod_hash_val_t)tep, tep->te_hash_hndl);
2522*7c478bd9Sstevel@tonic-gate 
2523*7c478bd9Sstevel@tonic-gate 			if (rc != 0) {
2524*7c478bd9Sstevel@tonic-gate 				ASSERT(rc == MH_ERR_DUPLICATE);
2525*7c478bd9Sstevel@tonic-gate 				/*
2526*7c478bd9Sstevel@tonic-gate 				 * Violate O_T_BIND_REQ semantics and fail with
2527*7c478bd9Sstevel@tonic-gate 				 * TADDRBUSY - sockets will not use any address
2528*7c478bd9Sstevel@tonic-gate 				 * other than supplied one for explicit binds.
2529*7c478bd9Sstevel@tonic-gate 				 */
2530*7c478bd9Sstevel@tonic-gate 				(void) (STRLOG(TL_ID, tep->te_minor, 1,
2531*7c478bd9Sstevel@tonic-gate 					SL_TRACE|SL_ERROR,
2532*7c478bd9Sstevel@tonic-gate 					"tl_bind:requested addr %p is busy",
2533*7c478bd9Sstevel@tonic-gate 					    ux_addr.soua_vp));
2534*7c478bd9Sstevel@tonic-gate 				tli_err = TADDRBUSY; unix_err = 0;
2535*7c478bd9Sstevel@tonic-gate 				goto error;
2536*7c478bd9Sstevel@tonic-gate 			}
2537*7c478bd9Sstevel@tonic-gate 			tep->te_uxaddr = ux_addr;
2538*7c478bd9Sstevel@tonic-gate 			tep->te_flag |= TL_ADDRHASHED;
2539*7c478bd9Sstevel@tonic-gate 			tep->te_hash_hndl = NULL;
2540*7c478bd9Sstevel@tonic-gate 		}
2541*7c478bd9Sstevel@tonic-gate 	} else if (alen == 0) {
2542*7c478bd9Sstevel@tonic-gate 		/*
2543*7c478bd9Sstevel@tonic-gate 		 * assign any free address
2544*7c478bd9Sstevel@tonic-gate 		 */
2545*7c478bd9Sstevel@tonic-gate 		if (! tl_get_any_addr(tep, NULL)) {
2546*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor,
2547*7c478bd9Sstevel@tonic-gate 				1, SL_TRACE|SL_ERROR,
2548*7c478bd9Sstevel@tonic-gate 				"tl_bind:failed to get buffer for any "
2549*7c478bd9Sstevel@tonic-gate 				"address"));
2550*7c478bd9Sstevel@tonic-gate 			tli_err = TSYSERR; unix_err = ENOSR;
2551*7c478bd9Sstevel@tonic-gate 			goto error;
2552*7c478bd9Sstevel@tonic-gate 		}
2553*7c478bd9Sstevel@tonic-gate 	} else {
2554*7c478bd9Sstevel@tonic-gate 		addr_req.ta_alen = alen;
2555*7c478bd9Sstevel@tonic-gate 		addr_req.ta_abuf = (mp->b_rptr + aoff);
2556*7c478bd9Sstevel@tonic-gate 		addr_req.ta_zoneid = tep->te_zoneid;
2557*7c478bd9Sstevel@tonic-gate 
2558*7c478bd9Sstevel@tonic-gate 		tep->te_abuf = kmem_zalloc((size_t)alen, KM_NOSLEEP);
2559*7c478bd9Sstevel@tonic-gate 		if (tep->te_abuf == NULL) {
2560*7c478bd9Sstevel@tonic-gate 			tli_err = TSYSERR; unix_err = ENOSR;
2561*7c478bd9Sstevel@tonic-gate 			goto error;
2562*7c478bd9Sstevel@tonic-gate 		}
2563*7c478bd9Sstevel@tonic-gate 		bcopy(addr_req.ta_abuf, tep->te_abuf, addr_req.ta_alen);
2564*7c478bd9Sstevel@tonic-gate 		tep->te_alen = alen;
2565*7c478bd9Sstevel@tonic-gate 
2566*7c478bd9Sstevel@tonic-gate 		if (mod_hash_insert_reserve(tep->te_addrhash,
2567*7c478bd9Sstevel@tonic-gate 			(mod_hash_key_t)&tep->te_ap, (mod_hash_val_t)tep,
2568*7c478bd9Sstevel@tonic-gate 			tep->te_hash_hndl) != 0) {
2569*7c478bd9Sstevel@tonic-gate 			if (save_prim_type == T_BIND_REQ) {
2570*7c478bd9Sstevel@tonic-gate 				/*
2571*7c478bd9Sstevel@tonic-gate 				 * The bind semantics for this primitive
2572*7c478bd9Sstevel@tonic-gate 				 * require a failure if the exact address
2573*7c478bd9Sstevel@tonic-gate 				 * requested is busy
2574*7c478bd9Sstevel@tonic-gate 				 */
2575*7c478bd9Sstevel@tonic-gate 				(void) (STRLOG(TL_ID, tep->te_minor, 1,
2576*7c478bd9Sstevel@tonic-gate 					SL_TRACE|SL_ERROR,
2577*7c478bd9Sstevel@tonic-gate 					"tl_bind:requested addr is busy"));
2578*7c478bd9Sstevel@tonic-gate 				tli_err = TADDRBUSY; unix_err = 0;
2579*7c478bd9Sstevel@tonic-gate 				goto error;
2580*7c478bd9Sstevel@tonic-gate 			}
2581*7c478bd9Sstevel@tonic-gate 
2582*7c478bd9Sstevel@tonic-gate 			/*
2583*7c478bd9Sstevel@tonic-gate 			 * O_T_BIND_REQ semantics say that if the requested
2584*7c478bd9Sstevel@tonic-gate 			 * address is busy, bind to any available free address
2585*7c478bd9Sstevel@tonic-gate 			 */
2586*7c478bd9Sstevel@tonic-gate 			if (! tl_get_any_addr(tep, &addr_req)) {
2587*7c478bd9Sstevel@tonic-gate 				(void) (STRLOG(TL_ID, tep->te_minor, 1,
2588*7c478bd9Sstevel@tonic-gate 					SL_TRACE|SL_ERROR,
2589*7c478bd9Sstevel@tonic-gate 					"tl_bind:unable to get any addr buf"));
2590*7c478bd9Sstevel@tonic-gate 				tli_err = TSYSERR; unix_err = ENOMEM;
2591*7c478bd9Sstevel@tonic-gate 				goto error;
2592*7c478bd9Sstevel@tonic-gate 			}
2593*7c478bd9Sstevel@tonic-gate 		} else {
2594*7c478bd9Sstevel@tonic-gate 			tep->te_flag |= TL_ADDRHASHED;
2595*7c478bd9Sstevel@tonic-gate 			tep->te_hash_hndl = NULL;
2596*7c478bd9Sstevel@tonic-gate 		}
2597*7c478bd9Sstevel@tonic-gate 	}
2598*7c478bd9Sstevel@tonic-gate 
2599*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_alen >= 0);
2600*7c478bd9Sstevel@tonic-gate 
2601*7c478bd9Sstevel@tonic-gate skip_addr_bind:
2602*7c478bd9Sstevel@tonic-gate 	/*
2603*7c478bd9Sstevel@tonic-gate 	 * prepare T_BIND_ACK TPI message
2604*7c478bd9Sstevel@tonic-gate 	 */
2605*7c478bd9Sstevel@tonic-gate 	basize = sizeof (struct T_bind_ack) + tep->te_alen;
2606*7c478bd9Sstevel@tonic-gate 	bamp = reallocb(mp, basize, 0);
2607*7c478bd9Sstevel@tonic-gate 	if (bamp == NULL) {
2608*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
2609*7c478bd9Sstevel@tonic-gate 			"tl_wput:tl_bind: allocb failed"));
2610*7c478bd9Sstevel@tonic-gate 		/*
2611*7c478bd9Sstevel@tonic-gate 		 * roll back state changes
2612*7c478bd9Sstevel@tonic-gate 		 */
2613*7c478bd9Sstevel@tonic-gate 		tl_addr_unbind(tep);
2614*7c478bd9Sstevel@tonic-gate 		tep->te_state = TS_UNBND;
2615*7c478bd9Sstevel@tonic-gate 		tl_memrecover(wq, mp, basize);
2616*7c478bd9Sstevel@tonic-gate 		return;
2617*7c478bd9Sstevel@tonic-gate 	}
2618*7c478bd9Sstevel@tonic-gate 
2619*7c478bd9Sstevel@tonic-gate 	DB_TYPE(bamp) = M_PCPROTO;
2620*7c478bd9Sstevel@tonic-gate 	bamp->b_wptr = bamp->b_rptr + basize;
2621*7c478bd9Sstevel@tonic-gate 	b_ack = (struct T_bind_ack *)bamp->b_rptr;
2622*7c478bd9Sstevel@tonic-gate 	b_ack->PRIM_type = T_BIND_ACK;
2623*7c478bd9Sstevel@tonic-gate 	b_ack->CONIND_number = qlen;
2624*7c478bd9Sstevel@tonic-gate 	b_ack->ADDR_length = tep->te_alen;
2625*7c478bd9Sstevel@tonic-gate 	b_ack->ADDR_offset = (t_scalar_t)sizeof (struct T_bind_ack);
2626*7c478bd9Sstevel@tonic-gate 	addr_startp = bamp->b_rptr + b_ack->ADDR_offset;
2627*7c478bd9Sstevel@tonic-gate 	bcopy(tep->te_abuf, addr_startp, tep->te_alen);
2628*7c478bd9Sstevel@tonic-gate 
2629*7c478bd9Sstevel@tonic-gate 	if (IS_COTS(tep)) {
2630*7c478bd9Sstevel@tonic-gate 		tep->te_qlen = qlen;
2631*7c478bd9Sstevel@tonic-gate 		if (qlen > 0)
2632*7c478bd9Sstevel@tonic-gate 			tep->te_flag |= TL_LISTENER;
2633*7c478bd9Sstevel@tonic-gate 	}
2634*7c478bd9Sstevel@tonic-gate 
2635*7c478bd9Sstevel@tonic-gate 	tep->te_state = NEXTSTATE(TE_BIND_ACK, tep->te_state);
2636*7c478bd9Sstevel@tonic-gate 	/*
2637*7c478bd9Sstevel@tonic-gate 	 * send T_BIND_ACK message
2638*7c478bd9Sstevel@tonic-gate 	 */
2639*7c478bd9Sstevel@tonic-gate 	(void) qreply(wq, bamp);
2640*7c478bd9Sstevel@tonic-gate 	return;
2641*7c478bd9Sstevel@tonic-gate 
2642*7c478bd9Sstevel@tonic-gate error:
2643*7c478bd9Sstevel@tonic-gate 	ackmp = reallocb(mp, sizeof (struct T_error_ack), 0);
2644*7c478bd9Sstevel@tonic-gate 	if (ackmp == NULL) {
2645*7c478bd9Sstevel@tonic-gate 		/*
2646*7c478bd9Sstevel@tonic-gate 		 * roll back state changes
2647*7c478bd9Sstevel@tonic-gate 		 */
2648*7c478bd9Sstevel@tonic-gate 		tep->te_state = save_state;
2649*7c478bd9Sstevel@tonic-gate 		tl_memrecover(wq, mp, sizeof (struct T_error_ack));
2650*7c478bd9Sstevel@tonic-gate 		return;
2651*7c478bd9Sstevel@tonic-gate 	}
2652*7c478bd9Sstevel@tonic-gate 	tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
2653*7c478bd9Sstevel@tonic-gate 	tl_error_ack(wq, ackmp, tli_err, unix_err, save_prim_type);
2654*7c478bd9Sstevel@tonic-gate }
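
/*
 * Illustrative layout sketch (an assumption for readers, not driver code):
 * a well-formed T_BIND_REQ handled by tl_bind() above is a single M_PROTO
 * block with the address following the TPI header, e.g.
 *
 *	struct T_bind_req *br = (struct T_bind_req *)mp->b_rptr;
 *
 *	br->PRIM_type     = T_BIND_REQ;
 *	br->ADDR_length   = alen;
 *	br->ADDR_offset   = sizeof (struct T_bind_req);
 *	br->CONIND_number = qlen;
 *	bcopy(addr, mp->b_rptr + br->ADDR_offset, alen);
 *
 * This is the shape the DEBUG check in tl_bind() expects: either no address
 * at all, or ADDR_offset at or beyond the end of struct T_bind_req.
 */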
2655*7c478bd9Sstevel@tonic-gate 
2656*7c478bd9Sstevel@tonic-gate /*
2657*7c478bd9Sstevel@tonic-gate  * Process T_UNBIND_REQ.
2658*7c478bd9Sstevel@tonic-gate  * Called from serializer.
2659*7c478bd9Sstevel@tonic-gate  */
2660*7c478bd9Sstevel@tonic-gate static void
2661*7c478bd9Sstevel@tonic-gate tl_unbind(mblk_t *mp, tl_endpt_t *tep)
2662*7c478bd9Sstevel@tonic-gate {
2663*7c478bd9Sstevel@tonic-gate 	queue_t *wq;
2664*7c478bd9Sstevel@tonic-gate 	mblk_t *ackmp;
2665*7c478bd9Sstevel@tonic-gate 
2666*7c478bd9Sstevel@tonic-gate 	if (tep->te_closing) {
2667*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
2668*7c478bd9Sstevel@tonic-gate 		return;
2669*7c478bd9Sstevel@tonic-gate 	}
2670*7c478bd9Sstevel@tonic-gate 
2671*7c478bd9Sstevel@tonic-gate 	wq = tep->te_wq;
2672*7c478bd9Sstevel@tonic-gate 
2673*7c478bd9Sstevel@tonic-gate 	/*
2674*7c478bd9Sstevel@tonic-gate 	 * preallocate memory for max of T_OK_ACK and T_ERROR_ACK
2675*7c478bd9Sstevel@tonic-gate 	 * ==> allocate for T_ERROR_ACK (known max)
2676*7c478bd9Sstevel@tonic-gate 	 */
2677*7c478bd9Sstevel@tonic-gate 	if ((ackmp = reallocb(mp, sizeof (struct T_error_ack), 0)) == NULL) {
2678*7c478bd9Sstevel@tonic-gate 		tl_memrecover(wq, mp, sizeof (struct T_error_ack));
2679*7c478bd9Sstevel@tonic-gate 		return;
2680*7c478bd9Sstevel@tonic-gate 	}
2681*7c478bd9Sstevel@tonic-gate 	/*
2682*7c478bd9Sstevel@tonic-gate 	 * memory resources committed
2683*7c478bd9Sstevel@tonic-gate 	 * Note: no message validation. T_UNBIND_REQ message is
2684*7c478bd9Sstevel@tonic-gate 	 * same size as PRIM_type field so already verified earlier.
2685*7c478bd9Sstevel@tonic-gate 	 */
2686*7c478bd9Sstevel@tonic-gate 
2687*7c478bd9Sstevel@tonic-gate 	/*
2688*7c478bd9Sstevel@tonic-gate 	 * validate state
2689*7c478bd9Sstevel@tonic-gate 	 */
2690*7c478bd9Sstevel@tonic-gate 	if (tep->te_state != TS_IDLE) {
2691*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1,
2692*7c478bd9Sstevel@tonic-gate 			SL_TRACE|SL_ERROR,
2693*7c478bd9Sstevel@tonic-gate 			"tl_wput:T_UNBIND_REQ:out of state, state=%d",
2694*7c478bd9Sstevel@tonic-gate 			tep->te_state));
2695*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_UNBIND_REQ);
2696*7c478bd9Sstevel@tonic-gate 		return;
2697*7c478bd9Sstevel@tonic-gate 	}
2698*7c478bd9Sstevel@tonic-gate 	tep->te_state = NEXTSTATE(TE_UNBIND_REQ, tep->te_state);
2699*7c478bd9Sstevel@tonic-gate 
2700*7c478bd9Sstevel@tonic-gate 	/*
2701*7c478bd9Sstevel@tonic-gate 	 * TPI says on T_UNBIND_REQ:
2702*7c478bd9Sstevel@tonic-gate 	 *    send up a M_FLUSH to flush both
2703*7c478bd9Sstevel@tonic-gate 	 *    read and write queues
2704*7c478bd9Sstevel@tonic-gate 	 */
2705*7c478bd9Sstevel@tonic-gate 	(void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW);
2706*7c478bd9Sstevel@tonic-gate 
2707*7c478bd9Sstevel@tonic-gate 	if (! IS_SOCKET(tep) || !IS_CLTS(tep) || tep->te_qlen != 0 ||
2708*7c478bd9Sstevel@tonic-gate 	    tep->te_magic != SOU_MAGIC_EXPLICIT) {
2709*7c478bd9Sstevel@tonic-gate 
2710*7c478bd9Sstevel@tonic-gate 		/*
2711*7c478bd9Sstevel@tonic-gate 		 * Sockets use bind with qlen==0 followed by bind() to
2712*7c478bd9Sstevel@tonic-gate 		 * the same address with qlen > 0 for listeners.
2713*7c478bd9Sstevel@tonic-gate 		 * We allow rebind with a new qlen value.
2714*7c478bd9Sstevel@tonic-gate 		 */
2715*7c478bd9Sstevel@tonic-gate 		tl_addr_unbind(tep);
2716*7c478bd9Sstevel@tonic-gate 	}
2717*7c478bd9Sstevel@tonic-gate 
2718*7c478bd9Sstevel@tonic-gate 	tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state);
2719*7c478bd9Sstevel@tonic-gate 	/*
2720*7c478bd9Sstevel@tonic-gate 	 * send  T_OK_ACK
2721*7c478bd9Sstevel@tonic-gate 	 */
2722*7c478bd9Sstevel@tonic-gate 	tl_ok_ack(wq, ackmp, T_UNBIND_REQ);
2723*7c478bd9Sstevel@tonic-gate }
2724*7c478bd9Sstevel@tonic-gate 
2725*7c478bd9Sstevel@tonic-gate 
2726*7c478bd9Sstevel@tonic-gate /*
2727*7c478bd9Sstevel@tonic-gate  * Option management code from drv/ip is used here
2728*7c478bd9Sstevel@tonic-gate  * Note: TL_PROT_LEVEL/TL_IOC_CREDOPT option is not part of tl_opt_arr
2729*7c478bd9Sstevel@tonic-gate  *	database of options. So optcom_req() will fail T_SVR4_OPTMGMT_REQ.
2730*7c478bd9Sstevel@tonic-gate  *	However, that is what we want as that option is 'unorthodox'
2731*7c478bd9Sstevel@tonic-gate  *	and only valid in T_CONN_IND, T_CONN_CON  and T_UNITDATA_IND
2732*7c478bd9Sstevel@tonic-gate  *	and not in T_SVR4_OPTMGMT_REQ/ACK
2733*7c478bd9Sstevel@tonic-gate  * Note2: use of optcom_req means this routine is an exception to
2734*7c478bd9Sstevel@tonic-gate  *	 recovery from allocb() failures.
2735*7c478bd9Sstevel@tonic-gate  */
2736*7c478bd9Sstevel@tonic-gate 
2737*7c478bd9Sstevel@tonic-gate static void
2738*7c478bd9Sstevel@tonic-gate tl_optmgmt(queue_t *wq, mblk_t *mp)
2739*7c478bd9Sstevel@tonic-gate {
2740*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *tep;
2741*7c478bd9Sstevel@tonic-gate 	mblk_t *ackmp;
2742*7c478bd9Sstevel@tonic-gate 	union T_primitives *prim;
2743*7c478bd9Sstevel@tonic-gate 
2744*7c478bd9Sstevel@tonic-gate 	tep = (tl_endpt_t *)wq->q_ptr;
2745*7c478bd9Sstevel@tonic-gate 	prim = (union T_primitives *)mp->b_rptr;
2746*7c478bd9Sstevel@tonic-gate 
2747*7c478bd9Sstevel@tonic-gate 	/*  all states OK for AF_UNIX options ? */
2748*7c478bd9Sstevel@tonic-gate 	if (!IS_SOCKET(tep) && tep->te_state != TS_IDLE &&
2749*7c478bd9Sstevel@tonic-gate 	    prim->type == T_SVR4_OPTMGMT_REQ) {
2750*7c478bd9Sstevel@tonic-gate 		/*
2751*7c478bd9Sstevel@tonic-gate 		 * Broken TLI semantics that options can only be managed
2752*7c478bd9Sstevel@tonic-gate 		 * in TS_IDLE state. Needed for Sparc ABI test suite that
2753*7c478bd9Sstevel@tonic-gate 		 * tests this TLI (mis)feature using this device driver.
2754*7c478bd9Sstevel@tonic-gate 		 */
2755*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1,
2756*7c478bd9Sstevel@tonic-gate 			SL_TRACE|SL_ERROR,
2757*7c478bd9Sstevel@tonic-gate 			"tl_wput:T_SVR4_OPTMGMT_REQ:out of state, state=%d",
2758*7c478bd9Sstevel@tonic-gate 			tep->te_state));
2759*7c478bd9Sstevel@tonic-gate 		/*
2760*7c478bd9Sstevel@tonic-gate 		 * preallocate memory for T_ERROR_ACK
2761*7c478bd9Sstevel@tonic-gate 		 */
2762*7c478bd9Sstevel@tonic-gate 		ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED);
2763*7c478bd9Sstevel@tonic-gate 		if (! ackmp) {
2764*7c478bd9Sstevel@tonic-gate 			tl_memrecover(wq, mp, sizeof (struct T_error_ack));
2765*7c478bd9Sstevel@tonic-gate 			return;
2766*7c478bd9Sstevel@tonic-gate 		}
2767*7c478bd9Sstevel@tonic-gate 
2768*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_SVR4_OPTMGMT_REQ);
2769*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
2770*7c478bd9Sstevel@tonic-gate 		return;
2771*7c478bd9Sstevel@tonic-gate 	}
2772*7c478bd9Sstevel@tonic-gate 
2773*7c478bd9Sstevel@tonic-gate 	/*
2774*7c478bd9Sstevel@tonic-gate 	 * call common option management routine from drv/ip
2775*7c478bd9Sstevel@tonic-gate 	 */
2776*7c478bd9Sstevel@tonic-gate 	if (prim->type == T_SVR4_OPTMGMT_REQ) {
2777*7c478bd9Sstevel@tonic-gate 		(void) svr4_optcom_req(wq, mp, tep->te_credp, &tl_opt_obj);
2778*7c478bd9Sstevel@tonic-gate 	} else {
2779*7c478bd9Sstevel@tonic-gate 		ASSERT(prim->type == T_OPTMGMT_REQ);
2780*7c478bd9Sstevel@tonic-gate 		(void) tpi_optcom_req(wq, mp, tep->te_credp, &tl_opt_obj);
2781*7c478bd9Sstevel@tonic-gate 	}
2782*7c478bd9Sstevel@tonic-gate }
2783*7c478bd9Sstevel@tonic-gate 
2784*7c478bd9Sstevel@tonic-gate /*
2785*7c478bd9Sstevel@tonic-gate  * Handle T_conn_req - the driver part of accept().
2786*7c478bd9Sstevel@tonic-gate  * If TL_SET[U]CRED generate the credentials options.
2787*7c478bd9Sstevel@tonic-gate  * If this is a socket pass through options unmodified.
2788*7c478bd9Sstevel@tonic-gate  * For sockets generate the T_CONN_CON here instead of
2789*7c478bd9Sstevel@tonic-gate  * waiting for the T_CONN_RES.
2790*7c478bd9Sstevel@tonic-gate  */
2791*7c478bd9Sstevel@tonic-gate static void
2792*7c478bd9Sstevel@tonic-gate tl_conn_req(queue_t *wq, mblk_t *mp)
2793*7c478bd9Sstevel@tonic-gate {
2794*7c478bd9Sstevel@tonic-gate 	tl_endpt_t		*tep = (tl_endpt_t *)wq->q_ptr;
2795*7c478bd9Sstevel@tonic-gate 	struct T_conn_req	*creq = (struct T_conn_req *)mp->b_rptr;
2796*7c478bd9Sstevel@tonic-gate 	ssize_t			msz = MBLKL(mp);
2797*7c478bd9Sstevel@tonic-gate 	t_scalar_t		alen, aoff, olen, ooff,	err = 0;
2798*7c478bd9Sstevel@tonic-gate 	tl_endpt_t		*peer_tep = NULL;
2799*7c478bd9Sstevel@tonic-gate 	mblk_t			*ackmp;
2800*7c478bd9Sstevel@tonic-gate 	mblk_t			*dimp;
2801*7c478bd9Sstevel@tonic-gate 	struct T_discon_ind	*di;
2802*7c478bd9Sstevel@tonic-gate 	soux_addr_t		ux_addr;
2803*7c478bd9Sstevel@tonic-gate 	tl_addr_t		dst;
2804*7c478bd9Sstevel@tonic-gate 
2805*7c478bd9Sstevel@tonic-gate 	ASSERT(IS_COTS(tep));
2806*7c478bd9Sstevel@tonic-gate 
2807*7c478bd9Sstevel@tonic-gate 	if (tep->te_closing) {
2808*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
2809*7c478bd9Sstevel@tonic-gate 		return;
2810*7c478bd9Sstevel@tonic-gate 	}
2811*7c478bd9Sstevel@tonic-gate 
2812*7c478bd9Sstevel@tonic-gate 	/*
2813*7c478bd9Sstevel@tonic-gate 	 * preallocate memory for:
2814*7c478bd9Sstevel@tonic-gate 	 * 1. max of T_ERROR_ACK and T_OK_ACK
2815*7c478bd9Sstevel@tonic-gate 	 *	==> known max T_ERROR_ACK
2816*7c478bd9Sstevel@tonic-gate 	 * 2. max of T_DISCON_IND and T_CONN_IND
2817*7c478bd9Sstevel@tonic-gate 	 */
2818*7c478bd9Sstevel@tonic-gate 	ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED);
2819*7c478bd9Sstevel@tonic-gate 	if (! ackmp) {
2820*7c478bd9Sstevel@tonic-gate 		tl_memrecover(wq, mp, sizeof (struct T_error_ack));
2821*7c478bd9Sstevel@tonic-gate 		return;
2822*7c478bd9Sstevel@tonic-gate 	}
2823*7c478bd9Sstevel@tonic-gate 	/*
2824*7c478bd9Sstevel@tonic-gate 	 * memory committed for T_OK_ACK/T_ERROR_ACK now
2825*7c478bd9Sstevel@tonic-gate 	 * will be committed for T_DISCON_IND/T_CONN_IND later
2826*7c478bd9Sstevel@tonic-gate 	 */
2827*7c478bd9Sstevel@tonic-gate 
2828*7c478bd9Sstevel@tonic-gate 	if (tep->te_state != TS_IDLE) {
2829*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1,
2830*7c478bd9Sstevel@tonic-gate 			SL_TRACE|SL_ERROR,
2831*7c478bd9Sstevel@tonic-gate 			"tl_wput:T_CONN_REQ:out of state, state=%d",
2832*7c478bd9Sstevel@tonic-gate 			tep->te_state));
2833*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_CONN_REQ);
2834*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
2835*7c478bd9Sstevel@tonic-gate 		return;
2836*7c478bd9Sstevel@tonic-gate 	}
2837*7c478bd9Sstevel@tonic-gate 
2838*7c478bd9Sstevel@tonic-gate 	/*
2839*7c478bd9Sstevel@tonic-gate 	 * validate the message
2840*7c478bd9Sstevel@tonic-gate 	 * Note: dereference fields in struct inside message only
2841*7c478bd9Sstevel@tonic-gate 	 * after validating the message length.
2842*7c478bd9Sstevel@tonic-gate 	 */
2843*7c478bd9Sstevel@tonic-gate 	if (msz < sizeof (struct T_conn_req)) {
2844*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
2845*7c478bd9Sstevel@tonic-gate 			"tl_conn_req:invalid message length"));
2846*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ);
2847*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
2848*7c478bd9Sstevel@tonic-gate 		return;
2849*7c478bd9Sstevel@tonic-gate 	}
2850*7c478bd9Sstevel@tonic-gate 	alen = creq->DEST_length;
2851*7c478bd9Sstevel@tonic-gate 	aoff = creq->DEST_offset;
2852*7c478bd9Sstevel@tonic-gate 	olen = creq->OPT_length;
2853*7c478bd9Sstevel@tonic-gate 	ooff = creq->OPT_offset;
2854*7c478bd9Sstevel@tonic-gate 	if (olen == 0)
2855*7c478bd9Sstevel@tonic-gate 		ooff = 0;
2856*7c478bd9Sstevel@tonic-gate 
2857*7c478bd9Sstevel@tonic-gate 	if (IS_SOCKET(tep)) {
2858*7c478bd9Sstevel@tonic-gate 		if ((alen != TL_SOUX_ADDRLEN) ||
2859*7c478bd9Sstevel@tonic-gate 		    (aoff < 0) ||
2860*7c478bd9Sstevel@tonic-gate 		    (aoff + alen > msz) ||
2861*7c478bd9Sstevel@tonic-gate 		    (alen > msz - sizeof (struct T_conn_req))) {
2862*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor,
2863*7c478bd9Sstevel@tonic-gate 				    1, SL_TRACE|SL_ERROR,
2864*7c478bd9Sstevel@tonic-gate 				    "tl_conn_req: invalid socket addr"));
2865*7c478bd9Sstevel@tonic-gate 			tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ);
2866*7c478bd9Sstevel@tonic-gate 			freemsg(mp);
2867*7c478bd9Sstevel@tonic-gate 			return;
2868*7c478bd9Sstevel@tonic-gate 		}
2869*7c478bd9Sstevel@tonic-gate 		bcopy(mp->b_rptr + aoff, &ux_addr, TL_SOUX_ADDRLEN);
2870*7c478bd9Sstevel@tonic-gate 		if ((ux_addr.soua_magic != SOU_MAGIC_IMPLICIT) &&
2871*7c478bd9Sstevel@tonic-gate 		    (ux_addr.soua_magic != SOU_MAGIC_EXPLICIT)) {
2872*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor,
2873*7c478bd9Sstevel@tonic-gate 				    1, SL_TRACE|SL_ERROR,
2874*7c478bd9Sstevel@tonic-gate 				    "tl_conn_req: invalid socket magic"));
2875*7c478bd9Sstevel@tonic-gate 			tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ);
2876*7c478bd9Sstevel@tonic-gate 			freemsg(mp);
2877*7c478bd9Sstevel@tonic-gate 			return;
2878*7c478bd9Sstevel@tonic-gate 		}
2879*7c478bd9Sstevel@tonic-gate 	} else {
2880*7c478bd9Sstevel@tonic-gate 		if ((alen > 0 && ((aoff + alen) > msz || aoff + alen < 0)) ||
2881*7c478bd9Sstevel@tonic-gate 		    (olen > 0 && ((ssize_t)(ooff + olen) > msz ||
2882*7c478bd9Sstevel@tonic-gate 			ooff + olen < 0)) ||
2883*7c478bd9Sstevel@tonic-gate 		    olen < 0 || ooff < 0) {
2884*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 1,
2885*7c478bd9Sstevel@tonic-gate 				    SL_TRACE|SL_ERROR,
2886*7c478bd9Sstevel@tonic-gate 				    "tl_conn_req:invalid message"));
2887*7c478bd9Sstevel@tonic-gate 			tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_CONN_REQ);
2888*7c478bd9Sstevel@tonic-gate 			freemsg(mp);
2889*7c478bd9Sstevel@tonic-gate 			return;
2890*7c478bd9Sstevel@tonic-gate 		}
2891*7c478bd9Sstevel@tonic-gate 
2892*7c478bd9Sstevel@tonic-gate 		if (alen <= 0 || aoff < 0 ||
2893*7c478bd9Sstevel@tonic-gate 		    (ssize_t)alen > msz - sizeof (struct T_conn_req)) {
2894*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 1,
2895*7c478bd9Sstevel@tonic-gate 				    SL_TRACE|SL_ERROR,
2896*7c478bd9Sstevel@tonic-gate 				    "tl_conn_req:bad addr in message, "
2897*7c478bd9Sstevel@tonic-gate 				    "alen=%d, msz=%ld",
2898*7c478bd9Sstevel@tonic-gate 				    alen, msz));
2899*7c478bd9Sstevel@tonic-gate 			tl_error_ack(wq, ackmp, TBADADDR, 0, T_CONN_REQ);
2900*7c478bd9Sstevel@tonic-gate 			freemsg(mp);
2901*7c478bd9Sstevel@tonic-gate 			return;
2902*7c478bd9Sstevel@tonic-gate 		}
2903*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
2904*7c478bd9Sstevel@tonic-gate 		/*
2905*7c478bd9Sstevel@tonic-gate 		 * Mild form of ASSERT()ion to detect broken TPI apps.
2906*7c478bd9Sstevel@tonic-gate 		 * if (! assertion)
2907*7c478bd9Sstevel@tonic-gate 		 *	log warning;
2908*7c478bd9Sstevel@tonic-gate 		 */
2909*7c478bd9Sstevel@tonic-gate 		if (! (aoff >= (t_scalar_t)sizeof (struct T_conn_req))) {
2910*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 3,
2911*7c478bd9Sstevel@tonic-gate 				    SL_TRACE|SL_ERROR,
2912*7c478bd9Sstevel@tonic-gate 				    "tl_conn_req: addr overlaps TPI message"));
2913*7c478bd9Sstevel@tonic-gate 		}
2914*7c478bd9Sstevel@tonic-gate #endif
2915*7c478bd9Sstevel@tonic-gate 		if (olen) {
2916*7c478bd9Sstevel@tonic-gate 			/*
2917*7c478bd9Sstevel@tonic-gate 			 * No options in T_CONN_REQ are supported
2918*7c478bd9Sstevel@tonic-gate 			 * by this provider, except for sockets.
2919*7c478bd9Sstevel@tonic-gate 			 */
2920*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 1,
2921*7c478bd9Sstevel@tonic-gate 				    SL_TRACE|SL_ERROR,
2922*7c478bd9Sstevel@tonic-gate 				    "tl_conn_req:options not supported "
2923*7c478bd9Sstevel@tonic-gate 				    "in message"));
2924*7c478bd9Sstevel@tonic-gate 			tl_error_ack(wq, ackmp, TBADOPT, 0, T_CONN_REQ);
2925*7c478bd9Sstevel@tonic-gate 			freemsg(mp);
2926*7c478bd9Sstevel@tonic-gate 			return;
2927*7c478bd9Sstevel@tonic-gate 		}
2928*7c478bd9Sstevel@tonic-gate 	}
2929*7c478bd9Sstevel@tonic-gate 
2930*7c478bd9Sstevel@tonic-gate 	/*
2931*7c478bd9Sstevel@tonic-gate 	 * Prevent tep from closing on us.
2932*7c478bd9Sstevel@tonic-gate 	 */
2933*7c478bd9Sstevel@tonic-gate 	if (! tl_noclose(tep)) {
2934*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
2935*7c478bd9Sstevel@tonic-gate 			"tl_conn_req:endpoint is closing"));
2936*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_CONN_REQ);
2937*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
2938*7c478bd9Sstevel@tonic-gate 		return;
2939*7c478bd9Sstevel@tonic-gate 	}
2940*7c478bd9Sstevel@tonic-gate 
2941*7c478bd9Sstevel@tonic-gate 	tep->te_state = NEXTSTATE(TE_CONN_REQ, tep->te_state);
2942*7c478bd9Sstevel@tonic-gate 	/*
2943*7c478bd9Sstevel@tonic-gate 	 * Get the endpoint to connect to:
2944*7c478bd9Sstevel@tonic-gate 	 * check that a peer bound to the DEST addr exists
2945*7c478bd9Sstevel@tonic-gate 	 * and has CONIND_number > 0.
2946*7c478bd9Sstevel@tonic-gate 	 */
2947*7c478bd9Sstevel@tonic-gate 	dst.ta_alen = alen;
2948*7c478bd9Sstevel@tonic-gate 	dst.ta_abuf = mp->b_rptr + aoff;
2949*7c478bd9Sstevel@tonic-gate 	dst.ta_zoneid = tep->te_zoneid;
2950*7c478bd9Sstevel@tonic-gate 
2951*7c478bd9Sstevel@tonic-gate 	/*
2952*7c478bd9Sstevel@tonic-gate 	 * Verify if remote addr is in use
2953*7c478bd9Sstevel@tonic-gate 	 */
2954*7c478bd9Sstevel@tonic-gate 	peer_tep = (IS_SOCKET(tep) ?
2955*7c478bd9Sstevel@tonic-gate 	    tl_sock_find_peer(tep, &ux_addr) :
2956*7c478bd9Sstevel@tonic-gate 	    tl_find_peer(tep, &dst));
2957*7c478bd9Sstevel@tonic-gate 
2958*7c478bd9Sstevel@tonic-gate 	if (peer_tep == NULL) {
2959*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
2960*7c478bd9Sstevel@tonic-gate 			"tl_conn_req:no one at connect address"));
2961*7c478bd9Sstevel@tonic-gate 		err = ECONNREFUSED;
2962*7c478bd9Sstevel@tonic-gate 	} else if (peer_tep->te_nicon >= peer_tep->te_qlen)  {
2963*7c478bd9Sstevel@tonic-gate 		/*
2964*7c478bd9Sstevel@tonic-gate 		 * validate that the number of incoming connections
2965*7c478bd9Sstevel@tonic-gate 		 * has not reached capacity on the destination endpoint
2966*7c478bd9Sstevel@tonic-gate 		 */
2967*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE,
2968*7c478bd9Sstevel@tonic-gate 			"tl_conn_req: qlen overflow connection refused"));
2969*7c478bd9Sstevel@tonic-gate 		err = ECONNREFUSED;
2970*7c478bd9Sstevel@tonic-gate 	} else if (!((peer_tep->te_state == TS_IDLE) ||
2971*7c478bd9Sstevel@tonic-gate 			(peer_tep->te_state == TS_WRES_CIND))) {
2972*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE,
2973*7c478bd9Sstevel@tonic-gate 			"tl_conn_req:peer in bad state"));
2974*7c478bd9Sstevel@tonic-gate 		err = ECONNREFUSED;
2975*7c478bd9Sstevel@tonic-gate 	}
2976*7c478bd9Sstevel@tonic-gate 
2977*7c478bd9Sstevel@tonic-gate 	/*
2978*7c478bd9Sstevel@tonic-gate 	 * preallocate now for T_DISCON_IND or T_CONN_IND
2979*7c478bd9Sstevel@tonic-gate 	 */
2980*7c478bd9Sstevel@tonic-gate 	if (err != 0) {
2981*7c478bd9Sstevel@tonic-gate 		if (peer_tep != NULL)
2982*7c478bd9Sstevel@tonic-gate 			tl_refrele(peer_tep);
2983*7c478bd9Sstevel@tonic-gate 		/* We are still expected to send T_OK_ACK */
2984*7c478bd9Sstevel@tonic-gate 		tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state);
2985*7c478bd9Sstevel@tonic-gate 		tl_ok_ack(tep->te_wq, ackmp, T_CONN_REQ);
2986*7c478bd9Sstevel@tonic-gate 		tl_closeok(tep);
2987*7c478bd9Sstevel@tonic-gate 		dimp = tpi_ack_alloc(mp, sizeof (struct T_discon_ind),
2988*7c478bd9Sstevel@tonic-gate 		    M_PROTO, T_DISCON_IND);
2989*7c478bd9Sstevel@tonic-gate 		if (dimp == NULL) {
2990*7c478bd9Sstevel@tonic-gate 			tl_merror(wq, NULL, ENOSR);
2991*7c478bd9Sstevel@tonic-gate 			return;
2992*7c478bd9Sstevel@tonic-gate 		}
2993*7c478bd9Sstevel@tonic-gate 		di = (struct T_discon_ind *)dimp->b_rptr;
2994*7c478bd9Sstevel@tonic-gate 		di->DISCON_reason = err;
2995*7c478bd9Sstevel@tonic-gate 		di->SEQ_number = BADSEQNUM;
2996*7c478bd9Sstevel@tonic-gate 
2997*7c478bd9Sstevel@tonic-gate 		tep->te_state = TS_IDLE;
2998*7c478bd9Sstevel@tonic-gate 		/*
2999*7c478bd9Sstevel@tonic-gate 		 * send T_DISCON_IND message
3000*7c478bd9Sstevel@tonic-gate 		 */
3001*7c478bd9Sstevel@tonic-gate 		putnext(tep->te_rq, dimp);
3002*7c478bd9Sstevel@tonic-gate 		return;
3003*7c478bd9Sstevel@tonic-gate 	}
3004*7c478bd9Sstevel@tonic-gate 
3005*7c478bd9Sstevel@tonic-gate 	ASSERT(IS_COTS(peer_tep));
3006*7c478bd9Sstevel@tonic-gate 
3007*7c478bd9Sstevel@tonic-gate 	/*
3008*7c478bd9Sstevel@tonic-gate 	 * Found the listener. At this point processing will continue on
3009*7c478bd9Sstevel@tonic-gate 	 * listener serializer. Close of the endpoint should be blocked while we
3010*7c478bd9Sstevel@tonic-gate 	 * switch serializers.
3011*7c478bd9Sstevel@tonic-gate 	 */
3012*7c478bd9Sstevel@tonic-gate 	tl_serializer_refhold(peer_tep->te_ser);
3013*7c478bd9Sstevel@tonic-gate 	tl_serializer_refrele(tep->te_ser);
3014*7c478bd9Sstevel@tonic-gate 	tep->te_ser = peer_tep->te_ser;
3015*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_oconp == NULL);
3016*7c478bd9Sstevel@tonic-gate 	tep->te_oconp = peer_tep;
3017*7c478bd9Sstevel@tonic-gate 
3018*7c478bd9Sstevel@tonic-gate 	/*
3019*7c478bd9Sstevel@tonic-gate 	 * It is safe to close now. Close may continue on listener serializer.
3020*7c478bd9Sstevel@tonic-gate 	 */
3021*7c478bd9Sstevel@tonic-gate 	tl_closeok(tep);
3022*7c478bd9Sstevel@tonic-gate 
3023*7c478bd9Sstevel@tonic-gate 	/*
3024*7c478bd9Sstevel@tonic-gate 	 * Pass ackmp to tl_conn_req_ser. Note that mp->b_cont may contain user
3025*7c478bd9Sstevel@tonic-gate 	 * data, so we link mp to ackmp.
3026*7c478bd9Sstevel@tonic-gate 	 */
3027*7c478bd9Sstevel@tonic-gate 	ackmp->b_cont = mp;
3028*7c478bd9Sstevel@tonic-gate 	mp = ackmp;
3029*7c478bd9Sstevel@tonic-gate 
3030*7c478bd9Sstevel@tonic-gate 	tl_refhold(tep);
3031*7c478bd9Sstevel@tonic-gate 	tl_serializer_enter(tep, tl_conn_req_ser, mp);
3032*7c478bd9Sstevel@tonic-gate }
3033*7c478bd9Sstevel@tonic-gate 
3034*7c478bd9Sstevel@tonic-gate /*
3035*7c478bd9Sstevel@tonic-gate  * Finish T_CONN_REQ processing on listener serializer.
3036*7c478bd9Sstevel@tonic-gate  */
3037*7c478bd9Sstevel@tonic-gate static void
3038*7c478bd9Sstevel@tonic-gate tl_conn_req_ser(mblk_t *mp, tl_endpt_t *tep)
3039*7c478bd9Sstevel@tonic-gate {
3040*7c478bd9Sstevel@tonic-gate 	queue_t		*wq;
3041*7c478bd9Sstevel@tonic-gate 	tl_endpt_t	*peer_tep = tep->te_oconp;
3042*7c478bd9Sstevel@tonic-gate 	mblk_t		*confmp, *cimp, *indmp;
3043*7c478bd9Sstevel@tonic-gate 	void		*opts = NULL;
3044*7c478bd9Sstevel@tonic-gate 	mblk_t		*ackmp = mp;
3045*7c478bd9Sstevel@tonic-gate 	struct T_conn_req	*creq = (struct T_conn_req *)mp->b_cont->b_rptr;
3046*7c478bd9Sstevel@tonic-gate 	struct T_conn_ind	*ci;
3047*7c478bd9Sstevel@tonic-gate 	tl_icon_t	*tip;
3048*7c478bd9Sstevel@tonic-gate 	void		*addr_startp;
3049*7c478bd9Sstevel@tonic-gate 	t_scalar_t	olen = creq->OPT_length;
3050*7c478bd9Sstevel@tonic-gate 	t_scalar_t	ooff = creq->OPT_offset;
3051*7c478bd9Sstevel@tonic-gate 	size_t 		ci_msz;
3052*7c478bd9Sstevel@tonic-gate 	size_t		size;
3053*7c478bd9Sstevel@tonic-gate 
3054*7c478bd9Sstevel@tonic-gate 	if (tep->te_closing) {
3055*7c478bd9Sstevel@tonic-gate 		TL_UNCONNECT(tep->te_oconp);
3056*7c478bd9Sstevel@tonic-gate 		tl_serializer_exit(tep);
3057*7c478bd9Sstevel@tonic-gate 		tl_refrele(tep);
3058*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
3059*7c478bd9Sstevel@tonic-gate 		return;
3060*7c478bd9Sstevel@tonic-gate 	}
3061*7c478bd9Sstevel@tonic-gate 
3062*7c478bd9Sstevel@tonic-gate 	wq = tep->te_wq;
3063*7c478bd9Sstevel@tonic-gate 	tep->te_flag |= TL_EAGER;
3064*7c478bd9Sstevel@tonic-gate 
3065*7c478bd9Sstevel@tonic-gate 	/*
3066*7c478bd9Sstevel@tonic-gate 	 * Extract preallocated ackmp from mp.
3067*7c478bd9Sstevel@tonic-gate 	 */
3068*7c478bd9Sstevel@tonic-gate 	mp = mp->b_cont;
3069*7c478bd9Sstevel@tonic-gate 	ackmp->b_cont = NULL;
3070*7c478bd9Sstevel@tonic-gate 
3071*7c478bd9Sstevel@tonic-gate 	if (olen == 0)
3072*7c478bd9Sstevel@tonic-gate 		ooff = 0;
3073*7c478bd9Sstevel@tonic-gate 
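	/*
	 * The listener was validated before switching serializers; its state
	 * may have changed since then, so re-check it here on the listener's
	 * serializer.
	 */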
3074*7c478bd9Sstevel@tonic-gate 	if (peer_tep->te_closing ||
3075*7c478bd9Sstevel@tonic-gate 	    !((peer_tep->te_state == TS_IDLE) ||
3076*7c478bd9Sstevel@tonic-gate 		(peer_tep->te_state == TS_WRES_CIND))) {
3077*7c478bd9Sstevel@tonic-gate 		TL_UNCONNECT(tep->te_oconp);
3078*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, mp, TSYSERR, ECONNREFUSED, T_CONN_REQ);
3079*7c478bd9Sstevel@tonic-gate 		freemsg(ackmp);
3080*7c478bd9Sstevel@tonic-gate 		tl_serializer_exit(tep);
3081*7c478bd9Sstevel@tonic-gate 		tl_refrele(tep);
3082*7c478bd9Sstevel@tonic-gate 		return;
3083*7c478bd9Sstevel@tonic-gate 	}
3084*7c478bd9Sstevel@tonic-gate 
3085*7c478bd9Sstevel@tonic-gate 	/*
3086*7c478bd9Sstevel@tonic-gate 	 * preallocate now for T_DISCON_IND or T_CONN_IND
3087*7c478bd9Sstevel@tonic-gate 	 */
3088*7c478bd9Sstevel@tonic-gate 	/*
3089*7c478bd9Sstevel@tonic-gate 	 * calculate length of T_CONN_IND message
3090*7c478bd9Sstevel@tonic-gate 	 */
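	/*
	 * If the listener asked for credentials (TL_SET[U]CRED), any options
	 * from the T_CONN_REQ are ignored and a single credentials option of
	 * the appropriate size is sent instead.
	 */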
3091*7c478bd9Sstevel@tonic-gate 	if (peer_tep->te_flag & TL_SETCRED) {
3092*7c478bd9Sstevel@tonic-gate 		ooff = 0;
3093*7c478bd9Sstevel@tonic-gate 		olen = (t_scalar_t)sizeof (struct opthdr) +
3094*7c478bd9Sstevel@tonic-gate 		    OPTLEN(sizeof (tl_credopt_t));
3095*7c478bd9Sstevel@tonic-gate 		/* 1 option only */
3096*7c478bd9Sstevel@tonic-gate 	} else if (peer_tep->te_flag & TL_SETUCRED) {
3097*7c478bd9Sstevel@tonic-gate 		ooff = 0;
3098*7c478bd9Sstevel@tonic-gate 		olen = (t_scalar_t)sizeof (struct opthdr) +
3099*7c478bd9Sstevel@tonic-gate 		    OPTLEN(ucredsize);
3100*7c478bd9Sstevel@tonic-gate 		/* 1 option only */
3101*7c478bd9Sstevel@tonic-gate 	}
3102*7c478bd9Sstevel@tonic-gate 	ci_msz = sizeof (struct T_conn_ind) + tep->te_alen;
3103*7c478bd9Sstevel@tonic-gate 	ci_msz = T_ALIGN(ci_msz) + olen;
3104*7c478bd9Sstevel@tonic-gate 	size = max(ci_msz, sizeof (struct T_discon_ind));
3105*7c478bd9Sstevel@tonic-gate 
3106*7c478bd9Sstevel@tonic-gate 	/*
3107*7c478bd9Sstevel@tonic-gate 	 * Save options from mp - we'll need them for T_CONN_IND.
3108*7c478bd9Sstevel@tonic-gate 	 */
3109*7c478bd9Sstevel@tonic-gate 	if (ooff != 0) {
3110*7c478bd9Sstevel@tonic-gate 		opts = kmem_alloc(olen, KM_NOSLEEP);
3111*7c478bd9Sstevel@tonic-gate 		if (opts == NULL) {
3112*7c478bd9Sstevel@tonic-gate 			/*
3113*7c478bd9Sstevel@tonic-gate 			 * roll back state changes
3114*7c478bd9Sstevel@tonic-gate 			 */
3115*7c478bd9Sstevel@tonic-gate 			tep->te_state = TS_IDLE;
3116*7c478bd9Sstevel@tonic-gate 			tl_memrecover(wq, mp, size);
3117*7c478bd9Sstevel@tonic-gate 			freemsg(ackmp);
3118*7c478bd9Sstevel@tonic-gate 			TL_UNCONNECT(tep->te_oconp);
3119*7c478bd9Sstevel@tonic-gate 			tl_serializer_exit(tep);
3120*7c478bd9Sstevel@tonic-gate 			tl_refrele(tep);
3121*7c478bd9Sstevel@tonic-gate 			return;
3122*7c478bd9Sstevel@tonic-gate 		}
3123*7c478bd9Sstevel@tonic-gate 		/* Copy options to a temp buffer */
3124*7c478bd9Sstevel@tonic-gate 		bcopy(mp->b_rptr + ooff, opts, olen);
3125*7c478bd9Sstevel@tonic-gate 	}
3126*7c478bd9Sstevel@tonic-gate 
3127*7c478bd9Sstevel@tonic-gate 	if (IS_SOCKET(tep) && !tl_disable_early_connect) {
3128*7c478bd9Sstevel@tonic-gate 		/*
3129*7c478bd9Sstevel@tonic-gate 		 * Generate a T_CONN_CON that has the identical address
3130*7c478bd9Sstevel@tonic-gate 		 * (and options) as the T_CONN_REQ.
3131*7c478bd9Sstevel@tonic-gate 		 * NOTE: assumes that the T_conn_req and T_conn_con structures
3132*7c478bd9Sstevel@tonic-gate 		 * are isomorphic.
3133*7c478bd9Sstevel@tonic-gate 		 */
3134*7c478bd9Sstevel@tonic-gate 		confmp = copyb(mp);
3135*7c478bd9Sstevel@tonic-gate 		if (! confmp) {
3136*7c478bd9Sstevel@tonic-gate 			/*
3137*7c478bd9Sstevel@tonic-gate 			 * roll back state changes
3138*7c478bd9Sstevel@tonic-gate 			 */
3139*7c478bd9Sstevel@tonic-gate 			tep->te_state = TS_IDLE;
3140*7c478bd9Sstevel@tonic-gate 			tl_memrecover(wq, mp, mp->b_wptr - mp->b_rptr);
3141*7c478bd9Sstevel@tonic-gate 			freemsg(ackmp);
3142*7c478bd9Sstevel@tonic-gate 			if (opts != NULL)
3143*7c478bd9Sstevel@tonic-gate 				kmem_free(opts, olen);
3144*7c478bd9Sstevel@tonic-gate 			TL_UNCONNECT(tep->te_oconp);
3145*7c478bd9Sstevel@tonic-gate 			tl_serializer_exit(tep);
3146*7c478bd9Sstevel@tonic-gate 			tl_refrele(tep);
3147*7c478bd9Sstevel@tonic-gate 			return;
3148*7c478bd9Sstevel@tonic-gate 		}
3149*7c478bd9Sstevel@tonic-gate 		((struct T_conn_con *)(confmp->b_rptr))->PRIM_type =
3150*7c478bd9Sstevel@tonic-gate 			T_CONN_CON;
3151*7c478bd9Sstevel@tonic-gate 	} else {
3152*7c478bd9Sstevel@tonic-gate 		confmp = NULL;
3153*7c478bd9Sstevel@tonic-gate 	}
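	/*
	 * Reuse the T_CONN_REQ mblk for the indication, growing its data
	 * buffer to the size computed above.
	 */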
3154*7c478bd9Sstevel@tonic-gate 	if ((indmp = reallocb(mp, size, 0)) == NULL) {
3155*7c478bd9Sstevel@tonic-gate 		/*
3156*7c478bd9Sstevel@tonic-gate 		 * roll back state changes
3157*7c478bd9Sstevel@tonic-gate 		 */
3158*7c478bd9Sstevel@tonic-gate 		tep->te_state = TS_IDLE;
3159*7c478bd9Sstevel@tonic-gate 		tl_memrecover(wq, mp, size);
3160*7c478bd9Sstevel@tonic-gate 		freemsg(ackmp);
3161*7c478bd9Sstevel@tonic-gate 		if (opts != NULL)
3162*7c478bd9Sstevel@tonic-gate 			kmem_free(opts, olen);
3163*7c478bd9Sstevel@tonic-gate 		freemsg(confmp);
3164*7c478bd9Sstevel@tonic-gate 		TL_UNCONNECT(tep->te_oconp);
3165*7c478bd9Sstevel@tonic-gate 		tl_serializer_exit(tep);
3166*7c478bd9Sstevel@tonic-gate 		tl_refrele(tep);
3167*7c478bd9Sstevel@tonic-gate 		return;
3168*7c478bd9Sstevel@tonic-gate 	}
3169*7c478bd9Sstevel@tonic-gate 
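	/*
	 * Allocate the tl_icon_t cell that records this pending connection on
	 * the listener's list.
	 */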
3170*7c478bd9Sstevel@tonic-gate 	tip = kmem_zalloc(sizeof (*tip), KM_NOSLEEP);
3171*7c478bd9Sstevel@tonic-gate 	if (tip == NULL) {
3172*7c478bd9Sstevel@tonic-gate 		/*
3173*7c478bd9Sstevel@tonic-gate 		 * roll back state changes
3174*7c478bd9Sstevel@tonic-gate 		 */
3175*7c478bd9Sstevel@tonic-gate 		tep->te_state = TS_IDLE;
3176*7c478bd9Sstevel@tonic-gate 		tl_memrecover(wq, indmp, sizeof (*tip));
3177*7c478bd9Sstevel@tonic-gate 		freemsg(ackmp);
3178*7c478bd9Sstevel@tonic-gate 		if (opts != NULL)
3179*7c478bd9Sstevel@tonic-gate 			kmem_free(opts, olen);
3180*7c478bd9Sstevel@tonic-gate 		freemsg(confmp);
3181*7c478bd9Sstevel@tonic-gate 		TL_UNCONNECT(tep->te_oconp);
3182*7c478bd9Sstevel@tonic-gate 		tl_serializer_exit(tep);
3183*7c478bd9Sstevel@tonic-gate 		tl_refrele(tep);
3184*7c478bd9Sstevel@tonic-gate 		return;
3185*7c478bd9Sstevel@tonic-gate 	}
3186*7c478bd9Sstevel@tonic-gate 	tip->ti_mp = NULL;
3187*7c478bd9Sstevel@tonic-gate 
3188*7c478bd9Sstevel@tonic-gate 	/*
3189*7c478bd9Sstevel@tonic-gate 	 * memory is now committed for T_DISCON_IND/T_CONN_IND/T_CONN_CON
3190*7c478bd9Sstevel@tonic-gate 	 * and tl_icon_t cell.
3191*7c478bd9Sstevel@tonic-gate 	 */
3192*7c478bd9Sstevel@tonic-gate 
3193*7c478bd9Sstevel@tonic-gate 	/*
3194*7c478bd9Sstevel@tonic-gate 	 * ack validity of request and send the peer credential in the ACK.
3195*7c478bd9Sstevel@tonic-gate 	 */
3196*7c478bd9Sstevel@tonic-gate 	tep->te_state = NEXTSTATE(TE_OK_ACK1, tep->te_state);
3197*7c478bd9Sstevel@tonic-gate 
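	/*
	 * For the early (socket) T_CONN_CON, attach the listener's
	 * credentials so the client can retrieve them.
	 */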
3198*7c478bd9Sstevel@tonic-gate 	if (peer_tep != NULL && peer_tep->te_credp != NULL &&
3199*7c478bd9Sstevel@tonic-gate 	    confmp != NULL) {
3200*7c478bd9Sstevel@tonic-gate 		mblk_setcred(confmp, peer_tep->te_credp);
3201*7c478bd9Sstevel@tonic-gate 		DB_CPID(confmp) = peer_tep->te_cpid;
3202*7c478bd9Sstevel@tonic-gate 	}
3203*7c478bd9Sstevel@tonic-gate 
3204*7c478bd9Sstevel@tonic-gate 	tl_ok_ack(wq, ackmp, T_CONN_REQ);
3205*7c478bd9Sstevel@tonic-gate 
3206*7c478bd9Sstevel@tonic-gate 	/*
3207*7c478bd9Sstevel@tonic-gate 	 * prepare message to send T_CONN_IND
3208*7c478bd9Sstevel@tonic-gate 	 */
3209*7c478bd9Sstevel@tonic-gate 	/*
3210*7c478bd9Sstevel@tonic-gate 	 * allocate the message - original data blocks retained
3211*7c478bd9Sstevel@tonic-gate 	 * in the returned mblk
3212*7c478bd9Sstevel@tonic-gate 	 */
3213*7c478bd9Sstevel@tonic-gate 	cimp = tl_resizemp(indmp, size);
3214*7c478bd9Sstevel@tonic-gate 	if (! cimp) {
3215*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
3216*7c478bd9Sstevel@tonic-gate 			"tl_conn_req:con_ind:allocb failure"));
3217*7c478bd9Sstevel@tonic-gate 		tl_merror(wq, indmp, ENOMEM);
3218*7c478bd9Sstevel@tonic-gate 		TL_UNCONNECT(tep->te_oconp);
3219*7c478bd9Sstevel@tonic-gate 		tl_serializer_exit(tep);
3220*7c478bd9Sstevel@tonic-gate 		tl_refrele(tep);
3221*7c478bd9Sstevel@tonic-gate 		if (opts != NULL)
3222*7c478bd9Sstevel@tonic-gate 			kmem_free(opts, olen);
3223*7c478bd9Sstevel@tonic-gate 		freemsg(confmp);
3224*7c478bd9Sstevel@tonic-gate 		ASSERT(tip->ti_mp == NULL);
3225*7c478bd9Sstevel@tonic-gate 		kmem_free(tip, sizeof (*tip));
3226*7c478bd9Sstevel@tonic-gate 		return;
3227*7c478bd9Sstevel@tonic-gate 	}
3228*7c478bd9Sstevel@tonic-gate 
3229*7c478bd9Sstevel@tonic-gate 	DB_TYPE(cimp) = M_PROTO;
3230*7c478bd9Sstevel@tonic-gate 	ci = (struct T_conn_ind *)cimp->b_rptr;
3231*7c478bd9Sstevel@tonic-gate 	ci->PRIM_type  = T_CONN_IND;
3232*7c478bd9Sstevel@tonic-gate 	ci->SRC_offset = (t_scalar_t)sizeof (struct T_conn_ind);
3233*7c478bd9Sstevel@tonic-gate 	ci->SRC_length = tep->te_alen;
3234*7c478bd9Sstevel@tonic-gate 	ci->SEQ_number = tep->te_seqno;
3235*7c478bd9Sstevel@tonic-gate 
3236*7c478bd9Sstevel@tonic-gate 	addr_startp = cimp->b_rptr + ci->SRC_offset;
3237*7c478bd9Sstevel@tonic-gate 	bcopy(tep->te_abuf, addr_startp, tep->te_alen);
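	/*
	 * Fill in the T_CONN_IND options: either the credentials requested by
	 * the listener (TL_SET[U]CRED), the options copied from the
	 * T_CONN_REQ, or none at all.
	 */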
3238*7c478bd9Sstevel@tonic-gate 	if (peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED)) {
3239*7c478bd9Sstevel@tonic-gate 		ci->OPT_offset = (t_scalar_t)T_ALIGN(ci->SRC_offset +
3240*7c478bd9Sstevel@tonic-gate 					ci->SRC_length);
3241*7c478bd9Sstevel@tonic-gate 		ci->OPT_length = olen; /* because only 1 option */
3242*7c478bd9Sstevel@tonic-gate 		tl_fill_option(cimp->b_rptr + ci->OPT_offset,
3243*7c478bd9Sstevel@tonic-gate 			DB_CREDDEF(cimp, tep->te_credp),
3244*7c478bd9Sstevel@tonic-gate 			TLPID(cimp, tep),
3245*7c478bd9Sstevel@tonic-gate 			peer_tep->te_flag);
3246*7c478bd9Sstevel@tonic-gate 	} else if (ooff != 0) {
3247*7c478bd9Sstevel@tonic-gate 		/* Copy option from T_CONN_REQ */
3248*7c478bd9Sstevel@tonic-gate 		ci->OPT_offset = (t_scalar_t)T_ALIGN(ci->SRC_offset +
3249*7c478bd9Sstevel@tonic-gate 					ci->SRC_length);
3250*7c478bd9Sstevel@tonic-gate 		ci->OPT_length = olen;
3251*7c478bd9Sstevel@tonic-gate 		ASSERT(opts != NULL);
3252*7c478bd9Sstevel@tonic-gate 		bcopy(opts, (void *)((uintptr_t)ci + ci->OPT_offset), olen);
3253*7c478bd9Sstevel@tonic-gate 	} else {
3254*7c478bd9Sstevel@tonic-gate 		ci->OPT_offset = 0;
3255*7c478bd9Sstevel@tonic-gate 		ci->OPT_length = 0;
3256*7c478bd9Sstevel@tonic-gate 	}
3257*7c478bd9Sstevel@tonic-gate 	if (opts != NULL)
3258*7c478bd9Sstevel@tonic-gate 		kmem_free(opts, olen);
3259*7c478bd9Sstevel@tonic-gate 
3260*7c478bd9Sstevel@tonic-gate 	/*
3261*7c478bd9Sstevel@tonic-gate 	 * register connection request with server peer
3262*7c478bd9Sstevel@tonic-gate 	 * append to list of incoming connections
3263*7c478bd9Sstevel@tonic-gate 	 * increment references for both peer_tep and tep: peer_tep is placed on
3264*7c478bd9Sstevel@tonic-gate 	 * te_oconp and tep is placed on the listener's queue.
3265*7c478bd9Sstevel@tonic-gate 	 */
3266*7c478bd9Sstevel@tonic-gate 	tip->ti_tep = tep;
3267*7c478bd9Sstevel@tonic-gate 	tip->ti_seqno = tep->te_seqno;
3268*7c478bd9Sstevel@tonic-gate 	list_insert_tail(&peer_tep->te_iconp, tip);
3269*7c478bd9Sstevel@tonic-gate 	peer_tep->te_nicon++;
3270*7c478bd9Sstevel@tonic-gate 
3271*7c478bd9Sstevel@tonic-gate 	peer_tep->te_state = NEXTSTATE(TE_CONN_IND, peer_tep->te_state);
3272*7c478bd9Sstevel@tonic-gate 	/*
3273*7c478bd9Sstevel@tonic-gate 	 * send the T_CONN_IND message
3274*7c478bd9Sstevel@tonic-gate 	 */
3275*7c478bd9Sstevel@tonic-gate 	putnext(peer_tep->te_rq, cimp);
3276*7c478bd9Sstevel@tonic-gate 
3277*7c478bd9Sstevel@tonic-gate 	/*
3278*7c478bd9Sstevel@tonic-gate 	 * Send a T_CONN_CON message for sockets.
3279*7c478bd9Sstevel@tonic-gate 	 * Disable the queues until we have reached the correct state!
3280*7c478bd9Sstevel@tonic-gate 	 */
3281*7c478bd9Sstevel@tonic-gate 	if (confmp != NULL) {
3282*7c478bd9Sstevel@tonic-gate 		tep->te_state = NEXTSTATE(TE_CONN_CON, tep->te_state);
3283*7c478bd9Sstevel@tonic-gate 		noenable(wq);
3284*7c478bd9Sstevel@tonic-gate 		putnext(tep->te_rq, confmp);
3285*7c478bd9Sstevel@tonic-gate 	}
3286*7c478bd9Sstevel@tonic-gate 	/*
3287*7c478bd9Sstevel@tonic-gate 	 * We need to increment the tep reference (tep is now referenced by the
3288*7c478bd9Sstevel@tonic-gate 	 * server's list of pending connections) and to decrement it before
3289*7c478bd9Sstevel@tonic-gate 	 * exiting the serializer. The two operations cancel each other out, so
3290*7c478bd9Sstevel@tonic-gate 	 * we don't modify the reference count at all.
3291*7c478bd9Sstevel@tonic-gate 	 */
3292*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_refcnt >= 2);
3293*7c478bd9Sstevel@tonic-gate 	ASSERT(peer_tep->te_refcnt >= 2);
3294*7c478bd9Sstevel@tonic-gate 	tl_serializer_exit(tep);
3295*7c478bd9Sstevel@tonic-gate }
3296*7c478bd9Sstevel@tonic-gate 
3297*7c478bd9Sstevel@tonic-gate 
3298*7c478bd9Sstevel@tonic-gate 
3299*7c478bd9Sstevel@tonic-gate /*
3300*7c478bd9Sstevel@tonic-gate  * Handle T_conn_res on listener stream. Called on listener serializer.
3303*7c478bd9Sstevel@tonic-gate  * No one accesses acceptor at this point, so it is safe to modify acceptor.
3304*7c478bd9Sstevel@tonic-gate  * Switch eager serializer to acceptor's.
3305*7c478bd9Sstevel@tonic-gate  *
3306*7c478bd9Sstevel@tonic-gate  * If TL_SET[U]CRED generate the credentials options.
3307*7c478bd9Sstevel@tonic-gate  * For sockets tl_conn_req has already generated the T_CONN_CON.
3308*7c478bd9Sstevel@tonic-gate  */
3309*7c478bd9Sstevel@tonic-gate static void
3310*7c478bd9Sstevel@tonic-gate tl_conn_res(mblk_t *mp, tl_endpt_t *tep)
3311*7c478bd9Sstevel@tonic-gate {
3312*7c478bd9Sstevel@tonic-gate 	queue_t			*wq;
3313*7c478bd9Sstevel@tonic-gate 	struct T_conn_res	*cres = (struct T_conn_res *)mp->b_rptr;
3314*7c478bd9Sstevel@tonic-gate 	ssize_t			msz = MBLKL(mp);
3315*7c478bd9Sstevel@tonic-gate 	t_scalar_t		olen, ooff, err = 0;
3316*7c478bd9Sstevel@tonic-gate 	t_scalar_t		prim = cres->PRIM_type;
3317*7c478bd9Sstevel@tonic-gate 	uchar_t			*addr_startp;
3318*7c478bd9Sstevel@tonic-gate 	tl_endpt_t 		*acc_ep = NULL, *cl_ep = NULL;
3319*7c478bd9Sstevel@tonic-gate 	tl_icon_t		*tip;
3320*7c478bd9Sstevel@tonic-gate 	size_t			size;
3321*7c478bd9Sstevel@tonic-gate 	mblk_t			*ackmp, *respmp;
3322*7c478bd9Sstevel@tonic-gate 	mblk_t			*dimp, *ccmp = NULL;
3323*7c478bd9Sstevel@tonic-gate 	struct T_discon_ind	*di;
3324*7c478bd9Sstevel@tonic-gate 	struct T_conn_con	*cc;
3325*7c478bd9Sstevel@tonic-gate 	boolean_t		client_noclose_set = B_FALSE;
3326*7c478bd9Sstevel@tonic-gate 	boolean_t		switch_client_serializer = B_TRUE;
3327*7c478bd9Sstevel@tonic-gate 
3328*7c478bd9Sstevel@tonic-gate 	ASSERT(IS_COTS(tep));
3329*7c478bd9Sstevel@tonic-gate 
3330*7c478bd9Sstevel@tonic-gate 	if (tep->te_closing) {
3331*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
3332*7c478bd9Sstevel@tonic-gate 		return;
3333*7c478bd9Sstevel@tonic-gate 	}
3334*7c478bd9Sstevel@tonic-gate 
3335*7c478bd9Sstevel@tonic-gate 	wq = tep->te_wq;
3336*7c478bd9Sstevel@tonic-gate 
3337*7c478bd9Sstevel@tonic-gate 	/*
3338*7c478bd9Sstevel@tonic-gate 	 * preallocate memory for:
3339*7c478bd9Sstevel@tonic-gate 	 * 1. max of T_ERROR_ACK and T_OK_ACK
3340*7c478bd9Sstevel@tonic-gate 	 *	==> known max T_ERROR_ACK
3341*7c478bd9Sstevel@tonic-gate 	 * 2. max of T_DISCON_IND and T_CONN_CON
3342*7c478bd9Sstevel@tonic-gate 	 */
3343*7c478bd9Sstevel@tonic-gate 	ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED);
3344*7c478bd9Sstevel@tonic-gate 	if (! ackmp) {
3345*7c478bd9Sstevel@tonic-gate 		tl_memrecover(wq, mp, sizeof (struct T_error_ack));
3346*7c478bd9Sstevel@tonic-gate 		return;
3347*7c478bd9Sstevel@tonic-gate 	}
3348*7c478bd9Sstevel@tonic-gate 	/*
3349*7c478bd9Sstevel@tonic-gate 	 * memory committed for T_OK_ACK/T_ERROR_ACK now
3350*7c478bd9Sstevel@tonic-gate 	 * will be committed for T_DISCON_IND/T_CONN_CON later
3351*7c478bd9Sstevel@tonic-gate 	 */
3352*7c478bd9Sstevel@tonic-gate 
3353*7c478bd9Sstevel@tonic-gate 
3354*7c478bd9Sstevel@tonic-gate 	ASSERT(prim == T_CONN_RES || prim == O_T_CONN_RES);
3355*7c478bd9Sstevel@tonic-gate 
3356*7c478bd9Sstevel@tonic-gate 	/*
3357*7c478bd9Sstevel@tonic-gate 	 * validate state
3358*7c478bd9Sstevel@tonic-gate 	 */
3359*7c478bd9Sstevel@tonic-gate 	if (tep->te_state != TS_WRES_CIND) {
3360*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1,
3361*7c478bd9Sstevel@tonic-gate 			SL_TRACE|SL_ERROR,
3362*7c478bd9Sstevel@tonic-gate 			"tl_wput:T_CONN_RES:out of state, state=%d",
3363*7c478bd9Sstevel@tonic-gate 			tep->te_state));
3364*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, ackmp, TOUTSTATE, 0, prim);
3365*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
3366*7c478bd9Sstevel@tonic-gate 		return;
3367*7c478bd9Sstevel@tonic-gate 	}
3368*7c478bd9Sstevel@tonic-gate 
3369*7c478bd9Sstevel@tonic-gate 	/*
3370*7c478bd9Sstevel@tonic-gate 	 * validate the message
3371*7c478bd9Sstevel@tonic-gate 	 * Note: dereference fields in struct inside message only
3372*7c478bd9Sstevel@tonic-gate 	 * after validating the message length.
3373*7c478bd9Sstevel@tonic-gate 	 */
3374*7c478bd9Sstevel@tonic-gate 	if (msz < sizeof (struct T_conn_res)) {
3375*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
3376*7c478bd9Sstevel@tonic-gate 			"tl_conn_res:invalid message length"));
3377*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, ackmp, TSYSERR, EINVAL, prim);
3378*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
3379*7c478bd9Sstevel@tonic-gate 		return;
3380*7c478bd9Sstevel@tonic-gate 	}
3381*7c478bd9Sstevel@tonic-gate 	olen = cres->OPT_length;
3382*7c478bd9Sstevel@tonic-gate 	ooff = cres->OPT_offset;
3383*7c478bd9Sstevel@tonic-gate 	if (((olen > 0) && ((ooff + olen) > msz))) {
3384*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
3385*7c478bd9Sstevel@tonic-gate 			"tl_conn_res:invalid message"));
3386*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, ackmp, TSYSERR, EINVAL, prim);
3387*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
3388*7c478bd9Sstevel@tonic-gate 		return;
3389*7c478bd9Sstevel@tonic-gate 	}
3390*7c478bd9Sstevel@tonic-gate 	if (olen) {
3391*7c478bd9Sstevel@tonic-gate 		/*
3392*7c478bd9Sstevel@tonic-gate 		 * no opts in connect res
3393*7c478bd9Sstevel@tonic-gate 		 * supported in this provider
3394*7c478bd9Sstevel@tonic-gate 		 */
3395*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
3396*7c478bd9Sstevel@tonic-gate 			"tl_conn_res:options not supported in message"));
3397*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, ackmp, TBADOPT, 0, prim);
3398*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
3399*7c478bd9Sstevel@tonic-gate 		return;
3400*7c478bd9Sstevel@tonic-gate 	}
3401*7c478bd9Sstevel@tonic-gate 
3402*7c478bd9Sstevel@tonic-gate 	tep->te_state = NEXTSTATE(TE_CONN_RES, tep->te_state);
3403*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_state == TS_WACK_CRES);
3404*7c478bd9Sstevel@tonic-gate 
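	/*
	 * Sequence numbers between BADSEQNUM and TL_MINOR_START can never
	 * have been sent in a T_CONN_IND, so reject them outright.
	 */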
3405*7c478bd9Sstevel@tonic-gate 	if (cres->SEQ_number < TL_MINOR_START &&
3406*7c478bd9Sstevel@tonic-gate 		cres->SEQ_number >= BADSEQNUM) {
3407*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
3408*7c478bd9Sstevel@tonic-gate 			"tl_conn_res:remote endpoint sequence number bad"));
3409*7c478bd9Sstevel@tonic-gate 		tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3410*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, ackmp, TBADSEQ, 0, prim);
3411*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
3412*7c478bd9Sstevel@tonic-gate 		return;
3413*7c478bd9Sstevel@tonic-gate 	}
3414*7c478bd9Sstevel@tonic-gate 
3415*7c478bd9Sstevel@tonic-gate 	/*
3416*7c478bd9Sstevel@tonic-gate 	 * find accepting endpoint. Will have extra reference if found.
3417*7c478bd9Sstevel@tonic-gate 	 */
3418*7c478bd9Sstevel@tonic-gate 	if (mod_hash_find_cb(tep->te_transport->tr_ai_hash,
3419*7c478bd9Sstevel@tonic-gate 		(mod_hash_key_t)(uintptr_t)cres->ACCEPTOR_id,
3420*7c478bd9Sstevel@tonic-gate 		(mod_hash_val_t *)&acc_ep, tl_find_callback) != 0) {
3421*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
3422*7c478bd9Sstevel@tonic-gate 			"tl_conn_res:bad accepting endpoint"));
3423*7c478bd9Sstevel@tonic-gate 		tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3424*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, ackmp, TBADF, 0, prim);
3425*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
3426*7c478bd9Sstevel@tonic-gate 		return;
3427*7c478bd9Sstevel@tonic-gate 	}
3428*7c478bd9Sstevel@tonic-gate 
3429*7c478bd9Sstevel@tonic-gate 	/*
3430*7c478bd9Sstevel@tonic-gate 	 * Prevent acceptor from closing.
3431*7c478bd9Sstevel@tonic-gate 	 */
3432*7c478bd9Sstevel@tonic-gate 	if (! tl_noclose(acc_ep)) {
3433*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
3434*7c478bd9Sstevel@tonic-gate 			"tl_conn_res:bad accepting endpoint"));
3435*7c478bd9Sstevel@tonic-gate 		tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3436*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, ackmp, TBADF, 0, prim);
3437*7c478bd9Sstevel@tonic-gate 		tl_refrele(acc_ep);
3438*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
3439*7c478bd9Sstevel@tonic-gate 		return;
3440*7c478bd9Sstevel@tonic-gate 	}
3441*7c478bd9Sstevel@tonic-gate 
3442*7c478bd9Sstevel@tonic-gate 	acc_ep->te_flag |= TL_ACCEPTOR;
3443*7c478bd9Sstevel@tonic-gate 
3444*7c478bd9Sstevel@tonic-gate 	/*
3445*7c478bd9Sstevel@tonic-gate 	 * validate that the accepting endpoint, if different from the
3446*7c478bd9Sstevel@tonic-gate 	 * listening one, has an address bound => state is TS_IDLE
3447*7c478bd9Sstevel@tonic-gate 	 * TROUBLE in XPG4 !!?
3448*7c478bd9Sstevel@tonic-gate 	 */
3449*7c478bd9Sstevel@tonic-gate 	if ((tep != acc_ep) && (acc_ep->te_state != TS_IDLE)) {
3450*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
3451*7c478bd9Sstevel@tonic-gate 			"tl_conn_res:accepting endpoint has no address bound,"
3452*7c478bd9Sstevel@tonic-gate 			"state=%d", acc_ep->te_state));
3453*7c478bd9Sstevel@tonic-gate 		tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3454*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, ackmp, TOUTSTATE, 0, prim);
3455*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
3456*7c478bd9Sstevel@tonic-gate 		tl_closeok(acc_ep);
3457*7c478bd9Sstevel@tonic-gate 		tl_refrele(acc_ep);
3458*7c478bd9Sstevel@tonic-gate 		return;
3459*7c478bd9Sstevel@tonic-gate 	}
3460*7c478bd9Sstevel@tonic-gate 
3461*7c478bd9Sstevel@tonic-gate 	/*
3462*7c478bd9Sstevel@tonic-gate 	 * if the accepting endpoint is the same as the listening one,
3463*7c478bd9Sstevel@tonic-gate 	 * validate that no other incoming connection is on the queue
3464*7c478bd9Sstevel@tonic-gate 	 */
3465*7c478bd9Sstevel@tonic-gate 
3466*7c478bd9Sstevel@tonic-gate 	if ((tep == acc_ep) && (tep->te_nicon > 1)) {
3467*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
3468*7c478bd9Sstevel@tonic-gate 			"tl_conn_res: > 1 conn_ind on listener-acceptor"));
3469*7c478bd9Sstevel@tonic-gate 		tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3470*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, ackmp, TBADF, 0, prim);
3471*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
3472*7c478bd9Sstevel@tonic-gate 		tl_closeok(acc_ep);
3473*7c478bd9Sstevel@tonic-gate 		tl_refrele(acc_ep);
3474*7c478bd9Sstevel@tonic-gate 		return;
3475*7c478bd9Sstevel@tonic-gate 	}
3476*7c478bd9Sstevel@tonic-gate 
3477*7c478bd9Sstevel@tonic-gate 	/*
3478*7c478bd9Sstevel@tonic-gate 	 * Find (and later mark for deletion) the entry corresponding to the
3479*7c478bd9Sstevel@tonic-gate 	 * client on the listener's list of pending connections:
3480*7c478bd9Sstevel@tonic-gate 	 * search the list to see if the client is one of the connections
3481*7c478bd9Sstevel@tonic-gate 	 * recorded against this listener.
3482*7c478bd9Sstevel@tonic-gate 	 */
3483*7c478bd9Sstevel@tonic-gate 	tip = tl_icon_find(tep, cres->SEQ_number);
3484*7c478bd9Sstevel@tonic-gate 	if (tip == NULL) {
3485*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE|SL_ERROR,
3486*7c478bd9Sstevel@tonic-gate 			"tl_conn_res:no client in listener list"));
3487*7c478bd9Sstevel@tonic-gate 		tep->te_state = NEXTSTATE(TE_ERROR_ACK, tep->te_state);
3488*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, ackmp, TBADSEQ, 0, prim);
3489*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
3490*7c478bd9Sstevel@tonic-gate 		tl_closeok(acc_ep);
3491*7c478bd9Sstevel@tonic-gate 		tl_refrele(acc_ep);
3492*7c478bd9Sstevel@tonic-gate 		return;
3493*7c478bd9Sstevel@tonic-gate 	}
3494*7c478bd9Sstevel@tonic-gate 
3495*7c478bd9Sstevel@tonic-gate 	/*
3496*7c478bd9Sstevel@tonic-gate 	 * If ti_tep is NULL the client has already closed. In this case
3497*7c478bd9Sstevel@tonic-gate 	 * the code below will avoid any action on the client side
3498*7c478bd9Sstevel@tonic-gate 	 * but complete the server and acceptor state transitions.
3499*7c478bd9Sstevel@tonic-gate 	 */
3500*7c478bd9Sstevel@tonic-gate 	ASSERT(tip->ti_tep == NULL ||
3501*7c478bd9Sstevel@tonic-gate 		tip->ti_tep->te_seqno == cres->SEQ_number);
3502*7c478bd9Sstevel@tonic-gate 	cl_ep = tip->ti_tep;
3503*7c478bd9Sstevel@tonic-gate 
3504*7c478bd9Sstevel@tonic-gate 	/*
3505*7c478bd9Sstevel@tonic-gate 	 * If the client is present it is switched from listener's to acceptor's
3506*7c478bd9Sstevel@tonic-gate 	 * serializer. We should block client closes while serializers are
3507*7c478bd9Sstevel@tonic-gate 	 * being switched.
3508*7c478bd9Sstevel@tonic-gate 	 *
3509*7c478bd9Sstevel@tonic-gate 	 * It is possible that the client is present but is currently being
3510*7c478bd9Sstevel@tonic-gate 	 * closed. There are two possible cases:
3511*7c478bd9Sstevel@tonic-gate 	 *
3512*7c478bd9Sstevel@tonic-gate 	 * 1) The client has already entered tl_close_finish_ser() and sent
3513*7c478bd9Sstevel@tonic-gate 	 *    T_ORDREL_IND. In this case we can just ignore the client (but we
3514*7c478bd9Sstevel@tonic-gate 	 *    still need to send all messages from tip->ti_mp to the acceptor).
3515*7c478bd9Sstevel@tonic-gate 	 *
3516*7c478bd9Sstevel@tonic-gate 	 * 2) The client started the close but has not entered
3517*7c478bd9Sstevel@tonic-gate 	 *    tl_close_finish_ser() yet. In this case, the client is already
3518*7c478bd9Sstevel@tonic-gate 	 *    proceeding asynchronously on the listener's serializer, so we're
3519*7c478bd9Sstevel@tonic-gate 	 *    forced to change the acceptor to use the listener's serializer to
3520*7c478bd9Sstevel@tonic-gate 	 *    ensure that any operations on the acceptor are serialized with
3521*7c478bd9Sstevel@tonic-gate 	 *    respect to the close that's in-progress.
3522*7c478bd9Sstevel@tonic-gate 	 */
3523*7c478bd9Sstevel@tonic-gate 	if (cl_ep != NULL) {
3524*7c478bd9Sstevel@tonic-gate 		if (tl_noclose(cl_ep)) {
3525*7c478bd9Sstevel@tonic-gate 			client_noclose_set = B_TRUE;
3526*7c478bd9Sstevel@tonic-gate 		} else {
3527*7c478bd9Sstevel@tonic-gate 			/*
3528*7c478bd9Sstevel@tonic-gate 			 * Client is closing. If it has sent the
3529*7c478bd9Sstevel@tonic-gate 			 * T_ORDREL_IND, we can simply ignore it - otherwise,
3530*7c478bd9Sstevel@tonic-gate 			 * we have to let the client continue until it is
3531*7c478bd9Sstevel@tonic-gate 			 * sent.
3532*7c478bd9Sstevel@tonic-gate 			 *
3533*7c478bd9Sstevel@tonic-gate 			 * If we do continue using the client, the acceptor
3534*7c478bd9Sstevel@tonic-gate 			 * will switch to the client's serializer, which the
3535*7c478bd9Sstevel@tonic-gate 			 * client also uses for its close.
3536*7c478bd9Sstevel@tonic-gate 			 */
3537*7c478bd9Sstevel@tonic-gate 			tl_client_closing_when_accepting++;
3538*7c478bd9Sstevel@tonic-gate 			switch_client_serializer = B_FALSE;
3539*7c478bd9Sstevel@tonic-gate 			if (!IS_SOCKET(cl_ep) || tl_disable_early_connect ||
3540*7c478bd9Sstevel@tonic-gate 			    cl_ep->te_state == -1)
3541*7c478bd9Sstevel@tonic-gate 				cl_ep = NULL;
3542*7c478bd9Sstevel@tonic-gate 		}
3543*7c478bd9Sstevel@tonic-gate 	}
3544*7c478bd9Sstevel@tonic-gate 
3545*7c478bd9Sstevel@tonic-gate 	if (cl_ep != NULL) {
3546*7c478bd9Sstevel@tonic-gate 		/*
3547*7c478bd9Sstevel@tonic-gate 		 * validate client state to be TS_WCON_CREQ or TS_DATA_XFER
3548*7c478bd9Sstevel@tonic-gate 		 * (latter for sockets only)
3549*7c478bd9Sstevel@tonic-gate 		 */
3550*7c478bd9Sstevel@tonic-gate 		if (cl_ep->te_state != TS_WCON_CREQ &&
3551*7c478bd9Sstevel@tonic-gate 		    (cl_ep->te_state != TS_DATA_XFER &&
3552*7c478bd9Sstevel@tonic-gate 		    IS_SOCKET(cl_ep))) {
3553*7c478bd9Sstevel@tonic-gate 			err = ECONNREFUSED;
3554*7c478bd9Sstevel@tonic-gate 			/*
3555*7c478bd9Sstevel@tonic-gate 			 * T_DISCON_IND sent later after committing memory
3556*7c478bd9Sstevel@tonic-gate 			 * and acking validity of request
3557*7c478bd9Sstevel@tonic-gate 			 */
3558*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 2, SL_TRACE,
3559*7c478bd9Sstevel@tonic-gate 				"tl_conn_res:peer in bad state"));
3560*7c478bd9Sstevel@tonic-gate 		}
3561*7c478bd9Sstevel@tonic-gate 
3562*7c478bd9Sstevel@tonic-gate 		/*
3563*7c478bd9Sstevel@tonic-gate 		 * preallocate now for T_DISCON_IND or T_CONN_CON
3564*7c478bd9Sstevel@tonic-gate 		 * ack validity of request (T_OK_ACK) after memory committed
3565*7c478bd9Sstevel@tonic-gate 		 */
3566*7c478bd9Sstevel@tonic-gate 
3567*7c478bd9Sstevel@tonic-gate 		if (err)
3568*7c478bd9Sstevel@tonic-gate 			size = sizeof (struct T_discon_ind);
3569*7c478bd9Sstevel@tonic-gate 		else {
3570*7c478bd9Sstevel@tonic-gate 			/*
3571*7c478bd9Sstevel@tonic-gate 			 * calculate length of T_CONN_CON message
3572*7c478bd9Sstevel@tonic-gate 			 */
3573*7c478bd9Sstevel@tonic-gate 			olen = 0;
3574*7c478bd9Sstevel@tonic-gate 			if (cl_ep->te_flag & TL_SETCRED) {
3575*7c478bd9Sstevel@tonic-gate 				olen = (t_scalar_t)sizeof (struct opthdr) +
3576*7c478bd9Sstevel@tonic-gate 					OPTLEN(sizeof (tl_credopt_t));
3577*7c478bd9Sstevel@tonic-gate 			} else if (cl_ep->te_flag & TL_SETUCRED) {
3578*7c478bd9Sstevel@tonic-gate 				olen = (t_scalar_t)sizeof (struct opthdr) +
3579*7c478bd9Sstevel@tonic-gate 					OPTLEN(ucredsize);
3580*7c478bd9Sstevel@tonic-gate 			}
3581*7c478bd9Sstevel@tonic-gate 			size = T_ALIGN(sizeof (struct T_conn_con) +
3582*7c478bd9Sstevel@tonic-gate 					acc_ep->te_alen) + olen;
3583*7c478bd9Sstevel@tonic-gate 		}
3584*7c478bd9Sstevel@tonic-gate 		if ((respmp = reallocb(mp, size, 0)) == NULL) {
3585*7c478bd9Sstevel@tonic-gate 			/*
3586*7c478bd9Sstevel@tonic-gate 			 * roll back state changes
3587*7c478bd9Sstevel@tonic-gate 			 */
3588*7c478bd9Sstevel@tonic-gate 			tep->te_state = TS_WRES_CIND;
3589*7c478bd9Sstevel@tonic-gate 			tl_memrecover(wq, mp, size);
3590*7c478bd9Sstevel@tonic-gate 			freemsg(ackmp);
3591*7c478bd9Sstevel@tonic-gate 			if (client_noclose_set)
3592*7c478bd9Sstevel@tonic-gate 				tl_closeok(cl_ep);
3593*7c478bd9Sstevel@tonic-gate 			tl_closeok(acc_ep);
3594*7c478bd9Sstevel@tonic-gate 			tl_refrele(acc_ep);
3595*7c478bd9Sstevel@tonic-gate 			return;
3596*7c478bd9Sstevel@tonic-gate 		}
3597*7c478bd9Sstevel@tonic-gate 		mp = NULL;
3598*7c478bd9Sstevel@tonic-gate 	}
3599*7c478bd9Sstevel@tonic-gate 
3600*7c478bd9Sstevel@tonic-gate 	/*
3601*7c478bd9Sstevel@tonic-gate 	 * Now ack validity of request
3602*7c478bd9Sstevel@tonic-gate 	 */
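	/*
	 * TE_OK_ACK2: accepting on the listening stream with no other
	 * connect indication pending; TE_OK_ACK3: accepting on a different
	 * stream with no other indication pending; TE_OK_ACK4: other connect
	 * indications remain outstanding.
	 */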
3603*7c478bd9Sstevel@tonic-gate 	if (tep->te_nicon == 1) {
3604*7c478bd9Sstevel@tonic-gate 		if (tep == acc_ep)
3605*7c478bd9Sstevel@tonic-gate 			tep->te_state = NEXTSTATE(TE_OK_ACK2, tep->te_state);
3606*7c478bd9Sstevel@tonic-gate 		else
3607*7c478bd9Sstevel@tonic-gate 			tep->te_state = NEXTSTATE(TE_OK_ACK3, tep->te_state);
3608*7c478bd9Sstevel@tonic-gate 	} else
3609*7c478bd9Sstevel@tonic-gate 		tep->te_state = NEXTSTATE(TE_OK_ACK4, tep->te_state);
3610*7c478bd9Sstevel@tonic-gate 
3611*7c478bd9Sstevel@tonic-gate 	/*
3612*7c478bd9Sstevel@tonic-gate 	 * send T_DISCON_IND now if client state validation failed earlier
3613*7c478bd9Sstevel@tonic-gate 	 */
3614*7c478bd9Sstevel@tonic-gate 	if (err) {
3615*7c478bd9Sstevel@tonic-gate 		tl_ok_ack(wq, ackmp, prim);
3616*7c478bd9Sstevel@tonic-gate 		/*
3617*7c478bd9Sstevel@tonic-gate 		 * flush the queues - why always ?
3618*7c478bd9Sstevel@tonic-gate 		 */
3619*7c478bd9Sstevel@tonic-gate 		(void) putnextctl1(acc_ep->te_rq, M_FLUSH, FLUSHR);
3620*7c478bd9Sstevel@tonic-gate 
3621*7c478bd9Sstevel@tonic-gate 		dimp = tl_resizemp(respmp, size);
3622*7c478bd9Sstevel@tonic-gate 		if (! dimp) {
3623*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 3,
3624*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
3625*7c478bd9Sstevel@tonic-gate 				"tl_conn_res:con_ind:allocb failure"));
3626*7c478bd9Sstevel@tonic-gate 			tl_merror(wq, respmp, ENOMEM);
3627*7c478bd9Sstevel@tonic-gate 			tl_closeok(acc_ep);
3628*7c478bd9Sstevel@tonic-gate 			if (client_noclose_set)
3629*7c478bd9Sstevel@tonic-gate 				tl_closeok(cl_ep);
3630*7c478bd9Sstevel@tonic-gate 			tl_refrele(acc_ep);
3631*7c478bd9Sstevel@tonic-gate 			return;
3632*7c478bd9Sstevel@tonic-gate 		}
3633*7c478bd9Sstevel@tonic-gate 		if (dimp->b_cont) {
3634*7c478bd9Sstevel@tonic-gate 			/* no user data in provider generated discon ind */
3635*7c478bd9Sstevel@tonic-gate 			freemsg(dimp->b_cont);
3636*7c478bd9Sstevel@tonic-gate 			dimp->b_cont = NULL;
3637*7c478bd9Sstevel@tonic-gate 		}
3638*7c478bd9Sstevel@tonic-gate 
3639*7c478bd9Sstevel@tonic-gate 		DB_TYPE(dimp) = M_PROTO;
3640*7c478bd9Sstevel@tonic-gate 		di = (struct T_discon_ind *)dimp->b_rptr;
3641*7c478bd9Sstevel@tonic-gate 		di->PRIM_type  = T_DISCON_IND;
3642*7c478bd9Sstevel@tonic-gate 		di->DISCON_reason = err;
3643*7c478bd9Sstevel@tonic-gate 		di->SEQ_number = BADSEQNUM;
3644*7c478bd9Sstevel@tonic-gate 
3645*7c478bd9Sstevel@tonic-gate 		tep->te_state = TS_IDLE;
3646*7c478bd9Sstevel@tonic-gate 		/*
3647*7c478bd9Sstevel@tonic-gate 		 * send T_DISCON_IND message
3648*7c478bd9Sstevel@tonic-gate 		 */
3649*7c478bd9Sstevel@tonic-gate 		putnext(acc_ep->te_rq, dimp);
3650*7c478bd9Sstevel@tonic-gate 		if (client_noclose_set)
3651*7c478bd9Sstevel@tonic-gate 			tl_closeok(cl_ep);
3652*7c478bd9Sstevel@tonic-gate 		tl_closeok(acc_ep);
3653*7c478bd9Sstevel@tonic-gate 		tl_refrele(acc_ep);
3654*7c478bd9Sstevel@tonic-gate 		return;
3655*7c478bd9Sstevel@tonic-gate 	}
3656*7c478bd9Sstevel@tonic-gate 
3657*7c478bd9Sstevel@tonic-gate 	/*
3658*7c478bd9Sstevel@tonic-gate 	 * now start connecting the accepting endpoint
3659*7c478bd9Sstevel@tonic-gate 	 */
3660*7c478bd9Sstevel@tonic-gate 	if (tep != acc_ep)
3661*7c478bd9Sstevel@tonic-gate 		acc_ep->te_state = NEXTSTATE(TE_PASS_CONN, acc_ep->te_state);
3662*7c478bd9Sstevel@tonic-gate 
3663*7c478bd9Sstevel@tonic-gate 	if (cl_ep == NULL) {
3664*7c478bd9Sstevel@tonic-gate 		/*
3665*7c478bd9Sstevel@tonic-gate 		 * The client has already closed. Send up any queued messages
3666*7c478bd9Sstevel@tonic-gate 		 * and change the state accordingly.
3667*7c478bd9Sstevel@tonic-gate 		 */
3668*7c478bd9Sstevel@tonic-gate 		tl_ok_ack(wq, ackmp, prim);
3669*7c478bd9Sstevel@tonic-gate 		tl_icon_sendmsgs(acc_ep, &tip->ti_mp);
3670*7c478bd9Sstevel@tonic-gate 
3671*7c478bd9Sstevel@tonic-gate 		/*
3672*7c478bd9Sstevel@tonic-gate 		 * remove endpoint from incoming connection
3673*7c478bd9Sstevel@tonic-gate 		 * delete client from list of incoming connections
3674*7c478bd9Sstevel@tonic-gate 		 */
3675*7c478bd9Sstevel@tonic-gate 		tl_freetip(tep, tip);
3676*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
3677*7c478bd9Sstevel@tonic-gate 		tl_closeok(acc_ep);
3678*7c478bd9Sstevel@tonic-gate 		tl_refrele(acc_ep);
3679*7c478bd9Sstevel@tonic-gate 		return;
3680*7c478bd9Sstevel@tonic-gate 	} else if (tip->ti_mp != NULL) {
3681*7c478bd9Sstevel@tonic-gate 		/*
3682*7c478bd9Sstevel@tonic-gate 		 * The client could have queued a T_DISCON_IND which needs
3683*7c478bd9Sstevel@tonic-gate 		 * to be sent up.
3684*7c478bd9Sstevel@tonic-gate 		 * Note that t_discon_req can not operate the same as
3685*7c478bd9Sstevel@tonic-gate 		 * t_data_req since it is not possible for it to putbq
3686*7c478bd9Sstevel@tonic-gate 		 * the message and return -1 due to the use of qwriter.
3687*7c478bd9Sstevel@tonic-gate 		 */
3688*7c478bd9Sstevel@tonic-gate 		tl_icon_sendmsgs(acc_ep, &tip->ti_mp);
3689*7c478bd9Sstevel@tonic-gate 	}
3690*7c478bd9Sstevel@tonic-gate 
3691*7c478bd9Sstevel@tonic-gate 	/*
3692*7c478bd9Sstevel@tonic-gate 	 * prepare connect confirm T_CONN_CON message
3693*7c478bd9Sstevel@tonic-gate 	 */
3694*7c478bd9Sstevel@tonic-gate 
3695*7c478bd9Sstevel@tonic-gate 	/*
3696*7c478bd9Sstevel@tonic-gate 	 * allocate the message - original data blocks
3697*7c478bd9Sstevel@tonic-gate 	 * retained in the returned mblk
3698*7c478bd9Sstevel@tonic-gate 	 */
3699*7c478bd9Sstevel@tonic-gate 	if (! IS_SOCKET(cl_ep) || tl_disable_early_connect) {
3700*7c478bd9Sstevel@tonic-gate 		ccmp = tl_resizemp(respmp, size);
3701*7c478bd9Sstevel@tonic-gate 		if (ccmp == NULL) {
3702*7c478bd9Sstevel@tonic-gate 			tl_ok_ack(wq, ackmp, prim);
3703*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 3,
3704*7c478bd9Sstevel@tonic-gate 				    SL_TRACE|SL_ERROR,
3705*7c478bd9Sstevel@tonic-gate 				    "tl_conn_res:conn_con:allocb failure"));
3706*7c478bd9Sstevel@tonic-gate 			tl_merror(wq, respmp, ENOMEM);
3707*7c478bd9Sstevel@tonic-gate 			tl_closeok(acc_ep);
3708*7c478bd9Sstevel@tonic-gate 			if (client_noclose_set)
3709*7c478bd9Sstevel@tonic-gate 				tl_closeok(cl_ep);
3710*7c478bd9Sstevel@tonic-gate 			tl_refrele(acc_ep);
3711*7c478bd9Sstevel@tonic-gate 			return;
3712*7c478bd9Sstevel@tonic-gate 		}
3713*7c478bd9Sstevel@tonic-gate 
3714*7c478bd9Sstevel@tonic-gate 		DB_TYPE(ccmp) = M_PROTO;
3715*7c478bd9Sstevel@tonic-gate 		cc = (struct T_conn_con *)ccmp->b_rptr;
3716*7c478bd9Sstevel@tonic-gate 		cc->PRIM_type  = T_CONN_CON;
3717*7c478bd9Sstevel@tonic-gate 		cc->RES_offset = (t_scalar_t)sizeof (struct T_conn_con);
3718*7c478bd9Sstevel@tonic-gate 		cc->RES_length = acc_ep->te_alen;
3719*7c478bd9Sstevel@tonic-gate 		addr_startp = ccmp->b_rptr + cc->RES_offset;
3720*7c478bd9Sstevel@tonic-gate 		bcopy(acc_ep->te_abuf, addr_startp, acc_ep->te_alen);
3721*7c478bd9Sstevel@tonic-gate 		if (cl_ep->te_flag & (TL_SETCRED|TL_SETUCRED)) {
3722*7c478bd9Sstevel@tonic-gate 			cc->OPT_offset = (t_scalar_t)T_ALIGN(cc->RES_offset +
3723*7c478bd9Sstevel@tonic-gate 			    cc->RES_length);
3724*7c478bd9Sstevel@tonic-gate 			cc->OPT_length = olen;
3725*7c478bd9Sstevel@tonic-gate 			tl_fill_option(ccmp->b_rptr + cc->OPT_offset,
3726*7c478bd9Sstevel@tonic-gate 			    acc_ep->te_credp, acc_ep->te_cpid, cl_ep->te_flag);
3727*7c478bd9Sstevel@tonic-gate 		} else {
3728*7c478bd9Sstevel@tonic-gate 			cc->OPT_offset = 0;
3729*7c478bd9Sstevel@tonic-gate 			cc->OPT_length = 0;
3730*7c478bd9Sstevel@tonic-gate 		}
3731*7c478bd9Sstevel@tonic-gate 		/*
3732*7c478bd9Sstevel@tonic-gate 		 * Forward the credential in the packet so it can be picked up
3733*7c478bd9Sstevel@tonic-gate 		 * at the higher layers for more complete credential processing
3734*7c478bd9Sstevel@tonic-gate 		 */
3735*7c478bd9Sstevel@tonic-gate 		mblk_setcred(ccmp, acc_ep->te_credp);
3736*7c478bd9Sstevel@tonic-gate 		DB_CPID(ccmp) = acc_ep->te_cpid;
3737*7c478bd9Sstevel@tonic-gate 	} else {
3738*7c478bd9Sstevel@tonic-gate 		freemsg(respmp);
3739*7c478bd9Sstevel@tonic-gate 		respmp = NULL;
3740*7c478bd9Sstevel@tonic-gate 	}
3741*7c478bd9Sstevel@tonic-gate 
3742*7c478bd9Sstevel@tonic-gate 	/*
3743*7c478bd9Sstevel@tonic-gate 	 * Make the connection linking the
3744*7c478bd9Sstevel@tonic-gate 	 * accepting and client endpoints.
3745*7c478bd9Sstevel@tonic-gate 	 * No need to increment references:
3746*7c478bd9Sstevel@tonic-gate 	 *	on client: it should already have one from tip->ti_tep linkage.
3747*7c478bd9Sstevel@tonic-gate 	 *	on acceptor: it should already have one from the table lookup.
3748*7c478bd9Sstevel@tonic-gate 	 *
3749*7c478bd9Sstevel@tonic-gate 	 * At this point neither the client nor the acceptor can close. Set
3750*7c478bd9Sstevel@tonic-gate 	 * the client serializer to the acceptor's.
3751*7c478bd9Sstevel@tonic-gate 	 */
3752*7c478bd9Sstevel@tonic-gate 	ASSERT(cl_ep->te_refcnt >= 2);
3753*7c478bd9Sstevel@tonic-gate 	ASSERT(acc_ep->te_refcnt >= 2);
3754*7c478bd9Sstevel@tonic-gate 	ASSERT(cl_ep->te_conp == NULL);
3755*7c478bd9Sstevel@tonic-gate 	ASSERT(acc_ep->te_conp == NULL);
3756*7c478bd9Sstevel@tonic-gate 	cl_ep->te_conp = acc_ep;
3757*7c478bd9Sstevel@tonic-gate 	acc_ep->te_conp = cl_ep;
3758*7c478bd9Sstevel@tonic-gate 	ASSERT(cl_ep->te_ser == tep->te_ser);
3759*7c478bd9Sstevel@tonic-gate 	if (switch_client_serializer) {
3760*7c478bd9Sstevel@tonic-gate 		mutex_enter(&cl_ep->te_ser_lock);
3761*7c478bd9Sstevel@tonic-gate 		if (cl_ep->te_ser_count > 0) {
3762*7c478bd9Sstevel@tonic-gate 			switch_client_serializer = B_FALSE;
3763*7c478bd9Sstevel@tonic-gate 			tl_serializer_noswitch++;
3764*7c478bd9Sstevel@tonic-gate 		} else {
3765*7c478bd9Sstevel@tonic-gate 			/*
3766*7c478bd9Sstevel@tonic-gate 			 * Move client to the acceptor's serializer.
3767*7c478bd9Sstevel@tonic-gate 			 */
3768*7c478bd9Sstevel@tonic-gate 			tl_serializer_refhold(acc_ep->te_ser);
3769*7c478bd9Sstevel@tonic-gate 			tl_serializer_refrele(cl_ep->te_ser);
3770*7c478bd9Sstevel@tonic-gate 			cl_ep->te_ser = acc_ep->te_ser;
3771*7c478bd9Sstevel@tonic-gate 		}
3772*7c478bd9Sstevel@tonic-gate 		mutex_exit(&cl_ep->te_ser_lock);
3773*7c478bd9Sstevel@tonic-gate 	}
3774*7c478bd9Sstevel@tonic-gate 	if (!switch_client_serializer) {
3775*7c478bd9Sstevel@tonic-gate 		/*
3776*7c478bd9Sstevel@tonic-gate 		 * It is not possible to switch the client to the acceptor's
3777*7c478bd9Sstevel@tonic-gate 		 * serializer. Move the acceptor to the client's serializer
3778*7c478bd9Sstevel@tonic-gate 		 * (which is the same as the listener's).
3779*7c478bd9Sstevel@tonic-gate 		 */
3780*7c478bd9Sstevel@tonic-gate 		tl_serializer_refhold(cl_ep->te_ser);
3781*7c478bd9Sstevel@tonic-gate 		tl_serializer_refrele(acc_ep->te_ser);
3782*7c478bd9Sstevel@tonic-gate 		acc_ep->te_ser = cl_ep->te_ser;
3783*7c478bd9Sstevel@tonic-gate 	}
3784*7c478bd9Sstevel@tonic-gate 
3785*7c478bd9Sstevel@tonic-gate 	TL_REMOVE_PEER(cl_ep->te_oconp);
3786*7c478bd9Sstevel@tonic-gate 	TL_REMOVE_PEER(acc_ep->te_oconp);
3787*7c478bd9Sstevel@tonic-gate 
3788*7c478bd9Sstevel@tonic-gate 	/*
3789*7c478bd9Sstevel@tonic-gate 	 * remove endpoint from incoming connection
3790*7c478bd9Sstevel@tonic-gate 	 * delete client from list of incoming connections
3791*7c478bd9Sstevel@tonic-gate 	 */
3792*7c478bd9Sstevel@tonic-gate 	tip->ti_tep = NULL;
3793*7c478bd9Sstevel@tonic-gate 	tl_freetip(tep, tip);
3794*7c478bd9Sstevel@tonic-gate 	tl_ok_ack(wq, ackmp, prim);
3795*7c478bd9Sstevel@tonic-gate 
3796*7c478bd9Sstevel@tonic-gate 	/*
3797*7c478bd9Sstevel@tonic-gate 	 * data blocks already linked in reallocb()
3798*7c478bd9Sstevel@tonic-gate 	 */
3799*7c478bd9Sstevel@tonic-gate 
3800*7c478bd9Sstevel@tonic-gate 	/*
3801*7c478bd9Sstevel@tonic-gate 	 * link queues so that I_SENDFD will work
3802*7c478bd9Sstevel@tonic-gate 	 */
3803*7c478bd9Sstevel@tonic-gate 	if (! IS_SOCKET(tep)) {
3804*7c478bd9Sstevel@tonic-gate 		acc_ep->te_wq->q_next = cl_ep->te_rq;
3805*7c478bd9Sstevel@tonic-gate 		cl_ep->te_wq->q_next = acc_ep->te_rq;
3806*7c478bd9Sstevel@tonic-gate 	}
3807*7c478bd9Sstevel@tonic-gate 
3808*7c478bd9Sstevel@tonic-gate 	/*
3809*7c478bd9Sstevel@tonic-gate 	 * send T_CONN_CON up on client side unless it was already
3810*7c478bd9Sstevel@tonic-gate 	 * done (for a socket). In case any data or ordrel req has been
3811*7c478bd9Sstevel@tonic-gate 	 * queued, make sure that the service procedure runs.
3812*7c478bd9Sstevel@tonic-gate 	 */
3813*7c478bd9Sstevel@tonic-gate 	if (IS_SOCKET(cl_ep) && !tl_disable_early_connect) {
3814*7c478bd9Sstevel@tonic-gate 		enableok(cl_ep->te_wq);
3815*7c478bd9Sstevel@tonic-gate 		TL_QENABLE(cl_ep);
3816*7c478bd9Sstevel@tonic-gate 		if (ccmp != NULL)
3817*7c478bd9Sstevel@tonic-gate 			freemsg(ccmp);
3818*7c478bd9Sstevel@tonic-gate 	} else {
3819*7c478bd9Sstevel@tonic-gate 		/*
3820*7c478bd9Sstevel@tonic-gate 		 * change client state on TE_CONN_CON event
3821*7c478bd9Sstevel@tonic-gate 		 */
3822*7c478bd9Sstevel@tonic-gate 		cl_ep->te_state = NEXTSTATE(TE_CONN_CON, cl_ep->te_state);
3823*7c478bd9Sstevel@tonic-gate 		putnext(cl_ep->te_rq, ccmp);
3824*7c478bd9Sstevel@tonic-gate 	}
3825*7c478bd9Sstevel@tonic-gate 
3826*7c478bd9Sstevel@tonic-gate 	/* Mark both endpoints as accepted */
3827*7c478bd9Sstevel@tonic-gate 	cl_ep->te_flag |= TL_ACCEPTED;
3828*7c478bd9Sstevel@tonic-gate 	acc_ep->te_flag |= TL_ACCEPTED;
3829*7c478bd9Sstevel@tonic-gate 
3830*7c478bd9Sstevel@tonic-gate 	/*
3831*7c478bd9Sstevel@tonic-gate 	 * Allow client and acceptor to close.
3832*7c478bd9Sstevel@tonic-gate 	 */
3833*7c478bd9Sstevel@tonic-gate 	tl_closeok(acc_ep);
3834*7c478bd9Sstevel@tonic-gate 	if (client_noclose_set)
3835*7c478bd9Sstevel@tonic-gate 		tl_closeok(cl_ep);
3836*7c478bd9Sstevel@tonic-gate }
3837*7c478bd9Sstevel@tonic-gate 
3838*7c478bd9Sstevel@tonic-gate 
3839*7c478bd9Sstevel@tonic-gate 
3840*7c478bd9Sstevel@tonic-gate 
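/*
 * Handle T_DISCON_REQ from the transport user: validate the state and the
 * message, acknowledge with T_OK_ACK or T_ERROR_ACK, and generate the
 * T_DISCON_IND for the peer or pending connection being torn down.
 */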
3841*7c478bd9Sstevel@tonic-gate static void
3842*7c478bd9Sstevel@tonic-gate tl_discon_req(mblk_t *mp, tl_endpt_t *tep)
3843*7c478bd9Sstevel@tonic-gate {
3844*7c478bd9Sstevel@tonic-gate 	queue_t			*wq;
3845*7c478bd9Sstevel@tonic-gate 	struct T_discon_req	*dr;
3846*7c478bd9Sstevel@tonic-gate 	ssize_t			msz;
3847*7c478bd9Sstevel@tonic-gate 	tl_endpt_t		*peer_tep = tep->te_conp;
3848*7c478bd9Sstevel@tonic-gate 	tl_endpt_t		*srv_tep = tep->te_oconp;
3849*7c478bd9Sstevel@tonic-gate 	tl_icon_t		*tip;
3850*7c478bd9Sstevel@tonic-gate 	size_t			size;
3851*7c478bd9Sstevel@tonic-gate 	mblk_t			*ackmp, *dimp, *respmp;
3852*7c478bd9Sstevel@tonic-gate 	struct T_discon_ind	*di;
3853*7c478bd9Sstevel@tonic-gate 	t_scalar_t		save_state, new_state;
3854*7c478bd9Sstevel@tonic-gate 
3855*7c478bd9Sstevel@tonic-gate 	if (tep->te_closing) {
3856*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
3857*7c478bd9Sstevel@tonic-gate 		return;
3858*7c478bd9Sstevel@tonic-gate 	}
3859*7c478bd9Sstevel@tonic-gate 
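	/*
	 * Drop references to a peer or pending server that is already
	 * closing; from here on it is treated as if no connection exists.
	 */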
3860*7c478bd9Sstevel@tonic-gate 	if ((peer_tep != NULL) && peer_tep->te_closing) {
3861*7c478bd9Sstevel@tonic-gate 		TL_UNCONNECT(tep->te_conp);
3862*7c478bd9Sstevel@tonic-gate 		peer_tep = NULL;
3863*7c478bd9Sstevel@tonic-gate 	}
3864*7c478bd9Sstevel@tonic-gate 	if ((srv_tep != NULL) && srv_tep->te_closing) {
3865*7c478bd9Sstevel@tonic-gate 		TL_UNCONNECT(tep->te_oconp);
3866*7c478bd9Sstevel@tonic-gate 		srv_tep = NULL;
3867*7c478bd9Sstevel@tonic-gate 	}
3868*7c478bd9Sstevel@tonic-gate 
3869*7c478bd9Sstevel@tonic-gate 	wq = tep->te_wq;
3870*7c478bd9Sstevel@tonic-gate 
3871*7c478bd9Sstevel@tonic-gate 	/*
3872*7c478bd9Sstevel@tonic-gate 	 * preallocate memory for:
3873*7c478bd9Sstevel@tonic-gate 	 * 1. max of T_ERROR_ACK and T_OK_ACK
3874*7c478bd9Sstevel@tonic-gate 	 *	==> known max T_ERROR_ACK
3875*7c478bd9Sstevel@tonic-gate 	 * 2. for  T_DISCON_IND
3876*7c478bd9Sstevel@tonic-gate 	 */
3877*7c478bd9Sstevel@tonic-gate 	ackmp = allocb(sizeof (struct T_error_ack), BPRI_MED);
3878*7c478bd9Sstevel@tonic-gate 	if (! ackmp) {
3879*7c478bd9Sstevel@tonic-gate 		tl_memrecover(wq, mp, sizeof (struct T_error_ack));
3880*7c478bd9Sstevel@tonic-gate 		return;
3881*7c478bd9Sstevel@tonic-gate 	}
3882*7c478bd9Sstevel@tonic-gate 	/*
3883*7c478bd9Sstevel@tonic-gate 	 * Memory is committed for T_OK_ACK/T_ERROR_ACK now;
3884*7c478bd9Sstevel@tonic-gate 	 * memory for T_DISCON_IND will be committed later.
3885*7c478bd9Sstevel@tonic-gate 	 */
3886*7c478bd9Sstevel@tonic-gate 
3887*7c478bd9Sstevel@tonic-gate 	dr = (struct T_discon_req *)mp->b_rptr;
3888*7c478bd9Sstevel@tonic-gate 	msz = MBLKL(mp);
3889*7c478bd9Sstevel@tonic-gate 
3890*7c478bd9Sstevel@tonic-gate 	/*
3891*7c478bd9Sstevel@tonic-gate 	 * validate the state
3892*7c478bd9Sstevel@tonic-gate 	 */
3893*7c478bd9Sstevel@tonic-gate 	save_state = new_state = tep->te_state;
3894*7c478bd9Sstevel@tonic-gate 	if (! (save_state >= TS_WCON_CREQ && save_state <= TS_WRES_CIND) &&
3895*7c478bd9Sstevel@tonic-gate 	    ! (save_state >= TS_DATA_XFER && save_state <= TS_WREQ_ORDREL)) {
3896*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1,
3897*7c478bd9Sstevel@tonic-gate 			SL_TRACE|SL_ERROR,
3898*7c478bd9Sstevel@tonic-gate 			"tl_wput:T_DISCON_REQ:out of state, state=%d",
3899*7c478bd9Sstevel@tonic-gate 			tep->te_state));
3900*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, ackmp, TOUTSTATE, 0, T_DISCON_REQ);
3901*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
3902*7c478bd9Sstevel@tonic-gate 		return;
3903*7c478bd9Sstevel@tonic-gate 	}
3904*7c478bd9Sstevel@tonic-gate 	/*
3905*7c478bd9Sstevel@tonic-gate 	 * Defer committing the state change until it is determined if
3906*7c478bd9Sstevel@tonic-gate 	 * the message will be queued with the tl_icon or not.
3907*7c478bd9Sstevel@tonic-gate 	 */
3908*7c478bd9Sstevel@tonic-gate 	new_state  = NEXTSTATE(TE_DISCON_REQ, tep->te_state);
3909*7c478bd9Sstevel@tonic-gate 
3910*7c478bd9Sstevel@tonic-gate 	/* validate the message */
3911*7c478bd9Sstevel@tonic-gate 	if (msz < sizeof (struct T_discon_req)) {
3912*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
3913*7c478bd9Sstevel@tonic-gate 			"tl_discon_req:invalid message"));
3914*7c478bd9Sstevel@tonic-gate 		tep->te_state = NEXTSTATE(TE_ERROR_ACK, new_state);
3915*7c478bd9Sstevel@tonic-gate 		tl_error_ack(wq, ackmp, TSYSERR, EINVAL, T_DISCON_REQ);
3916*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
3917*7c478bd9Sstevel@tonic-gate 		return;
3918*7c478bd9Sstevel@tonic-gate 	}
3919*7c478bd9Sstevel@tonic-gate 
3920*7c478bd9Sstevel@tonic-gate 	/*
3921*7c478bd9Sstevel@tonic-gate 	 * If this is a server (listener), validate that the client
3922*7c478bd9Sstevel@tonic-gate 	 * exists by its connection sequence number.
3923*7c478bd9Sstevel@tonic-gate 	 */
3924*7c478bd9Sstevel@tonic-gate 	if (tep->te_nicon > 0) { /* server */
3925*7c478bd9Sstevel@tonic-gate 
3926*7c478bd9Sstevel@tonic-gate 		/*
3927*7c478bd9Sstevel@tonic-gate 		 * search server list for disconnect client
3928*7c478bd9Sstevel@tonic-gate 		 */
3929*7c478bd9Sstevel@tonic-gate 		tip = tl_icon_find(tep, dr->SEQ_number);
3930*7c478bd9Sstevel@tonic-gate 		if (tip == NULL) {
3931*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 2,
3932*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
3933*7c478bd9Sstevel@tonic-gate 				"tl_discon_req:no disconnect endpoint"));
3934*7c478bd9Sstevel@tonic-gate 			tep->te_state = NEXTSTATE(TE_ERROR_ACK, new_state);
3935*7c478bd9Sstevel@tonic-gate 			tl_error_ack(wq, ackmp, TBADSEQ, 0, T_DISCON_REQ);
3936*7c478bd9Sstevel@tonic-gate 			freemsg(mp);
3937*7c478bd9Sstevel@tonic-gate 			return;
3938*7c478bd9Sstevel@tonic-gate 		}
3939*7c478bd9Sstevel@tonic-gate 		/*
3940*7c478bd9Sstevel@tonic-gate 		 * If ti_tep is NULL the client has already closed. In this case
3941*7c478bd9Sstevel@tonic-gate 		 * the code below will avoid any action on the client side.
3942*7c478bd9Sstevel@tonic-gate 		 */
3943*7c478bd9Sstevel@tonic-gate 
3944*7c478bd9Sstevel@tonic-gate 		ASSERT(IMPLY(tip->ti_tep != NULL,
3945*7c478bd9Sstevel@tonic-gate 			tip->ti_tep->te_seqno == dr->SEQ_number));
3946*7c478bd9Sstevel@tonic-gate 		peer_tep = tip->ti_tep;
3947*7c478bd9Sstevel@tonic-gate 	}
3948*7c478bd9Sstevel@tonic-gate 
3949*7c478bd9Sstevel@tonic-gate 	/*
3950*7c478bd9Sstevel@tonic-gate 	 * preallocate now for T_DISCON_IND
3951*7c478bd9Sstevel@tonic-gate 	 * ack validity of request (T_OK_ACK) after memory committed
3952*7c478bd9Sstevel@tonic-gate 	 */
3953*7c478bd9Sstevel@tonic-gate 	size = sizeof (struct T_discon_ind);
3954*7c478bd9Sstevel@tonic-gate 	if ((respmp = reallocb(mp, size, 0)) == NULL) {
3955*7c478bd9Sstevel@tonic-gate 		tl_memrecover(wq, mp, size);
3956*7c478bd9Sstevel@tonic-gate 		freemsg(ackmp);
3957*7c478bd9Sstevel@tonic-gate 		return;
3958*7c478bd9Sstevel@tonic-gate 	}
3959*7c478bd9Sstevel@tonic-gate 
3960*7c478bd9Sstevel@tonic-gate 	/*
3961*7c478bd9Sstevel@tonic-gate 	 * prepare message to ack validity of request
3962*7c478bd9Sstevel@tonic-gate 	 */
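	/*
	 * The event fed to the state table depends on how many connect
	 * indications are outstanding: TE_OK_ACK1 when there are none,
	 * TE_OK_ACK2 when the one being torn down is the only one, and
	 * TE_OK_ACK4 when more remain pending.
	 */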
3963*7c478bd9Sstevel@tonic-gate 	if (tep->te_nicon == 0)
3964*7c478bd9Sstevel@tonic-gate 		new_state = NEXTSTATE(TE_OK_ACK1, new_state);
3965*7c478bd9Sstevel@tonic-gate 	else
3966*7c478bd9Sstevel@tonic-gate 		if (tep->te_nicon == 1)
3967*7c478bd9Sstevel@tonic-gate 			new_state = NEXTSTATE(TE_OK_ACK2, new_state);
3968*7c478bd9Sstevel@tonic-gate 		else
3969*7c478bd9Sstevel@tonic-gate 			new_state = NEXTSTATE(TE_OK_ACK4, new_state);
3970*7c478bd9Sstevel@tonic-gate 
3971*7c478bd9Sstevel@tonic-gate 	/*
3972*7c478bd9Sstevel@tonic-gate 	 * Flushing queues according to TPI. Using the old state.
3973*7c478bd9Sstevel@tonic-gate 	 */
3974*7c478bd9Sstevel@tonic-gate 	if ((tep->te_nicon <= 1) &&
3975*7c478bd9Sstevel@tonic-gate 	    ((save_state == TS_DATA_XFER) ||
3976*7c478bd9Sstevel@tonic-gate 	    (save_state == TS_WIND_ORDREL) ||
3977*7c478bd9Sstevel@tonic-gate 	    (save_state == TS_WREQ_ORDREL)))
3978*7c478bd9Sstevel@tonic-gate 		(void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW);
3979*7c478bd9Sstevel@tonic-gate 
3980*7c478bd9Sstevel@tonic-gate 	/* send T_OK_ACK up  */
3981*7c478bd9Sstevel@tonic-gate 	tl_ok_ack(wq, ackmp, T_DISCON_REQ);
3982*7c478bd9Sstevel@tonic-gate 
3983*7c478bd9Sstevel@tonic-gate 	/*
3984*7c478bd9Sstevel@tonic-gate 	 * now do disconnect business
3985*7c478bd9Sstevel@tonic-gate 	 */
3986*7c478bd9Sstevel@tonic-gate 	if (tep->te_nicon > 0) { /* listener */
3987*7c478bd9Sstevel@tonic-gate 		if (peer_tep != NULL && !peer_tep->te_closing) {
3988*7c478bd9Sstevel@tonic-gate 			/*
3989*7c478bd9Sstevel@tonic-gate 			 * disconnect an incoming connect request pending on tep
3990*7c478bd9Sstevel@tonic-gate 			 */
3991*7c478bd9Sstevel@tonic-gate 			if ((dimp = tl_resizemp(respmp, size)) == NULL) {
3992*7c478bd9Sstevel@tonic-gate 				(void) (STRLOG(TL_ID, tep->te_minor, 2,
3993*7c478bd9Sstevel@tonic-gate 					SL_TRACE|SL_ERROR,
3994*7c478bd9Sstevel@tonic-gate 					"tl_discon_req: reallocb failed"));
3995*7c478bd9Sstevel@tonic-gate 				tep->te_state = new_state;
3996*7c478bd9Sstevel@tonic-gate 				tl_merror(wq, respmp, ENOMEM);
3997*7c478bd9Sstevel@tonic-gate 				return;
3998*7c478bd9Sstevel@tonic-gate 			}
3999*7c478bd9Sstevel@tonic-gate 			di = (struct T_discon_ind *)dimp->b_rptr;
4000*7c478bd9Sstevel@tonic-gate 			di->SEQ_number = BADSEQNUM;
4001*7c478bd9Sstevel@tonic-gate 			save_state = peer_tep->te_state;
4002*7c478bd9Sstevel@tonic-gate 			peer_tep->te_state = TS_IDLE;
4003*7c478bd9Sstevel@tonic-gate 
4004*7c478bd9Sstevel@tonic-gate 			TL_REMOVE_PEER(peer_tep->te_oconp);
4005*7c478bd9Sstevel@tonic-gate 			enableok(peer_tep->te_wq);
4006*7c478bd9Sstevel@tonic-gate 			TL_QENABLE(peer_tep);
4007*7c478bd9Sstevel@tonic-gate 		} else {
4008*7c478bd9Sstevel@tonic-gate 			freemsg(respmp);
4009*7c478bd9Sstevel@tonic-gate 			dimp = NULL;
4010*7c478bd9Sstevel@tonic-gate 		}
4011*7c478bd9Sstevel@tonic-gate 
4012*7c478bd9Sstevel@tonic-gate 		/*
4013*7c478bd9Sstevel@tonic-gate 		 * Remove the disconnecting client's entry from the
4014*7c478bd9Sstevel@tonic-gate 		 * server's incoming connection list.
4015*7c478bd9Sstevel@tonic-gate 		 */
4016*7c478bd9Sstevel@tonic-gate 		tl_freetip(tep, tip);
4017*7c478bd9Sstevel@tonic-gate 	} else if ((peer_tep = tep->te_oconp) != NULL) { /* client */
4018*7c478bd9Sstevel@tonic-gate 		/*
4019*7c478bd9Sstevel@tonic-gate 		 * disconnect an outgoing request pending from tep
4020*7c478bd9Sstevel@tonic-gate 		 */
4021*7c478bd9Sstevel@tonic-gate 
4022*7c478bd9Sstevel@tonic-gate 		if ((dimp = tl_resizemp(respmp, size)) == NULL) {
4023*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 2,
4024*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
4025*7c478bd9Sstevel@tonic-gate 				"tl_discon_req: reallocb failed"));
4026*7c478bd9Sstevel@tonic-gate 			tep->te_state = new_state;
4027*7c478bd9Sstevel@tonic-gate 			tl_merror(wq, respmp, ENOMEM);
4028*7c478bd9Sstevel@tonic-gate 			return;
4029*7c478bd9Sstevel@tonic-gate 		}
4030*7c478bd9Sstevel@tonic-gate 		di = (struct T_discon_ind *)dimp->b_rptr;
4031*7c478bd9Sstevel@tonic-gate 		DB_TYPE(dimp) = M_PROTO;
4032*7c478bd9Sstevel@tonic-gate 		di->PRIM_type  = T_DISCON_IND;
4033*7c478bd9Sstevel@tonic-gate 		di->DISCON_reason = ECONNRESET;
4034*7c478bd9Sstevel@tonic-gate 		di->SEQ_number = tep->te_seqno;
4035*7c478bd9Sstevel@tonic-gate 
4036*7c478bd9Sstevel@tonic-gate 		/*
4037*7c478bd9Sstevel@tonic-gate 		 * If this is a socket the T_DISCON_IND is queued with
4038*7c478bd9Sstevel@tonic-gate 		 * the T_CONN_IND. Otherwise the T_CONN_IND is removed
4039*7c478bd9Sstevel@tonic-gate 		 * from the list of pending connections.
4040*7c478bd9Sstevel@tonic-gate 		 * Note that when te_oconp is set the peer must have
4041*7c478bd9Sstevel@tonic-gate 		 * a t_connind_t for the client.
4042*7c478bd9Sstevel@tonic-gate 		 */
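		/*
		 * The typical case here is a socket client disconnecting
		 * before the listener has accepted the connection: the
		 * T_DISCON_IND is left on the listener's pending-connection
		 * entry so the listener finds it together with the
		 * T_CONN_IND.
		 */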
4043*7c478bd9Sstevel@tonic-gate 		if (IS_SOCKET(tep) && !tl_disable_early_connect) {
4044*7c478bd9Sstevel@tonic-gate 			/*
4045*7c478bd9Sstevel@tonic-gate 			 * No need to check that
4046*7c478bd9Sstevel@tonic-gate 			 * ti_tep == NULL since the T_DISCON_IND
4047*7c478bd9Sstevel@tonic-gate 			 * takes precedence over other queued
4048*7c478bd9Sstevel@tonic-gate 			 * messages.
4049*7c478bd9Sstevel@tonic-gate 			 */
4050*7c478bd9Sstevel@tonic-gate 			tl_icon_queuemsg(peer_tep, tep->te_seqno, dimp);
4051*7c478bd9Sstevel@tonic-gate 			peer_tep = NULL;
4052*7c478bd9Sstevel@tonic-gate 			dimp = NULL;
4053*7c478bd9Sstevel@tonic-gate 			/*
4054*7c478bd9Sstevel@tonic-gate 			 * Can't clear te_oconp since tl_co_unconnect needs
4055*7c478bd9Sstevel@tonic-gate 			 * it as a hint not to free the tep.
4056*7c478bd9Sstevel@tonic-gate 			 * Keep the state unchanged since tl_conn_res inspects
4057*7c478bd9Sstevel@tonic-gate 			 * it.
4058*7c478bd9Sstevel@tonic-gate 			 */
4059*7c478bd9Sstevel@tonic-gate 			new_state = tep->te_state;
4060*7c478bd9Sstevel@tonic-gate 		} else {
4061*7c478bd9Sstevel@tonic-gate 			/* If the pending T_CONN_IND is still there, delete it */
4062*7c478bd9Sstevel@tonic-gate 			tip = tl_icon_find(peer_tep, tep->te_seqno);
4063*7c478bd9Sstevel@tonic-gate 			if (tip != NULL) {
4064*7c478bd9Sstevel@tonic-gate 				ASSERT(tep == tip->ti_tep);
4065*7c478bd9Sstevel@tonic-gate 				save_state = peer_tep->te_state;
4066*7c478bd9Sstevel@tonic-gate 				if (peer_tep->te_nicon == 1)
4067*7c478bd9Sstevel@tonic-gate 					peer_tep->te_state =
4068*7c478bd9Sstevel@tonic-gate 					    NEXTSTATE(TE_DISCON_IND2,
4069*7c478bd9Sstevel@tonic-gate 						peer_tep->te_state);
4070*7c478bd9Sstevel@tonic-gate 				else
4071*7c478bd9Sstevel@tonic-gate 					peer_tep->te_state =
4072*7c478bd9Sstevel@tonic-gate 					    NEXTSTATE(TE_DISCON_IND3,
4073*7c478bd9Sstevel@tonic-gate 						peer_tep->te_state);
4074*7c478bd9Sstevel@tonic-gate 				tl_freetip(peer_tep, tip);
4075*7c478bd9Sstevel@tonic-gate 			}
4076*7c478bd9Sstevel@tonic-gate 			ASSERT(tep->te_oconp != NULL);
4077*7c478bd9Sstevel@tonic-gate 			TL_UNCONNECT(tep->te_oconp);
4078*7c478bd9Sstevel@tonic-gate 		}
4079*7c478bd9Sstevel@tonic-gate 	} else if ((peer_tep = tep->te_conp) != NULL) { /* connected! */
4080*7c478bd9Sstevel@tonic-gate 		if ((dimp = tl_resizemp(respmp, size)) == NULL) {
4081*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 2,
4082*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
4083*7c478bd9Sstevel@tonic-gate 				"tl_discon_req: reallocb failed"));
4084*7c478bd9Sstevel@tonic-gate 			tep->te_state = new_state;
4085*7c478bd9Sstevel@tonic-gate 			tl_merror(wq, respmp, ENOMEM);
4086*7c478bd9Sstevel@tonic-gate 			return;
4087*7c478bd9Sstevel@tonic-gate 		}
4088*7c478bd9Sstevel@tonic-gate 		di = (struct T_discon_ind *)dimp->b_rptr;
4089*7c478bd9Sstevel@tonic-gate 		di->SEQ_number = BADSEQNUM;
4090*7c478bd9Sstevel@tonic-gate 
4091*7c478bd9Sstevel@tonic-gate 		save_state = peer_tep->te_state;
4092*7c478bd9Sstevel@tonic-gate 		peer_tep->te_state = TS_IDLE;
4093*7c478bd9Sstevel@tonic-gate 	} else {
4094*7c478bd9Sstevel@tonic-gate 		/* Not connected */
4095*7c478bd9Sstevel@tonic-gate 		tep->te_state = new_state;
4096*7c478bd9Sstevel@tonic-gate 		freemsg(respmp);
4097*7c478bd9Sstevel@tonic-gate 		return;
4098*7c478bd9Sstevel@tonic-gate 	}
4099*7c478bd9Sstevel@tonic-gate 
4100*7c478bd9Sstevel@tonic-gate 	/* Commit state changes */
4101*7c478bd9Sstevel@tonic-gate 	tep->te_state = new_state;
4102*7c478bd9Sstevel@tonic-gate 
4103*7c478bd9Sstevel@tonic-gate 	if (peer_tep == NULL) {
4104*7c478bd9Sstevel@tonic-gate 		ASSERT(dimp == NULL);
4105*7c478bd9Sstevel@tonic-gate 		goto done;
4106*7c478bd9Sstevel@tonic-gate 	}
4107*7c478bd9Sstevel@tonic-gate 	/*
4108*7c478bd9Sstevel@tonic-gate 	 * Flush queues on peer before sending up
4109*7c478bd9Sstevel@tonic-gate 	 * T_DISCON_IND according to TPI
4110*7c478bd9Sstevel@tonic-gate 	 */
4111*7c478bd9Sstevel@tonic-gate 
4112*7c478bd9Sstevel@tonic-gate 	if ((save_state == TS_DATA_XFER) ||
4113*7c478bd9Sstevel@tonic-gate 	    (save_state == TS_WIND_ORDREL) ||
4114*7c478bd9Sstevel@tonic-gate 	    (save_state == TS_WREQ_ORDREL))
4115*7c478bd9Sstevel@tonic-gate 		(void) putnextctl1(peer_tep->te_rq, M_FLUSH, FLUSHRW);
4116*7c478bd9Sstevel@tonic-gate 
4117*7c478bd9Sstevel@tonic-gate 	DB_TYPE(dimp) = M_PROTO;
4118*7c478bd9Sstevel@tonic-gate 	di->PRIM_type  = T_DISCON_IND;
4119*7c478bd9Sstevel@tonic-gate 	di->DISCON_reason = ECONNRESET;
4120*7c478bd9Sstevel@tonic-gate 
4121*7c478bd9Sstevel@tonic-gate 	/*
4122*7c478bd9Sstevel@tonic-gate 	 * data blocks already linked into dimp by reallocb()
4123*7c478bd9Sstevel@tonic-gate 	 */
4124*7c478bd9Sstevel@tonic-gate 	/*
4125*7c478bd9Sstevel@tonic-gate 	 * send indication message to peer user module
4126*7c478bd9Sstevel@tonic-gate 	 */
4127*7c478bd9Sstevel@tonic-gate 	ASSERT(dimp != NULL);
4128*7c478bd9Sstevel@tonic-gate 	putnext(peer_tep->te_rq, dimp);
4129*7c478bd9Sstevel@tonic-gate done:
4130*7c478bd9Sstevel@tonic-gate 	if (tep->te_conp) {	/* disconnect pointers if connected */
4131*7c478bd9Sstevel@tonic-gate 		ASSERT(! peer_tep->te_closing);
4132*7c478bd9Sstevel@tonic-gate 
4133*7c478bd9Sstevel@tonic-gate 		/*
4134*7c478bd9Sstevel@tonic-gate 		 * Messages may be queued on peer's write queue
4135*7c478bd9Sstevel@tonic-gate 		 * waiting to be processed by its write service
4136*7c478bd9Sstevel@tonic-gate 		 * procedure. Before the pointer to the peer transport
4137*7c478bd9Sstevel@tonic-gate 		 * structure is set to NULL, qenable the peer's write
4138*7c478bd9Sstevel@tonic-gate 		 * queue so that the queued up messages are processed.
4139*7c478bd9Sstevel@tonic-gate 		 */
4140*7c478bd9Sstevel@tonic-gate 		if ((save_state == TS_DATA_XFER) ||
4141*7c478bd9Sstevel@tonic-gate 		    (save_state == TS_WIND_ORDREL) ||
4142*7c478bd9Sstevel@tonic-gate 		    (save_state == TS_WREQ_ORDREL))
4143*7c478bd9Sstevel@tonic-gate 			TL_QENABLE(peer_tep);
4144*7c478bd9Sstevel@tonic-gate 		ASSERT(peer_tep != NULL && peer_tep->te_conp != NULL);
4145*7c478bd9Sstevel@tonic-gate 		TL_UNCONNECT(peer_tep->te_conp);
4146*7c478bd9Sstevel@tonic-gate 		if (! IS_SOCKET(tep)) {
4147*7c478bd9Sstevel@tonic-gate 			/*
4148*7c478bd9Sstevel@tonic-gate 			 * unlink the streams
4149*7c478bd9Sstevel@tonic-gate 			 */
4150*7c478bd9Sstevel@tonic-gate 			tep->te_wq->q_next = NULL;
4151*7c478bd9Sstevel@tonic-gate 			peer_tep->te_wq->q_next = NULL;
4152*7c478bd9Sstevel@tonic-gate 		}
4153*7c478bd9Sstevel@tonic-gate 		TL_UNCONNECT(tep->te_conp);
4154*7c478bd9Sstevel@tonic-gate 	}
4155*7c478bd9Sstevel@tonic-gate }
4156*7c478bd9Sstevel@tonic-gate 
4157*7c478bd9Sstevel@tonic-gate 
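/*
 * Handle T_ADDR_REQ.
 *
 * Reply with a T_ADDR_ACK carrying the local address when the endpoint is
 * bound (te_state >= TS_IDLE) and only the fixed header when it is not.
 * A connection oriented endpoint in data transfer or orderly release
 * states also gets the remote address; that case is handled by
 * tl_connected_cots_addr_req() below.
 */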
4158*7c478bd9Sstevel@tonic-gate static void
4159*7c478bd9Sstevel@tonic-gate tl_addr_req(mblk_t *mp, tl_endpt_t *tep)
4160*7c478bd9Sstevel@tonic-gate {
4161*7c478bd9Sstevel@tonic-gate 	queue_t			*wq;
4162*7c478bd9Sstevel@tonic-gate 	size_t			ack_sz;
4163*7c478bd9Sstevel@tonic-gate 	mblk_t			*ackmp;
4164*7c478bd9Sstevel@tonic-gate 	struct T_addr_ack	*taa;
4165*7c478bd9Sstevel@tonic-gate 
4166*7c478bd9Sstevel@tonic-gate 	if (tep->te_closing) {
4167*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
4168*7c478bd9Sstevel@tonic-gate 		return;
4169*7c478bd9Sstevel@tonic-gate 	}
4170*7c478bd9Sstevel@tonic-gate 
4171*7c478bd9Sstevel@tonic-gate 	wq = tep->te_wq;
4172*7c478bd9Sstevel@tonic-gate 
4173*7c478bd9Sstevel@tonic-gate 	/*
4174*7c478bd9Sstevel@tonic-gate 	 * Note: T_ADDR_REQ message has only PRIM_type field
4175*7c478bd9Sstevel@tonic-gate 	 * so it is already validated earlier.
4176*7c478bd9Sstevel@tonic-gate 	 */
4177*7c478bd9Sstevel@tonic-gate 
4178*7c478bd9Sstevel@tonic-gate 	if (IS_CLTS(tep) ||
4179*7c478bd9Sstevel@tonic-gate 	    (tep->te_state > TS_WREQ_ORDREL) ||
4180*7c478bd9Sstevel@tonic-gate 	    (tep->te_state < TS_DATA_XFER)) {
4181*7c478bd9Sstevel@tonic-gate 		/*
4182*7c478bd9Sstevel@tonic-gate 		 * Either connectionless or connection oriented but not
4183*7c478bd9Sstevel@tonic-gate 		 * in connected data transfer state or half-closed states.
4184*7c478bd9Sstevel@tonic-gate 		 */
4185*7c478bd9Sstevel@tonic-gate 		ack_sz = sizeof (struct T_addr_ack);
4186*7c478bd9Sstevel@tonic-gate 		if (tep->te_state >= TS_IDLE)
4187*7c478bd9Sstevel@tonic-gate 			/* is bound */
4188*7c478bd9Sstevel@tonic-gate 			ack_sz += tep->te_alen;
4189*7c478bd9Sstevel@tonic-gate 		ackmp = reallocb(mp, ack_sz, 0);
4190*7c478bd9Sstevel@tonic-gate 		if (ackmp == NULL) {
4191*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 1,
4192*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
4193*7c478bd9Sstevel@tonic-gate 				"tl_addr_req: reallocb failed"));
4194*7c478bd9Sstevel@tonic-gate 			tl_memrecover(wq, mp, ack_sz);
4195*7c478bd9Sstevel@tonic-gate 			return;
4196*7c478bd9Sstevel@tonic-gate 		}
4197*7c478bd9Sstevel@tonic-gate 
4198*7c478bd9Sstevel@tonic-gate 		taa = (struct T_addr_ack *)ackmp->b_rptr;
4199*7c478bd9Sstevel@tonic-gate 
4200*7c478bd9Sstevel@tonic-gate 		bzero(taa, sizeof (struct T_addr_ack));
4201*7c478bd9Sstevel@tonic-gate 
4202*7c478bd9Sstevel@tonic-gate 		taa->PRIM_type = T_ADDR_ACK;
4203*7c478bd9Sstevel@tonic-gate 		ackmp->b_datap->db_type = M_PCPROTO;
4204*7c478bd9Sstevel@tonic-gate 		ackmp->b_wptr = (uchar_t *)&taa[1];
4205*7c478bd9Sstevel@tonic-gate 
4206*7c478bd9Sstevel@tonic-gate 		if (tep->te_state >= TS_IDLE) {
4207*7c478bd9Sstevel@tonic-gate 			/* endpoint is bound */
4208*7c478bd9Sstevel@tonic-gate 			taa->LOCADDR_length = tep->te_alen;
4209*7c478bd9Sstevel@tonic-gate 			taa->LOCADDR_offset = (t_scalar_t)sizeof (*taa);
4210*7c478bd9Sstevel@tonic-gate 
4211*7c478bd9Sstevel@tonic-gate 			bcopy(tep->te_abuf, ackmp->b_wptr,
4212*7c478bd9Sstevel@tonic-gate 				tep->te_alen);
4213*7c478bd9Sstevel@tonic-gate 			ackmp->b_wptr += tep->te_alen;
4214*7c478bd9Sstevel@tonic-gate 			ASSERT(ackmp->b_wptr <= ackmp->b_datap->db_lim);
4215*7c478bd9Sstevel@tonic-gate 		}
4216*7c478bd9Sstevel@tonic-gate 
4217*7c478bd9Sstevel@tonic-gate 		(void) qreply(wq, ackmp);
4218*7c478bd9Sstevel@tonic-gate 	} else {
4219*7c478bd9Sstevel@tonic-gate 		ASSERT(tep->te_state == TS_DATA_XFER ||
4220*7c478bd9Sstevel@tonic-gate 			tep->te_state == TS_WIND_ORDREL ||
4221*7c478bd9Sstevel@tonic-gate 			tep->te_state == TS_WREQ_ORDREL);
4222*7c478bd9Sstevel@tonic-gate 		/* connection oriented in data transfer */
4223*7c478bd9Sstevel@tonic-gate 		tl_connected_cots_addr_req(mp, tep);
4224*7c478bd9Sstevel@tonic-gate 	}
4225*7c478bd9Sstevel@tonic-gate }
4226*7c478bd9Sstevel@tonic-gate 
4227*7c478bd9Sstevel@tonic-gate 
4228*7c478bd9Sstevel@tonic-gate static void
4229*7c478bd9Sstevel@tonic-gate tl_connected_cots_addr_req(mblk_t *mp, tl_endpt_t *tep)
4230*7c478bd9Sstevel@tonic-gate {
4231*7c478bd9Sstevel@tonic-gate 	tl_endpt_t		*peer_tep;
4232*7c478bd9Sstevel@tonic-gate 	size_t			ack_sz;
4233*7c478bd9Sstevel@tonic-gate 	mblk_t			*ackmp;
4234*7c478bd9Sstevel@tonic-gate 	struct T_addr_ack	*taa;
4235*7c478bd9Sstevel@tonic-gate 	uchar_t			*addr_startp;
4236*7c478bd9Sstevel@tonic-gate 
4237*7c478bd9Sstevel@tonic-gate 	if (tep->te_closing) {
4238*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
4239*7c478bd9Sstevel@tonic-gate 		return;
4240*7c478bd9Sstevel@tonic-gate 	}
4241*7c478bd9Sstevel@tonic-gate 
4242*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_state >= TS_IDLE);
4243*7c478bd9Sstevel@tonic-gate 
4244*7c478bd9Sstevel@tonic-gate 	ack_sz = sizeof (struct T_addr_ack);
4245*7c478bd9Sstevel@tonic-gate 	ack_sz += T_ALIGN(tep->te_alen);
4246*7c478bd9Sstevel@tonic-gate 	peer_tep = tep->te_conp;
4247*7c478bd9Sstevel@tonic-gate 	ack_sz += peer_tep->te_alen;
4248*7c478bd9Sstevel@tonic-gate 
4249*7c478bd9Sstevel@tonic-gate 	ackmp = tpi_ack_alloc(mp, ack_sz, M_PCPROTO, T_ADDR_ACK);
4250*7c478bd9Sstevel@tonic-gate 	if (ackmp == NULL) {
4251*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4252*7c478bd9Sstevel@tonic-gate 			"tl_connected_cots_addr_req: reallocb failed"));
4253*7c478bd9Sstevel@tonic-gate 		tl_memrecover(tep->te_wq, mp, ack_sz);
4254*7c478bd9Sstevel@tonic-gate 		return;
4255*7c478bd9Sstevel@tonic-gate 	}
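	/*
	 * Layout of the T_ADDR_ACK built below (offsets from b_rptr):
	 * LOCADDR_offset is sizeof (struct T_addr_ack) and is followed by
	 * the local address; REMADDR_offset is T_ALIGN() of the end of the
	 * local address and is followed by the peer's address.
	 */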
4256*7c478bd9Sstevel@tonic-gate 
4257*7c478bd9Sstevel@tonic-gate 	taa = (struct T_addr_ack *)ackmp->b_rptr;
4258*7c478bd9Sstevel@tonic-gate 
4259*7c478bd9Sstevel@tonic-gate 	/* endpoint is bound */
4260*7c478bd9Sstevel@tonic-gate 	taa->LOCADDR_length = tep->te_alen;
4261*7c478bd9Sstevel@tonic-gate 	taa->LOCADDR_offset = (t_scalar_t)sizeof (*taa);
4262*7c478bd9Sstevel@tonic-gate 
4263*7c478bd9Sstevel@tonic-gate 	addr_startp = (uchar_t *)&taa[1];
4264*7c478bd9Sstevel@tonic-gate 
4265*7c478bd9Sstevel@tonic-gate 	bcopy(tep->te_abuf, addr_startp,
4266*7c478bd9Sstevel@tonic-gate 	    tep->te_alen);
4267*7c478bd9Sstevel@tonic-gate 
4268*7c478bd9Sstevel@tonic-gate 	taa->REMADDR_length = peer_tep->te_alen;
4269*7c478bd9Sstevel@tonic-gate 	taa->REMADDR_offset = (t_scalar_t)T_ALIGN(taa->LOCADDR_offset +
4270*7c478bd9Sstevel@tonic-gate 				    taa->LOCADDR_length);
4271*7c478bd9Sstevel@tonic-gate 	addr_startp = ackmp->b_rptr + taa->REMADDR_offset;
4272*7c478bd9Sstevel@tonic-gate 	bcopy(peer_tep->te_abuf, addr_startp,
4273*7c478bd9Sstevel@tonic-gate 	    peer_tep->te_alen);
4274*7c478bd9Sstevel@tonic-gate 	ackmp->b_wptr = (uchar_t *)ackmp->b_rptr +
4275*7c478bd9Sstevel@tonic-gate 	    taa->REMADDR_offset + peer_tep->te_alen;
4276*7c478bd9Sstevel@tonic-gate 	ASSERT(ackmp->b_wptr <= ackmp->b_datap->db_lim);
4277*7c478bd9Sstevel@tonic-gate 
4278*7c478bd9Sstevel@tonic-gate 	putnext(tep->te_rq, ackmp);
4279*7c478bd9Sstevel@tonic-gate }
4280*7c478bd9Sstevel@tonic-gate 
4281*7c478bd9Sstevel@tonic-gate static void
4282*7c478bd9Sstevel@tonic-gate tl_copy_info(struct T_info_ack *ia, tl_endpt_t *tep)
4283*7c478bd9Sstevel@tonic-gate {
4284*7c478bd9Sstevel@tonic-gate 	if (IS_CLTS(tep)) {
4285*7c478bd9Sstevel@tonic-gate 		*ia = tl_clts_info_ack;
4286*7c478bd9Sstevel@tonic-gate 		ia->TSDU_size = tl_tidusz; /* TSDU and TIDU size are same */
4287*7c478bd9Sstevel@tonic-gate 	} else {
4288*7c478bd9Sstevel@tonic-gate 		*ia = tl_cots_info_ack;
4289*7c478bd9Sstevel@tonic-gate 		if (IS_COTSORD(tep))
4290*7c478bd9Sstevel@tonic-gate 			ia->SERV_type = T_COTS_ORD;
4291*7c478bd9Sstevel@tonic-gate 	}
4292*7c478bd9Sstevel@tonic-gate 	ia->TIDU_size = tl_tidusz;
4293*7c478bd9Sstevel@tonic-gate 	ia->CURRENT_state = tep->te_state;
4294*7c478bd9Sstevel@tonic-gate }
4295*7c478bd9Sstevel@tonic-gate 
4296*7c478bd9Sstevel@tonic-gate /*
4297*7c478bd9Sstevel@tonic-gate  * This routine responds to T_CAPABILITY_REQ messages.  It is called by
4298*7c478bd9Sstevel@tonic-gate  * tl_wput.
4299*7c478bd9Sstevel@tonic-gate  */
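/*
 * Only the TC1_INFO and TC1_ACCEPTOR_ID capability bits are honored; any
 * other bits requested in CAP_bits1 come back cleared.
 */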
4300*7c478bd9Sstevel@tonic-gate static void
4301*7c478bd9Sstevel@tonic-gate tl_capability_req(mblk_t *mp, tl_endpt_t *tep)
4302*7c478bd9Sstevel@tonic-gate {
4303*7c478bd9Sstevel@tonic-gate 	mblk_t			*ackmp;
4304*7c478bd9Sstevel@tonic-gate 	t_uscalar_t		cap_bits1;
4305*7c478bd9Sstevel@tonic-gate 	struct T_capability_ack	*tcap;
4306*7c478bd9Sstevel@tonic-gate 
4307*7c478bd9Sstevel@tonic-gate 	if (tep->te_closing) {
4308*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
4309*7c478bd9Sstevel@tonic-gate 		return;
4310*7c478bd9Sstevel@tonic-gate 	}
4311*7c478bd9Sstevel@tonic-gate 
4312*7c478bd9Sstevel@tonic-gate 	cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1;
4313*7c478bd9Sstevel@tonic-gate 
4314*7c478bd9Sstevel@tonic-gate 	ackmp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
4315*7c478bd9Sstevel@tonic-gate 	    M_PCPROTO, T_CAPABILITY_ACK);
4316*7c478bd9Sstevel@tonic-gate 	if (ackmp == NULL) {
4317*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4318*7c478bd9Sstevel@tonic-gate 			"tl_capability_req: reallocb failed"));
4319*7c478bd9Sstevel@tonic-gate 		tl_memrecover(tep->te_wq, mp,
4320*7c478bd9Sstevel@tonic-gate 		    sizeof (struct T_capability_ack));
4321*7c478bd9Sstevel@tonic-gate 		return;
4322*7c478bd9Sstevel@tonic-gate 	}
4323*7c478bd9Sstevel@tonic-gate 
4324*7c478bd9Sstevel@tonic-gate 	tcap = (struct T_capability_ack *)ackmp->b_rptr;
4325*7c478bd9Sstevel@tonic-gate 	tcap->CAP_bits1 = 0;
4326*7c478bd9Sstevel@tonic-gate 
4327*7c478bd9Sstevel@tonic-gate 	if (cap_bits1 & TC1_INFO) {
4328*7c478bd9Sstevel@tonic-gate 		tl_copy_info(&tcap->INFO_ack, tep);
4329*7c478bd9Sstevel@tonic-gate 		tcap->CAP_bits1 |= TC1_INFO;
4330*7c478bd9Sstevel@tonic-gate 	}
4331*7c478bd9Sstevel@tonic-gate 
4332*7c478bd9Sstevel@tonic-gate 	if (cap_bits1 & TC1_ACCEPTOR_ID) {
4333*7c478bd9Sstevel@tonic-gate 		tcap->ACCEPTOR_id = tep->te_acceptor_id;
4334*7c478bd9Sstevel@tonic-gate 		tcap->CAP_bits1 |= TC1_ACCEPTOR_ID;
4335*7c478bd9Sstevel@tonic-gate 	}
4336*7c478bd9Sstevel@tonic-gate 
4337*7c478bd9Sstevel@tonic-gate 	putnext(tep->te_rq, ackmp);
4338*7c478bd9Sstevel@tonic-gate }
4339*7c478bd9Sstevel@tonic-gate 
4340*7c478bd9Sstevel@tonic-gate static void
4341*7c478bd9Sstevel@tonic-gate tl_info_req_ser(mblk_t *mp, tl_endpt_t *tep)
4342*7c478bd9Sstevel@tonic-gate {
4343*7c478bd9Sstevel@tonic-gate 	if (! tep->te_closing)
4344*7c478bd9Sstevel@tonic-gate 		tl_info_req(mp, tep);
4345*7c478bd9Sstevel@tonic-gate 	else
4346*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
4347*7c478bd9Sstevel@tonic-gate 
4348*7c478bd9Sstevel@tonic-gate 	tl_serializer_exit(tep);
4349*7c478bd9Sstevel@tonic-gate 	tl_refrele(tep);
4350*7c478bd9Sstevel@tonic-gate }
4351*7c478bd9Sstevel@tonic-gate 
4352*7c478bd9Sstevel@tonic-gate static void
4353*7c478bd9Sstevel@tonic-gate tl_info_req(mblk_t *mp, tl_endpt_t *tep)
4354*7c478bd9Sstevel@tonic-gate {
4355*7c478bd9Sstevel@tonic-gate 	mblk_t *ackmp;
4356*7c478bd9Sstevel@tonic-gate 
4357*7c478bd9Sstevel@tonic-gate 	ackmp = tpi_ack_alloc(mp, sizeof (struct T_info_ack),
4358*7c478bd9Sstevel@tonic-gate 	    M_PCPROTO, T_INFO_ACK);
4359*7c478bd9Sstevel@tonic-gate 	if (ackmp == NULL) {
4360*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4361*7c478bd9Sstevel@tonic-gate 			"tl_info_req: reallocb failed"));
4362*7c478bd9Sstevel@tonic-gate 		tl_memrecover(tep->te_wq, mp, sizeof (struct T_info_ack));
4363*7c478bd9Sstevel@tonic-gate 		return;
4364*7c478bd9Sstevel@tonic-gate 	}
4365*7c478bd9Sstevel@tonic-gate 
4366*7c478bd9Sstevel@tonic-gate 	/*
4367*7c478bd9Sstevel@tonic-gate 	 * fill in T_INFO_ACK contents
4368*7c478bd9Sstevel@tonic-gate 	 */
4369*7c478bd9Sstevel@tonic-gate 	tl_copy_info((struct T_info_ack *)ackmp->b_rptr, tep);
4370*7c478bd9Sstevel@tonic-gate 
4371*7c478bd9Sstevel@tonic-gate 	/*
4372*7c478bd9Sstevel@tonic-gate 	 * send ack message
4373*7c478bd9Sstevel@tonic-gate 	 */
4374*7c478bd9Sstevel@tonic-gate 	putnext(tep->te_rq, ackmp);
4375*7c478bd9Sstevel@tonic-gate }
4376*7c478bd9Sstevel@tonic-gate 
4377*7c478bd9Sstevel@tonic-gate /*
4378*7c478bd9Sstevel@tonic-gate  * Handle M_DATA, T_data_req and T_optdata_req.
4379*7c478bd9Sstevel@tonic-gate  * If this is a socket pass through T_optdata_req options unmodified.
4380*7c478bd9Sstevel@tonic-gate  */
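/*
 * The common path is: validate the sender's state, check flow control on
 * the peer's read queue with canputnext() (putting the message back with
 * TL_PUTBQ for the write service procedure when it fails), retype an
 * M_PROTO T_*_REQ primitive in place to the matching T_*_IND (M_DATA
 * passes through unchanged), and putnext() the same mblk to the peer.
 */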
4381*7c478bd9Sstevel@tonic-gate static void
4382*7c478bd9Sstevel@tonic-gate tl_data(mblk_t *mp, tl_endpt_t *tep)
4383*7c478bd9Sstevel@tonic-gate {
4384*7c478bd9Sstevel@tonic-gate 	queue_t			*wq = tep->te_wq;
4385*7c478bd9Sstevel@tonic-gate 	union T_primitives	*prim = (union T_primitives *)mp->b_rptr;
4386*7c478bd9Sstevel@tonic-gate 	ssize_t			msz = MBLKL(mp);
4387*7c478bd9Sstevel@tonic-gate 	tl_endpt_t		*peer_tep;
4388*7c478bd9Sstevel@tonic-gate 	queue_t			*peer_rq;
4389*7c478bd9Sstevel@tonic-gate 	boolean_t		closing = tep->te_closing;
4390*7c478bd9Sstevel@tonic-gate 
4391*7c478bd9Sstevel@tonic-gate 	if (IS_CLTS(tep)) {
4392*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 2,
4393*7c478bd9Sstevel@tonic-gate 			    SL_TRACE|SL_ERROR,
4394*7c478bd9Sstevel@tonic-gate 			    "tl_wput:clts:unattached M_DATA"));
4395*7c478bd9Sstevel@tonic-gate 		if (!closing) {
4396*7c478bd9Sstevel@tonic-gate 			tl_merror(wq, mp, EPROTO);
4397*7c478bd9Sstevel@tonic-gate 		} else {
4398*7c478bd9Sstevel@tonic-gate 			freemsg(mp);
4399*7c478bd9Sstevel@tonic-gate 		}
4400*7c478bd9Sstevel@tonic-gate 		return;
4401*7c478bd9Sstevel@tonic-gate 	}
4402*7c478bd9Sstevel@tonic-gate 
4403*7c478bd9Sstevel@tonic-gate 	/*
4404*7c478bd9Sstevel@tonic-gate 	 * If the endpoint is closing, it should still forward any data to
4405*7c478bd9Sstevel@tonic-gate 	 * the peer (if it has one). If it is not allowed to forward, it can
4406*7c478bd9Sstevel@tonic-gate 	 * just free the message.
4407*7c478bd9Sstevel@tonic-gate 	 */
4408*7c478bd9Sstevel@tonic-gate 	if (closing &&
4409*7c478bd9Sstevel@tonic-gate 	    (tep->te_state != TS_DATA_XFER) &&
4410*7c478bd9Sstevel@tonic-gate 	    (tep->te_state != TS_WREQ_ORDREL)) {
4411*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
4412*7c478bd9Sstevel@tonic-gate 		return;
4413*7c478bd9Sstevel@tonic-gate 	}
4414*7c478bd9Sstevel@tonic-gate 
4415*7c478bd9Sstevel@tonic-gate 	if (DB_TYPE(mp) == M_PROTO) {
4416*7c478bd9Sstevel@tonic-gate 		if (prim->type == T_DATA_REQ &&
4417*7c478bd9Sstevel@tonic-gate 		    msz < sizeof (struct T_data_req)) {
4418*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 1,
4419*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
4420*7c478bd9Sstevel@tonic-gate 				"tl_data:T_DATA_REQ:invalid message"));
4421*7c478bd9Sstevel@tonic-gate 			if (!closing) {
4422*7c478bd9Sstevel@tonic-gate 				tl_merror(wq, mp, EPROTO);
4423*7c478bd9Sstevel@tonic-gate 			} else {
4424*7c478bd9Sstevel@tonic-gate 				freemsg(mp);
4425*7c478bd9Sstevel@tonic-gate 			}
4426*7c478bd9Sstevel@tonic-gate 			return;
4427*7c478bd9Sstevel@tonic-gate 		} else if (prim->type == T_OPTDATA_REQ &&
4428*7c478bd9Sstevel@tonic-gate 			    (msz < sizeof (struct T_optdata_req) ||
4429*7c478bd9Sstevel@tonic-gate 			    !IS_SOCKET(tep))) {
4430*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 1,
4431*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
4432*7c478bd9Sstevel@tonic-gate 				"tl_data:T_OPTDATA_REQ:invalid message"));
4433*7c478bd9Sstevel@tonic-gate 			if (!closing) {
4434*7c478bd9Sstevel@tonic-gate 				tl_merror(wq, mp, EPROTO);
4435*7c478bd9Sstevel@tonic-gate 			} else {
4436*7c478bd9Sstevel@tonic-gate 				freemsg(mp);
4437*7c478bd9Sstevel@tonic-gate 			}
4438*7c478bd9Sstevel@tonic-gate 			return;
4439*7c478bd9Sstevel@tonic-gate 		}
4440*7c478bd9Sstevel@tonic-gate 	}
4441*7c478bd9Sstevel@tonic-gate 
4442*7c478bd9Sstevel@tonic-gate 	/*
4443*7c478bd9Sstevel@tonic-gate 	 * connection oriented provider
4444*7c478bd9Sstevel@tonic-gate 	 */
4445*7c478bd9Sstevel@tonic-gate 	switch (tep->te_state) {
4446*7c478bd9Sstevel@tonic-gate 	case TS_IDLE:
4447*7c478bd9Sstevel@tonic-gate 		/*
4448*7c478bd9Sstevel@tonic-gate 		 * Other end not here - do nothing.
4449*7c478bd9Sstevel@tonic-gate 		 */
4450*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
4451*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
4452*7c478bd9Sstevel@tonic-gate 			"tl_data:cots with endpoint idle"));
4453*7c478bd9Sstevel@tonic-gate 		return;
4454*7c478bd9Sstevel@tonic-gate 
4455*7c478bd9Sstevel@tonic-gate 	case TS_DATA_XFER:
4456*7c478bd9Sstevel@tonic-gate 		/* valid states */
4457*7c478bd9Sstevel@tonic-gate 		if (tep->te_conp != NULL)
4458*7c478bd9Sstevel@tonic-gate 			break;
4459*7c478bd9Sstevel@tonic-gate 
4460*7c478bd9Sstevel@tonic-gate 		if (tep->te_oconp == NULL) {
4461*7c478bd9Sstevel@tonic-gate 			if (!closing) {
4462*7c478bd9Sstevel@tonic-gate 				tl_merror(wq, mp, EPROTO);
4463*7c478bd9Sstevel@tonic-gate 			} else {
4464*7c478bd9Sstevel@tonic-gate 				freemsg(mp);
4465*7c478bd9Sstevel@tonic-gate 			}
4466*7c478bd9Sstevel@tonic-gate 			return;
4467*7c478bd9Sstevel@tonic-gate 		}
4468*7c478bd9Sstevel@tonic-gate 		/*
4469*7c478bd9Sstevel@tonic-gate 		 * For a socket the T_CONN_CON is sent early, thus
4470*7c478bd9Sstevel@tonic-gate 		 * the peer might not yet have accepted the connection.
4471*7c478bd9Sstevel@tonic-gate 		 * If we are closing, queue the packet with the T_CONN_IND.
4472*7c478bd9Sstevel@tonic-gate 		 * Otherwise defer processing the packet until the peer
4473*7c478bd9Sstevel@tonic-gate 		 * accepts the connection.
4474*7c478bd9Sstevel@tonic-gate 		 * Note that the queue is noenabled when we go into this
4475*7c478bd9Sstevel@tonic-gate 		 * state.
4476*7c478bd9Sstevel@tonic-gate 		 */
4477*7c478bd9Sstevel@tonic-gate 		if (!closing) {
4478*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 1,
4479*7c478bd9Sstevel@tonic-gate 				    SL_TRACE|SL_ERROR,
4480*7c478bd9Sstevel@tonic-gate 				    "tl_data: ocon"));
4481*7c478bd9Sstevel@tonic-gate 			TL_PUTBQ(tep, mp);
4482*7c478bd9Sstevel@tonic-gate 			return;
4483*7c478bd9Sstevel@tonic-gate 		}
4484*7c478bd9Sstevel@tonic-gate 		if (DB_TYPE(mp) == M_PROTO) {
4485*7c478bd9Sstevel@tonic-gate 			if (msz < sizeof (t_scalar_t)) {
4486*7c478bd9Sstevel@tonic-gate 				freemsg(mp);
4487*7c478bd9Sstevel@tonic-gate 				return;
4488*7c478bd9Sstevel@tonic-gate 			}
4489*7c478bd9Sstevel@tonic-gate 			/* reuse message block - just change REQ to IND */
4490*7c478bd9Sstevel@tonic-gate 			if (prim->type == T_DATA_REQ)
4491*7c478bd9Sstevel@tonic-gate 				prim->type = T_DATA_IND;
4492*7c478bd9Sstevel@tonic-gate 			else
4493*7c478bd9Sstevel@tonic-gate 				prim->type = T_OPTDATA_IND;
4494*7c478bd9Sstevel@tonic-gate 		}
4495*7c478bd9Sstevel@tonic-gate 		tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp);
4496*7c478bd9Sstevel@tonic-gate 		return;
4497*7c478bd9Sstevel@tonic-gate 
4498*7c478bd9Sstevel@tonic-gate 	case TS_WREQ_ORDREL:
4499*7c478bd9Sstevel@tonic-gate 		if (tep->te_conp == NULL) {
4500*7c478bd9Sstevel@tonic-gate 			/*
4501*7c478bd9Sstevel@tonic-gate 			 * Other end closed - generate discon_ind
4502*7c478bd9Sstevel@tonic-gate 			 * with reason 0 to cause an EPIPE but no
4503*7c478bd9Sstevel@tonic-gate 			 * read side error on AF_UNIX sockets.
4504*7c478bd9Sstevel@tonic-gate 			 */
4505*7c478bd9Sstevel@tonic-gate 			freemsg(mp);
4506*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 3,
4507*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
4508*7c478bd9Sstevel@tonic-gate 				"tl_data: WREQ_ORDREL and no peer"));
4509*7c478bd9Sstevel@tonic-gate 			tl_discon_ind(tep, 0);
4510*7c478bd9Sstevel@tonic-gate 			return;
4511*7c478bd9Sstevel@tonic-gate 		}
4512*7c478bd9Sstevel@tonic-gate 		break;
4513*7c478bd9Sstevel@tonic-gate 
4514*7c478bd9Sstevel@tonic-gate 	default:
4515*7c478bd9Sstevel@tonic-gate 		/* invalid state for event TE_DATA_REQ */
4516*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4517*7c478bd9Sstevel@tonic-gate 			"tl_data:cots:out of state"));
4518*7c478bd9Sstevel@tonic-gate 		tl_merror(wq, mp, EPROTO);
4519*7c478bd9Sstevel@tonic-gate 		return;
4520*7c478bd9Sstevel@tonic-gate 	}
4521*7c478bd9Sstevel@tonic-gate 	/*
4522*7c478bd9Sstevel@tonic-gate 	 * tep->te_state = NEXTSTATE(TE_DATA_REQ, tep->te_state);
4523*7c478bd9Sstevel@tonic-gate 	 * (State stays same on this event)
4524*7c478bd9Sstevel@tonic-gate 	 */
4525*7c478bd9Sstevel@tonic-gate 
4526*7c478bd9Sstevel@tonic-gate 	/*
4527*7c478bd9Sstevel@tonic-gate 	 * get connected endpoint
4528*7c478bd9Sstevel@tonic-gate 	 */
4529*7c478bd9Sstevel@tonic-gate 	if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
4530*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
4531*7c478bd9Sstevel@tonic-gate 		/* Peer closed */
4532*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4533*7c478bd9Sstevel@tonic-gate 			"tl_data: peer gone"));
4534*7c478bd9Sstevel@tonic-gate 		return;
4535*7c478bd9Sstevel@tonic-gate 	}
4536*7c478bd9Sstevel@tonic-gate 
4537*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_serializer == peer_tep->te_serializer);
4538*7c478bd9Sstevel@tonic-gate 	peer_rq = peer_tep->te_rq;
4539*7c478bd9Sstevel@tonic-gate 
4540*7c478bd9Sstevel@tonic-gate 	/*
4541*7c478bd9Sstevel@tonic-gate 	 * Put it back if flow controlled
4542*7c478bd9Sstevel@tonic-gate 	 * Note: the number of messages already on the queue when we are
4543*7c478bd9Sstevel@tonic-gate 	 * closing is bounded, so we can ignore flow control.
4544*7c478bd9Sstevel@tonic-gate 	 */
4545*7c478bd9Sstevel@tonic-gate 	if (!canputnext(peer_rq) && !closing) {
4546*7c478bd9Sstevel@tonic-gate 		TL_PUTBQ(tep, mp);
4547*7c478bd9Sstevel@tonic-gate 		return;
4548*7c478bd9Sstevel@tonic-gate 	}
4549*7c478bd9Sstevel@tonic-gate 
4550*7c478bd9Sstevel@tonic-gate 	/*
4551*7c478bd9Sstevel@tonic-gate 	 * validate peer state
4552*7c478bd9Sstevel@tonic-gate 	 */
4553*7c478bd9Sstevel@tonic-gate 	switch (peer_tep->te_state) {
4554*7c478bd9Sstevel@tonic-gate 	case TS_DATA_XFER:
4555*7c478bd9Sstevel@tonic-gate 	case TS_WIND_ORDREL:
4556*7c478bd9Sstevel@tonic-gate 		/* valid states */
4557*7c478bd9Sstevel@tonic-gate 		break;
4558*7c478bd9Sstevel@tonic-gate 	default:
4559*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4560*7c478bd9Sstevel@tonic-gate 			"tl_data:rx side:invalid state"));
4561*7c478bd9Sstevel@tonic-gate 		tl_merror(peer_tep->te_wq, mp, EPROTO);
4562*7c478bd9Sstevel@tonic-gate 		return;
4563*7c478bd9Sstevel@tonic-gate 	}
4564*7c478bd9Sstevel@tonic-gate 	if (DB_TYPE(mp) == M_PROTO) {
4565*7c478bd9Sstevel@tonic-gate 		/* reuse message block - just change REQ to IND */
4566*7c478bd9Sstevel@tonic-gate 		if (prim->type == T_DATA_REQ)
4567*7c478bd9Sstevel@tonic-gate 			prim->type = T_DATA_IND;
4568*7c478bd9Sstevel@tonic-gate 		else
4569*7c478bd9Sstevel@tonic-gate 			prim->type = T_OPTDATA_IND;
4570*7c478bd9Sstevel@tonic-gate 	}
4571*7c478bd9Sstevel@tonic-gate 	/*
4572*7c478bd9Sstevel@tonic-gate 	 * peer_tep->te_state = NEXTSTATE(TE_DATA_IND, peer_tep->te_state);
4573*7c478bd9Sstevel@tonic-gate 	 * (peer state stays same on this event)
4574*7c478bd9Sstevel@tonic-gate 	 */
4575*7c478bd9Sstevel@tonic-gate 	/*
4576*7c478bd9Sstevel@tonic-gate 	 * send data to connected peer
4577*7c478bd9Sstevel@tonic-gate 	 */
4578*7c478bd9Sstevel@tonic-gate 	putnext(peer_rq, mp);
4579*7c478bd9Sstevel@tonic-gate }
4580*7c478bd9Sstevel@tonic-gate 
4581*7c478bd9Sstevel@tonic-gate 
4582*7c478bd9Sstevel@tonic-gate 
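/*
 * Handle T_EXDATA_REQ (expedited data).  Follows the same pattern as
 * tl_data(): validate local and peer state, honor flow control, and
 * forward the message to the connected peer as T_EXDATA_IND.
 */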
4583*7c478bd9Sstevel@tonic-gate static void
4584*7c478bd9Sstevel@tonic-gate tl_exdata(mblk_t *mp, tl_endpt_t *tep)
4585*7c478bd9Sstevel@tonic-gate {
4586*7c478bd9Sstevel@tonic-gate 	queue_t			*wq = tep->te_wq;
4587*7c478bd9Sstevel@tonic-gate 	union T_primitives	*prim = (union T_primitives *)mp->b_rptr;
4588*7c478bd9Sstevel@tonic-gate 	ssize_t			msz = MBLKL(mp);
4589*7c478bd9Sstevel@tonic-gate 	tl_endpt_t		*peer_tep;
4590*7c478bd9Sstevel@tonic-gate 	queue_t			*peer_rq;
4591*7c478bd9Sstevel@tonic-gate 	boolean_t		closing = tep->te_closing;
4592*7c478bd9Sstevel@tonic-gate 
4593*7c478bd9Sstevel@tonic-gate 	if (msz < sizeof (struct T_exdata_req)) {
4594*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4595*7c478bd9Sstevel@tonic-gate 			"tl_exdata:invalid message"));
4596*7c478bd9Sstevel@tonic-gate 		if (!closing) {
4597*7c478bd9Sstevel@tonic-gate 			tl_merror(wq, mp, EPROTO);
4598*7c478bd9Sstevel@tonic-gate 		} else {
4599*7c478bd9Sstevel@tonic-gate 			freemsg(mp);
4600*7c478bd9Sstevel@tonic-gate 		}
4601*7c478bd9Sstevel@tonic-gate 		return;
4602*7c478bd9Sstevel@tonic-gate 	}
4603*7c478bd9Sstevel@tonic-gate 
4604*7c478bd9Sstevel@tonic-gate 	/*
4605*7c478bd9Sstevel@tonic-gate 	 * If the endpoint is closing, it should still forward any data to
4606*7c478bd9Sstevel@tonic-gate 	 * the peer (if it has one). If it is not allowed to forward, it can
4607*7c478bd9Sstevel@tonic-gate 	 * just free the message.
4608*7c478bd9Sstevel@tonic-gate 	 */
4609*7c478bd9Sstevel@tonic-gate 	if (closing &&
4610*7c478bd9Sstevel@tonic-gate 	    (tep->te_state != TS_DATA_XFER) &&
4611*7c478bd9Sstevel@tonic-gate 	    (tep->te_state != TS_WREQ_ORDREL)) {
4612*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
4613*7c478bd9Sstevel@tonic-gate 		return;
4614*7c478bd9Sstevel@tonic-gate 	}
4615*7c478bd9Sstevel@tonic-gate 
4616*7c478bd9Sstevel@tonic-gate 	/*
4617*7c478bd9Sstevel@tonic-gate 	 * validate state
4618*7c478bd9Sstevel@tonic-gate 	 */
4619*7c478bd9Sstevel@tonic-gate 	switch (tep->te_state) {
4620*7c478bd9Sstevel@tonic-gate 	case TS_IDLE:
4621*7c478bd9Sstevel@tonic-gate 		/*
4622*7c478bd9Sstevel@tonic-gate 		 * Other end not here - do nothing.
4623*7c478bd9Sstevel@tonic-gate 		 */
4624*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
4625*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
4626*7c478bd9Sstevel@tonic-gate 			"tl_exdata:cots with endpoint idle"));
4627*7c478bd9Sstevel@tonic-gate 		return;
4628*7c478bd9Sstevel@tonic-gate 
4629*7c478bd9Sstevel@tonic-gate 	case TS_DATA_XFER:
4630*7c478bd9Sstevel@tonic-gate 		/* valid states */
4631*7c478bd9Sstevel@tonic-gate 		if (tep->te_conp != NULL)
4632*7c478bd9Sstevel@tonic-gate 			break;
4633*7c478bd9Sstevel@tonic-gate 
4634*7c478bd9Sstevel@tonic-gate 		if (tep->te_oconp == NULL) {
4635*7c478bd9Sstevel@tonic-gate 			if (!closing) {
4636*7c478bd9Sstevel@tonic-gate 				tl_merror(wq, mp, EPROTO);
4637*7c478bd9Sstevel@tonic-gate 			} else {
4638*7c478bd9Sstevel@tonic-gate 				freemsg(mp);
4639*7c478bd9Sstevel@tonic-gate 			}
4640*7c478bd9Sstevel@tonic-gate 			return;
4641*7c478bd9Sstevel@tonic-gate 		}
4642*7c478bd9Sstevel@tonic-gate 		/*
4643*7c478bd9Sstevel@tonic-gate 		 * For a socket the T_CONN_CON is sent early, thus
4644*7c478bd9Sstevel@tonic-gate 		 * the peer might not yet have accepted the connection.
4645*7c478bd9Sstevel@tonic-gate 		 * If we are closing, queue the packet with the T_CONN_IND.
4646*7c478bd9Sstevel@tonic-gate 		 * Otherwise defer processing the packet until the peer
4647*7c478bd9Sstevel@tonic-gate 		 * accepts the connection.
4648*7c478bd9Sstevel@tonic-gate 		 * Note that the queue is noenabled when we go into this
4649*7c478bd9Sstevel@tonic-gate 		 * state.
4650*7c478bd9Sstevel@tonic-gate 		 */
4651*7c478bd9Sstevel@tonic-gate 		if (!closing) {
4652*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 1,
4653*7c478bd9Sstevel@tonic-gate 				    SL_TRACE|SL_ERROR,
4654*7c478bd9Sstevel@tonic-gate 				    "tl_exdata: ocon"));
4655*7c478bd9Sstevel@tonic-gate 			TL_PUTBQ(tep, mp);
4656*7c478bd9Sstevel@tonic-gate 			return;
4657*7c478bd9Sstevel@tonic-gate 		}
4658*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4659*7c478bd9Sstevel@tonic-gate 			    "tl_exdata: closing socket ocon"));
4660*7c478bd9Sstevel@tonic-gate 		prim->type = T_EXDATA_IND;
4661*7c478bd9Sstevel@tonic-gate 		tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp);
4662*7c478bd9Sstevel@tonic-gate 		return;
4663*7c478bd9Sstevel@tonic-gate 
4664*7c478bd9Sstevel@tonic-gate 	case TS_WREQ_ORDREL:
4665*7c478bd9Sstevel@tonic-gate 		if (tep->te_conp == NULL) {
4666*7c478bd9Sstevel@tonic-gate 			/*
4667*7c478bd9Sstevel@tonic-gate 			 * Other end closed - generate discon_ind
4668*7c478bd9Sstevel@tonic-gate 			 * with reason 0 to cause an EPIPE but no
4669*7c478bd9Sstevel@tonic-gate 			 * read side error on AF_UNIX sockets.
4670*7c478bd9Sstevel@tonic-gate 			 */
4671*7c478bd9Sstevel@tonic-gate 			freemsg(mp);
4672*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 3,
4673*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
4674*7c478bd9Sstevel@tonic-gate 				"tl_exdata: WREQ_ORDREL and no peer"));
4675*7c478bd9Sstevel@tonic-gate 			tl_discon_ind(tep, 0);
4676*7c478bd9Sstevel@tonic-gate 			return;
4677*7c478bd9Sstevel@tonic-gate 		}
4678*7c478bd9Sstevel@tonic-gate 		break;
4679*7c478bd9Sstevel@tonic-gate 
4680*7c478bd9Sstevel@tonic-gate 	default:
4681*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1,
4682*7c478bd9Sstevel@tonic-gate 			SL_TRACE|SL_ERROR,
4683*7c478bd9Sstevel@tonic-gate 			"tl_wput:T_EXDATA_REQ:out of state, state=%d",
4684*7c478bd9Sstevel@tonic-gate 			tep->te_state));
4685*7c478bd9Sstevel@tonic-gate 		tl_merror(wq, mp, EPROTO);
4686*7c478bd9Sstevel@tonic-gate 		return;
4687*7c478bd9Sstevel@tonic-gate 	}
4688*7c478bd9Sstevel@tonic-gate 	/*
4689*7c478bd9Sstevel@tonic-gate 	 * tep->te_state = NEXTSTATE(TE_EXDATA_REQ, tep->te_state);
4690*7c478bd9Sstevel@tonic-gate 	 * (state stays same on this event)
4691*7c478bd9Sstevel@tonic-gate 	 */
4692*7c478bd9Sstevel@tonic-gate 
4693*7c478bd9Sstevel@tonic-gate 	/*
4694*7c478bd9Sstevel@tonic-gate 	 * get connected endpoint
4695*7c478bd9Sstevel@tonic-gate 	 */
4696*7c478bd9Sstevel@tonic-gate 	if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
4697*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
4698*7c478bd9Sstevel@tonic-gate 		/* Peer closed */
4699*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4700*7c478bd9Sstevel@tonic-gate 			"tl_exdata: peer gone"));
4701*7c478bd9Sstevel@tonic-gate 		return;
4702*7c478bd9Sstevel@tonic-gate 	}
4703*7c478bd9Sstevel@tonic-gate 
4704*7c478bd9Sstevel@tonic-gate 	peer_rq = peer_tep->te_rq;
4705*7c478bd9Sstevel@tonic-gate 
4706*7c478bd9Sstevel@tonic-gate 	/*
4707*7c478bd9Sstevel@tonic-gate 	 * Put it back if flow controlled
4708*7c478bd9Sstevel@tonic-gate 	 * Note: the number of messages already on the queue when we are
4709*7c478bd9Sstevel@tonic-gate 	 * closing is bounded, so we can ignore flow control.
4710*7c478bd9Sstevel@tonic-gate 	 */
4711*7c478bd9Sstevel@tonic-gate 	if (!canputnext(peer_rq) && !closing) {
4712*7c478bd9Sstevel@tonic-gate 		TL_PUTBQ(tep, mp);
4713*7c478bd9Sstevel@tonic-gate 		return;
4714*7c478bd9Sstevel@tonic-gate 	}
4715*7c478bd9Sstevel@tonic-gate 
4716*7c478bd9Sstevel@tonic-gate 	/*
4717*7c478bd9Sstevel@tonic-gate 	 * validate state on peer
4718*7c478bd9Sstevel@tonic-gate 	 */
4719*7c478bd9Sstevel@tonic-gate 	switch (peer_tep->te_state) {
4720*7c478bd9Sstevel@tonic-gate 	case TS_DATA_XFER:
4721*7c478bd9Sstevel@tonic-gate 	case TS_WIND_ORDREL:
4722*7c478bd9Sstevel@tonic-gate 		/* valid states */
4723*7c478bd9Sstevel@tonic-gate 		break;
4724*7c478bd9Sstevel@tonic-gate 	default:
4725*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4726*7c478bd9Sstevel@tonic-gate 			"tl_exdata:rx side:invalid state"));
4727*7c478bd9Sstevel@tonic-gate 		tl_merror(peer_tep->te_wq, mp, EPROTO);
4728*7c478bd9Sstevel@tonic-gate 		return;
4729*7c478bd9Sstevel@tonic-gate 	}
4730*7c478bd9Sstevel@tonic-gate 	/*
4731*7c478bd9Sstevel@tonic-gate 	 * peer_tep->te_state = NEXTSTATE(TE_DATA_IND, peer_tep->te_state);
4732*7c478bd9Sstevel@tonic-gate 	 * (peer state stays same on this event)
4733*7c478bd9Sstevel@tonic-gate 	 */
4734*7c478bd9Sstevel@tonic-gate 	/*
4735*7c478bd9Sstevel@tonic-gate 	 * reuse message block
4736*7c478bd9Sstevel@tonic-gate 	 */
4737*7c478bd9Sstevel@tonic-gate 	prim->type = T_EXDATA_IND;
4738*7c478bd9Sstevel@tonic-gate 
4739*7c478bd9Sstevel@tonic-gate 	/*
4740*7c478bd9Sstevel@tonic-gate 	 * send data to connected peer
4741*7c478bd9Sstevel@tonic-gate 	 */
4742*7c478bd9Sstevel@tonic-gate 	putnext(peer_rq, mp);
4743*7c478bd9Sstevel@tonic-gate }
4744*7c478bd9Sstevel@tonic-gate 
4745*7c478bd9Sstevel@tonic-gate 
4746*7c478bd9Sstevel@tonic-gate 
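/*
 * Handle T_ORDREL_REQ (orderly release).  The sender moves through the
 * TE_ORDREL_REQ state transition and the message is forwarded to the
 * connected peer as T_ORDREL_IND, which moves the peer through
 * TE_ORDREL_IND.
 */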
4747*7c478bd9Sstevel@tonic-gate static void
4748*7c478bd9Sstevel@tonic-gate tl_ordrel(mblk_t *mp, tl_endpt_t *tep)
4749*7c478bd9Sstevel@tonic-gate {
4750*7c478bd9Sstevel@tonic-gate 	queue_t			*wq =  tep->te_wq;
4751*7c478bd9Sstevel@tonic-gate 	union T_primitives	*prim = (union T_primitives *)mp->b_rptr;
4752*7c478bd9Sstevel@tonic-gate 	ssize_t			msz = MBLKL(mp);
4753*7c478bd9Sstevel@tonic-gate 	tl_endpt_t		*peer_tep;
4754*7c478bd9Sstevel@tonic-gate 	queue_t			*peer_rq;
4755*7c478bd9Sstevel@tonic-gate 	boolean_t		closing = tep->te_closing;
4756*7c478bd9Sstevel@tonic-gate 
4757*7c478bd9Sstevel@tonic-gate 	if (msz < sizeof (struct T_ordrel_req)) {
4758*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4759*7c478bd9Sstevel@tonic-gate 			"tl_ordrel:invalid message"));
4760*7c478bd9Sstevel@tonic-gate 		if (!closing) {
4761*7c478bd9Sstevel@tonic-gate 			tl_merror(wq, mp, EPROTO);
4762*7c478bd9Sstevel@tonic-gate 		} else {
4763*7c478bd9Sstevel@tonic-gate 			freemsg(mp);
4764*7c478bd9Sstevel@tonic-gate 		}
4765*7c478bd9Sstevel@tonic-gate 		return;
4766*7c478bd9Sstevel@tonic-gate 	}
4767*7c478bd9Sstevel@tonic-gate 
4768*7c478bd9Sstevel@tonic-gate 	/*
4769*7c478bd9Sstevel@tonic-gate 	 * validate state
4770*7c478bd9Sstevel@tonic-gate 	 */
4771*7c478bd9Sstevel@tonic-gate 	switch (tep->te_state) {
4772*7c478bd9Sstevel@tonic-gate 	case TS_DATA_XFER:
4773*7c478bd9Sstevel@tonic-gate 	case TS_WREQ_ORDREL:
4774*7c478bd9Sstevel@tonic-gate 		/* valid states */
4775*7c478bd9Sstevel@tonic-gate 		if (tep->te_conp != NULL)
4776*7c478bd9Sstevel@tonic-gate 			break;
4777*7c478bd9Sstevel@tonic-gate 
4778*7c478bd9Sstevel@tonic-gate 		if (tep->te_oconp == NULL)
4779*7c478bd9Sstevel@tonic-gate 			break;
4780*7c478bd9Sstevel@tonic-gate 
4781*7c478bd9Sstevel@tonic-gate 		/*
4782*7c478bd9Sstevel@tonic-gate 		 * For a socket the T_CONN_CON is sent early, thus
4783*7c478bd9Sstevel@tonic-gate 		 * the peer might not yet have accepted the connection.
4784*7c478bd9Sstevel@tonic-gate 		 * If we are closing, queue the packet with the T_CONN_IND.
4785*7c478bd9Sstevel@tonic-gate 		 * Otherwise defer processing the packet until the peer
4786*7c478bd9Sstevel@tonic-gate 		 * accepts the connection.
4787*7c478bd9Sstevel@tonic-gate 		 * Note that the queue is noenabled when we go into this
4788*7c478bd9Sstevel@tonic-gate 		 * state.
4789*7c478bd9Sstevel@tonic-gate 		 */
4790*7c478bd9Sstevel@tonic-gate 		if (!closing) {
4791*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 1,
4792*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
4793*7c478bd9Sstevel@tonic-gate 				"tl_ordrel: ocon"));
4794*7c478bd9Sstevel@tonic-gate 			TL_PUTBQ(tep, mp);
4795*7c478bd9Sstevel@tonic-gate 			return;
4796*7c478bd9Sstevel@tonic-gate 		}
4797*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4798*7c478bd9Sstevel@tonic-gate 			"tl_ordrel: closing socket ocon"));
4799*7c478bd9Sstevel@tonic-gate 		prim->type = T_ORDREL_IND;
4800*7c478bd9Sstevel@tonic-gate 		(void) tl_icon_queuemsg(tep->te_oconp, tep->te_seqno, mp);
4801*7c478bd9Sstevel@tonic-gate 		return;
4802*7c478bd9Sstevel@tonic-gate 
4803*7c478bd9Sstevel@tonic-gate 	default:
4804*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1,
4805*7c478bd9Sstevel@tonic-gate 			SL_TRACE|SL_ERROR,
4806*7c478bd9Sstevel@tonic-gate 			"tl_wput:T_ORDREL_REQ:out of state, state=%d",
4807*7c478bd9Sstevel@tonic-gate 			tep->te_state));
4808*7c478bd9Sstevel@tonic-gate 		if (!closing) {
4809*7c478bd9Sstevel@tonic-gate 			tl_merror(wq, mp, EPROTO);
4810*7c478bd9Sstevel@tonic-gate 		} else {
4811*7c478bd9Sstevel@tonic-gate 			freemsg(mp);
4812*7c478bd9Sstevel@tonic-gate 		}
4813*7c478bd9Sstevel@tonic-gate 		return;
4814*7c478bd9Sstevel@tonic-gate 	}
4815*7c478bd9Sstevel@tonic-gate 	tep->te_state = NEXTSTATE(TE_ORDREL_REQ, tep->te_state);
4816*7c478bd9Sstevel@tonic-gate 
4817*7c478bd9Sstevel@tonic-gate 	/*
4818*7c478bd9Sstevel@tonic-gate 	 * get connected endpoint
4819*7c478bd9Sstevel@tonic-gate 	 */
4820*7c478bd9Sstevel@tonic-gate 	if (((peer_tep = tep->te_conp) == NULL) || peer_tep->te_closing) {
4821*7c478bd9Sstevel@tonic-gate 		/* Peer closed */
4822*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4823*7c478bd9Sstevel@tonic-gate 			"tl_ordrel: peer gone"));
4824*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
4825*7c478bd9Sstevel@tonic-gate 		return;
4826*7c478bd9Sstevel@tonic-gate 	}
4827*7c478bd9Sstevel@tonic-gate 
4828*7c478bd9Sstevel@tonic-gate 	peer_rq = peer_tep->te_rq;
4829*7c478bd9Sstevel@tonic-gate 
4830*7c478bd9Sstevel@tonic-gate 	/*
4831*7c478bd9Sstevel@tonic-gate 	 * Put it back if flow controlled except when we are closing.
4832*7c478bd9Sstevel@tonic-gate 	 * Note: the number of messages already on the queue when we are
4833*7c478bd9Sstevel@tonic-gate 	 * closing is bounded, so we can ignore flow control.
4834*7c478bd9Sstevel@tonic-gate 	 */
4835*7c478bd9Sstevel@tonic-gate 	if (! canputnext(peer_rq) && !closing) {
4836*7c478bd9Sstevel@tonic-gate 		TL_PUTBQ(tep, mp);
4837*7c478bd9Sstevel@tonic-gate 		return;
4838*7c478bd9Sstevel@tonic-gate 	}
4839*7c478bd9Sstevel@tonic-gate 
4840*7c478bd9Sstevel@tonic-gate 	/*
4841*7c478bd9Sstevel@tonic-gate 	 * validate state on peer
4842*7c478bd9Sstevel@tonic-gate 	 */
4843*7c478bd9Sstevel@tonic-gate 	switch (peer_tep->te_state) {
4844*7c478bd9Sstevel@tonic-gate 	case TS_DATA_XFER:
4845*7c478bd9Sstevel@tonic-gate 	case TS_WIND_ORDREL:
4846*7c478bd9Sstevel@tonic-gate 		/* valid states */
4847*7c478bd9Sstevel@tonic-gate 		break;
4848*7c478bd9Sstevel@tonic-gate 	default:
4849*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
4850*7c478bd9Sstevel@tonic-gate 			"tl_ordrel:rx side:invalid state"));
4851*7c478bd9Sstevel@tonic-gate 		tl_merror(peer_tep->te_wq, mp, EPROTO);
4852*7c478bd9Sstevel@tonic-gate 		return;
4853*7c478bd9Sstevel@tonic-gate 	}
4854*7c478bd9Sstevel@tonic-gate 	peer_tep->te_state = NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state);
4855*7c478bd9Sstevel@tonic-gate 
4856*7c478bd9Sstevel@tonic-gate 	/*
4857*7c478bd9Sstevel@tonic-gate 	 * reuse message block
4858*7c478bd9Sstevel@tonic-gate 	 */
4859*7c478bd9Sstevel@tonic-gate 	prim->type = T_ORDREL_IND;
4860*7c478bd9Sstevel@tonic-gate 	(void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
4861*7c478bd9Sstevel@tonic-gate 		"tl_ordrel: send ordrel_ind"));
4862*7c478bd9Sstevel@tonic-gate 
4863*7c478bd9Sstevel@tonic-gate 	/*
4864*7c478bd9Sstevel@tonic-gate 	 * send data to connected peer
4865*7c478bd9Sstevel@tonic-gate 	 */
4866*7c478bd9Sstevel@tonic-gate 	putnext(peer_rq, mp);
4867*7c478bd9Sstevel@tonic-gate }
4868*7c478bd9Sstevel@tonic-gate 
4869*7c478bd9Sstevel@tonic-gate 
4870*7c478bd9Sstevel@tonic-gate /*
4871*7c478bd9Sstevel@tonic-gate  * Send T_UDERROR_IND. The error should be from the <sys/errno.h> space.
4872*7c478bd9Sstevel@tonic-gate  */
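/*
 * The T_UDERROR_IND is built in a freshly allocated mblk: the destination
 * address (if any) is copied right after the fixed header, and the options
 * (if any) follow at a T_ALIGN()ed offset, mirroring the address and
 * options of the failed T_UNITDATA_REQ.
 */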
4873*7c478bd9Sstevel@tonic-gate static void
4874*7c478bd9Sstevel@tonic-gate tl_uderr(queue_t *wq, mblk_t *mp, t_scalar_t err)
4875*7c478bd9Sstevel@tonic-gate {
4876*7c478bd9Sstevel@tonic-gate 	size_t			err_sz;
4877*7c478bd9Sstevel@tonic-gate 	tl_endpt_t		*tep;
4878*7c478bd9Sstevel@tonic-gate 	struct T_unitdata_req	*udreq;
4879*7c478bd9Sstevel@tonic-gate 	mblk_t			*err_mp;
4880*7c478bd9Sstevel@tonic-gate 	t_scalar_t		alen;
4881*7c478bd9Sstevel@tonic-gate 	t_scalar_t		olen;
4882*7c478bd9Sstevel@tonic-gate 	struct T_uderror_ind	*uderr;
4883*7c478bd9Sstevel@tonic-gate 	uchar_t			*addr_startp;
4884*7c478bd9Sstevel@tonic-gate 
4885*7c478bd9Sstevel@tonic-gate 	err_sz = sizeof (struct T_uderror_ind);
4886*7c478bd9Sstevel@tonic-gate 	tep = (tl_endpt_t *)wq->q_ptr;
4887*7c478bd9Sstevel@tonic-gate 	udreq = (struct T_unitdata_req *)mp->b_rptr;
4888*7c478bd9Sstevel@tonic-gate 	alen = udreq->DEST_length;
4889*7c478bd9Sstevel@tonic-gate 	olen = udreq->OPT_length;
4890*7c478bd9Sstevel@tonic-gate 
4891*7c478bd9Sstevel@tonic-gate 	if (alen > 0)
4892*7c478bd9Sstevel@tonic-gate 		err_sz = T_ALIGN(err_sz + alen);
4893*7c478bd9Sstevel@tonic-gate 	if (olen > 0)
4894*7c478bd9Sstevel@tonic-gate 		err_sz += olen;
4895*7c478bd9Sstevel@tonic-gate 
4896*7c478bd9Sstevel@tonic-gate 	err_mp = allocb(err_sz, BPRI_MED);
4897*7c478bd9Sstevel@tonic-gate 	if (! err_mp) {
4898*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
4899*7c478bd9Sstevel@tonic-gate 			"tl_uderr:allocb failure"));
4900*7c478bd9Sstevel@tonic-gate 		/*
4901*7c478bd9Sstevel@tonic-gate 		 * Note: no rollback of state needed as it does
4902*7c478bd9Sstevel@tonic-gate 		 * not change in connectionless transport
4903*7c478bd9Sstevel@tonic-gate 		 */
4904*7c478bd9Sstevel@tonic-gate 		tl_memrecover(wq, mp, err_sz);
4905*7c478bd9Sstevel@tonic-gate 		return;
4906*7c478bd9Sstevel@tonic-gate 	}
4907*7c478bd9Sstevel@tonic-gate 
4908*7c478bd9Sstevel@tonic-gate 	DB_TYPE(err_mp) = M_PROTO;
4909*7c478bd9Sstevel@tonic-gate 	err_mp->b_wptr = err_mp->b_rptr + err_sz;
4910*7c478bd9Sstevel@tonic-gate 	uderr = (struct T_uderror_ind *)err_mp->b_rptr;
4911*7c478bd9Sstevel@tonic-gate 	uderr->PRIM_type = T_UDERROR_IND;
4912*7c478bd9Sstevel@tonic-gate 	uderr->ERROR_type = err;
4913*7c478bd9Sstevel@tonic-gate 	uderr->DEST_length = alen;
4914*7c478bd9Sstevel@tonic-gate 	uderr->OPT_length = olen;
4915*7c478bd9Sstevel@tonic-gate 	if (alen <= 0) {
4916*7c478bd9Sstevel@tonic-gate 		uderr->DEST_offset = 0;
4917*7c478bd9Sstevel@tonic-gate 	} else {
4918*7c478bd9Sstevel@tonic-gate 		uderr->DEST_offset =
4919*7c478bd9Sstevel@tonic-gate 			(t_scalar_t)sizeof (struct T_uderror_ind);
4920*7c478bd9Sstevel@tonic-gate 		addr_startp  = mp->b_rptr + udreq->DEST_offset;
4921*7c478bd9Sstevel@tonic-gate 		bcopy(addr_startp, err_mp->b_rptr + uderr->DEST_offset,
4922*7c478bd9Sstevel@tonic-gate 			(size_t)alen);
4923*7c478bd9Sstevel@tonic-gate 	}
4924*7c478bd9Sstevel@tonic-gate 	if (olen <= 0) {
4925*7c478bd9Sstevel@tonic-gate 		uderr->OPT_offset = 0;
4926*7c478bd9Sstevel@tonic-gate 	} else {
4927*7c478bd9Sstevel@tonic-gate 		uderr->OPT_offset =
4928*7c478bd9Sstevel@tonic-gate 			(t_scalar_t)T_ALIGN(sizeof (struct T_uderror_ind) +
4929*7c478bd9Sstevel@tonic-gate 						uderr->DEST_length);
4930*7c478bd9Sstevel@tonic-gate 		addr_startp  = mp->b_rptr + udreq->OPT_offset;
4931*7c478bd9Sstevel@tonic-gate 		bcopy(addr_startp, err_mp->b_rptr+uderr->OPT_offset,
4932*7c478bd9Sstevel@tonic-gate 			(size_t)olen);
4933*7c478bd9Sstevel@tonic-gate 	}
4934*7c478bd9Sstevel@tonic-gate 	freemsg(mp);
4935*7c478bd9Sstevel@tonic-gate 
4936*7c478bd9Sstevel@tonic-gate 	/*
4937*7c478bd9Sstevel@tonic-gate 	 * send indication message
4938*7c478bd9Sstevel@tonic-gate 	 */
4939*7c478bd9Sstevel@tonic-gate 	tep->te_state = NEXTSTATE(TE_UDERROR_IND, tep->te_state);
4940*7c478bd9Sstevel@tonic-gate 
4941*7c478bd9Sstevel@tonic-gate 	qreply(wq, err_mp);
4942*7c478bd9Sstevel@tonic-gate }
4943*7c478bd9Sstevel@tonic-gate 
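/*
 * Serializer callback for T_UNITDATA_REQ processing: queue the message if
 * other messages are already queued on the write queue, hand it to
 * tl_unitdata() if the endpoint still has a read queue, and drop it
 * otherwise.  In all cases exit the serializer and release the reference
 * held for this callback.
 */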
4944*7c478bd9Sstevel@tonic-gate static void
4945*7c478bd9Sstevel@tonic-gate tl_unitdata_ser(mblk_t *mp, tl_endpt_t *tep)
4946*7c478bd9Sstevel@tonic-gate {
4947*7c478bd9Sstevel@tonic-gate 	queue_t *wq = tep->te_wq;
4948*7c478bd9Sstevel@tonic-gate 
4949*7c478bd9Sstevel@tonic-gate 	if (!tep->te_closing && (wq->q_first != NULL)) {
4950*7c478bd9Sstevel@tonic-gate 		TL_PUTQ(tep, mp);
4951*7c478bd9Sstevel@tonic-gate 	} else if (tep->te_rq != NULL)
4952*7c478bd9Sstevel@tonic-gate 		tl_unitdata(mp, tep);
4953*7c478bd9Sstevel@tonic-gate 	else
4954*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
4955*7c478bd9Sstevel@tonic-gate 
4956*7c478bd9Sstevel@tonic-gate 	tl_serializer_exit(tep);
4957*7c478bd9Sstevel@tonic-gate 	tl_refrele(tep);
4958*7c478bd9Sstevel@tonic-gate }
4959*7c478bd9Sstevel@tonic-gate 
4960*7c478bd9Sstevel@tonic-gate /*
4961*7c478bd9Sstevel@tonic-gate  * Handle T_unitdata_req.
4962*7c478bd9Sstevel@tonic-gate  * If TL_SET[U]CRED or TL_SOCKUCRED generate the credentials options.
4963*7c478bd9Sstevel@tonic-gate  * If this is a socket pass through options unmodified.
4964*7c478bd9Sstevel@tonic-gate  */
4965*7c478bd9Sstevel@tonic-gate static void
4966*7c478bd9Sstevel@tonic-gate tl_unitdata(mblk_t *mp, tl_endpt_t *tep)
4967*7c478bd9Sstevel@tonic-gate {
4968*7c478bd9Sstevel@tonic-gate 	queue_t			*wq = tep->te_wq;
4969*7c478bd9Sstevel@tonic-gate 	soux_addr_t		ux_addr;
4970*7c478bd9Sstevel@tonic-gate 	tl_addr_t		destaddr;
4971*7c478bd9Sstevel@tonic-gate 	uchar_t			*addr_startp;
4972*7c478bd9Sstevel@tonic-gate 	tl_endpt_t		*peer_tep;
4973*7c478bd9Sstevel@tonic-gate 	struct T_unitdata_ind	*udind;
4974*7c478bd9Sstevel@tonic-gate 	struct T_unitdata_req	*udreq;
4975*7c478bd9Sstevel@tonic-gate 	ssize_t			msz, ui_sz;
4976*7c478bd9Sstevel@tonic-gate 	t_scalar_t		alen, aoff, olen, ooff;
4977*7c478bd9Sstevel@tonic-gate 	t_scalar_t		oldolen = 0;
4978*7c478bd9Sstevel@tonic-gate 
4979*7c478bd9Sstevel@tonic-gate 	udreq = (struct T_unitdata_req *)mp->b_rptr;
4980*7c478bd9Sstevel@tonic-gate 	msz = MBLKL(mp);
4981*7c478bd9Sstevel@tonic-gate 
4982*7c478bd9Sstevel@tonic-gate 	/*
4983*7c478bd9Sstevel@tonic-gate 	 * validate the state
4984*7c478bd9Sstevel@tonic-gate 	 */
4985*7c478bd9Sstevel@tonic-gate 	if (tep->te_state != TS_IDLE) {
4986*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1,
4987*7c478bd9Sstevel@tonic-gate 			SL_TRACE|SL_ERROR,
4988*7c478bd9Sstevel@tonic-gate 			"tl_unitdata:T_UNITDATA_REQ:out of state"));
4989*7c478bd9Sstevel@tonic-gate 		tl_merror(wq, mp, EPROTO);
4990*7c478bd9Sstevel@tonic-gate 		return;
4991*7c478bd9Sstevel@tonic-gate 	}
4992*7c478bd9Sstevel@tonic-gate 	/*
4993*7c478bd9Sstevel@tonic-gate 	 * tep->te_state = NEXTSTATE(TE_UNITDATA_REQ, tep->te_state);
4994*7c478bd9Sstevel@tonic-gate 	 * (state does not change on this event)
4995*7c478bd9Sstevel@tonic-gate 	 */
4996*7c478bd9Sstevel@tonic-gate 
4997*7c478bd9Sstevel@tonic-gate 	/*
4998*7c478bd9Sstevel@tonic-gate 	 * validate the message
4999*7c478bd9Sstevel@tonic-gate 	 * Note: dereference fields in struct inside message only
5000*7c478bd9Sstevel@tonic-gate 	 * after validating the message length.
5001*7c478bd9Sstevel@tonic-gate 	 */
5002*7c478bd9Sstevel@tonic-gate 	if (msz < sizeof (struct T_unitdata_req)) {
5003*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
5004*7c478bd9Sstevel@tonic-gate 			"tl_unitdata:invalid message length"));
5005*7c478bd9Sstevel@tonic-gate 		tl_merror(wq, mp, EINVAL);
5006*7c478bd9Sstevel@tonic-gate 		return;
5007*7c478bd9Sstevel@tonic-gate 	}
5008*7c478bd9Sstevel@tonic-gate 	alen = udreq->DEST_length;
5009*7c478bd9Sstevel@tonic-gate 	aoff = udreq->DEST_offset;
5010*7c478bd9Sstevel@tonic-gate 	oldolen = olen = udreq->OPT_length;
5011*7c478bd9Sstevel@tonic-gate 	ooff = udreq->OPT_offset;
5012*7c478bd9Sstevel@tonic-gate 	if (olen == 0)
5013*7c478bd9Sstevel@tonic-gate 		ooff = 0;
5014*7c478bd9Sstevel@tonic-gate 
5015*7c478bd9Sstevel@tonic-gate 	if (IS_SOCKET(tep)) {
5016*7c478bd9Sstevel@tonic-gate 		if ((alen != TL_SOUX_ADDRLEN) ||
5017*7c478bd9Sstevel@tonic-gate 		    (aoff < 0) ||
5018*7c478bd9Sstevel@tonic-gate 		    (aoff + alen > msz) ||
5019*7c478bd9Sstevel@tonic-gate 		    (olen < 0) || (ooff < 0) ||
5020*7c478bd9Sstevel@tonic-gate 		    ((olen > 0) && ((ooff + olen) > msz))) {
5021*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor,
5022*7c478bd9Sstevel@tonic-gate 				    1, SL_TRACE|SL_ERROR,
5023*7c478bd9Sstevel@tonic-gate 				    "tl_unitdata_req: invalid socket addr "
5024*7c478bd9Sstevel@tonic-gate 				    "(msz=%d, al=%d, ao=%d, ol=%d, oo = %d)",
5025*7c478bd9Sstevel@tonic-gate 				    (int)msz, alen, aoff, olen, ooff));
5026*7c478bd9Sstevel@tonic-gate 			tl_error_ack(wq, mp, TSYSERR, EINVAL, T_UNITDATA_REQ);
5027*7c478bd9Sstevel@tonic-gate 			return;
5028*7c478bd9Sstevel@tonic-gate 		}
5029*7c478bd9Sstevel@tonic-gate 		bcopy(mp->b_rptr + aoff, &ux_addr, TL_SOUX_ADDRLEN);
5030*7c478bd9Sstevel@tonic-gate 
5031*7c478bd9Sstevel@tonic-gate 		if ((ux_addr.soua_magic != SOU_MAGIC_IMPLICIT) &&
5032*7c478bd9Sstevel@tonic-gate 		    (ux_addr.soua_magic != SOU_MAGIC_EXPLICIT)) {
5033*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor,
5034*7c478bd9Sstevel@tonic-gate 				    1, SL_TRACE|SL_ERROR,
5035*7c478bd9Sstevel@tonic-gate 				    "tl_unitdata: invalid socket magic"));
5036*7c478bd9Sstevel@tonic-gate 			tl_error_ack(wq, mp, TSYSERR, EINVAL, T_UNITDATA_REQ);
5037*7c478bd9Sstevel@tonic-gate 			return;
5038*7c478bd9Sstevel@tonic-gate 		}
5039*7c478bd9Sstevel@tonic-gate 	} else {
5040*7c478bd9Sstevel@tonic-gate 		if ((alen < 0) ||
5041*7c478bd9Sstevel@tonic-gate 		    (aoff < 0) ||
5042*7c478bd9Sstevel@tonic-gate 		    ((alen > 0) && ((aoff + alen) > msz)) ||
5043*7c478bd9Sstevel@tonic-gate 		    ((ssize_t)alen > (msz - sizeof (struct T_unitdata_req))) ||
5044*7c478bd9Sstevel@tonic-gate 		    ((aoff + alen) < 0) ||
5045*7c478bd9Sstevel@tonic-gate 		    ((olen > 0) && ((ooff + olen) > msz)) ||
5046*7c478bd9Sstevel@tonic-gate 		    (olen < 0) ||
5047*7c478bd9Sstevel@tonic-gate 		    (ooff < 0) ||
5048*7c478bd9Sstevel@tonic-gate 		    ((ssize_t)olen > (msz - sizeof (struct T_unitdata_req)))) {
5049*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 1,
5050*7c478bd9Sstevel@tonic-gate 				    SL_TRACE|SL_ERROR,
5051*7c478bd9Sstevel@tonic-gate 				    "tl_unitdata:invalid unit data message"));
5052*7c478bd9Sstevel@tonic-gate 			tl_merror(wq, mp, EINVAL);
5053*7c478bd9Sstevel@tonic-gate 			return;
5054*7c478bd9Sstevel@tonic-gate 		}
5055*7c478bd9Sstevel@tonic-gate 	}
5056*7c478bd9Sstevel@tonic-gate 
5057*7c478bd9Sstevel@tonic-gate 	/* Options not supported unless it's a socket */
5058*7c478bd9Sstevel@tonic-gate 	if (alen == 0 || (olen != 0 && !IS_SOCKET(tep))) {
5059*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
5060*7c478bd9Sstevel@tonic-gate 		    "tl_unitdata:option use(unsupported) or zero len addr"));
5061*7c478bd9Sstevel@tonic-gate 		tl_uderr(wq, mp, EPROTO);
5062*7c478bd9Sstevel@tonic-gate 		return;
5063*7c478bd9Sstevel@tonic-gate 	}
5064*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
5065*7c478bd9Sstevel@tonic-gate 	/*
5066*7c478bd9Sstevel@tonic-gate 	 * Mild form of ASSERT()ion to detect broken TPI apps.
5067*7c478bd9Sstevel@tonic-gate 	 * if (! assertion)
5068*7c478bd9Sstevel@tonic-gate 	 *	log warning;
5069*7c478bd9Sstevel@tonic-gate 	 */
5070*7c478bd9Sstevel@tonic-gate 	if (! (aoff >= (t_scalar_t)sizeof (struct T_unitdata_req))) {
5071*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
5072*7c478bd9Sstevel@tonic-gate 			"tl_unitdata:addr overlaps TPI message"));
5073*7c478bd9Sstevel@tonic-gate 	}
5074*7c478bd9Sstevel@tonic-gate #endif
5075*7c478bd9Sstevel@tonic-gate 	/*
5076*7c478bd9Sstevel@tonic-gate 	 * get destination endpoint
5077*7c478bd9Sstevel@tonic-gate 	 */
5078*7c478bd9Sstevel@tonic-gate 	destaddr.ta_alen = alen;
5079*7c478bd9Sstevel@tonic-gate 	destaddr.ta_abuf = mp->b_rptr + aoff;
5080*7c478bd9Sstevel@tonic-gate 	destaddr.ta_zoneid = tep->te_zoneid;
5081*7c478bd9Sstevel@tonic-gate 
5082*7c478bd9Sstevel@tonic-gate 	/*
5083*7c478bd9Sstevel@tonic-gate 	 * Check whether the destination is the same as the one used previously
5084*7c478bd9Sstevel@tonic-gate 	 * and the destination endpoint is in the right state. If something is
5085*7c478bd9Sstevel@tonic-gate 	 * wrong, find the destination again and cache it.
5086*7c478bd9Sstevel@tonic-gate 	 */
5087*7c478bd9Sstevel@tonic-gate 	peer_tep = tep->te_lastep;
5088*7c478bd9Sstevel@tonic-gate 
5089*7c478bd9Sstevel@tonic-gate 	if ((peer_tep == NULL) || peer_tep->te_closing ||
5090*7c478bd9Sstevel@tonic-gate 	    (peer_tep->te_state != TS_IDLE) ||
5091*7c478bd9Sstevel@tonic-gate 	    !tl_eqaddr(&destaddr, &peer_tep->te_ap)) {
5092*7c478bd9Sstevel@tonic-gate 		/*
5093*7c478bd9Sstevel@tonic-gate 		 * Not the same as the cached destination; need to find the right
5094*7c478bd9Sstevel@tonic-gate 		 * destination.
5095*7c478bd9Sstevel@tonic-gate 		 */
5096*7c478bd9Sstevel@tonic-gate 		peer_tep = (IS_SOCKET(tep) ?
5097*7c478bd9Sstevel@tonic-gate 		    tl_sock_find_peer(tep, &ux_addr) :
5098*7c478bd9Sstevel@tonic-gate 		    tl_find_peer(tep, &destaddr));
5099*7c478bd9Sstevel@tonic-gate 
5100*7c478bd9Sstevel@tonic-gate 		if (peer_tep == NULL) {
5101*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 3,
5102*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
5103*7c478bd9Sstevel@tonic-gate 				"tl_unitdata:no one at destination address"));
5104*7c478bd9Sstevel@tonic-gate 			tl_uderr(wq, mp, ECONNRESET);
5105*7c478bd9Sstevel@tonic-gate 			return;
5106*7c478bd9Sstevel@tonic-gate 		}
5107*7c478bd9Sstevel@tonic-gate 
5108*7c478bd9Sstevel@tonic-gate 		/*
5109*7c478bd9Sstevel@tonic-gate 		 * Cache the new peer.
5110*7c478bd9Sstevel@tonic-gate 		 */
5111*7c478bd9Sstevel@tonic-gate 		if (tep->te_lastep != NULL)
5112*7c478bd9Sstevel@tonic-gate 			tl_refrele(tep->te_lastep);
5113*7c478bd9Sstevel@tonic-gate 
5114*7c478bd9Sstevel@tonic-gate 		tep->te_lastep = peer_tep;
5115*7c478bd9Sstevel@tonic-gate 	}
5116*7c478bd9Sstevel@tonic-gate 
5117*7c478bd9Sstevel@tonic-gate 	if (peer_tep->te_state != TS_IDLE) {
5118*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
5119*7c478bd9Sstevel@tonic-gate 			"tl_unitdata:provider in invalid state"));
5120*7c478bd9Sstevel@tonic-gate 		tl_uderr(wq, mp, EPROTO);
5121*7c478bd9Sstevel@tonic-gate 		return;
5122*7c478bd9Sstevel@tonic-gate 	}
5123*7c478bd9Sstevel@tonic-gate 
5124*7c478bd9Sstevel@tonic-gate 	ASSERT(peer_tep->te_rq != NULL);
5125*7c478bd9Sstevel@tonic-gate 
5126*7c478bd9Sstevel@tonic-gate 	/*
5127*7c478bd9Sstevel@tonic-gate 	 * Put the message back if flow controlled, except when we are closing.
5128*7c478bd9Sstevel@tonic-gate 	 * Note: the number of messages already queued when we are closing is
5129*7c478bd9Sstevel@tonic-gate 	 * bounded, so we can ignore flow control.
5130*7c478bd9Sstevel@tonic-gate 	 */
5131*7c478bd9Sstevel@tonic-gate 	if (!canputnext(peer_tep->te_rq) && !(tep->te_closing)) {
5132*7c478bd9Sstevel@tonic-gate 		/* record what we are flow controlled on */
5133*7c478bd9Sstevel@tonic-gate 		if (tep->te_flowq != NULL) {
5134*7c478bd9Sstevel@tonic-gate 			list_remove(&tep->te_flowq->te_flowlist, tep);
5135*7c478bd9Sstevel@tonic-gate 		}
5136*7c478bd9Sstevel@tonic-gate 		list_insert_head(&peer_tep->te_flowlist, tep);
5137*7c478bd9Sstevel@tonic-gate 		tep->te_flowq = peer_tep;
5138*7c478bd9Sstevel@tonic-gate 		TL_PUTBQ(tep, mp);
5139*7c478bd9Sstevel@tonic-gate 		return;
5140*7c478bd9Sstevel@tonic-gate 	}
5141*7c478bd9Sstevel@tonic-gate 	/*
5142*7c478bd9Sstevel@tonic-gate 	 * prepare indication message
5143*7c478bd9Sstevel@tonic-gate 	 */
5144*7c478bd9Sstevel@tonic-gate 
5145*7c478bd9Sstevel@tonic-gate 	/*
5146*7c478bd9Sstevel@tonic-gate 	 * calculate length of message
5147*7c478bd9Sstevel@tonic-gate 	 */
5148*7c478bd9Sstevel@tonic-gate 	if (peer_tep->te_flag & TL_SETCRED) {
5149*7c478bd9Sstevel@tonic-gate 		ASSERT(olen == 0);
5150*7c478bd9Sstevel@tonic-gate 		olen = (t_scalar_t)sizeof (struct opthdr) +
5151*7c478bd9Sstevel@tonic-gate 				OPTLEN(sizeof (tl_credopt_t));
5152*7c478bd9Sstevel@tonic-gate 					/* 1 option only */
5153*7c478bd9Sstevel@tonic-gate 	} else if (peer_tep->te_flag & TL_SETUCRED) {
5154*7c478bd9Sstevel@tonic-gate 		ASSERT(olen == 0);
5155*7c478bd9Sstevel@tonic-gate 		olen = (t_scalar_t)sizeof (struct opthdr) + OPTLEN(ucredsize);
5156*7c478bd9Sstevel@tonic-gate 					/* 1 option only */
5157*7c478bd9Sstevel@tonic-gate 	} else if (peer_tep->te_flag & TL_SOCKUCRED) {
5158*7c478bd9Sstevel@tonic-gate 		/* Possibly more than one option */
5159*7c478bd9Sstevel@tonic-gate 		olen += (t_scalar_t)sizeof (struct T_opthdr) +
5160*7c478bd9Sstevel@tonic-gate 		    OPTLEN(ucredsize);
5161*7c478bd9Sstevel@tonic-gate 	}
5162*7c478bd9Sstevel@tonic-gate 
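	/*
	 * The indication is laid out as
	 *	[ T_unitdata_ind ][ src addr ][ pad ][ options ]
	 * where the options include any generated credentials option.
	 */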
5163*7c478bd9Sstevel@tonic-gate 	ui_sz = T_ALIGN(sizeof (struct T_unitdata_ind) + tep->te_alen) +
5164*7c478bd9Sstevel@tonic-gate 		olen;
5165*7c478bd9Sstevel@tonic-gate 	/*
5166*7c478bd9Sstevel@tonic-gate 	 * If the unitdata_ind fits and we are not adding options
5167*7c478bd9Sstevel@tonic-gate 	 * reuse the udreq mblk.
5168*7c478bd9Sstevel@tonic-gate 	 */
5169*7c478bd9Sstevel@tonic-gate 	if (msz >= ui_sz && alen >= tep->te_alen &&
5170*7c478bd9Sstevel@tonic-gate 	    !(peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED|TL_SOCKUCRED))) {
5171*7c478bd9Sstevel@tonic-gate 		/*
5172*7c478bd9Sstevel@tonic-gate 		 * Reuse the original mblk. Leave options in place.
5173*7c478bd9Sstevel@tonic-gate 		 */
5174*7c478bd9Sstevel@tonic-gate 		udind =  (struct T_unitdata_ind *)mp->b_rptr;
5175*7c478bd9Sstevel@tonic-gate 		udind->PRIM_type = T_UNITDATA_IND;
5176*7c478bd9Sstevel@tonic-gate 		udind->SRC_length = tep->te_alen;
5177*7c478bd9Sstevel@tonic-gate 		addr_startp = mp->b_rptr + udind->SRC_offset;
5178*7c478bd9Sstevel@tonic-gate 		bcopy(tep->te_abuf, addr_startp, tep->te_alen);
5179*7c478bd9Sstevel@tonic-gate 	} else {
5180*7c478bd9Sstevel@tonic-gate 		/* Allocate a new T_unitdata_ind message */
5181*7c478bd9Sstevel@tonic-gate 		mblk_t *ui_mp;
5182*7c478bd9Sstevel@tonic-gate 
5183*7c478bd9Sstevel@tonic-gate 		ui_mp = allocb(ui_sz, BPRI_MED);
5184*7c478bd9Sstevel@tonic-gate 		if (! ui_mp) {
5185*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 4, SL_TRACE,
5186*7c478bd9Sstevel@tonic-gate 				"tl_unitdata:allocb failure:message queued"));
5187*7c478bd9Sstevel@tonic-gate 			tl_memrecover(wq, mp, ui_sz);
5188*7c478bd9Sstevel@tonic-gate 			return;
5189*7c478bd9Sstevel@tonic-gate 		}
5190*7c478bd9Sstevel@tonic-gate 
5191*7c478bd9Sstevel@tonic-gate 		/*
5192*7c478bd9Sstevel@tonic-gate 		 * fill in T_UNITDATA_IND contents
5193*7c478bd9Sstevel@tonic-gate 		 */
5194*7c478bd9Sstevel@tonic-gate 		DB_TYPE(ui_mp) = M_PROTO;
5195*7c478bd9Sstevel@tonic-gate 		ui_mp->b_wptr = ui_mp->b_rptr + ui_sz;
5196*7c478bd9Sstevel@tonic-gate 		udind =  (struct T_unitdata_ind *)ui_mp->b_rptr;
5197*7c478bd9Sstevel@tonic-gate 		udind->PRIM_type = T_UNITDATA_IND;
5198*7c478bd9Sstevel@tonic-gate 		udind->SRC_offset = (t_scalar_t)sizeof (struct T_unitdata_ind);
5199*7c478bd9Sstevel@tonic-gate 		udind->SRC_length = tep->te_alen;
5200*7c478bd9Sstevel@tonic-gate 		addr_startp = ui_mp->b_rptr + udind->SRC_offset;
5201*7c478bd9Sstevel@tonic-gate 		bcopy(tep->te_abuf, addr_startp, tep->te_alen);
5202*7c478bd9Sstevel@tonic-gate 		udind->OPT_offset =
5203*7c478bd9Sstevel@tonic-gate 		    (t_scalar_t)T_ALIGN(udind->SRC_offset + udind->SRC_length);
5204*7c478bd9Sstevel@tonic-gate 		udind->OPT_length = olen;
5205*7c478bd9Sstevel@tonic-gate 		if (peer_tep->te_flag & (TL_SETCRED|TL_SETUCRED|TL_SOCKUCRED)) {
5206*7c478bd9Sstevel@tonic-gate 			if (oldolen != 0) {
5207*7c478bd9Sstevel@tonic-gate 				bcopy((void *)((uintptr_t)udreq + ooff),
5208*7c478bd9Sstevel@tonic-gate 				    (void *)((uintptr_t)udind +
5209*7c478bd9Sstevel@tonic-gate 				    udind->OPT_offset),
5210*7c478bd9Sstevel@tonic-gate 				    oldolen);
5211*7c478bd9Sstevel@tonic-gate 			}
5212*7c478bd9Sstevel@tonic-gate 			tl_fill_option(ui_mp->b_rptr + udind->OPT_offset +
5213*7c478bd9Sstevel@tonic-gate 			    oldolen,
5214*7c478bd9Sstevel@tonic-gate 			    DB_CREDDEF(mp, tep->te_credp), TLPID(mp, tep),
5215*7c478bd9Sstevel@tonic-gate 			    peer_tep->te_flag);
5216*7c478bd9Sstevel@tonic-gate 		} else {
5217*7c478bd9Sstevel@tonic-gate 			bcopy((void *)((uintptr_t)udreq + ooff),
5218*7c478bd9Sstevel@tonic-gate 				(void *)((uintptr_t)udind + udind->OPT_offset),
5219*7c478bd9Sstevel@tonic-gate 				olen);
5220*7c478bd9Sstevel@tonic-gate 		}
5221*7c478bd9Sstevel@tonic-gate 
5222*7c478bd9Sstevel@tonic-gate 		/*
5223*7c478bd9Sstevel@tonic-gate 		 * relink data blocks from mp to ui_mp
5224*7c478bd9Sstevel@tonic-gate 		 */
5225*7c478bd9Sstevel@tonic-gate 		ui_mp->b_cont = mp->b_cont;
5226*7c478bd9Sstevel@tonic-gate 		freeb(mp);
5227*7c478bd9Sstevel@tonic-gate 		mp = ui_mp;
5228*7c478bd9Sstevel@tonic-gate 	}
5229*7c478bd9Sstevel@tonic-gate 	/*
5230*7c478bd9Sstevel@tonic-gate 	 * send indication message
5231*7c478bd9Sstevel@tonic-gate 	 */
5232*7c478bd9Sstevel@tonic-gate 	peer_tep->te_state = NEXTSTATE(TE_UNITDATA_IND, peer_tep->te_state);
5233*7c478bd9Sstevel@tonic-gate 	putnext(peer_tep->te_rq, mp);
5234*7c478bd9Sstevel@tonic-gate }
5235*7c478bd9Sstevel@tonic-gate 
5236*7c478bd9Sstevel@tonic-gate 
5237*7c478bd9Sstevel@tonic-gate 
5238*7c478bd9Sstevel@tonic-gate /*
5239*7c478bd9Sstevel@tonic-gate  * Check if a given addr is in use.
5240*7c478bd9Sstevel@tonic-gate  * Endpoint ptr returned or NULL if not found.
5241*7c478bd9Sstevel@tonic-gate  * The name space is separate for each mode. This implies that
5242*7c478bd9Sstevel@tonic-gate  * sockets get their own name space.
5243*7c478bd9Sstevel@tonic-gate  */
5244*7c478bd9Sstevel@tonic-gate static tl_endpt_t *
5245*7c478bd9Sstevel@tonic-gate tl_find_peer(tl_endpt_t *tep, tl_addr_t *ap)
5246*7c478bd9Sstevel@tonic-gate {
5247*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *peer_tep = NULL;
5248*7c478bd9Sstevel@tonic-gate 	int rc = mod_hash_find_cb(tep->te_addrhash, (mod_hash_key_t)ap,
5249*7c478bd9Sstevel@tonic-gate 	    (mod_hash_val_t *)&peer_tep, tl_find_callback);
5250*7c478bd9Sstevel@tonic-gate 
5251*7c478bd9Sstevel@tonic-gate 	ASSERT(! IS_SOCKET(tep));
5252*7c478bd9Sstevel@tonic-gate 
5253*7c478bd9Sstevel@tonic-gate 	ASSERT(ap != NULL && ap->ta_alen > 0);
5254*7c478bd9Sstevel@tonic-gate 	ASSERT(ap->ta_zoneid == tep->te_zoneid);
5255*7c478bd9Sstevel@tonic-gate 	ASSERT(ap->ta_abuf != NULL);
5256*7c478bd9Sstevel@tonic-gate 	ASSERT(EQUIV(rc == 0, peer_tep != NULL));
5257*7c478bd9Sstevel@tonic-gate 	ASSERT(IMPLY(rc == 0,
5258*7c478bd9Sstevel@tonic-gate 		(tep->te_zoneid == peer_tep->te_zoneid) &&
5259*7c478bd9Sstevel@tonic-gate 		(tep->te_transport == peer_tep->te_transport)));
5260*7c478bd9Sstevel@tonic-gate 
5261*7c478bd9Sstevel@tonic-gate 	if ((rc == 0) && (peer_tep->te_closing)) {
5262*7c478bd9Sstevel@tonic-gate 		tl_refrele(peer_tep);
5263*7c478bd9Sstevel@tonic-gate 		peer_tep = NULL;
5264*7c478bd9Sstevel@tonic-gate 	}
5265*7c478bd9Sstevel@tonic-gate 
5266*7c478bd9Sstevel@tonic-gate 	return (peer_tep);
5267*7c478bd9Sstevel@tonic-gate }
5268*7c478bd9Sstevel@tonic-gate 
5269*7c478bd9Sstevel@tonic-gate /*
5270*7c478bd9Sstevel@tonic-gate  * Find peer for a socket based on unix domain address.
5271*7c478bd9Sstevel@tonic-gate  * For implicit addresses our peer can be found by minor number in the ai hash.
5272*7c478bd9Sstevel@tonic-gate  * For explicit binds we look up the vnode address in the address hash.
5273*7c478bd9Sstevel@tonic-gate  */
5274*7c478bd9Sstevel@tonic-gate static tl_endpt_t *
5275*7c478bd9Sstevel@tonic-gate tl_sock_find_peer(tl_endpt_t *tep, soux_addr_t *ux_addr)
5276*7c478bd9Sstevel@tonic-gate {
5277*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *peer_tep = NULL;
5278*7c478bd9Sstevel@tonic-gate 	mod_hash_t *hash = ux_addr->soua_magic == SOU_MAGIC_IMPLICIT ?
5279*7c478bd9Sstevel@tonic-gate 	    tep->te_aihash : tep->te_addrhash;
5280*7c478bd9Sstevel@tonic-gate 	int rc = mod_hash_find_cb(hash, (mod_hash_key_t)ux_addr->soua_vp,
5281*7c478bd9Sstevel@tonic-gate 	    (mod_hash_val_t *)&peer_tep, tl_find_callback);
5282*7c478bd9Sstevel@tonic-gate 
5283*7c478bd9Sstevel@tonic-gate 	ASSERT(IS_SOCKET(tep));
5284*7c478bd9Sstevel@tonic-gate 	ASSERT(EQUIV(rc == 0, peer_tep != NULL));
5285*7c478bd9Sstevel@tonic-gate 	ASSERT(IMPLY(rc == 0,
5286*7c478bd9Sstevel@tonic-gate 		(tep->te_zoneid == peer_tep->te_zoneid) &&
5287*7c478bd9Sstevel@tonic-gate 		(tep->te_transport == peer_tep->te_transport)));
5288*7c478bd9Sstevel@tonic-gate 	/*
5289*7c478bd9Sstevel@tonic-gate 	 * Don't attempt to use closing peer.
5290*7c478bd9Sstevel@tonic-gate 	 */
5291*7c478bd9Sstevel@tonic-gate 	if ((peer_tep != NULL) &&
5292*7c478bd9Sstevel@tonic-gate 	    (peer_tep->te_closing ||
5293*7c478bd9Sstevel@tonic-gate 		(peer_tep->te_zoneid != tep->te_zoneid))) {
5294*7c478bd9Sstevel@tonic-gate 		tl_refrele(peer_tep);
5295*7c478bd9Sstevel@tonic-gate 		peer_tep = NULL;
5296*7c478bd9Sstevel@tonic-gate 	}
5297*7c478bd9Sstevel@tonic-gate 
5298*7c478bd9Sstevel@tonic-gate 	return (peer_tep);
5299*7c478bd9Sstevel@tonic-gate }
5300*7c478bd9Sstevel@tonic-gate 
5301*7c478bd9Sstevel@tonic-gate /*
5302*7c478bd9Sstevel@tonic-gate  * Generate a free address and store it in the endpoint's address buffer
5303*7c478bd9Sstevel@tonic-gate  * (tep->te_abuf), allocating a larger buffer when necessary.
5304*7c478bd9Sstevel@tonic-gate  * The generated address will be at least 4 bytes long and, if req->ta_alen
5305*7c478bd9Sstevel@tonic-gate  * exceeds 4 bytes, will be req->ta_alen bytes long.
5306*7c478bd9Sstevel@tonic-gate  *
5307*7c478bd9Sstevel@tonic-gate  * If a free address is found, the endpoint is inserted into the address hash.
5308*7c478bd9Sstevel@tonic-gate  *
5309*7c478bd9Sstevel@tonic-gate  * If req->ta_alen is larger than the default alen (4 bytes), the last
5310*7c478bd9Sstevel@tonic-gate  * alen-4 bytes will always be the same as in req.
5311*7c478bd9Sstevel@tonic-gate  *
5312*7c478bd9Sstevel@tonic-gate  * Return B_FALSE for failure.
5313*7c478bd9Sstevel@tonic-gate  * Return B_TRUE for success.
5314*7c478bd9Sstevel@tonic-gate  */
5315*7c478bd9Sstevel@tonic-gate static boolean_t
5316*7c478bd9Sstevel@tonic-gate tl_get_any_addr(tl_endpt_t *tep, tl_addr_t *req)
5317*7c478bd9Sstevel@tonic-gate {
5318*7c478bd9Sstevel@tonic-gate 	t_scalar_t	alen;
5319*7c478bd9Sstevel@tonic-gate 	uint32_t	loopcnt;	/* Limit loop to 2^32 */
5320*7c478bd9Sstevel@tonic-gate 
5321*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_hash_hndl != NULL);
5322*7c478bd9Sstevel@tonic-gate 	ASSERT(! IS_SOCKET(tep));
5323*7c478bd9Sstevel@tonic-gate 
5324*7c478bd9Sstevel@tonic-gate 	if (tep->te_hash_hndl == NULL)
5325*7c478bd9Sstevel@tonic-gate 		return (B_FALSE);
5326*7c478bd9Sstevel@tonic-gate 
5327*7c478bd9Sstevel@tonic-gate 	/*
5328*7c478bd9Sstevel@tonic-gate 	 * check if default addr is in use
5329*7c478bd9Sstevel@tonic-gate 	 * if it is - bump it and try again
5330*7c478bd9Sstevel@tonic-gate 	 */
5331*7c478bd9Sstevel@tonic-gate 	if (req == NULL) {
5332*7c478bd9Sstevel@tonic-gate 		alen = sizeof (uint32_t);
5333*7c478bd9Sstevel@tonic-gate 	} else {
5334*7c478bd9Sstevel@tonic-gate 		alen = max(req->ta_alen, sizeof (uint32_t));
5335*7c478bd9Sstevel@tonic-gate 		ASSERT(tep->te_zoneid == req->ta_zoneid);
5336*7c478bd9Sstevel@tonic-gate 	}
5337*7c478bd9Sstevel@tonic-gate 
5338*7c478bd9Sstevel@tonic-gate 	if (tep->te_alen < alen) {
5339*7c478bd9Sstevel@tonic-gate 		void *abuf = kmem_zalloc((size_t)alen, KM_NOSLEEP);
5340*7c478bd9Sstevel@tonic-gate 
5341*7c478bd9Sstevel@tonic-gate 		/*
5342*7c478bd9Sstevel@tonic-gate 		 * Not enough space in tep->te_abuf to hold the address;
5343*7c478bd9Sstevel@tonic-gate 		 * allocate a bigger buffer.
5344*7c478bd9Sstevel@tonic-gate 		 */
5345*7c478bd9Sstevel@tonic-gate 		if (abuf == NULL)
5346*7c478bd9Sstevel@tonic-gate 			return (B_FALSE);
5347*7c478bd9Sstevel@tonic-gate 
5348*7c478bd9Sstevel@tonic-gate 		if (tep->te_alen > 0)
5349*7c478bd9Sstevel@tonic-gate 			kmem_free(tep->te_abuf, tep->te_alen);
5350*7c478bd9Sstevel@tonic-gate 
5351*7c478bd9Sstevel@tonic-gate 		tep->te_alen = alen;
5352*7c478bd9Sstevel@tonic-gate 		tep->te_abuf = abuf;
5353*7c478bd9Sstevel@tonic-gate 	}
5354*7c478bd9Sstevel@tonic-gate 
5355*7c478bd9Sstevel@tonic-gate 	/* Copy in the address in req */
5356*7c478bd9Sstevel@tonic-gate 	if (req != NULL) {
5357*7c478bd9Sstevel@tonic-gate 		ASSERT(alen >= req->ta_alen);
5358*7c478bd9Sstevel@tonic-gate 		bcopy(req->ta_abuf, tep->te_abuf, (size_t)req->ta_alen);
5359*7c478bd9Sstevel@tonic-gate 	}
5360*7c478bd9Sstevel@tonic-gate 
5361*7c478bd9Sstevel@tonic-gate 	/*
5362*7c478bd9Sstevel@tonic-gate 	 * First try minor number then try default addresses.
5363*7c478bd9Sstevel@tonic-gate 	 */
5364*7c478bd9Sstevel@tonic-gate 	bcopy(&tep->te_minor, tep->te_abuf, sizeof (uint32_t));
5365*7c478bd9Sstevel@tonic-gate 
5366*7c478bd9Sstevel@tonic-gate 	for (loopcnt = 0; loopcnt < UINT32_MAX; loopcnt++) {
5367*7c478bd9Sstevel@tonic-gate 		if (mod_hash_insert_reserve(tep->te_addrhash,
5368*7c478bd9Sstevel@tonic-gate 			(mod_hash_key_t)&tep->te_ap, (mod_hash_val_t)tep,
5369*7c478bd9Sstevel@tonic-gate 			tep->te_hash_hndl) == 0) {
5370*7c478bd9Sstevel@tonic-gate 			/*
5371*7c478bd9Sstevel@tonic-gate 			 * found free address
5372*7c478bd9Sstevel@tonic-gate 			 */
5373*7c478bd9Sstevel@tonic-gate 			tep->te_flag |= TL_ADDRHASHED;
5374*7c478bd9Sstevel@tonic-gate 			tep->te_hash_hndl = NULL;
5375*7c478bd9Sstevel@tonic-gate 
5376*7c478bd9Sstevel@tonic-gate 			return (B_TRUE); /* successful return */
5377*7c478bd9Sstevel@tonic-gate 		}
5378*7c478bd9Sstevel@tonic-gate 		/*
5379*7c478bd9Sstevel@tonic-gate 		 * Use default address.
5380*7c478bd9Sstevel@tonic-gate 		 */
5381*7c478bd9Sstevel@tonic-gate 		bcopy(&tep->te_defaddr, tep->te_abuf, sizeof (uint32_t));
5382*7c478bd9Sstevel@tonic-gate 		atomic_add_32(&tep->te_defaddr, 1);
5383*7c478bd9Sstevel@tonic-gate 	}
5384*7c478bd9Sstevel@tonic-gate 
5385*7c478bd9Sstevel@tonic-gate 	/*
5386*7c478bd9Sstevel@tonic-gate 	 * Failed to find anything.
5387*7c478bd9Sstevel@tonic-gate 	 */
5388*7c478bd9Sstevel@tonic-gate 	(void) (STRLOG(TL_ID, -1, 1, SL_ERROR,
5389*7c478bd9Sstevel@tonic-gate 		"tl_get_any_addr:looped 2^32 times"));
5390*7c478bd9Sstevel@tonic-gate 	return (B_FALSE);
5391*7c478bd9Sstevel@tonic-gate }
5392*7c478bd9Sstevel@tonic-gate 
5393*7c478bd9Sstevel@tonic-gate /*
5394*7c478bd9Sstevel@tonic-gate  * reallocb + set r/w ptrs to reflect size.
5395*7c478bd9Sstevel@tonic-gate  */
5396*7c478bd9Sstevel@tonic-gate static mblk_t *
5397*7c478bd9Sstevel@tonic-gate tl_resizemp(mblk_t *mp, ssize_t new_size)
5398*7c478bd9Sstevel@tonic-gate {
5399*7c478bd9Sstevel@tonic-gate 	if ((mp = reallocb(mp, new_size, 0)) == NULL)
5400*7c478bd9Sstevel@tonic-gate 		return (NULL);
5401*7c478bd9Sstevel@tonic-gate 
5402*7c478bd9Sstevel@tonic-gate 	mp->b_rptr = DB_BASE(mp);
5403*7c478bd9Sstevel@tonic-gate 	mp->b_wptr = mp->b_rptr + new_size;
5404*7c478bd9Sstevel@tonic-gate 	return (mp);
5405*7c478bd9Sstevel@tonic-gate }
5406*7c478bd9Sstevel@tonic-gate 
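/*
 * Back-enable all connectionless endpoints that are flow-controlled on this
 * endpoint: qenable each one that is not closing and remove it from this
 * endpoint's flow list.
 */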
5407*7c478bd9Sstevel@tonic-gate static void
5408*7c478bd9Sstevel@tonic-gate tl_cl_backenable(tl_endpt_t *tep)
5409*7c478bd9Sstevel@tonic-gate {
5410*7c478bd9Sstevel@tonic-gate 	list_t *l = &tep->te_flowlist;
5411*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *elp;
5412*7c478bd9Sstevel@tonic-gate 
5413*7c478bd9Sstevel@tonic-gate 	ASSERT(IS_CLTS(tep));
5414*7c478bd9Sstevel@tonic-gate 
5415*7c478bd9Sstevel@tonic-gate 	for (elp = list_head(l); elp != NULL; elp = list_head(l)) {
5416*7c478bd9Sstevel@tonic-gate 		ASSERT(tep->te_ser == elp->te_ser);
5417*7c478bd9Sstevel@tonic-gate 		ASSERT(elp->te_flowq == tep);
5418*7c478bd9Sstevel@tonic-gate 		if (! elp->te_closing)
5419*7c478bd9Sstevel@tonic-gate 			TL_QENABLE(elp);
5420*7c478bd9Sstevel@tonic-gate 		elp->te_flowq = NULL;
5421*7c478bd9Sstevel@tonic-gate 		list_remove(l, elp);
5422*7c478bd9Sstevel@tonic-gate 	}
5423*7c478bd9Sstevel@tonic-gate }
5424*7c478bd9Sstevel@tonic-gate 
5425*7c478bd9Sstevel@tonic-gate /*
5426*7c478bd9Sstevel@tonic-gate  * Unconnect endpoints.
5427*7c478bd9Sstevel@tonic-gate  */
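/*
 * Three cases are handled below: pending incoming connect indications
 * (te_nicon > 0), a pending outgoing connect request (te_oconp), and an
 * established connection (te_conp).
 */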
5428*7c478bd9Sstevel@tonic-gate static void
5429*7c478bd9Sstevel@tonic-gate tl_co_unconnect(tl_endpt_t *tep)
5430*7c478bd9Sstevel@tonic-gate {
5431*7c478bd9Sstevel@tonic-gate 	tl_endpt_t	*peer_tep = tep->te_conp;
5432*7c478bd9Sstevel@tonic-gate 	tl_endpt_t	*srv_tep = tep->te_oconp;
5433*7c478bd9Sstevel@tonic-gate 	list_t		*l;
5434*7c478bd9Sstevel@tonic-gate 	tl_icon_t  	*tip;
5435*7c478bd9Sstevel@tonic-gate 	tl_endpt_t	*cl_tep;
5436*7c478bd9Sstevel@tonic-gate 	mblk_t		*d_mp;
5437*7c478bd9Sstevel@tonic-gate 
5438*7c478bd9Sstevel@tonic-gate 	ASSERT(IS_COTS(tep));
5439*7c478bd9Sstevel@tonic-gate 	/*
5440*7c478bd9Sstevel@tonic-gate 	 * If our peer is closing, don't use it.
5441*7c478bd9Sstevel@tonic-gate 	 */
5442*7c478bd9Sstevel@tonic-gate 	if ((peer_tep != NULL) && peer_tep->te_closing) {
5443*7c478bd9Sstevel@tonic-gate 		TL_UNCONNECT(tep->te_conp);
5444*7c478bd9Sstevel@tonic-gate 		peer_tep = NULL;
5445*7c478bd9Sstevel@tonic-gate 	}
5446*7c478bd9Sstevel@tonic-gate 	if ((srv_tep != NULL) && srv_tep->te_closing) {
5447*7c478bd9Sstevel@tonic-gate 		TL_UNCONNECT(tep->te_oconp);
5448*7c478bd9Sstevel@tonic-gate 		srv_tep = NULL;
5449*7c478bd9Sstevel@tonic-gate 	}
5450*7c478bd9Sstevel@tonic-gate 
5451*7c478bd9Sstevel@tonic-gate 	if (tep->te_nicon > 0) {
5452*7c478bd9Sstevel@tonic-gate 		l = &tep->te_iconp;
5453*7c478bd9Sstevel@tonic-gate 		/*
5454*7c478bd9Sstevel@tonic-gate 		 * If incoming connect requests are pending, change the state
5455*7c478bd9Sstevel@tonic-gate 		 * of the clients on the disconnect ind event and send a
5456*7c478bd9Sstevel@tonic-gate 		 * discon_ind pdu to the modules above them.
5457*7c478bd9Sstevel@tonic-gate 		 * For a closing server all pending clients get disconnected.
5458*7c478bd9Sstevel@tonic-gate 		 */
5459*7c478bd9Sstevel@tonic-gate 
5460*7c478bd9Sstevel@tonic-gate 		while (tep->te_nicon > 0) {
5461*7c478bd9Sstevel@tonic-gate 			tip    = list_head(l);
5462*7c478bd9Sstevel@tonic-gate 			cl_tep = tip->ti_tep;
5463*7c478bd9Sstevel@tonic-gate 
5464*7c478bd9Sstevel@tonic-gate 			if (cl_tep == NULL) {
5465*7c478bd9Sstevel@tonic-gate 				tl_freetip(tep, tip);
5466*7c478bd9Sstevel@tonic-gate 				continue;
5467*7c478bd9Sstevel@tonic-gate 			}
5468*7c478bd9Sstevel@tonic-gate 
5469*7c478bd9Sstevel@tonic-gate 			if (cl_tep->te_oconp != NULL) {
5470*7c478bd9Sstevel@tonic-gate 				ASSERT(cl_tep != cl_tep->te_oconp);
5471*7c478bd9Sstevel@tonic-gate 				TL_UNCONNECT(cl_tep->te_oconp);
5472*7c478bd9Sstevel@tonic-gate 			}
5473*7c478bd9Sstevel@tonic-gate 
5474*7c478bd9Sstevel@tonic-gate 			if (cl_tep->te_closing) {
5475*7c478bd9Sstevel@tonic-gate 				tl_freetip(tep, tip);
5476*7c478bd9Sstevel@tonic-gate 				continue;
5477*7c478bd9Sstevel@tonic-gate 			}
5478*7c478bd9Sstevel@tonic-gate 
5479*7c478bd9Sstevel@tonic-gate 			enableok(cl_tep->te_wq);
5480*7c478bd9Sstevel@tonic-gate 			TL_QENABLE(cl_tep);
5481*7c478bd9Sstevel@tonic-gate 			d_mp = tl_discon_ind_alloc(ECONNREFUSED, BADSEQNUM);
5482*7c478bd9Sstevel@tonic-gate 			if (d_mp != NULL) {
5483*7c478bd9Sstevel@tonic-gate 				cl_tep->te_state = TS_IDLE;
5484*7c478bd9Sstevel@tonic-gate 				putnext(cl_tep->te_rq, d_mp);
5485*7c478bd9Sstevel@tonic-gate 			} else {
5486*7c478bd9Sstevel@tonic-gate 				(void) (STRLOG(TL_ID, tep->te_minor, 3,
5487*7c478bd9Sstevel@tonic-gate 					    SL_TRACE|SL_ERROR,
5488*7c478bd9Sstevel@tonic-gate 					    "tl_co_unconnect:incoming: "
5489*7c478bd9Sstevel@tonic-gate 					    "allocb failure"));
5490*7c478bd9Sstevel@tonic-gate 			}
5491*7c478bd9Sstevel@tonic-gate 			tl_freetip(tep, tip);
5492*7c478bd9Sstevel@tonic-gate 		}
5493*7c478bd9Sstevel@tonic-gate 	} else if (srv_tep != NULL) {
5494*7c478bd9Sstevel@tonic-gate 		/*
5495*7c478bd9Sstevel@tonic-gate 		 * If an outgoing connect request is pending, change the state
5496*7c478bd9Sstevel@tonic-gate 		 * of the server on the discon ind event.
5497*7c478bd9Sstevel@tonic-gate 		 */
5498*7c478bd9Sstevel@tonic-gate 
5499*7c478bd9Sstevel@tonic-gate 		if (IS_SOCKET(tep) && !tl_disable_early_connect &&
5500*7c478bd9Sstevel@tonic-gate 		    IS_COTSORD(srv_tep) &&
5501*7c478bd9Sstevel@tonic-gate 		    !tl_icon_hasprim(srv_tep, tep->te_seqno, T_ORDREL_IND)) {
5502*7c478bd9Sstevel@tonic-gate 			/*
5503*7c478bd9Sstevel@tonic-gate 			 * Queue ordrel_ind for server to be picked up
5504*7c478bd9Sstevel@tonic-gate 			 * when the connection is accepted.
5505*7c478bd9Sstevel@tonic-gate 			 */
5506*7c478bd9Sstevel@tonic-gate 			d_mp = tl_ordrel_ind_alloc();
5507*7c478bd9Sstevel@tonic-gate 		} else {
5508*7c478bd9Sstevel@tonic-gate 			/*
5509*7c478bd9Sstevel@tonic-gate 			 * send discon_ind to server
5510*7c478bd9Sstevel@tonic-gate 			 */
5511*7c478bd9Sstevel@tonic-gate 			d_mp = tl_discon_ind_alloc(ECONNRESET, tep->te_seqno);
5512*7c478bd9Sstevel@tonic-gate 		}
5513*7c478bd9Sstevel@tonic-gate 		if (d_mp == NULL) {
5514*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 3,
5515*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
5516*7c478bd9Sstevel@tonic-gate 				"tl_co_unconnect:outgoing:allocb failure"));
5517*7c478bd9Sstevel@tonic-gate 			TL_UNCONNECT(tep->te_oconp);
5518*7c478bd9Sstevel@tonic-gate 			goto discon_peer;
5519*7c478bd9Sstevel@tonic-gate 		}
5520*7c478bd9Sstevel@tonic-gate 
5521*7c478bd9Sstevel@tonic-gate 		/*
5522*7c478bd9Sstevel@tonic-gate 		 * If this is a socket the T_DISCON_IND is queued with
5523*7c478bd9Sstevel@tonic-gate 		 * the T_CONN_IND. Otherwise the T_CONN_IND is removed
5524*7c478bd9Sstevel@tonic-gate 		 * from the list of pending connections.
5525*7c478bd9Sstevel@tonic-gate 		 * Note that when te_oconp is set the peer must have
5526*7c478bd9Sstevel@tonic-gate 		 * a t_connind_t for the client.
5527*7c478bd9Sstevel@tonic-gate 		 */
5528*7c478bd9Sstevel@tonic-gate 		if (IS_SOCKET(tep) && !tl_disable_early_connect) {
5529*7c478bd9Sstevel@tonic-gate 			/*
5530*7c478bd9Sstevel@tonic-gate 			 * Queue the disconnection message.
5531*7c478bd9Sstevel@tonic-gate 			 */
5532*7c478bd9Sstevel@tonic-gate 			tl_icon_queuemsg(srv_tep, tep->te_seqno, d_mp);
5533*7c478bd9Sstevel@tonic-gate 		} else {
5534*7c478bd9Sstevel@tonic-gate 			tip = tl_icon_find(srv_tep, tep->te_seqno);
5535*7c478bd9Sstevel@tonic-gate 			if (tip == NULL) {
5536*7c478bd9Sstevel@tonic-gate 				freemsg(d_mp);
5537*7c478bd9Sstevel@tonic-gate 			} else {
5538*7c478bd9Sstevel@tonic-gate 				ASSERT(tep == tip->ti_tep);
5539*7c478bd9Sstevel@tonic-gate 				ASSERT(tep->te_ser == srv_tep->te_ser);
5540*7c478bd9Sstevel@tonic-gate 				/*
5541*7c478bd9Sstevel@tonic-gate 				 * Delete tip from the server list.
5542*7c478bd9Sstevel@tonic-gate 				 */
5543*7c478bd9Sstevel@tonic-gate 				if (srv_tep->te_nicon == 1) {
5544*7c478bd9Sstevel@tonic-gate 					srv_tep->te_state =
5545*7c478bd9Sstevel@tonic-gate 					    NEXTSTATE(TE_DISCON_IND2,
5546*7c478bd9Sstevel@tonic-gate 						srv_tep->te_state);
5547*7c478bd9Sstevel@tonic-gate 				} else {
5548*7c478bd9Sstevel@tonic-gate 					srv_tep->te_state =
5549*7c478bd9Sstevel@tonic-gate 					    NEXTSTATE(TE_DISCON_IND3,
5550*7c478bd9Sstevel@tonic-gate 						srv_tep->te_state);
5551*7c478bd9Sstevel@tonic-gate 				}
5552*7c478bd9Sstevel@tonic-gate 				ASSERT(*(uint32_t *)(d_mp->b_rptr) ==
5553*7c478bd9Sstevel@tonic-gate 				    T_DISCON_IND);
5554*7c478bd9Sstevel@tonic-gate 				putnext(srv_tep->te_rq, d_mp);
5555*7c478bd9Sstevel@tonic-gate 				tl_freetip(srv_tep, tip);
5556*7c478bd9Sstevel@tonic-gate 			}
5557*7c478bd9Sstevel@tonic-gate 			TL_UNCONNECT(tep->te_oconp);
5558*7c478bd9Sstevel@tonic-gate 			srv_tep = NULL;
5559*7c478bd9Sstevel@tonic-gate 		}
5560*7c478bd9Sstevel@tonic-gate 	} else if (peer_tep != NULL) {
5561*7c478bd9Sstevel@tonic-gate 		/*
5562*7c478bd9Sstevel@tonic-gate 		 * unconnect existing connection
5563*7c478bd9Sstevel@tonic-gate 		 * If connected, change state of peer on
5564*7c478bd9Sstevel@tonic-gate 		 * discon ind event and send discon ind pdu
5565*7c478bd9Sstevel@tonic-gate 		 * to module above it
5566*7c478bd9Sstevel@tonic-gate 		 */
5567*7c478bd9Sstevel@tonic-gate 
5568*7c478bd9Sstevel@tonic-gate 		ASSERT(tep->te_ser == peer_tep->te_ser);
5569*7c478bd9Sstevel@tonic-gate 		if (IS_COTSORD(peer_tep) &&
5570*7c478bd9Sstevel@tonic-gate 		    (peer_tep->te_state == TS_WIND_ORDREL ||
5571*7c478bd9Sstevel@tonic-gate 		    peer_tep->te_state == TS_DATA_XFER)) {
5572*7c478bd9Sstevel@tonic-gate 			/*
5573*7c478bd9Sstevel@tonic-gate 			 * send ordrel ind
5574*7c478bd9Sstevel@tonic-gate 			 */
5575*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE,
5576*7c478bd9Sstevel@tonic-gate 			"tl_co_unconnect:connected: ordrel_ind state %d->%d",
5577*7c478bd9Sstevel@tonic-gate 				peer_tep->te_state,
5578*7c478bd9Sstevel@tonic-gate 				NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state)));
5579*7c478bd9Sstevel@tonic-gate 			d_mp = tl_ordrel_ind_alloc();
5580*7c478bd9Sstevel@tonic-gate 			if (! d_mp) {
5581*7c478bd9Sstevel@tonic-gate 				(void) (STRLOG(TL_ID, tep->te_minor, 3,
5582*7c478bd9Sstevel@tonic-gate 				    SL_TRACE|SL_ERROR,
5583*7c478bd9Sstevel@tonic-gate 				    "tl_co_unconnect:connected:"
5584*7c478bd9Sstevel@tonic-gate 				    "allocb failure"));
5585*7c478bd9Sstevel@tonic-gate 				/*
5586*7c478bd9Sstevel@tonic-gate 				 * Continue with cleaning up peer as
5587*7c478bd9Sstevel@tonic-gate 				 * this side may go away with the close
5588*7c478bd9Sstevel@tonic-gate 				 */
5589*7c478bd9Sstevel@tonic-gate 				TL_QENABLE(peer_tep);
5590*7c478bd9Sstevel@tonic-gate 				goto discon_peer;
5591*7c478bd9Sstevel@tonic-gate 			}
5592*7c478bd9Sstevel@tonic-gate 			peer_tep->te_state =
5593*7c478bd9Sstevel@tonic-gate 				NEXTSTATE(TE_ORDREL_IND, peer_tep->te_state);
5594*7c478bd9Sstevel@tonic-gate 
5595*7c478bd9Sstevel@tonic-gate 			putnext(peer_tep->te_rq, d_mp);
5596*7c478bd9Sstevel@tonic-gate 			/*
5597*7c478bd9Sstevel@tonic-gate 			 * Handle flow control case.  This will generate
5598*7c478bd9Sstevel@tonic-gate 			 * a t_discon_ind message with reason 0 if there
5599*7c478bd9Sstevel@tonic-gate 			 * is data queued on the write side.
5600*7c478bd9Sstevel@tonic-gate 			 */
5601*7c478bd9Sstevel@tonic-gate 			TL_QENABLE(peer_tep);
5602*7c478bd9Sstevel@tonic-gate 		} else if (IS_COTSORD(peer_tep) &&
5603*7c478bd9Sstevel@tonic-gate 		    peer_tep->te_state == TS_WREQ_ORDREL) {
5604*7c478bd9Sstevel@tonic-gate 			/*
5605*7c478bd9Sstevel@tonic-gate 			 * An ordrel_ind has already been sent. Send a discon
5606*7c478bd9Sstevel@tonic-gate 			 * with error 0 to inform that the peer is gone.
5607*7c478bd9Sstevel@tonic-gate 			 */
5608*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 3,
5609*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
5610*7c478bd9Sstevel@tonic-gate 				"tl_co_unconnect: discon in state %d",
5611*7c478bd9Sstevel@tonic-gate 				tep->te_state));
5612*7c478bd9Sstevel@tonic-gate 			tl_discon_ind(peer_tep, 0);
5613*7c478bd9Sstevel@tonic-gate 		} else {
5614*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 3,
5615*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
5616*7c478bd9Sstevel@tonic-gate 				"tl_co_unconnect: state %d", tep->te_state));
5617*7c478bd9Sstevel@tonic-gate 			tl_discon_ind(peer_tep, ECONNRESET);
5618*7c478bd9Sstevel@tonic-gate 		}
5619*7c478bd9Sstevel@tonic-gate 
5620*7c478bd9Sstevel@tonic-gate discon_peer:
5621*7c478bd9Sstevel@tonic-gate 		/*
5622*7c478bd9Sstevel@tonic-gate 		 * Disconnect cross-pointers only for close
5623*7c478bd9Sstevel@tonic-gate 		 */
5624*7c478bd9Sstevel@tonic-gate 		if (tep->te_closing) {
5625*7c478bd9Sstevel@tonic-gate 			peer_tep = tep->te_conp;
5626*7c478bd9Sstevel@tonic-gate 			TL_REMOVE_PEER(peer_tep->te_conp);
5627*7c478bd9Sstevel@tonic-gate 			TL_REMOVE_PEER(tep->te_conp);
5628*7c478bd9Sstevel@tonic-gate 		}
5629*7c478bd9Sstevel@tonic-gate 	}
5630*7c478bd9Sstevel@tonic-gate }
5631*7c478bd9Sstevel@tonic-gate 
5632*7c478bd9Sstevel@tonic-gate /*
5633*7c478bd9Sstevel@tonic-gate  * Note: The following routine does not recover from allocb()
5634*7c478bd9Sstevel@tonic-gate  * failures
5635*7c478bd9Sstevel@tonic-gate  * The reason should be from the <sys/errno.h> space.
5636*7c478bd9Sstevel@tonic-gate  */
5637*7c478bd9Sstevel@tonic-gate static void
5638*7c478bd9Sstevel@tonic-gate tl_discon_ind(tl_endpt_t *tep, uint32_t reason)
5639*7c478bd9Sstevel@tonic-gate {
5640*7c478bd9Sstevel@tonic-gate 	mblk_t *d_mp;
5641*7c478bd9Sstevel@tonic-gate 
5642*7c478bd9Sstevel@tonic-gate 	if (tep->te_closing)
5643*7c478bd9Sstevel@tonic-gate 		return;
5644*7c478bd9Sstevel@tonic-gate 
5645*7c478bd9Sstevel@tonic-gate 	/*
5646*7c478bd9Sstevel@tonic-gate 	 * flush the queues.
5647*7c478bd9Sstevel@tonic-gate 	 */
5648*7c478bd9Sstevel@tonic-gate 	flushq(tep->te_rq, FLUSHDATA);
5649*7c478bd9Sstevel@tonic-gate 	(void) putnextctl1(tep->te_rq, M_FLUSH, FLUSHRW);
5650*7c478bd9Sstevel@tonic-gate 
5651*7c478bd9Sstevel@tonic-gate 	/*
5652*7c478bd9Sstevel@tonic-gate 	 * send discon ind
5653*7c478bd9Sstevel@tonic-gate 	 */
5654*7c478bd9Sstevel@tonic-gate 	d_mp = tl_discon_ind_alloc(reason, tep->te_seqno);
5655*7c478bd9Sstevel@tonic-gate 	if (! d_mp) {
5656*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 3, SL_TRACE|SL_ERROR,
5657*7c478bd9Sstevel@tonic-gate 			"tl_discon_ind:allocb failure"));
5658*7c478bd9Sstevel@tonic-gate 		return;
5659*7c478bd9Sstevel@tonic-gate 	}
5660*7c478bd9Sstevel@tonic-gate 	tep->te_state = TS_IDLE;
5661*7c478bd9Sstevel@tonic-gate 	putnext(tep->te_rq, d_mp);
5662*7c478bd9Sstevel@tonic-gate }
5663*7c478bd9Sstevel@tonic-gate 
5664*7c478bd9Sstevel@tonic-gate /*
5665*7c478bd9Sstevel@tonic-gate  * Note: The following routine does not recover from allocb()
5666*7c478bd9Sstevel@tonic-gate  * failures
5667*7c478bd9Sstevel@tonic-gate  * The reason should be from the <sys/errno.h> space.
5668*7c478bd9Sstevel@tonic-gate  */
5669*7c478bd9Sstevel@tonic-gate static mblk_t *
5670*7c478bd9Sstevel@tonic-gate tl_discon_ind_alloc(uint32_t reason, t_scalar_t seqnum)
5671*7c478bd9Sstevel@tonic-gate {
5672*7c478bd9Sstevel@tonic-gate 	mblk_t *mp;
5673*7c478bd9Sstevel@tonic-gate 	struct T_discon_ind *tdi;
5674*7c478bd9Sstevel@tonic-gate 
5675*7c478bd9Sstevel@tonic-gate 	if (mp = allocb(sizeof (struct T_discon_ind), BPRI_MED)) {
5676*7c478bd9Sstevel@tonic-gate 		DB_TYPE(mp) = M_PROTO;
5677*7c478bd9Sstevel@tonic-gate 		mp->b_wptr = mp->b_rptr + sizeof (struct T_discon_ind);
5678*7c478bd9Sstevel@tonic-gate 		tdi = (struct T_discon_ind *)mp->b_rptr;
5679*7c478bd9Sstevel@tonic-gate 		tdi->PRIM_type = T_DISCON_IND;
5680*7c478bd9Sstevel@tonic-gate 		tdi->DISCON_reason = reason;
5681*7c478bd9Sstevel@tonic-gate 		tdi->SEQ_number = seqnum;
5682*7c478bd9Sstevel@tonic-gate 	}
5683*7c478bd9Sstevel@tonic-gate 	return (mp);
5684*7c478bd9Sstevel@tonic-gate }
5685*7c478bd9Sstevel@tonic-gate 
5686*7c478bd9Sstevel@tonic-gate 
5687*7c478bd9Sstevel@tonic-gate /*
5688*7c478bd9Sstevel@tonic-gate  * Note: The following routine does not recover from allocb()
5689*7c478bd9Sstevel@tonic-gate  * failures
5690*7c478bd9Sstevel@tonic-gate  */
5691*7c478bd9Sstevel@tonic-gate static mblk_t *
5692*7c478bd9Sstevel@tonic-gate tl_ordrel_ind_alloc(void)
5693*7c478bd9Sstevel@tonic-gate {
5694*7c478bd9Sstevel@tonic-gate 	mblk_t *mp;
5695*7c478bd9Sstevel@tonic-gate 	struct T_ordrel_ind *toi;
5696*7c478bd9Sstevel@tonic-gate 
5697*7c478bd9Sstevel@tonic-gate 	if (mp = allocb(sizeof (struct T_ordrel_ind), BPRI_MED)) {
5698*7c478bd9Sstevel@tonic-gate 		DB_TYPE(mp) = M_PROTO;
5699*7c478bd9Sstevel@tonic-gate 		mp->b_wptr = mp->b_rptr + sizeof (struct T_ordrel_ind);
5700*7c478bd9Sstevel@tonic-gate 		toi = (struct T_ordrel_ind *)mp->b_rptr;
5701*7c478bd9Sstevel@tonic-gate 		toi->PRIM_type = T_ORDREL_IND;
5702*7c478bd9Sstevel@tonic-gate 	}
5703*7c478bd9Sstevel@tonic-gate 	return (mp);
5704*7c478bd9Sstevel@tonic-gate }
5705*7c478bd9Sstevel@tonic-gate 
5706*7c478bd9Sstevel@tonic-gate 
5707*7c478bd9Sstevel@tonic-gate /*
5708*7c478bd9Sstevel@tonic-gate  * Lookup the seqno in the list of queued connections.
5709*7c478bd9Sstevel@tonic-gate  */
5710*7c478bd9Sstevel@tonic-gate static tl_icon_t *
5711*7c478bd9Sstevel@tonic-gate tl_icon_find(tl_endpt_t *tep, t_scalar_t seqno)
5712*7c478bd9Sstevel@tonic-gate {
5713*7c478bd9Sstevel@tonic-gate 	list_t *l = &tep->te_iconp;
5714*7c478bd9Sstevel@tonic-gate 	tl_icon_t *tip = list_head(l);
5715*7c478bd9Sstevel@tonic-gate 
5716*7c478bd9Sstevel@tonic-gate 	ASSERT(seqno != 0);
5717*7c478bd9Sstevel@tonic-gate 
5718*7c478bd9Sstevel@tonic-gate 	for (; tip != NULL && (tip->ti_seqno != seqno); tip = list_next(l, tip))
5719*7c478bd9Sstevel@tonic-gate 		;
5720*7c478bd9Sstevel@tonic-gate 
5721*7c478bd9Sstevel@tonic-gate 	return (tip);
5722*7c478bd9Sstevel@tonic-gate }
5723*7c478bd9Sstevel@tonic-gate 
5724*7c478bd9Sstevel@tonic-gate /*
5725*7c478bd9Sstevel@tonic-gate  * Queue data for a given T_CONN_IND while verifying that redundant
5726*7c478bd9Sstevel@tonic-gate  * messages, such as a T_ORDREL_IND after a T_DISCON_IND, are not queued.
5727*7c478bd9Sstevel@tonic-gate  * Used when the originator of the connection closes.
5728*7c478bd9Sstevel@tonic-gate  */
5729*7c478bd9Sstevel@tonic-gate static void
5730*7c478bd9Sstevel@tonic-gate tl_icon_queuemsg(tl_endpt_t *tep, t_scalar_t seqno, mblk_t *nmp)
5731*7c478bd9Sstevel@tonic-gate {
5732*7c478bd9Sstevel@tonic-gate 	tl_icon_t		*tip;
5733*7c478bd9Sstevel@tonic-gate 	mblk_t			**mpp, *mp;
5734*7c478bd9Sstevel@tonic-gate 	int			prim, nprim;
5735*7c478bd9Sstevel@tonic-gate 
5736*7c478bd9Sstevel@tonic-gate 	if (nmp->b_datap->db_type == M_PROTO)
5737*7c478bd9Sstevel@tonic-gate 		nprim = ((union T_primitives *)nmp->b_rptr)->type;
5738*7c478bd9Sstevel@tonic-gate 	else
5739*7c478bd9Sstevel@tonic-gate 		nprim = -1;	/* M_DATA */
5740*7c478bd9Sstevel@tonic-gate 
5741*7c478bd9Sstevel@tonic-gate 	tip = tl_icon_find(tep, seqno);
5742*7c478bd9Sstevel@tonic-gate 	if (tip == NULL) {
5743*7c478bd9Sstevel@tonic-gate 		freemsg(nmp);
5744*7c478bd9Sstevel@tonic-gate 		return;
5745*7c478bd9Sstevel@tonic-gate 	}
5746*7c478bd9Sstevel@tonic-gate 
5747*7c478bd9Sstevel@tonic-gate 	ASSERT(tip->ti_seqno != 0);
5748*7c478bd9Sstevel@tonic-gate 	mpp = &tip->ti_mp;
5749*7c478bd9Sstevel@tonic-gate 	while (*mpp != NULL) {
5750*7c478bd9Sstevel@tonic-gate 		mp = *mpp;
5751*7c478bd9Sstevel@tonic-gate 
5752*7c478bd9Sstevel@tonic-gate 		if (mp->b_datap->db_type == M_PROTO)
5753*7c478bd9Sstevel@tonic-gate 			prim = ((union T_primitives *)mp->b_rptr)->type;
5754*7c478bd9Sstevel@tonic-gate 		else
5755*7c478bd9Sstevel@tonic-gate 			prim = -1;	/* M_DATA */
5756*7c478bd9Sstevel@tonic-gate 
5757*7c478bd9Sstevel@tonic-gate 		/*
5758*7c478bd9Sstevel@tonic-gate 		 * Allow nothing after a T_DISCON_IND
5759*7c478bd9Sstevel@tonic-gate 		 */
5760*7c478bd9Sstevel@tonic-gate 		if (prim == T_DISCON_IND) {
5761*7c478bd9Sstevel@tonic-gate 			freemsg(nmp);
5762*7c478bd9Sstevel@tonic-gate 			return;
5763*7c478bd9Sstevel@tonic-gate 		}
5764*7c478bd9Sstevel@tonic-gate 		/*
5765*7c478bd9Sstevel@tonic-gate 		 * Only allow a T_DISCON_IND after a T_ORDREL_IND
5766*7c478bd9Sstevel@tonic-gate 		 */
5767*7c478bd9Sstevel@tonic-gate 		if (prim == T_ORDREL_IND && nprim != T_DISCON_IND) {
5768*7c478bd9Sstevel@tonic-gate 			freemsg(nmp);
5769*7c478bd9Sstevel@tonic-gate 			return;
5770*7c478bd9Sstevel@tonic-gate 		}
5771*7c478bd9Sstevel@tonic-gate 		mpp = &(mp->b_next);
5772*7c478bd9Sstevel@tonic-gate 	}
5773*7c478bd9Sstevel@tonic-gate 	*mpp = nmp;
5774*7c478bd9Sstevel@tonic-gate }
5775*7c478bd9Sstevel@tonic-gate 
5776*7c478bd9Sstevel@tonic-gate /*
5777*7c478bd9Sstevel@tonic-gate  * Verify if a certain TPI primitive exists on the connind queue.
5778*7c478bd9Sstevel@tonic-gate  * Use prim -1 for M_DATA.
5779*7c478bd9Sstevel@tonic-gate  * Return B_TRUE if found.
5780*7c478bd9Sstevel@tonic-gate  */
5781*7c478bd9Sstevel@tonic-gate static boolean_t
5782*7c478bd9Sstevel@tonic-gate tl_icon_hasprim(tl_endpt_t *tep, t_scalar_t seqno, t_scalar_t prim)
5783*7c478bd9Sstevel@tonic-gate {
5784*7c478bd9Sstevel@tonic-gate 	tl_icon_t *tip = tl_icon_find(tep, seqno);
5785*7c478bd9Sstevel@tonic-gate 	boolean_t found = B_FALSE;
5786*7c478bd9Sstevel@tonic-gate 
5787*7c478bd9Sstevel@tonic-gate 	if (tip != NULL) {
5788*7c478bd9Sstevel@tonic-gate 		mblk_t *mp;
5789*7c478bd9Sstevel@tonic-gate 		for (mp = tip->ti_mp; !found && mp != NULL; mp = mp->b_next) {
5790*7c478bd9Sstevel@tonic-gate 			found = (DB_TYPE(mp) == M_PROTO &&
5791*7c478bd9Sstevel@tonic-gate 			    ((union T_primitives *)mp->b_rptr)->type == prim);
5792*7c478bd9Sstevel@tonic-gate 		}
5793*7c478bd9Sstevel@tonic-gate 	}
5794*7c478bd9Sstevel@tonic-gate 	return (found);
5795*7c478bd9Sstevel@tonic-gate }
5796*7c478bd9Sstevel@tonic-gate 
5797*7c478bd9Sstevel@tonic-gate /*
5798*7c478bd9Sstevel@tonic-gate  * Send the b_next mblk chain that has accumulated before the connection
5799*7c478bd9Sstevel@tonic-gate  * was accepted. Perform the necessary state transitions.
5800*7c478bd9Sstevel@tonic-gate  */
5801*7c478bd9Sstevel@tonic-gate static void
5802*7c478bd9Sstevel@tonic-gate tl_icon_sendmsgs(tl_endpt_t *tep, mblk_t **mpp)
5803*7c478bd9Sstevel@tonic-gate {
5804*7c478bd9Sstevel@tonic-gate 	mblk_t			*mp;
5805*7c478bd9Sstevel@tonic-gate 	union T_primitives	*primp;
5806*7c478bd9Sstevel@tonic-gate 
5807*7c478bd9Sstevel@tonic-gate 	if (tep->te_closing) {
5808*7c478bd9Sstevel@tonic-gate 		tl_icon_freemsgs(mpp);
5809*7c478bd9Sstevel@tonic-gate 		return;
5810*7c478bd9Sstevel@tonic-gate 	}
5811*7c478bd9Sstevel@tonic-gate 
5812*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_state == TS_DATA_XFER);
5813*7c478bd9Sstevel@tonic-gate 	ASSERT(tep->te_rq->q_first == NULL);
5814*7c478bd9Sstevel@tonic-gate 
5815*7c478bd9Sstevel@tonic-gate 	while ((mp = *mpp) != NULL) {
5816*7c478bd9Sstevel@tonic-gate 		*mpp = mp->b_next;
5817*7c478bd9Sstevel@tonic-gate 		mp->b_next = NULL;
5818*7c478bd9Sstevel@tonic-gate 
5819*7c478bd9Sstevel@tonic-gate 		ASSERT((DB_TYPE(mp) == M_DATA) || (DB_TYPE(mp) == M_PROTO));
5820*7c478bd9Sstevel@tonic-gate 		switch (DB_TYPE(mp)) {
5821*7c478bd9Sstevel@tonic-gate 		default:
5822*7c478bd9Sstevel@tonic-gate 			freemsg(mp);
5823*7c478bd9Sstevel@tonic-gate 			break;
5824*7c478bd9Sstevel@tonic-gate 		case M_DATA:
5825*7c478bd9Sstevel@tonic-gate 			putnext(tep->te_rq, mp);
5826*7c478bd9Sstevel@tonic-gate 			break;
5827*7c478bd9Sstevel@tonic-gate 		case M_PROTO:
5828*7c478bd9Sstevel@tonic-gate 			primp = (union T_primitives *)mp->b_rptr;
5829*7c478bd9Sstevel@tonic-gate 			switch (primp->type) {
5830*7c478bd9Sstevel@tonic-gate 			case T_UNITDATA_IND:
5831*7c478bd9Sstevel@tonic-gate 			case T_DATA_IND:
5832*7c478bd9Sstevel@tonic-gate 			case T_OPTDATA_IND:
5833*7c478bd9Sstevel@tonic-gate 			case T_EXDATA_IND:
5834*7c478bd9Sstevel@tonic-gate 				putnext(tep->te_rq, mp);
5835*7c478bd9Sstevel@tonic-gate 				break;
5836*7c478bd9Sstevel@tonic-gate 			case T_ORDREL_IND:
5837*7c478bd9Sstevel@tonic-gate 				tep->te_state = NEXTSTATE(TE_ORDREL_IND,
5838*7c478bd9Sstevel@tonic-gate 							tep->te_state);
5839*7c478bd9Sstevel@tonic-gate 				putnext(tep->te_rq, mp);
5840*7c478bd9Sstevel@tonic-gate 				break;
5841*7c478bd9Sstevel@tonic-gate 			case T_DISCON_IND:
5842*7c478bd9Sstevel@tonic-gate 				tep->te_state = TS_IDLE;
5843*7c478bd9Sstevel@tonic-gate 				putnext(tep->te_rq, mp);
5844*7c478bd9Sstevel@tonic-gate 				break;
5845*7c478bd9Sstevel@tonic-gate 			default:
5846*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
5847*7c478bd9Sstevel@tonic-gate 				cmn_err(CE_PANIC,
5848*7c478bd9Sstevel@tonic-gate 					"tl_icon_sendmsgs: unknown primitive");
5849*7c478bd9Sstevel@tonic-gate #endif /* DEBUG */
5850*7c478bd9Sstevel@tonic-gate 				freemsg(mp);
5851*7c478bd9Sstevel@tonic-gate 				break;
5852*7c478bd9Sstevel@tonic-gate 			}
5853*7c478bd9Sstevel@tonic-gate 			break;
5854*7c478bd9Sstevel@tonic-gate 		}
5855*7c478bd9Sstevel@tonic-gate 	}
5856*7c478bd9Sstevel@tonic-gate }
5857*7c478bd9Sstevel@tonic-gate 
5858*7c478bd9Sstevel@tonic-gate /*
5859*7c478bd9Sstevel@tonic-gate  * Free the b_next mblk chain that has accumulated before the connection
5860*7c478bd9Sstevel@tonic-gate  * was accepted.
5861*7c478bd9Sstevel@tonic-gate  */
5862*7c478bd9Sstevel@tonic-gate static void
5863*7c478bd9Sstevel@tonic-gate tl_icon_freemsgs(mblk_t **mpp)
5864*7c478bd9Sstevel@tonic-gate {
5865*7c478bd9Sstevel@tonic-gate 	mblk_t *mp;
5866*7c478bd9Sstevel@tonic-gate 
5867*7c478bd9Sstevel@tonic-gate 	while ((mp = *mpp) != NULL) {
5868*7c478bd9Sstevel@tonic-gate 		*mpp = mp->b_next;
5869*7c478bd9Sstevel@tonic-gate 		mp->b_next = NULL;
5870*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
5871*7c478bd9Sstevel@tonic-gate 	}
5872*7c478bd9Sstevel@tonic-gate }
5873*7c478bd9Sstevel@tonic-gate 
5874*7c478bd9Sstevel@tonic-gate /*
5875*7c478bd9Sstevel@tonic-gate  * Send M_ERROR
5876*7c478bd9Sstevel@tonic-gate  * Note: assumes caller ensured enough space in mp or enough
5877*7c478bd9Sstevel@tonic-gate  *	memory available. Does not attempt recovery from allocb()
5878*7c478bd9Sstevel@tonic-gate  *	failures
5879*7c478bd9Sstevel@tonic-gate  */
5880*7c478bd9Sstevel@tonic-gate 
5881*7c478bd9Sstevel@tonic-gate static void
5882*7c478bd9Sstevel@tonic-gate tl_merror(queue_t *wq, mblk_t *mp, int error)
5883*7c478bd9Sstevel@tonic-gate {
5884*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
5885*7c478bd9Sstevel@tonic-gate 
5886*7c478bd9Sstevel@tonic-gate 	if (tep->te_closing) {
5887*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
5888*7c478bd9Sstevel@tonic-gate 		return;
5889*7c478bd9Sstevel@tonic-gate 	}
5890*7c478bd9Sstevel@tonic-gate 
5891*7c478bd9Sstevel@tonic-gate 	(void) (STRLOG(TL_ID, tep->te_minor, 1,
5892*7c478bd9Sstevel@tonic-gate 		    SL_TRACE|SL_ERROR,
5893*7c478bd9Sstevel@tonic-gate 		    "tl_merror: tep=%p, err=%d", tep, error));
5894*7c478bd9Sstevel@tonic-gate 
5895*7c478bd9Sstevel@tonic-gate 	/*
5896*7c478bd9Sstevel@tonic-gate 	 * Flush all messages on the queue; we are shutting
5897*7c478bd9Sstevel@tonic-gate 	 * the stream down because of a fatal error.
5898*7c478bd9Sstevel@tonic-gate 	 */
5899*7c478bd9Sstevel@tonic-gate 	flushq(wq, FLUSHALL);
5900*7c478bd9Sstevel@tonic-gate 	if (IS_COTS(tep)) {
5901*7c478bd9Sstevel@tonic-gate 		/* connection oriented - unconnect endpoints */
5902*7c478bd9Sstevel@tonic-gate 		tl_co_unconnect(tep);
5903*7c478bd9Sstevel@tonic-gate 	}
5904*7c478bd9Sstevel@tonic-gate 	if (mp->b_cont) {
5905*7c478bd9Sstevel@tonic-gate 		freemsg(mp->b_cont);
5906*7c478bd9Sstevel@tonic-gate 		mp->b_cont = NULL;
5907*7c478bd9Sstevel@tonic-gate 	}
5908*7c478bd9Sstevel@tonic-gate 
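	/*
	 * If the original message block is too small to hold the one-byte
	 * error code, or is shared (DB_REF > 1), replace it with a freshly
	 * allocated single-byte block before converting it to M_ERROR.
	 */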
5909*7c478bd9Sstevel@tonic-gate 	if ((MBLKSIZE(mp) < 1) || (DB_REF(mp) > 1)) {
5910*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
5911*7c478bd9Sstevel@tonic-gate 		mp = allocb(1, BPRI_HI);
5912*7c478bd9Sstevel@tonic-gate 		if (!mp) {
5913*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 1,
5914*7c478bd9Sstevel@tonic-gate 				SL_TRACE|SL_ERROR,
5915*7c478bd9Sstevel@tonic-gate 				"tl_merror:M_PROTO: out of memory"));
5916*7c478bd9Sstevel@tonic-gate 			return;
5917*7c478bd9Sstevel@tonic-gate 		}
5918*7c478bd9Sstevel@tonic-gate 	}
5919*7c478bd9Sstevel@tonic-gate 	if (mp) {
5920*7c478bd9Sstevel@tonic-gate 		DB_TYPE(mp) = M_ERROR;
5921*7c478bd9Sstevel@tonic-gate 		mp->b_rptr = DB_BASE(mp);
5922*7c478bd9Sstevel@tonic-gate 		*mp->b_rptr = (char)error;
5923*7c478bd9Sstevel@tonic-gate 		mp->b_wptr = mp->b_rptr + sizeof (char);
5924*7c478bd9Sstevel@tonic-gate 		qreply(wq, mp);
5925*7c478bd9Sstevel@tonic-gate 	} else {
5926*7c478bd9Sstevel@tonic-gate 		(void) putnextctl1(tep->te_rq, M_ERROR, error);
5927*7c478bd9Sstevel@tonic-gate 	}
5928*7c478bd9Sstevel@tonic-gate }
5929*7c478bd9Sstevel@tonic-gate 
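/*
 * Fill in a single credentials option at "buf".  The resulting layout,
 * as constructed below, is one of:
 *
 *	TL_SETCRED:	struct opthdr (TL_PROT_LEVEL/TL_OPT_PEER_CRED)
 *			followed by a tl_credopt_t;
 *	TL_SETUCRED:	struct opthdr (TL_PROT_LEVEL/TL_OPT_PEER_UCRED)
 *			followed by a ucred of ucredsize bytes;
 *	TL_SOCKUCRED:	struct T_opthdr (SOL_SOCKET/SCM_UCRED)
 *			followed by a ucred of ucredsize bytes.
 */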
5930*7c478bd9Sstevel@tonic-gate static void
5931*7c478bd9Sstevel@tonic-gate tl_fill_option(uchar_t *buf, cred_t *cr, pid_t cpid, int flag)
5932*7c478bd9Sstevel@tonic-gate {
5933*7c478bd9Sstevel@tonic-gate 	if (flag & TL_SETCRED) {
5934*7c478bd9Sstevel@tonic-gate 		struct opthdr *opt = (struct opthdr *)buf;
5935*7c478bd9Sstevel@tonic-gate 		tl_credopt_t *tlcred;
5936*7c478bd9Sstevel@tonic-gate 
5937*7c478bd9Sstevel@tonic-gate 		opt->level = TL_PROT_LEVEL;
5938*7c478bd9Sstevel@tonic-gate 		opt->name = TL_OPT_PEER_CRED;
5939*7c478bd9Sstevel@tonic-gate 		opt->len = (t_uscalar_t)OPTLEN(sizeof (tl_credopt_t));
5940*7c478bd9Sstevel@tonic-gate 
5941*7c478bd9Sstevel@tonic-gate 		tlcred = (tl_credopt_t *)(opt + 1);
5942*7c478bd9Sstevel@tonic-gate 		tlcred->tc_uid = crgetuid(cr);
5943*7c478bd9Sstevel@tonic-gate 		tlcred->tc_gid = crgetgid(cr);
5944*7c478bd9Sstevel@tonic-gate 		tlcred->tc_ruid = crgetruid(cr);
5945*7c478bd9Sstevel@tonic-gate 		tlcred->tc_rgid = crgetrgid(cr);
5946*7c478bd9Sstevel@tonic-gate 		tlcred->tc_suid = crgetsuid(cr);
5947*7c478bd9Sstevel@tonic-gate 		tlcred->tc_sgid = crgetsgid(cr);
5948*7c478bd9Sstevel@tonic-gate 		tlcred->tc_ngroups = crgetngroups(cr);
5949*7c478bd9Sstevel@tonic-gate 	} else if (flag & TL_SETUCRED) {
5950*7c478bd9Sstevel@tonic-gate 		struct opthdr *opt = (struct opthdr *)buf;
5951*7c478bd9Sstevel@tonic-gate 
5952*7c478bd9Sstevel@tonic-gate 		opt->level = TL_PROT_LEVEL;
5953*7c478bd9Sstevel@tonic-gate 		opt->name = TL_OPT_PEER_UCRED;
5954*7c478bd9Sstevel@tonic-gate 		opt->len = (t_uscalar_t)OPTLEN(ucredsize);
5955*7c478bd9Sstevel@tonic-gate 
5956*7c478bd9Sstevel@tonic-gate 		(void) cred2ucred(cr, cpid, (void *)(opt + 1));
5957*7c478bd9Sstevel@tonic-gate 	} else {
5958*7c478bd9Sstevel@tonic-gate 		struct T_opthdr *topt = (struct T_opthdr *)buf;
5959*7c478bd9Sstevel@tonic-gate 		ASSERT(flag & TL_SOCKUCRED);
5960*7c478bd9Sstevel@tonic-gate 
5961*7c478bd9Sstevel@tonic-gate 		topt->level = SOL_SOCKET;
5962*7c478bd9Sstevel@tonic-gate 		topt->name = SCM_UCRED;
5963*7c478bd9Sstevel@tonic-gate 		topt->len = ucredsize + sizeof (*topt);
5964*7c478bd9Sstevel@tonic-gate 		topt->status = 0;
5965*7c478bd9Sstevel@tonic-gate 		(void) cred2ucred(cr, cpid, (void *)(topt + 1));
5966*7c478bd9Sstevel@tonic-gate 	}
5967*7c478bd9Sstevel@tonic-gate }
5968*7c478bd9Sstevel@tonic-gate 
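/*
 * Option management callbacks (default, get and set values).  They are
 * presumably invoked through the common option-processing framework,
 * which is assumed to have already validated option level and name.
 */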
5969*7c478bd9Sstevel@tonic-gate /* ARGSUSED */
5970*7c478bd9Sstevel@tonic-gate static int
5971*7c478bd9Sstevel@tonic-gate tl_default_opt(queue_t *wq, int level, int name, uchar_t *ptr)
5972*7c478bd9Sstevel@tonic-gate {
5973*7c478bd9Sstevel@tonic-gate 	/* no default value processed in protocol specific code currently */
5974*7c478bd9Sstevel@tonic-gate 	return (-1);
5975*7c478bd9Sstevel@tonic-gate }
5976*7c478bd9Sstevel@tonic-gate 
5977*7c478bd9Sstevel@tonic-gate /* ARGSUSED */
5978*7c478bd9Sstevel@tonic-gate static int
5979*7c478bd9Sstevel@tonic-gate tl_get_opt(queue_t *wq, int level, int name, uchar_t *ptr)
5980*7c478bd9Sstevel@tonic-gate {
5981*7c478bd9Sstevel@tonic-gate 	int len;
5982*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *tep;
5983*7c478bd9Sstevel@tonic-gate 	int *valp;
5984*7c478bd9Sstevel@tonic-gate 
5985*7c478bd9Sstevel@tonic-gate 	tep = (tl_endpt_t *)wq->q_ptr;
5986*7c478bd9Sstevel@tonic-gate 
5987*7c478bd9Sstevel@tonic-gate 	len = 0;
5988*7c478bd9Sstevel@tonic-gate 
5989*7c478bd9Sstevel@tonic-gate 	/*
5990*7c478bd9Sstevel@tonic-gate 	 * Assumes: option level and name sanity checks are done elsewhere.
5991*7c478bd9Sstevel@tonic-gate 	 */
5992*7c478bd9Sstevel@tonic-gate 
5993*7c478bd9Sstevel@tonic-gate 	switch (level) {
5994*7c478bd9Sstevel@tonic-gate 	case SOL_SOCKET:
5995*7c478bd9Sstevel@tonic-gate 		if (! IS_SOCKET(tep))
5996*7c478bd9Sstevel@tonic-gate 			break;
5997*7c478bd9Sstevel@tonic-gate 		switch (name) {
5998*7c478bd9Sstevel@tonic-gate 		case SO_RECVUCRED:
5999*7c478bd9Sstevel@tonic-gate 			len = sizeof (int);
6000*7c478bd9Sstevel@tonic-gate 			valp = (int *)ptr;
6001*7c478bd9Sstevel@tonic-gate 			*valp = (tep->te_flag & TL_SOCKUCRED) != 0;
6002*7c478bd9Sstevel@tonic-gate 			break;
6003*7c478bd9Sstevel@tonic-gate 		default:
6004*7c478bd9Sstevel@tonic-gate 			break;
6005*7c478bd9Sstevel@tonic-gate 		}
6006*7c478bd9Sstevel@tonic-gate 		break;
6007*7c478bd9Sstevel@tonic-gate 	case TL_PROT_LEVEL:
6008*7c478bd9Sstevel@tonic-gate 		switch (name) {
6009*7c478bd9Sstevel@tonic-gate 		case TL_OPT_PEER_CRED:
6010*7c478bd9Sstevel@tonic-gate 		case TL_OPT_PEER_UCRED:
6011*7c478bd9Sstevel@tonic-gate 			/*
6012*7c478bd9Sstevel@tonic-gate 			 * These options are not supposed to be retrieved
6013*7c478bd9Sstevel@tonic-gate 			 * directly.  They are only sent in T_CONN_{IND,CON}
6014*7c478bd9Sstevel@tonic-gate 			 * and T_UNITDATA_IND when the corresponding internal
6015*7c478bd9Sstevel@tonic-gate 			 * flags have been set by other options.  Direct
6016*7c478bd9Sstevel@tonic-gate 			 * retrieval is always designed to fail (ignored).
6017*7c478bd9Sstevel@tonic-gate 			 */
6018*7c478bd9Sstevel@tonic-gate 			break;
6019*7c478bd9Sstevel@tonic-gate 		}
6020*7c478bd9Sstevel@tonic-gate 	}
6021*7c478bd9Sstevel@tonic-gate 	return (len);
6022*7c478bd9Sstevel@tonic-gate }
6023*7c478bd9Sstevel@tonic-gate 
6024*7c478bd9Sstevel@tonic-gate /* ARGSUSED */
6025*7c478bd9Sstevel@tonic-gate static int
6026*7c478bd9Sstevel@tonic-gate tl_set_opt(
6027*7c478bd9Sstevel@tonic-gate 	queue_t		*wq,
6028*7c478bd9Sstevel@tonic-gate 	uint_t		mgmt_flags,
6029*7c478bd9Sstevel@tonic-gate 	int		level,
6030*7c478bd9Sstevel@tonic-gate 	int		name,
6031*7c478bd9Sstevel@tonic-gate 	uint_t		inlen,
6032*7c478bd9Sstevel@tonic-gate 	uchar_t		*invalp,
6033*7c478bd9Sstevel@tonic-gate 	uint_t		*outlenp,
6034*7c478bd9Sstevel@tonic-gate 	uchar_t		*outvalp,
6035*7c478bd9Sstevel@tonic-gate 	void		*thisdg_attrs,
6036*7c478bd9Sstevel@tonic-gate 	cred_t		*cr,
6037*7c478bd9Sstevel@tonic-gate 	mblk_t		*mblk)
6038*7c478bd9Sstevel@tonic-gate {
6039*7c478bd9Sstevel@tonic-gate 	int error;
6040*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *tep;
6041*7c478bd9Sstevel@tonic-gate 
6042*7c478bd9Sstevel@tonic-gate 	tep = (tl_endpt_t *)wq->q_ptr;
6043*7c478bd9Sstevel@tonic-gate 
6044*7c478bd9Sstevel@tonic-gate 	error = 0;		/* NOERROR */
6045*7c478bd9Sstevel@tonic-gate 
6046*7c478bd9Sstevel@tonic-gate 	/*
6047*7c478bd9Sstevel@tonic-gate 	 * Assumes: option level and name sanity checks are done elsewhere.
6048*7c478bd9Sstevel@tonic-gate 	 */
6049*7c478bd9Sstevel@tonic-gate 
6050*7c478bd9Sstevel@tonic-gate 	switch (level) {
6051*7c478bd9Sstevel@tonic-gate 	case SOL_SOCKET:
6052*7c478bd9Sstevel@tonic-gate 		if (! IS_SOCKET(tep)) {
6053*7c478bd9Sstevel@tonic-gate 			error = EINVAL;
6054*7c478bd9Sstevel@tonic-gate 			break;
6055*7c478bd9Sstevel@tonic-gate 		}
6056*7c478bd9Sstevel@tonic-gate 		/*
6057*7c478bd9Sstevel@tonic-gate 		 * TBD: fill in other AF_UNIX socket options and then stop
6058*7c478bd9Sstevel@tonic-gate 		 * returning an error.
6059*7c478bd9Sstevel@tonic-gate 		 */
6060*7c478bd9Sstevel@tonic-gate 		switch (name) {
6061*7c478bd9Sstevel@tonic-gate 		case SO_RECVUCRED:
6062*7c478bd9Sstevel@tonic-gate 			/*
6063*7c478bd9Sstevel@tonic-gate 			 * We only support this for datagram sockets;
6064*7c478bd9Sstevel@tonic-gate 			 * getpeerucred handles the connection oriented
6065*7c478bd9Sstevel@tonic-gate 			 * transports.
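			 * (Illustrative usage, not from this file: a consumer
			 * would typically enable this with setsockopt(fd,
			 * SOL_SOCKET, SO_RECVUCRED, &on, sizeof (on)) on an
			 * AF_UNIX datagram socket and then receive the
			 * sender's credentials as SCM_UCRED ancillary data
			 * from recvmsg().)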
6066*7c478bd9Sstevel@tonic-gate 			 */
6067*7c478bd9Sstevel@tonic-gate 			if (! IS_CLTS(tep)) {
6068*7c478bd9Sstevel@tonic-gate 				error = EINVAL;
6069*7c478bd9Sstevel@tonic-gate 				break;
6070*7c478bd9Sstevel@tonic-gate 			}
6071*7c478bd9Sstevel@tonic-gate 			if (*(int *)invalp == 0)
6072*7c478bd9Sstevel@tonic-gate 				tep->te_flag &= ~TL_SOCKUCRED;
6073*7c478bd9Sstevel@tonic-gate 			else
6074*7c478bd9Sstevel@tonic-gate 				tep->te_flag |= TL_SOCKUCRED;
6075*7c478bd9Sstevel@tonic-gate 			break;
6076*7c478bd9Sstevel@tonic-gate 		default:
6077*7c478bd9Sstevel@tonic-gate 			error = EINVAL;
6078*7c478bd9Sstevel@tonic-gate 			break;
6079*7c478bd9Sstevel@tonic-gate 		}
6080*7c478bd9Sstevel@tonic-gate 		break;
6081*7c478bd9Sstevel@tonic-gate 	case TL_PROT_LEVEL:
6082*7c478bd9Sstevel@tonic-gate 		switch (name) {
6083*7c478bd9Sstevel@tonic-gate 		case TL_OPT_PEER_CRED:
6084*7c478bd9Sstevel@tonic-gate 		case TL_OPT_PEER_UCRED:
6085*7c478bd9Sstevel@tonic-gate 			/*
6086*7c478bd9Sstevel@tonic-gate 			 * These options are not supposed to be set directly.
6087*7c478bd9Sstevel@tonic-gate 			 * Their values are initialized for each endpoint at
6088*7c478bd9Sstevel@tonic-gate 			 * driver open time.
6089*7c478bd9Sstevel@tonic-gate 			 * Direct setting is always designed to fail for these
6090*7c478bd9Sstevel@tonic-gate 			 * options.
6091*7c478bd9Sstevel@tonic-gate 			 */
6092*7c478bd9Sstevel@tonic-gate 			(void) (STRLOG(TL_ID, tep->te_minor, 1,
6093*7c478bd9Sstevel@tonic-gate 				    SL_TRACE|SL_ERROR,
6094*7c478bd9Sstevel@tonic-gate 				    "tl_set_opt: option is not supported"));
6095*7c478bd9Sstevel@tonic-gate 			error = EPROTO;
6096*7c478bd9Sstevel@tonic-gate 			break;
6097*7c478bd9Sstevel@tonic-gate 		}
6098*7c478bd9Sstevel@tonic-gate 	}
6099*7c478bd9Sstevel@tonic-gate 	return (error);
6100*7c478bd9Sstevel@tonic-gate }
6101*7c478bd9Sstevel@tonic-gate 
6102*7c478bd9Sstevel@tonic-gate 
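/*
 * qtimeout() callback used by tl_memrecover(): the delay has expired, so
 * re-enable the write queue and schedule its service procedure so the
 * failed allocation can be retried.
 */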
6103*7c478bd9Sstevel@tonic-gate static void
6104*7c478bd9Sstevel@tonic-gate tl_timer(void *arg)
6105*7c478bd9Sstevel@tonic-gate {
6106*7c478bd9Sstevel@tonic-gate 	queue_t *wq = arg;
6107*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
6108*7c478bd9Sstevel@tonic-gate 
6109*7c478bd9Sstevel@tonic-gate 	ASSERT(tep);
6110*7c478bd9Sstevel@tonic-gate 
6111*7c478bd9Sstevel@tonic-gate 	tep->te_timoutid = 0;
6112*7c478bd9Sstevel@tonic-gate 
6113*7c478bd9Sstevel@tonic-gate 	enableok(wq);
6114*7c478bd9Sstevel@tonic-gate 	/*
6115*7c478bd9Sstevel@tonic-gate 	 * Note: we could call wsrv directly here and save a context switch.
6116*7c478bd9Sstevel@tonic-gate 	 * Consider that change when qtimeout (not timeout) is active.
6117*7c478bd9Sstevel@tonic-gate 	 */
6118*7c478bd9Sstevel@tonic-gate 	qenable(wq);
6119*7c478bd9Sstevel@tonic-gate }
6120*7c478bd9Sstevel@tonic-gate 
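/*
 * qbufcall() callback used by tl_memrecover(): a buffer of the requested
 * size may now be available, so re-enable the write queue and schedule
 * its service procedure so the failed allocation can be retried.
 */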
6121*7c478bd9Sstevel@tonic-gate static void
6122*7c478bd9Sstevel@tonic-gate tl_buffer(void *arg)
6123*7c478bd9Sstevel@tonic-gate {
6124*7c478bd9Sstevel@tonic-gate 	queue_t *wq = arg;
6125*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *tep = (tl_endpt_t *)wq->q_ptr;
6126*7c478bd9Sstevel@tonic-gate 
6127*7c478bd9Sstevel@tonic-gate 	ASSERT(tep);
6128*7c478bd9Sstevel@tonic-gate 
6129*7c478bd9Sstevel@tonic-gate 	tep->te_bufcid = 0;
6130*7c478bd9Sstevel@tonic-gate 	tep->te_nowsrv = B_FALSE;
6131*7c478bd9Sstevel@tonic-gate 
6132*7c478bd9Sstevel@tonic-gate 	enableok(wq);
6133*7c478bd9Sstevel@tonic-gate 	/*
6134*7c478bd9Sstevel@tonic-gate 	 * Note: we could call wsrv directly here and save a context switch.
6135*7c478bd9Sstevel@tonic-gate 	 * Consider that change when qbufcall (not bufcall) is active.
6136*7c478bd9Sstevel@tonic-gate 	 */
6137*7c478bd9Sstevel@tonic-gate 	qenable(wq);
6138*7c478bd9Sstevel@tonic-gate }
6139*7c478bd9Sstevel@tonic-gate 
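/*
 * Handle a message-block allocation failure: put the message back at the
 * head of the write queue, disable the queue, and arrange to be called
 * back (via qbufcall(), or qtimeout() if even that fails) so the write
 * service procedure can retry later.  If a recovery callback is already
 * pending, there is nothing more to do.
 */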
6140*7c478bd9Sstevel@tonic-gate static void
6141*7c478bd9Sstevel@tonic-gate tl_memrecover(queue_t *wq, mblk_t *mp, size_t size)
6142*7c478bd9Sstevel@tonic-gate {
6143*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *tep;
6144*7c478bd9Sstevel@tonic-gate 
6145*7c478bd9Sstevel@tonic-gate 	tep = (tl_endpt_t *)wq->q_ptr;
6146*7c478bd9Sstevel@tonic-gate 
6147*7c478bd9Sstevel@tonic-gate 	if (tep->te_closing) {
6148*7c478bd9Sstevel@tonic-gate 		freemsg(mp);
6149*7c478bd9Sstevel@tonic-gate 		return;
6150*7c478bd9Sstevel@tonic-gate 	}
6151*7c478bd9Sstevel@tonic-gate 	noenable(wq);
6152*7c478bd9Sstevel@tonic-gate 
6153*7c478bd9Sstevel@tonic-gate 	(void) insq(wq, wq->q_first, mp);
6154*7c478bd9Sstevel@tonic-gate 
6155*7c478bd9Sstevel@tonic-gate 	if (tep->te_bufcid || tep->te_timoutid) {
6156*7c478bd9Sstevel@tonic-gate 		(void) (STRLOG(TL_ID, tep->te_minor, 1, SL_TRACE|SL_ERROR,
6157*7c478bd9Sstevel@tonic-gate 			"tl_memrecover:recover %p pending", (void *)wq));
6158*7c478bd9Sstevel@tonic-gate 		return;
6159*7c478bd9Sstevel@tonic-gate 	}
6160*7c478bd9Sstevel@tonic-gate 
6161*7c478bd9Sstevel@tonic-gate 	if (!(tep->te_bufcid = qbufcall(wq, size, BPRI_MED, tl_buffer, wq))) {
6162*7c478bd9Sstevel@tonic-gate 		tep->te_timoutid = qtimeout(wq, tl_timer, wq,
6163*7c478bd9Sstevel@tonic-gate 		    drv_usectohz(TL_BUFWAIT));
6164*7c478bd9Sstevel@tonic-gate 	}
6165*7c478bd9Sstevel@tonic-gate }
6166*7c478bd9Sstevel@tonic-gate 
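/*
 * Free one entry of the pending connection (T_CONN_IND) list: discard any
 * messages queued for it, release the reference to the connecting endpoint,
 * then unlink and free the entry.
 */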
6167*7c478bd9Sstevel@tonic-gate static void
6168*7c478bd9Sstevel@tonic-gate tl_freetip(tl_endpt_t *tep, tl_icon_t *tip)
6169*7c478bd9Sstevel@tonic-gate {
6170*7c478bd9Sstevel@tonic-gate 	ASSERT(tip->ti_seqno != 0);
6171*7c478bd9Sstevel@tonic-gate 
6172*7c478bd9Sstevel@tonic-gate 	if (tip->ti_mp != NULL) {
6173*7c478bd9Sstevel@tonic-gate 		tl_icon_freemsgs(&tip->ti_mp);
6174*7c478bd9Sstevel@tonic-gate 		tip->ti_mp = NULL;
6175*7c478bd9Sstevel@tonic-gate 	}
6176*7c478bd9Sstevel@tonic-gate 	if (tip->ti_tep != NULL) {
6177*7c478bd9Sstevel@tonic-gate 		tl_refrele(tip->ti_tep);
6178*7c478bd9Sstevel@tonic-gate 		tip->ti_tep = NULL;
6179*7c478bd9Sstevel@tonic-gate 	}
6180*7c478bd9Sstevel@tonic-gate 	list_remove(&tep->te_iconp, tip);
6181*7c478bd9Sstevel@tonic-gate 	kmem_free(tip, sizeof (tl_icon_t));
6182*7c478bd9Sstevel@tonic-gate 	tep->te_nicon--;
6183*7c478bd9Sstevel@tonic-gate }
6184*7c478bd9Sstevel@tonic-gate 
6185*7c478bd9Sstevel@tonic-gate /*
6186*7c478bd9Sstevel@tonic-gate  * Remove address from address hash.
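 * For socket endpoints the hash key is the te_vp pointer and the endpoint
 * reverts to an implicit address; for TLI endpoints the key is the bound
 * address (te_ap), whose buffer is freed here.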
6187*7c478bd9Sstevel@tonic-gate  */
6188*7c478bd9Sstevel@tonic-gate static void
6189*7c478bd9Sstevel@tonic-gate tl_addr_unbind(tl_endpt_t *tep)
6190*7c478bd9Sstevel@tonic-gate {
6191*7c478bd9Sstevel@tonic-gate 	tl_endpt_t *elp;
6192*7c478bd9Sstevel@tonic-gate 
6193*7c478bd9Sstevel@tonic-gate 	if (tep->te_flag & TL_ADDRHASHED) {
6194*7c478bd9Sstevel@tonic-gate 		if (IS_SOCKET(tep)) {
6195*7c478bd9Sstevel@tonic-gate 			(void) mod_hash_remove(tep->te_addrhash,
6196*7c478bd9Sstevel@tonic-gate 			    (mod_hash_key_t)tep->te_vp,
6197*7c478bd9Sstevel@tonic-gate 			    (mod_hash_val_t *)&elp);
6198*7c478bd9Sstevel@tonic-gate 			tep->te_vp = (void *)(uintptr_t)tep->te_minor;
6199*7c478bd9Sstevel@tonic-gate 			tep->te_magic = SOU_MAGIC_IMPLICIT;
6200*7c478bd9Sstevel@tonic-gate 		} else {
6201*7c478bd9Sstevel@tonic-gate 			(void) mod_hash_remove(tep->te_addrhash,
6202*7c478bd9Sstevel@tonic-gate 			    (mod_hash_key_t)&tep->te_ap,
6203*7c478bd9Sstevel@tonic-gate 			    (mod_hash_val_t *)&elp);
6204*7c478bd9Sstevel@tonic-gate 			(void) kmem_free(tep->te_abuf, tep->te_alen);
6205*7c478bd9Sstevel@tonic-gate 			tep->te_alen = -1;
6206*7c478bd9Sstevel@tonic-gate 			tep->te_abuf = NULL;
6207*7c478bd9Sstevel@tonic-gate 		}
6208*7c478bd9Sstevel@tonic-gate 		tep->te_flag &= ~TL_ADDRHASHED;
6209*7c478bd9Sstevel@tonic-gate 	}
6210*7c478bd9Sstevel@tonic-gate }
6211