/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright 1993-2003 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "mt.h"
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <rpc/trace.h>
#include <errno.h>
#include <stropts.h>
#include <sys/stream.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/timod.h>
#include <sys/stat.h>
#include <xti.h>
#include <fcntl.h>
#include <signal.h>
#include <assert.h>
#include <syslog.h>
#include <limits.h>
#include "tx.h"

#define	DEFSIZE 2048

/*
 * The following used to be in tiuser.h, but was causing too much namespace
 * pollution.
 */
#define	ROUNDUP32(X)	(((X) + 0x03) & ~0x03)
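/*
 * A quick worked example of the macro above (values illustrative):
 * ROUNDUP32(0) == 0, ROUNDUP32(1) == 4, ROUNDUP32(5) == 8, ROUNDUP32(8) == 8.
 * The result is always rounded up to a multiple of 4 bytes (32 bits),
 * which is what _t_aligned_copy() below relies on when computing offsets.
 */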

static struct _ti_user	*find_tilink(int s);
static struct _ti_user	*add_tilink(int s);
static void _t_free_lookbufs(struct _ti_user *tiptr);
static unsigned int _t_setsize(t_scalar_t infosize);
static int _t_cbuf_alloc(struct _ti_user *tiptr, char **retbuf);
static int _t_rbuf_alloc(struct _ti_user *tiptr, char **retbuf);
static int _t_adjust_state(int fd, int instate);
static int _t_alloc_bufs(int fd, struct _ti_user *tiptr,
	struct T_info_ack *tsap);

mutex_t	_ti_userlock = DEFAULTMUTEX;	/* Protects hash_bucket[] */

/*
 * Checkfd - checks validity of file descriptor
 */
struct _ti_user *
_t_checkfd(int fd, int force_sync, int api_semantics)
{
	sigset_t mask;
	struct _ti_user *tiptr;
	int retval, timodpushed;

	trace2(TR__t_checkfd, 0, fd);
	if (fd < 0) {
		t_errno = TBADF;
		trace2(TR__t_checkfd, 1, fd);
		return (NULL);
	}
	tiptr = NULL;
	sig_mutex_lock(&_ti_userlock);
	if ((tiptr = find_tilink(fd)) != NULL) {
		if (! force_sync) {
			sig_mutex_unlock(&_ti_userlock);
			return (tiptr);
		}
	}
	sig_mutex_unlock(&_ti_userlock);

	/*
	 * Not found, or a forced sync is required.
	 * Check if this is a valid TLI/XTI descriptor.
	 */
	timodpushed = 0;
	do {
		retval = _ioctl(fd, I_FIND, "timod");
	} while (retval < 0 && errno == EINTR);

	if (retval < 0 || (retval == 0 && _T_IS_TLI(api_semantics))) {
		/*
		 * Not a stream, or a TLI endpoint with no timod.
		 * XXX Note: If it is an XTI call, we push "timod" and
		 * try to convert it into a transport endpoint later.
		 * We do not do it for TLI and "retain" the old buggy
		 * behavior because ypbind and a lot of other daemons seem
		 * to use a buggy logic test of the form
		 * "(t_getstate(0) != -1 || t_errno != TBADF)" to see if
		 * they were ever invoked with a request on stdin and drop
		 * into untested code. This test is in code generated by
		 * rpcgen, which is why the test is replicated in many
		 * daemons too. We will need to fix that test too with an
		 * "IsaTLIendpoint" test if we ever fix this for TLI.
		 */
		t_errno = TBADF;
		trace2(TR__t_checkfd, 1, fd);
		return (NULL);
	}

	if (retval == 0) {
		/*
		 * "timod" not already on stream, then push it
		 */
		do {
			/*
			 * Assumes (correctly) that I_PUSH  is
			 * atomic w.r.t signals (EINTR error)
			 */
			retval = _ioctl(fd, I_PUSH, "timod");
		} while (retval < 0 && errno == EINTR);

		if (retval < 0) {
			int sv_errno = errno;

			t_errno = TSYSERR;
			trace2(TR__t_checkfd, 1, fd);
			errno = sv_errno;
			return (NULL);
		}
		timodpushed = 1;
	}
	/*
	 * Try to (re)constitute the info at user level from state
	 * in the kernel. This could be information that was lost due
	 * to an exec, or the endpoint being instantiated at a new
	 * descriptor due to open(), dup2(), etc.
	 *
	 * _t_create() requires that all signals be blocked.
	 * Note that sig_mutex_lock() only defers signals, it does not
	 * block them, so interruptible syscalls could still get EINTR.
	 */
	(void) thr_sigsetmask(SIG_SETMASK, &fillset, &mask);
	sig_mutex_lock(&_ti_userlock);
	tiptr = _t_create(fd, NULL, api_semantics, NULL);
	if (tiptr == NULL) {
		int sv_errno = errno;
		sig_mutex_unlock(&_ti_userlock);
		(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
		/*
		 * Restore the stream to its state before timod was
		 * pushed. It may not have been a network transport
		 * stream.
		 */
		if (timodpushed)
			(void) _ioctl(fd, I_POP, 0);
		trace2(TR__t_checkfd, 1, fd);
		errno = sv_errno;
		return (NULL);
	}
	sig_mutex_unlock(&_ti_userlock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	trace2(TR__t_checkfd, 1, fd);
	return (tiptr);
}

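/*
 * Illustrative use (assumed, based on the XTI entry points that call
 * this routine): t_sync(3NSL) passes force_sync == 1 to force the
 * resynchronization above, while most other entry points pass 0 and
 * simply get the cached tiptr back from the hash table.
 */
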
/*
 * Copy data to the output buffer, making sure the output buffer is
 * 32-bit aligned, even though the input buffer may not be.
 */
int
_t_aligned_copy(
	struct strbuf *strbufp,
	int len,
	int init_offset,
	char *datap,
	t_scalar_t *rtn_offset)
{
	trace1(TR__t_aligned_copy, 0);

	*rtn_offset = ROUNDUP32(init_offset);
	if ((*rtn_offset + len) > strbufp->maxlen) {
		/*
		 * Aligned copy will overflow buffer
		 */
		return (-1);
	}
	(void) memcpy(strbufp->buf + *rtn_offset, datap, (size_t)len);

	trace1(TR__t_aligned_copy, 1);
	return (0);
}
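
/*
 * Hypothetical call sketch (not a call site in this file; addrlen,
 * addrbuf and conn_req are illustrative names): packing an address
 * after a fixed-size TPI header into a control buffer,
 *
 *	if (_t_aligned_copy(&ctlbuf, addrlen,
 *	    (int)sizeof (struct T_conn_req), addrbuf,
 *	    &conn_req->DEST_offset) < 0)
 *		... fail with TBUFOVFLW ...
 *
 * so the returned DEST_offset is always 32-bit aligned within ctlbuf.
 */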


/*
 * Append data and control info to the look buffer (list in the MT case).
 *
 * The only thing that can be in the look buffer is a T_DISCON_IND,
 * T_ORDREL_IND or a T_UDERROR_IND.
 *
 * It also enforces priority of T_DISCON_INDs over any T_ORDREL_IND
 * already in the buffer. It assumes no T_ORDREL_IND is appended
 * when there is already something on the looklist (error case) and
 * that a T_ORDREL_IND if present will always be the first on the
 * list.
 *
 * This also assumes ti_lock is held via sig_mutex_lock(),
 * so signals are deferred here.
 */
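/*
 * Illustrative sequence (assumed, not from a trace): if the looklist
 * currently holds [T_ORDREL_IND] and a T_DISCON_IND arrives, the
 * T_ORDREL_IND entry is freed and the list becomes [T_DISCON_IND];
 * the disconnect supersedes the orderly release.
 */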
int
_t_register_lookevent(
	struct _ti_user *tiptr,
	caddr_t dptr,
	int dsize,
	caddr_t cptr,
	int csize)
{
	struct _ti_lookbufs *tlbs;
	int cbuf_size, dbuf_size;
	int sv_errno;

	trace3(TR__t_register_lookevent, 0, dsize, csize);

	assert(MUTEX_HELD(&tiptr->ti_lock));

	cbuf_size = tiptr->ti_ctlsize;
	dbuf_size = tiptr->ti_rcvsize;

	if ((csize > cbuf_size) || dsize > dbuf_size) {
		/* can't fit - return error */
		trace3(TR__t_register_lookevent, 1, dsize, csize);
		return (-1);	/* error */
	}
	/*
	 * Enforce priority of T_DISCON_IND over T_ORDREL_IND
	 * queued earlier.
	 * Note: Since there can be at most one T_ORDREL_IND
	 * queued (more than one is an error case), and we look for it
	 * on each append of T_DISCON_IND, it can only be at the
	 * head of the list if it is there.
	 */
	if (tiptr->ti_lookcnt > 0) { /* something already on looklist */
		if (cptr && csize >= (int)sizeof (struct T_discon_ind) &&
		    *(t_scalar_t *)cptr == T_DISCON_IND) {
			/* appending discon ind */
			assert(tiptr->ti_servtype != T_CLTS);
			if (*(t_scalar_t *)tiptr->ti_lookbufs.tl_lookcbuf ==
			    T_ORDREL_IND) { /* T_ORDREL_IND is on list */
				/*
				 * Blow away T_ORDREL_IND
				 */
				_t_free_looklist_head(tiptr);
			}
		}
	}
	tlbs = &tiptr->ti_lookbufs;
	if (tiptr->ti_lookcnt > 0) {
		int listcount = 0;
		/*
		 * Allocate and append a new lookbuf to the
		 * existing list. (Should only happen in MT case)
		 */
		while (tlbs->tl_next != NULL) {
			listcount++;
			tlbs = tlbs->tl_next;
		}
		assert(tiptr->ti_lookcnt == listcount);

		/*
		 * signals are deferred, calls to malloc() are safe.
		 */
		if ((tlbs->tl_next = malloc(sizeof (struct _ti_lookbufs)))
			== NULL) {
			sv_errno = errno;
			trace3(TR__t_register_lookevent, 1, dsize, csize);
			errno = sv_errno;
			return (-1); /* error */
		}
		tlbs = tlbs->tl_next;
		/*
		 * Allocate the buffers. The sizes are derived from the
		 * sizes of other related buffers. See _t_alloc_bufs()
		 * for details.
		 */
		if ((tlbs->tl_lookcbuf = malloc(cbuf_size)) == NULL) {
			/* giving up - free other memory chunks */
			sv_errno = errno;
			free(tlbs);
			trace3(TR__t_register_lookevent, 1, dsize, csize);
			errno = sv_errno;
			return (-1); /* error */
		}
		if ((dsize > 0) &&
		    ((tlbs->tl_lookdbuf = malloc(dbuf_size)) == NULL)) {
			/* giving up - free other memory chunks */
			sv_errno = errno;
			free(tlbs->tl_lookcbuf);
			free(tlbs);
			trace3(TR__t_register_lookevent, 1, dsize, csize);
			errno = sv_errno;
			return (-1); /* error */
		}
	}

	(void) memcpy(tlbs->tl_lookcbuf, cptr, csize);
	if (dsize > 0)
		(void) memcpy(tlbs->tl_lookdbuf, dptr, dsize);
	tlbs->tl_lookdlen = dsize;
	tlbs->tl_lookclen = csize;
	tlbs->tl_next = NULL;
	tiptr->ti_lookcnt++;
	trace3(TR__t_register_lookevent, 1, dsize, csize);
	return (0);		/* ok return */
}

/*
 * Is there something that needs attention?
 * Assumes tiptr->ti_lock is held and this thread's signals are blocked
 * in the MT case.
 */
int
_t_is_event(int fd, struct _ti_user *tiptr)
{
	int size, retval;

	trace2(TR__t_is_event, 0, fd);
	assert(MUTEX_HELD(&tiptr->ti_lock));
	if ((retval = _ioctl(fd, I_NREAD, &size)) < 0) {
		int sv_errno = errno;
		t_errno = TSYSERR;
		trace2(TR__t_is_event, 1, fd);
		errno = sv_errno;
		return (-1);
	}

	if ((retval > 0) || (tiptr->ti_lookcnt > 0)) {
		t_errno = TLOOK;
		trace2(TR__t_is_event, 1, fd);
		return (-1);
	}
	trace2(TR__t_is_event, 1, fd);
	return (0);
}

/*
 * wait for T_OK_ACK
 * assumes tiptr->ti_lock held in MT case
 */
int
_t_is_ok(int fd, struct _ti_user *tiptr, t_scalar_t type)
{
	struct strbuf ctlbuf;
	struct strbuf databuf;
	union T_primitives *pptr;
	int retval, cntlflag;
	int size;
	int sv_errno;
	int didalloc, didralloc;
	int flags = 0;

	trace2(TR__t_is_ok, 0, fd);

	assert(MUTEX_HELD(&tiptr->ti_lock));
	/*
	 * Acquire ctlbuf for use in sending/receiving control part
	 * of the message.
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
		sv_errno = errno;
		trace2(TR__t_is_ok, 1, fd);
		errno = sv_errno;
		return (-1);
	}
	/*
	 * Acquire databuf for use in sending/receiving data part
	 */
	if (_t_acquire_databuf(tiptr, &databuf, &didralloc) < 0) {
		sv_errno = errno;
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		trace2(TR__t_is_ok, 1, fd);
		errno = sv_errno;
		return (-1);
	}

	/*
	 * Temporarily convert a non-blocking endpoint to a
	 * blocking one and restore its status later.
	 */
	cntlflag = _fcntl(fd, F_GETFL, 0);
	if (cntlflag & (O_NDELAY | O_NONBLOCK))
		(void) _fcntl(fd, F_SETFL, cntlflag & ~(O_NDELAY | O_NONBLOCK));

	flags = RS_HIPRI;

	while ((retval = getmsg(fd, &ctlbuf, &databuf, &flags)) < 0) {
		if (errno == EINTR)
			continue;
		if (cntlflag & (O_NDELAY | O_NONBLOCK))
			(void) _fcntl(fd, F_SETFL, cntlflag);
		t_errno = TSYSERR;
		goto err_out;
	}

	/* did I get the entire message? */
	if (retval > 0) {
		if (cntlflag & (O_NDELAY | O_NONBLOCK))
			(void) _fcntl(fd, F_SETFL, cntlflag);
		t_errno = TSYSERR;
		errno = EIO;
		goto err_out;
	}

	/*
	 * is ctl part large enough to determine type?
	 */
	if (ctlbuf.len < (int)sizeof (t_scalar_t)) {
		if (cntlflag & (O_NDELAY | O_NONBLOCK))
			(void) _fcntl(fd, F_SETFL, cntlflag);
		t_errno = TSYSERR;
		errno = EPROTO;
		goto err_out;
	}

	if (cntlflag & (O_NDELAY | O_NONBLOCK))
		(void) _fcntl(fd, F_SETFL, cntlflag);

	pptr = (union T_primitives *)ctlbuf.buf;

	switch (pptr->type) {
	case T_OK_ACK:
		if ((ctlbuf.len < (int)sizeof (struct T_ok_ack)) ||
		    (pptr->ok_ack.CORRECT_prim != type)) {
			t_errno = TSYSERR;
			errno = EPROTO;
			goto err_out;
		}
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		if (didralloc)
			free(databuf.buf);
		else
			tiptr->ti_rcvbuf = databuf.buf;
		trace2(TR__t_is_ok, 1, fd);
		return (0);

	case T_ERROR_ACK:
		if ((ctlbuf.len < (int)sizeof (struct T_error_ack)) ||
		    (pptr->error_ack.ERROR_prim != type)) {
			t_errno = TSYSERR;
			errno = EPROTO;
			goto err_out;
		}
		/*
		 * If the error is "out of state" and there is something
		 * on the read queue, then indicate to the user that
		 * there is something that needs attention.
		 */
		if (pptr->error_ack.TLI_error == TOUTSTATE) {
			if ((retval = _ioctl(fd, I_NREAD, &size)) < 0) {
				t_errno = TSYSERR;
				goto err_out;
			}
			if (retval > 0)
				t_errno = TLOOK;
			else
				t_errno = TOUTSTATE;
		} else {
			t_errno = pptr->error_ack.TLI_error;
			if (t_errno == TSYSERR)
				errno = pptr->error_ack.UNIX_error;
		}
		goto err_out;
	default:
		t_errno = TSYSERR;
		errno = EPROTO;
		/* fallthru to err_out: */
	}
err_out:
	sv_errno = errno;
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	if (didralloc)
		free(databuf.buf);
	else
		tiptr->ti_rcvbuf = databuf.buf;
	trace2(TR__t_is_ok, 1, fd);
	errno = sv_errno;
	return (-1);
}

/*
 * timod ioctl
 */
int
_t_do_ioctl(int fd, char *buf, int size, int cmd, int *retlenp)
{
	int retval, sv_errno;
	struct strioctl strioc;

	trace1(TR__t_do_ioctl, 0);
	strioc.ic_cmd = cmd;
	strioc.ic_timout = -1;
	strioc.ic_len = size;
	strioc.ic_dp = buf;

	if ((retval = _ioctl(fd, I_STR, &strioc)) < 0) {
		sv_errno = errno;
		t_errno = TSYSERR;
		trace1(TR__t_do_ioctl, 1);
		errno = sv_errno;
		return (-1);
	}

	if (retval > 0) {
		t_errno = retval&0xff;
		trace1(TR__t_do_ioctl, 1);
		if (t_errno == TSYSERR)
			errno = (retval >>  8)&0xff;
		return (-1);
	}
	if (retlenp)
		*retlenp = strioc.ic_len;
	trace1(TR__t_do_ioctl, 1);
	return (0);
}
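
/*
 * A sketch of the calling pattern (see the TI_CAPABILITY use in
 * _t_create() below): the caller fills a request in a scratch buffer
 * and passes the maximum buffer size,
 *
 *	tcrp->PRIM_type = T_CAPABILITY_REQ;
 *	rval = _t_do_ioctl(fd, (char *)ioctlbuf,
 *	    (int)sizeof (struct T_capability_ack), TI_CAPABILITY, &retlen);
 *
 * and on success *retlenp holds the length of the data timod returned.
 * Note the positive-return convention above: timod packs t_errno in the
 * low byte and, for TSYSERR, errno in the next byte.
 */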

/*
 * alloc scratch buffers and look buffers
 */
/* ARGSUSED */
static int
_t_alloc_bufs(int fd, struct _ti_user *tiptr, struct T_info_ack *tsap)
{
	unsigned int size1, size2;
	t_scalar_t optsize;
	unsigned int csize, dsize, asize, osize;
	char *ctlbuf, *rcvbuf;
	char *lookdbuf, *lookcbuf;
	int sv_errno;

	trace2(TR__t_alloc_bufs, 0, fd);
	csize = _t_setsize(tsap->CDATA_size);
	dsize = _t_setsize(tsap->DDATA_size);

	size1 = _T_MAX(csize, dsize);

	if (size1 != 0) {
		if ((rcvbuf = malloc(size1)) == NULL) {
			sv_errno = errno;
			trace2(TR__t_alloc_bufs, 1, fd);
			errno = sv_errno;
			return (-1);
		}
		if ((lookdbuf = malloc(size1)) == NULL) {
			sv_errno = errno;
			free(rcvbuf);
			trace2(TR__t_alloc_bufs, 1, fd);
			errno = sv_errno;
			return (-1);
		}
	} else {
		rcvbuf = NULL;
		lookdbuf = NULL;
	}

	asize = _t_setsize(tsap->ADDR_size);
	if (tsap->OPT_size >= 0)
		/* compensate for XTI level options */
		optsize = tsap->OPT_size + TX_XTI_LEVEL_MAX_OPTBUF;
	else
		optsize = tsap->OPT_size;
	osize = _t_setsize(optsize);

	/*
	 * We compute the largest buffer size needed for this provider by
	 * adding the components. [ An extra sizeof (t_scalar_t) is added
	 * for each buffer to take care of rounding off for alignment. ]
	 * The goal here is to compute the size of the largest possible
	 * buffer that might be needed to hold a TPI message for the
	 * transport provider on this endpoint.
	 * Note: T_ADDR_ACK contains potentially two address buffers.
	 */

	size2 = (unsigned int)sizeof (union T_primitives) /* TPI struct */
	    + asize + (unsigned int)sizeof (t_scalar_t) +
		/* first addr buffer plus alignment */
	    asize + (unsigned int)sizeof (t_scalar_t) +
		/* second addr buffer plus alignment */
	    osize + (unsigned int)sizeof (t_scalar_t);
		/* option buffer plus alignment */

	if ((ctlbuf = malloc(size2)) == NULL) {
		sv_errno = errno;
		if (size1 != 0) {
			free(rcvbuf);
			free(lookdbuf);
		}
		trace2(TR__t_alloc_bufs, 1, fd);
		errno = sv_errno;
		return (-1);
	}

	if ((lookcbuf = malloc(size2)) == NULL) {
		sv_errno = errno;
		if (size1 != 0) {
			free(rcvbuf);
			free(lookdbuf);
		}
		free(ctlbuf);
		trace2(TR__t_alloc_bufs, 1, fd);
		errno = sv_errno;
		return (-1);
	}

	tiptr->ti_rcvsize = size1;
	tiptr->ti_rcvbuf = rcvbuf;
	tiptr->ti_ctlsize = size2;
	tiptr->ti_ctlbuf = ctlbuf;

	/*
	 * Note: The head of the lookbuffers list (and associated buffers)
	 * is allocated here on initialization.
	 * More allocated on demand.
	 */
	tiptr->ti_lookbufs.tl_lookclen = 0;
	tiptr->ti_lookbufs.tl_lookcbuf = lookcbuf;
	tiptr->ti_lookbufs.tl_lookdlen = 0;
	tiptr->ti_lookbufs.tl_lookdbuf = lookdbuf;

	trace2(TR__t_alloc_bufs, 1, fd);
	return (0);
}


/*
 * set sizes of buffers
 */
static unsigned int
_t_setsize(t_scalar_t infosize)
{
	trace2(TR__t_setsize, 0, infosize);
	switch (infosize) {
	case T_INFINITE /* -1 */:
		trace2(TR__t_setsize, 1, infosize);
		return (DEFSIZE);
	case T_INVALID /* -2 */:
		trace2(TR__t_setsize, 1, infosize);
		return (0);
	default:
		trace2(TR__t_setsize, 1, infosize);
		return ((unsigned int) infosize);
	}
}
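
/*
 * For example: a provider advertising CDATA_size == T_INFINITE gets a
 * DEFSIZE (2048 byte) connect-data buffer, T_INVALID (unsupported)
 * yields no buffer at all, and a finite advertised size is used as-is.
 */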

static void
_t_reinit_tiptr(struct _ti_user *tiptr)
{
	/*
	 * Note: This routine is designed for "reinitialization".
	 * The following fields are not modified here and are preserved:
	 *	 - ti_fd field
	 *	 - ti_lock
	 *	 - ti_next
	 *	 - ti_prev
	 * The above fields have to be separately initialized if this
	 * is used for a fresh initialization.
	 */

	trace1(TR__t_reinit_tiptr, 0);
	tiptr->ti_flags = 0;
	tiptr->ti_rcvsize = 0;
	tiptr->ti_rcvbuf = NULL;
	tiptr->ti_ctlsize = 0;
	tiptr->ti_ctlbuf = NULL;
	tiptr->ti_lookbufs.tl_lookdbuf = NULL;
	tiptr->ti_lookbufs.tl_lookcbuf = NULL;
	tiptr->ti_lookbufs.tl_lookdlen = 0;
	tiptr->ti_lookbufs.tl_lookclen = 0;
	tiptr->ti_lookbufs.tl_next = NULL;
	tiptr->ti_maxpsz = 0;
	tiptr->ti_tsdusize = 0;
	tiptr->ti_etsdusize = 0;
	tiptr->ti_cdatasize = 0;
	tiptr->ti_ddatasize = 0;
	tiptr->ti_servtype = 0;
	tiptr->ti_lookcnt = 0;
	tiptr->ti_state = 0;
	tiptr->ti_ocnt = 0;
	tiptr->ti_prov_flag = 0;
	tiptr->ti_qlen = 0;

	trace1(TR__t_reinit_tiptr, 1);
}

/*
 * Link manipulation routines.
 *
 * NBUCKETS hash buckets are used to give fast
 * access. The number is derived from the file descriptor
 * soft limit (64).
 */

#define	NBUCKETS	64
static struct _ti_user		*hash_bucket[NBUCKETS];
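
/*
 * Example (illustrative descriptors): fds 3, 67 and 131 all hash to
 * bucket 3 (3 % 64 == 67 % 64 == 131 % 64) and are chained through the
 * ti_next/ti_prev links of their _ti_user structures.
 */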

/*
 * Allocates a new link and returns a pointer to it.
 * Assumes that the caller is holding _ti_userlock via sig_mutex_lock(),
 * so signals are deferred here.
 */
static struct _ti_user *
add_tilink(int s)
{
	struct _ti_user	*tiptr;
	struct _ti_user	*prevptr;
	struct _ti_user	*curptr;
	int	x;
	struct stat stbuf;

	assert(MUTEX_HELD(&_ti_userlock));

	if (s < 0 || fstat(s, &stbuf) != 0)
		return (NULL);

	x = s % NBUCKETS;
	if (hash_bucket[x] != NULL) {
		/*
		 * Walk along the bucket looking for
		 * duplicate entry or the end.
		 */
		for (curptr = hash_bucket[x]; curptr != NULL;
						curptr = curptr->ti_next) {
			if (curptr->ti_fd == s) {
				/*
				 * This can happen when the user has close(2)'ed
				 * a descriptor and then had it allocated again
				 * via t_open().
				 *
				 * We will re-use the existing _ti_user struct
				 * in this case rather than allocating a new
				 * one.  If there are buffers associated with
				 * the existing _ti_user struct, they may not
				 * be the correct size, so we can not use
				 * them.  We free them here and reallocate
				 * new ones later on.
				 */
				if (curptr->ti_rcvbuf != NULL)
					free(curptr->ti_rcvbuf);
				free(curptr->ti_ctlbuf);
				_t_free_lookbufs(curptr);
				_t_reinit_tiptr(curptr);
				curptr->ti_rdev = stbuf.st_rdev;
				curptr->ti_ino = stbuf.st_ino;
				return (curptr);
			}
			prevptr = curptr;
		}
		/*
		 * Allocate and link in a new one.
		 */
		if ((tiptr = (struct _ti_user *)malloc(sizeof (*tiptr)))
		    == NULL)
			return (NULL);
		/*
		 * First initialize fields common with reinitialization and
		 * then other fields too
		 */
		_t_reinit_tiptr(tiptr);
		prevptr->ti_next = tiptr;
		tiptr->ti_prev = prevptr;
	} else {
		/*
		 * First entry.
		 */
		if ((tiptr = (struct _ti_user *)malloc(sizeof (*tiptr)))
		    == NULL)
			return (NULL);
		_t_reinit_tiptr(tiptr);
		hash_bucket[x] = tiptr;
		tiptr->ti_prev = NULL;
	}
	tiptr->ti_next = NULL;
	tiptr->ti_fd = s;
	tiptr->ti_rdev = stbuf.st_rdev;
	tiptr->ti_ino = stbuf.st_ino;
	mutex_init(&tiptr->ti_lock, USYNC_THREAD, NULL);
	return (tiptr);
}

/*
 * Find a link by descriptor
 * Assumes that the caller is holding _ti_userlock.
 */
static struct _ti_user *
find_tilink(int s)
{
	struct _ti_user	*curptr;
	int	x;
	struct stat stbuf;

	assert(MUTEX_HELD(&_ti_userlock));

	if (s < 0 || fstat(s, &stbuf) != 0)
		return (NULL);

	x = s % NBUCKETS;
	/*
	 * Walk along the bucket looking for the descriptor.
	 */
	for (curptr = hash_bucket[x]; curptr; curptr = curptr->ti_next) {
		if (curptr->ti_fd == s) {
			if (curptr->ti_rdev == stbuf.st_rdev &&
			    curptr->ti_ino == stbuf.st_ino)
				return (curptr);
			(void) _t_delete_tilink(s);
		}
	}
	return (NULL);
}

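/*
 * Example of the staleness check above (hypothetical descriptors): if
 * fd 5 was t_open()'ed, closed behind the library's back with close(2),
 * and then reused for a different device, the rdev/ino comparison fails
 * and the stale entry is purged via _t_delete_tilink() before NULL is
 * returned.
 */
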
/*
 * Assumes that the caller is holding _ti_userlock.
 * Also assumes that all signals are blocked.
 */
int
_t_delete_tilink(int s)
{
	struct _ti_user	*curptr;
	int	x;

	/*
	 * Find the link.
	 */
	assert(MUTEX_HELD(&_ti_userlock));
	if (s < 0)
		return (-1);
	x = s % NBUCKETS;
	/*
	 * Walk along the bucket looking for
	 * the descriptor.
	 */
	for (curptr = hash_bucket[x]; curptr; curptr = curptr->ti_next) {
		if (curptr->ti_fd == s) {
			struct _ti_user	*nextptr;
			struct _ti_user	*prevptr;

			nextptr = curptr->ti_next;
			prevptr = curptr->ti_prev;
			if (prevptr)
				prevptr->ti_next = nextptr;
			else
				hash_bucket[x] = nextptr;
			if (nextptr)
				nextptr->ti_prev = prevptr;

			/*
			 * free resources associated with curptr
			 */
			if (curptr->ti_rcvbuf != NULL)
				free(curptr->ti_rcvbuf);
			free(curptr->ti_ctlbuf);
			_t_free_lookbufs(curptr);
			mutex_destroy(&curptr->ti_lock);
			free(curptr);
			return (0);
		}
	}
	return (-1);
}

/*
 * Allocate a TLI state structure and synch it with the kernel.
 * *tiptr is returned.
 * Assumes that the caller is holding the _ti_userlock and has blocked signals.
 *
 * This function may fail the first time it is called with a given transport
 * if it doesn't support the T_CAPABILITY_REQ TPI message.
 */
struct _ti_user *
_t_create(int fd, struct t_info *info, int api_semantics, int *t_capreq_failed)
{
	/*
	 * Aligned data buffer for ioctl.
	 */
	union {
		struct ti_sync_req ti_req;
		struct ti_sync_ack ti_ack;
		union T_primitives t_prim;
		char pad[128];
	} ioctl_data;
	void *ioctlbuf = &ioctl_data; /* TI_SYNC/GETINFO with room to grow */
			    /* preferred location first local variable */
			    /* see note below */
	/*
	 * Note: We use "ioctlbuf" allocated on stack above with
	 * room to grow since (struct ti_sync_ack) can grow in size
	 * on future kernels. (We do not use the malloc'd "ti_ctlbuf" as
	 * that is part of the instance structure, which may not exist yet.)
	 * Its preferred declaration location is first local variable in this
	 * procedure as bugs causing overruns will be detectable on
	 * platforms where procedure calling conventions place return
	 * address on stack (such as x86) instead of causing silent
	 * memory corruption.
	 */
	struct ti_sync_req *tsrp = (struct ti_sync_req *)ioctlbuf;
	struct ti_sync_ack *tsap = (struct ti_sync_ack *)ioctlbuf;
	struct T_capability_req *tcrp = (struct T_capability_req *)ioctlbuf;
	struct T_capability_ack *tcap = (struct T_capability_ack *)ioctlbuf;
	struct T_info_ack *tiap = &tcap->INFO_ack;
	struct _ti_user	*ntiptr;
	int expected_acksize;
	int retlen, rstate, sv_errno, rval;

	trace2(TR__t_create, 0, fd);

	assert(MUTEX_HELD(&_ti_userlock));

	/*
	 * Use the ioctls required for sync'ing state with the kernel.
	 * We use two ioctls: TI_CAPABILITY is used to get TPI information and
	 * TI_SYNC is used to synchronise state with timod. Statically linked
	 * TLI applications will no longer work on older releases where there
	 * are no TI_SYNC and TI_CAPABILITY.
	 */

	/*
	 * Request info about transport.
	 * Assumes that TC1_INFO should always be implemented.
	 * For TI_CAPABILITY, the size argument to ioctl specifies the maximum
	 * buffer size.
	 */
	tcrp->PRIM_type = T_CAPABILITY_REQ;
	tcrp->CAP_bits1 = TC1_INFO | TC1_ACCEPTOR_ID;
	rval = _t_do_ioctl(fd, (char *)ioctlbuf,
	    (int)sizeof (struct T_capability_ack), TI_CAPABILITY, &retlen);
	expected_acksize = (int)sizeof (struct T_capability_ack);

	if (rval < 0) {
		sv_errno = errno;
		trace2(TR__t_create, 1, fd);
		errno = sv_errno;
		/*
		 * TI_CAPABILITY may fail when the transport provider doesn't
		 * support the T_CAPABILITY_REQ message type. In this case the
		 * file descriptor may be unusable (when the transport provider
		 * sent M_ERROR in response to T_CAPABILITY_REQ). This should
		 * only happen once during the system lifetime for a given
		 * transport provider, since timod will emulate TI_CAPABILITY
		 * after it has detected the failure.
		 */
		if (t_capreq_failed != NULL)
			*t_capreq_failed = 1;
		return (NULL);
	}

	if (retlen != expected_acksize) {
		t_errno = TSYSERR;
		trace2(TR__t_create, 1, fd);
		errno = EIO;
		return (NULL);
	}

	if ((tcap->CAP_bits1 & TC1_INFO) == 0) {
		t_errno = TSYSERR;
		trace2(TR__t_create, 1, fd);
		errno = EPROTO;
		return (NULL);
	}
	if (info != NULL) {
		if (tiap->PRIM_type != T_INFO_ACK) {
			t_errno = TSYSERR;
			trace2(TR__t_create, 1, fd);
			errno = EPROTO;
			return (NULL);
		}
		info->addr = tiap->ADDR_size;
		info->options = tiap->OPT_size;
		info->tsdu = tiap->TSDU_size;
		info->etsdu = tiap->ETSDU_size;
		info->connect = tiap->CDATA_size;
		info->discon = tiap->DDATA_size;
		info->servtype = tiap->SERV_type;
		if (_T_IS_XTI(api_semantics)) {
			/*
			 * XTI ONLY - TLI "struct t_info" does not
			 * have "flags"
			 */
			info->flags = 0;
			if (tiap->PROVIDER_flag & (SENDZERO|OLD_SENDZERO))
				info->flags |= T_SENDZERO;
1016*7c478bd9Sstevel@tonic-gate 			/*
1017*7c478bd9Sstevel@tonic-gate 			 * Some day there MAY be a NEW bit in the T_info_ack
1018*7c478bd9Sstevel@tonic-gate 			 * PROVIDER_flag namespace exposed by the TPI header
1019*7c478bd9Sstevel@tonic-gate 			 * <sys/tihdr.h> which will functionally correspond to
1020*7c478bd9Sstevel@tonic-gate 			 * the role played by T_ORDRELDATA in the info->flags
1021*7c478bd9Sstevel@tonic-gate 			 * namespace. When that bit exists, we can add a test
1022*7c478bd9Sstevel@tonic-gate 			 * to see if it is set and then set T_ORDRELDATA.
1023*7c478bd9Sstevel@tonic-gate 			 * Note: Currently only the mOSI ("minimal OSI")
1024*7c478bd9Sstevel@tonic-gate 			 * provider is specified to use T_ORDRELDATA, so the
1025*7c478bd9Sstevel@tonic-gate 			 * probability of needing it is minimal.
1026*7c478bd9Sstevel@tonic-gate 			 */
1027*7c478bd9Sstevel@tonic-gate 		}
1028*7c478bd9Sstevel@tonic-gate 	}
1029*7c478bd9Sstevel@tonic-gate 
1030*7c478bd9Sstevel@tonic-gate 	/*
1031*7c478bd9Sstevel@tonic-gate 	 * If this is the first time, or there is no instance (after
1032*7c478bd9Sstevel@tonic-gate 	 * fork/exec, dup, etc.), then create and initialize the data
1033*7c478bd9Sstevel@tonic-gate 	 * structure and allocate buffers.
1034*7c478bd9Sstevel@tonic-gate 	 */
1035*7c478bd9Sstevel@tonic-gate 	ntiptr = add_tilink(fd);
1036*7c478bd9Sstevel@tonic-gate 	if (ntiptr == NULL) {
1037*7c478bd9Sstevel@tonic-gate 		t_errno = TSYSERR;
1038*7c478bd9Sstevel@tonic-gate 		trace2(TR__t_create, 1, flags);
1039*7c478bd9Sstevel@tonic-gate 		errno = ENOMEM;
1040*7c478bd9Sstevel@tonic-gate 		return (NULL);
1041*7c478bd9Sstevel@tonic-gate 	}
1042*7c478bd9Sstevel@tonic-gate 	sig_mutex_lock(&ntiptr->ti_lock);
1043*7c478bd9Sstevel@tonic-gate 
1044*7c478bd9Sstevel@tonic-gate 	/*
1045*7c478bd9Sstevel@tonic-gate 	 * Allocate buffers for the new descriptor
1046*7c478bd9Sstevel@tonic-gate 	 */
1047*7c478bd9Sstevel@tonic-gate 	if (_t_alloc_bufs(fd, ntiptr, tiap) < 0) {
1048*7c478bd9Sstevel@tonic-gate 		sv_errno = errno;
1049*7c478bd9Sstevel@tonic-gate 		(void) _t_delete_tilink(fd);
1050*7c478bd9Sstevel@tonic-gate 		t_errno = TSYSERR;
1051*7c478bd9Sstevel@tonic-gate 		sig_mutex_unlock(&ntiptr->ti_lock);
1052*7c478bd9Sstevel@tonic-gate 		trace2(TR__t_create, 1, flags);
1053*7c478bd9Sstevel@tonic-gate 		errno = sv_errno;
1054*7c478bd9Sstevel@tonic-gate 		return (NULL);
1055*7c478bd9Sstevel@tonic-gate 	}
1056*7c478bd9Sstevel@tonic-gate 
1057*7c478bd9Sstevel@tonic-gate 	/* Fill instance structure */
1058*7c478bd9Sstevel@tonic-gate 
1059*7c478bd9Sstevel@tonic-gate 	ntiptr->ti_lookcnt = 0;
1060*7c478bd9Sstevel@tonic-gate 	ntiptr->ti_flags = USED;
1061*7c478bd9Sstevel@tonic-gate 	ntiptr->ti_state = T_UNINIT;
1062*7c478bd9Sstevel@tonic-gate 	ntiptr->ti_ocnt = 0;
1063*7c478bd9Sstevel@tonic-gate 
1064*7c478bd9Sstevel@tonic-gate 	assert(tiap->TIDU_size > 0);
1065*7c478bd9Sstevel@tonic-gate 	ntiptr->ti_maxpsz = tiap->TIDU_size;
1066*7c478bd9Sstevel@tonic-gate 	assert(tiap->TSDU_size >= -2);
1067*7c478bd9Sstevel@tonic-gate 	ntiptr->ti_tsdusize = tiap->TSDU_size;
1068*7c478bd9Sstevel@tonic-gate 	assert(tiap->ETSDU_size >= -2);
1069*7c478bd9Sstevel@tonic-gate 	ntiptr->ti_etsdusize = tiap->ETSDU_size;
1070*7c478bd9Sstevel@tonic-gate 	assert(tiap->CDATA_size >= -2);
1071*7c478bd9Sstevel@tonic-gate 	ntiptr->ti_cdatasize = tiap->CDATA_size;
1072*7c478bd9Sstevel@tonic-gate 	assert(tiap->DDATA_size >= -2);
1073*7c478bd9Sstevel@tonic-gate 	ntiptr->ti_ddatasize = tiap->DDATA_size;
1074*7c478bd9Sstevel@tonic-gate 	ntiptr->ti_servtype = tiap->SERV_type;
1075*7c478bd9Sstevel@tonic-gate 	ntiptr->ti_prov_flag = tiap->PROVIDER_flag;
1076*7c478bd9Sstevel@tonic-gate 
1077*7c478bd9Sstevel@tonic-gate 	if ((tcap->CAP_bits1 & TC1_ACCEPTOR_ID) != 0) {
1078*7c478bd9Sstevel@tonic-gate 		ntiptr->acceptor_id = tcap->ACCEPTOR_id;
1079*7c478bd9Sstevel@tonic-gate 		ntiptr->ti_flags |= V_ACCEPTOR_ID;
1080*7c478bd9Sstevel@tonic-gate 	} else
1082*7c478bd9Sstevel@tonic-gate 		ntiptr->ti_flags &= ~V_ACCEPTOR_ID;
1083*7c478bd9Sstevel@tonic-gate 
1084*7c478bd9Sstevel@tonic-gate 	/*
1085*7c478bd9Sstevel@tonic-gate 	 * Restore state from the kernel (subject to some heuristics below)
1086*7c478bd9Sstevel@tonic-gate 	 */
1087*7c478bd9Sstevel@tonic-gate 	switch (tiap->CURRENT_state) {
1088*7c478bd9Sstevel@tonic-gate 
1089*7c478bd9Sstevel@tonic-gate 	case TS_UNBND:
1090*7c478bd9Sstevel@tonic-gate 		ntiptr->ti_state = T_UNBND;
1091*7c478bd9Sstevel@tonic-gate 		break;
1092*7c478bd9Sstevel@tonic-gate 
1093*7c478bd9Sstevel@tonic-gate 	case TS_IDLE:
1094*7c478bd9Sstevel@tonic-gate 		if ((rstate = _t_adjust_state(fd, T_IDLE)) < 0) {
1095*7c478bd9Sstevel@tonic-gate 			sv_errno = errno;
1096*7c478bd9Sstevel@tonic-gate 			(void) _t_delete_tilink(fd);
1097*7c478bd9Sstevel@tonic-gate 			sig_mutex_unlock(&ntiptr->ti_lock);
1098*7c478bd9Sstevel@tonic-gate 			trace2(TR__t_create, 1, fd);
1099*7c478bd9Sstevel@tonic-gate 			errno = sv_errno;
1100*7c478bd9Sstevel@tonic-gate 			return (NULL);
1101*7c478bd9Sstevel@tonic-gate 		}
1102*7c478bd9Sstevel@tonic-gate 		ntiptr->ti_state = rstate;
1103*7c478bd9Sstevel@tonic-gate 		break;
1104*7c478bd9Sstevel@tonic-gate 
1105*7c478bd9Sstevel@tonic-gate 	case TS_WRES_CIND:
1106*7c478bd9Sstevel@tonic-gate 		ntiptr->ti_state = T_INCON;
1107*7c478bd9Sstevel@tonic-gate 		break;
1108*7c478bd9Sstevel@tonic-gate 
1109*7c478bd9Sstevel@tonic-gate 	case TS_WCON_CREQ:
1110*7c478bd9Sstevel@tonic-gate 		ntiptr->ti_state = T_OUTCON;
1111*7c478bd9Sstevel@tonic-gate 		break;
1112*7c478bd9Sstevel@tonic-gate 
1113*7c478bd9Sstevel@tonic-gate 	case TS_DATA_XFER:
1114*7c478bd9Sstevel@tonic-gate 		if ((rstate = _t_adjust_state(fd, T_DATAXFER)) < 0)  {
1115*7c478bd9Sstevel@tonic-gate 			sv_errno = errno;
1116*7c478bd9Sstevel@tonic-gate 			(void) _t_delete_tilink(fd);
1117*7c478bd9Sstevel@tonic-gate 			sig_mutex_unlock(&ntiptr->ti_lock);
1118*7c478bd9Sstevel@tonic-gate 			trace2(TR__t_create, 1, fd);
1119*7c478bd9Sstevel@tonic-gate 			errno = sv_errno;
1120*7c478bd9Sstevel@tonic-gate 			return (NULL);
1121*7c478bd9Sstevel@tonic-gate 		}
1122*7c478bd9Sstevel@tonic-gate 		ntiptr->ti_state = rstate;
1123*7c478bd9Sstevel@tonic-gate 		break;
1124*7c478bd9Sstevel@tonic-gate 
1125*7c478bd9Sstevel@tonic-gate 	case TS_WIND_ORDREL:
1126*7c478bd9Sstevel@tonic-gate 		ntiptr->ti_state = T_OUTREL;
1127*7c478bd9Sstevel@tonic-gate 		break;
1128*7c478bd9Sstevel@tonic-gate 
1129*7c478bd9Sstevel@tonic-gate 	case TS_WREQ_ORDREL:
1130*7c478bd9Sstevel@tonic-gate 		if ((rstate = _t_adjust_state(fd, T_INREL)) < 0)  {
1131*7c478bd9Sstevel@tonic-gate 			sv_errno = errno;
1132*7c478bd9Sstevel@tonic-gate 			(void) _t_delete_tilink(fd);
1133*7c478bd9Sstevel@tonic-gate 			sig_mutex_unlock(&ntiptr->ti_lock);
1134*7c478bd9Sstevel@tonic-gate 			trace2(TR__t_create, 1, fd);
1135*7c478bd9Sstevel@tonic-gate 			errno = sv_errno;
1136*7c478bd9Sstevel@tonic-gate 			return (NULL);
1137*7c478bd9Sstevel@tonic-gate 		}
1138*7c478bd9Sstevel@tonic-gate 		ntiptr->ti_state = rstate;
1139*7c478bd9Sstevel@tonic-gate 		break;
1140*7c478bd9Sstevel@tonic-gate 	default:
1141*7c478bd9Sstevel@tonic-gate 		t_errno = TSTATECHNG;
1142*7c478bd9Sstevel@tonic-gate 		(void) _t_delete_tilink(fd);
1143*7c478bd9Sstevel@tonic-gate 		sig_mutex_unlock(&ntiptr->ti_lock);
1144*7c478bd9Sstevel@tonic-gate 		trace2(TR__t_create, 1, fd);
1145*7c478bd9Sstevel@tonic-gate 		return (NULL);
1146*7c478bd9Sstevel@tonic-gate 	}
1147*7c478bd9Sstevel@tonic-gate 
1148*7c478bd9Sstevel@tonic-gate 	/*
1149*7c478bd9Sstevel@tonic-gate 	 * Sync information with timod.
1150*7c478bd9Sstevel@tonic-gate 	 */
1151*7c478bd9Sstevel@tonic-gate 	tsrp->tsr_flags = TSRF_QLEN_REQ;
1152*7c478bd9Sstevel@tonic-gate 
1153*7c478bd9Sstevel@tonic-gate 	rval = _t_do_ioctl(fd, ioctlbuf,
1154*7c478bd9Sstevel@tonic-gate 	    (int)sizeof (struct ti_sync_req), TI_SYNC, &retlen);
1155*7c478bd9Sstevel@tonic-gate 	expected_acksize = (int)sizeof (struct ti_sync_ack);
1156*7c478bd9Sstevel@tonic-gate 
1157*7c478bd9Sstevel@tonic-gate 	if (rval < 0) {
1158*7c478bd9Sstevel@tonic-gate 		sv_errno = errno;
1159*7c478bd9Sstevel@tonic-gate 		(void) _t_delete_tilink(fd);
1160*7c478bd9Sstevel@tonic-gate 		t_errno = TSYSERR;
1161*7c478bd9Sstevel@tonic-gate 		sig_mutex_unlock(&ntiptr->ti_lock);
1162*7c478bd9Sstevel@tonic-gate 		trace2(TR__t_create, 1, flags);
1163*7c478bd9Sstevel@tonic-gate 		errno = sv_errno;
1164*7c478bd9Sstevel@tonic-gate 		return (NULL);
1165*7c478bd9Sstevel@tonic-gate 	}
1166*7c478bd9Sstevel@tonic-gate 
1167*7c478bd9Sstevel@tonic-gate 	/*
1168*7c478bd9Sstevel@tonic-gate 	 * This is a "less than" check as "struct ti_sync_ack" returned by
1169*7c478bd9Sstevel@tonic-gate 	 * TI_SYNC can grow in size in future kernels. If/when a statically
1170*7c478bd9Sstevel@tonic-gate 	 * linked application is run on a future kernel, it should not fail.
1171*7c478bd9Sstevel@tonic-gate 	 */
1172*7c478bd9Sstevel@tonic-gate 	if (retlen < expected_acksize) {
1173*7c478bd9Sstevel@tonic-gate 		sv_errno = errno;
1174*7c478bd9Sstevel@tonic-gate 		(void) _t_delete_tilink(fd);
1175*7c478bd9Sstevel@tonic-gate 		t_errno = TSYSERR;
1176*7c478bd9Sstevel@tonic-gate 		sig_mutex_unlock(&ntiptr->ti_lock);
1177*7c478bd9Sstevel@tonic-gate 		trace2(TR__t_create, 1, flags);
1178*7c478bd9Sstevel@tonic-gate 		errno = sv_errno;
1179*7c478bd9Sstevel@tonic-gate 		return (NULL);
1180*7c478bd9Sstevel@tonic-gate 	}
1181*7c478bd9Sstevel@tonic-gate 
1182*7c478bd9Sstevel@tonic-gate 	if (_T_IS_TLI(api_semantics))
1183*7c478bd9Sstevel@tonic-gate 		tsap->tsa_qlen = 0; /* not needed for TLI */
1184*7c478bd9Sstevel@tonic-gate 
1185*7c478bd9Sstevel@tonic-gate 	ntiptr->ti_qlen = tsap->tsa_qlen;
1186*7c478bd9Sstevel@tonic-gate 	sig_mutex_unlock(&ntiptr->ti_lock);
1187*7c478bd9Sstevel@tonic-gate 	return (ntiptr);
1188*7c478bd9Sstevel@tonic-gate }
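/*
 * Illustrative sketch (editorial assumption, not original source): the
 * expected calling convention for _t_create(), as inferred from the tail
 * above. "fd", "info" and "api_semantics" stand for hypothetical
 * caller-supplied values; the exact recovery a real caller performs when
 * "capreq_failed" is set is not shown in this file.
 *
 *	int capreq_failed = 0;
 *	struct _ti_user *tiptr;
 *
 *	if ((tiptr = _t_create(fd, info, api_semantics,
 *	    &capreq_failed)) == NULL) {
 *		if (capreq_failed) {
 *			(the provider rejected T_CAPABILITY_REQ, possibly
 *			with M_ERROR; the descriptor may be unusable and
 *			the caller may need to reopen before retrying)
 *		}
 *		return (-1);
 *	}
 */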
1189*7c478bd9Sstevel@tonic-gate 
1190*7c478bd9Sstevel@tonic-gate 
1191*7c478bd9Sstevel@tonic-gate static int
1192*7c478bd9Sstevel@tonic-gate _t_adjust_state(int fd, int instate)
1193*7c478bd9Sstevel@tonic-gate {
1194*7c478bd9Sstevel@tonic-gate 	char ctlbuf[sizeof (t_scalar_t)];
1195*7c478bd9Sstevel@tonic-gate 	char databuf[sizeof (int)]; /* size unimportant - anything > 0 */
1196*7c478bd9Sstevel@tonic-gate 	struct strpeek arg;
1197*7c478bd9Sstevel@tonic-gate 	int outstate, retval;
1198*7c478bd9Sstevel@tonic-gate 
1199*7c478bd9Sstevel@tonic-gate 	/*
1200*7c478bd9Sstevel@tonic-gate 	 * Peek at the message on the stream head (if any)
1201*7c478bd9Sstevel@tonic-gate 	 * and see if it is data.
1202*7c478bd9Sstevel@tonic-gate 	 */
1203*7c478bd9Sstevel@tonic-gate 	arg.ctlbuf.buf = ctlbuf;
1204*7c478bd9Sstevel@tonic-gate 	arg.ctlbuf.maxlen = (int)sizeof (ctlbuf);
1205*7c478bd9Sstevel@tonic-gate 	arg.ctlbuf.len = 0;
1206*7c478bd9Sstevel@tonic-gate 
1207*7c478bd9Sstevel@tonic-gate 	arg.databuf.buf = databuf;
1208*7c478bd9Sstevel@tonic-gate 	arg.databuf.maxlen = (int)sizeof (databuf);
1209*7c478bd9Sstevel@tonic-gate 	arg.databuf.len = 0;
1210*7c478bd9Sstevel@tonic-gate 
1211*7c478bd9Sstevel@tonic-gate 	arg.flags = 0;
1212*7c478bd9Sstevel@tonic-gate 
1213*7c478bd9Sstevel@tonic-gate 	if ((retval = _ioctl(fd, I_PEEK, &arg)) < 0)  {
1214*7c478bd9Sstevel@tonic-gate 		t_errno = TSYSERR;
1215*7c478bd9Sstevel@tonic-gate 		return (-1);
1216*7c478bd9Sstevel@tonic-gate 	}
1217*7c478bd9Sstevel@tonic-gate 	outstate = instate;
1218*7c478bd9Sstevel@tonic-gate 	/*
1219*7c478bd9Sstevel@tonic-gate 	 * If the peek shows something at the stream head, then
1220*7c478bd9Sstevel@tonic-gate 	 * adjust "outstate" based on some heuristics.
1221*7c478bd9Sstevel@tonic-gate 	 */
1222*7c478bd9Sstevel@tonic-gate 	if (retval > 0) {
1223*7c478bd9Sstevel@tonic-gate 		switch (instate) {
1224*7c478bd9Sstevel@tonic-gate 		case T_IDLE:
1225*7c478bd9Sstevel@tonic-gate 			/*
1226*7c478bd9Sstevel@tonic-gate 			 * The following heuristic is to handle data
1227*7c478bd9Sstevel@tonic-gate 			 * ahead of T_DISCON_IND indications that might
1228*7c478bd9Sstevel@tonic-gate 			 * be at the stream head waiting to be
1229*7c478bd9Sstevel@tonic-gate 			 * read (T_DATA_IND or M_DATA)
1230*7c478bd9Sstevel@tonic-gate 			 */
1231*7c478bd9Sstevel@tonic-gate 			if (((arg.ctlbuf.len == 4) &&
1232*7c478bd9Sstevel@tonic-gate 			    ((*(int32_t *)arg.ctlbuf.buf) == T_DATA_IND)) ||
1233*7c478bd9Sstevel@tonic-gate 			    ((arg.ctlbuf.len == 0) && arg.databuf.len)) {
1234*7c478bd9Sstevel@tonic-gate 				outstate = T_DATAXFER;
1235*7c478bd9Sstevel@tonic-gate 			}
1236*7c478bd9Sstevel@tonic-gate 			break;
1237*7c478bd9Sstevel@tonic-gate 		case T_DATAXFER:
1238*7c478bd9Sstevel@tonic-gate 			/*
1239*7c478bd9Sstevel@tonic-gate 			 * The following heuristic is to handle
1240*7c478bd9Sstevel@tonic-gate 			 * the case where the connection is established
1241*7c478bd9Sstevel@tonic-gate 			 * and in data transfer state at the provider
1242*7c478bd9Sstevel@tonic-gate 			 * but the T_CONN_CON has not yet been read
1243*7c478bd9Sstevel@tonic-gate 			 * from the stream head.
1244*7c478bd9Sstevel@tonic-gate 			 */
1245*7c478bd9Sstevel@tonic-gate 			if ((arg.ctlbuf.len == 4) &&
1246*7c478bd9Sstevel@tonic-gate 			    ((*(int32_t *)arg.ctlbuf.buf) == T_CONN_CON))
1247*7c478bd9Sstevel@tonic-gate 				outstate = T_OUTCON;
1248*7c478bd9Sstevel@tonic-gate 			break;
1249*7c478bd9Sstevel@tonic-gate 		case T_INREL:
1250*7c478bd9Sstevel@tonic-gate 			/*
1251*7c478bd9Sstevel@tonic-gate 			 * The following heuristic is to handle data
1252*7c478bd9Sstevel@tonic-gate 			 * ahead of T_ORDREL_IND indications that might
1253*7c478bd9Sstevel@tonic-gate 			 * be at the stream head waiting to be
1254*7c478bd9Sstevel@tonic-gate 			 * read (T_DATA_IND or M_DATA)
1255*7c478bd9Sstevel@tonic-gate 			 */
1256*7c478bd9Sstevel@tonic-gate 			if (((arg.ctlbuf.len == 4) &&
1257*7c478bd9Sstevel@tonic-gate 			    ((*(int32_t *)arg.ctlbuf.buf) == T_DATA_IND)) ||
1258*7c478bd9Sstevel@tonic-gate 			    ((arg.ctlbuf.len == 0) && arg.databuf.len)) {
1259*7c478bd9Sstevel@tonic-gate 				outstate = T_DATAXFER;
1260*7c478bd9Sstevel@tonic-gate 			}
1261*7c478bd9Sstevel@tonic-gate 			break;
1262*7c478bd9Sstevel@tonic-gate 		default:
1263*7c478bd9Sstevel@tonic-gate 			break;
1264*7c478bd9Sstevel@tonic-gate 		}
1265*7c478bd9Sstevel@tonic-gate 	}
1266*7c478bd9Sstevel@tonic-gate 	return (outstate);
1267*7c478bd9Sstevel@tonic-gate }
1268*7c478bd9Sstevel@tonic-gate 
1269*7c478bd9Sstevel@tonic-gate /*
1270*7c478bd9Sstevel@tonic-gate  * Assumes caller has blocked signals at least in this thread (for safe
1271*7c478bd9Sstevel@tonic-gate  * malloc/free operations)
1272*7c478bd9Sstevel@tonic-gate  */
1273*7c478bd9Sstevel@tonic-gate static int
1274*7c478bd9Sstevel@tonic-gate _t_cbuf_alloc(struct _ti_user *tiptr, char **retbuf)
1275*7c478bd9Sstevel@tonic-gate {
1276*7c478bd9Sstevel@tonic-gate 	unsigned	size2;
1277*7c478bd9Sstevel@tonic-gate 
1278*7c478bd9Sstevel@tonic-gate 	assert(MUTEX_HELD(&tiptr->ti_lock));
1279*7c478bd9Sstevel@tonic-gate 	size2 = tiptr->ti_ctlsize; /* same size as default ctlbuf */
1280*7c478bd9Sstevel@tonic-gate 
1281*7c478bd9Sstevel@tonic-gate 	if ((*retbuf = malloc(size2)) == NULL) {
1282*7c478bd9Sstevel@tonic-gate 		return (-1);
1283*7c478bd9Sstevel@tonic-gate 	}
1284*7c478bd9Sstevel@tonic-gate 	return (size2);
1285*7c478bd9Sstevel@tonic-gate }
1286*7c478bd9Sstevel@tonic-gate 
1287*7c478bd9Sstevel@tonic-gate 
1288*7c478bd9Sstevel@tonic-gate /*
1289*7c478bd9Sstevel@tonic-gate  * Assumes caller has blocked signals at least in this thread (for safe
1290*7c478bd9Sstevel@tonic-gate  * malloc/free operations)
1291*7c478bd9Sstevel@tonic-gate  */
1292*7c478bd9Sstevel@tonic-gate int
1293*7c478bd9Sstevel@tonic-gate _t_rbuf_alloc(struct _ti_user *tiptr, char **retbuf)
1294*7c478bd9Sstevel@tonic-gate {
1295*7c478bd9Sstevel@tonic-gate 	unsigned	size1;
1296*7c478bd9Sstevel@tonic-gate 
1297*7c478bd9Sstevel@tonic-gate 	assert(MUTEX_HELD(&tiptr->ti_lock));
1298*7c478bd9Sstevel@tonic-gate 	size1 = tiptr->ti_rcvsize; /* same size as default rcvbuf */
1299*7c478bd9Sstevel@tonic-gate 
1300*7c478bd9Sstevel@tonic-gate 	if ((*retbuf = malloc(size1)) == NULL) {
1301*7c478bd9Sstevel@tonic-gate 		return (-1);
1302*7c478bd9Sstevel@tonic-gate 	}
1303*7c478bd9Sstevel@tonic-gate 	return (size1);
1304*7c478bd9Sstevel@tonic-gate }
1305*7c478bd9Sstevel@tonic-gate 
1306*7c478bd9Sstevel@tonic-gate /*
1307*7c478bd9Sstevel@tonic-gate  * Free lookbuffer structures and associated resources
1308*7c478bd9Sstevel@tonic-gate  * Assumes ti_lock held for MT case.
1309*7c478bd9Sstevel@tonic-gate  */
1310*7c478bd9Sstevel@tonic-gate static void
1311*7c478bd9Sstevel@tonic-gate _t_free_lookbufs(struct _ti_user *tiptr)
1312*7c478bd9Sstevel@tonic-gate {
1313*7c478bd9Sstevel@tonic-gate 	struct _ti_lookbufs *tlbs, *prev_tlbs, *head_tlbs;
1314*7c478bd9Sstevel@tonic-gate 
1315*7c478bd9Sstevel@tonic-gate 	/*
1316*7c478bd9Sstevel@tonic-gate 	 * Assertion:
1317*7c478bd9Sstevel@tonic-gate 	 * Either the structure lock or the global list-manipulation
1318*7c478bd9Sstevel@tonic-gate 	 * lock should be held. The assumption is that nothing else
1319*7c478bd9Sstevel@tonic-gate 	 * can access the descriptor while the global list-manipulation
1320*7c478bd9Sstevel@tonic-gate 	 * lock is held, so it is OK to manipulate fields without the
1321*7c478bd9Sstevel@tonic-gate 	 * structure lock.
1322*7c478bd9Sstevel@tonic-gate 	 */
1323*7c478bd9Sstevel@tonic-gate 	assert(MUTEX_HELD(&tiptr->ti_lock) || MUTEX_HELD(&_ti_userlock));
1324*7c478bd9Sstevel@tonic-gate 
1325*7c478bd9Sstevel@tonic-gate 	/*
1326*7c478bd9Sstevel@tonic-gate 	 * Free only the buffers in the first lookbuf
1327*7c478bd9Sstevel@tonic-gate 	 */
1328*7c478bd9Sstevel@tonic-gate 	head_tlbs = &tiptr->ti_lookbufs;
1329*7c478bd9Sstevel@tonic-gate 	if (head_tlbs->tl_lookdbuf != NULL) {
1330*7c478bd9Sstevel@tonic-gate 		free(head_tlbs->tl_lookdbuf);
1331*7c478bd9Sstevel@tonic-gate 		head_tlbs->tl_lookdbuf = NULL;
1332*7c478bd9Sstevel@tonic-gate 	}
1333*7c478bd9Sstevel@tonic-gate 	free(head_tlbs->tl_lookcbuf);
1334*7c478bd9Sstevel@tonic-gate 	head_tlbs->tl_lookcbuf = NULL;
1335*7c478bd9Sstevel@tonic-gate 	/*
1336*7c478bd9Sstevel@tonic-gate 	 * Free the node and the buffers in the rest of the
1337*7c478bd9Sstevel@tonic-gate 	 * list
1338*7c478bd9Sstevel@tonic-gate 	 */
1339*7c478bd9Sstevel@tonic-gate 
1340*7c478bd9Sstevel@tonic-gate 	tlbs = head_tlbs->tl_next;
1341*7c478bd9Sstevel@tonic-gate 	head_tlbs->tl_next = NULL;
1342*7c478bd9Sstevel@tonic-gate 
1343*7c478bd9Sstevel@tonic-gate 	while (tlbs != NULL) {
1344*7c478bd9Sstevel@tonic-gate 		if (tlbs->tl_lookdbuf != NULL)
1345*7c478bd9Sstevel@tonic-gate 			free(tlbs->tl_lookdbuf);
1346*7c478bd9Sstevel@tonic-gate 		free(tlbs->tl_lookcbuf);
1347*7c478bd9Sstevel@tonic-gate 		prev_tlbs = tlbs;
1348*7c478bd9Sstevel@tonic-gate 		tlbs = tlbs->tl_next;
1349*7c478bd9Sstevel@tonic-gate 		free((char *)prev_tlbs);
1350*7c478bd9Sstevel@tonic-gate 	}
1351*7c478bd9Sstevel@tonic-gate }
1352*7c478bd9Sstevel@tonic-gate 
1353*7c478bd9Sstevel@tonic-gate /*
1354*7c478bd9Sstevel@tonic-gate  * Free the lookbuffer event list head and consume the current
1355*7c478bd9Sstevel@tonic-gate  * lookbuffer event.
1356*7c478bd9Sstevel@tonic-gate  * Assumes ti_lock is held for the MT case.
1357*7c478bd9Sstevel@tonic-gate  * Note: The head of this list is part of the instance
1358*7c478bd9Sstevel@tonic-gate  * structure, so the code is a little unorthodox.
1359*7c478bd9Sstevel@tonic-gate  */
1360*7c478bd9Sstevel@tonic-gate void
1361*7c478bd9Sstevel@tonic-gate _t_free_looklist_head(struct _ti_user *tiptr)
1362*7c478bd9Sstevel@tonic-gate {
1363*7c478bd9Sstevel@tonic-gate 	struct _ti_lookbufs *tlbs, *next_tlbs;
1364*7c478bd9Sstevel@tonic-gate 
1365*7c478bd9Sstevel@tonic-gate 	tlbs = &tiptr->ti_lookbufs;
1366*7c478bd9Sstevel@tonic-gate 
1367*7c478bd9Sstevel@tonic-gate 	if (tlbs->tl_next) {
1368*7c478bd9Sstevel@tonic-gate 		/*
1369*7c478bd9Sstevel@tonic-gate 		 * Free the control and data buffers
1370*7c478bd9Sstevel@tonic-gate 		 */
1371*7c478bd9Sstevel@tonic-gate 		if (tlbs->tl_lookdbuf != NULL)
1372*7c478bd9Sstevel@tonic-gate 			free(tlbs->tl_lookdbuf);
1373*7c478bd9Sstevel@tonic-gate 		free(tlbs->tl_lookcbuf);
1374*7c478bd9Sstevel@tonic-gate 		/*
1375*7c478bd9Sstevel@tonic-gate 		 * Replace with next lookbuf event contents
1376*7c478bd9Sstevel@tonic-gate 		 */
1377*7c478bd9Sstevel@tonic-gate 		next_tlbs = tlbs->tl_next;
1378*7c478bd9Sstevel@tonic-gate 		tlbs->tl_next = next_tlbs->tl_next;
1379*7c478bd9Sstevel@tonic-gate 		tlbs->tl_lookcbuf = next_tlbs->tl_lookcbuf;
1380*7c478bd9Sstevel@tonic-gate 		tlbs->tl_lookclen = next_tlbs->tl_lookclen;
1381*7c478bd9Sstevel@tonic-gate 		tlbs->tl_lookdbuf = next_tlbs->tl_lookdbuf;
1382*7c478bd9Sstevel@tonic-gate 		tlbs->tl_lookdlen = next_tlbs->tl_lookdlen;
1383*7c478bd9Sstevel@tonic-gate 		free(next_tlbs);
1384*7c478bd9Sstevel@tonic-gate 		/*
1385*7c478bd9Sstevel@tonic-gate 		 * Decrement the count - it should never get to zero
1386*7c478bd9Sstevel@tonic-gate 		 * in this path.
1387*7c478bd9Sstevel@tonic-gate 		 */
1388*7c478bd9Sstevel@tonic-gate 		tiptr->ti_lookcnt--;
1389*7c478bd9Sstevel@tonic-gate 		assert(tiptr->ti_lookcnt > 0);
1390*7c478bd9Sstevel@tonic-gate 	} else {
1391*7c478bd9Sstevel@tonic-gate 		/*
1392*7c478bd9Sstevel@tonic-gate 		 * No more look buffer events - just clear the flag
1393*7c478bd9Sstevel@tonic-gate 		 * and leave the buffers alone
1394*7c478bd9Sstevel@tonic-gate 		 */
1395*7c478bd9Sstevel@tonic-gate 		assert(tiptr->ti_lookcnt == 1);
1396*7c478bd9Sstevel@tonic-gate 		tiptr->ti_lookcnt = 0;
1397*7c478bd9Sstevel@tonic-gate 	}
1398*7c478bd9Sstevel@tonic-gate }
1399*7c478bd9Sstevel@tonic-gate 
1400*7c478bd9Sstevel@tonic-gate /*
1401*7c478bd9Sstevel@tonic-gate  * Discard lookbuffer events.
1402*7c478bd9Sstevel@tonic-gate  * Assumes ti_lock held for MT case.
1403*7c478bd9Sstevel@tonic-gate  */
1404*7c478bd9Sstevel@tonic-gate void
1405*7c478bd9Sstevel@tonic-gate _t_flush_lookevents(struct _ti_user *tiptr)
1406*7c478bd9Sstevel@tonic-gate {
1407*7c478bd9Sstevel@tonic-gate 	struct _ti_lookbufs *tlbs, *prev_tlbs;
1408*7c478bd9Sstevel@tonic-gate 
1409*7c478bd9Sstevel@tonic-gate 	/*
1410*7c478bd9Sstevel@tonic-gate 	 * Leave the first node's buffers alone (i.e. allocated)
1411*7c478bd9Sstevel@tonic-gate 	 * but reset the flag.
1412*7c478bd9Sstevel@tonic-gate 	 */
1413*7c478bd9Sstevel@tonic-gate 	assert(MUTEX_HELD(&tiptr->ti_lock));
1414*7c478bd9Sstevel@tonic-gate 	tiptr->ti_lookcnt = 0;
1415*7c478bd9Sstevel@tonic-gate 	/*
1416*7c478bd9Sstevel@tonic-gate 	 * Blow away the rest of the list
1417*7c478bd9Sstevel@tonic-gate 	 */
1418*7c478bd9Sstevel@tonic-gate 	tlbs = tiptr->ti_lookbufs.tl_next;
1419*7c478bd9Sstevel@tonic-gate 	tiptr->ti_lookbufs.tl_next = NULL;
1420*7c478bd9Sstevel@tonic-gate 	while (tlbs != NULL) {
1421*7c478bd9Sstevel@tonic-gate 		if (tlbs->tl_lookdbuf != NULL)
1422*7c478bd9Sstevel@tonic-gate 			free(tlbs->tl_lookdbuf);
1423*7c478bd9Sstevel@tonic-gate 		free(tlbs->tl_lookcbuf);
1424*7c478bd9Sstevel@tonic-gate 		prev_tlbs = tlbs;
1425*7c478bd9Sstevel@tonic-gate 		tlbs = tlbs->tl_next;
1426*7c478bd9Sstevel@tonic-gate 		free((char *)prev_tlbs);
1427*7c478bd9Sstevel@tonic-gate 	}
1428*7c478bd9Sstevel@tonic-gate }
1429*7c478bd9Sstevel@tonic-gate 
1430*7c478bd9Sstevel@tonic-gate 
1431*7c478bd9Sstevel@tonic-gate /*
1432*7c478bd9Sstevel@tonic-gate  * This routine checks if the control buffer in the instance structure
1433*7c478bd9Sstevel@tonic-gate  * is available (non-null). If it is, the buffer is acquired and marked busy
1434*7c478bd9Sstevel@tonic-gate  * (null). If it is busy (possible in MT programs), it allocates a new
1435*7c478bd9Sstevel@tonic-gate  * buffer and sets a flag indicating new memory was allocated and the caller
1436*7c478bd9Sstevel@tonic-gate  * has to free it.
1437*7c478bd9Sstevel@tonic-gate  */
1438*7c478bd9Sstevel@tonic-gate int
1439*7c478bd9Sstevel@tonic-gate _t_acquire_ctlbuf(
1440*7c478bd9Sstevel@tonic-gate 	struct _ti_user *tiptr,
1441*7c478bd9Sstevel@tonic-gate 	struct strbuf *ctlbufp,
1442*7c478bd9Sstevel@tonic-gate 	int *didallocp)
1443*7c478bd9Sstevel@tonic-gate {
1444*7c478bd9Sstevel@tonic-gate 	*didallocp = 0;
1445*7c478bd9Sstevel@tonic-gate 
1446*7c478bd9Sstevel@tonic-gate 	ctlbufp->len = 0;
1447*7c478bd9Sstevel@tonic-gate 	if (tiptr->ti_ctlbuf) {
1448*7c478bd9Sstevel@tonic-gate 		ctlbufp->buf = tiptr->ti_ctlbuf;
1449*7c478bd9Sstevel@tonic-gate 		tiptr->ti_ctlbuf = NULL;
1450*7c478bd9Sstevel@tonic-gate 		ctlbufp->maxlen = tiptr->ti_ctlsize;
1451*7c478bd9Sstevel@tonic-gate 	} else {
1452*7c478bd9Sstevel@tonic-gate 		/*
1453*7c478bd9Sstevel@tonic-gate 		 * tiptr->ti_ctlbuf is in use
1454*7c478bd9Sstevel@tonic-gate 		 * allocate new buffer and free after use.
1455*7c478bd9Sstevel@tonic-gate 		 */
1456*7c478bd9Sstevel@tonic-gate 		if ((ctlbufp->maxlen = _t_cbuf_alloc(tiptr,
1457*7c478bd9Sstevel@tonic-gate 						&ctlbufp->buf)) < 0) {
1458*7c478bd9Sstevel@tonic-gate 			t_errno = TSYSERR;
1459*7c478bd9Sstevel@tonic-gate 			return (-1);
1460*7c478bd9Sstevel@tonic-gate 		}
1461*7c478bd9Sstevel@tonic-gate 		*didallocp = 1;
1462*7c478bd9Sstevel@tonic-gate 	}
1463*7c478bd9Sstevel@tonic-gate 	return (0);
1464*7c478bd9Sstevel@tonic-gate }
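/*
 * Illustrative sketch (editorial assumption, not original source): the
 * acquire/release pattern the routine above is designed for. On release,
 * a caller either returns the shared buffer to the instance structure or
 * frees the private copy, depending on "didalloc".
 *
 *	struct strbuf ctlbuf;
 *	int didalloc;
 *
 *	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0)
 *		return (-1);
 *	... use ctlbuf, e.g. with getmsg() or I_PEEK ...
 *	if (didalloc)
 *		free(ctlbuf.buf);
 *	else
 *		tiptr->ti_ctlbuf = ctlbuf.buf;
 */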
1465*7c478bd9Sstevel@tonic-gate 
1466*7c478bd9Sstevel@tonic-gate /*
1467*7c478bd9Sstevel@tonic-gate  * This routine checks if the receive buffer in the instance structure
1468*7c478bd9Sstevel@tonic-gate  * is available (non-null). If it is, the buffer is acquired and marked busy
1469*7c478bd9Sstevel@tonic-gate  * (null). If it is busy (possible in MT programs), it allocates a new
1470*7c478bd9Sstevel@tonic-gate  * buffer and sets a flag indicating new memory was allocated and the caller
1471*7c478bd9Sstevel@tonic-gate  * has to free it.
1472*7c478bd9Sstevel@tonic-gate  * Note: The receive buffer pointer can also be null if the transport
1473*7c478bd9Sstevel@tonic-gate  * provider does not support connect/disconnect data (e.g. TCP) - not
1474*7c478bd9Sstevel@tonic-gate  * just when it is "busy". In that case, ti_rcvsize will be 0 and is
1475*7c478bd9Sstevel@tonic-gate  * used to instantiate the databuf, which points to a null buffer of
1476*7c478bd9Sstevel@tonic-gate  * length 0 - the right thing to do for that case.
1477*7c478bd9Sstevel@tonic-gate  */
1478*7c478bd9Sstevel@tonic-gate int
1479*7c478bd9Sstevel@tonic-gate _t_acquire_databuf(
1480*7c478bd9Sstevel@tonic-gate 	struct _ti_user *tiptr,
1481*7c478bd9Sstevel@tonic-gate 	struct strbuf *databufp,
1482*7c478bd9Sstevel@tonic-gate 	int *didallocp)
1483*7c478bd9Sstevel@tonic-gate {
1484*7c478bd9Sstevel@tonic-gate 	*didallocp = 0;
1485*7c478bd9Sstevel@tonic-gate 
1486*7c478bd9Sstevel@tonic-gate 	databufp->len = 0;
1487*7c478bd9Sstevel@tonic-gate 	if (tiptr->ti_rcvbuf) {
1488*7c478bd9Sstevel@tonic-gate 		assert(tiptr->ti_rcvsize != 0);
1489*7c478bd9Sstevel@tonic-gate 		databufp->buf = tiptr->ti_rcvbuf;
1490*7c478bd9Sstevel@tonic-gate 		tiptr->ti_rcvbuf = NULL;
1491*7c478bd9Sstevel@tonic-gate 		databufp->maxlen = tiptr->ti_rcvsize;
1492*7c478bd9Sstevel@tonic-gate 	} else if (tiptr->ti_rcvsize == 0) {
1493*7c478bd9Sstevel@tonic-gate 		databufp->buf = NULL;
1494*7c478bd9Sstevel@tonic-gate 		databufp->maxlen = 0;
1495*7c478bd9Sstevel@tonic-gate 	} else {
1496*7c478bd9Sstevel@tonic-gate 		/*
1497*7c478bd9Sstevel@tonic-gate 		 * tiptr->ti_rcvbuf is in use
1498*7c478bd9Sstevel@tonic-gate 		 * allocate new buffer and free after use.
1499*7c478bd9Sstevel@tonic-gate 		 */
1500*7c478bd9Sstevel@tonic-gate 		if ((databufp->maxlen = _t_rbuf_alloc(tiptr,
1501*7c478bd9Sstevel@tonic-gate 						&databufp->buf)) < 0) {
1502*7c478bd9Sstevel@tonic-gate 			t_errno = TSYSERR;
1503*7c478bd9Sstevel@tonic-gate 			return (-1);
1504*7c478bd9Sstevel@tonic-gate 		}
1505*7c478bd9Sstevel@tonic-gate 		*didallocp = 1;
1506*7c478bd9Sstevel@tonic-gate 	}
1507*7c478bd9Sstevel@tonic-gate 	return (0);
1508*7c478bd9Sstevel@tonic-gate }
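/*
 * Illustrative note (editorial assumption): the same release idiom as for
 * the control buffer also covers the ti_rcvsize == 0 case above, since
 * storing the NULL databuf.buf back into ti_rcvbuf leaves it NULL, which
 * is what it already was for such providers:
 *
 *	if (didalloc)
 *		free(databuf.buf);
 *	else
 *		tiptr->ti_rcvbuf = databuf.buf;
 */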
1509*7c478bd9Sstevel@tonic-gate 
1510*7c478bd9Sstevel@tonic-gate /*
1511*7c478bd9Sstevel@tonic-gate  * This routine requests timod to look for any expedited data
1512*7c478bd9Sstevel@tonic-gate  * queued in the "receive buffers" in the kernel. Used for XTI
1513*7c478bd9Sstevel@tonic-gate  * t_look() semantics for transports that send expedited data
1514*7c478bd9Sstevel@tonic-gate  * inline (e.g. TCP).
1515*7c478bd9Sstevel@tonic-gate  * Returns -1 for failure.
1516*7c478bd9Sstevel@tonic-gate  * Returns 0 for success.
1517*7c478bd9Sstevel@tonic-gate  * 	On a successful return, the location pointed to by "expedited_queuedp"
1518*7c478bd9Sstevel@tonic-gate  * 	contains
1519*7c478bd9Sstevel@tonic-gate  *		0 if no expedited data is found queued in "receive buffers"
1520*7c478bd9Sstevel@tonic-gate  *		1 if expedited data is found queued in "receive buffers"
1521*7c478bd9Sstevel@tonic-gate  */
1522*7c478bd9Sstevel@tonic-gate 
1523*7c478bd9Sstevel@tonic-gate int
1524*7c478bd9Sstevel@tonic-gate _t_expinline_queued(int fd, int *expedited_queuedp)
1525*7c478bd9Sstevel@tonic-gate {
1526*7c478bd9Sstevel@tonic-gate 	union {
1527*7c478bd9Sstevel@tonic-gate 		struct ti_sync_req ti_req;
1528*7c478bd9Sstevel@tonic-gate 		struct ti_sync_ack ti_ack;
1529*7c478bd9Sstevel@tonic-gate 		char pad[128];
1530*7c478bd9Sstevel@tonic-gate 	} ioctl_data;
1531*7c478bd9Sstevel@tonic-gate 	void *ioctlbuf = &ioctl_data; /* for TI_SYNC with room to grow */
1532*7c478bd9Sstevel@tonic-gate 			    /* preferred location first local variable */
1533*7c478bd9Sstevel@tonic-gate 			    /* see note in _t_create above */
1534*7c478bd9Sstevel@tonic-gate 	struct ti_sync_req *tsrp = (struct ti_sync_req *)ioctlbuf;
1535*7c478bd9Sstevel@tonic-gate 	struct ti_sync_ack *tsap = (struct ti_sync_ack *)ioctlbuf;
1536*7c478bd9Sstevel@tonic-gate 	int rval, retlen;
1537*7c478bd9Sstevel@tonic-gate 
1538*7c478bd9Sstevel@tonic-gate 	*expedited_queuedp = 0;
1539*7c478bd9Sstevel@tonic-gate 	/* request info on rq expinds  */
1540*7c478bd9Sstevel@tonic-gate 	tsrp->tsr_flags = TSRF_IS_EXP_IN_RCVBUF;
1541*7c478bd9Sstevel@tonic-gate 	do {
1542*7c478bd9Sstevel@tonic-gate 		rval = _t_do_ioctl(fd, ioctlbuf,
1543*7c478bd9Sstevel@tonic-gate 		    (int)sizeof (struct ti_sync_req), TI_SYNC, &retlen);
1544*7c478bd9Sstevel@tonic-gate 	} while (rval < 0 && errno == EINTR);
1545*7c478bd9Sstevel@tonic-gate 
1546*7c478bd9Sstevel@tonic-gate 	if (rval < 0)
1547*7c478bd9Sstevel@tonic-gate 		return (-1);
1548*7c478bd9Sstevel@tonic-gate 
1549*7c478bd9Sstevel@tonic-gate 	/*
1550*7c478bd9Sstevel@tonic-gate 	 * This is a "less than" check as "struct ti_sync_ack" returned by
1551*7c478bd9Sstevel@tonic-gate 	 * TI_SYNC can grow in size in future kernels. If/when a statically
1552*7c478bd9Sstevel@tonic-gate 	 * linked application is run on a future kernel, it should not fail.
1553*7c478bd9Sstevel@tonic-gate 	 */
1554*7c478bd9Sstevel@tonic-gate 	if (retlen < (int)sizeof (struct ti_sync_ack)) {
1555*7c478bd9Sstevel@tonic-gate 		t_errno = TSYSERR;
1556*7c478bd9Sstevel@tonic-gate 		errno = EIO;
1557*7c478bd9Sstevel@tonic-gate 		return (-1);
1558*7c478bd9Sstevel@tonic-gate 	}
1559*7c478bd9Sstevel@tonic-gate 	if (tsap->tsa_flags & TSAF_EXP_QUEUED)
1560*7c478bd9Sstevel@tonic-gate 		*expedited_queuedp = 1;
1561*7c478bd9Sstevel@tonic-gate 	return (0);
1562*7c478bd9Sstevel@tonic-gate }
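/*
 * Illustrative sketch (editorial assumption, not original source): how a
 * t_look()-style routine might use the helper above for providers that
 * deliver expedited data inline. T_EXDATA is the standard XTI event code.
 *
 *	int exp_queued;
 *
 *	if (_t_expinline_queued(fd, &exp_queued) < 0)
 *		return (-1);
 *	if (exp_queued)
 *		return (T_EXDATA);
 */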
1563*7c478bd9Sstevel@tonic-gate 
1564*7c478bd9Sstevel@tonic-gate /*
1565*7c478bd9Sstevel@tonic-gate  * Support functions for use by functions that do scatter/gather
1566*7c478bd9Sstevel@tonic-gate  * like t_sndv(), t_rcvv() etc..follow below.
1567*7c478bd9Sstevel@tonic-gate  */
1568*7c478bd9Sstevel@tonic-gate 
1569*7c478bd9Sstevel@tonic-gate /*
1570*7c478bd9Sstevel@tonic-gate  * _t_bytecount_upto_intmax() :
1571*7c478bd9Sstevel@tonic-gate  *	    Sum of the lengths of the individual buffers in
1572*7c478bd9Sstevel@tonic-gate  *	    the t_iovec array. If the sum exceeds INT_MAX
1573*7c478bd9Sstevel@tonic-gate  *	    it is truncated to INT_MAX.
1574*7c478bd9Sstevel@tonic-gate  */
1575*7c478bd9Sstevel@tonic-gate unsigned int
1576*7c478bd9Sstevel@tonic-gate _t_bytecount_upto_intmax(const struct t_iovec *tiov, unsigned int tiovcount)
1577*7c478bd9Sstevel@tonic-gate {
1578*7c478bd9Sstevel@tonic-gate 	size_t nbytes;
1579*7c478bd9Sstevel@tonic-gate 	int i;
1580*7c478bd9Sstevel@tonic-gate 
1581*7c478bd9Sstevel@tonic-gate 	nbytes = 0;
1582*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < tiovcount && nbytes < INT_MAX; i++) {
1583*7c478bd9Sstevel@tonic-gate 		if (tiov[i].iov_len >= INT_MAX) {
1584*7c478bd9Sstevel@tonic-gate 			nbytes = INT_MAX;
1585*7c478bd9Sstevel@tonic-gate 			break;
1586*7c478bd9Sstevel@tonic-gate 		}
1587*7c478bd9Sstevel@tonic-gate 		nbytes += tiov[i].iov_len;
1588*7c478bd9Sstevel@tonic-gate 	}
1589*7c478bd9Sstevel@tonic-gate 
1590*7c478bd9Sstevel@tonic-gate 	if (nbytes > INT_MAX)
1591*7c478bd9Sstevel@tonic-gate 		nbytes = INT_MAX;
1592*7c478bd9Sstevel@tonic-gate 
1593*7c478bd9Sstevel@tonic-gate 	return ((unsigned int)nbytes);
1594*7c478bd9Sstevel@tonic-gate }
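/*
 * Worked example (editorial, illustrative): with tiovcount == 2 and
 * lengths { INT_MAX - 10, 100 }, the loop sums to a size_t value of
 * INT_MAX + 90 and the final clamp returns INT_MAX. A single element
 * with iov_len >= INT_MAX short-circuits to INT_MAX immediately, so
 * callers never see a count that overflows an int.
 */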
1595*7c478bd9Sstevel@tonic-gate 
1596*7c478bd9Sstevel@tonic-gate /*
1597*7c478bd9Sstevel@tonic-gate  * Gather the data in the t_iovec buffers, into a single linear buffer
1598*7c478bd9Sstevel@tonic-gate  * starting at dataptr. Caller must have allocated sufficient space
1599*7c478bd9Sstevel@tonic-gate  * starting at dataptr. The total amount of data that is gathered is
1600*7c478bd9Sstevel@tonic-gate  * limited to INT_MAX. Any remaining data in the t_iovec buffers is
1601*7c478bd9Sstevel@tonic-gate  * not copied.
1602*7c478bd9Sstevel@tonic-gate  */
1603*7c478bd9Sstevel@tonic-gate void
1604*7c478bd9Sstevel@tonic-gate _t_gather(char *dataptr, const struct t_iovec *tiov, unsigned int tiovcount)
1605*7c478bd9Sstevel@tonic-gate {
1606*7c478bd9Sstevel@tonic-gate 	char *curptr;
1607*7c478bd9Sstevel@tonic-gate 	unsigned int cur_count;
1608*7c478bd9Sstevel@tonic-gate 	unsigned int nbytes_remaining;
1609*7c478bd9Sstevel@tonic-gate 	int i;
1610*7c478bd9Sstevel@tonic-gate 
1611*7c478bd9Sstevel@tonic-gate 	curptr = dataptr;
1612*7c478bd9Sstevel@tonic-gate 	cur_count = 0;
1613*7c478bd9Sstevel@tonic-gate 
1614*7c478bd9Sstevel@tonic-gate 	nbytes_remaining = _t_bytecount_upto_intmax(tiov, tiovcount);
1615*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < tiovcount && nbytes_remaining != 0; i++) {
1616*7c478bd9Sstevel@tonic-gate 		if (tiov[i].iov_len <= nbytes_remaining)
1617*7c478bd9Sstevel@tonic-gate 			cur_count = (int)tiov[i].iov_len;
1618*7c478bd9Sstevel@tonic-gate 		else
1619*7c478bd9Sstevel@tonic-gate 			cur_count = nbytes_remaining;
1620*7c478bd9Sstevel@tonic-gate 		(void) memcpy(curptr, tiov[i].iov_base, cur_count);
1621*7c478bd9Sstevel@tonic-gate 		curptr += cur_count;
1622*7c478bd9Sstevel@tonic-gate 		nbytes_remaining -= cur_count;
1623*7c478bd9Sstevel@tonic-gate 	}
1624*7c478bd9Sstevel@tonic-gate }
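/*
 * Illustrative sketch (editorial assumption, not original source): a
 * t_sndv()-style caller linearizing the user's t_iovec array before
 * handing one contiguous buffer to the send machinery.
 *
 *	unsigned int nbytes;
 *	char *xmitbuf;
 *
 *	nbytes = _t_bytecount_upto_intmax(tiov, tiovcount);
 *	if ((xmitbuf = malloc(nbytes)) == NULL)
 *		return (-1);
 *	_t_gather(xmitbuf, tiov, tiovcount);
 *	... send (xmitbuf, nbytes), e.g. via putmsg() ...
 *	free(xmitbuf);
 */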
1625*7c478bd9Sstevel@tonic-gate 
1626*7c478bd9Sstevel@tonic-gate /*
1627*7c478bd9Sstevel@tonic-gate  * Scatter the data from the single linear buffer at pdatabuf->buf into
1628*7c478bd9Sstevel@tonic-gate  * the t_iovec buffers.
1629*7c478bd9Sstevel@tonic-gate  */
1630*7c478bd9Sstevel@tonic-gate void
1631*7c478bd9Sstevel@tonic-gate _t_scatter(struct strbuf *pdatabuf, struct t_iovec *tiov, int tiovcount)
1632*7c478bd9Sstevel@tonic-gate {
1633*7c478bd9Sstevel@tonic-gate 	char *curptr;
1634*7c478bd9Sstevel@tonic-gate 	unsigned int nbytes_remaining;
1635*7c478bd9Sstevel@tonic-gate 	unsigned int curlen;
1636*7c478bd9Sstevel@tonic-gate 	int i;
1637*7c478bd9Sstevel@tonic-gate 
1638*7c478bd9Sstevel@tonic-gate 	/*
1639*7c478bd9Sstevel@tonic-gate 	 * There cannot be any uncopied data leftover in pdatabuf
1640*7c478bd9Sstevel@tonic-gate 	 * at the conclusion of this function. (asserted below)
1641*7c478bd9Sstevel@tonic-gate 	 */
1642*7c478bd9Sstevel@tonic-gate 	assert(pdatabuf->len <= _t_bytecount_upto_intmax(tiov, tiovcount));
1643*7c478bd9Sstevel@tonic-gate 	curptr = pdatabuf->buf;
1644*7c478bd9Sstevel@tonic-gate 	nbytes_remaining = pdatabuf->len;
1645*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < tiovcount && nbytes_remaining != 0; i++) {
1646*7c478bd9Sstevel@tonic-gate 		if (tiov[i].iov_len < nbytes_remaining)
1647*7c478bd9Sstevel@tonic-gate 			curlen = (unsigned int)tiov[i].iov_len;
1648*7c478bd9Sstevel@tonic-gate 		else
1649*7c478bd9Sstevel@tonic-gate 			curlen = nbytes_remaining;
1650*7c478bd9Sstevel@tonic-gate 		(void) memcpy(tiov[i].iov_base, curptr, curlen);
1651*7c478bd9Sstevel@tonic-gate 		curptr += curlen;
1652*7c478bd9Sstevel@tonic-gate 		nbytes_remaining -= curlen;
1653*7c478bd9Sstevel@tonic-gate 	}
1654*7c478bd9Sstevel@tonic-gate }
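/*
 * Illustrative sketch (editorial assumption, not original source): a
 * t_rcvv()-style caller scattering a received linear buffer back into the
 * user's t_iovec array and reporting the byte count delivered.
 *
 *	struct strbuf databuf;
 *
 *	... getmsg() fills databuf.buf and sets databuf.len ...
 *	_t_scatter(&databuf, tiov, tiovcount);
 *	return (databuf.len);
 */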
1655*7c478bd9Sstevel@tonic-gate 
1656*7c478bd9Sstevel@tonic-gate /*
1657*7c478bd9Sstevel@tonic-gate  * Adjust the iovec array for subsequent use. Examine each element in the
1658*7c478bd9Sstevel@tonic-gate  * iovec array, and zero out the iov_len if the buffer was sent fully;
1659*7c478bd9Sstevel@tonic-gate  * otherwise the buffer was only partially sent, so adjust both iov_len
1660*7c478bd9Sstevel@tonic-gate  * and iov_base.
1662*7c478bd9Sstevel@tonic-gate  */
1663*7c478bd9Sstevel@tonic-gate void
1664*7c478bd9Sstevel@tonic-gate _t_adjust_iov(int bytes_sent, struct iovec *iov, int *iovcountp)
1665*7c478bd9Sstevel@tonic-gate {
1667*7c478bd9Sstevel@tonic-gate 	int i;
1668*7c478bd9Sstevel@tonic-gate 
1669*7c478bd9Sstevel@tonic-gate 	for (i = 0; i < *iovcountp && bytes_sent; i++) {
1670*7c478bd9Sstevel@tonic-gate 		if (iov[i].iov_len == 0)
1671*7c478bd9Sstevel@tonic-gate 			continue;
1672*7c478bd9Sstevel@tonic-gate 		if (bytes_sent < iov[i].iov_len)
1673*7c478bd9Sstevel@tonic-gate 			break;
1674*7c478bd9Sstevel@tonic-gate 		else {
1675*7c478bd9Sstevel@tonic-gate 			bytes_sent -= iov[i].iov_len;
1676*7c478bd9Sstevel@tonic-gate 			iov[i].iov_len = 0;
1677*7c478bd9Sstevel@tonic-gate 		}
1678*7c478bd9Sstevel@tonic-gate 	}
1679*7c478bd9Sstevel@tonic-gate 	iov[i].iov_len -= bytes_sent;
1680*7c478bd9Sstevel@tonic-gate 	iov[i].iov_base += bytes_sent;
1681*7c478bd9Sstevel@tonic-gate }
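/*
 * Worked example (editorial, illustrative): with iov[] holding lengths
 * { 100, 50 } and bytes_sent == 120, the loop zeroes iov[0].iov_len
 * (consuming 100 bytes) and breaks at i == 1 with bytes_sent == 20; the
 * tail then leaves iov[1] describing the unsent 30 bytes, with iov_base
 * advanced by 20. Note the implicit assumption that bytes_sent is
 * strictly less than the total length remaining in the array; if it were
 * equal, the tail would reference iov[i] one element past the last one.
 */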
1682*7c478bd9Sstevel@tonic-gate 
1683*7c478bd9Sstevel@tonic-gate /*
1684*7c478bd9Sstevel@tonic-gate  * Copy the t_iovec array to the iovec array while taking care to see
1685*7c478bd9Sstevel@tonic-gate  * that the sum of the buffer lengths in the result is not more than
1686*7c478bd9Sstevel@tonic-gate  * INT_MAX. This function requires that T_IOV_MAX is no larger than
1687*7c478bd9Sstevel@tonic-gate  * IOV_MAX. Otherwise the resulting array is not a suitable input to
1688*7c478bd9Sstevel@tonic-gate  * writev(). If the sum of the lengths in t_iovec is zero, so is the
1689*7c478bd9Sstevel@tonic-gate  * resulting iovec.
1690*7c478bd9Sstevel@tonic-gate  */
1691*7c478bd9Sstevel@tonic-gate void
1692*7c478bd9Sstevel@tonic-gate _t_copy_tiov_to_iov(const struct t_iovec *tiov, int tiovcount,
1693*7c478bd9Sstevel@tonic-gate     struct iovec *iov, int *iovcountp)
1694*7c478bd9Sstevel@tonic-gate {
1695*7c478bd9Sstevel@tonic-gate 	int i;
1696*7c478bd9Sstevel@tonic-gate 	unsigned int nbytes_remaining;
1697*7c478bd9Sstevel@tonic-gate 
1698*7c478bd9Sstevel@tonic-gate 	nbytes_remaining = _t_bytecount_upto_intmax(tiov, tiovcount);
1699*7c478bd9Sstevel@tonic-gate 	i = 0;
1700*7c478bd9Sstevel@tonic-gate 	do {
1701*7c478bd9Sstevel@tonic-gate 		iov[i].iov_base = tiov[i].iov_base;
1702*7c478bd9Sstevel@tonic-gate 		if (tiov[i].iov_len > nbytes_remaining)
1703*7c478bd9Sstevel@tonic-gate 			iov[i].iov_len = nbytes_remaining;
1704*7c478bd9Sstevel@tonic-gate 		else
1705*7c478bd9Sstevel@tonic-gate 			iov[i].iov_len  = tiov[i].iov_len;
1706*7c478bd9Sstevel@tonic-gate 		nbytes_remaining -= iov[i].iov_len;
1707*7c478bd9Sstevel@tonic-gate 		i++;
1708*7c478bd9Sstevel@tonic-gate 	} while (nbytes_remaining != 0 && i < tiovcount);
1709*7c478bd9Sstevel@tonic-gate 
1710*7c478bd9Sstevel@tonic-gate 	*iovcountp = i;
1711*7c478bd9Sstevel@tonic-gate }
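/*
 * Illustrative sketch (editorial assumption, not original source): how
 * the scatter/gather helpers above compose in a writev()-based partial
 * send loop. "fd", "tiov" and "tiovcount" stand for hypothetical
 * caller-supplied values.
 *
 *	struct iovec iov[T_IOV_MAX];
 *	int iovcount;
 *	unsigned int remaining;
 *	ssize_t nwritten;
 *
 *	_t_copy_tiov_to_iov(tiov, tiovcount, iov, &iovcount);
 *	remaining = _t_bytecount_upto_intmax(tiov, tiovcount);
 *	while (remaining > 0) {
 *		if ((nwritten = writev(fd, iov, iovcount)) < 0)
 *			return (-1);
 *		remaining -= (unsigned int)nwritten;
 *		if (remaining > 0)
 *			_t_adjust_iov((int)nwritten, iov, &iovcount);
 *	}
 */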
1712*7c478bd9Sstevel@tonic-gate 
1713*7c478bd9Sstevel@tonic-gate /*
1714*7c478bd9Sstevel@tonic-gate  * Routine called after connection establishment on transports where
1715*7c478bd9Sstevel@tonic-gate  * connection establishment changes certain transport attributes such as
1716*7c478bd9Sstevel@tonic-gate  * TIDU_size
1717*7c478bd9Sstevel@tonic-gate  */
1718*7c478bd9Sstevel@tonic-gate int
1719*7c478bd9Sstevel@tonic-gate _t_do_postconn_sync(int fd, struct _ti_user *tiptr)
1720*7c478bd9Sstevel@tonic-gate {
1721*7c478bd9Sstevel@tonic-gate 	union {
1722*7c478bd9Sstevel@tonic-gate 		struct T_capability_req tc_req;
1723*7c478bd9Sstevel@tonic-gate 		struct T_capability_ack tc_ack;
1724*7c478bd9Sstevel@tonic-gate 	} ioctl_data;
1725*7c478bd9Sstevel@tonic-gate 
1726*7c478bd9Sstevel@tonic-gate 	void *ioctlbuf = &ioctl_data;
1727*7c478bd9Sstevel@tonic-gate 	int expected_acksize;
1728*7c478bd9Sstevel@tonic-gate 	int retlen, rval;
1729*7c478bd9Sstevel@tonic-gate 	struct T_capability_req *tc_reqp = (struct T_capability_req *)ioctlbuf;
1730*7c478bd9Sstevel@tonic-gate 	struct T_capability_ack *tc_ackp = (struct T_capability_ack *)ioctlbuf;
1731*7c478bd9Sstevel@tonic-gate 	struct T_info_ack *tiap;
1732*7c478bd9Sstevel@tonic-gate 
1733*7c478bd9Sstevel@tonic-gate 	/*
1734*7c478bd9Sstevel@tonic-gate 	 * This T_CAPABILITY_REQ should not fail, even if it is unsupported
1735*7c478bd9Sstevel@tonic-gate 	 * by the transport provider. timod will emulate it in that case.
1736*7c478bd9Sstevel@tonic-gate 	 */
1737*7c478bd9Sstevel@tonic-gate 	tc_reqp->PRIM_type = T_CAPABILITY_REQ;
1738*7c478bd9Sstevel@tonic-gate 	tc_reqp->CAP_bits1 = TC1_INFO;
1739*7c478bd9Sstevel@tonic-gate 	rval = _t_do_ioctl(fd, (char *)ioctlbuf,
1740*7c478bd9Sstevel@tonic-gate 	    (int)sizeof (struct T_capability_ack), TI_CAPABILITY, &retlen);
1741*7c478bd9Sstevel@tonic-gate 	expected_acksize = (int)sizeof (struct T_capability_ack);
1742*7c478bd9Sstevel@tonic-gate 
1743*7c478bd9Sstevel@tonic-gate 	if (rval < 0)
1744*7c478bd9Sstevel@tonic-gate 		return (-1);
1745*7c478bd9Sstevel@tonic-gate 
1746*7c478bd9Sstevel@tonic-gate 	/*
1747*7c478bd9Sstevel@tonic-gate 	 * T_capability TPI messages are extensible and can grow in the future.
1748*7c478bd9Sstevel@tonic-gate 	 * However, timod will take care of returning no more information
1749*7c478bd9Sstevel@tonic-gate 	 * than what was requested, and truncating the "extended"
1750*7c478bd9Sstevel@tonic-gate 	 * information towards the end of the T_capability_ack, if necessary.
1751*7c478bd9Sstevel@tonic-gate 	 */
1752*7c478bd9Sstevel@tonic-gate 	if (retlen != expected_acksize) {
1753*7c478bd9Sstevel@tonic-gate 		t_errno = TSYSERR;
1754*7c478bd9Sstevel@tonic-gate 		errno = EIO;
1755*7c478bd9Sstevel@tonic-gate 		return (-1);
1756*7c478bd9Sstevel@tonic-gate 	}
1757*7c478bd9Sstevel@tonic-gate 
1758*7c478bd9Sstevel@tonic-gate 	/*
1759*7c478bd9Sstevel@tonic-gate 	 * The T_info_ack part of the T_capability_ack is guaranteed to be
1760*7c478bd9Sstevel@tonic-gate 	 * present only if the corresponding TC1_INFO bit is set
1761*7c478bd9Sstevel@tonic-gate 	 */
1762*7c478bd9Sstevel@tonic-gate 	if ((tc_ackp->CAP_bits1 & TC1_INFO) == 0) {
1763*7c478bd9Sstevel@tonic-gate 		t_errno = TSYSERR;
1764*7c478bd9Sstevel@tonic-gate 		errno = EPROTO;
1765*7c478bd9Sstevel@tonic-gate 		return (-1);
1766*7c478bd9Sstevel@tonic-gate 	}
1767*7c478bd9Sstevel@tonic-gate 
1768*7c478bd9Sstevel@tonic-gate 	tiap = &tc_ackp->INFO_ack;
1769*7c478bd9Sstevel@tonic-gate 	if (tiap->PRIM_type != T_INFO_ACK) {
1770*7c478bd9Sstevel@tonic-gate 		t_errno = TSYSERR;
1771*7c478bd9Sstevel@tonic-gate 		errno = EPROTO;
1772*7c478bd9Sstevel@tonic-gate 		return (-1);
1773*7c478bd9Sstevel@tonic-gate 	}
1774*7c478bd9Sstevel@tonic-gate 
1775*7c478bd9Sstevel@tonic-gate 	/*
1776*7c478bd9Sstevel@tonic-gate 	 * Note: We sync with the latest information returned in
1777*7c478bd9Sstevel@tonic-gate 	 * "struct T_info_ack", but we deliberately do not sync the state
1778*7c478bd9Sstevel@tonic-gate 	 * here, as user-level state construction is not required - only
1779*7c478bd9Sstevel@tonic-gate 	 * the update of attributes which may have changed because of
1780*7c478bd9Sstevel@tonic-gate 	 * negotiations during connection establishment.
1781*7c478bd9Sstevel@tonic-gate 	 */
1782*7c478bd9Sstevel@tonic-gate 	assert(tiap->TIDU_size > 0);
1783*7c478bd9Sstevel@tonic-gate 	tiptr->ti_maxpsz = tiap->TIDU_size;
1784*7c478bd9Sstevel@tonic-gate 	assert(tiap->TSDU_size >= T_INVALID);
1785*7c478bd9Sstevel@tonic-gate 	tiptr->ti_tsdusize = tiap->TSDU_size;
1786*7c478bd9Sstevel@tonic-gate 	assert(tiap->ETSDU_size >= T_INVALID);
1787*7c478bd9Sstevel@tonic-gate 	tiptr->ti_etsdusize = tiap->ETSDU_size;
1788*7c478bd9Sstevel@tonic-gate 	assert(tiap->CDATA_size >= T_INVALID);
1789*7c478bd9Sstevel@tonic-gate 	tiptr->ti_cdatasize = tiap->CDATA_size;
1790*7c478bd9Sstevel@tonic-gate 	assert(tiap->DDATA_size >= T_INVALID);
1791*7c478bd9Sstevel@tonic-gate 	tiptr->ti_ddatasize = tiap->DDATA_size;
1792*7c478bd9Sstevel@tonic-gate 	tiptr->ti_prov_flag = tiap->PROVIDER_flag;
1793*7c478bd9Sstevel@tonic-gate 
1794*7c478bd9Sstevel@tonic-gate 	return (0);
1795*7c478bd9Sstevel@tonic-gate }
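/*
 * Illustrative sketch (editorial assumption, not original source):
 * connection-oriented paths such as t_connect()/t_rcvconnect() would
 * refresh the cached transport attributes once the connection is
 * established:
 *
 *	if (_t_do_postconn_sync(fd, tiptr) < 0)
 *		return (-1);
 */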
1796