xref: /titanic_51/usr/src/lib/libnsl/nsl/t_accept.c (revision 8eea8e29cc4374d1ee24c25a07f45af132db3499)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright 1993-2003 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


#pragma ident	"%Z%%M%	%I%	%E% SMI"	/* SVr4.0 1.5.2.1 */

#include "mt.h"
#include <stdlib.h>
#include <rpc/trace.h>
#include <errno.h>
#include <unistd.h>
#include <stropts.h>
#include <sys/stream.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/timod.h>
#include <xti.h>
#include <signal.h>
#include <syslog.h>
#include <assert.h>
#include "tx.h"

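/*
 * t_accept() support routine: accept the connection indication
 * identified by call->sequence, received on "fd", onto the endpoint
 * "resfd" (which may be the same endpoint as "fd").
 */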
int
_tx_accept(
	int fd,
	int resfd,
	const struct t_call *call,
	int api_semantics
)
{
	struct T_conn_res *cres;
	struct strfdinsert strfdinsert;
	int size, retval, sv_errno;
	struct _ti_user *tiptr;
	struct _ti_user *restiptr;
	sigset_t mask;
	struct strbuf ctlbuf;
	int didalloc;
	t_scalar_t conn_res_prim;

	trace3(TR_t_accept, 0, fd, resfd);
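	/*
	 * Validate both endpoints: the one on which the connect
	 * indication arrived (fd) and the one on which the connection
	 * is to be accepted (resfd).
	 */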
	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL) {
		sv_errno = errno;
		trace3(TR_t_accept, 1, fd, resfd);
		errno = sv_errno;
		return (-1);
	}
	if ((restiptr = _t_checkfd(resfd, 0, api_semantics)) == NULL) {
		sv_errno = errno;
		trace3(TR_t_accept, 1, fd, resfd);
		errno = sv_errno;
		return (-1);
	}

	/*
	 * We need to block signals to perform the I_FDINSERT operation
	 * (sending T_CONN_RES downstream) which is non-idempotent.
	 * Note that sig_mutex_lock() only defers signals, it does not
	 * block them, so interruptible syscalls could still get EINTR.
	 */
	(void) thr_sigsetmask(SIG_SETMASK, &fillset, &mask);
	sig_mutex_lock(&tiptr->ti_lock);

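	/*
	 * t_accept() is meaningful only for connection-oriented
	 * transports; reject connectionless (T_CLTS) endpoints.
	 */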
	if (tiptr->ti_servtype == T_CLTS) {
		t_errno = TNOTSUPPORT;
		sig_mutex_unlock(&tiptr->ti_lock);
		(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
		trace3(TR_t_accept, 1, fd, resfd);
		return (-1);
	}

	if (_T_IS_XTI(api_semantics)) {
		/*
		 * User level state verification only done for XTI
		 * because doing so for TLI may break existing applications.
		 *
		 * For fd == resfd, state should be T_INCON
		 * For fd != resfd,
		 *	    fd state should be T_INCON
		 *	    resfd state should be T_IDLE (bound endpoint) or
		 *	    it can be T_UNBND. The T_UNBND case is not (yet?)
		 *	    allowed in the published XTI spec but fixed by the
		 *	    corrigenda.
		 */
		if ((fd == resfd && tiptr->ti_state != T_INCON) ||
		    (fd != resfd &&
		    ((tiptr->ti_state != T_INCON) ||
		    !(restiptr->ti_state == T_IDLE ||
		    restiptr->ti_state == T_UNBND)))) {
			t_errno = TOUTSTATE;
			sig_mutex_unlock(&tiptr->ti_lock);
			(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
			trace3(TR_t_accept, 1, fd, resfd);
			return (-1);
		}

		/*
		 * XTI says:
		 * If fd != resfd, and a resfd bound to a protocol address is
		 * passed, then it had better not have a qlen > 0.
		 * That is, an endpoint bound as if it will be a listener
		 * cannot be used as an acceptor.
		 */
		if (fd != resfd && restiptr->ti_state == T_IDLE &&
		    restiptr->ti_qlen > 0) {
			t_errno = TRESQLEN;
			sig_mutex_unlock(&tiptr->ti_lock);
			(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
			trace3(TR_t_accept, 1, fd, resfd);
			return (-1);
		}

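		/*
		 * When accepting back onto the listening endpoint itself,
		 * there must not be any other outstanding connect
		 * indications pending on it.
		 */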
		if (fd == resfd && tiptr->ti_ocnt > 1) {
			t_errno = TINDOUT;
			sig_mutex_unlock(&tiptr->ti_lock);
			(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
			trace3(TR_t_accept, 1, fd, resfd);
			return (-1);
		}

		/*
		 * Note: The TRESADDR error is specified by XTI. It happens
		 * when resfd is bound and fd and resfd are not BOUND to
		 * the same protocol address. TCP obviously does allow
		 * two endpoints to bind to the same address. Why is this
		 * error needed, considering there is an address switch
		 * that can be done for the endpoint at accept time? Go
		 * figure and ask the XTI folks.
		 * We interpret this to be a transport specific error condition
		 * to be conveyed by the transport provider in T_ERROR_ACK
		 * to T_CONN_RES on transports that allow two endpoints to
		 * be bound to the same address and have trouble with the
		 * idea of accepting connections on a resfd that has a
		 * qlen > 0.
		 */
	}

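	/*
	 * When accepting on a different endpoint, that endpoint must not
	 * already have data queued for reading on its stream head.
	 */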
	if (fd != resfd) {
		if ((retval = _ioctl(resfd, I_NREAD, &size)) < 0) {
			sv_errno = errno;

			t_errno = TSYSERR;
			sig_mutex_unlock(&tiptr->ti_lock);
			(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
			trace3(TR_t_accept, 1, fd, resfd);
			errno = sv_errno;
			return (-1);
		}
		if (retval > 0) {
			t_errno = TBADF;
			sig_mutex_unlock(&tiptr->ti_lock);
			(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
			trace3(TR_t_accept, 1, fd, resfd);
			return (-1);
		}
	}

	/*
	 * Acquire ctlbuf for use in sending/receiving control part
	 * of the message.
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
		sv_errno = errno;
		sig_mutex_unlock(&tiptr->ti_lock);
		(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
		trace3(TR_t_accept, 1, fd, resfd);
		errno = sv_errno;
		return (-1);
	}

	/*
	 * In Unix98, t_accept() need not return [TLOOK] if connect or
	 * disconnect indications are present. TLI and Unix95 semantics
	 * require the error to be returned.
	 */
	if (_T_API_VER_LT(api_semantics, TX_XTI_XNS5_API)) {
		if (_t_is_event(fd, tiptr) < 0)
			goto err_out;
	}

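	/*
	 * Build the connection response primitive in the control buffer.
	 */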
	cres = (struct T_conn_res *)ctlbuf.buf;
	cres->OPT_length = call->opt.len;
	cres->OPT_offset = 0;
	cres->SEQ_number = call->sequence;
	if ((restiptr->ti_flags & V_ACCEPTOR_ID) != 0) {
		cres->ACCEPTOR_id = restiptr->acceptor_id;
		cres->PRIM_type = conn_res_prim = T_CONN_RES;
	} else {
		/* I_FDINSERT should use O_T_CONN_RES. */
		cres->ACCEPTOR_id = 0;
		cres->PRIM_type = conn_res_prim = O_T_CONN_RES;
	}

	size = (int)sizeof (struct T_conn_res);

	if (call->opt.len) {
		if (_t_aligned_copy(&ctlbuf, call->opt.len, size,
		    call->opt.buf, &cres->OPT_offset) < 0) {
			/*
			 * The aligned copy would overflow a buffer allocated
			 * based on the transport's maximum options length.
			 * Return an error.
			 */
			t_errno = TBADOPT;
			goto err_out;
		}
		size = cres->OPT_offset + cres->OPT_length;
	}

	if (call->udata.len) {
		if ((tiptr->ti_cdatasize == T_INVALID /* -2 */) ||
		    ((tiptr->ti_cdatasize != T_INFINITE /* -1 */) &&
		    (call->udata.len > (uint32_t)tiptr->ti_cdatasize))) {
			/*
			 * User data is not allowed with the connection
			 * response, or it exceeds the limits specified
			 * by the transport provider.
			 */
			t_errno = TBADDATA;
			goto err_out;
		}
	}

	ctlbuf.len = size;

	/*
	 * Assumes signals are blocked so putmsg() will not block
	 * indefinitely
	 */
	if ((restiptr->ti_flags & V_ACCEPTOR_ID) != 0) {
		if (putmsg(fd, &ctlbuf,
		    (struct strbuf *)(call->udata.len? &call->udata: NULL), 0) <
		    0) {
			if (errno == EAGAIN)
				t_errno = TFLOW;
			else
				t_errno = TSYSERR;
			goto err_out;
		}
	} else {
		strfdinsert.ctlbuf.maxlen = ctlbuf.maxlen;
		strfdinsert.ctlbuf.len = ctlbuf.len;
		strfdinsert.ctlbuf.buf = ctlbuf.buf;

		strfdinsert.databuf.maxlen = call->udata.maxlen;
		strfdinsert.databuf.len =
		    (call->udata.len? call->udata.len: -1);
		strfdinsert.databuf.buf = call->udata.buf;
		strfdinsert.fildes = resfd;
		strfdinsert.offset = (int)sizeof (t_scalar_t);
		strfdinsert.flags = 0;		/* could be EXPEDITED also */

		if (_ioctl(fd, I_FDINSERT, &strfdinsert) < 0) {
			if (errno == EAGAIN)
				t_errno = TFLOW;
			else
				t_errno = TSYSERR;
			goto err_out;
		}
	}

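	/*
	 * Wait for the transport provider's acknowledgement
	 * (T_OK_ACK or T_ERROR_ACK) of the connection response.
	 */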
	if (_t_is_ok(fd, tiptr, conn_res_prim) < 0) {
		/*
		 * At the TPI level, the error returned in a T_ERROR_ACK
		 * received in response to a T_CONN_RES when the listener
		 * and acceptor endpoints are not the same kind of endpoint
		 * has changed to a new t_errno code introduced with
		 * XTI (TPROVMISMATCH). We need to adjust the TLI error code
		 * to be the same as before.
		 */
		if (_T_IS_TLI(api_semantics) && t_errno == TPROVMISMATCH) {
			/* TLI only */
			t_errno = TBADF;
		}
		goto err_out;
	}

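	/*
	 * Advance the user-level state machine: T_ACCEPT1, T_ACCEPT2 or
	 * T_ACCEPT3 on the listening endpoint, and T_PASSCON on the
	 * accepting endpoint where applicable.
	 */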
	if (tiptr->ti_ocnt == 1) {
		if (fd == resfd) {
			_T_TX_NEXTSTATE(T_ACCEPT1, tiptr,
			    "t_accept: invalid state event T_ACCEPT1");
		} else {
			_T_TX_NEXTSTATE(T_ACCEPT2, tiptr,
			    "t_accept: invalid state event T_ACCEPT2");
			/*
			 * XXX Here we lock the resfd lock also. This
			 * is an instance of holding two locks without
			 * any enforcement of a locking hierarchy.
			 * There is potential for deadlock in incorrect
			 * or buggy programs here but this is the safer
			 * choice in this case. Correct programs will not
			 * deadlock.
			 */
			sig_mutex_lock(&restiptr->ti_lock);
			_T_TX_NEXTSTATE(T_PASSCON, restiptr,
			    "t_accept: invalid state event T_PASSCON");
			sig_mutex_unlock(&restiptr->ti_lock);
		}
	} else {
		_T_TX_NEXTSTATE(T_ACCEPT3, tiptr,
		    "t_accept: invalid state event T_ACCEPT3");
		if (fd != resfd)
			sig_mutex_lock(&restiptr->ti_lock);
		_T_TX_NEXTSTATE(T_PASSCON, restiptr,
		    "t_accept: invalid state event T_PASSCON");
		if (fd != resfd)
			sig_mutex_unlock(&restiptr->ti_lock);
	}

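	/*
	 * One outstanding connect indication has now been consumed.
	 */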
	tiptr->ti_ocnt--;
	tiptr->ti_flags &= ~TX_TQFULL_NOTIFIED;

	/*
	 * Update attributes which may have been negotiated during
	 * connection establishment for protocols where we suspect
	 * such negotiation is likely (e.g. OSI). We do not do it for
	 * all endpoints for performance reasons. Also, this code is
	 * deliberately done after user level state changes so even
	 * the (unlikely) failure case reflects a connected endpoint.
	 */
	if (restiptr->ti_tsdusize != 0) {
		if (_t_do_postconn_sync(resfd, restiptr) < 0)
			goto err_out;
	}

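	/*
	 * Success: return the control buffer (freeing it if it was
	 * allocated here), drop the lock and restore the signal mask.
	 */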
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	trace3(TR_t_accept, 1, fd, resfd);
	return (0);
	/* NOTREACHED */
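	/*
	 * Common error exit: release the control buffer, drop the lock,
	 * restore the signal mask and preserve errno across the cleanup.
	 */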
err_out:
	sv_errno = errno;
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	trace3(TR_t_accept, 1, fd, resfd);
	errno = sv_errno;
	return (-1);
}
377