xref: /titanic_44/usr/src/lib/libnsl/rpc/svc_vc.c (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
27 /* All Rights Reserved */
28 /*
29  * Portions of this source code were derived from Berkeley
30  * 4.3 BSD under license from the Regents of the University of
31  * California.
32  */
33 
34 /*
35  * svc_vc.c -- Server side for Connection Oriented RPC.
36  *
37  * Actually implements two flavors of transporter -
38  * a rendezvouser (a listener and connection establisher)
39  * and a record stream.
40  */
41 
42 #pragma ident	"%Z%%M%	%I%	%E% SMI"
43 
44 #include "mt.h"
45 #include "rpc_mt.h"
46 #include <stdio.h>
47 #include <stdlib.h>
48 #include <rpc/rpc.h>
49 #include <sys/types.h>
50 #include <rpc/trace.h>
51 #include <errno.h>
52 #include <sys/stat.h>
53 #include <sys/mkdev.h>
54 #include <sys/poll.h>
55 #include <syslog.h>
56 #include <rpc/nettype.h>
57 #include <tiuser.h>
58 #include <string.h>
59 #include <stropts.h>
60 #include <stdlib.h>
61 #include <unistd.h>
62 #include <sys/timod.h>
63 #include <limits.h>
64 
65 #ifndef MIN
66 #define	MIN(a, b)	(((a) < (b)) ? (a) : (b))
67 #endif
68 
69 #define	CLEANUP_SIZE	1024
70 
71 extern int nsvc_xdrs;
72 extern int __rpc_connmaxrec;
73 extern int __rpc_irtimeout;
74 
75 extern SVCXPRT	**svc_xports;
76 extern int	__td_setnodelay(int);
77 extern bool_t	__xdrrec_getbytes_nonblock(XDR *, enum xprt_stat *);
78 extern bool_t	__xdrrec_set_conn_nonblock(XDR *, uint32_t);
79 extern int	_t_do_ioctl(int, char *, int, int, int *);
80 extern int	__rpc_legal_connmaxrec(int);
81 /* Structure used to initialize SVC_XP_AUTH(xprt).svc_ah_ops. */
82 extern struct svc_auth_ops svc_auth_any_ops;
83 extern void	__xprt_unregister_private(const SVCXPRT *, bool_t);
84 
85 static struct xp_ops 	*svc_vc_ops(void);
86 static struct xp_ops 	*svc_vc_rendezvous_ops(void);
87 static void		svc_vc_destroy(SVCXPRT *);
88 static bool_t		svc_vc_nonblock(SVCXPRT *, SVCXPRT *);
89 static int		read_vc(SVCXPRT *, caddr_t, int);
90 static int		write_vc(SVCXPRT *, caddr_t, int);
91 static SVCXPRT		*makefd_xprt(int, uint_t, uint_t, t_scalar_t, char *);
92 static bool_t		fd_is_dead(int);
93 static void		update_nonblock_timestamps(SVCXPRT *);
94 
struct cf_rendezvous { /* kept in xprt->xp_p1 for rendezvouser */
	uint_t sendsize;	/* send buffer size handed to accepted conns */
	uint_t recvsize;	/* receive buffer size handed to accepted conns */
	struct t_call *t_call;	/* TLI call block reused by t_listen()/t_accept() */
	struct t_bind *t_bind;	/* TLI bind block reused when binding accept fds */
	t_scalar_t cf_tsdu;	/* transport TSDU size (from t_getinfo()) */
	char *cf_cache;		/* duplicate-request cache; NULL means no cache */
	int tcp_flag;		/* TRUE if TCP; enables NODELAY/keepalive paths */
	int tcp_keepalive;	/* TRUE to negotiate SO_KEEPALIVE on accept */
	int cf_connmaxrec;	/* max record size for connections (from __rpc_connmaxrec) */
};
106 
struct cf_conn {	/* kept in xprt->xp_p1 for actual connection */
	uint_t sendsize;	/* send buffer size for the record stream */
	uint_t recvsize;	/* receive buffer size for the record stream */
	enum xprt_stat strm_stat;	/* stream state (XPRT_IDLE/XPRT_DIED/...) */
	uint32_t x_id;		/* transaction id; presumably the last xid seen -- confirm */
	t_scalar_t cf_tsdu;	/* transport TSDU size (from t_getinfo()) */
	XDR xdrs;		/* XDR record stream used for sending replies */
	char *cf_cache;		/* duplicate-request cache; NULL means no cache */
	char verf_body[MAX_AUTH_BYTES];	/* backing store for xp_verf.oa_base */
	bool_t cf_conn_nonblock;	/* TRUE if fd was put in nonblocking mode */
	time_t cf_conn_nonblock_timestamp;	/* last-activity time for nonblock timeout */
};
119 
120 static int t_rcvall(int, char *, int);
121 static int t_rcvnonblock(SVCXPRT *, caddr_t, int);
122 static void svc_timeout_nonblock_xprt_and_LRU(bool_t);
123 
124 extern int __xdrrec_setfirst(XDR *);
125 extern int __xdrrec_resetfirst(XDR *);
126 extern int __is_xdrrec_first(XDR *);
127 
128 void __svc_nisplus_enable_timestamps(void);
129 void __svc_timeout_nonblock_xprt(void);
130 
131 /*
132  * This is intended as a performance improvement on the old string handling
133  * stuff by read only moving data into the  text segment.
134  * Format = <routine> : <error>
135  */
136 
137 static const char errstring[] = " %s : %s";
138 
139 /* Routine names */
140 
141 static const char svc_vc_create_str[] = "svc_vc_create";
142 static const char svc_fd_create_str[] = "svc_fd_create";
143 static const char makefd_xprt_str[] = "svc_vc_create: makefd_xprt ";
144 static const char rendezvous_request_str[] = "rendezvous_request";
145 static const char svc_vc_fderr[] =
146 		"fd > FD_SETSIZE; Use rpc_control(RPC_SVC_USE_POLLFD,...);";
147 static const char do_accept_str[] = "do_accept";
148 
149 /* error messages */
150 
151 static const char no_mem_str[] = "out of memory";
152 static const char no_tinfo_str[] = "could not get transport information";
153 static const char no_fcntl_getfl_str[] = "could not get status flags and modes";
154 static const char no_nonblock_str[] = "could not set transport non-blocking";
155 
156 /*
157  *  Records a timestamp when data comes in on a descriptor.  This is
158  *  only used if timestamps are enabled with __svc_nisplus_enable_timestamps().
159  */
160 static long *timestamps;
161 static int ntimestamps; /* keep track how many timestamps */
162 static mutex_t timestamp_lock = DEFAULTMUTEX;
163 
164 /*
165  * Used to determine whether the time-out logic should be executed.
166  */
167 static bool_t check_nonblock_timestamps = FALSE;
168 
/*
 * Free all storage owned by a rendezvous SVCXPRT: the device and netid
 * strings, the bind-address buffers (only when this handle is not a
 * copy sharing them with a parent), the cf_rendezvous private data with
 * its TLI t_call/t_bind blocks, and finally the handle itself.
 * Safe to call with xprt == NULL.
 */
void
svc_vc_xprtfree(SVCXPRT *xprt)
{
/* LINTED pointer alignment */
	SVCXPRT_EXT		*xt = xprt ? SVCEXT(xprt) : NULL;
	struct cf_rendezvous	*r = xprt ?
/* LINTED pointer alignment */
				    (struct cf_rendezvous *)xprt->xp_p1 : NULL;

	if (!xprt)
		return;

	if (xprt->xp_tp)
		free(xprt->xp_tp);
	if (xprt->xp_netid)
		free(xprt->xp_netid);
	/* copies made by svc_vc_xprtcopy() share these buffers with the parent */
	if (xt && (xt->parent == NULL)) {
		if (xprt->xp_ltaddr.buf)
			free(xprt->xp_ltaddr.buf);
		if (xprt->xp_rtaddr.buf)
			free(xprt->xp_rtaddr.buf);
	}
	if (r) {
		if (r->t_call)
			t_free((char *)r->t_call, T_CALL);
		if (r->t_bind)
			t_free((char *)r->t_bind, T_BIND);
		free((char *)r);
	}
	svc_xprt_free(xprt);
}
200 
201 /*
202  * Usage:
203  *	xprt = svc_vc_create(fd, sendsize, recvsize);
204  * Since connection streams do buffered io similar to stdio, the caller
205  * can specify how big the send and receive buffers are. If recvsize
206  * or sendsize are 0, defaults will be chosen.
207  * fd should be open and bound.
208  */
/*
 * Build (but do not register) a rendezvous SVCXPRT on the open, bound
 * fd.  Returns NULL on any failure; every error path logs via syslog
 * and releases everything allocated so far.
 */
SVCXPRT *
svc_vc_create_private(int fd, uint_t sendsize, uint_t recvsize)
{
	struct cf_rendezvous *r;
	SVCXPRT *xprt;
	struct t_info tinfo;

	trace4(TR_svc_vc_create, 0, fd, sendsize, recvsize);
	/* reject fds the fd_set/pollfd machinery cannot track */
	if (RPC_FD_NOTIN_FDSET(fd)) {
		errno = EBADF;
		t_errno = TBADF;
		(void) syslog(LOG_ERR, errstring, svc_vc_create_str,
		    svc_vc_fderr);
		trace2(TR_svc_dg_create, 1, fd);
		return ((SVCXPRT *)NULL);
	}
	if ((xprt = svc_xprt_alloc()) == (SVCXPRT *)NULL) {
		(void) syslog(LOG_ERR, errstring,
		    svc_vc_create_str, no_mem_str);
		trace2(TR_svc_vc_create, 1, fd);
		return ((SVCXPRT *)NULL);
	}
/* LINTED pointer alignment */
	svc_flags(xprt) |= SVC_RENDEZVOUS;

	r = (struct cf_rendezvous *)calloc(1, sizeof (*r));
	if (r == (struct cf_rendezvous *)NULL) {
		(void) syslog(LOG_ERR, errstring,
			svc_vc_create_str, no_mem_str);
		svc_vc_xprtfree(xprt);
		trace2(TR_svc_vc_create, 1, fd);
		return ((SVCXPRT *)NULL);
	}
	if (t_getinfo(fd, &tinfo) == -1) {
		char errorstr[100];

		__tli_sys_strerror(errorstr, sizeof (errorstr),
				t_errno, errno);
		(void) syslog(LOG_ERR, "%s : %s : %s",
			svc_vc_create_str, no_tinfo_str, errorstr);
		(void) mem_free((caddr_t)r, sizeof (*r));
		svc_vc_xprtfree(xprt);
		trace2(TR_svc_vc_create, 1, fd);
		return ((SVCXPRT *)NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	r->sendsize = __rpc_get_t_size((int)sendsize, tinfo.tsdu);
	r->recvsize = __rpc_get_t_size((int)recvsize, tinfo.tsdu);
	if ((r->sendsize == 0) || (r->recvsize == 0)) {
		syslog(LOG_ERR,
		    "svc_vc_create:  transport does not support "
		    "data transfer");
		(void) mem_free((caddr_t)r, sizeof (*r));
		svc_vc_xprtfree(xprt);
		trace2(TR_svc_vc_create, 1, fd);
		return ((SVCXPRT *)NULL);
	}

	/* TLI call block reused later by t_listen()/t_accept() */
/* LINTED pointer alignment */
	r->t_call = (struct t_call *)t_alloc(fd, T_CALL, T_ADDR | T_OPT);
	if (r->t_call == NULL) {
		(void) syslog(LOG_ERR, errstring,
			svc_vc_create_str, no_mem_str);
		(void) mem_free((caddr_t)r, sizeof (*r));
		svc_vc_xprtfree(xprt);
		trace2(TR_svc_vc_create, 1, fd);
		return ((SVCXPRT *)NULL);
	}

	/* TLI bind block reused when binding accepted endpoints */
/* LINTED pointer alignment */
	r->t_bind = (struct t_bind *)t_alloc(fd, T_BIND, T_ADDR);
	if (r->t_bind == (struct t_bind *)NULL) {
		(void) syslog(LOG_ERR, errstring,
			svc_vc_create_str, no_mem_str);
		t_free((char *)r->t_call, T_CALL);
		(void) mem_free((caddr_t)r, sizeof (*r));
		svc_vc_xprtfree(xprt);
		trace2(TR_svc_vc_create, 1, fd);
		return ((SVCXPRT *)NULL);
	}

	r->cf_tsdu = tinfo.tsdu;
	r->tcp_flag = FALSE;
	r->tcp_keepalive = FALSE;
	r->cf_connmaxrec = __rpc_connmaxrec;
	xprt->xp_fd = fd;
	xprt->xp_p1 = (caddr_t)r;
	xprt->xp_p2 = NULL;
	xprt->xp_verf = _null_auth;
	xprt->xp_ops = svc_vc_rendezvous_ops();
/* LINTED pointer alignment */
	SVC_XP_AUTH(xprt).svc_ah_ops = svc_auth_any_ops;
/* LINTED pointer alignment */
	SVC_XP_AUTH(xprt).svc_ah_private = NULL;

	trace2(TR_svc_vc_create, 1, fd);
	return (xprt);
}
309 
310 SVCXPRT *
311 svc_vc_create(int fd, uint_t sendsize, uint_t recvsize)
312 {
313 	SVCXPRT *xprt;
314 
315 	if ((xprt = svc_vc_create_private(fd, sendsize, recvsize)) != NULL)
316 		xprt_register(xprt);
317 	return (xprt);
318 }
319 
320 SVCXPRT *
321 svc_vc_xprtcopy(SVCXPRT *parent)
322 {
323 	SVCXPRT			*xprt;
324 	struct cf_rendezvous	*r, *pr;
325 	int			fd = parent->xp_fd;
326 
327 	if ((xprt = svc_xprt_alloc()) == NULL)
328 		return (NULL);
329 
330 /* LINTED pointer alignment */
331 	SVCEXT(xprt)->parent = parent;
332 /* LINTED pointer alignment */
333 	SVCEXT(xprt)->flags = SVCEXT(parent)->flags;
334 
335 	xprt->xp_fd = fd;
336 	xprt->xp_ops = svc_vc_rendezvous_ops();
337 	if (parent->xp_tp) {
338 		xprt->xp_tp = (char *)strdup(parent->xp_tp);
339 		if (xprt->xp_tp == NULL) {
340 			syslog(LOG_ERR, "svc_vc_xprtcopy: strdup failed");
341 			svc_vc_xprtfree(xprt);
342 			return (NULL);
343 		}
344 	}
345 	if (parent->xp_netid) {
346 		xprt->xp_netid = (char *)strdup(parent->xp_netid);
347 		if (xprt->xp_netid == NULL) {
348 			syslog(LOG_ERR, "svc_vc_xprtcopy: strdup failed");
349 			if (xprt->xp_tp)
350 				free((char *)xprt->xp_tp);
351 			svc_vc_xprtfree(xprt);
352 			return (NULL);
353 		}
354 	}
355 
356 	/*
357 	 * can share both local and remote address
358 	 */
359 	xprt->xp_ltaddr = parent->xp_ltaddr;
360 	xprt->xp_rtaddr = parent->xp_rtaddr; /* XXX - not used for rendezvous */
361 	xprt->xp_type = parent->xp_type;
362 	xprt->xp_verf = parent->xp_verf;
363 
364 	if ((r = (struct cf_rendezvous *)calloc(1, sizeof (*r))) == NULL) {
365 		svc_vc_xprtfree(xprt);
366 		return (NULL);
367 	}
368 	xprt->xp_p1 = (caddr_t)r;
369 /* LINTED pointer alignment */
370 	pr = (struct cf_rendezvous *)parent->xp_p1;
371 	r->sendsize = pr->sendsize;
372 	r->recvsize = pr->recvsize;
373 	r->cf_tsdu = pr->cf_tsdu;
374 	r->cf_cache = pr->cf_cache;
375 	r->tcp_flag = pr->tcp_flag;
376 	r->tcp_keepalive = pr->tcp_keepalive;
377 	r->cf_connmaxrec = pr->cf_connmaxrec;
378 /* LINTED pointer alignment */
379 	r->t_call = (struct t_call *)t_alloc(fd, T_CALL, T_ADDR | T_OPT);
380 	if (r->t_call == NULL) {
381 		svc_vc_xprtfree(xprt);
382 		return (NULL);
383 	}
384 /* LINTED pointer alignment */
385 	r->t_bind = (struct t_bind *)t_alloc(fd, T_BIND, T_ADDR);
386 	if (r->t_bind == NULL) {
387 		svc_vc_xprtfree(xprt);
388 		return (NULL);
389 	}
390 
391 	return (xprt);
392 }
393 
394 /*
395  * XXX : Used for setting flag to indicate that this is TCP
396  */
397 
398 /*ARGSUSED*/
399 int
400 __svc_vc_setflag(SVCXPRT *xprt, int flag)
401 {
402 	struct cf_rendezvous *r;
403 
404 /* LINTED pointer alignment */
405 	r = (struct cf_rendezvous *)xprt->xp_p1;
406 	r->tcp_flag = TRUE;
407 	return (1);
408 }
409 
410 /*
411  * used for the actual connection.
412  */
413 SVCXPRT *
414 svc_fd_create_private(int fd, uint_t sendsize, uint_t recvsize)
415 {
416 	struct t_info tinfo;
417 	SVCXPRT *dummy;
418 	struct netbuf tres = {0};
419 
420 	trace4(TR_svc_fd_create, 0, fd, sendsize, recvsize);
421 	if (RPC_FD_NOTIN_FDSET(fd)) {
422 		errno = EBADF;
423 		t_errno = TBADF;
424 		(void) syslog(LOG_ERR, errstring,
425 		    svc_fd_create_str, svc_vc_fderr);
426 		trace2(TR_svc_dg_create, 1, fd);
427 		return ((SVCXPRT *)NULL);
428 	}
429 	if (t_getinfo(fd, &tinfo) == -1) {
430 		char errorstr[100];
431 
432 		__tli_sys_strerror(errorstr, sizeof (errorstr),
433 				t_errno, errno);
434 		(void) syslog(LOG_ERR, "%s : %s : %s",
435 			svc_fd_create_str, no_tinfo_str, errorstr);
436 		trace2(TR_svc_fd_create, 1, fd);
437 		return ((SVCXPRT *)NULL);
438 	}
439 	/*
440 	 * Find the receive and the send size
441 	 */
442 	sendsize = __rpc_get_t_size((int)sendsize, tinfo.tsdu);
443 	recvsize = __rpc_get_t_size((int)recvsize, tinfo.tsdu);
444 	if ((sendsize == 0) || (recvsize == 0)) {
445 		syslog(LOG_ERR, errstring, svc_fd_create_str,
446 			"transport does not support data transfer");
447 		trace2(TR_svc_fd_create, 1, fd);
448 		return ((SVCXPRT *)NULL);
449 	}
450 	dummy = makefd_xprt(fd, sendsize, recvsize, tinfo.tsdu, NULL);
451 				/* NULL signifies no dup cache */
452 	/* Assign the local bind address */
453 	if (t_getname(fd, &tres, LOCALNAME) == -1)
454 		tres.len = 0;
455 	dummy->xp_ltaddr = tres;
456 	/* Fill in type of service */
457 	dummy->xp_type = tinfo.servtype;
458 	trace2(TR_svc_fd_create, 1, fd);
459 	return (dummy);
460 }
461 
462 SVCXPRT *
463 svc_fd_create(int fd, uint_t sendsize, uint_t recvsize)
464 {
465 	SVCXPRT *xprt;
466 
467 	if ((xprt = svc_fd_create_private(fd, sendsize, recvsize)) != NULL)
468 		xprt_register(xprt);
469 	return (xprt);
470 }
471 
/*
 * Free all storage owned by a connection SVCXPRT: the device and netid
 * strings, the address buffers and the xp_p2 netbuf (only when this
 * handle is not a copy sharing them with a parent), the cf_conn private
 * data with its XDR send stream, and finally the handle itself.
 * Safe to call with xprt == NULL.
 */
void
svc_fd_xprtfree(SVCXPRT *xprt)
{
/* LINTED pointer alignment */
	SVCXPRT_EXT	*xt = xprt ? SVCEXT(xprt) : NULL;
/* LINTED pointer alignment */
	struct cf_conn	*cd = xprt ? (struct cf_conn *)xprt->xp_p1 : NULL;

	if (!xprt)
		return;

	if (xprt->xp_tp)
		free(xprt->xp_tp);
	if (xprt->xp_netid)
		free(xprt->xp_netid);
	/* copies made by svc_fd_xprtcopy() share these buffers with the parent */
	if (xt && (xt->parent == NULL)) {
		if (xprt->xp_ltaddr.buf)
			free(xprt->xp_ltaddr.buf);
		if (xprt->xp_rtaddr.buf)
			free(xprt->xp_rtaddr.buf);
	}
	if (cd) {
		XDR_DESTROY(&(cd->xdrs));
		free((char *)cd);
	}
	/* xp_p2 (connection options netbuf) is also shared by copies */
	if (xt && (xt->parent == NULL) && xprt->xp_p2) {
/* LINTED pointer alignment */
		free((caddr_t)((struct netbuf *)xprt->xp_p2)->buf);
		free((caddr_t)xprt->xp_p2);
	}
	svc_xprt_free(xprt);
}
504 
/*
 * Build a connection SVCXPRT on fd: allocate the handle and its cf_conn
 * private data, create the per-connection XDR send stream (write_vc)
 * and the shared per-fd XDR receive stream (read_vc) in svc_xdrs[],
 * growing svc_xdrs[] under svc_fd_lock as needed.  "cache" is the
 * duplicate-request cache pointer (NULL = no cache).  Returns NULL on
 * any allocation failure, with everything built so far torn down.
 */
static SVCXPRT *
makefd_xprt(int fd, uint_t sendsize, uint_t recvsize, t_scalar_t tsdu,
    char *cache)
{
	SVCXPRT *xprt;
	struct cf_conn *cd;

	trace5(TR_makefd_xprt, 0, fd, sendsize, recvsize, tsdu);
	xprt = svc_xprt_alloc();
	if (xprt == (SVCXPRT *)NULL) {
		(void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
		trace2(TR_makefd_xprt, 1, fd);
		return ((SVCXPRT *)NULL);
	}
/* LINTED pointer alignment */
	svc_flags(xprt) |= SVC_CONNECTION;

	cd = (struct cf_conn *)mem_alloc(sizeof (struct cf_conn));
	if (cd == (struct cf_conn *)NULL) {
		(void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
		svc_fd_xprtfree(xprt);
		trace2(TR_makefd_xprt, 1, fd);
		return ((SVCXPRT *)NULL);
	}
	cd->sendsize = sendsize;
	cd->recvsize = recvsize;
	cd->strm_stat = XPRT_IDLE;
	cd->cf_tsdu = tsdu;
	cd->cf_cache = cache;
	cd->cf_conn_nonblock = FALSE;
	cd->cf_conn_nonblock_timestamp = 0;
	/* x_ops == NULL after xdrrec_create() signals creation failure */
	cd->xdrs.x_ops = NULL;
	xdrrec_create(&(cd->xdrs), sendsize, 0, (caddr_t)xprt,
			(int(*)())NULL, (int(*)(void *, char *, int))write_vc);
	if (cd->xdrs.x_ops == NULL) {
		(void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
		mem_free(cd, sizeof (struct cf_conn));
		svc_fd_xprtfree(xprt);
		trace2(TR_makefd_xprt, 1, fd);
		return ((SVCXPRT *)NULL);
	}

	/* svc_xdrs[] (the per-fd receive streams) is guarded by svc_fd_lock */
	rw_wrlock(&svc_fd_lock);
	if (svc_xdrs == NULL) {
		svc_xdrs = (XDR **)calloc(FD_INCREMENT,  sizeof (XDR *));
		if (svc_xdrs == NULL) {
			(void) syslog(LOG_ERR, errstring, makefd_xprt_str,
								no_mem_str);
			XDR_DESTROY(&(cd->xdrs));
			mem_free(cd, sizeof (struct cf_conn));
			svc_fd_xprtfree(xprt);
			trace2(TR_makefd_xprt, 1, fd);
			rw_unlock(&svc_fd_lock);
			return ((SVCXPRT *)NULL);
		}
		nsvc_xdrs = FD_INCREMENT;
	}

	/* grow svc_xdrs[] in FD_INCREMENT steps until it covers fd */
	while (fd >= nsvc_xdrs) {
		XDR **tmp_xdrs = svc_xdrs;
		tmp_xdrs = realloc(svc_xdrs,
				sizeof (XDR *) * (nsvc_xdrs + FD_INCREMENT));
		if (tmp_xdrs == NULL) {
			(void) syslog(LOG_ERR, errstring, makefd_xprt_str,
								no_mem_str);
			XDR_DESTROY(&(cd->xdrs));
			mem_free(cd, sizeof (struct cf_conn));
			svc_fd_xprtfree(xprt);
			trace2(TR_makefd_xprt, 1, fd);
			rw_unlock(&svc_fd_lock);
			return ((SVCXPRT *)NULL);
		}

		svc_xdrs = tmp_xdrs;
		/* initial the new array to 0 from the last allocated array */
		(void) memset(&svc_xdrs[nsvc_xdrs], 0,
					sizeof (XDR *) * FD_INCREMENT);
		nsvc_xdrs += FD_INCREMENT;
	}

	/* reuse a leftover slot from a previous connection on this fd */
	if (svc_xdrs[fd] != NULL) {
		XDR_DESTROY(svc_xdrs[fd]);
	} else if ((svc_xdrs[fd] = malloc(sizeof (XDR))) == NULL) {
		(void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
		XDR_DESTROY(&(cd->xdrs));
		mem_free(cd, sizeof (struct cf_conn));
		svc_fd_xprtfree(xprt);
		trace2(TR_makefd_xprt, 1, fd);
		rw_unlock(&svc_fd_lock);
		return ((SVCXPRT *)NULL);
	}
	(void) memset(svc_xdrs[fd], 0, sizeof (XDR));
	xdrrec_create(svc_xdrs[fd], 0, recvsize, (caddr_t)xprt,
			(int(*)(void *, char *, int))read_vc, (int(*)())NULL);
	if (svc_xdrs[fd]->x_ops == NULL) {
		free(svc_xdrs[fd]);
		svc_xdrs[fd] = NULL;
		XDR_DESTROY(&(cd->xdrs));
		mem_free(cd, sizeof (struct cf_conn));
		svc_fd_xprtfree(xprt);
		trace2(TR_makefd_xprt, 1, fd);
		rw_unlock(&svc_fd_lock);
		return ((SVCXPRT *)NULL);
	}
	rw_unlock(&svc_fd_lock);

	xprt->xp_p1 = (caddr_t)cd;
	xprt->xp_p2 = NULL;
	xprt->xp_verf.oa_base = cd->verf_body;
	xprt->xp_ops = svc_vc_ops();	/* truely deals with calls */
	xprt->xp_fd = fd;
	trace2(TR_makefd_xprt, 1, fd);
	return (xprt);
}
619 
620 SVCXPRT *
621 svc_fd_xprtcopy(SVCXPRT *parent)
622 {
623 	SVCXPRT			*xprt;
624 	struct cf_conn		*cd, *pcd;
625 
626 	if ((xprt = svc_xprt_alloc()) == NULL)
627 		return (NULL);
628 
629 /* LINTED pointer alignment */
630 	SVCEXT(xprt)->parent = parent;
631 /* LINTED pointer alignment */
632 	SVCEXT(xprt)->flags = SVCEXT(parent)->flags;
633 
634 	xprt->xp_fd = parent->xp_fd;
635 	xprt->xp_ops = svc_vc_ops();
636 	if (parent->xp_tp) {
637 		xprt->xp_tp = (char *)strdup(parent->xp_tp);
638 		if (xprt->xp_tp == NULL) {
639 			syslog(LOG_ERR, "svc_fd_xprtcopy: strdup failed");
640 			svc_fd_xprtfree(xprt);
641 			return (NULL);
642 		}
643 	}
644 	if (parent->xp_netid) {
645 		xprt->xp_netid = (char *)strdup(parent->xp_netid);
646 		if (xprt->xp_netid == NULL) {
647 			syslog(LOG_ERR, "svc_fd_xprtcopy: strdup failed");
648 			if (xprt->xp_tp)
649 				free((char *)xprt->xp_tp);
650 			svc_fd_xprtfree(xprt);
651 			return (NULL);
652 		}
653 	}
654 	/*
655 	 * share local and remote addresses with parent
656 	 */
657 	xprt->xp_ltaddr = parent->xp_ltaddr;
658 	xprt->xp_rtaddr = parent->xp_rtaddr;
659 	xprt->xp_type = parent->xp_type;
660 
661 	if ((cd = (struct cf_conn *)malloc(sizeof (struct cf_conn))) == NULL) {
662 		svc_fd_xprtfree(xprt);
663 		return (NULL);
664 	}
665 /* LINTED pointer alignment */
666 	pcd = (struct cf_conn *)parent->xp_p1;
667 	cd->sendsize = pcd->sendsize;
668 	cd->recvsize = pcd->recvsize;
669 	cd->strm_stat = pcd->strm_stat;
670 	cd->x_id = pcd->x_id;
671 	cd->cf_tsdu = pcd->cf_tsdu;
672 	cd->cf_cache = pcd->cf_cache;
673 	cd->cf_conn_nonblock = pcd->cf_conn_nonblock;
674 	cd->cf_conn_nonblock_timestamp = pcd->cf_conn_nonblock_timestamp;
675 	cd->xdrs.x_ops = NULL;
676 	xdrrec_create(&(cd->xdrs), cd->sendsize, 0, (caddr_t)xprt,
677 			(int(*)())NULL, (int(*)(void *, char *, int))write_vc);
678 	if (cd->xdrs.x_ops == NULL) {
679 		free(cd);
680 		svc_fd_xprtfree(xprt);
681 		return (NULL);
682 	}
683 	xprt->xp_verf.oa_base = cd->verf_body;
684 	xprt->xp_p1 = (char *)cd;
685 	xprt->xp_p2 = parent->xp_p2;	/* shared */
686 
687 	return (xprt);
688 }
689 
690 /*
691  * This routine is called by svc_getreqset(), when a packet is recd.
692  * The listener process creates another end point on which the actual
693  * connection is carried. It returns FALSE to indicate that it was
694  * not a rpc packet (falsely though), but as a side effect creates
695  * another endpoint which is also registered, which then always
696  * has a request ready to be served.
697  */
/* ARGSUSED1 */
static bool_t
rendezvous_request(SVCXPRT *xprt, struct rpc_msg *msg)
{
	struct cf_rendezvous *r;
	char *tpname = NULL;
	char devbuf[256];
	static void do_accept();

	trace1(TR_rendezvous_request, 0);
/* LINTED pointer alignment */
	r = (struct cf_rendezvous *)xprt->xp_p1;

again:
	/* consume the pending TLI event on the listening endpoint */
	switch (t_look(xprt->xp_fd)) {
	case T_DISCONNECT:
		(void) t_rcvdis(xprt->xp_fd, NULL);
		trace1(TR_rendezvous_request, 1);
		return (FALSE);

	case T_LISTEN:

		if (t_listen(xprt->xp_fd, r->t_call) == -1) {
			/* retry if the call was merely interrupted */
			if ((t_errno == TSYSERR) && (errno == EINTR))
				goto again;

			/* drain a disconnect that raced the listen */
			if (t_errno == TLOOK) {
				if (t_look(xprt->xp_fd) == T_DISCONNECT)
				    (void) t_rcvdis(xprt->xp_fd, NULL);
			}
			trace1(TR_rendezvous_request, 1);
			return (FALSE);
		}
		break;
	default:
		trace1(TR_rendezvous_request, 1);
		return (FALSE);
	}
	/*
	 * Now create another endpoint, and accept the connection
	 * on it.
	 */

	if (xprt->xp_tp) {
		tpname = xprt->xp_tp;
	} else {
		/*
		 * If xprt->xp_tp is NULL, then try to extract the
		 * transport protocol information from the transport
		 * protocol corresponding to xprt->xp_fd
		 */
		struct netconfig *nconf;
		tpname = devbuf;
		if ((nconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type))
				== NULL) {
			(void) syslog(LOG_ERR, errstring,
					rendezvous_request_str,
					"no suitable transport");
			goto err;
		}
		strcpy(tpname, nconf->nc_device);
		freenetconfigent(nconf);
	}

	do_accept(xprt->xp_fd, tpname, xprt->xp_netid, r->t_call, r);

err:
	trace1(TR_rendezvous_request, 1);
	return (FALSE); /* there is never an rpc msg to be processed */
}
768 
769 static void
770 do_accept(int srcfd, char *tpname, char *netid, struct t_call *tcp,
771     struct cf_rendezvous *r)
772 {
773 	int	destfd;
774 	struct t_call	t_call;
775 	struct t_call	*tcp2 = (struct t_call *)NULL;
776 	struct t_info	tinfo;
777 	SVCXPRT	*xprt = (SVCXPRT *)NULL;
778 	SVCXPRT	*xprt_srcfd = (SVCXPRT *)NULL;
779 	char *option, *option_ret;
780 	struct opthdr *opt;
781 	struct t_optmgmt optreq, optret;
782 	int *p_optval;
783 
784 	trace1(TR_do_accept, 0);
785 
786 	destfd = t_open(tpname, O_RDWR, &tinfo);
787 	if (check_nonblock_timestamps) {
788 		if (destfd == -1 && t_errno == TSYSERR && errno == EMFILE) {
789 			/*
790 			 * Since there are nonblocking connection xprts and
791 			 * too many open files, the LRU connection xprt should
792 			 * get destroyed in case an attacker has been creating
793 			 * many connections.
794 			 */
795 			mutex_lock(&svc_mutex);
796 			svc_timeout_nonblock_xprt_and_LRU(TRUE);
797 			mutex_unlock(&svc_mutex);
798 			destfd = t_open(tpname, O_RDWR, &tinfo);
799 		} else {
800 			/*
801 			 * Destroy/timeout all nonblock connection xprts
802 			 * that have not had recent activity.
803 			 * Do not destroy LRU xprt unless there are
804 			 * too many open files.
805 			 */
806 			mutex_lock(&svc_mutex);
807 			svc_timeout_nonblock_xprt_and_LRU(FALSE);
808 			mutex_unlock(&svc_mutex);
809 		}
810 	}
811 	if (destfd == -1) {
812 		char errorstr[100];
813 
814 		__tli_sys_strerror(errorstr, sizeof (errorstr), t_errno,
815 			errno);
816 		(void) syslog(LOG_ERR, "%s : %s : %s", do_accept_str,
817 				"can't open connection", errorstr);
818 		(void) t_snddis(srcfd, tcp);
819 		trace1(TR_do_accept, 1);
820 		return;
821 	} else if (destfd < 256) {
822 		int nfd;
823 
824 		nfd = _fcntl(destfd, F_DUPFD, 256);
825 		if (nfd != -1) {
826 			if (t_close(destfd) == -1) {
827 				char errorstr[100];
828 
829 				__tli_sys_strerror(errorstr, sizeof (errorstr),
830 						t_errno, errno);
831 				(void) syslog(LOG_ERR,
832 		"could not t_close() old fd %d; mem & fd leak error: %s",
833 						destfd, errorstr);
834 			}
835 			destfd = nfd;
836 			if (t_sync(destfd) == -1) {
837 				char errorstr[100];
838 
839 				__tli_sys_strerror(errorstr, sizeof (errorstr),
840 						t_errno, errno);
841 				(void) syslog(LOG_ERR,
842 				    "could not t_sync() duped fd %d: %s",
843 						destfd, errorstr);
844 				(void) t_snddis(srcfd, tcp);
845 				trace1(TR_do_accept, 1);
846 				return;
847 			}
848 		}
849 	}
850 	if (RPC_FD_NOTIN_FDSET(destfd)) {
851 		(void) syslog(LOG_ERR, errstring, do_accept_str,
852 						svc_vc_fderr);
853 		(void) t_close(destfd);
854 		(void) t_snddis(srcfd, tcp);
855 		errno = EBADF;
856 		t_errno = TBADF;
857 		trace1(TR_do_accept, 1);
858 		return;
859 	}
860 	(void) _fcntl(destfd, F_SETFD, 1); /* make it "close on exec" */
861 	if ((tinfo.servtype != T_COTS) && (tinfo.servtype != T_COTS_ORD)) {
862 		/* Not a connection oriented mode */
863 		(void) syslog(LOG_ERR, errstring, do_accept_str,
864 				"do_accept:  illegal transport");
865 		(void) t_close(destfd);
866 		(void) t_snddis(srcfd, tcp);
867 		trace1(TR_do_accept, 1);
868 		return;
869 	}
870 
871 
872 	if (t_bind(destfd, (struct t_bind *)NULL, r->t_bind) == -1) {
873 		char errorstr[100];
874 
875 		__tli_sys_strerror(errorstr, sizeof (errorstr), t_errno,
876 				errno);
877 		(void) syslog(LOG_ERR, " %s : %s : %s", do_accept_str,
878 			"t_bind failed", errorstr);
879 		(void) t_close(destfd);
880 		(void) t_snddis(srcfd, tcp);
881 		trace1(TR_do_accept, 1);
882 		return;
883 	}
884 
885 	if (r->tcp_flag)	/* if TCP, set NODELAY flag */
886 		__td_setnodelay(destfd);
887 
888 	/*
889 	 * This connection is not listening, hence no need to set
890 	 * the qlen.
891 	 */
892 
893 	/*
894 	 * XXX: The local transport chokes on its own listen
895 	 * options so we zero them for now
896 	 */
897 	t_call = *tcp;
898 	t_call.opt.len = 0;
899 	t_call.opt.maxlen = 0;
900 	t_call.opt.buf = (char *)NULL;
901 
902 	while (t_accept(srcfd, destfd, &t_call) == -1) {
903 		char errorstr[100];
904 
905 		switch (t_errno) {
906 		case TLOOK:
907 again:
908 			switch (t_look(srcfd)) {
909 			case T_CONNECT:
910 			case T_DATA:
911 			case T_EXDATA:
912 				/* this should not happen */
913 				break;
914 
915 			case T_DISCONNECT:
916 				(void) t_rcvdis(srcfd,
917 					(struct t_discon *)NULL);
918 				break;
919 
920 			case T_LISTEN:
921 				if (tcp2 == (struct t_call *)NULL)
922 /* LINTED pointer alignment */
923 					tcp2 = (struct t_call *)t_alloc(srcfd,
924 					    T_CALL, T_ADDR | T_OPT);
925 				if (tcp2 == (struct t_call *)NULL) {
926 
927 					(void) t_close(destfd);
928 					(void) t_snddis(srcfd, tcp);
929 					syslog(LOG_ERR, errstring,
930 						do_accept_str, no_mem_str);
931 					trace1(TR_do_accept, 1);
932 					return;
933 					/* NOTREACHED */
934 				}
935 				if (t_listen(srcfd, tcp2) == -1) {
936 					switch (t_errno) {
937 					case TSYSERR:
938 						if (errno == EINTR)
939 							goto again;
940 						break;
941 
942 					case TLOOK:
943 						goto again;
944 					}
945 					(void) t_free((char *)tcp2, T_CALL);
946 					(void) t_close(destfd);
947 					(void) t_snddis(srcfd, tcp);
948 					trace1(TR_do_accept, 1);
949 					return;
950 					/* NOTREACHED */
951 				}
952 				do_accept(srcfd, tpname, netid, tcp2, r);
953 				break;
954 
955 			case T_ORDREL:
956 				(void) t_rcvrel(srcfd);
957 				(void) t_sndrel(srcfd);
958 				break;
959 			}
960 			if (tcp2) {
961 				(void) t_free((char *)tcp2, T_CALL);
962 				tcp2 = (struct t_call *)NULL;
963 			}
964 			break;
965 
966 		case TBADSEQ:
967 			/*
968 			 * This can happen if the remote side has
969 			 * disconnected before the connection is
970 			 * accepted.  In this case, a disconnect
971 			 * should not be sent on srcfd (important!
972 			 * the listening fd will be hosed otherwise!).
973 			 * This error is not logged since this is an
974 			 * operational situation that is recoverable.
975 			 */
976 			(void) t_close(destfd);
977 			trace1(TR_do_accept, 1);
978 			return;
979 			/* NOTREACHED */
980 
981 		case TOUTSTATE:
982 			/*
983 			 * This can happen if the t_rcvdis() or t_rcvrel()/
984 			 * t_sndrel() put srcfd into the T_IDLE state.
985 			 */
986 			if (t_getstate(srcfd) == T_IDLE) {
987 				(void) t_close(destfd);
988 				(void) t_snddis(srcfd, tcp);
989 				trace1(TR_do_accept, 1);
990 				return;
991 			}
992 			/* else FALL THROUGH TO */
993 
994 		default:
995 			__tli_sys_strerror(errorstr, sizeof (errorstr),
996 					t_errno, errno);
997 			(void) syslog(LOG_ERR,
998 			    "cannot accept connection:  %s (current state %d)",
999 			    errorstr, t_getstate(srcfd));
1000 			(void) t_close(destfd);
1001 			(void) t_snddis(srcfd, tcp);
1002 			trace1(TR_do_accept, 1);
1003 			return;
1004 			/* NOTREACHED */
1005 		}
1006 	}
1007 
1008 	if (r->tcp_flag && r->tcp_keepalive) {
1009 		option = (char *)malloc(sizeof (struct opthdr)
1010 					+ sizeof (int));
1011 		option_ret = (char *)malloc(sizeof (struct opthdr)
1012 					+ sizeof (int));
1013 		if (option && option_ret) {
1014 			opt = (struct opthdr *)option;
1015 			opt->level = SOL_SOCKET;
1016 			opt->name  = SO_KEEPALIVE;
1017 			opt->len  = sizeof (int);
1018 			p_optval = (int *)(opt + 1);
1019 			*p_optval = SO_KEEPALIVE;
1020 			optreq.opt.maxlen = optreq.opt.len =
1021 				sizeof (struct opthdr) + sizeof (int);
1022 			optreq.opt.buf = (char *)option;
1023 			optreq.flags = T_NEGOTIATE;
1024 			optret.opt.maxlen = sizeof (struct opthdr)
1025 					+ sizeof (int);
1026 			optret.opt.buf = (char *)option_ret;
1027 			t_optmgmt(destfd, &optreq, &optret);
1028 			free(option);
1029 			free(option_ret);
1030 		} else {
1031 			if (option)
1032 				free(option);
1033 			if (option_ret)
1034 				free(option_ret);
1035 		}
1036 	}
1037 
1038 
1039 	/*
1040 	 * make a new transporter
1041 	 */
1042 	xprt = makefd_xprt(destfd, r->sendsize, r->recvsize, r->cf_tsdu,
1043 				r->cf_cache);
1044 	if (xprt == (SVCXPRT *)NULL) {
1045 		/*
1046 		 * makefd_xprt() returns a NULL xprt only when
1047 		 * it's out of memory.
1048 		 */
1049 		goto memerr;
1050 	}
1051 
1052 	/*
1053 	 * Copy the new local and remote bind information
1054 	 */
1055 
1056 	xprt->xp_rtaddr.len = tcp->addr.len;
1057 	xprt->xp_rtaddr.maxlen = tcp->addr.len;
1058 	if ((xprt->xp_rtaddr.buf = malloc(tcp->addr.len)) == NULL)
1059 		goto memerr;
1060 	memcpy(xprt->xp_rtaddr.buf, tcp->addr.buf, tcp->addr.len);
1061 
1062 	if (strcmp(netid, "tcp") == 0) {
1063 		xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_in);
1064 		if ((xprt->xp_ltaddr.buf =
1065 			malloc(xprt->xp_ltaddr.maxlen)) == NULL)
1066 			goto memerr;
1067 		if (t_getname(destfd, &xprt->xp_ltaddr, LOCALNAME) < 0) {
1068 		    (void) syslog(LOG_ERR,
1069 				"do_accept: t_getname for tcp failed!");
1070 			goto xprt_err;
1071 		}
1072 	} else if (strcmp(netid, "tcp6") == 0) {
1073 		xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_in6);
1074 		if ((xprt->xp_ltaddr.buf =
1075 			malloc(xprt->xp_ltaddr.maxlen)) == NULL)
1076 			goto memerr;
1077 		if (t_getname(destfd, &xprt->xp_ltaddr, LOCALNAME) < 0) {
1078 			(void) syslog(LOG_ERR,
1079 				"do_accept: t_getname for tcp6 failed!");
1080 			goto xprt_err;
1081 		}
1082 	}
1083 
1084 	xprt->xp_tp = strdup(tpname);
1085 	xprt->xp_netid = strdup(netid);
1086 	if ((xprt->xp_tp == (char *)NULL) ||
1087 	    (xprt->xp_netid == (char *)NULL)) {
1088 		goto memerr;
1089 	}
1090 	if (tcp->opt.len > 0) {
1091 		struct netbuf *netptr;
1092 
1093 		xprt->xp_p2 = malloc(sizeof (struct netbuf));
1094 
1095 		if (xprt->xp_p2 != (char *)NULL) {
1096 /* LINTED pointer alignment */
1097 			netptr = (struct netbuf *)xprt->xp_p2;
1098 
1099 			netptr->len = tcp->opt.len;
1100 			netptr->maxlen = tcp->opt.len;
1101 			if ((netptr->buf = malloc(tcp->opt.len)) == NULL)
1102 				goto memerr;
1103 			memcpy(netptr->buf, tcp->opt.buf, tcp->opt.len);
1104 		} else
1105 			goto memerr;
1106 	}
1107 /*	(void) ioctl(destfd, I_POP, (char *)NULL);    */
1108 
1109 	/*
1110 	 * If a nonblocked connection fd has been requested,
1111 	 * perform the necessary operations.
1112 	 */
1113 	xprt_srcfd = svc_xports[srcfd];
1114 	if (((struct cf_rendezvous *)(xprt_srcfd->xp_p1))->cf_connmaxrec) {
1115 		if (!svc_vc_nonblock(xprt_srcfd, xprt))
1116 			goto xprt_err;
1117 	}
1118 
1119 	/*
1120 	 * Copy the call back declared for the service to the current
1121 	 * connection
1122 	 */
1123 	xprt->xp_closeclnt = xprt_srcfd->xp_closeclnt;
1124 	xprt_register(xprt);
1125 
1126 	trace1(TR_do_accept, 1);
1127 	return;
1128 
1129 memerr:
1130 	(void) syslog(LOG_ERR, errstring, do_accept_str, no_mem_str);
1131 xprt_err:
1132 	if (xprt)
1133 		svc_vc_destroy(xprt);
1134 	(void) t_close(destfd);
1135 	trace1(TR_do_accept, 1);
1136 	return;
1137 
1138 }
1139 
1140 
1141 /*
1142  * This routine performs the necessary fcntl() operations to create
1143  * a nonblocked connection fd.
1144  * It also adjusts the sizes and allocates the buffer
1145  * for the nonblocked operations, and updates the associated
1146  * timestamp field in struct cf_conn for timeout bookkeeping.
1147  */
1148 static bool_t
1149 svc_vc_nonblock(SVCXPRT *xprt_rendezvous, SVCXPRT *xprt_conn)
1150 {
1151 	int nn;
1152 	int fdconn = xprt_conn->xp_fd;
1153 	struct cf_rendezvous *r =
1154 		(struct cf_rendezvous *)xprt_rendezvous->xp_p1;
1155 	struct cf_conn *cd = (struct cf_conn *)xprt_conn->xp_p1;
1156 	uint32_t maxrecsz;
1157 
1158 	if ((nn = fcntl(fdconn, F_GETFL, 0)) < 0) {
1159 		(void) syslog(LOG_ERR, "%s : %s : %m", do_accept_str,
1160 			    no_fcntl_getfl_str);
1161 		return (FALSE);
1162 	}
1163 
1164 	if (fcntl(fdconn, F_SETFL, nn|O_NONBLOCK) != 0) {
1165 		(void) syslog(LOG_ERR, "%s : %s : %m", do_accept_str,
1166 				no_nonblock_str);
1167 		return (FALSE);
1168 	}
1169 
1170 	cd->cf_conn_nonblock = TRUE;
1171 	/*
1172 	 * If the max fragment size has not been set via
1173 	 * rpc_control(), use the default.
1174 	 */
1175 	if ((maxrecsz = r->cf_connmaxrec) == 0)
1176 		maxrecsz = r->recvsize;
1177 	/* Set XDR stream to use non-blocking semantics. */
1178 	if (__xdrrec_set_conn_nonblock(svc_xdrs[fdconn], maxrecsz)) {
1179 		check_nonblock_timestamps = TRUE;
1180 		update_nonblock_timestamps(xprt_conn);
1181 		return (TRUE);
1182 	}
1183 	return (FALSE);
1184 }
1185 
/*
 * Status callback for a rendezvous (listener) transport.
 * A rendezvouser never has buffered requests, so it is always idle.
 */
/* ARGSUSED */
static enum xprt_stat
rendezvous_stat(SVCXPRT *xprt)
{
	trace1(TR_rendezvous_stat, 0);
	trace1(TR_rendezvous_stat, 1);
	return (XPRT_IDLE);
}
1194 
/*
 * Destroy a transport handle (xp_ops destroy entry point).
 * Serializes with other destroy/timeout activity via svc_mutex and
 * takes the opportunity to reap timed-out non-blocking connections.
 */
static void
svc_vc_destroy(SVCXPRT *xprt)
{
	trace1(TR_svc_vc_destroy, 0);
	mutex_lock(&svc_mutex);
	_svc_vc_destroy_private(xprt, TRUE);
	/* Also time out any idle non-blocking connection fds. */
	(void) svc_timeout_nonblock_xprt_and_LRU(FALSE);
	mutex_unlock(&svc_mutex);
	trace1(TR_svc_vc_destroy, 1);
}
1205 
/*
 * Real destruction work for a connection or rendezvous transport:
 * run the close callback (if any), unregister the xprt, close the
 * TLI endpoint, clear its activity timestamp, and free the handle.
 *
 * lock_not_held is passed through to __xprt_unregister_private();
 * callers that already hold svc_fd_lock (the timeout/purge scans)
 * pass FALSE, svc_vc_destroy() passes TRUE.
 */
void
_svc_vc_destroy_private(SVCXPRT *xprt, bool_t lock_not_held)
{
	if (svc_mt_mode != RPC_SVC_MT_NONE) {
		/* Always operate on the parent handle in MT modes. */
/* LINTED pointer alignment */
		if (SVCEXT(xprt)->parent)
/* LINTED pointer alignment */
			xprt = SVCEXT(xprt)->parent;
/* LINTED pointer alignment */
		svc_flags(xprt) |= SVC_DEFUNCT;
		/* Defer actual destruction until the last reference drops. */
/* LINTED pointer alignment */
		if (SVCEXT(xprt)->refcnt > 0)
			return;
	}

	/* Notify the application that this client connection is gone. */
	if (xprt->xp_closeclnt != NULL) {
		svc_errorhandler_t cb = xprt->xp_closeclnt;

		/*
		 * Reset the pointer here to avoid reentrance on the same
		 * SVCXPRT handle.
		 */
		xprt->xp_closeclnt = NULL;
		cb(xprt, (xprt->xp_rtaddr.len != 0));
	}

	__xprt_unregister_private(xprt, lock_not_held);
	t_close(xprt->xp_fd);

	/* Forget any last-activity timestamp recorded for this fd. */
	mutex_lock(&timestamp_lock);
	if (timestamps && xprt->xp_fd < ntimestamps) {
		timestamps[xprt->xp_fd] = 0;
	}
	mutex_unlock(&timestamp_lock);

	if (svc_mt_mode != RPC_SVC_MT_NONE) {
		svc_xprt_destroy(xprt);
	} else {
		/* Return the handle to the appropriate free list. */
/* LINTED pointer alignment */
		if (svc_type(xprt) == SVC_RENDEZVOUS)
			svc_vc_xprtfree(xprt);
		else
			svc_fd_xprtfree(xprt);
	}
}
1251 
1252 /*ARGSUSED*/
1253 static bool_t
1254 svc_vc_control(SVCXPRT *xprt, const uint_t rq, void *in)
1255 {
1256 	trace3(TR_svc_vc_control, 0, xprt, rq);
1257 	switch (rq) {
1258 	case SVCSET_RECVERRHANDLER:
1259 		/*  00-07-18 */
1260 		xprt->xp_closeclnt = (svc_errorhandler_t)in;
1261 		return (TRUE);
1262 	case SVCGET_RECVERRHANDLER:
1263 		/*  00-07-18 */
1264 		*(svc_errorhandler_t *)in = xprt->xp_closeclnt;
1265 		return (TRUE);
1266 	case SVCGET_XID:
1267 		if (xprt->xp_p1 == NULL) {
1268 			trace1(TR_svc_vc_control, 1);
1269 			return (FALSE);
1270 		} else {
1271 			*(uint32_t *)in =
1272 			/* LINTED pointer alignment */
1273 			((struct cf_conn *)(xprt->xp_p1))->x_id;
1274 			trace1(TR_svc_vc_control, 1);
1275 			return (TRUE);
1276 		}
1277 	default:
1278 		trace1(TR_svc_vc_control, 1);
1279 		return (FALSE);
1280 	}
1281 }
1282 
1283 static bool_t
1284 rendezvous_control(SVCXPRT *xprt, const uint_t rq, void *in)
1285 {
1286 	struct cf_rendezvous *r;
1287 	uint32_t tmp_uint32;
1288 	int tmp;
1289 
1290 	trace3(TR_rendezvous_control, 0, xprt, rq);
1291 	switch (rq) {
1292 	case SVCSET_RECVERRHANDLER:
1293 		/*  00-07-18 */
1294 		xprt->xp_closeclnt = (svc_errorhandler_t)in;
1295 		return (TRUE);
1296 	case SVCGET_RECVERRHANDLER:
1297 		/*  00-07-18 */
1298 		*(svc_errorhandler_t *)in = xprt->xp_closeclnt;
1299 		return (TRUE);
1300 	case SVCSET_KEEPALIVE:
1301 		r = (struct cf_rendezvous *)xprt->xp_p1;
1302 		if (r->tcp_flag) {
1303 			r->tcp_keepalive = (int)(intptr_t)in;
1304 			return (TRUE);
1305 		} else {
1306 			return (FALSE);
1307 		}
1308 	case SVCSET_CONNMAXREC:
1309 		/*
1310 		 * Override the default maximum record size, set via
1311 		 * rpc_control(), for this connection. Only appropriate
1312 		 * for connection oriented transports, but is ignored for
1313 		 * the connectionless case, so no need to check the
1314 		 * connection type here.
1315 		 */
1316 		r = (struct cf_rendezvous *)xprt->xp_p1;
1317 		tmp = __rpc_legal_connmaxrec(*(int *)in);
1318 		if (r != 0 && tmp >= 0) {
1319 			r->cf_connmaxrec = tmp;
1320 			return (TRUE);
1321 		} else {
1322 			return (FALSE);
1323 		}
1324 	case SVCGET_CONNMAXREC:
1325 		r = (struct cf_rendezvous *)xprt->xp_p1;
1326 		if (r != 0) {
1327 			*(int *)in = r->cf_connmaxrec;
1328 			return (TRUE);
1329 		} else {
1330 			return (FALSE);
1331 		}
1332 	case SVCGET_XID:	/* fall through for now */
1333 	default:
1334 		trace1(TR_rendezvous_control, 1);
1335 		return (FALSE);
1336 	}
1337 }
1338 
1339 /*
1340  * All read operations timeout after 35 seconds.
1341  * A timeout is fatal for the connection.
1342  * update_timestamps() is used by nisplus operations,
1343  * update_nonblock_timestamps() is used for nonblocked
1344  * connection fds.
1345  */
1346 #define	WAIT_PER_TRY	35000	/* milliseconds */
1347 
/*
 * Record the current time as the last-activity timestamp for fd,
 * growing the timestamps[] array in FD_INCREMENT steps if fd is
 * beyond its current size.  A no-op unless timestamping has been
 * enabled via __svc_nisplus_enable_timestamps().
 */
static void
update_timestamps(int fd)
{
	mutex_lock(&timestamp_lock);
	if (timestamps) {
		struct timeval tv;

		gettimeofday(&tv, NULL);
		/* Grow the array until it can be indexed by fd. */
		while (fd >= ntimestamps) {
			long *tmp_timestamps = timestamps;

			/* allocate more timestamps */
			tmp_timestamps = realloc(timestamps,
				sizeof (long) *
				(ntimestamps + FD_INCREMENT));
			if (tmp_timestamps == NULL) {
				/* Original array is still valid; keep it. */
				mutex_unlock(&timestamp_lock);
				syslog(LOG_ERR,
					"update_timestamps: out of memory");
				return;
			}

			timestamps = tmp_timestamps;
			/* Zero the newly added slots. */
			(void) memset(&timestamps[ntimestamps], 0,
				sizeof (long) * FD_INCREMENT);
			ntimestamps += FD_INCREMENT;
		}
		timestamps[fd] = tv.tv_sec;
	}
	mutex_unlock(&timestamp_lock);
}
1379 
1380 static  void
1381 update_nonblock_timestamps(SVCXPRT *xprt_conn)
1382 {
1383 	struct timeval tv;
1384 	struct cf_conn *cd = (struct cf_conn *)xprt_conn->xp_p1;
1385 
1386 	gettimeofday(&tv, NULL);
1387 	cd->cf_conn_nonblock_timestamp = tv.tv_sec;
1388 }
1389 
1390 /*
1391  * reads data from the vc conection.
1392  * any error is fatal and the connection is closed.
1393  * (And a read of zero bytes is a half closed stream => error.)
1394  */
static int
read_vc(SVCXPRT *xprt, caddr_t buf, int len)
{
	int fd = xprt->xp_fd;
	XDR *xdrs = svc_xdrs[fd];
	struct pollfd pfd;
	int ret;

	trace2(TR_read_vc, 0, len);

	/*
	 * Make sure the connection is not already dead.
	 */
/* LINTED pointer alignment */
	if (svc_failed(xprt)) {
		trace1(TR_read_vc, 1);
		return (-1);
	}

/* LINTED pointer alignment */
	if (((struct cf_conn *)(xprt->xp_p1))->cf_conn_nonblock) {
		/*
		 * For nonblocked reads, only update the
		 * timestamps to record the activity so the
		 * connection will not be timedout.
		 * Up to "len" bytes are requested.
		 * If fewer than "len" bytes are received, the
		 * connection is poll()ed again.
		 * The poll() for the connection fd is performed
		 * in the main poll() so that all outstanding fds
		 * are polled rather than just the vc connection.
		 * Polling on only the vc connection until the entire
		 * fragment has been read can be exploited in
		 * a Denial of Service Attack such as telnet <host> 111.
		 */
		if ((len = t_rcvnonblock(xprt, buf, len)) >= 0) {
			if (len > 0) {
				update_timestamps(fd);
				update_nonblock_timestamps(xprt);
			}
			trace1(TR_read_vc, 1);
			return (len);
		} else {
			/* Any negative return is fatal for the connection. */
			goto fatal_err;
		}
	}

	/*
	 * Blocking path: wait (up to WAIT_PER_TRY ms per poll) for data.
	 * NOTE(review): the poll is skipped when __is_xdrrec_first() is
	 * true — presumably at the start of a new record, when data is
	 * already known to be pending; confirm against the XDR layer.
	 */
	if (!__is_xdrrec_first(xdrs)) {

		pfd.fd = fd;
		pfd.events = MASKVAL;

		do {
			if ((ret = poll(&pfd, 1, WAIT_PER_TRY)) <= 0) {
				/*
				 * If errno is EINTR, ERESTART, or EAGAIN
				 * ignore error and repeat poll
				 */
				if (ret < 0 && (errno == EINTR ||
				    errno == ERESTART || errno == EAGAIN))
					continue;
				/* Timeout or hard poll error is fatal. */
				goto fatal_err;
			}
		} while (pfd.revents == 0);
		if (pfd.revents & POLLNVAL)
			goto fatal_err;
	}
	__xdrrec_resetfirst(xdrs);
	if ((len = t_rcvall(fd, buf, len)) > 0) {
		update_timestamps(fd);
		trace1(TR_read_vc, 1);
		return (len);
	}

fatal_err:
	/* Mark both the record stream and the xprt as dead. */
/* LINTED pointer alignment */
	((struct cf_conn *)(xprt->xp_p1))->strm_stat = XPRT_DIED;
/* LINTED pointer alignment */
	svc_flags(xprt) |= SVC_FAILED;
	trace1(TR_read_vc, 1);
	return (-1);
}
1476 
1477 /*
1478  * Requests up to "len" bytes of data.
1479  * Returns number of bytes actually received, or error indication.
1480  */
/*
 * Non-blocking receive: requests up to "len" bytes with a single
 * t_rcv().  Returns the byte count received, 0 if no data was
 * available (TNODATA), or -1 on a fatal condition.
 */
static int
t_rcvnonblock(SVCXPRT *xprt, caddr_t buf, int len)
{
	int fd = xprt->xp_fd;
	struct cf_conn *cd = (struct cf_conn *)(xprt->xp_p1);
	int flag;
	int res;

	trace3(TR_t_rcvnonblock, 0, fd, len);
	res = t_rcv(fd, buf, (unsigned)len, &flag);
	if (res == -1) {
		switch (t_errno) {
		case TLOOK:
			/* Consume the pending event; res stays -1 (fatal). */
			switch (t_look(fd)) {
			case T_DISCONNECT:
				t_rcvdis(fd, NULL);
				break;
			case T_ORDREL:
				/* Peer sent an orderly release; reciprocate. */
				t_rcvrel(fd);
				(void) t_sndrel(fd);
				break;
			default:
				break;
			}
			break;
		case TNODATA:
			/*
			 * Either poll() lied, or the xprt/fd was closed and
			 * re-opened under our feet. Return 0, so that we go
			 * back to waiting for data.
			 */
			res = 0;
			break;
		/* Should handle TBUFOVFLW TSYSERR ? */
		default:
			break;
		}
	}
	trace2(TR_t_rcvnonblock, 1, fd);
	return (res);
}
1522 
1523 /*
1524  * Timeout out nonblocked connection fds
1525  * If there has been no activity on the fd for __rpc_irtimeout
1526  * seconds, timeout the fd  by destroying its xprt.
1527  * If the caller gets an EMFILE error, the caller may also request
1528  * that the least busy xprt gets destroyed as well.
1529  * svc_thr_mutex is held when this is called.
1530  * svc_mutex is held when this is called.
1531  */
static void
svc_timeout_nonblock_xprt_and_LRU(bool_t destroy_lru)
{
	SVCXPRT *xprt;
	SVCXPRT *dead_xprt[CLEANUP_SIZE];
	SVCXPRT *candidate_xprt = NULL;
	struct cf_conn *cd;
	int i, fd_idx = 0, dead_idx = 0;
	struct timeval now;
	time_t lasttime, maxctime = 0;
	extern rwlock_t svc_lock;
	extern rwlock_t svc_fd_lock;

	/* Nothing to do until a non-blocking connection has existed. */
	if (!check_nonblock_timestamps)
		return;

	gettimeofday(&now, NULL);
	if (svc_xports == NULL)
		return;
	/*
	 * Hold svc_fd_lock to protect
	 * svc_xports, svc_maxpollfd, svc_max_pollfd
	 */
	rw_wrlock(&svc_fd_lock);
	while (1) {
		/*
		 * Timeout upto CLEANUP_SIZE connection fds per
		 * iteration for the while(1) loop
		 */
		for (dead_idx = 0; fd_idx < svc_max_pollfd; fd_idx++) {
			if ((xprt = svc_xports[fd_idx]) == NULL) {
				continue;
			}
			/* Only look at connection fds */
/* LINTED pointer alignment */
			if (svc_type(xprt) != SVC_CONNECTION) {
				continue;
			}
			cd = (struct cf_conn *)xprt->xp_p1;
			if (!cd->cf_conn_nonblock)
				continue;
			/* Idle time since the connection's last activity. */
			lasttime = now.tv_sec - cd->cf_conn_nonblock_timestamp;
			if (lasttime >= __rpc_irtimeout &&
			    __rpc_irtimeout != 0) {
				/* Enter in timedout/dead array */
				dead_xprt[dead_idx++] = xprt;
				if (dead_idx >= CLEANUP_SIZE)
					break;
			} else
			if (lasttime > maxctime) {
				/* Possible LRU xprt */
				candidate_xprt = xprt;
				maxctime = lasttime;
			}
		}

		for (i = 0; i < dead_idx; i++) {
			/* Still holding svc_fd_lock */
			_svc_vc_destroy_private(dead_xprt[i], FALSE);
		}

		/*
		 * If all the nonblocked fds have been checked, we're done.
		 */
		if (fd_idx++ >= svc_max_pollfd)
			break;
	}
	/*
	 * On EMFILE the caller may request that the least-recently-used
	 * connection be destroyed as well, to free a descriptor.
	 */
	if ((destroy_lru) && (candidate_xprt != (SVCXPRT *)NULL)) {
		_svc_vc_destroy_private(candidate_xprt, FALSE);
	}
	rw_unlock(&svc_fd_lock);
}
1603 /*
1604  * Receive the required bytes of data, even if it is fragmented.
1605  */
static int
t_rcvall(int fd, char *buf, int len)
{
	int flag;
	int final = 0;
	int res;

	trace3(TR_t_rcvall, 0, fd, len);
	/*
	 * Keep reading while the transport reports more data for this
	 * TSDU (T_MORE) and the caller's request is not yet satisfied.
	 */
	do {
		res = t_rcv(fd, buf, (unsigned)len, &flag);
		if (res == -1) {
			if (t_errno == TLOOK) {
				/* Consume the pending asynchronous event. */
				switch (t_look(fd)) {
				case T_DISCONNECT:
					t_rcvdis(fd, NULL);
					break;
				case T_ORDREL:
					/* Acknowledge the orderly release. */
					t_rcvrel(fd);
					(void) t_sndrel(fd);
					break;
				default:
					break;
				}
			}
			break;
		}
		final += res;
		buf += res;
		len -= res;
	} while (len && (flag & T_MORE));
	trace2(TR_t_rcvall, 1, fd);
	/* -1 on error; otherwise the total number of bytes received. */
	return (res == -1 ? -1 : final);
}
1639 
1640 /*
1641  * writes data to the vc connection.
1642  * Any error is fatal and the connection is closed.
1643  */
static int
write_vc(SVCXPRT *xprt, caddr_t buf, int len)
{
	int i, cnt;
	int flag;
	int maxsz;
	int nonblock;
	struct pollfd pfd;
	int ret;

	trace2(TR_write_vc, 0, len);
/* LINTED pointer alignment */
	maxsz = ((struct cf_conn *)(xprt->xp_p1))->cf_tsdu;
/* LINTED pointer alignment */
	nonblock = ((struct cf_conn *)(xprt->xp_p1))->cf_conn_nonblock;
	/*
	 * For non-blocking connections with no TSDU limit, force the
	 * chunked path below so partial t_snd() results are handled.
	 */
	if (nonblock && maxsz <= 0)
		maxsz = len;
	/* No TSDU limit (0 or -1): send everything in one t_snd(). */
	if ((maxsz == 0) || (maxsz == -1)) {
		if ((len = t_snd(xprt->xp_fd, buf, (unsigned)len,
				(int)0)) == -1) {
			if (t_errno == TLOOK) {
				/* Drain the pending event before dying. */
				switch (t_look(xprt->xp_fd)) {
				case T_DISCONNECT:
					t_rcvdis(xprt->xp_fd, NULL);
					break;
				case T_ORDREL:
					t_rcvrel(xprt->xp_fd);
					(void) t_sndrel(xprt->xp_fd);
					break;
				default:
					break;
				}
			}
			/* Any send failure is fatal for the connection. */
/* LINTED pointer alignment */
			((struct cf_conn *)(xprt->xp_p1))->strm_stat
					= XPRT_DIED;
/* LINTED pointer alignment */
			svc_flags(xprt) |= SVC_FAILED;
		}
		trace1(TR_write_vc, 1);
		return (len);
	}

	/*
	 * Setup for polling. We want to be able to write normal
	 * data to the transport
	 */
	pfd.fd = xprt->xp_fd;
	pfd.events = POLLWRNORM;

	/*
	 * This for those transports which have a max size for data,
	 * and for the non-blocking case, where t_snd() may send less
	 * than requested.
	 */
	for (cnt = len, i = 0; cnt > 0; cnt -= i, buf += i) {
		/* T_MORE marks all but the final chunk of the TSDU. */
		flag = cnt > maxsz ? T_MORE : 0;
		if ((i = t_snd(xprt->xp_fd, buf,
			(unsigned)MIN(cnt, maxsz), flag)) == -1) {
			if (t_errno == TLOOK) {
				/* Disconnect/release event: fatal below. */
				switch (t_look(xprt->xp_fd)) {
				case T_DISCONNECT:
					t_rcvdis(xprt->xp_fd, NULL);
					break;
				case T_ORDREL:
					t_rcvrel(xprt->xp_fd);
					break;
				default:
					break;
				}
			} else if (t_errno == TFLOW) {
				/* Try again */
				i = 0;
				/* Wait till we can write to the transport */
				do {
				    if ((ret = poll(&pfd, 1,
							WAIT_PER_TRY)) < 0) {
					/*
					 * If errno is ERESTART, or
					 * EAGAIN ignore error and repeat poll
					 */
					if (errno == ERESTART ||
					    errno == EAGAIN)
						continue;
					else
						goto fatal_err;
				    }
				} while (pfd.revents == 0);
				if (pfd.revents & (POLLNVAL | POLLERR |
						    POLLHUP))
					goto fatal_err;
				/* Transport writable again; retry the send. */
				continue;
			}
			/* TLOOK and all other errors fall through here. */
fatal_err:
/* LINTED pointer alignment */
			((struct cf_conn *)(xprt->xp_p1))->strm_stat
					= XPRT_DIED;
/* LINTED pointer alignment */
			svc_flags(xprt) |= SVC_FAILED;
			trace1(TR_write_vc, 1);
			return (-1);
		}
	}
	trace1(TR_write_vc, 1);
	return (len);
}
1749 
1750 static enum xprt_stat
1751 svc_vc_stat(SVCXPRT *xprt)
1752 {
1753 /* LINTED pointer alignment */
1754 	SVCXPRT *parent = SVCEXT(xprt)->parent ? SVCEXT(xprt)->parent : xprt;
1755 
1756 	trace1(TR_svc_vc_stat, 0);
1757 /* LINTED pointer alignment */
1758 	if (svc_failed(parent) || svc_failed(xprt)) {
1759 		trace1(TR_svc_vc_stat, 1);
1760 		return (XPRT_DIED);
1761 	}
1762 	if (! xdrrec_eof(svc_xdrs[xprt->xp_fd])) {
1763 		trace1(TR_svc_vc_stat, 1);
1764 		return (XPRT_MOREREQS);
1765 	}
1766 	/*
1767 	 * xdrrec_eof could have noticed that the connection is dead, so
1768 	 * check status again.
1769 	 */
1770 /* LINTED pointer alignment */
1771 	if (svc_failed(parent) || svc_failed(xprt)) {
1772 		trace1(TR_svc_vc_stat, 1);
1773 		return (XPRT_DIED);
1774 	}
1775 	trace1(TR_svc_vc_stat, 1);
1776 	return (XPRT_IDLE);
1777 }
1778 
1779 
1780 
/*
 * Receive and decode the next call message from the connection.
 * For non-blocking connections the entire record must be buffered
 * before decoding; for blocking connections data is pulled through
 * the XDR record stream on the fly.
 */
static bool_t
svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg)
{
/* LINTED pointer alignment */
	struct cf_conn *cd = (struct cf_conn *)(xprt->xp_p1);
	XDR *xdrs = svc_xdrs[xprt->xp_fd];

	trace1(TR_svc_vc_recv, 0);
	xdrs->x_op = XDR_DECODE;

	if (cd->cf_conn_nonblock) {
		/* Get the next input */
		if (!__xdrrec_getbytes_nonblock(xdrs, &cd->strm_stat)) {
			/*
			 * The entire record has not been received.
			 * If the xprt has died, pass it along in svc_flags.
			 * Return FALSE; For nonblocked vc connection,
			 * xdr_callmsg() is called only after the entire
			 * record has been received.  For blocked vc
			 * connection, the data is received on the fly as it
			 * is being processed through the xdr routines.
			 */
			if (cd->strm_stat == XPRT_DIED)
				svc_flags(xprt) |= SVC_FAILED;
			trace1(TR_svc_vc_recv, 1);
			return (FALSE);
		}
	} else {
		/* Skip any unconsumed remainder of the previous record. */
		if (!xdrrec_skiprecord(xdrs)) {
			trace1(TR_svc_vc_recv, 1);
			return (FALSE);
		}
		__xdrrec_setfirst(xdrs);
	}

	if (xdr_callmsg(xdrs, msg)) {
		/* Remember the transaction id for the eventual reply. */
		cd->x_id = msg->rm_xid;
		trace1(TR_svc_vc_recv, 1);
		return (TRUE);
	}

	/*
	 * If a non-blocking connection, drop it when message decode fails.
	 * We are either under attack, or we're talking to a broken client.
	 */
	if (cd->cf_conn_nonblock) {
		svc_flags(xprt) |= SVC_FAILED;
	}

	trace1(TR_svc_vc_recv, 1);
	return (FALSE);
}
1833 
1834 static bool_t
1835 svc_vc_getargs(SVCXPRT *xprt, xdrproc_t xdr_args, caddr_t args_ptr)
1836 {
1837 	bool_t dummy1;
1838 
1839 	trace1(TR_svc_vc_getargs, 0);
1840 
1841 /* LINTED pointer alignment */
1842 	dummy1 = SVCAUTH_UNWRAP(&SVC_XP_AUTH(xprt), svc_xdrs[xprt->xp_fd],
1843 							xdr_args, args_ptr);
1844 	if (svc_mt_mode != RPC_SVC_MT_NONE)
1845 		svc_args_done(xprt);
1846 	trace1(TR_svc_vc_getargs, 1);
1847 	return (dummy1);
1848 }
1849 
1850 static bool_t
1851 svc_vc_freeargs(SVCXPRT *xprt, xdrproc_t xdr_args, caddr_t args_ptr)
1852 {
1853 /* LINTED pointer alignment */
1854 	XDR *xdrs = &(((struct cf_conn *)(xprt->xp_p1))->xdrs);
1855 	bool_t dummy2;
1856 
1857 	trace1(TR_svc_vc_freeargs, 0);
1858 	xdrs->x_op = XDR_FREE;
1859 	dummy2 = (*xdr_args)(xdrs, args_ptr);
1860 	trace1(TR_svc_vc_freeargs, 1);
1861 	return (dummy2);
1862 }
1863 
/*
 * Encode and send a reply message on the connection.  Sends under the
 * per-parent send mutex in MT modes so concurrent replies on clones of
 * the same connection do not interleave on the wire.
 */
static bool_t
svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg)
{
/* LINTED pointer alignment */
	struct cf_conn *cd = (struct cf_conn *)(xprt->xp_p1);
	XDR *xdrs = &(cd->xdrs);
	bool_t stat = FALSE;
	xdrproc_t xdr_results;
	caddr_t xdr_location;
	bool_t has_args;

	trace1(TR_svc_vc_reply, 0);

#ifdef __lock_lint
	mutex_lock(&svc_send_mutex(SVCEXT(xprt)->parent));
#else
	if (svc_mt_mode != RPC_SVC_MT_NONE)
/* LINTED pointer alignment */
		mutex_lock(&svc_send_mutex(SVCEXT(xprt)->parent));
#endif

	/*
	 * For a successful reply, detach the caller's results proc and
	 * pointer so xdr_replymsg() encodes only the header; the results
	 * are then encoded separately through the auth wrap hook.
	 */
	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
				msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		has_args = TRUE;
		xdr_results = msg->acpted_rply.ar_results.proc;
		xdr_location = msg->acpted_rply.ar_results.where;
		msg->acpted_rply.ar_results.proc = xdr_void;
		msg->acpted_rply.ar_results.where = NULL;
	} else
		has_args = FALSE;

	xdrs->x_op = XDR_ENCODE;
	/* Echo back the transaction id saved by svc_vc_recv(). */
	msg->rm_xid = cd->x_id;
/* LINTED pointer alignment */
	if (xdr_replymsg(xdrs, msg) && (!has_args || SVCAUTH_WRAP(
			&SVC_XP_AUTH(xprt), xdrs, xdr_results, xdr_location))) {
		stat = TRUE;
	}
	/* Flush the record even if encoding failed part-way. */
	(void) xdrrec_endofrecord(xdrs, TRUE);

#ifdef __lock_lint
	mutex_unlock(&svc_send_mutex(SVCEXT(xprt)->parent));
#else
	if (svc_mt_mode != RPC_SVC_MT_NONE)
/* LINTED pointer alignment */
		mutex_unlock(&svc_send_mutex(SVCEXT(xprt)->parent));
#endif

	trace1(TR_svc_vc_reply, 1);
	return (stat);
}
1915 
/*
 * Return the shared ops vector for connection transports,
 * initializing it exactly once under ops_lock.
 */
static struct xp_ops *
svc_vc_ops()
{
	static struct xp_ops ops;
	extern mutex_t ops_lock;

/* VARIABLES PROTECTED BY ops_lock: ops */

	trace1(TR_svc_vc_ops, 0);
	mutex_lock(&ops_lock);
	/* xp_recv doubles as the "already initialized" flag. */
	if (ops.xp_recv == NULL) {
		ops.xp_recv = svc_vc_recv;
		ops.xp_stat = svc_vc_stat;
		ops.xp_getargs = svc_vc_getargs;
		ops.xp_reply = svc_vc_reply;
		ops.xp_freeargs = svc_vc_freeargs;
		ops.xp_destroy = svc_vc_destroy;
		ops.xp_control = svc_vc_control;
	}
	mutex_unlock(&ops_lock);
	trace1(TR_svc_vc_ops, 1);
	return (&ops);
}
1939 
1940 static struct xp_ops *
1941 svc_vc_rendezvous_ops()
1942 {
1943 	static struct xp_ops ops;
1944 	extern mutex_t ops_lock;
1945 
1946 	trace1(TR_svc_vc_rendezvous_ops, 0);
1947 	mutex_lock(&ops_lock);
1948 	if (ops.xp_recv == NULL) {
1949 		ops.xp_recv = rendezvous_request;
1950 		ops.xp_stat = rendezvous_stat;
1951 		ops.xp_getargs = (bool_t (*)())abort;
1952 		ops.xp_reply = (bool_t (*)())abort;
1953 		ops.xp_freeargs = (bool_t (*)())abort,
1954 		ops.xp_destroy = svc_vc_destroy;
1955 		ops.xp_control = rendezvous_control;
1956 	}
1957 	mutex_unlock(&ops_lock);
1958 	trace1(TR_svc_vc_rendezvous_ops, 1);
1959 	return (&ops);
1960 }
1961 
1962 /*
1963  * PRIVATE RPC INTERFACE
1964  *
1965  * This is a hack to let NIS+ clean up connections that have already been
1966  * closed.  This problem arises because rpc.nisd forks a child to handle
1967  * existing connections when it does checkpointing.  The child may close
1968  * some of these connections.  But the descriptors still stay open in the
1969  * parent, and because TLI descriptors don't support persistent EOF
1970  * condition (like sockets do), the parent will never detect that these
1971  * descriptors are dead.
1972  *
1973  * The following internal procedure __svc_nisplus_fdcleanup_hack() - should
1974  * be removed as soon as rpc.nisd is rearchitected to do the right thing.
1975  * This procedure should not find its way into any header files.
1976  *
1977  * This procedure should be called only when rpc.nisd knows that there
1978  * are no children servicing clients.
1979  */
1980 
/*
 * Return TRUE if the TLI endpoint underlying fd appears dead:
 * the state query fails, returns a short reply, or reports the
 * endpoint as unbound/idle (i.e. no connection).
 */
static bool_t
fd_is_dead(int fd)
{
	struct T_info_ack inforeq;
	int retval;

	inforeq.PRIM_type = T_INFO_REQ;
	/*
	 * NOTE(review): the request size passed is sizeof (T_info_req)
	 * even though inforeq is a T_info_ack — presumably intentional,
	 * since the reply overwrites the buffer with the larger ack
	 * structure; confirm against _t_do_ioctl()'s contract.
	 */
	if (!_t_do_ioctl(fd, (caddr_t)&inforeq, sizeof (struct T_info_req),
						TI_GETINFO, &retval))
		return (TRUE);
	/* A short reply also indicates a broken endpoint. */
	if (retval != (int)sizeof (struct T_info_ack))
		return (TRUE);

	switch (inforeq.CURRENT_state) {
	case TS_UNBND:
	case TS_IDLE:
		return (TRUE);
	default:
		break;
	}
	return (FALSE);
}
2003 
/*
 * Scan all registered connection fds and destroy those whose
 * underlying endpoints are dead (see the block comment above).
 * Works in chunks of CLEANUP_SIZE per acquisition of svc_fd_lock
 * so the lock is not held across the whole table at once.
 */
void
__svc_nisplus_fdcleanup_hack()
{
	SVCXPRT *xprt;
	SVCXPRT *dead_xprt[CLEANUP_SIZE];
	int i, fd_idx = 0, dead_idx = 0;

	if (svc_xports == NULL)
		return;
	while (1) {
		rw_wrlock(&svc_fd_lock);
		/* Collect up to CLEANUP_SIZE dead connections. */
		for (dead_idx = 0; fd_idx < svc_max_pollfd; fd_idx++) {
			if ((xprt = svc_xports[fd_idx]) == NULL)
				continue;
/* LINTED pointer alignment */
			if (svc_type(xprt) != SVC_CONNECTION)
				continue;
			if (fd_is_dead(fd_idx)) {
				dead_xprt[dead_idx++] = xprt;
				if (dead_idx >= CLEANUP_SIZE)
					break;
			}
		}

		for (i = 0; i < dead_idx; i++) {
			/* Still holding svc_fd_lock */
			_svc_vc_destroy_private(dead_xprt[i], FALSE);
		}
		rw_unlock(&svc_fd_lock);
		/* Done once the whole table has been covered. */
		if (fd_idx++ >= svc_max_pollfd)
			return;
	}
}
2037 
2038 void
2039 __svc_nisplus_enable_timestamps()
2040 {
2041 	mutex_lock(&timestamp_lock);
2042 	if (!timestamps) {
2043 		timestamps = calloc(FD_INCREMENT, sizeof (long));
2044 		if (timestamps != NULL)
2045 			ntimestamps = FD_INCREMENT;
2046 		else {
2047 			mutex_unlock(&timestamp_lock);
2048 			syslog(LOG_ERR,
2049 				"__svc_nisplus_enable_timestamps: "
2050 				"out of memory");
2051 			return;
2052 		}
2053 	}
2054 	mutex_unlock(&timestamp_lock);
2055 }
2056 
/*
 * Destroy every connection whose recorded last-activity timestamp is
 * older than "since".  Like the cleanup hack above, works in chunks
 * of CLEANUP_SIZE per pass, holding svc_fd_lock for the scan and
 * timestamp_lock only while reading timestamps[].
 */
void
__svc_nisplus_purge_since(long since)
{
	SVCXPRT *xprt;
	SVCXPRT *dead_xprt[CLEANUP_SIZE];
	int i, fd_idx = 0, dead_idx = 0;

	if (svc_xports == NULL)
		return;
	while (1) {
		rw_wrlock(&svc_fd_lock);
		mutex_lock(&timestamp_lock);
		for (dead_idx = 0; fd_idx < svc_max_pollfd; fd_idx++) {
			if ((xprt = svc_xports[fd_idx]) == NULL) {
				continue;
			}
			/* Only connections carry purgeable timestamps. */
			if (svc_type(xprt) != SVC_CONNECTION) {
				continue;
			}
			/* No timestamps recorded beyond this point. */
			if (fd_idx >= ntimestamps) {
				break;
			}
			if (timestamps[fd_idx] &&
			    timestamps[fd_idx] < since) {
				dead_xprt[dead_idx++] = xprt;
				if (dead_idx >= CLEANUP_SIZE)
					break;
			}
		}
		mutex_unlock(&timestamp_lock);

		for (i = 0; i < dead_idx; i++) {
			/* Still holding svc_fd_lock */
			_svc_vc_destroy_private(dead_xprt[i], FALSE);
		}
		rw_unlock(&svc_fd_lock);
		if (fd_idx++ >= svc_max_pollfd)
			return;
	}
}
2097 
2098 /*
2099  * dup cache wrapper functions for vc requests. The set of dup
2100  * functions were written with the view that they may be expanded
2101  * during creation of a generic svc_vc_enablecache routine
2102  * which would have a size based cache, rather than a time based cache.
2103  * The real work is done in generic svc.c
2104  */
2105 bool_t
2106 __svc_vc_dupcache_init(SVCXPRT *xprt, void *condition, int basis)
2107 {
2108 	return (__svc_dupcache_init(condition, basis,
2109 		/* LINTED pointer alignment */
2110 		&(((struct cf_rendezvous *)xprt->xp_p1)->cf_cache)));
2111 }
2112 
2113 int
2114 __svc_vc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz)
2115 {
2116 	return (__svc_dup(req, resp_buf, resp_bufsz,
2117 		/* LINTED pointer alignment */
2118 		((struct cf_conn *)req->rq_xprt->xp_p1)->cf_cache));
2119 }
2120 
2121 int
2122 __svc_vc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2123 				int status)
2124 {
2125 	return (__svc_dupdone(req, resp_buf, resp_bufsz, status,
2126 		/* LINTED pointer alignment */
2127 		((struct cf_conn *)req->rq_xprt->xp_p1)->cf_cache));
2128 }
2129