xref: /titanic_41/usr/src/lib/libnsl/rpc/svc_vc.c (revision c138f478d2bc94e73ab8f6a084e323bec25e62f5)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 
23 /*
24  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
25  * Use is subject to license terms.
26  */
27 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
28 /* All Rights Reserved */
29 /*
30  * Portions of this source code were derived from Berkeley
31  * 4.3 BSD under license from the Regents of the University of
32  * California.
33  */
34 
35 #pragma ident	"%Z%%M%	%I%	%E% SMI"
36 
37 /*
38  * svc_vc.c -- Server side for Connection Oriented RPC.
39  *
40  * Actually implements two flavors of transporter -
41  * a rendezvouser (a listener and connection establisher)
42  * and a record stream.
43  */
44 
45 #include "mt.h"
46 #include "rpc_mt.h"
47 #include <stdio.h>
48 #include <stdlib.h>
49 #include <rpc/rpc.h>
50 #include <sys/types.h>
51 #include <errno.h>
52 #include <sys/stat.h>
53 #include <sys/mkdev.h>
54 #include <sys/poll.h>
55 #include <syslog.h>
56 #include <rpc/nettype.h>
57 #include <tiuser.h>
58 #include <string.h>
59 #include <stropts.h>
60 #include <stdlib.h>
61 #include <unistd.h>
62 #include <sys/timod.h>
63 #include <limits.h>
64 
65 #ifndef MIN
66 #define	MIN(a, b)	(((a) < (b)) ? (a) : (b))
67 #endif
68 
69 #define	CLEANUP_SIZE	1024
70 
71 extern int nsvc_xdrs;
72 extern int __rpc_connmaxrec;
73 extern int __rpc_irtimeout;
74 
75 extern SVCXPRT	**svc_xports;
76 extern int	__td_setnodelay(int);
77 extern bool_t	__xdrrec_getbytes_nonblock(XDR *, enum xprt_stat *);
78 extern bool_t	__xdrrec_set_conn_nonblock(XDR *, uint32_t);
79 extern int	_t_do_ioctl(int, char *, int, int, int *);
80 extern int	__rpc_legal_connmaxrec(int);
81 /* Structure used to initialize SVC_XP_AUTH(xprt).svc_ah_ops. */
82 extern struct svc_auth_ops svc_auth_any_ops;
83 extern void	__xprt_unregister_private(const SVCXPRT *, bool_t);
84 
85 static struct xp_ops 	*svc_vc_ops(void);
86 static struct xp_ops 	*svc_vc_rendezvous_ops(void);
87 static void		svc_vc_destroy(SVCXPRT *);
88 static bool_t		svc_vc_nonblock(SVCXPRT *, SVCXPRT *);
89 static int		read_vc(SVCXPRT *, caddr_t, int);
90 static int		write_vc(SVCXPRT *, caddr_t, int);
91 static SVCXPRT		*makefd_xprt(int, uint_t, uint_t, t_scalar_t, char *);
92 static bool_t		fd_is_dead(int);
93 static void		update_nonblock_timestamps(SVCXPRT *);
94 
struct cf_rendezvous { /* kept in xprt->xp_p1 for rendezvouser */
	uint_t sendsize;	/* send buffer size handed to new connections */
	uint_t recvsize;	/* receive buffer size handed to new connections */
	struct t_call *t_call;	/* preallocated for t_listen()/t_accept() */
	struct t_bind *t_bind;	/* preallocated for t_bind() of accepted fds */
	t_scalar_t cf_tsdu;	/* transport TSDU size from t_getinfo() */
	char *cf_cache;		/* duplicate-request cache handle (or NULL) */
	int tcp_flag;		/* TRUE if TCP; enables NODELAY on accept */
	int tcp_keepalive;	/* TRUE to negotiate SO_KEEPALIVE on accept */
	int cf_connmaxrec;	/* max record size; nonzero => nonblocking conns */
};
106 
struct cf_conn {	/* kept in xprt->xp_p1 for actual connection */
	uint_t sendsize;	/* send buffer size of the XDR record stream */
	uint_t recvsize;	/* receive buffer size of the record stream */
	enum xprt_stat strm_stat;	/* stream state; starts as XPRT_IDLE */
	uint32_t x_id;		/* transaction id of the current request */
	t_scalar_t cf_tsdu;	/* transport TSDU size, inherited from listener */
	XDR xdrs;		/* send-side XDR record stream (write_vc) */
	char *cf_cache;		/* duplicate-request cache handle (or NULL) */
	char verf_body[MAX_AUTH_BYTES];	/* backing store for xp_verf.oa_base */
	bool_t cf_conn_nonblock;	/* TRUE once fd is set O_NONBLOCK */
	time_t cf_conn_nonblock_timestamp;	/* last activity, for timeouts */
};
119 
120 static int t_rcvall(int, char *, int);
121 static int t_rcvnonblock(SVCXPRT *, caddr_t, int);
122 static void svc_timeout_nonblock_xprt_and_LRU(bool_t);
123 
124 extern int __xdrrec_setfirst(XDR *);
125 extern int __xdrrec_resetfirst(XDR *);
126 extern int __is_xdrrec_first(XDR *);
127 
128 void __svc_nisplus_enable_timestamps(void);
129 void __svc_timeout_nonblock_xprt(void);
130 
131 /*
132  * This is intended as a performance improvement on the old string handling
133  * stuff by read only moving data into the  text segment.
134  * Format = <routine> : <error>
135  */
136 
137 static const char errstring[] = " %s : %s";
138 
139 /* Routine names */
140 
141 static const char svc_vc_create_str[] = "svc_vc_create";
142 static const char svc_fd_create_str[] = "svc_fd_create";
143 static const char makefd_xprt_str[] = "svc_vc_create: makefd_xprt ";
144 static const char rendezvous_request_str[] = "rendezvous_request";
145 static const char svc_vc_fderr[] =
146 		"fd > FD_SETSIZE; Use rpc_control(RPC_SVC_USE_POLLFD,...);";
147 static const char do_accept_str[] = "do_accept";
148 
149 /* error messages */
150 
151 static const char no_mem_str[] = "out of memory";
152 static const char no_tinfo_str[] = "could not get transport information";
153 static const char no_fcntl_getfl_str[] = "could not get status flags and modes";
154 static const char no_nonblock_str[] = "could not set transport non-blocking";
155 
156 /*
157  *  Records a timestamp when data comes in on a descriptor.  This is
158  *  only used if timestamps are enabled with __svc_nisplus_enable_timestamps().
159  */
160 static long *timestamps;
161 static int ntimestamps; /* keep track how many timestamps */
162 static mutex_t timestamp_lock = DEFAULTMUTEX;
163 
164 /*
165  * Used to determine whether the time-out logic should be executed.
166  */
167 static bool_t check_nonblock_timestamps = FALSE;
168 
169 void
170 svc_vc_xprtfree(SVCXPRT *xprt)
171 {
172 /* LINTED pointer alignment */
173 	SVCXPRT_EXT		*xt = xprt ? SVCEXT(xprt) : NULL;
174 	struct cf_rendezvous	*r = xprt ?
175 /* LINTED pointer alignment */
176 				    (struct cf_rendezvous *)xprt->xp_p1 : NULL;
177 
178 	if (!xprt)
179 		return;
180 
181 	if (xprt->xp_tp)
182 		free(xprt->xp_tp);
183 	if (xprt->xp_netid)
184 		free(xprt->xp_netid);
185 	if (xt && (xt->parent == NULL)) {
186 		if (xprt->xp_ltaddr.buf)
187 			free(xprt->xp_ltaddr.buf);
188 		if (xprt->xp_rtaddr.buf)
189 			free(xprt->xp_rtaddr.buf);
190 	}
191 	if (r) {
192 		if (r->t_call)
193 			(void) t_free((char *)r->t_call, T_CALL);
194 		if (r->t_bind)
195 			(void) t_free((char *)r->t_bind, T_BIND);
196 		free(r);
197 	}
198 	svc_xprt_free(xprt);
199 }
200 
/*
 * Usage:
 *	xprt = svc_vc_create(fd, sendsize, recvsize);
 * Since connection streams do buffered io similar to stdio, the caller
 * can specify how big the send and receive buffers are. If recvsize
 * or sendsize are 0, defaults will be chosen.
 * fd should be open and bound.
 *
 * Returns an unregistered rendezvous (listener) SVCXPRT, or NULL on
 * failure (fd outside the usable fd range, out of memory, or a
 * transport that cannot carry data).  Failures are reported via
 * syslog(); on every error path all partial allocations are undone.
 */
SVCXPRT *
svc_vc_create_private(int fd, uint_t sendsize, uint_t recvsize)
{
	struct cf_rendezvous *r;
	SVCXPRT *xprt;
	struct t_info tinfo;

	/* fd must be usable with the fd_set based select interfaces */
	if (RPC_FD_NOTIN_FDSET(fd)) {
		errno = EBADF;
		t_errno = TBADF;
		(void) syslog(LOG_ERR, errstring, svc_vc_create_str,
		    svc_vc_fderr);
		return (NULL);
	}
	if ((xprt = svc_xprt_alloc()) == NULL) {
		(void) syslog(LOG_ERR, errstring,
		    svc_vc_create_str, no_mem_str);
		return (NULL);
	}
/* LINTED pointer alignment */
	svc_flags(xprt) |= SVC_RENDEZVOUS;

	r = calloc(1, sizeof (*r));
	if (r == NULL) {
		(void) syslog(LOG_ERR, errstring,
			svc_vc_create_str, no_mem_str);
		svc_vc_xprtfree(xprt);
		return (NULL);
	}
	/* query the transport for its characteristics (TSDU size etc.) */
	if (t_getinfo(fd, &tinfo) == -1) {
		char errorstr[100];

		__tli_sys_strerror(errorstr, sizeof (errorstr),
				t_errno, errno);
		(void) syslog(LOG_ERR, "%s : %s : %s",
			svc_vc_create_str, no_tinfo_str, errorstr);
		free(r);
		svc_vc_xprtfree(xprt);
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	r->sendsize = __rpc_get_t_size((int)sendsize, tinfo.tsdu);
	r->recvsize = __rpc_get_t_size((int)recvsize, tinfo.tsdu);
	if ((r->sendsize == 0) || (r->recvsize == 0)) {
		syslog(LOG_ERR,
		    "svc_vc_create:  transport does not support "
		    "data transfer");
		free(r);
		svc_vc_xprtfree(xprt);
		return (NULL);
	}

	/* preallocate the TLI structures used at accept time */
/* LINTED pointer alignment */
	r->t_call = (struct t_call *)t_alloc(fd, T_CALL, T_ADDR | T_OPT);
	if (r->t_call == NULL) {
		(void) syslog(LOG_ERR, errstring,
			svc_vc_create_str, no_mem_str);
		free(r);
		svc_vc_xprtfree(xprt);
		return (NULL);
	}

/* LINTED pointer alignment */
	r->t_bind = (struct t_bind *)t_alloc(fd, T_BIND, T_ADDR);
	if (r->t_bind == NULL) {
		(void) syslog(LOG_ERR, errstring,
			svc_vc_create_str, no_mem_str);
		(void) t_free((char *)r->t_call, T_CALL);
		free(r);
		svc_vc_xprtfree(xprt);
		return (NULL);
	}

	r->cf_tsdu = tinfo.tsdu;
	r->tcp_flag = FALSE;		/* set later via __svc_vc_setflag() */
	r->tcp_keepalive = FALSE;
	r->cf_connmaxrec = __rpc_connmaxrec;
	xprt->xp_fd = fd;
	xprt->xp_p1 = (caddr_t)r;
	xprt->xp_p2 = NULL;
	xprt->xp_verf = _null_auth;
	xprt->xp_ops = svc_vc_rendezvous_ops();
/* LINTED pointer alignment */
	SVC_XP_AUTH(xprt).svc_ah_ops = svc_auth_any_ops;
/* LINTED pointer alignment */
	SVC_XP_AUTH(xprt).svc_ah_private = NULL;

	return (xprt);
}
300 
301 SVCXPRT *
302 svc_vc_create(const int fd, const uint_t sendsize, const uint_t recvsize)
303 {
304 	SVCXPRT *xprt;
305 
306 	if ((xprt = svc_vc_create_private(fd, sendsize, recvsize)) != NULL)
307 		xprt_register(xprt);
308 	return (xprt);
309 }
310 
311 SVCXPRT *
312 svc_vc_xprtcopy(SVCXPRT *parent)
313 {
314 	SVCXPRT			*xprt;
315 	struct cf_rendezvous	*r, *pr;
316 	int			fd = parent->xp_fd;
317 
318 	if ((xprt = svc_xprt_alloc()) == NULL)
319 		return (NULL);
320 
321 /* LINTED pointer alignment */
322 	SVCEXT(xprt)->parent = parent;
323 /* LINTED pointer alignment */
324 	SVCEXT(xprt)->flags = SVCEXT(parent)->flags;
325 
326 	xprt->xp_fd = fd;
327 	xprt->xp_ops = svc_vc_rendezvous_ops();
328 	if (parent->xp_tp) {
329 		xprt->xp_tp = (char *)strdup(parent->xp_tp);
330 		if (xprt->xp_tp == NULL) {
331 			syslog(LOG_ERR, "svc_vc_xprtcopy: strdup failed");
332 			svc_vc_xprtfree(xprt);
333 			return (NULL);
334 		}
335 	}
336 	if (parent->xp_netid) {
337 		xprt->xp_netid = (char *)strdup(parent->xp_netid);
338 		if (xprt->xp_netid == NULL) {
339 			syslog(LOG_ERR, "svc_vc_xprtcopy: strdup failed");
340 			if (xprt->xp_tp)
341 				free(xprt->xp_tp);
342 			svc_vc_xprtfree(xprt);
343 			return (NULL);
344 		}
345 	}
346 
347 	/*
348 	 * can share both local and remote address
349 	 */
350 	xprt->xp_ltaddr = parent->xp_ltaddr;
351 	xprt->xp_rtaddr = parent->xp_rtaddr; /* XXX - not used for rendezvous */
352 	xprt->xp_type = parent->xp_type;
353 	xprt->xp_verf = parent->xp_verf;
354 
355 	if ((r = calloc(1, sizeof (*r))) == NULL) {
356 		svc_vc_xprtfree(xprt);
357 		return (NULL);
358 	}
359 	xprt->xp_p1 = (caddr_t)r;
360 /* LINTED pointer alignment */
361 	pr = (struct cf_rendezvous *)parent->xp_p1;
362 	r->sendsize = pr->sendsize;
363 	r->recvsize = pr->recvsize;
364 	r->cf_tsdu = pr->cf_tsdu;
365 	r->cf_cache = pr->cf_cache;
366 	r->tcp_flag = pr->tcp_flag;
367 	r->tcp_keepalive = pr->tcp_keepalive;
368 	r->cf_connmaxrec = pr->cf_connmaxrec;
369 /* LINTED pointer alignment */
370 	r->t_call = (struct t_call *)t_alloc(fd, T_CALL, T_ADDR | T_OPT);
371 	if (r->t_call == NULL) {
372 		svc_vc_xprtfree(xprt);
373 		return (NULL);
374 	}
375 /* LINTED pointer alignment */
376 	r->t_bind = (struct t_bind *)t_alloc(fd, T_BIND, T_ADDR);
377 	if (r->t_bind == NULL) {
378 		svc_vc_xprtfree(xprt);
379 		return (NULL);
380 	}
381 
382 	return (xprt);
383 }
384 
385 /*
386  * XXX : Used for setting flag to indicate that this is TCP
387  */
388 
389 /*ARGSUSED*/
390 int
391 __svc_vc_setflag(SVCXPRT *xprt, int flag)
392 {
393 	struct cf_rendezvous *r;
394 
395 /* LINTED pointer alignment */
396 	r = (struct cf_rendezvous *)xprt->xp_p1;
397 	r->tcp_flag = TRUE;
398 	return (1);
399 }
400 
401 /*
402  * used for the actual connection.
403  */
404 SVCXPRT *
405 svc_fd_create_private(int fd, uint_t sendsize, uint_t recvsize)
406 {
407 	struct t_info tinfo;
408 	SVCXPRT *dummy;
409 	struct netbuf tres = {0};
410 
411 	if (RPC_FD_NOTIN_FDSET(fd)) {
412 		errno = EBADF;
413 		t_errno = TBADF;
414 		(void) syslog(LOG_ERR, errstring,
415 		    svc_fd_create_str, svc_vc_fderr);
416 		return (NULL);
417 	}
418 	if (t_getinfo(fd, &tinfo) == -1) {
419 		char errorstr[100];
420 
421 		__tli_sys_strerror(errorstr, sizeof (errorstr),
422 				t_errno, errno);
423 		(void) syslog(LOG_ERR, "%s : %s : %s",
424 			svc_fd_create_str, no_tinfo_str, errorstr);
425 		return (NULL);
426 	}
427 	/*
428 	 * Find the receive and the send size
429 	 */
430 	sendsize = __rpc_get_t_size((int)sendsize, tinfo.tsdu);
431 	recvsize = __rpc_get_t_size((int)recvsize, tinfo.tsdu);
432 	if ((sendsize == 0) || (recvsize == 0)) {
433 		syslog(LOG_ERR, errstring, svc_fd_create_str,
434 			"transport does not support data transfer");
435 		return (NULL);
436 	}
437 	dummy = makefd_xprt(fd, sendsize, recvsize, tinfo.tsdu, NULL);
438 				/* NULL signifies no dup cache */
439 	/* Assign the local bind address */
440 	if (t_getname(fd, &tres, LOCALNAME) == -1)
441 		tres.len = 0;
442 	dummy->xp_ltaddr = tres;
443 	/* Fill in type of service */
444 	dummy->xp_type = tinfo.servtype;
445 	return (dummy);
446 }
447 
448 SVCXPRT *
449 svc_fd_create(const int fd, const uint_t sendsize, const uint_t recvsize)
450 {
451 	SVCXPRT *xprt;
452 
453 	if ((xprt = svc_fd_create_private(fd, sendsize, recvsize)) != NULL)
454 		xprt_register(xprt);
455 	return (xprt);
456 }
457 
458 void
459 svc_fd_xprtfree(SVCXPRT *xprt)
460 {
461 /* LINTED pointer alignment */
462 	SVCXPRT_EXT	*xt = xprt ? SVCEXT(xprt) : NULL;
463 /* LINTED pointer alignment */
464 	struct cf_conn	*cd = xprt ? (struct cf_conn *)xprt->xp_p1 : NULL;
465 
466 	if (!xprt)
467 		return;
468 
469 	if (xprt->xp_tp)
470 		free(xprt->xp_tp);
471 	if (xprt->xp_netid)
472 		free(xprt->xp_netid);
473 	if (xt && (xt->parent == NULL)) {
474 		if (xprt->xp_ltaddr.buf)
475 			free(xprt->xp_ltaddr.buf);
476 		if (xprt->xp_rtaddr.buf)
477 			free(xprt->xp_rtaddr.buf);
478 	}
479 	if (cd) {
480 		XDR_DESTROY(&(cd->xdrs));
481 		free(cd);
482 	}
483 	if (xt && (xt->parent == NULL) && xprt->xp_p2) {
484 /* LINTED pointer alignment */
485 		free(((struct netbuf *)xprt->xp_p2)->buf);
486 		free(xprt->xp_p2);
487 	}
488 	svc_xprt_free(xprt);
489 }
490 
/*
 * Wrap an already-connected endpoint fd in a connection SVCXPRT.
 *
 * Allocates the cf_conn private data, creates the send-side XDR
 * record stream in cd->xdrs (writing via write_vc), and installs the
 * receive-side stream in the global svc_xdrs[fd] slot (reading via
 * read_vc), growing the svc_xdrs array as needed under svc_fd_lock.
 * 'cache' is the duplicate-request cache handle; NULL means no cache.
 * Returns NULL (after logging) on any allocation failure, with all
 * partial allocations undone.
 */
static SVCXPRT *
makefd_xprt(int fd, uint_t sendsize, uint_t recvsize, t_scalar_t tsdu,
    char *cache)
{
	SVCXPRT *xprt;
	struct cf_conn *cd;

	xprt = svc_xprt_alloc();
	if (xprt == NULL) {
		(void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
		return (NULL);
	}
/* LINTED pointer alignment */
	svc_flags(xprt) |= SVC_CONNECTION;

	cd = malloc(sizeof (struct cf_conn));
	if (cd == NULL) {
		(void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
		svc_fd_xprtfree(xprt);
		return (NULL);
	}
	cd->sendsize = sendsize;
	cd->recvsize = recvsize;
	cd->strm_stat = XPRT_IDLE;
	cd->cf_tsdu = tsdu;
	cd->cf_cache = cache;
	cd->cf_conn_nonblock = FALSE;
	cd->cf_conn_nonblock_timestamp = 0;
	/* x_ops stays NULL if xdrrec_create() fails; checked below */
	cd->xdrs.x_ops = NULL;
	xdrrec_create(&(cd->xdrs), sendsize, 0, (caddr_t)xprt,
			(int(*)())NULL, (int(*)(void *, char *, int))write_vc);
	if (cd->xdrs.x_ops == NULL) {
		(void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
		free(cd);
		svc_fd_xprtfree(xprt);
		return (NULL);
	}

	/* svc_xdrs[] is global state; serialize growth and slot updates */
	(void) rw_wrlock(&svc_fd_lock);
	if (svc_xdrs == NULL) {
		svc_xdrs = calloc(FD_INCREMENT,  sizeof (XDR *));
		if (svc_xdrs == NULL) {
			(void) syslog(LOG_ERR, errstring, makefd_xprt_str,
								no_mem_str);
			XDR_DESTROY(&(cd->xdrs));
			free(cd);
			svc_fd_xprtfree(xprt);
			(void) rw_unlock(&svc_fd_lock);
			return (NULL);
		}
		nsvc_xdrs = FD_INCREMENT;
	}

	/* grow the array in FD_INCREMENT steps until fd has a slot */
	while (fd >= nsvc_xdrs) {
		XDR **tmp_xdrs = svc_xdrs;
		tmp_xdrs = realloc(svc_xdrs,
				sizeof (XDR *) * (nsvc_xdrs + FD_INCREMENT));
		if (tmp_xdrs == NULL) {
			(void) syslog(LOG_ERR, errstring, makefd_xprt_str,
								no_mem_str);
			XDR_DESTROY(&(cd->xdrs));
			free(cd);
			svc_fd_xprtfree(xprt);
			(void) rw_unlock(&svc_fd_lock);
			return (NULL);
		}

		svc_xdrs = tmp_xdrs;
		/* initial the new array to 0 from the last allocated array */
		(void) memset(&svc_xdrs[nsvc_xdrs], 0,
					sizeof (XDR *) * FD_INCREMENT);
		nsvc_xdrs += FD_INCREMENT;
	}

	/* reuse an existing slot (destroying the stale stream) or allocate */
	if (svc_xdrs[fd] != NULL) {
		XDR_DESTROY(svc_xdrs[fd]);
	} else if ((svc_xdrs[fd] = malloc(sizeof (XDR))) == NULL) {
		(void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
		XDR_DESTROY(&(cd->xdrs));
		free(cd);
		svc_fd_xprtfree(xprt);
		(void) rw_unlock(&svc_fd_lock);
		return (NULL);
	}
	(void) memset(svc_xdrs[fd], 0, sizeof (XDR));
	/* receive-side record stream; reads from the transport via read_vc */
	xdrrec_create(svc_xdrs[fd], 0, recvsize, (caddr_t)xprt,
			(int(*)(void *, char *, int))read_vc, (int(*)())NULL);
	if (svc_xdrs[fd]->x_ops == NULL) {
		free(svc_xdrs[fd]);
		svc_xdrs[fd] = NULL;
		XDR_DESTROY(&(cd->xdrs));
		free(cd);
		svc_fd_xprtfree(xprt);
		(void) rw_unlock(&svc_fd_lock);
		return (NULL);
	}
	(void) rw_unlock(&svc_fd_lock);

	xprt->xp_p1 = (caddr_t)cd;
	xprt->xp_p2 = NULL;
	xprt->xp_verf.oa_base = cd->verf_body;
	xprt->xp_ops = svc_vc_ops();	/* truely deals with calls */
	xprt->xp_fd = fd;
	return (xprt);
}
596 
597 SVCXPRT *
598 svc_fd_xprtcopy(SVCXPRT *parent)
599 {
600 	SVCXPRT			*xprt;
601 	struct cf_conn		*cd, *pcd;
602 
603 	if ((xprt = svc_xprt_alloc()) == NULL)
604 		return (NULL);
605 
606 /* LINTED pointer alignment */
607 	SVCEXT(xprt)->parent = parent;
608 /* LINTED pointer alignment */
609 	SVCEXT(xprt)->flags = SVCEXT(parent)->flags;
610 
611 	xprt->xp_fd = parent->xp_fd;
612 	xprt->xp_ops = svc_vc_ops();
613 	if (parent->xp_tp) {
614 		xprt->xp_tp = (char *)strdup(parent->xp_tp);
615 		if (xprt->xp_tp == NULL) {
616 			syslog(LOG_ERR, "svc_fd_xprtcopy: strdup failed");
617 			svc_fd_xprtfree(xprt);
618 			return (NULL);
619 		}
620 	}
621 	if (parent->xp_netid) {
622 		xprt->xp_netid = (char *)strdup(parent->xp_netid);
623 		if (xprt->xp_netid == NULL) {
624 			syslog(LOG_ERR, "svc_fd_xprtcopy: strdup failed");
625 			if (xprt->xp_tp)
626 				free(xprt->xp_tp);
627 			svc_fd_xprtfree(xprt);
628 			return (NULL);
629 		}
630 	}
631 	/*
632 	 * share local and remote addresses with parent
633 	 */
634 	xprt->xp_ltaddr = parent->xp_ltaddr;
635 	xprt->xp_rtaddr = parent->xp_rtaddr;
636 	xprt->xp_type = parent->xp_type;
637 
638 	if ((cd = malloc(sizeof (struct cf_conn))) == NULL) {
639 		svc_fd_xprtfree(xprt);
640 		return (NULL);
641 	}
642 /* LINTED pointer alignment */
643 	pcd = (struct cf_conn *)parent->xp_p1;
644 	cd->sendsize = pcd->sendsize;
645 	cd->recvsize = pcd->recvsize;
646 	cd->strm_stat = pcd->strm_stat;
647 	cd->x_id = pcd->x_id;
648 	cd->cf_tsdu = pcd->cf_tsdu;
649 	cd->cf_cache = pcd->cf_cache;
650 	cd->cf_conn_nonblock = pcd->cf_conn_nonblock;
651 	cd->cf_conn_nonblock_timestamp = pcd->cf_conn_nonblock_timestamp;
652 	cd->xdrs.x_ops = NULL;
653 	xdrrec_create(&(cd->xdrs), cd->sendsize, 0, (caddr_t)xprt,
654 			(int(*)())NULL, (int(*)(void *, char *, int))write_vc);
655 	if (cd->xdrs.x_ops == NULL) {
656 		free(cd);
657 		svc_fd_xprtfree(xprt);
658 		return (NULL);
659 	}
660 	xprt->xp_verf.oa_base = cd->verf_body;
661 	xprt->xp_p1 = (char *)cd;
662 	xprt->xp_p2 = parent->xp_p2;	/* shared */
663 
664 	return (xprt);
665 }
666 
/*
 * This routine is called by svc_getreqset(), when a packet is recd.
 * The listener process creates another end point on which the actual
 * connection is carried. It returns FALSE to indicate that it was
 * not a rpc packet (falsely though), but as a side effect creates
 * another endpoint which is also registered, which then always
 * has a request ready to be served.
 */
/* ARGSUSED1 */
static bool_t
rendezvous_request(SVCXPRT *xprt, struct rpc_msg *msg)
{
	struct cf_rendezvous *r;
	char *tpname = NULL;
	char devbuf[256];
	/*
	 * NOTE(review): archaic block-scope declaration of the static
	 * function defined below; left as-is for the vintage compiler.
	 */
	static void do_accept();

/* LINTED pointer alignment */
	r = (struct cf_rendezvous *)xprt->xp_p1;

again:
	/* consume whichever event is pending on the listening endpoint */
	switch (t_look(xprt->xp_fd)) {
	case T_DISCONNECT:
		(void) t_rcvdis(xprt->xp_fd, NULL);
		return (FALSE);

	case T_LISTEN:

		/* fill r->t_call with the pending connect indication */
		if (t_listen(xprt->xp_fd, r->t_call) == -1) {
			if ((t_errno == TSYSERR) && (errno == EINTR))
				goto again;	/* interrupted; retry */

			if (t_errno == TLOOK) {
				if (t_look(xprt->xp_fd) == T_DISCONNECT)
				    (void) t_rcvdis(xprt->xp_fd, NULL);
			}
			return (FALSE);
		}
		break;
	default:
		return (FALSE);
	}
	/*
	 * Now create another endpoint, and accept the connection
	 * on it.
	 */

	if (xprt->xp_tp) {
		tpname = xprt->xp_tp;
	} else {
		/*
		 * If xprt->xp_tp is NULL, then try to extract the
		 * transport protocol information from the transport
		 * protcol corresponding to xprt->xp_fd
		 */
		struct netconfig *nconf;
		tpname = devbuf;
		if ((nconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type))
				== NULL) {
			(void) syslog(LOG_ERR, errstring,
					rendezvous_request_str,
					"no suitable transport");
			goto err;
		}
		(void) strcpy(tpname, nconf->nc_device);
		freenetconfigent(nconf);
	}

	/* accept onto a new endpoint; registers the new xprt on success */
	do_accept(xprt->xp_fd, tpname, xprt->xp_netid, r->t_call, r);

err:
	return (FALSE); /* there is never an rpc msg to be processed */
}
740 
/*
 * Accept the pending connection described by 'tcp' on listener
 * 'srcfd': open a fresh endpoint on the same device ('tpname'), bind
 * it, accept the connection onto it, and wrap it in a new connection
 * SVCXPRT which is then registered with the dispatcher.  All failures
 * are logged and cleaned up locally; the routine returns no status.
 * May recurse (via the T_LISTEN case below) to drain further connect
 * indications queued on srcfd while the accept is outstanding.
 */
static void
do_accept(int srcfd, char *tpname, char *netid, struct t_call *tcp,
    struct cf_rendezvous *r)
{
	int	destfd;
	struct t_call	t_call;
	struct t_call	*tcp2 = NULL;
	struct t_info	tinfo;
	SVCXPRT	*xprt = NULL;
	SVCXPRT	*xprt_srcfd = NULL;
	char *option, *option_ret;
	struct opthdr *opt;
	struct t_optmgmt optreq, optret;
	int *p_optval;

	destfd = t_open(tpname, O_RDWR, &tinfo);
	if (check_nonblock_timestamps) {
		if (destfd == -1 && t_errno == TSYSERR && errno == EMFILE) {
			/*
			 * Since there are nonblocking connection xprts and
			 * too many open files, the LRU connection xprt should
			 * get destroyed in case an attacker has been creating
			 * many connections.
			 */
			(void) mutex_lock(&svc_mutex);
			svc_timeout_nonblock_xprt_and_LRU(TRUE);
			(void) mutex_unlock(&svc_mutex);
			destfd = t_open(tpname, O_RDWR, &tinfo);
		} else {
			/*
			 * Destroy/timeout all nonblock connection xprts
			 * that have not had recent activity.
			 * Do not destroy LRU xprt unless there are
			 * too many open files.
			 */
			(void) mutex_lock(&svc_mutex);
			svc_timeout_nonblock_xprt_and_LRU(FALSE);
			(void) mutex_unlock(&svc_mutex);
		}
	}
	if (destfd == -1) {
		char errorstr[100];

		__tli_sys_strerror(errorstr, sizeof (errorstr), t_errno,
			errno);
		(void) syslog(LOG_ERR, "%s : %s : %s", do_accept_str,
				"can't open connection", errorstr);
		(void) t_snddis(srcfd, tcp);
		return;
	}
	/*
	 * Move the connection fd above 255 (F_DUPFD picks the lowest
	 * free fd >= 256), presumably to leave low fds available for
	 * stdio-style consumers -- NOTE(review): rationale inferred.
	 */
	if (destfd < 256) {
		int nfd;

		nfd = _fcntl(destfd, F_DUPFD, 256);
		if (nfd != -1) {
			if (t_close(destfd) == -1) {
				char errorstr[100];

				__tli_sys_strerror(errorstr, sizeof (errorstr),
						t_errno, errno);
				(void) syslog(LOG_ERR,
		"could not t_close() old fd %d; mem & fd leak error: %s",
						destfd, errorstr);
			}
			destfd = nfd;
			/* re-sync TLI state onto the duplicated fd */
			if (t_sync(destfd) == -1) {
				char errorstr[100];

				__tli_sys_strerror(errorstr, sizeof (errorstr),
						t_errno, errno);
				(void) syslog(LOG_ERR,
				    "could not t_sync() duped fd %d: %s",
						destfd, errorstr);
				(void) t_snddis(srcfd, tcp);
				return;
			}
		}
	}
	if (RPC_FD_NOTIN_FDSET(destfd)) {
		(void) syslog(LOG_ERR, errstring, do_accept_str,
						svc_vc_fderr);
		(void) t_close(destfd);
		(void) t_snddis(srcfd, tcp);
		errno = EBADF;
		t_errno = TBADF;
		return;
	}
	(void) _fcntl(destfd, F_SETFD, 1); /* make it "close on exec" */
	if ((tinfo.servtype != T_COTS) && (tinfo.servtype != T_COTS_ORD)) {
		/* Not a connection oriented mode */
		(void) syslog(LOG_ERR, errstring, do_accept_str,
				"do_accept:  illegal transport");
		(void) t_close(destfd);
		(void) t_snddis(srcfd, tcp);
		return;
	}


	if (t_bind(destfd, NULL, r->t_bind) == -1) {
		char errorstr[100];

		__tli_sys_strerror(errorstr, sizeof (errorstr), t_errno,
				errno);
		(void) syslog(LOG_ERR, " %s : %s : %s", do_accept_str,
			"t_bind failed", errorstr);
		(void) t_close(destfd);
		(void) t_snddis(srcfd, tcp);
		return;
	}

	if (r->tcp_flag)	/* if TCP, set NODELAY flag */
		(void) __td_setnodelay(destfd);

	/*
	 * This connection is not listening, hence no need to set
	 * the qlen.
	 */

	/*
	 * XXX: The local transport chokes on its own listen
	 * options so we zero them for now
	 */
	t_call = *tcp;
	t_call.opt.len = 0;
	t_call.opt.maxlen = 0;
	t_call.opt.buf = NULL;

	/*
	 * t_accept() fails with TLOOK whenever another event (new
	 * connect indication, disconnect, orderly release) arrives on
	 * srcfd first; that event must be consumed before retrying.
	 */
	while (t_accept(srcfd, destfd, &t_call) == -1) {
		char errorstr[100];

		switch (t_errno) {
		case TLOOK:
again:
			switch (t_look(srcfd)) {
			case T_CONNECT:
			case T_DATA:
			case T_EXDATA:
				/* this should not happen */
				break;

			case T_DISCONNECT:
				(void) t_rcvdis(srcfd, NULL);
				break;

			case T_LISTEN:
				if (tcp2 == NULL)
/* LINTED pointer alignment */
					tcp2 = (struct t_call *)t_alloc(srcfd,
					    T_CALL, T_ADDR | T_OPT);
				if (tcp2 == NULL) {

					(void) t_close(destfd);
					(void) t_snddis(srcfd, tcp);
					syslog(LOG_ERR, errstring,
						do_accept_str, no_mem_str);
					return;
					/* NOTREACHED */
				}
				if (t_listen(srcfd, tcp2) == -1) {
					switch (t_errno) {
					case TSYSERR:
						if (errno == EINTR)
							goto again;
						break;

					case TLOOK:
						goto again;
					}
					(void) t_free((char *)tcp2, T_CALL);
					(void) t_close(destfd);
					(void) t_snddis(srcfd, tcp);
					return;
					/* NOTREACHED */
				}
				/* recurse to accept the newer indication */
				do_accept(srcfd, tpname, netid, tcp2, r);
				break;

			case T_ORDREL:
				(void) t_rcvrel(srcfd);
				(void) t_sndrel(srcfd);
				break;
			}
			if (tcp2) {
				(void) t_free((char *)tcp2, T_CALL);
				tcp2 = NULL;
			}
			break;

		case TBADSEQ:
			/*
			 * This can happen if the remote side has
			 * disconnected before the connection is
			 * accepted.  In this case, a disconnect
			 * should not be sent on srcfd (important!
			 * the listening fd will be hosed otherwise!).
			 * This error is not logged since this is an
			 * operational situation that is recoverable.
			 */
			(void) t_close(destfd);
			return;
			/* NOTREACHED */

		case TOUTSTATE:
			/*
			 * This can happen if the t_rcvdis() or t_rcvrel()/
			 * t_sndrel() put srcfd into the T_IDLE state.
			 */
			if (t_getstate(srcfd) == T_IDLE) {
				(void) t_close(destfd);
				(void) t_snddis(srcfd, tcp);
				return;
			}
			/* else FALL THROUGH TO */

		default:
			__tli_sys_strerror(errorstr, sizeof (errorstr),
					t_errno, errno);
			(void) syslog(LOG_ERR,
			    "cannot accept connection:  %s (current state %d)",
			    errorstr, t_getstate(srcfd));
			(void) t_close(destfd);
			(void) t_snddis(srcfd, tcp);
			return;
			/* NOTREACHED */
		}
	}

	/* negotiate SO_KEEPALIVE on the new connection when requested */
	if (r->tcp_flag && r->tcp_keepalive) {
		option = malloc(sizeof (struct opthdr) + sizeof (int));
		option_ret = malloc(sizeof (struct opthdr) + sizeof (int));
		if (option && option_ret) {
			/* LINTED pointer cast */
			opt = (struct opthdr *)option;
			opt->level = SOL_SOCKET;
			opt->name  = SO_KEEPALIVE;
			opt->len  = sizeof (int);
			p_optval = (int *)(opt + 1);
			*p_optval = SO_KEEPALIVE;
			optreq.opt.maxlen = optreq.opt.len =
				sizeof (struct opthdr) + sizeof (int);
			optreq.opt.buf = (char *)option;
			optreq.flags = T_NEGOTIATE;
			optret.opt.maxlen = sizeof (struct opthdr)
					+ sizeof (int);
			optret.opt.buf = (char *)option_ret;
			(void) t_optmgmt(destfd, &optreq, &optret);
			free(option);
			free(option_ret);
		} else {
			if (option)
				free(option);
			if (option_ret)
				free(option_ret);
		}
	}


	/*
	 * make a new transporter
	 */
	xprt = makefd_xprt(destfd, r->sendsize, r->recvsize, r->cf_tsdu,
				r->cf_cache);
	if (xprt == NULL) {
		/*
		 * makefd_xprt() returns a NULL xprt only when
		 * it's out of memory.
		 */
		goto memerr;
	}

	/*
	 * Copy the new local and remote bind information
	 */

	xprt->xp_rtaddr.len = tcp->addr.len;
	xprt->xp_rtaddr.maxlen = tcp->addr.len;
	if ((xprt->xp_rtaddr.buf = malloc(tcp->addr.len)) == NULL)
		goto memerr;
	(void) memcpy(xprt->xp_rtaddr.buf, tcp->addr.buf, tcp->addr.len);

	if (strcmp(netid, "tcp") == 0) {
		xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_in);
		if ((xprt->xp_ltaddr.buf =
			malloc(xprt->xp_ltaddr.maxlen)) == NULL)
			goto memerr;
		if (t_getname(destfd, &xprt->xp_ltaddr, LOCALNAME) < 0) {
		    (void) syslog(LOG_ERR,
				"do_accept: t_getname for tcp failed!");
			goto xprt_err;
		}
	} else if (strcmp(netid, "tcp6") == 0) {
		xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_in6);
		if ((xprt->xp_ltaddr.buf =
			malloc(xprt->xp_ltaddr.maxlen)) == NULL)
			goto memerr;
		if (t_getname(destfd, &xprt->xp_ltaddr, LOCALNAME) < 0) {
			(void) syslog(LOG_ERR,
				"do_accept: t_getname for tcp6 failed!");
			goto xprt_err;
		}
	}

	xprt->xp_tp = strdup(tpname);
	xprt->xp_netid = strdup(netid);
	if ((xprt->xp_tp == NULL) ||
	    (xprt->xp_netid == NULL)) {
		goto memerr;
	}
	/* stash the caller's connect options on xp_p2 for later retrieval */
	if (tcp->opt.len > 0) {
		struct netbuf *netptr;

		xprt->xp_p2 = malloc(sizeof (struct netbuf));

		if (xprt->xp_p2 != NULL) {
/* LINTED pointer alignment */
			netptr = (struct netbuf *)xprt->xp_p2;

			netptr->len = tcp->opt.len;
			netptr->maxlen = tcp->opt.len;
			if ((netptr->buf = malloc(tcp->opt.len)) == NULL)
				goto memerr;
			(void) memcpy(netptr->buf, tcp->opt.buf, tcp->opt.len);
		} else
			goto memerr;
	}
/*	(void) ioctl(destfd, I_POP, NULL);    */

	/*
	 * If a nonblocked connection fd has been requested,
	 * perform the necessary operations.
	 */
	xprt_srcfd = svc_xports[srcfd];
	/* LINTED pointer cast */
	if (((struct cf_rendezvous *)(xprt_srcfd->xp_p1))->cf_connmaxrec) {
		if (!svc_vc_nonblock(xprt_srcfd, xprt))
			goto xprt_err;
	}

	/*
	 * Copy the call back declared for the service to the current
	 * connection
	 */
	xprt->xp_closeclnt = xprt_srcfd->xp_closeclnt;
	xprt_register(xprt);

	return;

memerr:
	(void) syslog(LOG_ERR, errstring, do_accept_str, no_mem_str);
xprt_err:
	if (xprt)
		svc_vc_destroy(xprt);
	(void) t_close(destfd);
}
1095 
1096 /*
1097  * This routine performs the necessary fcntl() operations to create
1098  * a nonblocked connection fd.
1099  * It also adjusts the sizes and allocates the buffer
1100  * for the nonblocked operations, and updates the associated
1101  * timestamp field in struct cf_conn for timeout bookkeeping.
1102  */
1103 static bool_t
1104 svc_vc_nonblock(SVCXPRT *xprt_rendezvous, SVCXPRT *xprt_conn)
1105 {
1106 	int nn;
1107 	int fdconn = xprt_conn->xp_fd;
1108 	struct cf_rendezvous *r =
1109 		/* LINTED pointer cast */
1110 		(struct cf_rendezvous *)xprt_rendezvous->xp_p1;
1111 	/* LINTED pointer cast */
1112 	struct cf_conn *cd = (struct cf_conn *)xprt_conn->xp_p1;
1113 	uint32_t maxrecsz;
1114 
1115 	if ((nn = fcntl(fdconn, F_GETFL, 0)) < 0) {
1116 		(void) syslog(LOG_ERR, "%s : %s : %m", do_accept_str,
1117 			    no_fcntl_getfl_str);
1118 		return (FALSE);
1119 	}
1120 
1121 	if (fcntl(fdconn, F_SETFL, nn|O_NONBLOCK) != 0) {
1122 		(void) syslog(LOG_ERR, "%s : %s : %m", do_accept_str,
1123 				no_nonblock_str);
1124 		return (FALSE);
1125 	}
1126 
1127 	cd->cf_conn_nonblock = TRUE;
1128 	/*
1129 	 * If the max fragment size has not been set via
1130 	 * rpc_control(), use the default.
1131 	 */
1132 	if ((maxrecsz = r->cf_connmaxrec) == 0)
1133 		maxrecsz = r->recvsize;
1134 	/* Set XDR stream to use non-blocking semantics. */
1135 	if (__xdrrec_set_conn_nonblock(svc_xdrs[fdconn], maxrecsz)) {
1136 		check_nonblock_timestamps = TRUE;
1137 		update_nonblock_timestamps(xprt_conn);
1138 		return (TRUE);
1139 	}
1140 	return (FALSE);
1141 }
1142 
1143 /* ARGSUSED */
1144 static enum xprt_stat
1145 rendezvous_stat(SVCXPRT *xprt)
1146 {
1147 	return (XPRT_IDLE);
1148 }
1149 
/*
 * Public destroy entry point for a vc transport handle.
 * Serializes on svc_mutex, tears the handle down, and takes the
 * opportunity to time out idle nonblocked connection fds
 * (FALSE => do not additionally destroy the LRU candidate).
 */
static void
svc_vc_destroy(SVCXPRT *xprt)
{
	(void) mutex_lock(&svc_mutex);
	_svc_vc_destroy_private(xprt, TRUE);
	(void) svc_timeout_nonblock_xprt_and_LRU(FALSE);
	(void) mutex_unlock(&svc_mutex);
}
1158 
/*
 * Core teardown for a vc transport handle.
 * lock_not_held tells __xprt_unregister_private() whether the caller
 * already holds svc_fd_lock (FALSE => caller holds it).
 * In MT mode the handle is only marked SVC_DEFUNCT while references
 * remain; actual destruction is deferred until the refcount drains.
 */
void
_svc_vc_destroy_private(SVCXPRT *xprt, bool_t lock_not_held)
{
	if (svc_mt_mode != RPC_SVC_MT_NONE) {
		/* Always operate on the parent handle, not a clone. */
/* LINTED pointer alignment */
		if (SVCEXT(xprt)->parent)
/* LINTED pointer alignment */
			xprt = SVCEXT(xprt)->parent;
/* LINTED pointer alignment */
		svc_flags(xprt) |= SVC_DEFUNCT;
		/* Defer destruction while other threads still hold refs. */
/* LINTED pointer alignment */
		if (SVCEXT(xprt)->refcnt > 0)
			return;
	}

	if (xprt->xp_closeclnt != NULL) {
		svc_errorhandler_t cb = xprt->xp_closeclnt;

		/*
		 * Reset the pointer here to avoid reentrance on the same
		 * SVCXPRT handle.
		 */
		xprt->xp_closeclnt = NULL;
		cb(xprt, (xprt->xp_rtaddr.len != 0));
	}

	__xprt_unregister_private(xprt, lock_not_held);
	(void) t_close(xprt->xp_fd);

	/* Clear any activity timestamp recorded for this fd. */
	(void) mutex_lock(&timestamp_lock);
	if (timestamps && xprt->xp_fd < ntimestamps) {
		timestamps[xprt->xp_fd] = 0;
	}
	(void) mutex_unlock(&timestamp_lock);

	if (svc_mt_mode != RPC_SVC_MT_NONE) {
		svc_xprt_destroy(xprt);
	} else {
		/* Release to the appropriate free list by handle type. */
/* LINTED pointer alignment */
		if (svc_type(xprt) == SVC_RENDEZVOUS)
			svc_vc_xprtfree(xprt);
		else
			svc_fd_xprtfree(xprt);
	}
}
1204 
1205 /*ARGSUSED*/
1206 static bool_t
1207 svc_vc_control(SVCXPRT *xprt, const uint_t rq, void *in)
1208 {
1209 	switch (rq) {
1210 	case SVCSET_RECVERRHANDLER:
1211 		xprt->xp_closeclnt = (svc_errorhandler_t)in;
1212 		return (TRUE);
1213 	case SVCGET_RECVERRHANDLER:
1214 		*(svc_errorhandler_t *)in = xprt->xp_closeclnt;
1215 		return (TRUE);
1216 	case SVCGET_XID:
1217 		if (xprt->xp_p1 == NULL)
1218 			return (FALSE);
1219 		/* LINTED pointer alignment */
1220 		*(uint32_t *)in = ((struct cf_conn *)(xprt->xp_p1))->x_id;
1221 		return (TRUE);
1222 	default:
1223 		return (FALSE);
1224 	}
1225 }
1226 
1227 static bool_t
1228 rendezvous_control(SVCXPRT *xprt, const uint_t rq, void *in)
1229 {
1230 	struct cf_rendezvous *r;
1231 	int tmp;
1232 
1233 	switch (rq) {
1234 	case SVCSET_RECVERRHANDLER:
1235 		xprt->xp_closeclnt = (svc_errorhandler_t)in;
1236 		return (TRUE);
1237 	case SVCGET_RECVERRHANDLER:
1238 		*(svc_errorhandler_t *)in = xprt->xp_closeclnt;
1239 		return (TRUE);
1240 	case SVCSET_KEEPALIVE:
1241 		/* LINTED pointer cast */
1242 		r = (struct cf_rendezvous *)xprt->xp_p1;
1243 		if (r->tcp_flag) {
1244 			r->tcp_keepalive = (int)(intptr_t)in;
1245 			return (TRUE);
1246 		}
1247 		return (FALSE);
1248 	case SVCSET_CONNMAXREC:
1249 		/*
1250 		 * Override the default maximum record size, set via
1251 		 * rpc_control(), for this connection. Only appropriate
1252 		 * for connection oriented transports, but is ignored for
1253 		 * the connectionless case, so no need to check the
1254 		 * connection type here.
1255 		 */
1256 		/* LINTED pointer cast */
1257 		r = (struct cf_rendezvous *)xprt->xp_p1;
1258 		tmp = __rpc_legal_connmaxrec(*(int *)in);
1259 		if (r != 0 && tmp >= 0) {
1260 			r->cf_connmaxrec = tmp;
1261 			return (TRUE);
1262 		}
1263 		return (FALSE);
1264 	case SVCGET_CONNMAXREC:
1265 		/* LINTED pointer cast */
1266 		r = (struct cf_rendezvous *)xprt->xp_p1;
1267 		if (r != 0) {
1268 			*(int *)in = r->cf_connmaxrec;
1269 			return (TRUE);
1270 		}
1271 		return (FALSE);
1272 	case SVCGET_XID:	/* fall through for now */
1273 	default:
1274 		return (FALSE);
1275 	}
1276 }
1277 
1278 /*
1279  * All read operations timeout after 35 seconds.
1280  * A timeout is fatal for the connection.
1281  * update_timestamps() is used by nisplus operations,
1282  * update_nonblock_timestamps() is used for nonblocked
1283  * connection fds.
1284  */
1285 #define	WAIT_PER_TRY	35000	/* milliseconds */
1286 
1287 static void
1288 update_timestamps(int fd)
1289 {
1290 	(void) mutex_lock(&timestamp_lock);
1291 	if (timestamps) {
1292 		struct timeval tv;
1293 
1294 		(void) gettimeofday(&tv, NULL);
1295 		while (fd >= ntimestamps) {
1296 			long *tmp_timestamps = timestamps;
1297 
1298 			/* allocate more timestamps */
1299 			tmp_timestamps = realloc(timestamps,
1300 				sizeof (long) *
1301 				(ntimestamps + FD_INCREMENT));
1302 			if (tmp_timestamps == NULL) {
1303 				(void) mutex_unlock(&timestamp_lock);
1304 				syslog(LOG_ERR,
1305 					"update_timestamps: out of memory");
1306 				return;
1307 			}
1308 
1309 			timestamps = tmp_timestamps;
1310 			(void) memset(&timestamps[ntimestamps], 0,
1311 				sizeof (long) * FD_INCREMENT);
1312 			ntimestamps += FD_INCREMENT;
1313 		}
1314 		timestamps[fd] = tv.tv_sec;
1315 	}
1316 	(void) mutex_unlock(&timestamp_lock);
1317 }
1318 
1319 static  void
1320 update_nonblock_timestamps(SVCXPRT *xprt_conn)
1321 {
1322 	struct timeval tv;
1323 	/* LINTED pointer cast */
1324 	struct cf_conn *cd = (struct cf_conn *)xprt_conn->xp_p1;
1325 
1326 	(void) gettimeofday(&tv, NULL);
1327 	cd->cf_conn_nonblock_timestamp = tv.tv_sec;
1328 }
1329 
1330 /*
1331  * reads data from the vc conection.
1332  * any error is fatal and the connection is closed.
1333  * (And a read of zero bytes is a half closed stream => error.)
1334  */
/*
 * XDR read callback for a vc connection.
 * Returns the number of bytes read into buf, or -1 on any fatal
 * condition, in which case the stream is marked XPRT_DIED and the
 * handle flagged SVC_FAILED.
 */
static int
read_vc(SVCXPRT *xprt, caddr_t buf, int len)
{
	int fd = xprt->xp_fd;
	XDR *xdrs = svc_xdrs[fd];
	struct pollfd pfd;
	int ret;

	/*
	 * Make sure the connection is not already dead.
	 */
/* LINTED pointer alignment */
	if (svc_failed(xprt))
		return (-1);

	/* LINTED pointer cast */
	if (((struct cf_conn *)(xprt->xp_p1))->cf_conn_nonblock) {
		/*
		 * For nonblocked reads, only update the
		 * timestamps to record the activity so the
		 * connection will not be timed out.
		 * Up to "len" bytes are requested.
		 * If fewer than "len" bytes are received, the
		 * connection is poll()ed again.
		 * The poll() for the connection fd is performed
		 * in the main poll() so that all outstanding fds
		 * are polled rather than just the vc connection.
		 * Polling on only the vc connection until the entire
		 * fragment has been read can be exploited in
		 * a Denial of Service Attack such as telnet <host> 111.
		 */
		if ((len = t_rcvnonblock(xprt, buf, len)) >= 0) {
			if (len > 0) {
				update_timestamps(fd);
				update_nonblock_timestamps(xprt);
			}
			return (len);
		}
		goto fatal_err;
	}

	/*
	 * Blocked path: wait (up to WAIT_PER_TRY ms per poll) for data,
	 * unless this is the first read of a record, which has already
	 * been signalled ready by the main poll loop.
	 */
	if (!__is_xdrrec_first(xdrs)) {

		pfd.fd = fd;
		pfd.events = MASKVAL;

		do {
			if ((ret = poll(&pfd, 1, WAIT_PER_TRY)) <= 0) {
				/*
				 * If errno is EINTR, ERESTART, or EAGAIN
				 * ignore error and repeat poll
				 */
				if (ret < 0 && (errno == EINTR ||
				    errno == ERESTART || errno == EAGAIN))
					continue;
				goto fatal_err;
			}
		} while (pfd.revents == 0);
		if (pfd.revents & POLLNVAL)
			goto fatal_err;
	}
	(void) __xdrrec_resetfirst(xdrs);
	if ((len = t_rcvall(fd, buf, len)) > 0) {
		update_timestamps(fd);
		return (len);
	}

fatal_err:
	/* Any failure (including EOF / zero-byte read) kills the stream. */
/* LINTED pointer alignment */
	((struct cf_conn *)(xprt->xp_p1))->strm_stat = XPRT_DIED;
/* LINTED pointer alignment */
	svc_flags(xprt) |= SVC_FAILED;
	return (-1);
}
1409 
1410 /*
1411  * Requests up to "len" bytes of data.
1412  * Returns number of bytes actually received, or error indication.
1413  */
1414 static int
1415 t_rcvnonblock(SVCXPRT *xprt, caddr_t buf, int len)
1416 {
1417 	int fd = xprt->xp_fd;
1418 	int flag;
1419 	int res;
1420 
1421 	res = t_rcv(fd, buf, (unsigned)len, &flag);
1422 	if (res == -1) {
1423 		switch (t_errno) {
1424 		case TLOOK:
1425 			switch (t_look(fd)) {
1426 			case T_DISCONNECT:
1427 				(void) t_rcvdis(fd, NULL);
1428 				break;
1429 			case T_ORDREL:
1430 				(void) t_rcvrel(fd);
1431 				(void) t_sndrel(fd);
1432 				break;
1433 			default:
1434 				break;
1435 			}
1436 			break;
1437 		case TNODATA:
1438 			/*
1439 			 * Either poll() lied, or the xprt/fd was closed and
1440 			 * re-opened under our feet. Return 0, so that we go
1441 			 * back to waiting for data.
1442 			 */
1443 			res = 0;
1444 			break;
1445 		/* Should handle TBUFOVFLW TSYSERR ? */
1446 		default:
1447 			break;
1448 		}
1449 	}
1450 	return (res);
1451 }
1452 
1453 /*
1454  * Timeout out nonblocked connection fds
1455  * If there has been no activity on the fd for __rpc_irtimeout
1456  * seconds, timeout the fd  by destroying its xprt.
1457  * If the caller gets an EMFILE error, the caller may also request
1458  * that the least busy xprt gets destroyed as well.
1459  * svc_thr_mutex is held when this is called.
1460  * svc_mutex is held when this is called.
1461  */
/*
 * Scan all connection fds and destroy those nonblocked connections
 * that have been idle for at least __rpc_irtimeout seconds.
 * When destroy_lru is TRUE (caller hit EMFILE), additionally destroy
 * the single least-recently-active nonblocked connection.
 */
static void
svc_timeout_nonblock_xprt_and_LRU(bool_t destroy_lru)
{
	SVCXPRT *xprt;
	SVCXPRT *dead_xprt[CLEANUP_SIZE];
	SVCXPRT *candidate_xprt = NULL;	/* LRU destruction candidate */
	struct cf_conn *cd;
	int i, fd_idx = 0, dead_idx = 0;
	struct timeval now;
	time_t lasttime, maxctime = 0;
	extern rwlock_t svc_fd_lock;

	/* Nothing to do until at least one nonblocked connection exists. */
	if (!check_nonblock_timestamps)
		return;

	(void) gettimeofday(&now, NULL);
	if (svc_xports == NULL)
		return;
	/*
	 * Hold svc_fd_lock to protect
	 * svc_xports, svc_maxpollfd, svc_max_pollfd
	 */
	(void) rw_wrlock(&svc_fd_lock);
	for (;;) {
		/*
		 * Time out up to CLEANUP_SIZE connection fds per
		 * iteration of the outer loop; fd_idx carries the scan
		 * position across iterations.
		 */
		for (dead_idx = 0; fd_idx < svc_max_pollfd; fd_idx++) {
			if ((xprt = svc_xports[fd_idx]) == NULL) {
				continue;
			}
			/* Only look at connection fds */
			/* LINTED pointer cast */
			if (svc_type(xprt) != SVC_CONNECTION) {
				continue;
			}
			/* LINTED pointer cast */
			cd = (struct cf_conn *)xprt->xp_p1;
			if (!cd->cf_conn_nonblock)
				continue;
			lasttime = now.tv_sec - cd->cf_conn_nonblock_timestamp;
			if (lasttime >= __rpc_irtimeout &&
			    __rpc_irtimeout != 0) {
				/* Enter in timedout/dead array */
				dead_xprt[dead_idx++] = xprt;
				if (dead_idx >= CLEANUP_SIZE)
					break;
			} else
			if (lasttime > maxctime) {
				/* Possible LRU xprt */
				candidate_xprt = xprt;
				maxctime = lasttime;
			}
		}

		for (i = 0; i < dead_idx; i++) {
			/* Still holding svc_fd_lock */
			_svc_vc_destroy_private(dead_xprt[i], FALSE);
		}

		/*
		 * If all the nonblocked fds have been checked, we're done.
		 */
		if (fd_idx++ >= svc_max_pollfd)
			break;
	}
	if ((destroy_lru) && (candidate_xprt != NULL)) {
		_svc_vc_destroy_private(candidate_xprt, FALSE);
	}
	(void) rw_unlock(&svc_fd_lock);
}
1534 /*
1535  * Receive the required bytes of data, even if it is fragmented.
1536  */
1537 static int
1538 t_rcvall(int fd, char *buf, int len)
1539 {
1540 	int flag;
1541 	int final = 0;
1542 	int res;
1543 
1544 	do {
1545 		res = t_rcv(fd, buf, (unsigned)len, &flag);
1546 		if (res == -1) {
1547 			if (t_errno == TLOOK) {
1548 				switch (t_look(fd)) {
1549 				case T_DISCONNECT:
1550 					(void) t_rcvdis(fd, NULL);
1551 					break;
1552 				case T_ORDREL:
1553 					(void) t_rcvrel(fd);
1554 					(void) t_sndrel(fd);
1555 					break;
1556 				default:
1557 					break;
1558 				}
1559 			}
1560 			break;
1561 		}
1562 		final += res;
1563 		buf += res;
1564 		len -= res;
1565 	} while (len && (flag & T_MORE));
1566 	return (res == -1 ? -1 : final);
1567 }
1568 
1569 /*
1570  * writes data to the vc connection.
1571  * Any error is fatal and the connection is closed.
1572  */
/*
 * XDR write callback for a vc connection.
 * Sends "len" bytes from buf; honors the transport's max TSDU size
 * by fragmenting with T_MORE, and for nonblocked connections retries
 * flow-controlled (TFLOW) sends after poll()ing for writability.
 * Returns len on success or -1 on fatal error, in which case the
 * stream is marked XPRT_DIED and the handle flagged SVC_FAILED.
 */
static int
write_vc(SVCXPRT *xprt, caddr_t buf, int len)
{
	int i, cnt;
	int flag;
	int maxsz;
	int nonblock;
	struct pollfd pfd;

/* LINTED pointer alignment */
	maxsz = ((struct cf_conn *)(xprt->xp_p1))->cf_tsdu;
	/* LINTED pointer cast */
	nonblock = ((struct cf_conn *)(xprt->xp_p1))->cf_conn_nonblock;
	if (nonblock && maxsz <= 0)
		maxsz = len;
	/* cf_tsdu of 0 or -1 means no TSDU limit: single send. */
	if ((maxsz == 0) || (maxsz == -1)) {
		if ((len = t_snd(xprt->xp_fd, buf, (unsigned)len,
				(int)0)) == -1) {
			if (t_errno == TLOOK) {
				switch (t_look(xprt->xp_fd)) {
				case T_DISCONNECT:
					(void) t_rcvdis(xprt->xp_fd, NULL);
					break;
				case T_ORDREL:
					(void) t_rcvrel(xprt->xp_fd);
					(void) t_sndrel(xprt->xp_fd);
					break;
				default:
					break;
				}
			}
/* LINTED pointer alignment */
			((struct cf_conn *)(xprt->xp_p1))->strm_stat
					= XPRT_DIED;
/* LINTED pointer alignment */
			svc_flags(xprt) |= SVC_FAILED;
		}
		return (len);
	}

	/*
	 * Setup for polling. We want to be able to write normal
	 * data to the transport
	 */
	pfd.fd = xprt->xp_fd;
	pfd.events = POLLWRNORM;

	/*
	 * This for those transports which have a max size for data,
	 * and for the non-blocking case, where t_snd() may send less
	 * than requested.
	 */
	for (cnt = len, i = 0; cnt > 0; cnt -= i, buf += i) {
		/* T_MORE on all but the final fragment of the record. */
		flag = cnt > maxsz ? T_MORE : 0;
		if ((i = t_snd(xprt->xp_fd, buf,
			(unsigned)MIN(cnt, maxsz), flag)) == -1) {
			if (t_errno == TLOOK) {
				switch (t_look(xprt->xp_fd)) {
				case T_DISCONNECT:
					(void) t_rcvdis(xprt->xp_fd, NULL);
					break;
				case T_ORDREL:
					(void) t_rcvrel(xprt->xp_fd);
					break;
				default:
					break;
				}
			} else if (t_errno == TFLOW) {
				/* Try again */
				i = 0;
				/* Wait till we can write to the transport */
				do {
				    if (poll(&pfd, 1, WAIT_PER_TRY) < 0) {
					/*
					 * If errno is ERESTART, or
					 * EAGAIN ignore error and repeat poll
					 */
					if (errno == ERESTART ||
					    errno == EAGAIN)
						continue;
					else
						goto fatal_err;
				    }
				} while (pfd.revents == 0);
				if (pfd.revents & (POLLNVAL | POLLERR |
						    POLLHUP))
					goto fatal_err;
				continue;
			}
			/* TLOOK and all other errors fall through here. */
fatal_err:
/* LINTED pointer alignment */
			((struct cf_conn *)(xprt->xp_p1))->strm_stat
					= XPRT_DIED;
/* LINTED pointer alignment */
			svc_flags(xprt) |= SVC_FAILED;
			return (-1);
		}
	}
	return (len);
}
1673 
1674 static enum xprt_stat
1675 svc_vc_stat(SVCXPRT *xprt)
1676 {
1677 /* LINTED pointer alignment */
1678 	SVCXPRT *parent = SVCEXT(xprt)->parent ? SVCEXT(xprt)->parent : xprt;
1679 
1680 /* LINTED pointer alignment */
1681 	if (svc_failed(parent) || svc_failed(xprt))
1682 		return (XPRT_DIED);
1683 	if (!xdrrec_eof(svc_xdrs[xprt->xp_fd]))
1684 		return (XPRT_MOREREQS);
1685 	/*
1686 	 * xdrrec_eof could have noticed that the connection is dead, so
1687 	 * check status again.
1688 	 */
1689 /* LINTED pointer alignment */
1690 	if (svc_failed(parent) || svc_failed(xprt))
1691 		return (XPRT_DIED);
1692 	return (XPRT_IDLE);
1693 }
1694 
1695 
1696 
/*
 * Receive and decode an RPC call message from a vc connection.
 * Nonblocked connections are only processed once the full record
 * has been buffered; blocked connections decode directly from the
 * stream.  On success the transaction id is saved in cf_conn.
 */
static bool_t
svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg)
{
/* LINTED pointer alignment */
	struct cf_conn *cd = (struct cf_conn *)(xprt->xp_p1);
	XDR *xdrs = svc_xdrs[xprt->xp_fd];

	xdrs->x_op = XDR_DECODE;

	if (cd->cf_conn_nonblock) {
		/* Get the next input */
		if (!__xdrrec_getbytes_nonblock(xdrs, &cd->strm_stat)) {
			/*
			 * The entire record has not been received.
			 * If the xprt has died, pass it along in svc_flags.
			 * Return FALSE; For nonblocked vc connection,
			 * xdr_callmsg() is called only after the entire
			 * record has been received.  For blocked vc
			 * connection, the data is received on the fly as it
			 * is being processed through the xdr routines.
			 */
			if (cd->strm_stat == XPRT_DIED)
				/* LINTED pointer cast */
				svc_flags(xprt) |= SVC_FAILED;
			return (FALSE);
		}
	} else {
		/* Discard any unread remainder of the previous record. */
		if (!xdrrec_skiprecord(xdrs))
			return (FALSE);
		(void) __xdrrec_setfirst(xdrs);
	}

	if (xdr_callmsg(xdrs, msg)) {
		cd->x_id = msg->rm_xid;
		return (TRUE);
	}

	/*
	 * If a non-blocking connection, drop it when message decode fails.
	 * We are either under attack, or we're talking to a broken client.
	 */
	if (cd->cf_conn_nonblock) {
		/* LINTED pointer cast */
		svc_flags(xprt) |= SVC_FAILED;
	}

	return (FALSE);
}
1745 
1746 static bool_t
1747 svc_vc_getargs(SVCXPRT *xprt, xdrproc_t xdr_args, caddr_t args_ptr)
1748 {
1749 	bool_t dummy;
1750 
1751 /* LINTED pointer alignment */
1752 	dummy = SVCAUTH_UNWRAP(&SVC_XP_AUTH(xprt), svc_xdrs[xprt->xp_fd],
1753 							xdr_args, args_ptr);
1754 	if (svc_mt_mode != RPC_SVC_MT_NONE)
1755 		svc_args_done(xprt);
1756 	return (dummy);
1757 }
1758 
1759 static bool_t
1760 svc_vc_freeargs(SVCXPRT *xprt, xdrproc_t xdr_args, caddr_t args_ptr)
1761 {
1762 /* LINTED pointer alignment */
1763 	XDR *xdrs = &(((struct cf_conn *)(xprt->xp_p1))->xdrs);
1764 
1765 	xdrs->x_op = XDR_FREE;
1766 	return ((*xdr_args)(xdrs, args_ptr));
1767 }
1768 
/*
 * Encode and send an RPC reply on a vc connection.
 * For accepted/successful replies the caller's results are wrapped
 * through the authentication layer; the reply header is encoded
 * with a void placeholder in that case.  In MT modes the per-parent
 * send mutex serializes concurrent replies on the same connection.
 * Returns TRUE iff the full reply was encoded.
 */
static bool_t
svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg)
{
/* LINTED pointer alignment */
	struct cf_conn *cd = (struct cf_conn *)(xprt->xp_p1);
	XDR *xdrs = &(cd->xdrs);
	bool_t stat = FALSE;
	xdrproc_t xdr_results;
	caddr_t xdr_location;
	bool_t has_args;

#ifdef __lock_lint
	(void) mutex_lock(&svc_send_mutex(SVCEXT(xprt)->parent));
#else
	if (svc_mt_mode != RPC_SVC_MT_NONE)
/* LINTED pointer alignment */
		(void) mutex_lock(&svc_send_mutex(SVCEXT(xprt)->parent));
#endif

	/*
	 * Detach the results proc/location so the header can be encoded
	 * separately; the results are then wrapped via SVCAUTH_WRAP.
	 */
	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
				msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		has_args = TRUE;
		xdr_results = msg->acpted_rply.ar_results.proc;
		xdr_location = msg->acpted_rply.ar_results.where;
		msg->acpted_rply.ar_results.proc = xdr_void;
		msg->acpted_rply.ar_results.where = NULL;
	} else
		has_args = FALSE;

	xdrs->x_op = XDR_ENCODE;
	/* Echo back the transaction id saved at svc_vc_recv() time. */
	msg->rm_xid = cd->x_id;
/* LINTED pointer alignment */
	if (xdr_replymsg(xdrs, msg) && (!has_args || SVCAUTH_WRAP(
			&SVC_XP_AUTH(xprt), xdrs, xdr_results, xdr_location))) {
		stat = TRUE;
	}
	/* Flush the record regardless of encode success. */
	(void) xdrrec_endofrecord(xdrs, TRUE);

#ifdef __lock_lint
	(void) mutex_unlock(&svc_send_mutex(SVCEXT(xprt)->parent));
#else
	if (svc_mt_mode != RPC_SVC_MT_NONE)
/* LINTED pointer alignment */
		(void) mutex_unlock(&svc_send_mutex(SVCEXT(xprt)->parent));
#endif

	return (stat);
}
1817 
1818 static struct xp_ops *
1819 svc_vc_ops(void)
1820 {
1821 	static struct xp_ops ops;
1822 	extern mutex_t ops_lock;
1823 
1824 /* VARIABLES PROTECTED BY ops_lock: ops */
1825 
1826 	(void) mutex_lock(&ops_lock);
1827 	if (ops.xp_recv == NULL) {
1828 		ops.xp_recv = svc_vc_recv;
1829 		ops.xp_stat = svc_vc_stat;
1830 		ops.xp_getargs = svc_vc_getargs;
1831 		ops.xp_reply = svc_vc_reply;
1832 		ops.xp_freeargs = svc_vc_freeargs;
1833 		ops.xp_destroy = svc_vc_destroy;
1834 		ops.xp_control = svc_vc_control;
1835 	}
1836 	(void) mutex_unlock(&ops_lock);
1837 	return (&ops);
1838 }
1839 
1840 static struct xp_ops *
1841 svc_vc_rendezvous_ops(void)
1842 {
1843 	static struct xp_ops ops;
1844 	extern mutex_t ops_lock;
1845 
1846 	(void) mutex_lock(&ops_lock);
1847 	if (ops.xp_recv == NULL) {
1848 		ops.xp_recv = rendezvous_request;
1849 		ops.xp_stat = rendezvous_stat;
1850 		ops.xp_getargs = (bool_t (*)())abort;
1851 		ops.xp_reply = (bool_t (*)())abort;
1852 		ops.xp_freeargs = (bool_t (*)())abort,
1853 		ops.xp_destroy = svc_vc_destroy;
1854 		ops.xp_control = rendezvous_control;
1855 	}
1856 	(void) mutex_unlock(&ops_lock);
1857 	return (&ops);
1858 }
1859 
1860 /*
1861  * PRIVATE RPC INTERFACE
1862  *
1863  * This is a hack to let NIS+ clean up connections that have already been
1864  * closed.  This problem arises because rpc.nisd forks a child to handle
1865  * existing connections when it does checkpointing.  The child may close
1866  * some of these connections.  But the descriptors still stay open in the
1867  * parent, and because TLI descriptors don't support persistent EOF
1868  * condition (like sockets do), the parent will never detect that these
1869  * descriptors are dead.
1870  *
1871  * The following internal procedure __svc_nisplus_fdcleanup_hack() - should
1872  * be removed as soon as rpc.nisd is rearchitected to do the right thing.
1873  * This procedure should not find its way into any header files.
1874  *
1875  * This procedure should be called only when rpc.nisd knows that there
1876  * are no children servicing clients.
1877  */
1878 
1879 static bool_t
1880 fd_is_dead(int fd)
1881 {
1882 	struct T_info_ack inforeq;
1883 	int retval;
1884 
1885 	inforeq.PRIM_type = T_INFO_REQ;
1886 	if (!_t_do_ioctl(fd, (caddr_t)&inforeq, sizeof (struct T_info_req),
1887 						TI_GETINFO, &retval))
1888 		return (TRUE);
1889 	if (retval != (int)sizeof (struct T_info_ack))
1890 		return (TRUE);
1891 
1892 	switch (inforeq.CURRENT_state) {
1893 	case TS_UNBND:
1894 	case TS_IDLE:
1895 		return (TRUE);
1896 	default:
1897 		break;
1898 	}
1899 	return (FALSE);
1900 }
1901 
/*
 * Walk all connection fds and destroy those whose underlying TLI
 * endpoint is dead (see fd_is_dead()).  Destruction happens in
 * batches of at most CLEANUP_SIZE per pass, all while holding
 * svc_fd_lock; fd_idx carries the scan position across passes.
 * See the block comment above: this exists solely for rpc.nisd.
 */
void
__svc_nisplus_fdcleanup_hack(void)
{
	SVCXPRT *xprt;
	SVCXPRT *dead_xprt[CLEANUP_SIZE];
	int i, fd_idx = 0, dead_idx = 0;

	if (svc_xports == NULL)
		return;
	for (;;) {
		(void) rw_wrlock(&svc_fd_lock);
		for (dead_idx = 0; fd_idx < svc_max_pollfd; fd_idx++) {
			if ((xprt = svc_xports[fd_idx]) == NULL)
				continue;
			/* Only established connections can be "dead". */
/* LINTED pointer alignment */
			if (svc_type(xprt) != SVC_CONNECTION)
				continue;
			if (fd_is_dead(fd_idx)) {
				dead_xprt[dead_idx++] = xprt;
				if (dead_idx >= CLEANUP_SIZE)
					break;
			}
		}

		for (i = 0; i < dead_idx; i++) {
			/* Still holding svc_fd_lock */
			_svc_vc_destroy_private(dead_xprt[i], FALSE);
		}
		(void) rw_unlock(&svc_fd_lock);
		if (fd_idx++ >= svc_max_pollfd)
			return;
	}
}
1935 
1936 void
1937 __svc_nisplus_enable_timestamps(void)
1938 {
1939 	(void) mutex_lock(&timestamp_lock);
1940 	if (!timestamps) {
1941 		timestamps = calloc(FD_INCREMENT, sizeof (long));
1942 		if (timestamps != NULL)
1943 			ntimestamps = FD_INCREMENT;
1944 		else {
1945 			(void) mutex_unlock(&timestamp_lock);
1946 			syslog(LOG_ERR,
1947 				"__svc_nisplus_enable_timestamps: "
1948 				"out of memory");
1949 			return;
1950 		}
1951 	}
1952 	(void) mutex_unlock(&timestamp_lock);
1953 }
1954 
/*
 * Destroy every connection whose last recorded activity timestamp
 * is older than "since" (seconds, as recorded by update_timestamps()).
 * Works in batches of at most CLEANUP_SIZE per pass under
 * svc_fd_lock, sampling the timestamps under timestamp_lock;
 * fd_idx carries the scan position across passes.
 */
void
__svc_nisplus_purge_since(long since)
{
	SVCXPRT *xprt;
	SVCXPRT *dead_xprt[CLEANUP_SIZE];
	int i, fd_idx = 0, dead_idx = 0;

	if (svc_xports == NULL)
		return;
	for (;;) {
		(void) rw_wrlock(&svc_fd_lock);
		(void) mutex_lock(&timestamp_lock);
		for (dead_idx = 0; fd_idx < svc_max_pollfd; fd_idx++) {
			if ((xprt = svc_xports[fd_idx]) == NULL) {
				continue;
			}
			/* LINTED pointer cast */
			if (svc_type(xprt) != SVC_CONNECTION) {
				continue;
			}
			/* Fds beyond the table have no timestamps yet. */
			if (fd_idx >= ntimestamps) {
				break;
			}
			/* A zero timestamp means "never active"; skip. */
			if (timestamps[fd_idx] &&
			    timestamps[fd_idx] < since) {
				dead_xprt[dead_idx++] = xprt;
				if (dead_idx >= CLEANUP_SIZE)
					break;
			}
		}
		(void) mutex_unlock(&timestamp_lock);

		for (i = 0; i < dead_idx; i++) {
			/* Still holding svc_fd_lock */
			_svc_vc_destroy_private(dead_xprt[i], FALSE);
		}
		(void) rw_unlock(&svc_fd_lock);
		if (fd_idx++ >= svc_max_pollfd)
			return;
	}
}
1996 
1997 /*
1998  * dup cache wrapper functions for vc requests. The set of dup
1999  * functions were written with the view that they may be expanded
2000  * during creation of a generic svc_vc_enablecache routine
2001  * which would have a size based cache, rather than a time based cache.
2002  * The real work is done in generic svc.c
2003  */
2004 bool_t
2005 __svc_vc_dupcache_init(SVCXPRT *xprt, void *condition, int basis)
2006 {
2007 	return (__svc_dupcache_init(condition, basis,
2008 		/* LINTED pointer alignment */
2009 		&(((struct cf_rendezvous *)xprt->xp_p1)->cf_cache)));
2010 }
2011 
2012 int
2013 __svc_vc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz)
2014 {
2015 	return (__svc_dup(req, resp_buf, resp_bufsz,
2016 		/* LINTED pointer alignment */
2017 		((struct cf_conn *)req->rq_xprt->xp_p1)->cf_cache));
2018 }
2019 
2020 int
2021 __svc_vc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2022 				int status)
2023 {
2024 	return (__svc_dupdone(req, resp_buf, resp_bufsz, status,
2025 		/* LINTED pointer alignment */
2026 		((struct cf_conn *)req->rq_xprt->xp_p1)->cf_cache));
2027 }
2028