xref: /titanic_52/usr/src/lib/libnsl/rpc/clnt_vc.c (revision 7e89328164e4b89906924cf4e0387ea13a77631b)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
58f379ff8Sevanl  * Common Development and Distribution License (the "License").
68f379ff8Sevanl  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
2161961e0fSrobinson 
227c478bd9Sstevel@tonic-gate /*
23*7e893281SMarcel Telka  * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
24*7e893281SMarcel Telka  */
25*7e893281SMarcel Telka 
26*7e893281SMarcel Telka /*
27cb620785Sraf  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
287c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
297c478bd9Sstevel@tonic-gate  */
30cb620785Sraf 
317c478bd9Sstevel@tonic-gate /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
327c478bd9Sstevel@tonic-gate /* All Rights Reserved */
337c478bd9Sstevel@tonic-gate /*
347c478bd9Sstevel@tonic-gate  * Portions of this source code were derived from Berkeley
357c478bd9Sstevel@tonic-gate  * 4.3 BSD under license from the Regents of the University of
367c478bd9Sstevel@tonic-gate  * California.
377c478bd9Sstevel@tonic-gate  */
387c478bd9Sstevel@tonic-gate 
397c478bd9Sstevel@tonic-gate /*
407c478bd9Sstevel@tonic-gate  * clnt_vc.c
417c478bd9Sstevel@tonic-gate  *
427c478bd9Sstevel@tonic-gate  * Implements a connectionful client side RPC.
437c478bd9Sstevel@tonic-gate  *
447c478bd9Sstevel@tonic-gate  * Connectionful RPC supports 'batched calls'.
457c478bd9Sstevel@tonic-gate  * A sequence of calls may be batched-up in a send buffer. The rpc call
467c478bd9Sstevel@tonic-gate  * returns immediately to the client even though the call was not necessarily
477c478bd9Sstevel@tonic-gate  * sent. The batching occurs if the results' xdr routine is NULL (0) AND
487c478bd9Sstevel@tonic-gate  * the rpc timeout value is zero (see clnt.h, rpc).
497c478bd9Sstevel@tonic-gate  *
507c478bd9Sstevel@tonic-gate  * Clients should NOT casually batch calls that in fact return results; that
517c478bd9Sstevel@tonic-gate  * is, the server side should be aware that a call is batched and not produce
527c478bd9Sstevel@tonic-gate  * any return message. Batched calls that produce many result messages can
537c478bd9Sstevel@tonic-gate  * deadlock (netlock) the client and the server....
547c478bd9Sstevel@tonic-gate  */
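
/*
 * Illustrative sketch (not part of this file): how a caller might batch
 * requests against a connection-oriented handle.  The procedure number
 * and argument XDR routine are hypothetical; only the NULL results
 * routine and the zero timeout are what actually trigger batching here.
 *
 *	struct timeval zero = { 0, 0 };
 *	struct timeval wait = { 25, 0 };
 *
 *	(void) clnt_call(clnt, LOGPROC, xdr_my_args, (caddr_t)&args,
 *	    (xdrproc_t)0, NULL, zero);	(queued, not necessarily sent)
 *	(void) clnt_call(clnt, NULLPROC, xdr_void, NULL, xdr_void, NULL,
 *	    wait);			(flushes the pending batch)
 */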
557c478bd9Sstevel@tonic-gate 
567c478bd9Sstevel@tonic-gate 
577c478bd9Sstevel@tonic-gate #include "mt.h"
587c478bd9Sstevel@tonic-gate #include "rpc_mt.h"
597c478bd9Sstevel@tonic-gate #include <assert.h>
607c478bd9Sstevel@tonic-gate #include <rpc/rpc.h>
617c478bd9Sstevel@tonic-gate #include <errno.h>
627c478bd9Sstevel@tonic-gate #include <sys/byteorder.h>
637c478bd9Sstevel@tonic-gate #include <sys/mkdev.h>
647c478bd9Sstevel@tonic-gate #include <sys/poll.h>
657c478bd9Sstevel@tonic-gate #include <syslog.h>
667c478bd9Sstevel@tonic-gate #include <stdlib.h>
677c478bd9Sstevel@tonic-gate #include <unistd.h>
687c478bd9Sstevel@tonic-gate #include <netinet/tcp.h>
69*7e893281SMarcel Telka #include <limits.h>
707c478bd9Sstevel@tonic-gate 
717c478bd9Sstevel@tonic-gate #define	MCALL_MSG_SIZE 24
72*7e893281SMarcel Telka #define	SECS_TO_NS(x)	((hrtime_t)(x) * 1000 * 1000 * 1000)
73*7e893281SMarcel Telka #define	MSECS_TO_NS(x)	((hrtime_t)(x) * 1000 * 1000)
74*7e893281SMarcel Telka #define	USECS_TO_NS(x)	((hrtime_t)(x) * 1000)
75*7e893281SMarcel Telka #define	NSECS_TO_MS(x)	((x) / 1000 / 1000)
767c478bd9Sstevel@tonic-gate #ifndef MIN
777c478bd9Sstevel@tonic-gate #define	MIN(a, b)	(((a) < (b)) ? (a) : (b))
787c478bd9Sstevel@tonic-gate #endif
797c478bd9Sstevel@tonic-gate 
8061961e0fSrobinson extern int __rpc_timeval_to_msec(struct timeval *);
817c478bd9Sstevel@tonic-gate extern int __rpc_compress_pollfd(int, pollfd_t *, pollfd_t *);
8261961e0fSrobinson extern bool_t xdr_opaque_auth(XDR *, struct opaque_auth *);
8361961e0fSrobinson extern bool_t __rpc_gss_wrap(AUTH *, char *, uint_t, XDR *, bool_t (*)(),
8461961e0fSrobinson 								caddr_t);
8561961e0fSrobinson extern bool_t __rpc_gss_unwrap(AUTH *, XDR *, bool_t (*)(), caddr_t);
8661961e0fSrobinson extern CLIENT *_clnt_vc_create_timed(int, struct netbuf *, rpcprog_t,
877c478bd9Sstevel@tonic-gate 		rpcvers_t, uint_t, uint_t, const struct timeval *);
887c478bd9Sstevel@tonic-gate 
8961961e0fSrobinson static struct clnt_ops	*clnt_vc_ops(void);
907c478bd9Sstevel@tonic-gate static int		read_vc(void *, caddr_t, int);
917c478bd9Sstevel@tonic-gate static int		write_vc(void *, caddr_t, int);
9261961e0fSrobinson static int		t_rcvall(int, char *, int);
9361961e0fSrobinson static bool_t		time_not_ok(struct timeval *);
947c478bd9Sstevel@tonic-gate 
957c478bd9Sstevel@tonic-gate struct ct_data;
9661961e0fSrobinson static bool_t		set_up_connection(int, struct netbuf *,
9761961e0fSrobinson 				struct ct_data *, const struct timeval *);
9861961e0fSrobinson static bool_t		set_io_mode(struct ct_data *, int);
997c478bd9Sstevel@tonic-gate 
1007c478bd9Sstevel@tonic-gate /*
1017c478bd9Sstevel@tonic-gate  * Lock table handle used by various MT sync. routines
1027c478bd9Sstevel@tonic-gate  */
1037c478bd9Sstevel@tonic-gate static mutex_t	vctbl_lock = DEFAULTMUTEX;
1047c478bd9Sstevel@tonic-gate static void	*vctbl = NULL;
1057c478bd9Sstevel@tonic-gate 
1067c478bd9Sstevel@tonic-gate static const char clnt_vc_errstr[] = "%s : %s";
1077c478bd9Sstevel@tonic-gate static const char clnt_vc_str[] = "clnt_vc_create";
1087c478bd9Sstevel@tonic-gate static const char clnt_read_vc_str[] = "read_vc";
1097c478bd9Sstevel@tonic-gate static const char __no_mem_str[] = "out of memory";
1107c478bd9Sstevel@tonic-gate static const char no_fcntl_getfl_str[] = "could not get status flags and modes";
1117c478bd9Sstevel@tonic-gate static const char no_nonblock_str[] = "could not set transport blocking mode";
1127c478bd9Sstevel@tonic-gate 
1137c478bd9Sstevel@tonic-gate /*
1147c478bd9Sstevel@tonic-gate  * Private data structure
1157c478bd9Sstevel@tonic-gate  */
1167c478bd9Sstevel@tonic-gate struct ct_data {
1177c478bd9Sstevel@tonic-gate 	int		ct_fd;		/* connection's fd */
1187c478bd9Sstevel@tonic-gate 	bool_t		ct_closeit;	/* close it on destroy */
1197c478bd9Sstevel@tonic-gate 	int		ct_tsdu;	/* size of tsdu */
1207c478bd9Sstevel@tonic-gate 	int		ct_wait;	/* wait interval in milliseconds */
1217c478bd9Sstevel@tonic-gate 	bool_t		ct_waitset;	/* wait set by clnt_control? */
1227c478bd9Sstevel@tonic-gate 	struct netbuf	ct_addr;	/* remote addr */
1237c478bd9Sstevel@tonic-gate 	struct rpc_err	ct_error;
1247c478bd9Sstevel@tonic-gate 	char		ct_mcall[MCALL_MSG_SIZE]; /* marshalled callmsg */
1257c478bd9Sstevel@tonic-gate 	uint_t		ct_mpos;	/* pos after marshal */
1267c478bd9Sstevel@tonic-gate 	XDR		ct_xdrs;	/* XDR stream */
1277c478bd9Sstevel@tonic-gate 
1287c478bd9Sstevel@tonic-gate 	/* NON STANDARD INFO - 00-08-31 */
1297c478bd9Sstevel@tonic-gate 	bool_t		ct_is_oneway; /* True if the current call is oneway. */
1307c478bd9Sstevel@tonic-gate 	bool_t		ct_is_blocking;
1317c478bd9Sstevel@tonic-gate 	ushort_t	ct_io_mode;
1327c478bd9Sstevel@tonic-gate 	ushort_t	ct_blocking_mode;
1337c478bd9Sstevel@tonic-gate 	uint_t		ct_bufferSize; /* Total size of the buffer. */
1347c478bd9Sstevel@tonic-gate 	uint_t		ct_bufferPendingSize; /* Size of unsent data. */
1357c478bd9Sstevel@tonic-gate 	char 		*ct_buffer; /* Pointer to the buffer. */
1367c478bd9Sstevel@tonic-gate 	char 		*ct_bufferWritePtr; /* Ptr to the first free byte. */
1377c478bd9Sstevel@tonic-gate 	char 		*ct_bufferReadPtr; /* Ptr to the first byte of data. */
1387c478bd9Sstevel@tonic-gate };
1397c478bd9Sstevel@tonic-gate 
1407c478bd9Sstevel@tonic-gate struct nb_reg_node {
1417c478bd9Sstevel@tonic-gate 	struct nb_reg_node *next;
1427c478bd9Sstevel@tonic-gate 	struct ct_data *ct;
1437c478bd9Sstevel@tonic-gate };
1447c478bd9Sstevel@tonic-gate 
1457c478bd9Sstevel@tonic-gate static struct nb_reg_node *nb_first = (struct nb_reg_node *)&nb_first;
1467c478bd9Sstevel@tonic-gate static struct nb_reg_node *nb_free  = (struct nb_reg_node *)&nb_free;
1477c478bd9Sstevel@tonic-gate 
1487c478bd9Sstevel@tonic-gate static bool_t exit_handler_set = FALSE;
1497c478bd9Sstevel@tonic-gate 
1507c478bd9Sstevel@tonic-gate static mutex_t nb_list_mutex = DEFAULTMUTEX;
1517c478bd9Sstevel@tonic-gate 
1527c478bd9Sstevel@tonic-gate 
1537c478bd9Sstevel@tonic-gate /* Define some macros to manage the linked list. */
1547c478bd9Sstevel@tonic-gate #define	LIST_ISEMPTY(l) (l == (struct nb_reg_node *)&l)
1557c478bd9Sstevel@tonic-gate #define	LIST_CLR(l) (l = (struct nb_reg_node *)&l)
1567c478bd9Sstevel@tonic-gate #define	LIST_ADD(l, node) (node->next = l->next, l = node)
1577c478bd9Sstevel@tonic-gate #define	LIST_EXTRACT(l, node) (node = l, l = l->next)
1587c478bd9Sstevel@tonic-gate #define	LIST_FOR_EACH(l, node) \
1597c478bd9Sstevel@tonic-gate 	for (node = l; node != (struct nb_reg_node *)&l; node = node->next)
1607c478bd9Sstevel@tonic-gate 
1617c478bd9Sstevel@tonic-gate 
1627c478bd9Sstevel@tonic-gate /* Default size of the IO buffer used in non blocking mode */
1637c478bd9Sstevel@tonic-gate #define	DEFAULT_PENDING_ZONE_MAX_SIZE (16*1024)
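
/*
 * Illustrative sketch (not part of this file): how a caller might switch
 * a handle into the non-blocking I/O mode implemented below and later
 * force the pending buffer out.  "clnt" is assumed to be an existing
 * connection-oriented CLIENT handle.
 *
 *	int mode = RPC_CL_NONBLOCKING;
 *	int flush = RPC_CL_BLOCKING_FLUSH;
 *
 *	if (clnt_control(clnt, CLSET_IO_MODE, (char *)&mode) == TRUE) {
 *		(issue one-way calls; they are buffered, not sent)
 *		(void) clnt_control(clnt, CLFLUSH, (char *)&flush);
 *	}
 */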
1647c478bd9Sstevel@tonic-gate 
16561961e0fSrobinson static int nb_send(struct ct_data *, void *, unsigned int);
16661961e0fSrobinson static int do_flush(struct ct_data *, uint_t);
16761961e0fSrobinson static bool_t set_flush_mode(struct ct_data *, int);
16861961e0fSrobinson static bool_t set_blocking_connection(struct ct_data *, bool_t);
1697c478bd9Sstevel@tonic-gate 
17061961e0fSrobinson static int register_nb(struct ct_data *);
17161961e0fSrobinson static int unregister_nb(struct ct_data *);
1727c478bd9Sstevel@tonic-gate 
1737c478bd9Sstevel@tonic-gate 
1747c478bd9Sstevel@tonic-gate /*
1757c478bd9Sstevel@tonic-gate  * Change the mode of the underlying fd.
1767c478bd9Sstevel@tonic-gate  */
1777c478bd9Sstevel@tonic-gate static bool_t
1787c478bd9Sstevel@tonic-gate set_blocking_connection(struct ct_data *ct, bool_t blocking)
1797c478bd9Sstevel@tonic-gate {
1807c478bd9Sstevel@tonic-gate 	int flag;
1817c478bd9Sstevel@tonic-gate 
1827c478bd9Sstevel@tonic-gate 	/*
1837c478bd9Sstevel@tonic-gate 	 * If the underlying fd is already in the required mode,
1847c478bd9Sstevel@tonic-gate 	 * avoid the syscall.
1857c478bd9Sstevel@tonic-gate 	 */
1867c478bd9Sstevel@tonic-gate 	if (ct->ct_is_blocking == blocking)
1877c478bd9Sstevel@tonic-gate 		return (TRUE);
1887c478bd9Sstevel@tonic-gate 
1897c478bd9Sstevel@tonic-gate 	if ((flag = fcntl(ct->ct_fd, F_GETFL, 0)) < 0) {
1907c478bd9Sstevel@tonic-gate 		(void) syslog(LOG_ERR, "set_blocking_connection : %s",
1917c478bd9Sstevel@tonic-gate 		    no_fcntl_getfl_str);
1927c478bd9Sstevel@tonic-gate 		return (FALSE);
1937c478bd9Sstevel@tonic-gate 	}
1947c478bd9Sstevel@tonic-gate 
1957c478bd9Sstevel@tonic-gate 	flag = blocking? flag&~O_NONBLOCK : flag|O_NONBLOCK;
1967c478bd9Sstevel@tonic-gate 	if (fcntl(ct->ct_fd, F_SETFL, flag) != 0) {
1977c478bd9Sstevel@tonic-gate 		(void) syslog(LOG_ERR, "set_blocking_connection : %s",
1987c478bd9Sstevel@tonic-gate 		    no_nonblock_str);
1997c478bd9Sstevel@tonic-gate 		return (FALSE);
2007c478bd9Sstevel@tonic-gate 	}
2017c478bd9Sstevel@tonic-gate 	ct->ct_is_blocking = blocking;
2027c478bd9Sstevel@tonic-gate 	return (TRUE);
2037c478bd9Sstevel@tonic-gate }
2047c478bd9Sstevel@tonic-gate 
2057c478bd9Sstevel@tonic-gate /*
2067c478bd9Sstevel@tonic-gate  * Create a client handle for a connection.
2077c478bd9Sstevel@tonic-gate  * Default options are set, which the user can change using clnt_control().
2087c478bd9Sstevel@tonic-gate  * The rpc/vc package does buffering similar to stdio, so the client
2097c478bd9Sstevel@tonic-gate  * must pick send and receive buffer sizes, 0 => use the default.
2107c478bd9Sstevel@tonic-gate  * NB: fd is copied into a private area.
2117c478bd9Sstevel@tonic-gate  * NB: The rpch->cl_auth is set to null authentication.  The caller may wish
2127c478bd9Sstevel@tonic-gate  * to set this to something more useful.
2137c478bd9Sstevel@tonic-gate  *
2147c478bd9Sstevel@tonic-gate  * fd should be open and bound.
2157c478bd9Sstevel@tonic-gate  */
2167c478bd9Sstevel@tonic-gate CLIENT *
21761961e0fSrobinson clnt_vc_create(const int fd, struct netbuf *svcaddr, const rpcprog_t prog,
21861961e0fSrobinson 	const rpcvers_t vers, const uint_t sendsz, const uint_t recvsz)
2197c478bd9Sstevel@tonic-gate {
2207c478bd9Sstevel@tonic-gate 	return (_clnt_vc_create_timed(fd, svcaddr, prog, vers, sendsz,
2217c478bd9Sstevel@tonic-gate 	    recvsz, NULL));
2227c478bd9Sstevel@tonic-gate }
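
/*
 * Illustrative sketch (not part of this file): typical use of
 * clnt_vc_create() with an open, bound (and typically connected) TLI
 * endpoint.  The fd, address, program and version values are
 * hypothetical.
 *
 *	CLIENT *clnt;
 *	struct timeval tv = { 25, 0 };
 *
 *	clnt = clnt_vc_create(fd, &svcaddr, MYPROG, MYVERS, 0, 0);
 *	if (clnt == NULL) {
 *		clnt_pcreateerror("clnt_vc_create");
 *		return;
 *	}
 *	(void) clnt_control(clnt, CLSET_TIMEOUT, (char *)&tv);
 *	(clnt_call() as usual, then)
 *	clnt_destroy(clnt);
 */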
2237c478bd9Sstevel@tonic-gate 
2247c478bd9Sstevel@tonic-gate /*
2257c478bd9Sstevel@tonic-gate  * This has the same definition as clnt_vc_create(), except it
2267c478bd9Sstevel@tonic-gate  * takes an additional parameter - a pointer to a timeval structure.
2277c478bd9Sstevel@tonic-gate  *
2287c478bd9Sstevel@tonic-gate  * Not a public interface. This is for clnt_create_timed,
2297c478bd9Sstevel@tonic-gate  * clnt_create_vers_timed, clnt_tp_create_timed to pass down the timeout
2307c478bd9Sstevel@tonic-gate  * value to control a tcp connection attempt.
2317c478bd9Sstevel@tonic-gate  * (for bug 4049792: clnt_create_timed does not time out)
2327c478bd9Sstevel@tonic-gate  *
2337c478bd9Sstevel@tonic-gate  * If tp is NULL, use the default timeout to set up the connection.
2347c478bd9Sstevel@tonic-gate  */
2357c478bd9Sstevel@tonic-gate CLIENT *
23661961e0fSrobinson _clnt_vc_create_timed(int fd, struct netbuf *svcaddr, rpcprog_t prog,
23761961e0fSrobinson 	rpcvers_t vers, uint_t sendsz, uint_t recvsz, const struct timeval *tp)
2387c478bd9Sstevel@tonic-gate {
2397c478bd9Sstevel@tonic-gate 	CLIENT *cl;			/* client handle */
2407c478bd9Sstevel@tonic-gate 	struct ct_data *ct;		/* private data */
2417c478bd9Sstevel@tonic-gate 	struct timeval now;
2427c478bd9Sstevel@tonic-gate 	struct rpc_msg call_msg;
2437c478bd9Sstevel@tonic-gate 	struct t_info tinfo;
2447c478bd9Sstevel@tonic-gate 	int flag;
2457c478bd9Sstevel@tonic-gate 
24661961e0fSrobinson 	cl = malloc(sizeof (*cl));
247dff8cdb7SMarcel Telka 	if ((ct = malloc(sizeof (*ct))) != NULL)
248dff8cdb7SMarcel Telka 		ct->ct_addr.buf = NULL;
249dff8cdb7SMarcel Telka 
25061961e0fSrobinson 	if ((cl == NULL) || (ct == NULL)) {
2517c478bd9Sstevel@tonic-gate 		(void) syslog(LOG_ERR, clnt_vc_errstr,
2527c478bd9Sstevel@tonic-gate 		    clnt_vc_str, __no_mem_str);
2537c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
2547c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_error.re_errno = errno;
2557c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_error.re_terrno = 0;
2567c478bd9Sstevel@tonic-gate 		goto err;
2577c478bd9Sstevel@tonic-gate 	}
2587c478bd9Sstevel@tonic-gate 
2598f379ff8Sevanl 	/*
2608f379ff8Sevanl 	 * The only use of vctbl_lock is for serializing the creation of
2618f379ff8Sevanl 	 * vctbl. Once created the lock needs to be released so we don't
2628f379ff8Sevanl 	 * hold it across the set_up_connection() call and end up with a
2638f379ff8Sevanl 	 * bunch of threads stuck waiting for the mutex.
2648f379ff8Sevanl 	 */
2657c478bd9Sstevel@tonic-gate 	sig_mutex_lock(&vctbl_lock);
2667c478bd9Sstevel@tonic-gate 
2677c478bd9Sstevel@tonic-gate 	if ((vctbl == NULL) && ((vctbl = rpc_fd_init()) == NULL)) {
2687c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
2697c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_error.re_errno = errno;
2707c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_error.re_terrno = 0;
2717c478bd9Sstevel@tonic-gate 		sig_mutex_unlock(&vctbl_lock);
2727c478bd9Sstevel@tonic-gate 		goto err;
2737c478bd9Sstevel@tonic-gate 	}
2747c478bd9Sstevel@tonic-gate 
2758f379ff8Sevanl 	sig_mutex_unlock(&vctbl_lock);
2768f379ff8Sevanl 
2777c478bd9Sstevel@tonic-gate 	ct->ct_io_mode = RPC_CL_BLOCKING;
2787c478bd9Sstevel@tonic-gate 	ct->ct_blocking_mode = RPC_CL_BLOCKING_FLUSH;
2797c478bd9Sstevel@tonic-gate 
2807c478bd9Sstevel@tonic-gate 	ct->ct_buffer = NULL;	/* We allocate the buffer when needed. */
2817c478bd9Sstevel@tonic-gate 	ct->ct_bufferSize = DEFAULT_PENDING_ZONE_MAX_SIZE;
2827c478bd9Sstevel@tonic-gate 	ct->ct_bufferPendingSize = 0;
2837c478bd9Sstevel@tonic-gate 	ct->ct_bufferWritePtr = NULL;
2847c478bd9Sstevel@tonic-gate 	ct->ct_bufferReadPtr = NULL;
2857c478bd9Sstevel@tonic-gate 
2867c478bd9Sstevel@tonic-gate 	/* Check the current state of the fd. */
2877c478bd9Sstevel@tonic-gate 	if ((flag = fcntl(fd, F_GETFL, 0)) < 0) {
2887c478bd9Sstevel@tonic-gate 		(void) syslog(LOG_ERR, "_clnt_vc_create_timed : %s",
2897c478bd9Sstevel@tonic-gate 		    no_fcntl_getfl_str);
2907c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
2917c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_error.re_terrno = errno;
2927c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_error.re_errno = 0;
2937c478bd9Sstevel@tonic-gate 		goto err;
2947c478bd9Sstevel@tonic-gate 	}
2957c478bd9Sstevel@tonic-gate 	ct->ct_is_blocking = flag & O_NONBLOCK ? FALSE : TRUE;
2967c478bd9Sstevel@tonic-gate 
2977c478bd9Sstevel@tonic-gate 	if (set_up_connection(fd, svcaddr, ct, tp) == FALSE) {
2987c478bd9Sstevel@tonic-gate 		goto err;
2997c478bd9Sstevel@tonic-gate 	}
3007c478bd9Sstevel@tonic-gate 
3017c478bd9Sstevel@tonic-gate 	/*
3027c478bd9Sstevel@tonic-gate 	 * Set up other members of private data struct
3037c478bd9Sstevel@tonic-gate 	 */
3047c478bd9Sstevel@tonic-gate 	ct->ct_fd = fd;
3057c478bd9Sstevel@tonic-gate 	/*
3067c478bd9Sstevel@tonic-gate 	 * The actual value will be set by clnt_call or clnt_control
3077c478bd9Sstevel@tonic-gate 	 */
3087c478bd9Sstevel@tonic-gate 	ct->ct_wait = 30000;
3097c478bd9Sstevel@tonic-gate 	ct->ct_waitset = FALSE;
3107c478bd9Sstevel@tonic-gate 	/*
3117c478bd9Sstevel@tonic-gate 	 * By default, closeit is always FALSE. It is the user's responsibility
3127c478bd9Sstevel@tonic-gate 	 * to do a t_close on the fd, or the user may use clnt_control
3137c478bd9Sstevel@tonic-gate 	 * to let clnt_destroy do it.
3147c478bd9Sstevel@tonic-gate 	 */
3157c478bd9Sstevel@tonic-gate 	ct->ct_closeit = FALSE;
3167c478bd9Sstevel@tonic-gate 
3177c478bd9Sstevel@tonic-gate 	/*
3187c478bd9Sstevel@tonic-gate 	 * Initialize call message
3197c478bd9Sstevel@tonic-gate 	 */
3207c478bd9Sstevel@tonic-gate 	(void) gettimeofday(&now, (struct timezone *)0);
3217c478bd9Sstevel@tonic-gate 	call_msg.rm_xid = getpid() ^ now.tv_sec ^ now.tv_usec;
3227c478bd9Sstevel@tonic-gate 	call_msg.rm_call.cb_prog = prog;
3237c478bd9Sstevel@tonic-gate 	call_msg.rm_call.cb_vers = vers;
3247c478bd9Sstevel@tonic-gate 
3257c478bd9Sstevel@tonic-gate 	/*
3267c478bd9Sstevel@tonic-gate 	 * pre-serialize the static part of the call msg and stash it away
3277c478bd9Sstevel@tonic-gate 	 */
3287c478bd9Sstevel@tonic-gate 	xdrmem_create(&(ct->ct_xdrs), ct->ct_mcall, MCALL_MSG_SIZE, XDR_ENCODE);
3297c478bd9Sstevel@tonic-gate 	if (!xdr_callhdr(&(ct->ct_xdrs), &call_msg)) {
3307c478bd9Sstevel@tonic-gate 		goto err;
3317c478bd9Sstevel@tonic-gate 	}
3327c478bd9Sstevel@tonic-gate 	ct->ct_mpos = XDR_GETPOS(&(ct->ct_xdrs));
3337c478bd9Sstevel@tonic-gate 	XDR_DESTROY(&(ct->ct_xdrs));
3347c478bd9Sstevel@tonic-gate 
3357c478bd9Sstevel@tonic-gate 	if (t_getinfo(fd, &tinfo) == -1) {
3367c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_stat = RPC_TLIERROR;
3377c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_error.re_terrno = t_errno;
3387c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_error.re_errno = 0;
3397c478bd9Sstevel@tonic-gate 		goto err;
3407c478bd9Sstevel@tonic-gate 	}
3417c478bd9Sstevel@tonic-gate 	/*
3427c478bd9Sstevel@tonic-gate 	 * Find the receive and the send size
3437c478bd9Sstevel@tonic-gate 	 */
3447c478bd9Sstevel@tonic-gate 	sendsz = __rpc_get_t_size((int)sendsz, tinfo.tsdu);
3457c478bd9Sstevel@tonic-gate 	recvsz = __rpc_get_t_size((int)recvsz, tinfo.tsdu);
3467c478bd9Sstevel@tonic-gate 	if ((sendsz == 0) || (recvsz == 0)) {
3477c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_stat = RPC_TLIERROR;
3487c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_error.re_terrno = 0;
3497c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_error.re_errno = 0;
3507c478bd9Sstevel@tonic-gate 		goto err;
3517c478bd9Sstevel@tonic-gate 	}
3527c478bd9Sstevel@tonic-gate 	ct->ct_tsdu = tinfo.tsdu;
3537c478bd9Sstevel@tonic-gate 	/*
3547c478bd9Sstevel@tonic-gate 	 * Create a client handle which uses xdrrec for serialization
3557c478bd9Sstevel@tonic-gate 	 * and authnone for authentication.
3567c478bd9Sstevel@tonic-gate 	 */
3577c478bd9Sstevel@tonic-gate 	ct->ct_xdrs.x_ops = NULL;
3587c478bd9Sstevel@tonic-gate 	xdrrec_create(&(ct->ct_xdrs), sendsz, recvsz, (caddr_t)ct,
3597c478bd9Sstevel@tonic-gate 	    read_vc, write_vc);
3607c478bd9Sstevel@tonic-gate 	if (ct->ct_xdrs.x_ops == NULL) {
3617c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
3627c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_error.re_terrno = 0;
3637c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_error.re_errno = ENOMEM;
3647c478bd9Sstevel@tonic-gate 		goto err;
3657c478bd9Sstevel@tonic-gate 	}
3667c478bd9Sstevel@tonic-gate 	cl->cl_ops = clnt_vc_ops();
3677c478bd9Sstevel@tonic-gate 	cl->cl_private = (caddr_t)ct;
3687c478bd9Sstevel@tonic-gate 	cl->cl_auth = authnone_create();
36961961e0fSrobinson 	cl->cl_tp = NULL;
37061961e0fSrobinson 	cl->cl_netid = NULL;
3717c478bd9Sstevel@tonic-gate 	return (cl);
3727c478bd9Sstevel@tonic-gate 
3737c478bd9Sstevel@tonic-gate err:
3747c478bd9Sstevel@tonic-gate 	if (ct) {
37561961e0fSrobinson 		free(ct->ct_addr.buf);
37661961e0fSrobinson 		free(ct);
3777c478bd9Sstevel@tonic-gate 	}
37861961e0fSrobinson 	free(cl);
379dff8cdb7SMarcel Telka 
38061961e0fSrobinson 	return (NULL);
3817c478bd9Sstevel@tonic-gate }
3827c478bd9Sstevel@tonic-gate 
3837c478bd9Sstevel@tonic-gate #define	TCPOPT_BUFSIZE 128
3847c478bd9Sstevel@tonic-gate 
3857c478bd9Sstevel@tonic-gate /*
3867c478bd9Sstevel@tonic-gate  * Set tcp connection timeout value.
3877c478bd9Sstevel@tonic-gate  * Return 0 for success, -1 for failure.
3887c478bd9Sstevel@tonic-gate  */
3897c478bd9Sstevel@tonic-gate static int
3907c478bd9Sstevel@tonic-gate _set_tcp_conntime(int fd, int optval)
3917c478bd9Sstevel@tonic-gate {
3927c478bd9Sstevel@tonic-gate 	struct t_optmgmt req, res;
3937c478bd9Sstevel@tonic-gate 	struct opthdr *opt;
3947c478bd9Sstevel@tonic-gate 	int *ip;
3957c478bd9Sstevel@tonic-gate 	char buf[TCPOPT_BUFSIZE];
3967c478bd9Sstevel@tonic-gate 
39761961e0fSrobinson 	/* LINTED pointer cast */
3987c478bd9Sstevel@tonic-gate 	opt = (struct opthdr *)buf;
3997c478bd9Sstevel@tonic-gate 	opt->level =  IPPROTO_TCP;
4007c478bd9Sstevel@tonic-gate 	opt->name = TCP_CONN_ABORT_THRESHOLD;
4017c478bd9Sstevel@tonic-gate 	opt->len = sizeof (int);
4027c478bd9Sstevel@tonic-gate 
4037c478bd9Sstevel@tonic-gate 	req.flags = T_NEGOTIATE;
4047c478bd9Sstevel@tonic-gate 	req.opt.len = sizeof (struct opthdr) + opt->len;
4057c478bd9Sstevel@tonic-gate 	req.opt.buf = (char *)opt;
40661961e0fSrobinson 	/* LINTED pointer cast */
4077c478bd9Sstevel@tonic-gate 	ip = (int *)((char *)buf + sizeof (struct opthdr));
4087c478bd9Sstevel@tonic-gate 	*ip = optval;
4097c478bd9Sstevel@tonic-gate 
4107c478bd9Sstevel@tonic-gate 	res.flags = 0;
4117c478bd9Sstevel@tonic-gate 	res.opt.buf = (char *)buf;
4127c478bd9Sstevel@tonic-gate 	res.opt.maxlen = sizeof (buf);
4137c478bd9Sstevel@tonic-gate 	if (t_optmgmt(fd, &req, &res) < 0 || res.flags != T_SUCCESS) {
4147c478bd9Sstevel@tonic-gate 		return (-1);
4157c478bd9Sstevel@tonic-gate 	}
4167c478bd9Sstevel@tonic-gate 	return (0);
4177c478bd9Sstevel@tonic-gate }
4187c478bd9Sstevel@tonic-gate 
4197c478bd9Sstevel@tonic-gate /*
4207c478bd9Sstevel@tonic-gate  * Get current tcp connection timeout value.
421*7e893281SMarcel Telka  * Return the timeout in milliseconds, or -1 for failure.
4227c478bd9Sstevel@tonic-gate  */
4237c478bd9Sstevel@tonic-gate static int
4247c478bd9Sstevel@tonic-gate _get_tcp_conntime(int fd)
4257c478bd9Sstevel@tonic-gate {
4267c478bd9Sstevel@tonic-gate 	struct t_optmgmt req, res;
4277c478bd9Sstevel@tonic-gate 	struct opthdr *opt;
4287c478bd9Sstevel@tonic-gate 	int *ip, retval;
4297c478bd9Sstevel@tonic-gate 	char buf[TCPOPT_BUFSIZE];
4307c478bd9Sstevel@tonic-gate 
43161961e0fSrobinson 	/* LINTED pointer cast */
4327c478bd9Sstevel@tonic-gate 	opt = (struct opthdr *)buf;
4337c478bd9Sstevel@tonic-gate 	opt->level =  IPPROTO_TCP;
4347c478bd9Sstevel@tonic-gate 	opt->name = TCP_CONN_ABORT_THRESHOLD;
4357c478bd9Sstevel@tonic-gate 	opt->len = sizeof (int);
4367c478bd9Sstevel@tonic-gate 
4377c478bd9Sstevel@tonic-gate 	req.flags = T_CURRENT;
4387c478bd9Sstevel@tonic-gate 	req.opt.len = sizeof (struct opthdr) + opt->len;
4397c478bd9Sstevel@tonic-gate 	req.opt.buf = (char *)opt;
44061961e0fSrobinson 	/* LINTED pointer cast */
4417c478bd9Sstevel@tonic-gate 	ip = (int *)((char *)buf + sizeof (struct opthdr));
4427c478bd9Sstevel@tonic-gate 	*ip = 0;
4437c478bd9Sstevel@tonic-gate 
4447c478bd9Sstevel@tonic-gate 	res.flags = 0;
4457c478bd9Sstevel@tonic-gate 	res.opt.buf = (char *)buf;
4467c478bd9Sstevel@tonic-gate 	res.opt.maxlen = sizeof (buf);
4477c478bd9Sstevel@tonic-gate 	if (t_optmgmt(fd, &req, &res) < 0 || res.flags != T_SUCCESS) {
4487c478bd9Sstevel@tonic-gate 		return (-1);
4497c478bd9Sstevel@tonic-gate 	}
4507c478bd9Sstevel@tonic-gate 
45161961e0fSrobinson 	/* LINTED pointer cast */
4527c478bd9Sstevel@tonic-gate 	ip = (int *)((char *)buf + sizeof (struct opthdr));
4537c478bd9Sstevel@tonic-gate 	retval = *ip;
4547c478bd9Sstevel@tonic-gate 	return (retval);
4557c478bd9Sstevel@tonic-gate }
4567c478bd9Sstevel@tonic-gate 
4577c478bd9Sstevel@tonic-gate static bool_t
45861961e0fSrobinson set_up_connection(int fd, struct netbuf *svcaddr, struct ct_data *ct,
45961961e0fSrobinson     const struct timeval *tp)
4607c478bd9Sstevel@tonic-gate {
4617c478bd9Sstevel@tonic-gate 	int state;
4627c478bd9Sstevel@tonic-gate 	struct t_call sndcallstr, *rcvcall;
4637c478bd9Sstevel@tonic-gate 	int nconnect;
4647c478bd9Sstevel@tonic-gate 	bool_t connected, do_rcv_connect;
465*7e893281SMarcel Telka 	int curr_time = -1;
466*7e893281SMarcel Telka 	hrtime_t start;
467*7e893281SMarcel Telka 	hrtime_t tout;	/* timeout in nanoseconds (from tp) */
4687c478bd9Sstevel@tonic-gate 
4697c478bd9Sstevel@tonic-gate 	ct->ct_addr.len = 0;
4707c478bd9Sstevel@tonic-gate 	state = t_getstate(fd);
4717c478bd9Sstevel@tonic-gate 	if (state == -1) {
4727c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_stat = RPC_TLIERROR;
4737c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_error.re_errno = 0;
4747c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_error.re_terrno = t_errno;
4757c478bd9Sstevel@tonic-gate 		return (FALSE);
4767c478bd9Sstevel@tonic-gate 	}
4777c478bd9Sstevel@tonic-gate 
4787c478bd9Sstevel@tonic-gate 	switch (state) {
4797c478bd9Sstevel@tonic-gate 	case T_IDLE:
48061961e0fSrobinson 		if (svcaddr == NULL) {
4817c478bd9Sstevel@tonic-gate 			rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
4827c478bd9Sstevel@tonic-gate 			return (FALSE);
4837c478bd9Sstevel@tonic-gate 		}
4847c478bd9Sstevel@tonic-gate 		/*
4857c478bd9Sstevel@tonic-gate 		 * Connect only if state is IDLE and svcaddr known
4867c478bd9Sstevel@tonic-gate 		 */
4877c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
4887c478bd9Sstevel@tonic-gate 		rcvcall = (struct t_call *)t_alloc(fd, T_CALL, T_OPT|T_ADDR);
4897c478bd9Sstevel@tonic-gate 		if (rcvcall == NULL) {
4907c478bd9Sstevel@tonic-gate 			rpc_createerr.cf_stat = RPC_TLIERROR;
4917c478bd9Sstevel@tonic-gate 			rpc_createerr.cf_error.re_terrno = t_errno;
4927c478bd9Sstevel@tonic-gate 			rpc_createerr.cf_error.re_errno = errno;
4937c478bd9Sstevel@tonic-gate 			return (FALSE);
4947c478bd9Sstevel@tonic-gate 		}
4957c478bd9Sstevel@tonic-gate 		rcvcall->udata.maxlen = 0;
4967c478bd9Sstevel@tonic-gate 		sndcallstr.addr = *svcaddr;
4977c478bd9Sstevel@tonic-gate 		sndcallstr.opt.len = 0;
4987c478bd9Sstevel@tonic-gate 		sndcallstr.udata.len = 0;
4997c478bd9Sstevel@tonic-gate 		/*
5007c478bd9Sstevel@tonic-gate 		 * Even NULL could have sufficed for rcvcall, because
5017c478bd9Sstevel@tonic-gate 		 * the address returned is the same in all cases except
5027c478bd9Sstevel@tonic-gate 		 * the gateway case, and hence rcvcall is required.
5037c478bd9Sstevel@tonic-gate 		 */
5047c478bd9Sstevel@tonic-gate 		connected = FALSE;
5057c478bd9Sstevel@tonic-gate 		do_rcv_connect = FALSE;
5067c478bd9Sstevel@tonic-gate 
5077c478bd9Sstevel@tonic-gate 		/*
5087c478bd9Sstevel@tonic-gate 		 * If there is a timeout value specified, we will try to
5097c478bd9Sstevel@tonic-gate 		 * reset the tcp connection timeout. If the transport does
5107c478bd9Sstevel@tonic-gate 		 * not support the TCP_CONN_ABORT_THRESHOLD option or fails
5117c478bd9Sstevel@tonic-gate 		 * for other reason, default timeout will be used.
5127c478bd9Sstevel@tonic-gate 		 * for some other reason, the default timeout will be used.
5137c478bd9Sstevel@tonic-gate 		if (tp != NULL) {
514*7e893281SMarcel Telka 			start = gethrtime();
5157c478bd9Sstevel@tonic-gate 
5165131caa1SMarcel Telka 			/*
517*7e893281SMarcel Telka 			 * Calculate the timeout in nanoseconds
5185131caa1SMarcel Telka 			 */
519*7e893281SMarcel Telka 			tout = SECS_TO_NS(tp->tv_sec) +
520*7e893281SMarcel Telka 			    USECS_TO_NS(tp->tv_usec);
521*7e893281SMarcel Telka 			curr_time = _get_tcp_conntime(fd);
5227c478bd9Sstevel@tonic-gate 		}
5237c478bd9Sstevel@tonic-gate 
5247c478bd9Sstevel@tonic-gate 		for (nconnect = 0; nconnect < 3; nconnect++) {
525*7e893281SMarcel Telka 			if (tp != NULL) {
526*7e893281SMarcel Telka 				/*
527*7e893281SMarcel Telka 				 * Calculate the elapsed time
528*7e893281SMarcel Telka 				 */
529*7e893281SMarcel Telka 				hrtime_t elapsed = gethrtime() - start;
530*7e893281SMarcel Telka 				if (elapsed >= tout)
531*7e893281SMarcel Telka 					break;
532*7e893281SMarcel Telka 
533*7e893281SMarcel Telka 				if (curr_time != -1) {
534*7e893281SMarcel Telka 					int ms;
535*7e893281SMarcel Telka 
536*7e893281SMarcel Telka 					/*
537*7e893281SMarcel Telka 					 * TCP_CONN_ABORT_THRESHOLD takes int
538*7e893281SMarcel Telka 					 * value in milliseconds.  Make sure we
539*7e893281SMarcel Telka 					 * do not overflow.
540*7e893281SMarcel Telka 					 */
541*7e893281SMarcel Telka 					if (NSECS_TO_MS(tout - elapsed) >=
542*7e893281SMarcel Telka 					    INT_MAX) {
543*7e893281SMarcel Telka 						ms = INT_MAX;
544*7e893281SMarcel Telka 					} else {
545*7e893281SMarcel Telka 						ms = (int)
546*7e893281SMarcel Telka 						    NSECS_TO_MS(tout - elapsed);
547*7e893281SMarcel Telka 						if (MSECS_TO_NS(ms) !=
548*7e893281SMarcel Telka 						    tout - elapsed)
549*7e893281SMarcel Telka 							ms++;
550*7e893281SMarcel Telka 					}
551*7e893281SMarcel Telka 
552*7e893281SMarcel Telka 					(void) _set_tcp_conntime(fd, ms);
553*7e893281SMarcel Telka 				}
554*7e893281SMarcel Telka 			}
555*7e893281SMarcel Telka 
5567c478bd9Sstevel@tonic-gate 			if (t_connect(fd, &sndcallstr, rcvcall) != -1) {
5577c478bd9Sstevel@tonic-gate 				connected = TRUE;
5587c478bd9Sstevel@tonic-gate 				break;
5597c478bd9Sstevel@tonic-gate 			}
560aaad6470Ssk102515 			if (t_errno == TLOOK) {
561aaad6470Ssk102515 				switch (t_look(fd)) {
562aaad6470Ssk102515 				case T_DISCONNECT:
563aaad6470Ssk102515 					(void) t_rcvdis(fd, (struct
564aaad6470Ssk102515 					    t_discon *) NULL);
565aaad6470Ssk102515 					break;
566aaad6470Ssk102515 				default:
567aaad6470Ssk102515 					break;
568aaad6470Ssk102515 				}
569aaad6470Ssk102515 			} else if (!(t_errno == TSYSERR && errno == EINTR)) {
5707c478bd9Sstevel@tonic-gate 				break;
5717c478bd9Sstevel@tonic-gate 			}
5727c478bd9Sstevel@tonic-gate 			if ((state = t_getstate(fd)) == T_OUTCON) {
5737c478bd9Sstevel@tonic-gate 				do_rcv_connect = TRUE;
5747c478bd9Sstevel@tonic-gate 				break;
5757c478bd9Sstevel@tonic-gate 			}
5767c478bd9Sstevel@tonic-gate 			if (state != T_IDLE) {
5777c478bd9Sstevel@tonic-gate 				break;
5787c478bd9Sstevel@tonic-gate 			}
5797c478bd9Sstevel@tonic-gate 		}
5807c478bd9Sstevel@tonic-gate 		if (do_rcv_connect) {
5817c478bd9Sstevel@tonic-gate 			do {
5827c478bd9Sstevel@tonic-gate 				if (t_rcvconnect(fd, rcvcall) != -1) {
5837c478bd9Sstevel@tonic-gate 					connected = TRUE;
5847c478bd9Sstevel@tonic-gate 					break;
5857c478bd9Sstevel@tonic-gate 				}
5867c478bd9Sstevel@tonic-gate 			} while (t_errno == TSYSERR && errno == EINTR);
5877c478bd9Sstevel@tonic-gate 		}
5887c478bd9Sstevel@tonic-gate 
5897c478bd9Sstevel@tonic-gate 		/*
5907c478bd9Sstevel@tonic-gate 		 * Set the connection timeout back to its old value.
5917c478bd9Sstevel@tonic-gate 		 */
592*7e893281SMarcel Telka 		if (curr_time != -1) {
59361961e0fSrobinson 			(void) _set_tcp_conntime(fd, curr_time);
5947c478bd9Sstevel@tonic-gate 		}
5957c478bd9Sstevel@tonic-gate 
5967c478bd9Sstevel@tonic-gate 		if (!connected) {
5977c478bd9Sstevel@tonic-gate 			rpc_createerr.cf_stat = RPC_TLIERROR;
5987c478bd9Sstevel@tonic-gate 			rpc_createerr.cf_error.re_terrno = t_errno;
5997c478bd9Sstevel@tonic-gate 			rpc_createerr.cf_error.re_errno = errno;
6007c478bd9Sstevel@tonic-gate 			(void) t_free((char *)rcvcall, T_CALL);
6017c478bd9Sstevel@tonic-gate 			return (FALSE);
6027c478bd9Sstevel@tonic-gate 		}
6037c478bd9Sstevel@tonic-gate 
6047c478bd9Sstevel@tonic-gate 		/* Free old area if allocated */
6057c478bd9Sstevel@tonic-gate 		if (ct->ct_addr.buf)
6067c478bd9Sstevel@tonic-gate 			free(ct->ct_addr.buf);
6077c478bd9Sstevel@tonic-gate 		ct->ct_addr = rcvcall->addr;	/* To get the new address */
6087c478bd9Sstevel@tonic-gate 		/* So that address buf does not get freed */
6097c478bd9Sstevel@tonic-gate 		rcvcall->addr.buf = NULL;
6107c478bd9Sstevel@tonic-gate 		(void) t_free((char *)rcvcall, T_CALL);
6117c478bd9Sstevel@tonic-gate 		break;
6127c478bd9Sstevel@tonic-gate 	case T_DATAXFER:
6137c478bd9Sstevel@tonic-gate 	case T_OUTCON:
61461961e0fSrobinson 		if (svcaddr == NULL) {
6157c478bd9Sstevel@tonic-gate 			/*
6167c478bd9Sstevel@tonic-gate 			 * svcaddr could also be NULL in cases where the
6177c478bd9Sstevel@tonic-gate 			 * client is already bound and connected.
6187c478bd9Sstevel@tonic-gate 			 */
6197c478bd9Sstevel@tonic-gate 			ct->ct_addr.len = 0;
6207c478bd9Sstevel@tonic-gate 		} else {
6217c478bd9Sstevel@tonic-gate 			ct->ct_addr.buf = malloc(svcaddr->len);
62261961e0fSrobinson 			if (ct->ct_addr.buf == NULL) {
6237c478bd9Sstevel@tonic-gate 				(void) syslog(LOG_ERR, clnt_vc_errstr,
6247c478bd9Sstevel@tonic-gate 				    clnt_vc_str, __no_mem_str);
6257c478bd9Sstevel@tonic-gate 				rpc_createerr.cf_stat = RPC_SYSTEMERROR;
6267c478bd9Sstevel@tonic-gate 				rpc_createerr.cf_error.re_errno = errno;
6277c478bd9Sstevel@tonic-gate 				rpc_createerr.cf_error.re_terrno = 0;
6287c478bd9Sstevel@tonic-gate 				return (FALSE);
6297c478bd9Sstevel@tonic-gate 			}
6307c478bd9Sstevel@tonic-gate 			(void) memcpy(ct->ct_addr.buf, svcaddr->buf,
63161961e0fSrobinson 			    (size_t)svcaddr->len);
6327c478bd9Sstevel@tonic-gate 			ct->ct_addr.len = ct->ct_addr.maxlen = svcaddr->len;
6337c478bd9Sstevel@tonic-gate 		}
6347c478bd9Sstevel@tonic-gate 		break;
6357c478bd9Sstevel@tonic-gate 	default:
6367c478bd9Sstevel@tonic-gate 		rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
6377c478bd9Sstevel@tonic-gate 		return (FALSE);
6387c478bd9Sstevel@tonic-gate 	}
6397c478bd9Sstevel@tonic-gate 	return (TRUE);
6407c478bd9Sstevel@tonic-gate }
6417c478bd9Sstevel@tonic-gate 
6427c478bd9Sstevel@tonic-gate static enum clnt_stat
64361961e0fSrobinson clnt_vc_call(CLIENT *cl, rpcproc_t proc, xdrproc_t xdr_args, caddr_t args_ptr,
64461961e0fSrobinson 	xdrproc_t xdr_results, caddr_t results_ptr, struct timeval timeout)
6457c478bd9Sstevel@tonic-gate {
6467c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
6477c478bd9Sstevel@tonic-gate 	struct ct_data *ct = (struct ct_data *)cl->cl_private;
6487c478bd9Sstevel@tonic-gate 	XDR *xdrs = &(ct->ct_xdrs);
6497c478bd9Sstevel@tonic-gate 	struct rpc_msg reply_msg;
6507c478bd9Sstevel@tonic-gate 	uint32_t x_id;
6517c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
6527c478bd9Sstevel@tonic-gate 	uint32_t *msg_x_id = (uint32_t *)(ct->ct_mcall);	/* yuk */
6537c478bd9Sstevel@tonic-gate 	bool_t shipnow;
6547c478bd9Sstevel@tonic-gate 	int refreshes = 2;
6557c478bd9Sstevel@tonic-gate 
6567c478bd9Sstevel@tonic-gate 	if (rpc_fd_lock(vctbl, ct->ct_fd)) {
6577c478bd9Sstevel@tonic-gate 		rpc_callerr.re_status = RPC_FAILED;
6587c478bd9Sstevel@tonic-gate 		rpc_callerr.re_errno = errno;
6597c478bd9Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
6607c478bd9Sstevel@tonic-gate 		return (RPC_FAILED);
6617c478bd9Sstevel@tonic-gate 	}
6627c478bd9Sstevel@tonic-gate 
6637c478bd9Sstevel@tonic-gate 	ct->ct_is_oneway = FALSE;
6647c478bd9Sstevel@tonic-gate 	if (ct->ct_io_mode == RPC_CL_NONBLOCKING) {
6657c478bd9Sstevel@tonic-gate 		if (do_flush(ct, RPC_CL_BLOCKING_FLUSH) != 0) {
6667c478bd9Sstevel@tonic-gate 			rpc_fd_unlock(vctbl, ct->ct_fd);
6677c478bd9Sstevel@tonic-gate 			return (RPC_FAILED);  /* XXX */
6687c478bd9Sstevel@tonic-gate 		}
6697c478bd9Sstevel@tonic-gate 	}
6707c478bd9Sstevel@tonic-gate 
6717c478bd9Sstevel@tonic-gate 	if (!ct->ct_waitset) {
6727c478bd9Sstevel@tonic-gate 		/* If time is not within limits, we ignore it. */
6737c478bd9Sstevel@tonic-gate 		if (time_not_ok(&timeout) == FALSE)
6747c478bd9Sstevel@tonic-gate 			ct->ct_wait = __rpc_timeval_to_msec(&timeout);
6757c478bd9Sstevel@tonic-gate 	} else {
6767c478bd9Sstevel@tonic-gate 		timeout.tv_sec = (ct->ct_wait / 1000);
6777c478bd9Sstevel@tonic-gate 		timeout.tv_usec = (ct->ct_wait % 1000) * 1000;
6787c478bd9Sstevel@tonic-gate 	}
6797c478bd9Sstevel@tonic-gate 
6807c478bd9Sstevel@tonic-gate 	shipnow = ((xdr_results == (xdrproc_t)0) && (timeout.tv_sec == 0) &&
6817c478bd9Sstevel@tonic-gate 	    (timeout.tv_usec == 0)) ? FALSE : TRUE;
6827c478bd9Sstevel@tonic-gate call_again:
6837c478bd9Sstevel@tonic-gate 	xdrs->x_op = XDR_ENCODE;
6847c478bd9Sstevel@tonic-gate 	rpc_callerr.re_status = RPC_SUCCESS;
6857c478bd9Sstevel@tonic-gate 	/*
6867c478bd9Sstevel@tonic-gate 	 * Due to little endian byte order, it is necessary to convert to host
6877c478bd9Sstevel@tonic-gate 	 * format before decrementing xid.
6887c478bd9Sstevel@tonic-gate 	 */
6897c478bd9Sstevel@tonic-gate 	x_id = ntohl(*msg_x_id) - 1;
6907c478bd9Sstevel@tonic-gate 	*msg_x_id = htonl(x_id);
6917c478bd9Sstevel@tonic-gate 
6927c478bd9Sstevel@tonic-gate 	if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
6937c478bd9Sstevel@tonic-gate 		if ((!XDR_PUTBYTES(xdrs, ct->ct_mcall, ct->ct_mpos)) ||
6947c478bd9Sstevel@tonic-gate 		    (!XDR_PUTINT32(xdrs, (int32_t *)&proc)) ||
6957c478bd9Sstevel@tonic-gate 		    (!AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
6967c478bd9Sstevel@tonic-gate 		    (!xdr_args(xdrs, args_ptr))) {
6977c478bd9Sstevel@tonic-gate 			if (rpc_callerr.re_status == RPC_SUCCESS)
6987c478bd9Sstevel@tonic-gate 				rpc_callerr.re_status = RPC_CANTENCODEARGS;
6997c478bd9Sstevel@tonic-gate 			(void) xdrrec_endofrecord(xdrs, TRUE);
7007c478bd9Sstevel@tonic-gate 			rpc_fd_unlock(vctbl, ct->ct_fd);
7017c478bd9Sstevel@tonic-gate 			return (rpc_callerr.re_status);
7027c478bd9Sstevel@tonic-gate 		}
7037c478bd9Sstevel@tonic-gate 	} else {
7047c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
7057c478bd9Sstevel@tonic-gate 		uint32_t *u = (uint32_t *)&ct->ct_mcall[ct->ct_mpos];
7067c478bd9Sstevel@tonic-gate 		IXDR_PUT_U_INT32(u, proc);
7077c478bd9Sstevel@tonic-gate 		if (!__rpc_gss_wrap(cl->cl_auth, ct->ct_mcall,
7087c478bd9Sstevel@tonic-gate 		    ((char *)u) - ct->ct_mcall, xdrs, xdr_args, args_ptr)) {
7097c478bd9Sstevel@tonic-gate 			if (rpc_callerr.re_status == RPC_SUCCESS)
7107c478bd9Sstevel@tonic-gate 				rpc_callerr.re_status = RPC_CANTENCODEARGS;
7117c478bd9Sstevel@tonic-gate 			(void) xdrrec_endofrecord(xdrs, TRUE);
7127c478bd9Sstevel@tonic-gate 			rpc_fd_unlock(vctbl, ct->ct_fd);
7137c478bd9Sstevel@tonic-gate 			return (rpc_callerr.re_status);
7147c478bd9Sstevel@tonic-gate 		}
7157c478bd9Sstevel@tonic-gate 	}
7167c478bd9Sstevel@tonic-gate 	if (!xdrrec_endofrecord(xdrs, shipnow)) {
7177c478bd9Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
7187c478bd9Sstevel@tonic-gate 		return (rpc_callerr.re_status = RPC_CANTSEND);
7197c478bd9Sstevel@tonic-gate 	}
7207c478bd9Sstevel@tonic-gate 	if (!shipnow) {
7217c478bd9Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
7227c478bd9Sstevel@tonic-gate 		return (RPC_SUCCESS);
7237c478bd9Sstevel@tonic-gate 	}
7247c478bd9Sstevel@tonic-gate 	/*
7257c478bd9Sstevel@tonic-gate 	 * Hack to provide rpc-based message passing
7267c478bd9Sstevel@tonic-gate 	 */
7277c478bd9Sstevel@tonic-gate 	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
7287c478bd9Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
7297c478bd9Sstevel@tonic-gate 		return (rpc_callerr.re_status = RPC_TIMEDOUT);
7307c478bd9Sstevel@tonic-gate 	}
7317c478bd9Sstevel@tonic-gate 
7327c478bd9Sstevel@tonic-gate 
7337c478bd9Sstevel@tonic-gate 	/*
7347c478bd9Sstevel@tonic-gate 	 * Keep receiving until we get a valid transaction id
7357c478bd9Sstevel@tonic-gate 	 */
7367c478bd9Sstevel@tonic-gate 	xdrs->x_op = XDR_DECODE;
73761961e0fSrobinson 	for (;;) {
7387c478bd9Sstevel@tonic-gate 		reply_msg.acpted_rply.ar_verf = _null_auth;
7397c478bd9Sstevel@tonic-gate 		reply_msg.acpted_rply.ar_results.where = NULL;
7407c478bd9Sstevel@tonic-gate 		reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
7417c478bd9Sstevel@tonic-gate 		if (!xdrrec_skiprecord(xdrs)) {
7427c478bd9Sstevel@tonic-gate 			rpc_fd_unlock(vctbl, ct->ct_fd);
7437c478bd9Sstevel@tonic-gate 			return (rpc_callerr.re_status);
7447c478bd9Sstevel@tonic-gate 		}
7457c478bd9Sstevel@tonic-gate 		/* now decode and validate the response header */
7467c478bd9Sstevel@tonic-gate 		if (!xdr_replymsg(xdrs, &reply_msg)) {
7477c478bd9Sstevel@tonic-gate 			if (rpc_callerr.re_status == RPC_SUCCESS)
7487c478bd9Sstevel@tonic-gate 				continue;
7497c478bd9Sstevel@tonic-gate 			rpc_fd_unlock(vctbl, ct->ct_fd);
7507c478bd9Sstevel@tonic-gate 			return (rpc_callerr.re_status);
7517c478bd9Sstevel@tonic-gate 		}
7527c478bd9Sstevel@tonic-gate 		if (reply_msg.rm_xid == x_id)
7537c478bd9Sstevel@tonic-gate 			break;
7547c478bd9Sstevel@tonic-gate 	}
7557c478bd9Sstevel@tonic-gate 
7567c478bd9Sstevel@tonic-gate 	/*
7577c478bd9Sstevel@tonic-gate 	 * process header
7587c478bd9Sstevel@tonic-gate 	 */
7597c478bd9Sstevel@tonic-gate 	if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
7607c478bd9Sstevel@tonic-gate 	    (reply_msg.acpted_rply.ar_stat == SUCCESS))
7617c478bd9Sstevel@tonic-gate 		rpc_callerr.re_status = RPC_SUCCESS;
7627c478bd9Sstevel@tonic-gate 	else
7637c478bd9Sstevel@tonic-gate 		__seterr_reply(&reply_msg, &(rpc_callerr));
7647c478bd9Sstevel@tonic-gate 
7657c478bd9Sstevel@tonic-gate 	if (rpc_callerr.re_status == RPC_SUCCESS) {
7667c478bd9Sstevel@tonic-gate 		if (!AUTH_VALIDATE(cl->cl_auth,
7677c478bd9Sstevel@tonic-gate 		    &reply_msg.acpted_rply.ar_verf)) {
7687c478bd9Sstevel@tonic-gate 			rpc_callerr.re_status = RPC_AUTHERROR;
7697c478bd9Sstevel@tonic-gate 			rpc_callerr.re_why = AUTH_INVALIDRESP;
7707c478bd9Sstevel@tonic-gate 		} else if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
7717c478bd9Sstevel@tonic-gate 			if (!(*xdr_results)(xdrs, results_ptr)) {
7727c478bd9Sstevel@tonic-gate 				if (rpc_callerr.re_status == RPC_SUCCESS)
7735131caa1SMarcel Telka 					rpc_callerr.re_status =
7745131caa1SMarcel Telka 					    RPC_CANTDECODERES;
7757c478bd9Sstevel@tonic-gate 			}
7767c478bd9Sstevel@tonic-gate 		} else if (!__rpc_gss_unwrap(cl->cl_auth, xdrs, xdr_results,
7777c478bd9Sstevel@tonic-gate 		    results_ptr)) {
7787c478bd9Sstevel@tonic-gate 			if (rpc_callerr.re_status == RPC_SUCCESS)
7797c478bd9Sstevel@tonic-gate 				rpc_callerr.re_status = RPC_CANTDECODERES;
7807c478bd9Sstevel@tonic-gate 		}
7817c478bd9Sstevel@tonic-gate 	}	/* end successful completion */
7827c478bd9Sstevel@tonic-gate 	/*
7837c478bd9Sstevel@tonic-gate 	 * If unsuccessful AND the error is an authentication error,
7847c478bd9Sstevel@tonic-gate 	 * then refresh the credentials and try again, else break.
7857c478bd9Sstevel@tonic-gate 	 */
7867c478bd9Sstevel@tonic-gate 	else if (rpc_callerr.re_status == RPC_AUTHERROR) {
7877c478bd9Sstevel@tonic-gate 		/* maybe our credentials need to be refreshed ... */
7887c478bd9Sstevel@tonic-gate 		if (refreshes-- && AUTH_REFRESH(cl->cl_auth, &reply_msg))
7897c478bd9Sstevel@tonic-gate 			goto call_again;
7907c478bd9Sstevel@tonic-gate 		else
7917c478bd9Sstevel@tonic-gate 			/*
7927c478bd9Sstevel@tonic-gate 			 * We are setting rpc_callerr here given that libnsl
7937c478bd9Sstevel@tonic-gate 			 * is not reentrant, thereby reinitializing the TSD.
7947c478bd9Sstevel@tonic-gate 			 * If it is not set here, then success could be
7957c478bd9Sstevel@tonic-gate 			 * returned even though the refresh failed.
7967c478bd9Sstevel@tonic-gate 			 */
7977c478bd9Sstevel@tonic-gate 			rpc_callerr.re_status = RPC_AUTHERROR;
7987c478bd9Sstevel@tonic-gate 	} /* end of unsuccessful completion */
7997c478bd9Sstevel@tonic-gate 	/* free verifier ... */
8007c478bd9Sstevel@tonic-gate 	if (reply_msg.rm_reply.rp_stat == MSG_ACCEPTED &&
8017c478bd9Sstevel@tonic-gate 	    reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
8027c478bd9Sstevel@tonic-gate 		xdrs->x_op = XDR_FREE;
8037c478bd9Sstevel@tonic-gate 		(void) xdr_opaque_auth(xdrs, &(reply_msg.acpted_rply.ar_verf));
8047c478bd9Sstevel@tonic-gate 	}
8057c478bd9Sstevel@tonic-gate 	rpc_fd_unlock(vctbl, ct->ct_fd);
8067c478bd9Sstevel@tonic-gate 	return (rpc_callerr.re_status);
8077c478bd9Sstevel@tonic-gate }
8087c478bd9Sstevel@tonic-gate 
8097c478bd9Sstevel@tonic-gate static enum clnt_stat
81061961e0fSrobinson clnt_vc_send(CLIENT *cl, rpcproc_t proc, xdrproc_t xdr_args, caddr_t args_ptr)
8117c478bd9Sstevel@tonic-gate {
8127c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
8137c478bd9Sstevel@tonic-gate 	struct ct_data *ct = (struct ct_data *)cl->cl_private;
8147c478bd9Sstevel@tonic-gate 	XDR *xdrs = &(ct->ct_xdrs);
8157c478bd9Sstevel@tonic-gate 	uint32_t x_id;
8167c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
8177c478bd9Sstevel@tonic-gate 	uint32_t *msg_x_id = (uint32_t *)(ct->ct_mcall);	/* yuk */
8187c478bd9Sstevel@tonic-gate 
8197c478bd9Sstevel@tonic-gate 	if (rpc_fd_lock(vctbl, ct->ct_fd)) {
8207c478bd9Sstevel@tonic-gate 		rpc_callerr.re_status = RPC_FAILED;
8217c478bd9Sstevel@tonic-gate 		rpc_callerr.re_errno = errno;
8227c478bd9Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
8237c478bd9Sstevel@tonic-gate 		return (RPC_FAILED);
8247c478bd9Sstevel@tonic-gate 	}
8257c478bd9Sstevel@tonic-gate 
8267c478bd9Sstevel@tonic-gate 	ct->ct_is_oneway = TRUE;
8277c478bd9Sstevel@tonic-gate 
8287c478bd9Sstevel@tonic-gate 	xdrs->x_op = XDR_ENCODE;
8297c478bd9Sstevel@tonic-gate 	rpc_callerr.re_status = RPC_SUCCESS;
8307c478bd9Sstevel@tonic-gate 	/*
8317c478bd9Sstevel@tonic-gate 	 * Due to little endian byte order, it is necessary to convert to host
8327c478bd9Sstevel@tonic-gate 	 * format before decrementing xid.
8337c478bd9Sstevel@tonic-gate 	 */
8347c478bd9Sstevel@tonic-gate 	x_id = ntohl(*msg_x_id) - 1;
8357c478bd9Sstevel@tonic-gate 	*msg_x_id = htonl(x_id);
8367c478bd9Sstevel@tonic-gate 
8377c478bd9Sstevel@tonic-gate 	if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
8387c478bd9Sstevel@tonic-gate 		if ((!XDR_PUTBYTES(xdrs, ct->ct_mcall, ct->ct_mpos)) ||
8397c478bd9Sstevel@tonic-gate 		    (!XDR_PUTINT32(xdrs, (int32_t *)&proc)) ||
8407c478bd9Sstevel@tonic-gate 		    (!AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
8417c478bd9Sstevel@tonic-gate 		    (!xdr_args(xdrs, args_ptr))) {
8427c478bd9Sstevel@tonic-gate 			if (rpc_callerr.re_status == RPC_SUCCESS)
8437c478bd9Sstevel@tonic-gate 				rpc_callerr.re_status = RPC_CANTENCODEARGS;
8447c478bd9Sstevel@tonic-gate 			(void) xdrrec_endofrecord(xdrs, TRUE);
8457c478bd9Sstevel@tonic-gate 			rpc_fd_unlock(vctbl, ct->ct_fd);
8467c478bd9Sstevel@tonic-gate 			return (rpc_callerr.re_status);
8477c478bd9Sstevel@tonic-gate 		}
8487c478bd9Sstevel@tonic-gate 	} else {
8497c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
8507c478bd9Sstevel@tonic-gate 		uint32_t *u = (uint32_t *)&ct->ct_mcall[ct->ct_mpos];
8517c478bd9Sstevel@tonic-gate 		IXDR_PUT_U_INT32(u, proc);
8527c478bd9Sstevel@tonic-gate 		if (!__rpc_gss_wrap(cl->cl_auth, ct->ct_mcall,
8537c478bd9Sstevel@tonic-gate 		    ((char *)u) - ct->ct_mcall, xdrs, xdr_args, args_ptr)) {
8547c478bd9Sstevel@tonic-gate 			if (rpc_callerr.re_status == RPC_SUCCESS)
8557c478bd9Sstevel@tonic-gate 				rpc_callerr.re_status = RPC_CANTENCODEARGS;
8567c478bd9Sstevel@tonic-gate 			(void) xdrrec_endofrecord(xdrs, TRUE);
8577c478bd9Sstevel@tonic-gate 			rpc_fd_unlock(vctbl, ct->ct_fd);
8587c478bd9Sstevel@tonic-gate 			return (rpc_callerr.re_status);
8597c478bd9Sstevel@tonic-gate 		}
8607c478bd9Sstevel@tonic-gate 	}
8617c478bd9Sstevel@tonic-gate 
8627c478bd9Sstevel@tonic-gate 	/*
8637c478bd9Sstevel@tonic-gate 	 * There is no need to check errors, as the following code does
8647c478bd9Sstevel@tonic-gate 	 * not depend on the successful completion of the call.
8657c478bd9Sstevel@tonic-gate 	 * An error, if any occurs, is reported through
8667c478bd9Sstevel@tonic-gate 	 * rpc_callerr.re_status.
8677c478bd9Sstevel@tonic-gate 	 */
86861961e0fSrobinson 	(void) xdrrec_endofrecord(xdrs, TRUE);
8697c478bd9Sstevel@tonic-gate 
8707c478bd9Sstevel@tonic-gate 	rpc_fd_unlock(vctbl, ct->ct_fd);
8717c478bd9Sstevel@tonic-gate 	return (rpc_callerr.re_status);
8727c478bd9Sstevel@tonic-gate }
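
/*
 * Illustrative sketch (not part of this file): a one-way request sent
 * through the clnt_send(3NSL) wrapper, which reaches clnt_vc_send()
 * above for connection-oriented handles.  The procedure number and
 * argument XDR routine are hypothetical.
 *
 *	if (clnt_send(clnt, LOGPROC, xdr_my_args, (caddr_t)&args) !=
 *	    RPC_SUCCESS)
 *		clnt_perror(clnt, "clnt_send");
 */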
8737c478bd9Sstevel@tonic-gate 
87461961e0fSrobinson /* ARGSUSED */
8757c478bd9Sstevel@tonic-gate static void
87661961e0fSrobinson clnt_vc_geterr(CLIENT *cl, struct rpc_err *errp)
8777c478bd9Sstevel@tonic-gate {
8787c478bd9Sstevel@tonic-gate 	*errp = rpc_callerr;
8797c478bd9Sstevel@tonic-gate }
8807c478bd9Sstevel@tonic-gate 
8817c478bd9Sstevel@tonic-gate static bool_t
88261961e0fSrobinson clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, caddr_t res_ptr)
8837c478bd9Sstevel@tonic-gate {
8847c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
8857c478bd9Sstevel@tonic-gate 	struct ct_data *ct = (struct ct_data *)cl->cl_private;
8867c478bd9Sstevel@tonic-gate 	XDR *xdrs = &(ct->ct_xdrs);
88761961e0fSrobinson 	bool_t stat;
8887c478bd9Sstevel@tonic-gate 
88961961e0fSrobinson 	(void) rpc_fd_lock(vctbl, ct->ct_fd);
8907c478bd9Sstevel@tonic-gate 	xdrs->x_op = XDR_FREE;
89161961e0fSrobinson 	stat = (*xdr_res)(xdrs, res_ptr);
8927c478bd9Sstevel@tonic-gate 	rpc_fd_unlock(vctbl, ct->ct_fd);
89361961e0fSrobinson 	return (stat);
8947c478bd9Sstevel@tonic-gate }
8957c478bd9Sstevel@tonic-gate 
8967c478bd9Sstevel@tonic-gate static void
8977c478bd9Sstevel@tonic-gate clnt_vc_abort(void)
8987c478bd9Sstevel@tonic-gate {
8997c478bd9Sstevel@tonic-gate }
9007c478bd9Sstevel@tonic-gate 
9017c478bd9Sstevel@tonic-gate /*ARGSUSED*/
9027c478bd9Sstevel@tonic-gate static bool_t
90361961e0fSrobinson clnt_vc_control(CLIENT *cl, int request, char *info)
9047c478bd9Sstevel@tonic-gate {
9057c478bd9Sstevel@tonic-gate 	bool_t ret;
9067c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
9077c478bd9Sstevel@tonic-gate 	struct ct_data *ct = (struct ct_data *)cl->cl_private;
9087c478bd9Sstevel@tonic-gate 
9097c478bd9Sstevel@tonic-gate 	if (rpc_fd_lock(vctbl, ct->ct_fd)) {
9107c478bd9Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
9115131caa1SMarcel Telka 		return (FALSE);
9127c478bd9Sstevel@tonic-gate 	}
9137c478bd9Sstevel@tonic-gate 
9147c478bd9Sstevel@tonic-gate 	switch (request) {
9157c478bd9Sstevel@tonic-gate 	case CLSET_FD_CLOSE:
9167c478bd9Sstevel@tonic-gate 		ct->ct_closeit = TRUE;
9177c478bd9Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
9187c478bd9Sstevel@tonic-gate 		return (TRUE);
9197c478bd9Sstevel@tonic-gate 	case CLSET_FD_NCLOSE:
9207c478bd9Sstevel@tonic-gate 		ct->ct_closeit = FALSE;
9217c478bd9Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
9227c478bd9Sstevel@tonic-gate 		return (TRUE);
9237c478bd9Sstevel@tonic-gate 	case CLFLUSH:
9247c478bd9Sstevel@tonic-gate 		if (ct->ct_io_mode == RPC_CL_NONBLOCKING) {
9257c478bd9Sstevel@tonic-gate 			int res;
9267c478bd9Sstevel@tonic-gate 			res = do_flush(ct, (info == NULL ||
92761961e0fSrobinson 			    /* LINTED pointer cast */
9287c478bd9Sstevel@tonic-gate 			    *(int *)info == RPC_CL_DEFAULT_FLUSH)?
92961961e0fSrobinson 			    /* LINTED pointer cast */
9307c478bd9Sstevel@tonic-gate 			    ct->ct_blocking_mode: *(int *)info);
9317c478bd9Sstevel@tonic-gate 			ret = (0 == res);
932d00075c7SMarcel Telka 		} else {
933d00075c7SMarcel Telka 			ret = FALSE;
9347c478bd9Sstevel@tonic-gate 		}
9357c478bd9Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
9367c478bd9Sstevel@tonic-gate 		return (ret);
9377c478bd9Sstevel@tonic-gate 	}
9387c478bd9Sstevel@tonic-gate 
9397c478bd9Sstevel@tonic-gate 	/* for other requests which use info */
9407c478bd9Sstevel@tonic-gate 	if (info == NULL) {
9417c478bd9Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
9427c478bd9Sstevel@tonic-gate 		return (FALSE);
9437c478bd9Sstevel@tonic-gate 	}
9447c478bd9Sstevel@tonic-gate 	switch (request) {
9457c478bd9Sstevel@tonic-gate 	case CLSET_TIMEOUT:
9467c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
9477c478bd9Sstevel@tonic-gate 		if (time_not_ok((struct timeval *)info)) {
9487c478bd9Sstevel@tonic-gate 			rpc_fd_unlock(vctbl, ct->ct_fd);
9497c478bd9Sstevel@tonic-gate 			return (FALSE);
9507c478bd9Sstevel@tonic-gate 		}
9517c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
9527c478bd9Sstevel@tonic-gate 		ct->ct_wait = __rpc_timeval_to_msec((struct timeval *)info);
9537c478bd9Sstevel@tonic-gate 		ct->ct_waitset = TRUE;
9547c478bd9Sstevel@tonic-gate 		break;
9557c478bd9Sstevel@tonic-gate 	case CLGET_TIMEOUT:
9567c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
9577c478bd9Sstevel@tonic-gate 		((struct timeval *)info)->tv_sec = ct->ct_wait / 1000;
9587c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
9595131caa1SMarcel Telka 		((struct timeval *)info)->tv_usec = (ct->ct_wait % 1000) * 1000;
9607c478bd9Sstevel@tonic-gate 		break;
9617c478bd9Sstevel@tonic-gate 	case CLGET_SERVER_ADDR:	/* For compatibility only */
96261961e0fSrobinson 		(void) memcpy(info, ct->ct_addr.buf, (size_t)ct->ct_addr.len);
9637c478bd9Sstevel@tonic-gate 		break;
9647c478bd9Sstevel@tonic-gate 	case CLGET_FD:
9657c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
9667c478bd9Sstevel@tonic-gate 		*(int *)info = ct->ct_fd;
9677c478bd9Sstevel@tonic-gate 		break;
9687c478bd9Sstevel@tonic-gate 	case CLGET_SVC_ADDR:
9697c478bd9Sstevel@tonic-gate 		/* The caller should not free this memory area */
9707c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
9717c478bd9Sstevel@tonic-gate 		*(struct netbuf *)info = ct->ct_addr;
9727c478bd9Sstevel@tonic-gate 		break;
9737c478bd9Sstevel@tonic-gate 	case CLSET_SVC_ADDR:		/* set to new address */
9747c478bd9Sstevel@tonic-gate #ifdef undef
9757c478bd9Sstevel@tonic-gate 		/*
9767c478bd9Sstevel@tonic-gate 		 * XXX: once t_snddis() followed by t_connect() starts to
9777c478bd9Sstevel@tonic-gate 		 * work, this ifdef should be removed.  CLIENT handle reuse
9787c478bd9Sstevel@tonic-gate 		 * would then be possible for COTS as well.
9797c478bd9Sstevel@tonic-gate 		 */
9807c478bd9Sstevel@tonic-gate 		if (t_snddis(ct->ct_fd, NULL) == -1) {
9817c478bd9Sstevel@tonic-gate 			rpc_createerr.cf_stat = RPC_TLIERROR;
9827c478bd9Sstevel@tonic-gate 			rpc_createerr.cf_error.re_terrno = t_errno;
9837c478bd9Sstevel@tonic-gate 			rpc_createerr.cf_error.re_errno = errno;
9847c478bd9Sstevel@tonic-gate 			rpc_fd_unlock(vctbl, ct->ct_fd);
9857c478bd9Sstevel@tonic-gate 			return (FALSE);
9867c478bd9Sstevel@tonic-gate 		}
9877c478bd9Sstevel@tonic-gate 		ret = set_up_connection(ct->ct_fd, (struct netbuf *)info,
9885131caa1SMarcel Telka 		    ct, NULL);
9897c478bd9Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
9907c478bd9Sstevel@tonic-gate 		return (ret);
9917c478bd9Sstevel@tonic-gate #else
9927c478bd9Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
9937c478bd9Sstevel@tonic-gate 		return (FALSE);
9947c478bd9Sstevel@tonic-gate #endif
9957c478bd9Sstevel@tonic-gate 	case CLGET_XID:
9967c478bd9Sstevel@tonic-gate 		/*
9977c478bd9Sstevel@tonic-gate 		 * Use the knowledge that xid is the
9987c478bd9Sstevel@tonic-gate 		 * first element in the call structure.
9997c478bd9Sstevel@tonic-gate 		 * This will get the xid of the PREVIOUS call.
10007c478bd9Sstevel@tonic-gate 		 */
10017c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
10027c478bd9Sstevel@tonic-gate 		*(uint32_t *)info = ntohl(*(uint32_t *)ct->ct_mcall);
10037c478bd9Sstevel@tonic-gate 		break;
10047c478bd9Sstevel@tonic-gate 	case CLSET_XID:
10057c478bd9Sstevel@tonic-gate 		/* This will set the xid of the NEXT call */
10067c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
10077c478bd9Sstevel@tonic-gate 		*(uint32_t *)ct->ct_mcall =  htonl(*(uint32_t *)info + 1);
10087c478bd9Sstevel@tonic-gate 		/* increment by 1 as clnt_vc_call() decrements once */
10097c478bd9Sstevel@tonic-gate 		break;
10107c478bd9Sstevel@tonic-gate 	case CLGET_VERS:
10117c478bd9Sstevel@tonic-gate 		/*
10127c478bd9Sstevel@tonic-gate 		 * This RELIES on the information that, in the call body,
10137c478bd9Sstevel@tonic-gate 		 * the version number field is the fifth field from the
10147c478bd9Sstevel@tonic-gate 		 * beginning of the RPC header. MUST be changed if the
10157c478bd9Sstevel@tonic-gate 		 * call_struct is changed
10167c478bd9Sstevel@tonic-gate 		 */
10177c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
10187c478bd9Sstevel@tonic-gate 		*(uint32_t *)info = ntohl(*(uint32_t *)(ct->ct_mcall +
10197c478bd9Sstevel@tonic-gate 		    4 * BYTES_PER_XDR_UNIT));
10207c478bd9Sstevel@tonic-gate 		break;
10217c478bd9Sstevel@tonic-gate 
10227c478bd9Sstevel@tonic-gate 	case CLSET_VERS:
10237c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
10247c478bd9Sstevel@tonic-gate 		*(uint32_t *)(ct->ct_mcall + 4 * BYTES_PER_XDR_UNIT) =
10257c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
10267c478bd9Sstevel@tonic-gate 		    htonl(*(uint32_t *)info);
10277c478bd9Sstevel@tonic-gate 		break;
10287c478bd9Sstevel@tonic-gate 
10297c478bd9Sstevel@tonic-gate 	case CLGET_PROG:
10307c478bd9Sstevel@tonic-gate 		/*
10317c478bd9Sstevel@tonic-gate 		 * This RELIES on the information that, in the call body,
10327c478bd9Sstevel@tonic-gate 		 * the program number field is the fourth field from the
10337c478bd9Sstevel@tonic-gate 		 * beginning of the RPC header. MUST be changed if the
10347c478bd9Sstevel@tonic-gate 		 * call_struct is changed
10357c478bd9Sstevel@tonic-gate 		 */
10367c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
10377c478bd9Sstevel@tonic-gate 		*(uint32_t *)info = ntohl(*(uint32_t *)(ct->ct_mcall +
10387c478bd9Sstevel@tonic-gate 		    3 * BYTES_PER_XDR_UNIT));
10397c478bd9Sstevel@tonic-gate 		break;
10407c478bd9Sstevel@tonic-gate 
10417c478bd9Sstevel@tonic-gate 	case CLSET_PROG:
10427c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
10437c478bd9Sstevel@tonic-gate 		*(uint32_t *)(ct->ct_mcall + 3 * BYTES_PER_XDR_UNIT) =
10447c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
10457c478bd9Sstevel@tonic-gate 		    htonl(*(uint32_t *)info);
10467c478bd9Sstevel@tonic-gate 		break;
10477c478bd9Sstevel@tonic-gate 
10487c478bd9Sstevel@tonic-gate 	case CLSET_IO_MODE:
104961961e0fSrobinson 		/* LINTED pointer cast */
10507c478bd9Sstevel@tonic-gate 		if (!set_io_mode(ct, *(int *)info)) {
10517c478bd9Sstevel@tonic-gate 			rpc_fd_unlock(vctbl, ct->ct_fd);
10527c478bd9Sstevel@tonic-gate 			return (FALSE);
10537c478bd9Sstevel@tonic-gate 		}
10547c478bd9Sstevel@tonic-gate 		break;
10557c478bd9Sstevel@tonic-gate 	case CLSET_FLUSH_MODE:
10567c478bd9Sstevel@tonic-gate 		/* Set a specific FLUSH_MODE */
105761961e0fSrobinson 		/* LINTED pointer cast */
10587c478bd9Sstevel@tonic-gate 		if (!set_flush_mode(ct, *(int *)info)) {
10597c478bd9Sstevel@tonic-gate 			rpc_fd_unlock(vctbl, ct->ct_fd);
10607c478bd9Sstevel@tonic-gate 			return (FALSE);
10617c478bd9Sstevel@tonic-gate 		}
10627c478bd9Sstevel@tonic-gate 		break;
10637c478bd9Sstevel@tonic-gate 	case CLGET_FLUSH_MODE:
106461961e0fSrobinson 		/* LINTED pointer cast */
10657c478bd9Sstevel@tonic-gate 		*(rpcflushmode_t *)info = ct->ct_blocking_mode;
10667c478bd9Sstevel@tonic-gate 		break;
10677c478bd9Sstevel@tonic-gate 
10687c478bd9Sstevel@tonic-gate 	case CLGET_IO_MODE:
106961961e0fSrobinson 		/* LINTED pointer cast */
10707c478bd9Sstevel@tonic-gate 		*(rpciomode_t *)info = ct->ct_io_mode;
10717c478bd9Sstevel@tonic-gate 		break;
10727c478bd9Sstevel@tonic-gate 
10737c478bd9Sstevel@tonic-gate 	case CLGET_CURRENT_REC_SIZE:
10747c478bd9Sstevel@tonic-gate 		/*
10757c478bd9Sstevel@tonic-gate 		 * Returns the number of bytes currently buffered
10767c478bd9Sstevel@tonic-gate 		 * for pending requests
10777c478bd9Sstevel@tonic-gate 		 */
107861961e0fSrobinson 		/* LINTED pointer cast */
10797c478bd9Sstevel@tonic-gate 		*(int *)info = ct->ct_bufferPendingSize;
10807c478bd9Sstevel@tonic-gate 		break;
10817c478bd9Sstevel@tonic-gate 
10827c478bd9Sstevel@tonic-gate 	case CLSET_CONNMAXREC_SIZE:
10837c478bd9Sstevel@tonic-gate 		/* Cannot resize the buffer if it is used. */
10847c478bd9Sstevel@tonic-gate 		if (ct->ct_bufferPendingSize != 0) {
10857c478bd9Sstevel@tonic-gate 			rpc_fd_unlock(vctbl, ct->ct_fd);
10867c478bd9Sstevel@tonic-gate 			return (FALSE);
10877c478bd9Sstevel@tonic-gate 		}
10887c478bd9Sstevel@tonic-gate 		/*
10897c478bd9Sstevel@tonic-gate 		 * If the new size is equal to the current size,
10907c478bd9Sstevel@tonic-gate 		 * there is nothing to do.
10917c478bd9Sstevel@tonic-gate 		 */
109261961e0fSrobinson 		/* LINTED pointer cast */
10937c478bd9Sstevel@tonic-gate 		if (ct->ct_bufferSize == *(uint_t *)info)
10947c478bd9Sstevel@tonic-gate 			break;
10957c478bd9Sstevel@tonic-gate 
109661961e0fSrobinson 		/* LINTED pointer cast */
10977c478bd9Sstevel@tonic-gate 		ct->ct_bufferSize = *(uint_t *)info;
10987c478bd9Sstevel@tonic-gate 		if (ct->ct_buffer) {
10997c478bd9Sstevel@tonic-gate 			free(ct->ct_buffer);
11007c478bd9Sstevel@tonic-gate 			ct->ct_buffer = NULL;
11017c478bd9Sstevel@tonic-gate 			ct->ct_bufferReadPtr = ct->ct_bufferWritePtr = NULL;
11027c478bd9Sstevel@tonic-gate 		}
11037c478bd9Sstevel@tonic-gate 		break;
11047c478bd9Sstevel@tonic-gate 
11057c478bd9Sstevel@tonic-gate 	case CLGET_CONNMAXREC_SIZE:
11067c478bd9Sstevel@tonic-gate 		/*
11077c478bd9Sstevel@tonic-gate 		 * Returns the size of the buffer allocated
11087c478bd9Sstevel@tonic-gate 		 * to pending requests
11097c478bd9Sstevel@tonic-gate 		 */
111061961e0fSrobinson 		/* LINTED pointer cast */
11117c478bd9Sstevel@tonic-gate 		*(uint_t *)info = ct->ct_bufferSize;
11127c478bd9Sstevel@tonic-gate 		break;
11137c478bd9Sstevel@tonic-gate 
11147c478bd9Sstevel@tonic-gate 	default:
11157c478bd9Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
11167c478bd9Sstevel@tonic-gate 		return (FALSE);
11177c478bd9Sstevel@tonic-gate 	}
11187c478bd9Sstevel@tonic-gate 	rpc_fd_unlock(vctbl, ct->ct_fd);
11197c478bd9Sstevel@tonic-gate 	return (TRUE);
11207c478bd9Sstevel@tonic-gate }
11217c478bd9Sstevel@tonic-gate 
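/*
 * Illustrative usage sketch (not part of the library): callers drive the
 * requests handled above through the generic clnt_control() interface.
 * "clnt" is assumed to be a connection-oriented handle obtained from
 * clnt_create() or clnt_vc_create().
 *
 *	struct timeval tv;
 *
 *	tv.tv_sec = 30;
 *	tv.tv_usec = 0;
 *	if (!clnt_control(clnt, CLSET_TIMEOUT, (char *)&tv))
 *		clnt_perror(clnt, "CLSET_TIMEOUT");
 *
 *	(void) clnt_control(clnt, CLGET_TIMEOUT, (char *)&tv);
 *
 * Once CLSET_TIMEOUT has succeeded, ct_waitset is TRUE and the timeout
 * passed to clnt_call() is ignored in favor of the value set here.
 */
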
11227c478bd9Sstevel@tonic-gate static void
112361961e0fSrobinson clnt_vc_destroy(CLIENT *cl)
11247c478bd9Sstevel@tonic-gate {
11257c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
11267c478bd9Sstevel@tonic-gate 	struct ct_data *ct = (struct ct_data *)cl->cl_private;
11277c478bd9Sstevel@tonic-gate 	int ct_fd = ct->ct_fd;
11287c478bd9Sstevel@tonic-gate 
112961961e0fSrobinson 	(void) rpc_fd_lock(vctbl, ct_fd);
11307c478bd9Sstevel@tonic-gate 
11317c478bd9Sstevel@tonic-gate 	if (ct->ct_io_mode == RPC_CL_NONBLOCKING) {
113261961e0fSrobinson 		(void) do_flush(ct, RPC_CL_BLOCKING_FLUSH);
113361961e0fSrobinson 		(void) unregister_nb(ct);
11347c478bd9Sstevel@tonic-gate 	}
11357c478bd9Sstevel@tonic-gate 
11367c478bd9Sstevel@tonic-gate 	if (ct->ct_closeit)
11377c478bd9Sstevel@tonic-gate 		(void) t_close(ct_fd);
11387c478bd9Sstevel@tonic-gate 	XDR_DESTROY(&(ct->ct_xdrs));
11397c478bd9Sstevel@tonic-gate 	if (ct->ct_addr.buf)
114061961e0fSrobinson 		free(ct->ct_addr.buf);
114161961e0fSrobinson 	free(ct);
11427c478bd9Sstevel@tonic-gate 	if (cl->cl_netid && cl->cl_netid[0])
114361961e0fSrobinson 		free(cl->cl_netid);
11447c478bd9Sstevel@tonic-gate 	if (cl->cl_tp && cl->cl_tp[0])
114561961e0fSrobinson 		free(cl->cl_tp);
114661961e0fSrobinson 	free(cl);
11477c478bd9Sstevel@tonic-gate 	rpc_fd_unlock(vctbl, ct_fd);
11487c478bd9Sstevel@tonic-gate }
11497c478bd9Sstevel@tonic-gate 
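/*
 * Illustrative usage sketch (not part of the library): applications reach
 * the routine above through the generic clnt_destroy() macro.  PROGNUM and
 * VERSNUM below are placeholders for an application's program and version
 * numbers.
 *
 *	CLIENT *clnt = clnt_create("server", PROGNUM, VERSNUM, "tcp");
 *
 *	if (clnt != NULL)
 *		clnt_destroy(clnt);
 *
 * Any data still buffered in non-blocking mode is flushed first, and the
 * descriptor is closed only if the handle owns it (ct_closeit).
 */
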
11507c478bd9Sstevel@tonic-gate /*
11517c478bd9Sstevel@tonic-gate  * Interface between xdr serializer and vc connection.
11527c478bd9Sstevel@tonic-gate  * Behaves like the system calls, read & write, but keeps some error state
11537c478bd9Sstevel@tonic-gate  * around for the rpc level.
11547c478bd9Sstevel@tonic-gate  */
11557c478bd9Sstevel@tonic-gate static int
11567c478bd9Sstevel@tonic-gate read_vc(void *ct_tmp, caddr_t buf, int len)
11577c478bd9Sstevel@tonic-gate {
1158cb620785Sraf 	static pthread_key_t pfdp_key = PTHREAD_ONCE_KEY_NP;
11597c478bd9Sstevel@tonic-gate 	struct pollfd *pfdp;
11607c478bd9Sstevel@tonic-gate 	int npfd;		/* total number of pfdp allocated */
11617c478bd9Sstevel@tonic-gate 	struct ct_data *ct = ct_tmp;
11627c478bd9Sstevel@tonic-gate 	struct timeval starttime;
11637c478bd9Sstevel@tonic-gate 	struct timeval curtime;
11647c478bd9Sstevel@tonic-gate 	int poll_time;
11657c478bd9Sstevel@tonic-gate 	int delta;
11667c478bd9Sstevel@tonic-gate 
116761961e0fSrobinson 	if (len == 0)
11687c478bd9Sstevel@tonic-gate 		return (0);
11697c478bd9Sstevel@tonic-gate 
11707c478bd9Sstevel@tonic-gate 	/*
11717c478bd9Sstevel@tonic-gate 	 * Allocate just one pollfd the first time.  thr_get_storage() may
11727c478bd9Sstevel@tonic-gate 	 * return a larger buffer, left over from the last time we were
11737c478bd9Sstevel@tonic-gate 	 * here, but that's OK.  realloc() will deal with it properly.
11747c478bd9Sstevel@tonic-gate 	 */
11757c478bd9Sstevel@tonic-gate 	npfd = 1;
11767c478bd9Sstevel@tonic-gate 	pfdp = thr_get_storage(&pfdp_key, sizeof (struct pollfd), free);
11777c478bd9Sstevel@tonic-gate 	if (pfdp == NULL) {
11787c478bd9Sstevel@tonic-gate 		(void) syslog(LOG_ERR, clnt_vc_errstr,
11797c478bd9Sstevel@tonic-gate 		    clnt_read_vc_str, __no_mem_str);
11807c478bd9Sstevel@tonic-gate 		rpc_callerr.re_status = RPC_SYSTEMERROR;
11817c478bd9Sstevel@tonic-gate 		rpc_callerr.re_errno = errno;
11827c478bd9Sstevel@tonic-gate 		rpc_callerr.re_terrno = 0;
11837c478bd9Sstevel@tonic-gate 		return (-1);
11847c478bd9Sstevel@tonic-gate 	}
11857c478bd9Sstevel@tonic-gate 
11867c478bd9Sstevel@tonic-gate 	/*
11877c478bd9Sstevel@tonic-gate 	 *	N.B.:  slot 0 in the pollfd array is reserved for the file
11887c478bd9Sstevel@tonic-gate 	 *	descriptor we're really interested in (as opposed to the
11897c478bd9Sstevel@tonic-gate 	 *	callback descriptors).
11907c478bd9Sstevel@tonic-gate 	 */
11917c478bd9Sstevel@tonic-gate 	pfdp[0].fd = ct->ct_fd;
11927c478bd9Sstevel@tonic-gate 	pfdp[0].events = MASKVAL;
11937c478bd9Sstevel@tonic-gate 	pfdp[0].revents = 0;
11947c478bd9Sstevel@tonic-gate 	poll_time = ct->ct_wait;
119561961e0fSrobinson 	if (gettimeofday(&starttime, NULL) == -1) {
11967c478bd9Sstevel@tonic-gate 		syslog(LOG_ERR, "Unable to get time of day: %m");
11977c478bd9Sstevel@tonic-gate 		return (-1);
11987c478bd9Sstevel@tonic-gate 	}
11997c478bd9Sstevel@tonic-gate 
12007c478bd9Sstevel@tonic-gate 	for (;;) {
12017c478bd9Sstevel@tonic-gate 		extern void (*_svc_getreqset_proc)();
12027c478bd9Sstevel@tonic-gate 		extern pollfd_t *svc_pollfd;
12037c478bd9Sstevel@tonic-gate 		extern int svc_max_pollfd;
12047c478bd9Sstevel@tonic-gate 		int fds;
12057c478bd9Sstevel@tonic-gate 
12067c478bd9Sstevel@tonic-gate 		/* VARIABLES PROTECTED BY svc_fd_lock: svc_pollfd */
12077c478bd9Sstevel@tonic-gate 
12087c478bd9Sstevel@tonic-gate 		if (_svc_getreqset_proc) {
12097c478bd9Sstevel@tonic-gate 			sig_rw_rdlock(&svc_fd_lock);
12107c478bd9Sstevel@tonic-gate 
12117c478bd9Sstevel@tonic-gate 			/* reallocate pfdp to svc_max_pollfd +1 */
12127c478bd9Sstevel@tonic-gate 			if (npfd != (svc_max_pollfd + 1)) {
12137c478bd9Sstevel@tonic-gate 				struct pollfd *tmp_pfdp = realloc(pfdp,
12147c478bd9Sstevel@tonic-gate 				    sizeof (struct pollfd) *
12157c478bd9Sstevel@tonic-gate 				    (svc_max_pollfd + 1));
12167c478bd9Sstevel@tonic-gate 				if (tmp_pfdp == NULL) {
12177c478bd9Sstevel@tonic-gate 					sig_rw_unlock(&svc_fd_lock);
12187c478bd9Sstevel@tonic-gate 					(void) syslog(LOG_ERR, clnt_vc_errstr,
12197c478bd9Sstevel@tonic-gate 					    clnt_read_vc_str, __no_mem_str);
12207c478bd9Sstevel@tonic-gate 					rpc_callerr.re_status = RPC_SYSTEMERROR;
12217c478bd9Sstevel@tonic-gate 					rpc_callerr.re_errno = errno;
12227c478bd9Sstevel@tonic-gate 					rpc_callerr.re_terrno = 0;
12237c478bd9Sstevel@tonic-gate 					return (-1);
12247c478bd9Sstevel@tonic-gate 				}
12257c478bd9Sstevel@tonic-gate 
12267c478bd9Sstevel@tonic-gate 				pfdp = tmp_pfdp;
12277c478bd9Sstevel@tonic-gate 				npfd = svc_max_pollfd + 1;
122861961e0fSrobinson 				(void) pthread_setspecific(pfdp_key, pfdp);
12297c478bd9Sstevel@tonic-gate 			}
12307c478bd9Sstevel@tonic-gate 			if (npfd > 1)
12317c478bd9Sstevel@tonic-gate 				(void) memcpy(&pfdp[1], svc_pollfd,
12327c478bd9Sstevel@tonic-gate 				    sizeof (struct pollfd) * (npfd - 1));
12337c478bd9Sstevel@tonic-gate 
12347c478bd9Sstevel@tonic-gate 			sig_rw_unlock(&svc_fd_lock);
12357c478bd9Sstevel@tonic-gate 		} else {
12367c478bd9Sstevel@tonic-gate 			npfd = 1;	/* don't forget about pfdp[0] */
12377c478bd9Sstevel@tonic-gate 		}
12387c478bd9Sstevel@tonic-gate 
12397c478bd9Sstevel@tonic-gate 		switch (fds = poll(pfdp, npfd, poll_time)) {
12407c478bd9Sstevel@tonic-gate 		case 0:
12417c478bd9Sstevel@tonic-gate 			rpc_callerr.re_status = RPC_TIMEDOUT;
12427c478bd9Sstevel@tonic-gate 			return (-1);
12437c478bd9Sstevel@tonic-gate 
12447c478bd9Sstevel@tonic-gate 		case -1:
12457c478bd9Sstevel@tonic-gate 			if (errno != EINTR)
12467c478bd9Sstevel@tonic-gate 				continue;
12477c478bd9Sstevel@tonic-gate 			else {
12487c478bd9Sstevel@tonic-gate 				/*
12497c478bd9Sstevel@tonic-gate 				 * interrupted by a signal; subtract the
12507c478bd9Sstevel@tonic-gate 				 * time already waited from poll_time
12517c478bd9Sstevel@tonic-gate 				 */
12527c478bd9Sstevel@tonic-gate 
125361961e0fSrobinson 				if (gettimeofday(&curtime, NULL) == -1) {
12547c478bd9Sstevel@tonic-gate 					syslog(LOG_ERR,
12557c478bd9Sstevel@tonic-gate 					    "Unable to get time of day:  %m");
12567c478bd9Sstevel@tonic-gate 					errno = 0;
12577c478bd9Sstevel@tonic-gate 					continue;
12587c478bd9Sstevel@tonic-gate 				}
12597c478bd9Sstevel@tonic-gate 				delta = (curtime.tv_sec -
12607c478bd9Sstevel@tonic-gate 				    starttime.tv_sec) * 1000 +
12617c478bd9Sstevel@tonic-gate 				    (curtime.tv_usec -
12627c478bd9Sstevel@tonic-gate 				    starttime.tv_usec) / 1000;
12637c478bd9Sstevel@tonic-gate 				poll_time -= delta;
12647c478bd9Sstevel@tonic-gate 				if (poll_time < 0) {
12655131caa1SMarcel Telka 					rpc_callerr.re_status = RPC_TIMEDOUT;
12667c478bd9Sstevel@tonic-gate 					errno = 0;
12677c478bd9Sstevel@tonic-gate 					return (-1);
12687c478bd9Sstevel@tonic-gate 				} else {
12697c478bd9Sstevel@tonic-gate 					errno = 0; /* reset it */
12707c478bd9Sstevel@tonic-gate 					continue;
12717c478bd9Sstevel@tonic-gate 				}
12727c478bd9Sstevel@tonic-gate 			}
12737c478bd9Sstevel@tonic-gate 		}
12747c478bd9Sstevel@tonic-gate 
12757c478bd9Sstevel@tonic-gate 		if (pfdp[0].revents == 0) {
12767c478bd9Sstevel@tonic-gate 			/* must be for server side of the house */
12777c478bd9Sstevel@tonic-gate 			(*_svc_getreqset_proc)(&pfdp[1], fds);
12787c478bd9Sstevel@tonic-gate 			continue;	/* do poll again */
12797c478bd9Sstevel@tonic-gate 		}
12807c478bd9Sstevel@tonic-gate 
12817c478bd9Sstevel@tonic-gate 		if (pfdp[0].revents & POLLNVAL) {
12827c478bd9Sstevel@tonic-gate 			rpc_callerr.re_status = RPC_CANTRECV;
12837c478bd9Sstevel@tonic-gate 			/*
12847c478bd9Sstevel@tonic-gate 			 *	Note:  we're faking errno here because we
12857c478bd9Sstevel@tonic-gate 			 *	previously would have expected select() to
12867c478bd9Sstevel@tonic-gate 			 *	return -1 with errno EBADF.  Poll(BA_OS)
12877c478bd9Sstevel@tonic-gate 			 *	returns 0 and sets the POLLNVAL revents flag
12887c478bd9Sstevel@tonic-gate 			 *	instead.
12897c478bd9Sstevel@tonic-gate 			 */
12907c478bd9Sstevel@tonic-gate 			rpc_callerr.re_errno = errno = EBADF;
12917c478bd9Sstevel@tonic-gate 			return (-1);
12927c478bd9Sstevel@tonic-gate 		}
12937c478bd9Sstevel@tonic-gate 
12947c478bd9Sstevel@tonic-gate 		if (pfdp[0].revents & (POLLERR | POLLHUP)) {
12957c478bd9Sstevel@tonic-gate 			rpc_callerr.re_status = RPC_CANTRECV;
12967c478bd9Sstevel@tonic-gate 			rpc_callerr.re_errno = errno = EPIPE;
12977c478bd9Sstevel@tonic-gate 			return (-1);
12987c478bd9Sstevel@tonic-gate 		}
12997c478bd9Sstevel@tonic-gate 		break;
13007c478bd9Sstevel@tonic-gate 	}
13017c478bd9Sstevel@tonic-gate 
13027c478bd9Sstevel@tonic-gate 	switch (len = t_rcvall(ct->ct_fd, buf, len)) {
13037c478bd9Sstevel@tonic-gate 	case 0:
13047c478bd9Sstevel@tonic-gate 		/* premature eof */
13057c478bd9Sstevel@tonic-gate 		rpc_callerr.re_errno = ENOLINK;
13067c478bd9Sstevel@tonic-gate 		rpc_callerr.re_terrno = 0;
13077c478bd9Sstevel@tonic-gate 		rpc_callerr.re_status = RPC_CANTRECV;
13087c478bd9Sstevel@tonic-gate 		len = -1;	/* it's really an error */
13097c478bd9Sstevel@tonic-gate 		break;
13107c478bd9Sstevel@tonic-gate 
13117c478bd9Sstevel@tonic-gate 	case -1:
13127c478bd9Sstevel@tonic-gate 		rpc_callerr.re_terrno = t_errno;
13137c478bd9Sstevel@tonic-gate 		rpc_callerr.re_errno = 0;
13147c478bd9Sstevel@tonic-gate 		rpc_callerr.re_status = RPC_CANTRECV;
13157c478bd9Sstevel@tonic-gate 		break;
13167c478bd9Sstevel@tonic-gate 	}
13177c478bd9Sstevel@tonic-gate 	return (len);
13187c478bd9Sstevel@tonic-gate }
13197c478bd9Sstevel@tonic-gate 
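/*
 * The EINTR path above debits the time already spent from the poll budget
 * in milliseconds.  A minimal sketch of that bookkeeping (the helper name
 * is illustrative, not part of the library):
 *
 *	static int
 *	elapsed_msec(const struct timeval *start, const struct timeval *now)
 *	{
 *		return ((now->tv_sec - start->tv_sec) * 1000 +
 *		    (now->tv_usec - start->tv_usec) / 1000);
 *	}
 *
 * A poll() interrupted after 1.5 seconds therefore reduces poll_time by
 * 1500 before it is retried, and the call fails with RPC_TIMEDOUT once the
 * budget goes negative.
 */
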
13207c478bd9Sstevel@tonic-gate static int
132161961e0fSrobinson write_vc(void *ct_tmp, caddr_t buf, int len)
13227c478bd9Sstevel@tonic-gate {
13237c478bd9Sstevel@tonic-gate 	int i, cnt;
13247c478bd9Sstevel@tonic-gate 	struct ct_data *ct = ct_tmp;
13257c478bd9Sstevel@tonic-gate 	int flag;
13267c478bd9Sstevel@tonic-gate 	int maxsz;
13277c478bd9Sstevel@tonic-gate 
13287c478bd9Sstevel@tonic-gate 	maxsz = ct->ct_tsdu;
13297c478bd9Sstevel@tonic-gate 
13307c478bd9Sstevel@tonic-gate 	/* Handle the non-blocking mode */
13317c478bd9Sstevel@tonic-gate 	if (ct->ct_is_oneway && ct->ct_io_mode == RPC_CL_NONBLOCKING) {
13327c478bd9Sstevel@tonic-gate 		/*
13337c478bd9Sstevel@tonic-gate 		 * Test a special case here. If the length of the current
13347c478bd9Sstevel@tonic-gate 		 * write is greater than the transport data unit, and the
13357c478bd9Sstevel@tonic-gate 		 * mode is non-blocking, we return RPC_CANTSEND.
13367c478bd9Sstevel@tonic-gate 		 * XXX  this is not very clean.
13377c478bd9Sstevel@tonic-gate 		 */
13387c478bd9Sstevel@tonic-gate 		if (maxsz > 0 && len > maxsz) {
13397c478bd9Sstevel@tonic-gate 			rpc_callerr.re_terrno = errno;
13407c478bd9Sstevel@tonic-gate 			rpc_callerr.re_errno = 0;
13417c478bd9Sstevel@tonic-gate 			rpc_callerr.re_status = RPC_CANTSEND;
13427c478bd9Sstevel@tonic-gate 			return (-1);
13437c478bd9Sstevel@tonic-gate 		}
13447c478bd9Sstevel@tonic-gate 
13457c478bd9Sstevel@tonic-gate 		len = nb_send(ct, buf, (unsigned)len);
13467c478bd9Sstevel@tonic-gate 		if (len == -1) {
13477c478bd9Sstevel@tonic-gate 			rpc_callerr.re_terrno = errno;
13487c478bd9Sstevel@tonic-gate 			rpc_callerr.re_errno = 0;
13497c478bd9Sstevel@tonic-gate 			rpc_callerr.re_status = RPC_CANTSEND;
13507c478bd9Sstevel@tonic-gate 		} else if (len == -2) {
13517c478bd9Sstevel@tonic-gate 			rpc_callerr.re_terrno = 0;
13527c478bd9Sstevel@tonic-gate 			rpc_callerr.re_errno = 0;
13537c478bd9Sstevel@tonic-gate 			rpc_callerr.re_status = RPC_CANTSTORE;
13547c478bd9Sstevel@tonic-gate 		}
13557c478bd9Sstevel@tonic-gate 		return (len);
13567c478bd9Sstevel@tonic-gate 	}
13577c478bd9Sstevel@tonic-gate 
13587c478bd9Sstevel@tonic-gate 	if ((maxsz == 0) || (maxsz == -1)) {
13597c478bd9Sstevel@tonic-gate 		/*
13607c478bd9Sstevel@tonic-gate 		 * t_snd() may return -1 for an error on the connection
13617c478bd9Sstevel@tonic-gate 		 * (the connection needs to be repaired/closed), and -2
13627c478bd9Sstevel@tonic-gate 		 * for a flow-control handling error (nothing to do but
13637c478bd9Sstevel@tonic-gate 		 * wait and flush later).
13647c478bd9Sstevel@tonic-gate 		 */
13657c478bd9Sstevel@tonic-gate 		if ((len = t_snd(ct->ct_fd, buf, (unsigned)len, 0)) == -1) {
13667c478bd9Sstevel@tonic-gate 			rpc_callerr.re_terrno = t_errno;
13677c478bd9Sstevel@tonic-gate 			rpc_callerr.re_errno = 0;
13687c478bd9Sstevel@tonic-gate 			rpc_callerr.re_status = RPC_CANTSEND;
13697c478bd9Sstevel@tonic-gate 		}
13707c478bd9Sstevel@tonic-gate 		return (len);
13717c478bd9Sstevel@tonic-gate 	}
13727c478bd9Sstevel@tonic-gate 
13737c478bd9Sstevel@tonic-gate 	/*
13747c478bd9Sstevel@tonic-gate 	 * This is for those transports which have a maximum size for data.
13757c478bd9Sstevel@tonic-gate 	 */
13767c478bd9Sstevel@tonic-gate 	for (cnt = len, i = 0; cnt > 0; cnt -= i, buf += i) {
13777c478bd9Sstevel@tonic-gate 		flag = cnt > maxsz ? T_MORE : 0;
13787c478bd9Sstevel@tonic-gate 		if ((i = t_snd(ct->ct_fd, buf, (unsigned)MIN(cnt, maxsz),
13797c478bd9Sstevel@tonic-gate 		    flag)) == -1) {
13807c478bd9Sstevel@tonic-gate 			rpc_callerr.re_terrno = t_errno;
13817c478bd9Sstevel@tonic-gate 			rpc_callerr.re_errno = 0;
13827c478bd9Sstevel@tonic-gate 			rpc_callerr.re_status = RPC_CANTSEND;
13837c478bd9Sstevel@tonic-gate 			return (-1);
13847c478bd9Sstevel@tonic-gate 		}
13857c478bd9Sstevel@tonic-gate 	}
13867c478bd9Sstevel@tonic-gate 	return (len);
13877c478bd9Sstevel@tonic-gate }
13887c478bd9Sstevel@tonic-gate 
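/*
 * Worked example for the fragmentation loop above (numbers are
 * illustrative), assuming each t_snd() accepts the full request: with
 * maxsz == 4096 and len == 10000 the record goes out as
 *
 *	t_snd(fd, buf,        4096, T_MORE);
 *	t_snd(fd, buf + 4096, 4096, T_MORE);
 *	t_snd(fd, buf + 8192, 1808, 0);
 *
 * i.e. T_MORE is set on every fragment except the last, so the pieces are
 * delivered to the peer as a single logical TSDU.
 */
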
13897c478bd9Sstevel@tonic-gate /*
13907c478bd9Sstevel@tonic-gate  * Receive the required bytes of data, even if it is fragmented.
13917c478bd9Sstevel@tonic-gate  */
13927c478bd9Sstevel@tonic-gate static int
139361961e0fSrobinson t_rcvall(int fd, char *buf, int len)
13947c478bd9Sstevel@tonic-gate {
13957c478bd9Sstevel@tonic-gate 	int moreflag;
13967c478bd9Sstevel@tonic-gate 	int final = 0;
13977c478bd9Sstevel@tonic-gate 	int res;
13987c478bd9Sstevel@tonic-gate 
13997c478bd9Sstevel@tonic-gate 	do {
14007c478bd9Sstevel@tonic-gate 		moreflag = 0;
14017c478bd9Sstevel@tonic-gate 		res = t_rcv(fd, buf, (unsigned)len, &moreflag);
14027c478bd9Sstevel@tonic-gate 		if (res == -1) {
14037c478bd9Sstevel@tonic-gate 			if (t_errno == TLOOK)
14047c478bd9Sstevel@tonic-gate 				switch (t_look(fd)) {
14057c478bd9Sstevel@tonic-gate 				case T_DISCONNECT:
140661961e0fSrobinson 					(void) t_rcvdis(fd, NULL);
140761961e0fSrobinson 					(void) t_snddis(fd, NULL);
14087c478bd9Sstevel@tonic-gate 					return (-1);
14097c478bd9Sstevel@tonic-gate 				case T_ORDREL:
14107c478bd9Sstevel@tonic-gate 					/* Received orderly release indication */
141161961e0fSrobinson 					(void) t_rcvrel(fd);
14127c478bd9Sstevel@tonic-gate 					/* Send orderly release indicator */
14137c478bd9Sstevel@tonic-gate 					(void) t_sndrel(fd);
14147c478bd9Sstevel@tonic-gate 					return (-1);
14157c478bd9Sstevel@tonic-gate 				default:
14167c478bd9Sstevel@tonic-gate 					return (-1);
14177c478bd9Sstevel@tonic-gate 				}
14187c478bd9Sstevel@tonic-gate 		} else if (res == 0) {
14197c478bd9Sstevel@tonic-gate 			return (0);
14207c478bd9Sstevel@tonic-gate 		}
14217c478bd9Sstevel@tonic-gate 		final += res;
14227c478bd9Sstevel@tonic-gate 		buf += res;
14237c478bd9Sstevel@tonic-gate 		len -= res;
14247c478bd9Sstevel@tonic-gate 	} while ((len > 0) && (moreflag & T_MORE));
14257c478bd9Sstevel@tonic-gate 	return (final);
14267c478bd9Sstevel@tonic-gate }
14277c478bd9Sstevel@tonic-gate 
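/*
 * Illustrative example: if the caller asks for 4000 bytes and the
 * transport delivers 1460 bytes with T_MORE set followed by the remaining
 * 2540 bytes, the loop above issues two t_rcv() calls and returns 4000.
 * A disconnect or orderly release surfaces as -1, which read_vc() maps to
 * RPC_CANTRECV.
 */
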
14287c478bd9Sstevel@tonic-gate static struct clnt_ops *
14297c478bd9Sstevel@tonic-gate clnt_vc_ops(void)
14307c478bd9Sstevel@tonic-gate {
14317c478bd9Sstevel@tonic-gate 	static struct clnt_ops ops;
14327c478bd9Sstevel@tonic-gate 	extern mutex_t	ops_lock;
14337c478bd9Sstevel@tonic-gate 
14347c478bd9Sstevel@tonic-gate 	/* VARIABLES PROTECTED BY ops_lock: ops */
14357c478bd9Sstevel@tonic-gate 
14367c478bd9Sstevel@tonic-gate 	sig_mutex_lock(&ops_lock);
14377c478bd9Sstevel@tonic-gate 	if (ops.cl_call == NULL) {
14387c478bd9Sstevel@tonic-gate 		ops.cl_call = clnt_vc_call;
14397c478bd9Sstevel@tonic-gate 		ops.cl_send = clnt_vc_send;
14407c478bd9Sstevel@tonic-gate 		ops.cl_abort = clnt_vc_abort;
14417c478bd9Sstevel@tonic-gate 		ops.cl_geterr = clnt_vc_geterr;
14427c478bd9Sstevel@tonic-gate 		ops.cl_freeres = clnt_vc_freeres;
14437c478bd9Sstevel@tonic-gate 		ops.cl_destroy = clnt_vc_destroy;
14447c478bd9Sstevel@tonic-gate 		ops.cl_control = clnt_vc_control;
14457c478bd9Sstevel@tonic-gate 	}
14467c478bd9Sstevel@tonic-gate 	sig_mutex_unlock(&ops_lock);
14477c478bd9Sstevel@tonic-gate 	return (&ops);
14487c478bd9Sstevel@tonic-gate }
14497c478bd9Sstevel@tonic-gate 
14507c478bd9Sstevel@tonic-gate /*
14517c478bd9Sstevel@tonic-gate  * Make sure that the time is not garbage.  A value of -1 is disallowed.
14527c478bd9Sstevel@tonic-gate  * Note that this is different from time_not_ok() in clnt_dg.c.
14537c478bd9Sstevel@tonic-gate  */
14547c478bd9Sstevel@tonic-gate static bool_t
145561961e0fSrobinson time_not_ok(struct timeval *t)
14567c478bd9Sstevel@tonic-gate {
14577c478bd9Sstevel@tonic-gate 	return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
14587c478bd9Sstevel@tonic-gate 	    t->tv_usec <= -1 || t->tv_usec > 1000000);
14597c478bd9Sstevel@tonic-gate }
14607c478bd9Sstevel@tonic-gate 
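/*
 * For example, a timeout of { 25, 0 } passes the check and becomes
 * ct_wait == 25000 milliseconds in the CLSET_TIMEOUT case above, while
 * { -1, 0 } fails it and makes clnt_vc_control() return FALSE.
 */
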
14617c478bd9Sstevel@tonic-gate 
14627c478bd9Sstevel@tonic-gate /* Compute the # of bytes that remains until the end of the buffer */
14637c478bd9Sstevel@tonic-gate #define	REMAIN_BYTES(p) (ct->ct_bufferSize-(ct->ct_##p - ct->ct_buffer))
14647c478bd9Sstevel@tonic-gate 
14657c478bd9Sstevel@tonic-gate static int
14667c478bd9Sstevel@tonic-gate addInBuffer(struct ct_data *ct, char *dataToAdd, unsigned int nBytes)
14677c478bd9Sstevel@tonic-gate {
14687c478bd9Sstevel@tonic-gate 	if (NULL == ct->ct_buffer) {
14697c478bd9Sstevel@tonic-gate 		/* Buffer not allocated yet. */
14707c478bd9Sstevel@tonic-gate 		char *buffer;
14717c478bd9Sstevel@tonic-gate 
147261961e0fSrobinson 		buffer = malloc(ct->ct_bufferSize);
14737c478bd9Sstevel@tonic-gate 		if (NULL == buffer) {
14747c478bd9Sstevel@tonic-gate 			errno = ENOMEM;
14757c478bd9Sstevel@tonic-gate 			return (-1);
14767c478bd9Sstevel@tonic-gate 		}
147761961e0fSrobinson 		(void) memcpy(buffer, dataToAdd, nBytes);
14787c478bd9Sstevel@tonic-gate 
14797c478bd9Sstevel@tonic-gate 		ct->ct_buffer = buffer;
14807c478bd9Sstevel@tonic-gate 		ct->ct_bufferReadPtr = buffer;
14817c478bd9Sstevel@tonic-gate 		ct->ct_bufferWritePtr = buffer + nBytes;
14827c478bd9Sstevel@tonic-gate 		ct->ct_bufferPendingSize = nBytes;
14837c478bd9Sstevel@tonic-gate 	} else {
14847c478bd9Sstevel@tonic-gate 		/*
14857c478bd9Sstevel@tonic-gate 		 * For an already allocated buffer, two mem copies
14867c478bd9Sstevel@tonic-gate 		 * might be needed, depending on the current
14877c478bd9Sstevel@tonic-gate 		 * writing position.
14887c478bd9Sstevel@tonic-gate 		 */
14897c478bd9Sstevel@tonic-gate 
14907c478bd9Sstevel@tonic-gate 		/* Compute the length of the first copy. */
14917c478bd9Sstevel@tonic-gate 		int len = MIN(nBytes, REMAIN_BYTES(bufferWritePtr));
14927c478bd9Sstevel@tonic-gate 
14937c478bd9Sstevel@tonic-gate 		ct->ct_bufferPendingSize += nBytes;
14947c478bd9Sstevel@tonic-gate 
149561961e0fSrobinson 		(void) memcpy(ct->ct_bufferWritePtr, dataToAdd, len);
14967c478bd9Sstevel@tonic-gate 		ct->ct_bufferWritePtr += len;
14977c478bd9Sstevel@tonic-gate 		nBytes -= len;
14987c478bd9Sstevel@tonic-gate 		if (0 == nBytes) {
14997c478bd9Sstevel@tonic-gate 			/* Only one memcpy was needed. */
15007c478bd9Sstevel@tonic-gate 
15017c478bd9Sstevel@tonic-gate 			/*
15027c478bd9Sstevel@tonic-gate 			 * If the write pointer is at the end of the buffer,
15037c478bd9Sstevel@tonic-gate 			 * wrap it now.
15047c478bd9Sstevel@tonic-gate 			 */
15057c478bd9Sstevel@tonic-gate 			if (ct->ct_bufferWritePtr ==
15067c478bd9Sstevel@tonic-gate 			    (ct->ct_buffer + ct->ct_bufferSize)) {
15077c478bd9Sstevel@tonic-gate 				ct->ct_bufferWritePtr = ct->ct_buffer;
15087c478bd9Sstevel@tonic-gate 			}
15097c478bd9Sstevel@tonic-gate 		} else {
15107c478bd9Sstevel@tonic-gate 			/* Two memcpys are needed. */
15117c478bd9Sstevel@tonic-gate 			dataToAdd += len;
15127c478bd9Sstevel@tonic-gate 
15137c478bd9Sstevel@tonic-gate 			/*
15147c478bd9Sstevel@tonic-gate 			 * Copy the remaining data to the beginning of the
15157c478bd9Sstevel@tonic-gate 			 * buffer
15167c478bd9Sstevel@tonic-gate 			 */
151761961e0fSrobinson 			(void) memcpy(ct->ct_buffer, dataToAdd, nBytes);
15187c478bd9Sstevel@tonic-gate 			ct->ct_bufferWritePtr = ct->ct_buffer + nBytes;
15197c478bd9Sstevel@tonic-gate 		}
15207c478bd9Sstevel@tonic-gate 	}
15217c478bd9Sstevel@tonic-gate 	return (0);
15227c478bd9Sstevel@tonic-gate }
15237c478bd9Sstevel@tonic-gate 
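/*
 * Worked example of the wrap-around above (illustrative numbers): with
 * ct_bufferSize == 16, the write pointer 12 bytes into the buffer and
 * nBytes == 10, REMAIN_BYTES(bufferWritePtr) is 4, so the first memcpy
 * stores 4 bytes at offsets 12..15 and the second stores the remaining
 * 6 bytes at offsets 0..5, leaving ct_bufferWritePtr at ct_buffer + 6.
 * The callers check that the pending data never exceeds ct_bufferSize
 * before adding, so the write pointer cannot overrun the read pointer.
 */
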
15247c478bd9Sstevel@tonic-gate static void
15257c478bd9Sstevel@tonic-gate consumeFromBuffer(struct ct_data *ct, unsigned int nBytes)
15267c478bd9Sstevel@tonic-gate {
15277c478bd9Sstevel@tonic-gate 	ct->ct_bufferPendingSize -= nBytes;
15287c478bd9Sstevel@tonic-gate 	if (ct->ct_bufferPendingSize == 0) {
15297c478bd9Sstevel@tonic-gate 		/*
15307c478bd9Sstevel@tonic-gate 		 * If the buffer contains no data, we set the two pointers at
15317c478bd9Sstevel@tonic-gate 		 * the beginning of the buffer (to minimize buffer wraps).
15327c478bd9Sstevel@tonic-gate 		 */
15337c478bd9Sstevel@tonic-gate 		ct->ct_bufferReadPtr = ct->ct_bufferWritePtr = ct->ct_buffer;
15347c478bd9Sstevel@tonic-gate 	} else {
15357c478bd9Sstevel@tonic-gate 		ct->ct_bufferReadPtr += nBytes;
15367c478bd9Sstevel@tonic-gate 		if (ct->ct_bufferReadPtr >
15377c478bd9Sstevel@tonic-gate 		    ct->ct_buffer + ct->ct_bufferSize) {
15387c478bd9Sstevel@tonic-gate 			ct->ct_bufferReadPtr -= ct->ct_bufferSize;
15397c478bd9Sstevel@tonic-gate 		}
15407c478bd9Sstevel@tonic-gate 	}
15417c478bd9Sstevel@tonic-gate }
15427c478bd9Sstevel@tonic-gate 
15437c478bd9Sstevel@tonic-gate static int
15447c478bd9Sstevel@tonic-gate iovFromBuffer(struct ct_data *ct, struct iovec *iov)
15457c478bd9Sstevel@tonic-gate {
15467c478bd9Sstevel@tonic-gate 	int l;
15477c478bd9Sstevel@tonic-gate 
15487c478bd9Sstevel@tonic-gate 	if (ct->ct_bufferPendingSize == 0)
15497c478bd9Sstevel@tonic-gate 		return (0);
15507c478bd9Sstevel@tonic-gate 
15517c478bd9Sstevel@tonic-gate 	l = REMAIN_BYTES(bufferReadPtr);
15527c478bd9Sstevel@tonic-gate 	if (l < ct->ct_bufferPendingSize) {
15537c478bd9Sstevel@tonic-gate 		/* Buffer in two fragments. */
15547c478bd9Sstevel@tonic-gate 		iov[0].iov_base = ct->ct_bufferReadPtr;
15557c478bd9Sstevel@tonic-gate 		iov[0].iov_len  = l;
15567c478bd9Sstevel@tonic-gate 
15577c478bd9Sstevel@tonic-gate 		iov[1].iov_base = ct->ct_buffer;
15587c478bd9Sstevel@tonic-gate 		iov[1].iov_len  = ct->ct_bufferPendingSize - l;
15597c478bd9Sstevel@tonic-gate 		return (2);
15607c478bd9Sstevel@tonic-gate 	} else {
15617c478bd9Sstevel@tonic-gate 		/* Buffer in one fragment. */
15627c478bd9Sstevel@tonic-gate 		iov[0].iov_base = ct->ct_bufferReadPtr;
15637c478bd9Sstevel@tonic-gate 		iov[0].iov_len  = ct->ct_bufferPendingSize;
15647c478bd9Sstevel@tonic-gate 		return (1);
15657c478bd9Sstevel@tonic-gate 	}
15667c478bd9Sstevel@tonic-gate }
15677c478bd9Sstevel@tonic-gate 
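/*
 * Continuing the example above: with ct_bufferReadPtr at offset 12 and 10
 * pending bytes, iovFromBuffer() fills iov[0] = { buffer + 12, 4 } and
 * iov[1] = { buffer, 6 } and returns 2, which do_flush() hands to
 * writev(); nb_send() appends the new message as a third element before
 * doing the same.
 */
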
15687c478bd9Sstevel@tonic-gate static bool_t
15697c478bd9Sstevel@tonic-gate set_flush_mode(struct ct_data *ct, int mode)
15707c478bd9Sstevel@tonic-gate {
15717c478bd9Sstevel@tonic-gate 	switch (mode) {
15727c478bd9Sstevel@tonic-gate 	case RPC_CL_BLOCKING_FLUSH:
15737c478bd9Sstevel@tonic-gate 		/* flush the buffer completely (possibly blocking) */
15747c478bd9Sstevel@tonic-gate 	case RPC_CL_BESTEFFORT_FLUSH:
15757c478bd9Sstevel@tonic-gate 		/* flush as much as possible without blocking */
15767c478bd9Sstevel@tonic-gate 	case RPC_CL_DEFAULT_FLUSH:
15777c478bd9Sstevel@tonic-gate 		/* flush according to the currently defined policy */
15787c478bd9Sstevel@tonic-gate 		ct->ct_blocking_mode = mode;
15797c478bd9Sstevel@tonic-gate 		return (TRUE);
15807c478bd9Sstevel@tonic-gate 	default:
15817c478bd9Sstevel@tonic-gate 		return (FALSE);
15827c478bd9Sstevel@tonic-gate 	}
15837c478bd9Sstevel@tonic-gate }
15847c478bd9Sstevel@tonic-gate 
15857c478bd9Sstevel@tonic-gate static bool_t
15867c478bd9Sstevel@tonic-gate set_io_mode(struct ct_data *ct, int ioMode)
15877c478bd9Sstevel@tonic-gate {
15887c478bd9Sstevel@tonic-gate 	switch (ioMode) {
15897c478bd9Sstevel@tonic-gate 	case RPC_CL_BLOCKING:
15907c478bd9Sstevel@tonic-gate 		if (ct->ct_io_mode == RPC_CL_NONBLOCKING) {
15917c478bd9Sstevel@tonic-gate 			if (NULL != ct->ct_buffer) {
15927c478bd9Sstevel@tonic-gate 				/*
15937c478bd9Sstevel@tonic-gate 				 * If a buffer was allocated for this
15947c478bd9Sstevel@tonic-gate 				 * connection, flush it now, and free it.
15957c478bd9Sstevel@tonic-gate 				 */
159661961e0fSrobinson 				(void) do_flush(ct, RPC_CL_BLOCKING_FLUSH);
15977c478bd9Sstevel@tonic-gate 				free(ct->ct_buffer);
15987c478bd9Sstevel@tonic-gate 				ct->ct_buffer = NULL;
15997c478bd9Sstevel@tonic-gate 			}
160061961e0fSrobinson 			(void) unregister_nb(ct);
16017c478bd9Sstevel@tonic-gate 			ct->ct_io_mode = ioMode;
16027c478bd9Sstevel@tonic-gate 		}
16037c478bd9Sstevel@tonic-gate 		break;
16047c478bd9Sstevel@tonic-gate 	case RPC_CL_NONBLOCKING:
16057c478bd9Sstevel@tonic-gate 		if (ct->ct_io_mode == RPC_CL_BLOCKING) {
16067c478bd9Sstevel@tonic-gate 			if (-1 == register_nb(ct)) {
16077c478bd9Sstevel@tonic-gate 				return (FALSE);
16087c478bd9Sstevel@tonic-gate 			}
16097c478bd9Sstevel@tonic-gate 			ct->ct_io_mode = ioMode;
16107c478bd9Sstevel@tonic-gate 		}
16117c478bd9Sstevel@tonic-gate 		break;
16127c478bd9Sstevel@tonic-gate 	default:
16137c478bd9Sstevel@tonic-gate 		return (FALSE);
16147c478bd9Sstevel@tonic-gate 	}
16157c478bd9Sstevel@tonic-gate 	return (TRUE);
16167c478bd9Sstevel@tonic-gate }
16177c478bd9Sstevel@tonic-gate 
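/*
 * Illustrative usage sketch (not part of the library): an application that
 * wants buffered one-way sends switches the handle through clnt_control()
 * before calling clnt_send().  "clnt" is assumed to be a
 * connection-oriented handle.
 *
 *	int mode = RPC_CL_NONBLOCKING;
 *	int flush = RPC_CL_BESTEFFORT_FLUSH;
 *
 *	if (!clnt_control(clnt, CLSET_IO_MODE, (char *)&mode))
 *		clnt_perror(clnt, "CLSET_IO_MODE");
 *	if (!clnt_control(clnt, CLSET_FLUSH_MODE, (char *)&flush))
 *		clnt_perror(clnt, "CLSET_FLUSH_MODE");
 *
 * Switching back to RPC_CL_BLOCKING flushes and frees any buffered data,
 * as set_io_mode() above shows.
 */
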
16187c478bd9Sstevel@tonic-gate static int
16197c478bd9Sstevel@tonic-gate do_flush(struct ct_data *ct, uint_t flush_mode)
16207c478bd9Sstevel@tonic-gate {
16217c478bd9Sstevel@tonic-gate 	int result;
16227c478bd9Sstevel@tonic-gate 	if (ct->ct_bufferPendingSize == 0) {
16237c478bd9Sstevel@tonic-gate 		return (0);
16247c478bd9Sstevel@tonic-gate 	}
16257c478bd9Sstevel@tonic-gate 
16267c478bd9Sstevel@tonic-gate 	switch (flush_mode) {
16277c478bd9Sstevel@tonic-gate 	case RPC_CL_BLOCKING_FLUSH:
16287c478bd9Sstevel@tonic-gate 		if (!set_blocking_connection(ct, TRUE)) {
16297c478bd9Sstevel@tonic-gate 			return (-1);
16307c478bd9Sstevel@tonic-gate 		}
16317c478bd9Sstevel@tonic-gate 		while (ct->ct_bufferPendingSize > 0) {
16327c478bd9Sstevel@tonic-gate 			if (REMAIN_BYTES(bufferReadPtr) <
16337c478bd9Sstevel@tonic-gate 			    ct->ct_bufferPendingSize) {
16347c478bd9Sstevel@tonic-gate 				struct iovec iov[2];
163561961e0fSrobinson 				(void) iovFromBuffer(ct, iov);
16367c478bd9Sstevel@tonic-gate 				result = writev(ct->ct_fd, iov, 2);
16377c478bd9Sstevel@tonic-gate 			} else {
16387c478bd9Sstevel@tonic-gate 				result = t_snd(ct->ct_fd, ct->ct_bufferReadPtr,
16397c478bd9Sstevel@tonic-gate 				    ct->ct_bufferPendingSize, 0);
16407c478bd9Sstevel@tonic-gate 			}
16417c478bd9Sstevel@tonic-gate 			if (result < 0) {
16427c478bd9Sstevel@tonic-gate 				return (-1);
16437c478bd9Sstevel@tonic-gate 			}
16447c478bd9Sstevel@tonic-gate 			consumeFromBuffer(ct, result);
16457c478bd9Sstevel@tonic-gate 		}
16467c478bd9Sstevel@tonic-gate 
16477c478bd9Sstevel@tonic-gate 		break;
16487c478bd9Sstevel@tonic-gate 
16497c478bd9Sstevel@tonic-gate 	case RPC_CL_BESTEFFORT_FLUSH:
165061961e0fSrobinson 		(void) set_blocking_connection(ct, FALSE);
16517c478bd9Sstevel@tonic-gate 		if (REMAIN_BYTES(bufferReadPtr) < ct->ct_bufferPendingSize) {
16527c478bd9Sstevel@tonic-gate 			struct iovec iov[2];
165361961e0fSrobinson 			(void) iovFromBuffer(ct, iov);
16547c478bd9Sstevel@tonic-gate 			result = writev(ct->ct_fd, iov, 2);
16557c478bd9Sstevel@tonic-gate 		} else {
16567c478bd9Sstevel@tonic-gate 			result = t_snd(ct->ct_fd, ct->ct_bufferReadPtr,
16577c478bd9Sstevel@tonic-gate 			    ct->ct_bufferPendingSize, 0);
16587c478bd9Sstevel@tonic-gate 		}
16597c478bd9Sstevel@tonic-gate 		if (result < 0) {
16607c478bd9Sstevel@tonic-gate 			if (errno != EWOULDBLOCK) {
16617c478bd9Sstevel@tonic-gate 				perror("flush");
16627c478bd9Sstevel@tonic-gate 				return (-1);
16637c478bd9Sstevel@tonic-gate 			}
16647c478bd9Sstevel@tonic-gate 			return (0);
16657c478bd9Sstevel@tonic-gate 		}
16667c478bd9Sstevel@tonic-gate 		if (result > 0)
16677c478bd9Sstevel@tonic-gate 			consumeFromBuffer(ct, result);
16687c478bd9Sstevel@tonic-gate 		break;
16697c478bd9Sstevel@tonic-gate 	}
16707c478bd9Sstevel@tonic-gate 	return (0);
16717c478bd9Sstevel@tonic-gate }
16727c478bd9Sstevel@tonic-gate 
16737c478bd9Sstevel@tonic-gate /*
16747c478bd9Sstevel@tonic-gate  * Non blocking send.
16757c478bd9Sstevel@tonic-gate  */
16767c478bd9Sstevel@tonic-gate 
16777c478bd9Sstevel@tonic-gate static int
16787c478bd9Sstevel@tonic-gate nb_send(struct ct_data *ct, void *buff, unsigned int nBytes)
16797c478bd9Sstevel@tonic-gate {
16807c478bd9Sstevel@tonic-gate 	int result;
16817c478bd9Sstevel@tonic-gate 
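	/*
	 * The record-marking header at the start of the message must
	 * have the last-fragment bit (the high-order bit) set, i.e.
	 * the message must be a single, self-contained record.
	 */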
16827c478bd9Sstevel@tonic-gate 	if (!(ntohl(*(uint32_t *)buff) & (1U << 31))) {
16837c478bd9Sstevel@tonic-gate 		return (-1);
16847c478bd9Sstevel@tonic-gate 	}
16857c478bd9Sstevel@tonic-gate 
16867c478bd9Sstevel@tonic-gate 	/*
16877c478bd9Sstevel@tonic-gate 	 * Check to see if the current message can be stored fully in the
16887c478bd9Sstevel@tonic-gate 	 * buffer. We have to check this now because it may be impossible
16897c478bd9Sstevel@tonic-gate 	 * buffer.  We have to check this now because it may be impossible
16907c478bd9Sstevel@tonic-gate 	 * to send any data, in which case the whole message must be buffered.
16917c478bd9Sstevel@tonic-gate 	if (nBytes > (ct->ct_bufferSize - ct->ct_bufferPendingSize)) {
16927c478bd9Sstevel@tonic-gate 		/* Try to flush  (to free some space). */
169361961e0fSrobinson 		(void) do_flush(ct, RPC_CL_BESTEFFORT_FLUSH);
16947c478bd9Sstevel@tonic-gate 
16957c478bd9Sstevel@tonic-gate 		/* Can we store the message now ? */
16967c478bd9Sstevel@tonic-gate 		/* Can we store the message now? */
16977c478bd9Sstevel@tonic-gate 			return (-2);
16987c478bd9Sstevel@tonic-gate 	}
16997c478bd9Sstevel@tonic-gate 
170061961e0fSrobinson 	(void) set_blocking_connection(ct, FALSE);
17017c478bd9Sstevel@tonic-gate 
17027c478bd9Sstevel@tonic-gate 	/*
17037c478bd9Sstevel@tonic-gate 	 * If there is no data pending, we can simply try
17047c478bd9Sstevel@tonic-gate 	 * to send our data.
17057c478bd9Sstevel@tonic-gate 	 */
17067c478bd9Sstevel@tonic-gate 	if (ct->ct_bufferPendingSize == 0) {
17077c478bd9Sstevel@tonic-gate 		result = t_snd(ct->ct_fd, buff, nBytes, 0);
17087c478bd9Sstevel@tonic-gate 		if (result == -1) {
17097c478bd9Sstevel@tonic-gate 			if (errno == EWOULDBLOCK) {
17107c478bd9Sstevel@tonic-gate 				result = 0;
17117c478bd9Sstevel@tonic-gate 			} else {
17127c478bd9Sstevel@tonic-gate 				perror("send");
17137c478bd9Sstevel@tonic-gate 				return (-1);
17147c478bd9Sstevel@tonic-gate 			}
17157c478bd9Sstevel@tonic-gate 		}
17167c478bd9Sstevel@tonic-gate 		/*
17177c478bd9Sstevel@tonic-gate 		 * If we have not sent all of the data, we must store
17187c478bd9Sstevel@tonic-gate 		 * the remainder in the buffer.
17197c478bd9Sstevel@tonic-gate 		 */
17207c478bd9Sstevel@tonic-gate 		if (result != nBytes) {
17217c478bd9Sstevel@tonic-gate 			if (addInBuffer(ct, (char *)buff + result,
17227c478bd9Sstevel@tonic-gate 			    nBytes - result) == -1) {
17237c478bd9Sstevel@tonic-gate 				return (-1);
17247c478bd9Sstevel@tonic-gate 			}
17257c478bd9Sstevel@tonic-gate 		}
17267c478bd9Sstevel@tonic-gate 	} else {
17277c478bd9Sstevel@tonic-gate 		/*
17287c478bd9Sstevel@tonic-gate 		 * Some data pending in the buffer.  We try to send
17297c478bd9Sstevel@tonic-gate 		 * both buffer data and current message in one shot.
17307c478bd9Sstevel@tonic-gate 		 */
17317c478bd9Sstevel@tonic-gate 		struct iovec iov[3];
17327c478bd9Sstevel@tonic-gate 		int i = iovFromBuffer(ct, &iov[0]);
17337c478bd9Sstevel@tonic-gate 
17347c478bd9Sstevel@tonic-gate 		iov[i].iov_base = buff;
17357c478bd9Sstevel@tonic-gate 		iov[i].iov_len  = nBytes;
17367c478bd9Sstevel@tonic-gate 
17377c478bd9Sstevel@tonic-gate 		result = writev(ct->ct_fd, iov, i+1);
17387c478bd9Sstevel@tonic-gate 		if (result == -1) {
17397c478bd9Sstevel@tonic-gate 			if (errno == EWOULDBLOCK) {
17407c478bd9Sstevel@tonic-gate 				/* No bytes sent */
17417c478bd9Sstevel@tonic-gate 				result = 0;
17427c478bd9Sstevel@tonic-gate 			} else {
17437c478bd9Sstevel@tonic-gate 				return (-1);
17447c478bd9Sstevel@tonic-gate 			}
17457c478bd9Sstevel@tonic-gate 		}
17467c478bd9Sstevel@tonic-gate 
17477c478bd9Sstevel@tonic-gate 		/*
17487c478bd9Sstevel@tonic-gate 		 * Add the bytes from the message
17497c478bd9Sstevel@tonic-gate 		 * that we have not sent.
17507c478bd9Sstevel@tonic-gate 		 */
17517c478bd9Sstevel@tonic-gate 		if (result <= ct->ct_bufferPendingSize) {
17527c478bd9Sstevel@tonic-gate 			/* No bytes from the message sent */
17537c478bd9Sstevel@tonic-gate 			consumeFromBuffer(ct, result);
17547c478bd9Sstevel@tonic-gate 			if (addInBuffer(ct, buff, nBytes) == -1) {
17557c478bd9Sstevel@tonic-gate 				return (-1);
17567c478bd9Sstevel@tonic-gate 			}
17577c478bd9Sstevel@tonic-gate 		} else {
17587c478bd9Sstevel@tonic-gate 			/*
17597c478bd9Sstevel@tonic-gate 			 * Some bytes of the message are sent.
17607c478bd9Sstevel@tonic-gate 			 * Compute the length of the message that has
17617c478bd9Sstevel@tonic-gate 			 * been sent.
17627c478bd9Sstevel@tonic-gate 			 */
17637c478bd9Sstevel@tonic-gate 			int len = result - ct->ct_bufferPendingSize;
17647c478bd9Sstevel@tonic-gate 
17657c478bd9Sstevel@tonic-gate 			/* So, empty the buffer. */
17667c478bd9Sstevel@tonic-gate 			ct->ct_bufferReadPtr = ct->ct_buffer;
17677c478bd9Sstevel@tonic-gate 			ct->ct_bufferWritePtr = ct->ct_buffer;
17687c478bd9Sstevel@tonic-gate 			ct->ct_bufferPendingSize = 0;
17697c478bd9Sstevel@tonic-gate 
17707c478bd9Sstevel@tonic-gate 			/* And add the remaining part of the message. */
17717c478bd9Sstevel@tonic-gate 			if (len != nBytes) {
17727c478bd9Sstevel@tonic-gate 				if (addInBuffer(ct, (char *)buff + len,
17737c478bd9Sstevel@tonic-gate 				    nBytes-len) == -1) {
17747c478bd9Sstevel@tonic-gate 					return (-1);
17757c478bd9Sstevel@tonic-gate 				}
17767c478bd9Sstevel@tonic-gate 			}
17777c478bd9Sstevel@tonic-gate 		}
17787c478bd9Sstevel@tonic-gate 	}
17797c478bd9Sstevel@tonic-gate 	return (nBytes);
17807c478bd9Sstevel@tonic-gate }
17817c478bd9Sstevel@tonic-gate 
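/*
 * Worked example for the partial-send accounting above (illustrative
 * numbers): with 300 bytes already pending and a 500 byte message, suppose
 * writev() accepts 650 bytes.  Since 650 > 300, len is 350, the ring
 * buffer is emptied, and the 150 unsent bytes of the message are added
 * back with addInBuffer(); the call still reports nBytes (500) to the
 * caller, as for any successfully queued one-way send.
 */
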
17827c478bd9Sstevel@tonic-gate static void
178361961e0fSrobinson flush_registered_clients(void)
17847c478bd9Sstevel@tonic-gate {
17857c478bd9Sstevel@tonic-gate 	struct nb_reg_node *node;
17867c478bd9Sstevel@tonic-gate 
17877c478bd9Sstevel@tonic-gate 	if (LIST_ISEMPTY(nb_first)) {
17887c478bd9Sstevel@tonic-gate 		return;
17897c478bd9Sstevel@tonic-gate 	}
17907c478bd9Sstevel@tonic-gate 
17917c478bd9Sstevel@tonic-gate 	LIST_FOR_EACH(nb_first, node) {
179261961e0fSrobinson 		(void) do_flush(node->ct, RPC_CL_BLOCKING_FLUSH);
17937c478bd9Sstevel@tonic-gate 	}
17947c478bd9Sstevel@tonic-gate }
17957c478bd9Sstevel@tonic-gate 
17967c478bd9Sstevel@tonic-gate static int
179761961e0fSrobinson allocate_chunk(void)
17987c478bd9Sstevel@tonic-gate {
17997c478bd9Sstevel@tonic-gate #define	CHUNK_SIZE 16
180061961e0fSrobinson 	struct nb_reg_node *chk =
18017c478bd9Sstevel@tonic-gate 	    malloc(sizeof (struct nb_reg_node) * CHUNK_SIZE);
18027c478bd9Sstevel@tonic-gate 	struct nb_reg_node *n;
18037c478bd9Sstevel@tonic-gate 	int i;
18047c478bd9Sstevel@tonic-gate 
18057c478bd9Sstevel@tonic-gate 	if (NULL == chk) {
18067c478bd9Sstevel@tonic-gate 		return (-1);
18077c478bd9Sstevel@tonic-gate 	}
18087c478bd9Sstevel@tonic-gate 
18097c478bd9Sstevel@tonic-gate 	n = chk;
18107c478bd9Sstevel@tonic-gate 	for (i = 0; i < CHUNK_SIZE-1; ++i) {
18117c478bd9Sstevel@tonic-gate 		n[i].next = &(n[i+1]);
18127c478bd9Sstevel@tonic-gate 	}
18137c478bd9Sstevel@tonic-gate 	n[CHUNK_SIZE-1].next = (struct nb_reg_node *)&nb_free;
18147c478bd9Sstevel@tonic-gate 	nb_free = chk;
18157c478bd9Sstevel@tonic-gate 	return (0);
18167c478bd9Sstevel@tonic-gate }
18177c478bd9Sstevel@tonic-gate 
18187c478bd9Sstevel@tonic-gate static int
18197c478bd9Sstevel@tonic-gate register_nb(struct ct_data *ct)
18207c478bd9Sstevel@tonic-gate {
18217c478bd9Sstevel@tonic-gate 	struct nb_reg_node *node;
18227c478bd9Sstevel@tonic-gate 
182361961e0fSrobinson 	(void) mutex_lock(&nb_list_mutex);
18247c478bd9Sstevel@tonic-gate 
18257c478bd9Sstevel@tonic-gate 	if (LIST_ISEMPTY(nb_free) && (allocate_chunk() == -1)) {
182661961e0fSrobinson 		(void) mutex_unlock(&nb_list_mutex);
18277c478bd9Sstevel@tonic-gate 		errno = ENOMEM;
18287c478bd9Sstevel@tonic-gate 		return (-1);
18297c478bd9Sstevel@tonic-gate 	}
18307c478bd9Sstevel@tonic-gate 
18317c478bd9Sstevel@tonic-gate 	if (!exit_handler_set) {
183261961e0fSrobinson 		(void) atexit(flush_registered_clients);
18337c478bd9Sstevel@tonic-gate 		exit_handler_set = TRUE;
18347c478bd9Sstevel@tonic-gate 	}
18357c478bd9Sstevel@tonic-gate 	/* Get the first free node */
18367c478bd9Sstevel@tonic-gate 	LIST_EXTRACT(nb_free, node);
18377c478bd9Sstevel@tonic-gate 
18387c478bd9Sstevel@tonic-gate 	node->ct = ct;
18397c478bd9Sstevel@tonic-gate 
18407c478bd9Sstevel@tonic-gate 	LIST_ADD(nb_first, node);
184161961e0fSrobinson 	(void) mutex_unlock(&nb_list_mutex);
18427c478bd9Sstevel@tonic-gate 
18437c478bd9Sstevel@tonic-gate 	return (0);
18447c478bd9Sstevel@tonic-gate }
18457c478bd9Sstevel@tonic-gate 
18467c478bd9Sstevel@tonic-gate static int
18477c478bd9Sstevel@tonic-gate unregister_nb(struct ct_data *ct)
18487c478bd9Sstevel@tonic-gate {
18497c478bd9Sstevel@tonic-gate 	struct nb_reg_node *node;
18507c478bd9Sstevel@tonic-gate 
185161961e0fSrobinson 	(void) mutex_lock(&nb_list_mutex);
18527c478bd9Sstevel@tonic-gate 	assert(!LIST_ISEMPTY(nb_first));
18537c478bd9Sstevel@tonic-gate 
18547c478bd9Sstevel@tonic-gate 	node = nb_first;
18557c478bd9Sstevel@tonic-gate 	LIST_FOR_EACH(nb_first, node) {
18567c478bd9Sstevel@tonic-gate 		if (node->next->ct == ct) {
18577c478bd9Sstevel@tonic-gate 			/* Get the node to unregister. */
18587c478bd9Sstevel@tonic-gate 			struct nb_reg_node *n = node->next;
18597c478bd9Sstevel@tonic-gate 			node->next = n->next;
18607c478bd9Sstevel@tonic-gate 
18617c478bd9Sstevel@tonic-gate 			n->ct = NULL;
18627c478bd9Sstevel@tonic-gate 			LIST_ADD(nb_free, n);
18637c478bd9Sstevel@tonic-gate 			break;
18647c478bd9Sstevel@tonic-gate 		}
18657c478bd9Sstevel@tonic-gate 	}
186661961e0fSrobinson 	(void) mutex_unlock(&nb_list_mutex);
18677c478bd9Sstevel@tonic-gate 	return (0);
18687c478bd9Sstevel@tonic-gate }
1869