xref: /freebsd/lib/libc/rpc/clnt_vc.c (revision dc36d6f9bb1753f3808552f3afd30eda9a7b206a)
1 /*	$NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-3-Clause
5  *
6  * Copyright (c) 2009, Sun Microsystems, Inc.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions are met:
11  * - Redistributions of source code must retain the above copyright notice,
12  *   this list of conditions and the following disclaimer.
13  * - Redistributions in binary form must reproduce the above copyright notice,
14  *   this list of conditions and the following disclaimer in the documentation
15  *   and/or other materials provided with the distribution.
16  * - Neither the name of Sun Microsystems, Inc. nor the names of its
17  *   contributors may be used to endorse or promote products derived
18  *   from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * clnt_tcp.c, Implements a TCP/IP based, client side RPC.
35  *
36  * Copyright (C) 1984, Sun Microsystems, Inc.
37  *
 38  * TCP-based RPC supports 'batched calls'.
 39  * A sequence of calls may be batched up in a send buffer.  The rpc call
 40  * returns immediately to the client even though the call was not necessarily
 41  * sent.  Batching occurs if the results' xdr routine is NULL (0) AND
42  * the rpc timeout value is zero (see clnt.h, rpc).
43  *
44  * Clients should NOT casually batch calls that in fact return results; that is,
45  * the server side should be aware that a call is batched and not produce any
46  * return message.  Batched calls that produce many result messages can
47  * deadlock (netlock) the client and the server....
48  *
49  * Now go hang yourself.
50  */
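/*
 * Editor's note (illustration only, not part of this file): given a
 * CLIENT handle on a TCP transport, a caller could batch a request by
 * passing a NULL result xdrproc and a zero timeout, then flush the
 * queue with an ordinary call.  BATCHPROC, xdr_entry and "entry" below
 * are hypothetical names.
 *
 *	struct timeval zero = { 0, 0 };
 *	struct timeval wait = { 25, 0 };
 *
 *	// Queued only: NULL results + zero timeout => record not shipped yet.
 *	clnt_call(clnt, BATCHPROC, (xdrproc_t)xdr_entry, (char *)&entry,
 *	    (xdrproc_t)NULL, NULL, zero);
 *
 *	// A normal call (real results xdrproc, non-zero timeout) ships the
 *	// whole batch and waits for its own reply.
 *	clnt_call(clnt, NULLPROC, (xdrproc_t)xdr_void, NULL,
 *	    (xdrproc_t)xdr_void, NULL, wait);
 */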
51 
52 #include "namespace.h"
53 #include "reentrant.h"
54 #include <sys/types.h>
55 #include <sys/poll.h>
56 #include <sys/syslog.h>
57 #include <sys/socket.h>
58 #include <sys/tree.h>
59 #include <sys/un.h>
60 #include <sys/uio.h>
61 
62 #include <arpa/inet.h>
63 #include <assert.h>
64 #include <err.h>
65 #include <errno.h>
66 #include <netdb.h>
67 #include <pthread.h>
68 #include <stdio.h>
69 #include <stdbool.h>
70 #include <stdlib.h>
71 #include <string.h>
72 #include <unistd.h>
73 #include <signal.h>
74 
75 #include <rpc/rpc.h>
76 #include <rpc/rpcsec_gss.h>
77 #include "un-namespace.h"
78 #include "rpc_com.h"
79 #include "mt_misc.h"
80 
81 #define MCALL_MSG_SIZE 24
82 
83 struct cmessage {
84         struct cmsghdr cmsg;
85         struct cmsgcred cmcred;
86 };
87 
88 static enum clnt_stat clnt_vc_call(CLIENT *, rpcproc_t, xdrproc_t, void *,
89     xdrproc_t, void *, struct timeval);
90 static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
91 static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *);
92 static void clnt_vc_abort(CLIENT *);
93 static bool_t clnt_vc_control(CLIENT *, u_int, void *);
94 static void clnt_vc_destroy(CLIENT *);
95 static struct clnt_ops *clnt_vc_ops(void);
96 static bool_t time_not_ok(struct timeval *);
97 static int read_vc(void *, void *, int);
98 static int write_vc(void *, void *, int);
99 static int __msgwrite(int, void *, size_t);
100 static int __msgread(int, void *, size_t);
101 
102 struct ct_data {
103 	int		ct_fd;		/* connection's fd */
104 	bool_t		ct_closeit;	/* close it on destroy */
105 	struct timeval	ct_wait;	/* wait interval for the reply */
106 	bool_t          ct_waitset;	/* wait set by clnt_control? */
107 	struct netbuf	ct_addr;	/* remote addr */
108 	struct rpc_err	ct_error;
109 	union {
110 		char	ct_mcallc[MCALL_MSG_SIZE];	/* marshalled callmsg */
111 		u_int32_t ct_mcalli;
112 	} ct_u;
113 	u_int		ct_mpos;	/* pos after marshal */
114 	XDR		ct_xdrs;	/* XDR stream */
115 };
116 
117 /*
 118  *      This machinery implements per-fd locks for MT-safety.  It is not
 119  *      sufficient to do per-CLIENT handle locks for MT-safety because a
 120  *      user may create more than one CLIENT handle with the same fd behind
 121  *      it.  Therefore, we keep a red-black tree of per-fd lock structures
 122  *      (struct vc_fd), each holding a mutex for its fd.  The tree itself is
 123  *      protected by the clnt_fd_lock mutex; an fd's mutex is held while a
 124  *      call is active on some CLIENT handle created for that fd.  The current
 125  *      implementation holds that lock across the entire RPC call and reply.
 126  *      Yes, this is silly, and as soon as this code is proven to work, this
 127  *      should be the first thing fixed.  One step at a time.
128  */
129 struct vc_fd {
130 	RB_ENTRY(vc_fd) vc_link;
131 	int fd;
132 	mutex_t mtx;
133 };
134 static inline int
135 cmp_vc_fd(struct vc_fd *a, struct vc_fd *b)
136 {
137        if (a->fd > b->fd) {
138                return (1);
139        } else if (a->fd < b->fd) {
140                return (-1);
141        } else {
142                return (0);
143        }
144 }
145 RB_HEAD(vc_fd_list, vc_fd);
146 RB_PROTOTYPE(vc_fd_list, vc_fd, vc_link, cmp_vc_fd);
147 RB_GENERATE(vc_fd_list, vc_fd, vc_link, cmp_vc_fd);
148 struct vc_fd_list vc_fd_head = RB_INITIALIZER(&vc_fd_head);
149 
150 /*
151  * Find the lock structure for the given file descriptor, or initialize it if
152  * it does not already exist.  The clnt_fd_lock mutex must be held.
153  */
154 static struct vc_fd *
155 vc_fd_find(int fd)
156 {
157 	struct vc_fd key, *elem;
158 
159 	key.fd = fd;
160 	elem = RB_FIND(vc_fd_list, &vc_fd_head, &key);
161 	if (elem == NULL) {
162 		elem = calloc(1, sizeof(*elem));
163 		elem->fd = fd;
164 		mutex_init(&elem->mtx, NULL);
165 		RB_INSERT(vc_fd_list, &vc_fd_head, elem);
166 	}
167 	return (elem);
168 }
169 
170 static void
171 release_fd_lock(struct vc_fd *elem, sigset_t mask)
172 {
173 	mutex_unlock(&elem->mtx);
174 	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
175 }
176 
177 static const char clnt_vc_errstr[] = "%s : %s";
178 static const char clnt_vc_str[] = "clnt_vc_create";
179 static const char __no_mem_str[] = "out of memory";
180 
181 /*
182  * Create a client handle for a connection.
 183  * Default options are set, which the user can change using clnt_control().
184  * The rpc/vc package does buffering similar to stdio, so the client
185  * must pick send and receive buffer sizes, 0 => use the default.
186  * NB: fd is copied into a private area.
 187  * NB: The rpch->cl_auth is set to null authentication.  The caller may wish
 188  * to set this to something more useful.
189  *
190  * fd should be an open socket
191  *
192  * fd - open file descriptor
 193  * raddr - server's address
194  * prog  - program number
195  * vers  - version number
196  * sendsz - buffer send size
197  * recvsz - buffer recv size
198  */
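/*
 * Editor's note (illustration only, not part of this file): a minimal
 * sketch of calling clnt_vc_create() on an already-connected TCP socket.
 * PROGNUM, VERSNUM and the pre-filled sockaddr are hypothetical; error
 * checking is omitted.
 *
 *	struct sockaddr_in sin;		// filled in and connect(2)ed elsewhere
 *	struct netbuf raddr;
 *	CLIENT *clnt;
 *	int fd;
 *
 *	fd = socket(AF_INET, SOCK_STREAM, 0);
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 *	raddr.buf = &sin;
 *	raddr.len = raddr.maxlen = sizeof(sin);
 *	clnt = clnt_vc_create(fd, &raddr, PROGNUM, VERSNUM, 0, 0);
 *	// 0, 0 => default send and receive buffer sizes
 *	clnt_control(clnt, CLSET_FD_CLOSE, NULL);	// close fd in clnt_destroy()
 */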
199 CLIENT *
200 clnt_vc_create(int fd, const struct netbuf *raddr, const rpcprog_t prog,
201     const rpcvers_t vers, u_int sendsz, u_int recvsz)
202 {
203 	CLIENT *cl;			/* client handle */
 204 	struct ct_data *ct = NULL;	/* private data */
205 	struct timeval now;
206 	struct rpc_msg call_msg;
207 	static u_int32_t disrupt;
208 	struct sockaddr_storage ss;
209 	socklen_t slen;
210 	struct __rpc_sockinfo si;
211 
212 	if (disrupt == 0)
213 		disrupt = (u_int32_t)(long)raddr;
214 
215 	cl = (CLIENT *)mem_alloc(sizeof (*cl));
216 	ct = (struct ct_data *)mem_alloc(sizeof (*ct));
217 	if ((cl == (CLIENT *)NULL) || (ct == (struct ct_data *)NULL)) {
218 		(void) syslog(LOG_ERR, clnt_vc_errstr,
219 		    clnt_vc_str, __no_mem_str);
220 		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
221 		rpc_createerr.cf_error.re_errno = errno;
222 		goto err;
223 	}
224 	ct->ct_addr.buf = NULL;
225 
226 	/*
227 	 * XXX - fvdl connecting while holding a mutex?
228 	 */
229 	slen = sizeof ss;
230 	if (_getpeername(fd, (struct sockaddr *)(void *)&ss, &slen) < 0) {
231 		if (errno != ENOTCONN) {
232 			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
233 			rpc_createerr.cf_error.re_errno = errno;
234 			mutex_unlock(&clnt_fd_lock);
235 			goto err;
236 		}
237 		if (_connect(fd, (struct sockaddr *)raddr->buf, raddr->len) < 0){
238 			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
239 			rpc_createerr.cf_error.re_errno = errno;
240 			mutex_unlock(&clnt_fd_lock);
241 			goto err;
242 		}
243 	}
244 	mutex_unlock(&clnt_fd_lock);
245 	if (!__rpc_fd2sockinfo(fd, &si))
246 		goto err;
247 
248 	ct->ct_closeit = FALSE;
249 
250 	/*
251 	 * Set up private data struct
252 	 */
253 	ct->ct_fd = fd;
254 	ct->ct_wait.tv_usec = 0;
255 	ct->ct_waitset = FALSE;
256 	ct->ct_addr.buf = malloc(raddr->maxlen);
257 	if (ct->ct_addr.buf == NULL)
258 		goto err;
259 	memcpy(ct->ct_addr.buf, raddr->buf, raddr->len);
260 	ct->ct_addr.len = raddr->len;
261 	ct->ct_addr.maxlen = raddr->maxlen;
262 
263 	/*
264 	 * Initialize call message
265 	 */
266 	(void)gettimeofday(&now, NULL);
267 	call_msg.rm_xid = ((u_int32_t)++disrupt) ^ __RPC_GETXID(&now);
268 	call_msg.rm_direction = CALL;
269 	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
270 	call_msg.rm_call.cb_prog = (u_int32_t)prog;
271 	call_msg.rm_call.cb_vers = (u_int32_t)vers;
272 
273 	/*
274 	 * pre-serialize the static part of the call msg and stash it away
275 	 */
276 	xdrmem_create(&(ct->ct_xdrs), ct->ct_u.ct_mcallc, MCALL_MSG_SIZE,
277 	    XDR_ENCODE);
278 	if (! xdr_callhdr(&(ct->ct_xdrs), &call_msg)) {
279 		if (ct->ct_closeit) {
280 			(void)_close(fd);
281 		}
282 		goto err;
283 	}
284 	ct->ct_mpos = XDR_GETPOS(&(ct->ct_xdrs));
285 	XDR_DESTROY(&(ct->ct_xdrs));
286 	assert(ct->ct_mpos + sizeof(uint32_t) <= MCALL_MSG_SIZE);
287 
288 	/*
289 	 * Create a client handle which uses xdrrec for serialization
290 	 * and authnone for authentication.
291 	 */
292 	cl->cl_ops = clnt_vc_ops();
293 	cl->cl_private = ct;
294 	cl->cl_auth = authnone_create();
295 	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
296 	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
297 	xdrrec_create(&(ct->ct_xdrs), sendsz, recvsz,
298 	    cl->cl_private, read_vc, write_vc);
299 	return (cl);
300 
301 err:
302 	if (ct) {
303 		if (ct->ct_addr.len)
304 			mem_free(ct->ct_addr.buf, ct->ct_addr.len);
305 		mem_free(ct, sizeof (struct ct_data));
306 	}
307 	if (cl)
308 		mem_free(cl, sizeof (CLIENT));
309 	return ((CLIENT *)NULL);
310 }
311 
312 static enum clnt_stat
313 clnt_vc_call(CLIENT *cl, rpcproc_t proc, xdrproc_t xdr_args, void *args_ptr,
314     xdrproc_t xdr_results, void *results_ptr, struct timeval timeout)
315 {
316 	struct ct_data *ct = (struct ct_data *) cl->cl_private;
317 	XDR *xdrs = &(ct->ct_xdrs);
318 	struct rpc_msg reply_msg;
319 	struct vc_fd *elem;
320 	u_int32_t x_id;
321 	u_int32_t *msg_x_id = &ct->ct_u.ct_mcalli;    /* yuk */
322 	bool_t shipnow;
323 	int refreshes = 2;
324 	sigset_t mask, newmask;
325 	bool_t reply_stat;
326 
327 	assert(cl != NULL);
328 
329 	sigfillset(&newmask);
330 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
331 	mutex_lock(&clnt_fd_lock);
332 	elem = vc_fd_find(ct->ct_fd);
333 	mutex_unlock(&clnt_fd_lock);
334 	mutex_lock(&elem->mtx);
335 	if (!ct->ct_waitset) {
336 		/* If time is not within limits, we ignore it. */
337 		if (time_not_ok(&timeout) == FALSE)
338 			ct->ct_wait = timeout;
339 	}
340 
341 	shipnow =
342 	    (xdr_results == NULL && timeout.tv_sec == 0
343 	    && timeout.tv_usec == 0) ? FALSE : TRUE;
344 
345 call_again:
346 	xdrs->x_op = XDR_ENCODE;
347 	ct->ct_error.re_status = RPC_SUCCESS;
348 	x_id = ntohl(--(*msg_x_id));
349 
350 	if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
351 		if ((! XDR_PUTBYTES(xdrs, ct->ct_u.ct_mcallc, ct->ct_mpos)) ||
352 		    (! XDR_PUTINT32(xdrs, &proc)) ||
353 		    (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
354 		    (! (*xdr_args)(xdrs, args_ptr))) {
355 			if (ct->ct_error.re_status == RPC_SUCCESS)
356 				ct->ct_error.re_status = RPC_CANTENCODEARGS;
357 			(void)xdrrec_endofrecord(xdrs, TRUE);
358 			release_fd_lock(elem, mask);
359 			return (ct->ct_error.re_status);
360 		}
361 	} else {
362 		*(uint32_t *) &ct->ct_u.ct_mcallc[ct->ct_mpos] = htonl(proc);
363 		if (! __rpc_gss_wrap(cl->cl_auth, ct->ct_u.ct_mcallc,
364 			ct->ct_mpos + sizeof(uint32_t),
365 			xdrs, xdr_args, args_ptr)) {
366 			if (ct->ct_error.re_status == RPC_SUCCESS)
367 				ct->ct_error.re_status = RPC_CANTENCODEARGS;
368 			(void)xdrrec_endofrecord(xdrs, TRUE);
369 			release_fd_lock(elem, mask);
370 			return (ct->ct_error.re_status);
371 		}
372 	}
373 	if (! xdrrec_endofrecord(xdrs, shipnow)) {
374 		release_fd_lock(elem, mask);
375 		return (ct->ct_error.re_status = RPC_CANTSEND);
376 	}
377 	if (! shipnow) {
378 		release_fd_lock(elem, mask);
379 		return (RPC_SUCCESS);
380 	}
381 	/*
382 	 * Hack to provide rpc-based message passing
383 	 */
384 	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
385 		release_fd_lock(elem, mask);
386 		return(ct->ct_error.re_status = RPC_TIMEDOUT);
387 	}
388 
389 
390 	/*
391 	 * Keep receiving until we get a valid transaction id
392 	 */
393 	xdrs->x_op = XDR_DECODE;
394 	while (TRUE) {
395 		reply_msg.acpted_rply.ar_verf = _null_auth;
396 		reply_msg.acpted_rply.ar_results.where = NULL;
397 		reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
398 		if (! xdrrec_skiprecord(xdrs)) {
399 			release_fd_lock(elem, mask);
400 			return (ct->ct_error.re_status);
401 		}
402 		/* now decode and validate the response header */
403 		if (! xdr_replymsg(xdrs, &reply_msg)) {
404 			if (ct->ct_error.re_status == RPC_SUCCESS)
405 				continue;
406 			release_fd_lock(elem, mask);
407 			return (ct->ct_error.re_status);
408 		}
409 		if (reply_msg.rm_xid == x_id)
410 			break;
411 	}
412 
413 	/*
414 	 * process header
415 	 */
416 	_seterr_reply(&reply_msg, &(ct->ct_error));
417 	if (ct->ct_error.re_status == RPC_SUCCESS) {
418 		if (! AUTH_VALIDATE(cl->cl_auth,
419 		    &reply_msg.acpted_rply.ar_verf)) {
420 			ct->ct_error.re_status = RPC_AUTHERROR;
421 			ct->ct_error.re_why = AUTH_INVALIDRESP;
422 		} else {
423 			if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
424 				reply_stat = (*xdr_results)(xdrs, results_ptr);
425 			} else {
426 				reply_stat = __rpc_gss_unwrap(cl->cl_auth,
427 				    xdrs, xdr_results, results_ptr);
428 			}
429 			if (! reply_stat) {
430 				if (ct->ct_error.re_status == RPC_SUCCESS)
431 					ct->ct_error.re_status =
432 						RPC_CANTDECODERES;
433 			}
434 		}
435 		/* free verifier ... */
436 		if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
437 			xdrs->x_op = XDR_FREE;
438 			(void)xdr_opaque_auth(xdrs,
439 			    &(reply_msg.acpted_rply.ar_verf));
440 		}
441 	}  /* end successful completion */
442 	else {
443 		/* maybe our credentials need to be refreshed ... */
444 		if (refreshes-- && AUTH_REFRESH(cl->cl_auth, &reply_msg))
445 			goto call_again;
446 	}  /* end of unsuccessful completion */
447 	release_fd_lock(elem, mask);
448 	return (ct->ct_error.re_status);
449 }
450 
451 static void
452 clnt_vc_geterr(CLIENT *cl, struct rpc_err *errp)
453 {
454 	struct ct_data *ct;
455 
456 	assert(cl != NULL);
457 	assert(errp != NULL);
458 
459 	ct = (struct ct_data *) cl->cl_private;
460 	*errp = ct->ct_error;
461 }
462 
463 static bool_t
464 clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
465 {
466 	struct ct_data *ct;
467 	struct vc_fd *elem;
468 	XDR *xdrs;
469 	bool_t dummy;
470 	sigset_t mask;
471 	sigset_t newmask;
472 
473 	assert(cl != NULL);
474 
475 	ct = (struct ct_data *)cl->cl_private;
476 	xdrs = &(ct->ct_xdrs);
477 
478 	sigfillset(&newmask);
479 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
480 	mutex_lock(&clnt_fd_lock);
481 	elem = vc_fd_find(ct->ct_fd);
482 	mutex_lock(&elem->mtx);
483 	xdrs->x_op = XDR_FREE;
484 	dummy = (*xdr_res)(xdrs, res_ptr);
485 
486 	mutex_unlock(&clnt_fd_lock);
487 	release_fd_lock(elem, mask);
488 	return dummy;
489 }
490 
491 /*ARGSUSED*/
492 static void
493 clnt_vc_abort(CLIENT *cl)
494 {
495 }
496 
497 static __inline void
498 htonlp(void *dst, const void *src, uint32_t incr)
499 {
500 	/* We are aligned, so we think */
501 	*(uint32_t *)dst = htonl(*(const uint32_t *)src + incr);
502 }
503 
504 static __inline void
505 ntohlp(void *dst, const void *src)
506 {
507 	/* We are aligned, so we think */
508 	*(uint32_t *)dst = htonl(*(const uint32_t *)src);
509 }
510 
511 static bool_t
512 clnt_vc_control(CLIENT *cl, u_int request, void *info)
513 {
514 	struct ct_data *ct;
515 	struct vc_fd *elem;
516 	void *infop = info;
517 	sigset_t mask;
518 	sigset_t newmask;
519 
520 	assert(cl != NULL);
521 
522 	ct = (struct ct_data *)cl->cl_private;
523 
524 	sigfillset(&newmask);
525 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
526 	mutex_lock(&clnt_fd_lock);
527 	elem = vc_fd_find(ct->ct_fd);
528 	mutex_unlock(&clnt_fd_lock);
529 	mutex_lock(&elem->mtx);
530 
531 	switch (request) {
532 	case CLSET_FD_CLOSE:
533 		ct->ct_closeit = TRUE;
534 		release_fd_lock(elem, mask);
535 		return (TRUE);
536 	case CLSET_FD_NCLOSE:
537 		ct->ct_closeit = FALSE;
538 		release_fd_lock(elem, mask);
539 		return (TRUE);
540 	default:
541 		break;
542 	}
543 
544 	/* for other requests which use info */
545 	if (info == NULL) {
546 		release_fd_lock(elem, mask);
547 		return (FALSE);
548 	}
549 	switch (request) {
550 	case CLSET_TIMEOUT:
551 		if (time_not_ok((struct timeval *)info)) {
552 			release_fd_lock(elem, mask);
553 			return (FALSE);
554 		}
555 		ct->ct_wait = *(struct timeval *)infop;
556 		ct->ct_waitset = TRUE;
557 		break;
558 	case CLGET_TIMEOUT:
559 		*(struct timeval *)infop = ct->ct_wait;
560 		break;
561 	case CLGET_SERVER_ADDR:
562 		(void) memcpy(info, ct->ct_addr.buf, (size_t)ct->ct_addr.len);
563 		break;
564 	case CLGET_FD:
565 		*(int *)info = ct->ct_fd;
566 		break;
567 	case CLGET_SVC_ADDR:
568 		/* The caller should not free this memory area */
569 		*(struct netbuf *)info = ct->ct_addr;
570 		break;
571 	case CLSET_SVC_ADDR:		/* set to new address */
572 		release_fd_lock(elem, mask);
573 		return (FALSE);
574 	case CLGET_XID:
575 		/*
576 		 * use the knowledge that xid is the
577 		 * first element in the call structure
578 		 * This will get the xid of the PREVIOUS call
579 		 */
580 		ntohlp(info, &ct->ct_u.ct_mcalli);
581 		break;
582 	case CLSET_XID:
583 		/* This will set the xid of the NEXT call */
584 		/* increment by 1 as clnt_vc_call() decrements once */
585 		htonlp(&ct->ct_u.ct_mcalli, info, 1);
586 		break;
587 	case CLGET_VERS:
588 		/*
589 		 * This RELIES on the information that, in the call body,
590 		 * the version number field is the fifth field from the
591 		 * beginning of the RPC header. MUST be changed if the
592 		 * call_struct is changed
593 		 */
594 		ntohlp(info, ct->ct_u.ct_mcallc + 4 * BYTES_PER_XDR_UNIT);
595 		break;
596 
597 	case CLSET_VERS:
598 		htonlp(ct->ct_u.ct_mcallc + 4 * BYTES_PER_XDR_UNIT, info, 0);
599 		break;
600 
601 	case CLGET_PROG:
602 		/*
603 		 * This RELIES on the information that, in the call body,
604 		 * the program number field is the fourth field from the
605 		 * beginning of the RPC header. MUST be changed if the
606 		 * call_struct is changed
607 		 */
608 		ntohlp(info, ct->ct_u.ct_mcallc + 3 * BYTES_PER_XDR_UNIT);
609 		break;
610 
611 	case CLSET_PROG:
612 		htonlp(ct->ct_u.ct_mcallc + 3 * BYTES_PER_XDR_UNIT, info, 0);
613 		break;
614 
615 	default:
616 		release_fd_lock(elem, mask);
617 		return (FALSE);
618 	}
619 	release_fd_lock(elem, mask);
620 	return (TRUE);
621 }
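/*
 * Editor's note (illustration only, not part of this file): typical
 * clnt_control() requests handled above, assuming "clnt" came from
 * clnt_vc_create().
 *
 *	struct timeval tv = { 30, 0 };
 *	u_int32_t xid;
 *
 *	// Overrides the timeout passed to clnt_call() from now on.
 *	clnt_control(clnt, CLSET_TIMEOUT, (char *)&tv);
 *	// Retrieves the xid of the PREVIOUS call on this handle.
 *	clnt_control(clnt, CLGET_XID, (char *)&xid);
 */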
622 
623 
624 static void
625 clnt_vc_destroy(CLIENT *cl)
626 {
627 	struct ct_data *ct = (struct ct_data *) cl->cl_private;
628 	struct vc_fd *elem;
629 	int ct_fd = ct->ct_fd;
630 	sigset_t mask;
631 	sigset_t newmask;
632 
633 	assert(cl != NULL);
634 
635 	ct = (struct ct_data *) cl->cl_private;
636 
637 	sigfillset(&newmask);
638 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
639 	mutex_lock(&clnt_fd_lock);
640 	elem = vc_fd_find(ct_fd);
641 	mutex_lock(&elem->mtx);
642 	if (ct->ct_closeit && ct->ct_fd != -1) {
643 		(void)_close(ct->ct_fd);
644 	}
645 	XDR_DESTROY(&(ct->ct_xdrs));
646 	free(ct->ct_addr.buf);
647 	mem_free(ct, sizeof(struct ct_data));
648 	if (cl->cl_netid && cl->cl_netid[0])
649 		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
650 	if (cl->cl_tp && cl->cl_tp[0])
651 		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
652 	mem_free(cl, sizeof(CLIENT));
653 	mutex_unlock(&clnt_fd_lock);
654 	release_fd_lock(elem, mask);
655 }
656 
657 /*
658  * Interface between xdr serializer and tcp connection.
659  * Behaves like the system calls, read & write, but keeps some error state
660  * around for the rpc level.
661  */
662 static int
663 read_vc(void *ctp, void *buf, int len)
664 {
665 	struct sockaddr sa;
666 	socklen_t sal;
667 	struct ct_data *ct = (struct ct_data *)ctp;
668 	struct pollfd fd;
669 	int milliseconds = (int)((ct->ct_wait.tv_sec * 1000) +
670 	    (ct->ct_wait.tv_usec / 1000));
671 
672 	if (len == 0)
673 		return (0);
674 	fd.fd = ct->ct_fd;
675 	fd.events = POLLIN;
676 	for (;;) {
677 		switch (_poll(&fd, 1, milliseconds)) {
678 		case 0:
679 			ct->ct_error.re_status = RPC_TIMEDOUT;
680 			return (-1);
681 
682 		case -1:
683 			if (errno == EINTR)
684 				continue;
685 			ct->ct_error.re_status = RPC_CANTRECV;
686 			ct->ct_error.re_errno = errno;
687 			return (-1);
688 		}
689 		break;
690 	}
691 
692 	sal = sizeof(sa);
693 	if ((_getpeername(ct->ct_fd, &sa, &sal) == 0) &&
694 	    (sa.sa_family == AF_LOCAL)) {
695 		len = __msgread(ct->ct_fd, buf, (size_t)len);
696 	} else {
697 		len = _read(ct->ct_fd, buf, (size_t)len);
698 	}
699 
700 	switch (len) {
701 	case 0:
702 		/* premature eof */
703 		ct->ct_error.re_errno = ECONNRESET;
704 		ct->ct_error.re_status = RPC_CANTRECV;
705 		len = -1;  /* it's really an error */
706 		break;
707 
708 	case -1:
709 		ct->ct_error.re_errno = errno;
710 		ct->ct_error.re_status = RPC_CANTRECV;
711 		break;
712 	}
713 	return (len);
714 }
715 
716 static int
717 write_vc(void *ctp, void *buf, int len)
718 {
719 	struct sockaddr sa;
720 	socklen_t sal;
721 	struct ct_data *ct = (struct ct_data *)ctp;
722 	int i, cnt;
723 
724 	sal = sizeof(sa);
725 	if ((_getpeername(ct->ct_fd, &sa, &sal) == 0) &&
726 	    (sa.sa_family == AF_LOCAL)) {
727 		for (cnt = len; cnt > 0; cnt -= i, buf = (char *)buf + i) {
728 			if ((i = __msgwrite(ct->ct_fd, buf,
729 			     (size_t)cnt)) == -1) {
730 				ct->ct_error.re_errno = errno;
731 				ct->ct_error.re_status = RPC_CANTSEND;
732 				return (-1);
733 			}
734 		}
735 	} else {
736 		for (cnt = len; cnt > 0; cnt -= i, buf = (char *)buf + i) {
737 			if ((i = _write(ct->ct_fd, buf, (size_t)cnt)) == -1) {
738 				ct->ct_error.re_errno = errno;
739 				ct->ct_error.re_status = RPC_CANTSEND;
740 				return (-1);
741 			}
742 		}
743 	}
744 	return (len);
745 }
746 
747 static struct clnt_ops *
748 clnt_vc_ops(void)
749 {
750 	static struct clnt_ops ops;
751 	sigset_t mask, newmask;
752 
753 	/* VARIABLES PROTECTED BY ops_lock: ops */
754 
755 	sigfillset(&newmask);
756 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
757 	mutex_lock(&ops_lock);
758 	if (ops.cl_call == NULL) {
759 		ops.cl_call = clnt_vc_call;
760 		ops.cl_abort = clnt_vc_abort;
761 		ops.cl_geterr = clnt_vc_geterr;
762 		ops.cl_freeres = clnt_vc_freeres;
763 		ops.cl_destroy = clnt_vc_destroy;
764 		ops.cl_control = clnt_vc_control;
765 	}
766 	mutex_unlock(&ops_lock);
767 	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
768 	return (&ops);
769 }
770 
771 /*
772  * Make sure that the time is not garbage.   -1 value is disallowed.
773  * Note this is different from time_not_ok in clnt_dg.c
774  */
775 static bool_t
776 time_not_ok(struct timeval *t)
777 {
778 	return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
779 		t->tv_usec <= -1 || t->tv_usec > 1000000);
780 }
781 
782 static int
783 __msgread(int sock, void *buf, size_t cnt)
784 {
785 	struct iovec iov[1];
786 	struct msghdr msg;
787 	union {
788 		struct cmsghdr cmsg;
789 		char control[CMSG_SPACE(sizeof(struct cmsgcred))];
790 	} cm;
791 
792 	bzero((char *)&cm, sizeof(cm));
793 	iov[0].iov_base = buf;
794 	iov[0].iov_len = cnt;
795 
796 	msg.msg_iov = iov;
797 	msg.msg_iovlen = 1;
798 	msg.msg_name = NULL;
799 	msg.msg_namelen = 0;
800 	msg.msg_control = (caddr_t)&cm;
801 	msg.msg_controllen = CMSG_SPACE(sizeof(struct cmsgcred));
802 	msg.msg_flags = 0;
803 
804 	return(_recvmsg(sock, &msg, 0));
805 }
806 
807 static int
808 __msgwrite(int sock, void *buf, size_t cnt)
809 {
810 	struct iovec iov[1];
811 	struct msghdr msg;
812 	union {
813 		struct cmsghdr cmsg;
814 		char control[CMSG_SPACE(sizeof(struct cmsgcred))];
815 	} cm;
816 
817 	bzero((char *)&cm, sizeof(cm));
818 	iov[0].iov_base = buf;
819 	iov[0].iov_len = cnt;
820 
821 	cm.cmsg.cmsg_type = SCM_CREDS;
822 	cm.cmsg.cmsg_level = SOL_SOCKET;
823 	cm.cmsg.cmsg_len = CMSG_LEN(sizeof(struct cmsgcred));
824 
825 	msg.msg_iov = iov;
826 	msg.msg_iovlen = 1;
827 	msg.msg_name = NULL;
828 	msg.msg_namelen = 0;
829 	msg.msg_control = (caddr_t)&cm;
830 	msg.msg_controllen = CMSG_SPACE(sizeof(struct cmsgcred));
831 	msg.msg_flags = 0;
832 
833 	return(_sendmsg(sock, &msg, 0));
834 }
835