/*	$NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $	*/

/*
 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
 * unrestricted use provided that this legend is included on all tape
 * media and as a part of the software program in whole or part.  Users
 * may copy or modify Sun RPC without charge, but are not authorized
 * to license or distribute it to anyone else except as part of a product or
 * program developed by the user.
 *
 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
 *
 * Sun RPC is provided with no support and without any obligation on the
 * part of Sun Microsystems, Inc. to assist in its use, correction,
 * modification or enhancement.
 *
 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
 * OR ANY PART THEREOF.
 *
 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
 * or profits or other special, indirect and consequential damages, even if
 * Sun has been advised of the possibility of such damages.
 *
 * Sun Microsystems, Inc.
 * 2550 Garcia Avenue
 * Mountain View, California  94043
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)clnt_tcp.c 1.37 87/10/05 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)clnt_tcp.c	2.2 88/08/01 4.0 RPCSRC";
static char sccsid3[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * clnt_tcp.c, Implements a TCP/IP based, client-side RPC.
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 *
 * TCP based RPC supports 'batched calls'.
 * A sequence of calls may be batched up in a send buffer.  The rpc call
 * returns immediately to the client even though the call was not necessarily
 * sent.  The batching occurs if the results' xdr routine is NULL (0) AND
 * the rpc timeout value is zero (see clnt.h, rpc).
 *
 * Clients should NOT casually batch calls that in fact return results; that is,
 * the server side should be aware that a call is batched and not produce any
 * return message.  Batched calls that produce many result messages can
 * deadlock (netlock) the client and the server....
 *
 * Now go hang yourself.
 */
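
/*
 * Added illustrative note (not part of the original source): in the
 * classic userland clnt_call() interface referred to above, a batched
 * call would look roughly like the sketch below, where "clnt",
 * "MY_PROCNUM", "xdr_args" and "args" are hypothetical caller-supplied
 * names.  Passing a NULL result xdr routine and a zero timeout tells the
 * library not to wait for a reply:
 *
 *	static const struct timeval zero_timeout = { 0, 0 };
 *
 *	(void) clnt_call(clnt, MY_PROCNUM,
 *	    (xdrproc_t)xdr_args, (char *)&args,
 *	    (xdrproc_t)NULL, NULL, zero_timeout);
 *
 * In this kernel implementation the analogous behaviour is a zero
 * utimeout passed to clnt_vc_call(), which queues the request and
 * returns RPC_TIMEDOUT without sleeping for a reply (see the
 * zero-timeout check in clnt_vc_call() below).
 */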

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/time.h>
#include <sys/uio.h>

#include <net/vnet.h>

#include <netinet/tcp.h>

#include <rpc/rpc.h>
#include <rpc/rpc_com.h>

#define MCALL_MSG_SIZE 24
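/*
 * Added note: the static part of the call message serialized by
 * xdr_callhdr() below is five 4-byte XDR units (xid, message direction,
 * RPC version, program and version numbers), so 24 bytes presumably
 * leaves one XDR unit of slack.
 */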

struct cmessage {
        struct cmsghdr cmsg;
        struct cmsgcred cmcred;
};

static enum clnt_stat clnt_vc_call(CLIENT *, struct rpc_callextra *,
    rpcproc_t, struct mbuf *, struct mbuf **, struct timeval);
static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *);
static void clnt_vc_abort(CLIENT *);
static bool_t clnt_vc_control(CLIENT *, u_int, void *);
static void clnt_vc_close(CLIENT *);
static void clnt_vc_destroy(CLIENT *);
static bool_t time_not_ok(struct timeval *);
static int clnt_vc_soupcall(struct socket *so, void *arg, int waitflag);

static struct clnt_ops clnt_vc_ops = {
	.cl_call =	clnt_vc_call,
	.cl_abort =	clnt_vc_abort,
	.cl_geterr =	clnt_vc_geterr,
	.cl_freeres =	clnt_vc_freeres,
	.cl_close =	clnt_vc_close,
	.cl_destroy =	clnt_vc_destroy,
	.cl_control =	clnt_vc_control
};

/*
 * A pending RPC request which awaits a reply. Requests which have
 * received their reply will have cr_xid set to zero and cr_mrep to
 * the mbuf chain of the reply.
 */
struct ct_request {
	TAILQ_ENTRY(ct_request) cr_link;
	uint32_t		cr_xid;		/* XID of request */
	struct mbuf		*cr_mrep;	/* reply received by upcall */
	int			cr_error;	/* any error from upcall */
	char			cr_verf[MAX_AUTH_BYTES]; /* reply verf */
};

TAILQ_HEAD(ct_request_list, ct_request);

struct ct_data {
	struct mtx	ct_lock;
	int		ct_threads;	/* number of threads in clnt_vc_call */
	bool_t		ct_closing;	/* TRUE if we are closing */
	bool_t		ct_closed;	/* TRUE if we are closed */
	struct socket	*ct_socket;	/* connection socket */
	bool_t		ct_closeit;	/* close it on destroy */
	struct timeval	ct_wait;	/* wait interval for replies */
	struct sockaddr_storage	ct_addr; /* remote addr */
	struct rpc_err	ct_error;
	uint32_t	ct_xid;
	char		ct_mcallc[MCALL_MSG_SIZE]; /* marshalled callmsg */
	size_t		ct_mpos;	/* pos after marshal */
	const char	*ct_waitchan;
	int		ct_waitflag;
	struct mbuf	*ct_record;	/* current reply record */
	size_t		ct_record_resid; /* how much left of reply to read */
	bool_t		ct_record_eor;	 /* true if reading last fragment */
	struct ct_request_list ct_pending;
	int		ct_upcallrefs;	/* Ref cnt of upcalls in prog. */
};
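
/*
 * Added locking note (inferred from the code below, not from original
 * comments): ct_lock protects ct_pending, ct_error, ct_xid, ct_threads
 * and the closing/closed flags, while the record-reassembly state
 * (ct_record, ct_record_resid, ct_record_eor) and ct_upcallrefs are only
 * touched from clnt_vc_soupcall() and clnt_vc_close(), which run with
 * the receive socket buffer lock held.
 */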

static void clnt_vc_upcallsdone(struct ct_data *);

static const char clnt_vc_errstr[] = "%s : %s";
static const char clnt_vc_str[] = "clnt_vc_create";
static const char clnt_read_vc_str[] = "read_vc";
static const char __no_mem_str[] = "out of memory";

/*
 * Create a client handle for a connection.
 * Default options are set, which the user can change using clnt_control().
 * The rpc/vc package does buffering similar to stdio, so the client
 * must pick send and receive buffer sizes; 0 => use the default.
 * NB: The socket pointer is stored in the handle's private data.
 * NB: rpch->cl_auth is set to null authentication. The caller may wish
 * to set this to something more useful.
 *
 * "so" should be an open socket.
 */
CLIENT *
clnt_vc_create(
	struct socket *so,		/* open socket */
	struct sockaddr *raddr,		/* server's address */
	const rpcprog_t prog,		/* program number */
	const rpcvers_t vers,		/* version number */
	size_t sendsz,			/* buffer send size */
	size_t recvsz)			/* buffer recv size */
{
	CLIENT *cl;			/* client handle */
	struct ct_data *ct = NULL;	/* private data */
	struct timeval now;
	struct rpc_msg call_msg;
	static uint32_t disrupt;
	struct __rpc_sockinfo si;
	XDR xdrs;
	int error, interrupted, one = 1;
	struct sockopt sopt;

	if (disrupt == 0)
		disrupt = (uint32_t)(long)raddr;

	cl = (CLIENT *)mem_alloc(sizeof (*cl));
	ct = (struct ct_data *)mem_alloc(sizeof (*ct));

	mtx_init(&ct->ct_lock, "ct->ct_lock", NULL, MTX_DEF);
	ct->ct_threads = 0;
	ct->ct_closing = FALSE;
	ct->ct_closed = FALSE;
	ct->ct_upcallrefs = 0;

	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
		error = soconnect(so, raddr, curthread);
		SOCK_LOCK(so);
		interrupted = 0;
		while ((so->so_state & SS_ISCONNECTING)
		    && so->so_error == 0) {
			error = msleep(&so->so_timeo, SOCK_MTX(so),
			    PSOCK | PCATCH | PBDRY, "connec", 0);
			if (error) {
				if (error == EINTR || error == ERESTART)
					interrupted = 1;
				break;
			}
		}
		if (error == 0) {
			error = so->so_error;
			so->so_error = 0;
		}
		SOCK_UNLOCK(so);
		if (error) {
			if (!interrupted)
				so->so_state &= ~SS_ISCONNECTING;
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = error;
			goto err;
		}
	}

	CURVNET_SET(so->so_vnet);
	if (!__rpc_socket2sockinfo(so, &si)) {
		CURVNET_RESTORE();
		goto err;
	}

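	/*
	 * Added note: for connection-oriented transports, enable
	 * SO_KEEPALIVE so that a dead peer is eventually detected and
	 * pending calls can be failed rather than hanging forever.
	 */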
	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = SOL_SOCKET;
		sopt.sopt_name = SO_KEEPALIVE;
		sopt.sopt_val = &one;
		sopt.sopt_valsize = sizeof(one);
		sosetopt(so, &sopt);
	}

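	/*
	 * Added note: TCP_NODELAY disables Nagle's algorithm so that small
	 * RPC requests are transmitted immediately instead of being
	 * coalesced while waiting for acknowledgements.
	 */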
	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = IPPROTO_TCP;
		sopt.sopt_name = TCP_NODELAY;
		sopt.sopt_val = &one;
		sopt.sopt_valsize = sizeof(one);
		sosetopt(so, &sopt);
	}
	CURVNET_RESTORE();

	ct->ct_closeit = FALSE;

	/*
	 * Set up private data struct
	 */
	ct->ct_socket = so;
	ct->ct_wait.tv_sec = -1;
	ct->ct_wait.tv_usec = -1;
	memcpy(&ct->ct_addr, raddr, raddr->sa_len);

	/*
	 * Initialize call message
	 */
	getmicrotime(&now);
	ct->ct_xid = ((uint32_t)++disrupt) ^ __RPC_GETXID(&now);
	call_msg.rm_xid = ct->ct_xid;
	call_msg.rm_direction = CALL;
	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
	call_msg.rm_call.cb_prog = (uint32_t)prog;
	call_msg.rm_call.cb_vers = (uint32_t)vers;

	/*
	 * pre-serialize the static part of the call msg and stash it away
	 */
	xdrmem_create(&xdrs, ct->ct_mcallc, MCALL_MSG_SIZE,
	    XDR_ENCODE);
	if (! xdr_callhdr(&xdrs, &call_msg)) {
		if (ct->ct_closeit) {
			soclose(ct->ct_socket);
		}
		goto err;
	}
	ct->ct_mpos = XDR_GETPOS(&xdrs);
	XDR_DESTROY(&xdrs);
	ct->ct_waitchan = "rpcrecv";
	ct->ct_waitflag = 0;

	/*
	 * Create a client handle which uses xdrmbuf for serialization
	 * and authnone for authentication.
	 */
	cl->cl_refs = 1;
	cl->cl_ops = &clnt_vc_ops;
	cl->cl_private = ct;
	cl->cl_auth = authnone_create();
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	soreserve(ct->ct_socket, sendsz, recvsz);

	SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
	soupcall_set(ct->ct_socket, SO_RCV, clnt_vc_soupcall, ct);
	SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);

	ct->ct_record = NULL;
	ct->ct_record_resid = 0;
	TAILQ_INIT(&ct->ct_pending);
	return (cl);

err:
	if (cl) {
		if (ct) {
			mtx_destroy(&ct->ct_lock);
			mem_free(ct, sizeof (struct ct_data));
		}
		if (cl)
			mem_free(cl, sizeof (CLIENT));
	}
	return ((CLIENT *)NULL);
}
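
/*
 * Added usage sketch (not part of the original source, only a hedged
 * illustration): a kernel consumer that has already created and
 * connected a TCP socket might obtain a handle roughly as follows,
 * where MY_PROG and MY_VERS are hypothetical program/version numbers
 * and "sa" is the server's sockaddr:
 *
 *	CLIENT *clnt;
 *
 *	clnt = clnt_vc_create(so, sa, MY_PROG, MY_VERS, 0, 0);
 *	if (clnt == NULL)
 *		return (rpc_createerr.cf_error.re_errno);
 *
 * The handle starts out with authnone; callers typically replace
 * clnt->cl_auth with something more useful before issuing calls.
 */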

static enum clnt_stat
clnt_vc_call(
	CLIENT		*cl,		/* client handle */
	struct rpc_callextra *ext,	/* call metadata */
	rpcproc_t	proc,		/* procedure number */
	struct mbuf	*args,		/* pointer to args */
	struct mbuf	**resultsp,	/* pointer to results */
	struct timeval	utimeout)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	AUTH *auth;
	struct rpc_err *errp;
	enum clnt_stat stat;
	XDR xdrs;
	struct rpc_msg reply_msg;
	bool_t ok;
	int nrefreshes = 2;		/* number of times to refresh cred */
	struct timeval timeout;
	uint32_t xid;
	struct mbuf *mreq = NULL, *results;
	struct ct_request *cr;
	int error;

	cr = malloc(sizeof(struct ct_request), M_RPC, M_WAITOK);

	mtx_lock(&ct->ct_lock);

	if (ct->ct_closing || ct->ct_closed) {
		mtx_unlock(&ct->ct_lock);
		free(cr, M_RPC);
		return (RPC_CANTSEND);
	}
	ct->ct_threads++;

	if (ext) {
		auth = ext->rc_auth;
		errp = &ext->rc_err;
	} else {
		auth = cl->cl_auth;
		errp = &ct->ct_error;
	}

	cr->cr_mrep = NULL;
	cr->cr_error = 0;

	if (ct->ct_wait.tv_usec == -1) {
		timeout = utimeout;	/* use supplied timeout */
	} else {
		timeout = ct->ct_wait;	/* use default timeout */
	}

call_again:
	mtx_assert(&ct->ct_lock, MA_OWNED);

	ct->ct_xid++;
	xid = ct->ct_xid;

	mtx_unlock(&ct->ct_lock);

	/*
	 * Leave space to prepend the record mark.
	 */
	MGETHDR(mreq, M_WAIT, MT_DATA);
	mreq->m_data += sizeof(uint32_t);
	KASSERT(ct->ct_mpos + sizeof(uint32_t) <= MHLEN,
	    ("RPC header too big"));
	bcopy(ct->ct_mcallc, mreq->m_data, ct->ct_mpos);
	mreq->m_len = ct->ct_mpos;

	/*
	 * The XID is the first thing in the request.
	 */
	*mtod(mreq, uint32_t *) = htonl(xid);

	xdrmbuf_create(&xdrs, mreq, XDR_ENCODE);

	errp->re_status = stat = RPC_SUCCESS;

	if ((! XDR_PUTINT32(&xdrs, &proc)) ||
	    (! AUTH_MARSHALL(auth, xid, &xdrs,
		m_copym(args, 0, M_COPYALL, M_WAITOK)))) {
		errp->re_status = stat = RPC_CANTENCODEARGS;
		mtx_lock(&ct->ct_lock);
		goto out;
	}
	mreq->m_pkthdr.len = m_length(mreq, NULL);

	/*
	 * Prepend a record marker containing the packet length.
	 */
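	/*
	 * Added note: the RPC-over-TCP record mark is a single big-endian
	 * uint32_t in which the most significant bit is the "last fragment"
	 * flag and the low 31 bits are the fragment length.  The request is
	 * always sent as a single fragment here, so the flag is always set.
	 */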
	M_PREPEND(mreq, sizeof(uint32_t), M_WAIT);
	*mtod(mreq, uint32_t *) =
		htonl(0x80000000 | (mreq->m_pkthdr.len - sizeof(uint32_t)));

	cr->cr_xid = xid;
	mtx_lock(&ct->ct_lock);
	/*
	 * Check to see if the other end has already started to close down
	 * the connection. The upcall will have set ct_error.re_status
	 * to RPC_CANTRECV if this is the case.
	 * If the other end starts to close down the connection after this
	 * point, it will be detected later when cr_error is checked,
	 * since the request is in the ct_pending queue.
	 */
	if (ct->ct_error.re_status == RPC_CANTRECV) {
		if (errp != &ct->ct_error) {
			errp->re_errno = ct->ct_error.re_errno;
			errp->re_status = RPC_CANTRECV;
		}
		stat = RPC_CANTRECV;
		goto out;
	}
	TAILQ_INSERT_TAIL(&ct->ct_pending, cr, cr_link);
	mtx_unlock(&ct->ct_lock);

	/*
	 * sosend consumes mreq.
	 */
	error = sosend(ct->ct_socket, NULL, NULL, mreq, NULL, 0, curthread);
	mreq = NULL;
	if (error == EMSGSIZE) {
		SOCKBUF_LOCK(&ct->ct_socket->so_snd);
		sbwait(&ct->ct_socket->so_snd);
		SOCKBUF_UNLOCK(&ct->ct_socket->so_snd);
		AUTH_VALIDATE(auth, xid, NULL, NULL);
		mtx_lock(&ct->ct_lock);
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		goto call_again;
	}

	reply_msg.acpted_rply.ar_verf.oa_flavor = AUTH_NULL;
	reply_msg.acpted_rply.ar_verf.oa_base = cr->cr_verf;
	reply_msg.acpted_rply.ar_verf.oa_length = 0;
	reply_msg.acpted_rply.ar_results.where = NULL;
	reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;

	mtx_lock(&ct->ct_lock);
	if (error) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		errp->re_errno = error;
		errp->re_status = stat = RPC_CANTSEND;
		goto out;
	}

	/*
	 * Check to see if we got an upcall while waiting for the
	 * lock. In either case (receive error or reply) the request
	 * is removed from ct_pending here.
	 */
	if (cr->cr_error) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		errp->re_errno = cr->cr_error;
		errp->re_status = stat = RPC_CANTRECV;
		goto out;
	}
	if (cr->cr_mrep) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		goto got_reply;
	}

	/*
	 * Hack to provide rpc-based message passing
	 */
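	/*
	 * Added note: a zero timeout is the "batched call" case described
	 * at the top of this file.  The request has already been handed to
	 * the socket, so drop the pending entry and return RPC_TIMEDOUT
	 * immediately instead of sleeping for a reply.
	 */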
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		errp->re_status = stat = RPC_TIMEDOUT;
		goto out;
	}

	error = msleep(cr, &ct->ct_lock, ct->ct_waitflag, ct->ct_waitchan,
	    tvtohz(&timeout));

	TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);

	if (error) {
		/*
		 * The sleep returned an error, so no usable reply was
		 * matched to our request. Turn the error code into an
		 * appropriate client status.
		 */
		errp->re_errno = error;
		switch (error) {
		case EINTR:
		case ERESTART:
			stat = RPC_INTR;
			break;
		case EWOULDBLOCK:
			stat = RPC_TIMEDOUT;
			break;
		default:
			stat = RPC_CANTRECV;
		}
		errp->re_status = stat;
		goto out;
	} else {
		/*
		 * We were woken up by the upcall.  If the
		 * upcall had a receive error, report that,
		 * otherwise we have a reply.
		 */
		if (cr->cr_error) {
			errp->re_errno = cr->cr_error;
			errp->re_status = stat = RPC_CANTRECV;
			goto out;
		}
	}

got_reply:
	/*
	 * Now decode and validate the response. We need to drop the
	 * lock since xdr_replymsg may end up sleeping in malloc.
	 */
	mtx_unlock(&ct->ct_lock);

	if (ext && ext->rc_feedback)
		ext->rc_feedback(FEEDBACK_OK, proc, ext->rc_feedback_arg);

	xdrmbuf_create(&xdrs, cr->cr_mrep, XDR_DECODE);
	ok = xdr_replymsg(&xdrs, &reply_msg);
	cr->cr_mrep = NULL;

	if (ok) {
		if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
		    (reply_msg.acpted_rply.ar_stat == SUCCESS))
			errp->re_status = stat = RPC_SUCCESS;
		else
			stat = _seterr_reply(&reply_msg, errp);

		if (stat == RPC_SUCCESS) {
			results = xdrmbuf_getall(&xdrs);
			if (!AUTH_VALIDATE(auth, xid,
				&reply_msg.acpted_rply.ar_verf,
				&results)) {
				errp->re_status = stat = RPC_AUTHERROR;
				errp->re_why = AUTH_INVALIDRESP;
			} else {
				KASSERT(results,
				    ("auth validated but no result"));
				*resultsp = results;
			}
		}		/* end successful completion */
		/*
		 * If unsuccessful AND the error is an authentication error,
		 * then refresh credentials and try again, else break.
		 */
		else if (stat == RPC_AUTHERROR)
			/* maybe our credentials need to be refreshed ... */
			if (nrefreshes > 0 &&
			    AUTH_REFRESH(auth, &reply_msg)) {
				nrefreshes--;
				XDR_DESTROY(&xdrs);
				mtx_lock(&ct->ct_lock);
				goto call_again;
			}
		/* end of unsuccessful completion */
	}	/* end of valid reply message */
	else {
		errp->re_status = stat = RPC_CANTDECODERES;
	}
	XDR_DESTROY(&xdrs);
	mtx_lock(&ct->ct_lock);
out:
	mtx_assert(&ct->ct_lock, MA_OWNED);

	KASSERT(stat != RPC_SUCCESS || *resultsp,
	    ("RPC_SUCCESS without reply"));

	if (mreq)
		m_freem(mreq);
	if (cr->cr_mrep)
		m_freem(cr->cr_mrep);

	ct->ct_threads--;
	if (ct->ct_closing)
		wakeup(ct);

	mtx_unlock(&ct->ct_lock);

	if (auth && stat != RPC_SUCCESS)
		AUTH_VALIDATE(auth, xid, NULL, NULL);

	free(cr, M_RPC);

	return (stat);
}
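
/*
 * Added usage sketch (not part of the original source): a caller that
 * already has a handle from clnt_vc_create() and an mbuf chain "m"
 * containing the XDR-encoded arguments could issue a call through the
 * generic client ops roughly as follows; MY_PROCNUM is a hypothetical
 * procedure number and the 25 second timeout is arbitrary:
 *
 *	struct mbuf *rep = NULL;
 *	struct timeval tv = { 25, 0 };
 *	enum clnt_stat stat;
 *
 *	stat = (*clnt->cl_ops->cl_call)(clnt, NULL, MY_PROCNUM, m, &rep, tv);
 *	if (stat == RPC_SUCCESS) {
 *		... decode the reply from "rep" and m_freem() it ...
 *	}
 */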

static void
clnt_vc_geterr(CLIENT *cl, struct rpc_err *errp)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;

	*errp = ct->ct_error;
}

static bool_t
clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
{
	XDR xdrs;
	bool_t dummy;

	xdrs.x_op = XDR_FREE;
	dummy = (*xdr_res)(&xdrs, res_ptr);

	return (dummy);
}

/*ARGSUSED*/
static void
clnt_vc_abort(CLIENT *cl)
{
}

static bool_t
clnt_vc_control(CLIENT *cl, u_int request, void *info)
{
	struct ct_data *ct = (struct ct_data *)cl->cl_private;
	void *infop = info;

	mtx_lock(&ct->ct_lock);

	switch (request) {
	case CLSET_FD_CLOSE:
		ct->ct_closeit = TRUE;
		mtx_unlock(&ct->ct_lock);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		ct->ct_closeit = FALSE;
		mtx_unlock(&ct->ct_lock);
		return (TRUE);
	default:
		break;
	}

	/* for other requests which use info */
	if (info == NULL) {
		mtx_unlock(&ct->ct_lock);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			mtx_unlock(&ct->ct_lock);
			return (FALSE);
		}
		ct->ct_wait = *(struct timeval *)infop;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)infop = ct->ct_wait;
		break;
	case CLGET_SERVER_ADDR:
		(void) memcpy(info, &ct->ct_addr, (size_t)ct->ct_addr.ss_len);
		break;
	case CLGET_SVC_ADDR:
		/*
		 * Slightly different semantics to userland - we use
		 * sockaddr instead of netbuf.
		 */
		memcpy(info, &ct->ct_addr, ct->ct_addr.ss_len);
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		mtx_unlock(&ct->ct_lock);
		return (FALSE);
	case CLGET_XID:
		*(uint32_t *)info = ct->ct_xid;
		break;
	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		/* decrement by 1 as clnt_vc_call() increments once */
		ct->ct_xid = *(uint32_t *)info - 1;
		break;
	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed.
		 */
		*(uint32_t *)info =
		    ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
		    4 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_VERS:
		*(uint32_t *)(void *)(ct->ct_mcallc +
		    4 * BYTES_PER_XDR_UNIT) =
		    htonl(*(uint32_t *)info);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed.
		 */
		*(uint32_t *)info =
		    ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
		    3 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_PROG:
		*(uint32_t *)(void *)(ct->ct_mcallc +
		    3 * BYTES_PER_XDR_UNIT) =
		    htonl(*(uint32_t *)info);
		break;

	case CLSET_WAITCHAN:
		ct->ct_waitchan = (const char *)info;
		break;

	case CLGET_WAITCHAN:
		*(const char **) info = ct->ct_waitchan;
		break;

	case CLSET_INTERRUPTIBLE:
		if (*(int *) info)
			ct->ct_waitflag = PCATCH | PBDRY;
		else
			ct->ct_waitflag = 0;
		break;

	case CLGET_INTERRUPTIBLE:
		if (ct->ct_waitflag)
			*(int *) info = TRUE;
		else
			*(int *) info = FALSE;
		break;

	default:
		mtx_unlock(&ct->ct_lock);
		return (FALSE);
	}

	mtx_unlock(&ct->ct_lock);
	return (TRUE);
}
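
/*
 * Added usage sketch (not part of the original source): consumers
 * normally reach clnt_vc_control() through the generic CLNT_CONTROL()
 * macro from the clnt.h header.  For example, assuming "clnt" is a
 * handle created above, a caller might make calls interruptible and set
 * a default timeout and wait channel like this (the "myrpc" channel
 * name is hypothetical):
 *
 *	int one = 1;
 *	struct timeval tv = { 30, 0 };
 *
 *	CLNT_CONTROL(clnt, CLSET_TIMEOUT, &tv);
 *	CLNT_CONTROL(clnt, CLSET_WAITCHAN, "myrpc");
 *	CLNT_CONTROL(clnt, CLSET_INTERRUPTIBLE, &one);
 */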

static void
clnt_vc_close(CLIENT *cl)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	struct ct_request *cr;

	mtx_lock(&ct->ct_lock);

	if (ct->ct_closed) {
		mtx_unlock(&ct->ct_lock);
		return;
	}

	if (ct->ct_closing) {
		while (ct->ct_closing)
			msleep(ct, &ct->ct_lock, 0, "rpcclose", 0);
		KASSERT(ct->ct_closed, ("client should be closed"));
		mtx_unlock(&ct->ct_lock);
		return;
	}

	if (ct->ct_socket) {
		ct->ct_closing = TRUE;
		mtx_unlock(&ct->ct_lock);

		SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
		soupcall_clear(ct->ct_socket, SO_RCV);
		clnt_vc_upcallsdone(ct);
		SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);

		/*
		 * Abort any pending requests and wait until everyone
		 * has finished with clnt_vc_call.
		 */
		mtx_lock(&ct->ct_lock);
		TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
			cr->cr_xid = 0;
			cr->cr_error = ESHUTDOWN;
			wakeup(cr);
		}

		while (ct->ct_threads)
			msleep(ct, &ct->ct_lock, 0, "rpcclose", 0);
	}

	ct->ct_closing = FALSE;
	ct->ct_closed = TRUE;
	mtx_unlock(&ct->ct_lock);
	wakeup(ct);
}

static void
clnt_vc_destroy(CLIENT *cl)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	struct socket *so = NULL;

	clnt_vc_close(cl);

	mtx_lock(&ct->ct_lock);

	if (ct->ct_socket) {
		if (ct->ct_closeit) {
			so = ct->ct_socket;
		}
	}

	mtx_unlock(&ct->ct_lock);

	mtx_destroy(&ct->ct_lock);
	if (so) {
		soshutdown(so, SHUT_WR);
		soclose(so);
	}
	mem_free(ct, sizeof(struct ct_data));
	mem_free(cl, sizeof(CLIENT));
}

/*
 * Make sure that the time is not garbage.  A value of -1 is disallowed.
 * Note this is different from time_not_ok() in clnt_dg.c.
 */
static bool_t
time_not_ok(struct timeval *t)
{
	return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
		t->tv_usec <= -1 || t->tv_usec > 1000000);
}

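/*
 * Socket receive upcall.  Called (with the receive socket buffer lock
 * held) whenever data arrives on the connection.  Reassembles the
 * RPC-over-TCP record fragments (a 4-byte record mark followed by the
 * fragment data) and, once a complete record has been received, matches
 * the reply XID against the ct_pending list and wakes the thread that
 * is sleeping in clnt_vc_call().  On a receive error or EOF, all
 * pending requests are woken with an error instead.
 */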
int
clnt_vc_soupcall(struct socket *so, void *arg, int waitflag)
{
	struct ct_data *ct = (struct ct_data *) arg;
	struct uio uio;
	struct mbuf *m;
	struct ct_request *cr;
	int error, rcvflag, foundreq;
	uint32_t xid, header;
	bool_t do_read;

	ct->ct_upcallrefs++;
	uio.uio_td = curthread;
	do {
		/*
		 * If ct_record_resid is zero, we are waiting for a
		 * record mark.
		 */
		if (ct->ct_record_resid == 0) {

			/*
			 * Make sure there is either a whole record
			 * mark in the buffer or there is some other
			 * error condition
			 */
			do_read = FALSE;
			if (so->so_rcv.sb_cc >= sizeof(uint32_t)
			    || (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			    || so->so_error)
				do_read = TRUE;

			if (!do_read)
				break;

			SOCKBUF_UNLOCK(&so->so_rcv);
			uio.uio_resid = sizeof(uint32_t);
			m = NULL;
			rcvflag = MSG_DONTWAIT | MSG_SOCALLBCK;
			error = soreceive(so, NULL, &uio, &m, NULL, &rcvflag);
			SOCKBUF_LOCK(&so->so_rcv);

			if (error == EWOULDBLOCK)
				break;

			/*
			 * If there was an error, wake up all pending
			 * requests.
			 */
			if (error || uio.uio_resid > 0) {
			wakeup_all:
				mtx_lock(&ct->ct_lock);
				if (!error) {
					/*
					 * We must have got EOF trying
					 * to read from the stream.
					 */
					error = ECONNRESET;
				}
				ct->ct_error.re_status = RPC_CANTRECV;
				ct->ct_error.re_errno = error;
				TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
					cr->cr_error = error;
					wakeup(cr);
				}
				mtx_unlock(&ct->ct_lock);
				break;
			}
			bcopy(mtod(m, uint32_t *), &header, sizeof(uint32_t));
			header = ntohl(header);
			ct->ct_record = NULL;
			ct->ct_record_resid = header & 0x7fffffff;
			ct->ct_record_eor = ((header & 0x80000000) != 0);
			m_freem(m);
		} else {
			/*
			 * Wait until the socket has the whole record
			 * buffered.
			 */
			do_read = FALSE;
			if (so->so_rcv.sb_cc >= ct->ct_record_resid
			    || (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			    || so->so_error)
				do_read = TRUE;

			if (!do_read)
				break;

			/*
			 * We have the record mark. Read as much as
			 * the socket has buffered up to the end of
			 * this record.
			 */
			SOCKBUF_UNLOCK(&so->so_rcv);
			uio.uio_resid = ct->ct_record_resid;
			m = NULL;
			rcvflag = MSG_DONTWAIT | MSG_SOCALLBCK;
			error = soreceive(so, NULL, &uio, &m, NULL, &rcvflag);
			SOCKBUF_LOCK(&so->so_rcv);

			if (error == EWOULDBLOCK)
				break;

			if (error || uio.uio_resid == ct->ct_record_resid)
				goto wakeup_all;

			/*
			 * If we have part of the record already,
			 * chain this bit onto the end.
			 */
			if (ct->ct_record)
				m_last(ct->ct_record)->m_next = m;
			else
				ct->ct_record = m;

			ct->ct_record_resid = uio.uio_resid;

			/*
			 * If we have the entire record, see if we can
			 * match it to a request.
			 */
			if (ct->ct_record_resid == 0
			    && ct->ct_record_eor) {
				/*
				 * The XID is in the first uint32_t of
				 * the reply.
				 */
				if (ct->ct_record->m_len < sizeof(xid))
					ct->ct_record =
						m_pullup(ct->ct_record,
						    sizeof(xid));
				if (!ct->ct_record)
					break;
				bcopy(mtod(ct->ct_record, uint32_t *),
				    &xid, sizeof(uint32_t));
				xid = ntohl(xid);

				mtx_lock(&ct->ct_lock);
				foundreq = 0;
				TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
					if (cr->cr_xid == xid) {
						/*
						 * This one matches. We
						 * leave the reply mbuf
						 * in cr->cr_mrep. Set
						 * the XID to zero so
						 * that we will ignore
						 * any duplicated
						 * replies.
						 */
						cr->cr_xid = 0;
						cr->cr_mrep = ct->ct_record;
						cr->cr_error = 0;
						foundreq = 1;
						wakeup(cr);
						break;
					}
				}
				mtx_unlock(&ct->ct_lock);

				if (!foundreq)
					m_freem(ct->ct_record);
				ct->ct_record = NULL;
			}
		}
	} while (m);
	ct->ct_upcallrefs--;
	if (ct->ct_upcallrefs < 0)
		panic("rpcvc upcall refcnt");
	if (ct->ct_upcallrefs == 0)
		wakeup(&ct->ct_upcallrefs);
	return (SU_OK);
}

/*
 * Wait for all upcalls in progress to complete.
 */
static void
clnt_vc_upcallsdone(struct ct_data *ct)
{

	SOCKBUF_LOCK_ASSERT(&ct->ct_socket->so_rcv);

	while (ct->ct_upcallrefs > 0)
		(void) msleep(&ct->ct_upcallrefs,
		    SOCKBUF_MTX(&ct->ct_socket->so_rcv), 0, "rpcvcup", 0);
}