xref: /freebsd/sys/rpc/clnt_vc.c (revision 86077f4fd11070518a6d04eee7fdb93cbbfb1b52)
1 /*	$NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-3-Clause
5  *
6  * Copyright (c) 2009, Sun Microsystems, Inc.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions are met:
11  * - Redistributions of source code must retain the above copyright notice,
12  *   this list of conditions and the following disclaimer.
13  * - Redistributions in binary form must reproduce the above copyright notice,
14  *   this list of conditions and the following disclaimer in the documentation
15  *   and/or other materials provided with the distribution.
16  * - Neither the name of Sun Microsystems, Inc. nor the names of its
17  *   contributors may be used to endorse or promote products derived
18  *   from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 /*
35  * clnt_tcp.c, Implements a TCP/IP based, client side RPC.
36  *
37  * Copyright (C) 1984, Sun Microsystems, Inc.
38  *
39  * TCP based RPC supports 'batched calls'.
40  * A sequence of calls may be batched-up in a send buffer.  The rpc call
41  * return immediately to the client even though the call was not necessarily
42  * sent.  The batching occurs if the results' xdr routine is NULL (0) AND
43  * the rpc timeout value is zero (see clnt.h, rpc).
44  *
45  * Clients should NOT casually batch calls that in fact return results; that is,
46  * the server side should be aware that a call is batched and not produce any
47  * return message.  Batched calls that produce many result messages can
48  * deadlock (netlock) the client and the server....
49  *
50  * Now go hang yourself.
51  */
52 
53 #include "opt_kern_tls.h"
54 
55 #include <sys/param.h>
56 #include <sys/systm.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/ktls.h>
60 #include <sys/lock.h>
61 #include <sys/malloc.h>
62 #include <sys/mbuf.h>
63 #include <sys/mutex.h>
64 #include <sys/pcpu.h>
65 #include <sys/proc.h>
66 #include <sys/protosw.h>
67 #include <sys/socket.h>
68 #include <sys/socketvar.h>
69 #include <sys/sx.h>
70 #include <sys/syslog.h>
71 #include <sys/time.h>
72 #include <sys/uio.h>
73 
74 #include <net/vnet.h>
75 
76 #include <netinet/tcp.h>
77 
78 #include <rpc/rpc.h>
79 #include <rpc/rpc_com.h>
80 #include <rpc/krpc.h>
81 #include <rpc/rpcsec_tls.h>
82 
/*
 * Layout for a control message carrying sender credentials:
 * a cmsghdr immediately followed by its cmsgcred payload.
 */
struct cmessage {
        struct cmsghdr cmsg;
        struct cmsgcred cmcred;
};
87 
88 static enum clnt_stat clnt_vc_call(CLIENT *, struct rpc_callextra *,
89     rpcproc_t, struct mbuf *, struct mbuf **, struct timeval);
90 static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
91 static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *);
92 static void clnt_vc_abort(CLIENT *);
93 static bool_t clnt_vc_control(CLIENT *, u_int, void *);
94 static void clnt_vc_close(CLIENT *);
95 static void clnt_vc_destroy(CLIENT *);
96 static bool_t time_not_ok(struct timeval *);
97 static int clnt_vc_soupcall(struct socket *so, void *arg, int waitflag);
98 static void clnt_vc_dotlsupcall(void *data);
99 
/*
 * Method table for connection-oriented (TCP) RPC client handles;
 * installed into cl->cl_ops by clnt_vc_create().
 */
static const struct clnt_ops clnt_vc_ops = {
	.cl_call =	clnt_vc_call,
	.cl_abort =	clnt_vc_abort,
	.cl_geterr =	clnt_vc_geterr,
	.cl_freeres =	clnt_vc_freeres,
	.cl_close =	clnt_vc_close,
	.cl_destroy =	clnt_vc_destroy,
	.cl_control =	clnt_vc_control
};
109 
110 static void clnt_vc_upcallsdone(struct ct_data *);
111 
112 /*
113  * Create a client handle for a connection.
114  * Default options are set, which the user can change using clnt_control()'s.
115  * The rpc/vc package does buffering similar to stdio, so the client
116  * must pick send and receive buffer sizes, 0 => use the default.
117  * NB: fd is copied into a private area.
118  * NB: The rpch->cl_auth is set null authentication. Caller may wish to
119  * set this something more useful.
120  *
121  * fd should be an open socket
122  */
CLIENT *
clnt_vc_create(
	struct socket *so,		/* open socket (connected, or connectable to raddr) */
	struct sockaddr *raddr,		/* servers address */
	const rpcprog_t prog,		/* program number */
	const rpcvers_t vers,		/* version number */
	size_t sendsz,			/* buffer send size, 0 => default */
	size_t recvsz,			/* buffer recv size, 0 => default */
	int intrflag)			/* non-zero => connect wait is interruptible */
{
	CLIENT *cl;			/* client handle */
	struct ct_data *ct = NULL;	/* per-connection private data */
	struct timeval now;
	struct rpc_msg call_msg;
	static uint32_t disrupt;	/* perturbs the initial XID across handles */
	struct __rpc_sockinfo si;
	XDR xdrs;
	int error, interrupted, one = 1, sleep_flag;
	struct sockopt sopt;

	/* Seed the XID perturbation from the server address pointer. */
	if (disrupt == 0)
		disrupt = (uint32_t)(long)raddr;

	cl = (CLIENT *)mem_alloc(sizeof (*cl));
	ct = (struct ct_data *)mem_alloc(sizeof (*ct));

	mtx_init(&ct->ct_lock, "ct->ct_lock", NULL, MTX_DEF);
	ct->ct_threads = 0;
	ct->ct_closing = FALSE;
	ct->ct_closed = FALSE;
	ct->ct_upcallrefs = 0;
	ct->ct_rcvstate = RPCRCVSTATE_NORMAL;

	/*
	 * If not yet connected, start a connect to raddr and sleep until
	 * it completes, fails, or (when intrflag is set) is interrupted.
	 */
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = soconnect(so, raddr, curthread);
		SOCK_LOCK(so);
		interrupted = 0;
		sleep_flag = PSOCK;
		if (intrflag != 0)
			sleep_flag |= PCATCH;
		while ((so->so_state & SS_ISCONNECTING)
		    && so->so_error == 0) {
			error = msleep(&so->so_timeo, SOCK_MTX(so),
			    sleep_flag, "connec", 0);
			if (error) {
				if (error == EINTR || error == ERESTART)
					interrupted = 1;
				break;
			}
		}
		if (error == 0) {
			/* Pick up any asynchronous connect failure. */
			error = so->so_error;
			so->so_error = 0;
		}
		SOCK_UNLOCK(so);
		if (error) {
			if (!interrupted)
				so->so_state &= ~SS_ISCONNECTING;
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = error;
			goto err;
		}
	}

	if (!__rpc_socket2sockinfo(so, &si)) {
		goto err;
	}

	/* Keep long-lived connections alive across idle periods. */
	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = SOL_SOCKET;
		sopt.sopt_name = SO_KEEPALIVE;
		sopt.sopt_val = &one;
		sopt.sopt_valsize = sizeof(one);
		sosetopt(so, &sopt);
	}

	/* Disable Nagle so small RPC requests go out immediately. */
	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = IPPROTO_TCP;
		sopt.sopt_name = TCP_NODELAY;
		sopt.sopt_val = &one;
		sopt.sopt_valsize = sizeof(one);
		sosetopt(so, &sopt);
	}

	ct->ct_closeit = FALSE;

	/*
	 * Set up private data struct
	 */
	ct->ct_socket = so;
	/* tv_usec == -1 means "no default timeout set"; see clnt_vc_call(). */
	ct->ct_wait.tv_sec = -1;
	ct->ct_wait.tv_usec = -1;
	memcpy(&ct->ct_addr, raddr, raddr->sa_len);

	/*
	 * Initialize call message
	 */
	getmicrotime(&now);
	ct->ct_xid = ((uint32_t)++disrupt) ^ __RPC_GETXID(&now);
	call_msg.rm_xid = ct->ct_xid;
	call_msg.rm_direction = CALL;
	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
	call_msg.rm_call.cb_prog = (uint32_t)prog;
	call_msg.rm_call.cb_vers = (uint32_t)vers;

	/*
	 * pre-serialize the static part of the call msg and stash it away
	 */
	xdrmem_create(&xdrs, ct->ct_mcallc, MCALL_MSG_SIZE,
	    XDR_ENCODE);
	if (! xdr_callhdr(&xdrs, &call_msg))
		goto err;
	ct->ct_mpos = XDR_GETPOS(&xdrs);
	XDR_DESTROY(&xdrs);
	ct->ct_waitchan = "rpcrecv";
	ct->ct_waitflag = 0;

	/*
	 * Create a client handle which uses xdrrec for serialization
	 * and authnone for authentication.
	 */
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	error = soreserve(ct->ct_socket, sendsz, recvsz);
	if (error != 0)
		goto err;
	cl->cl_refs = 1;
	cl->cl_ops = &clnt_vc_ops;
	cl->cl_private = ct;
	cl->cl_auth = authnone_create();

	/* Replies are delivered asynchronously via the receive upcall. */
	SOCK_RECVBUF_LOCK(ct->ct_socket);
	soupcall_set(ct->ct_socket, SO_RCV, clnt_vc_soupcall, ct);
	SOCK_RECVBUF_UNLOCK(ct->ct_socket);

	ct->ct_raw = NULL;
	ct->ct_record = NULL;
	ct->ct_record_resid = 0;
	ct->ct_sslrefno = 0;
	TAILQ_INIT(&ct->ct_pending);
	return (cl);

err:
	/* The caller retains ownership of the socket on failure. */
	mtx_destroy(&ct->ct_lock);
	mem_free(ct, sizeof (struct ct_data));
	mem_free(cl, sizeof (CLIENT));

	return ((CLIENT *)NULL);
}
276 
/*
 * Issue one RPC over the connection and wait for the matching reply.
 *
 * The request is queued on ct_pending so the socket receive upcall can
 * match the reply by XID and wake us.  ct_lock protects ct_pending,
 * ct_xid, ct_threads and ct_rcvstate; note where it is dropped for
 * blocking operations (mbuf setup, sosend, reply decode).
 *
 * Returns an enum clnt_stat; on RPC_SUCCESS, *resultsp holds the
 * decoded reply body as an mbuf chain (ownership passes to the caller).
 */
static enum clnt_stat
clnt_vc_call(
	CLIENT		*cl,		/* client handle */
	struct rpc_callextra *ext,	/* call metadata */
	rpcproc_t	proc,		/* procedure number */
	struct mbuf	*args,		/* pointer to args */
	struct mbuf	**resultsp,	/* pointer to results */
	struct timeval	utimeout)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	AUTH *auth;
	struct rpc_err *errp;
	enum clnt_stat stat;
	XDR xdrs;
	struct rpc_msg reply_msg;
	bool_t ok;
	int nrefreshes = 2;		/* number of times to refresh cred */
	struct timeval timeout;
	uint32_t xid;
	struct mbuf *mreq = NULL, *results;
	struct ct_request *cr;
	int error, maxextsiz, trycnt;
#ifdef KERN_TLS
	u_int maxlen;
#endif

	cr = malloc(sizeof(struct ct_request), M_RPC, M_WAITOK);

	mtx_lock(&ct->ct_lock);

	if (ct->ct_closing || ct->ct_closed) {
		mtx_unlock(&ct->ct_lock);
		free(cr, M_RPC);
		return (RPC_CANTSEND);
	}
	/* Count active callers so clnt_vc_close() can wait for us. */
	ct->ct_threads++;

	/* Per-call auth/error destinations may be overridden via ext. */
	if (ext) {
		auth = ext->rc_auth;
		errp = &ext->rc_err;
	} else {
		auth = cl->cl_auth;
		errp = &ct->ct_error;
	}

	cr->cr_mrep = NULL;
	cr->cr_error = 0;

	/* ct_wait.tv_usec == -1 means no CLSET_TIMEOUT default was set. */
	if (ct->ct_wait.tv_usec == -1) {
		timeout = utimeout;	/* use supplied timeout */
	} else {
		timeout = ct->ct_wait;	/* use default timeout */
	}

	/*
	 * After 15sec of looping, allow it to return RPC_CANTSEND, which will
	 * cause the clnt_reconnect layer to create a new TCP connection.
	 */
	trycnt = 15 * hz;
call_again:
	mtx_assert(&ct->ct_lock, MA_OWNED);
	if (ct->ct_closing || ct->ct_closed) {
		ct->ct_threads--;
		wakeup(ct);
		mtx_unlock(&ct->ct_lock);
		free(cr, M_RPC);
		return (RPC_CANTSEND);
	}

	/* Each (re)transmission gets a fresh XID, taken under the lock. */
	ct->ct_xid++;
	xid = ct->ct_xid;

	mtx_unlock(&ct->ct_lock);

	/*
	 * Leave space to pre-pend the record mark.
	 */
	mreq = m_gethdr(M_WAITOK, MT_DATA);
	mreq->m_data += sizeof(uint32_t);
	KASSERT(ct->ct_mpos + sizeof(uint32_t) <= MHLEN,
	    ("RPC header too big"));
	/* Copy in the pre-serialized call header built by clnt_vc_create(). */
	bcopy(ct->ct_mcallc, mreq->m_data, ct->ct_mpos);
	mreq->m_len = ct->ct_mpos;

	/*
	 * The XID is the first thing in the request.
	 */
	*mtod(mreq, uint32_t *) = htonl(xid);

	xdrmbuf_create(&xdrs, mreq, XDR_ENCODE);

	errp->re_status = stat = RPC_SUCCESS;

	/* Append the procedure number and the auth-wrapped args. */
	if ((! XDR_PUTINT32(&xdrs, &proc)) ||
	    (! AUTH_MARSHALL(auth, xid, &xdrs,
		m_copym(args, 0, M_COPYALL, M_WAITOK)))) {
		errp->re_status = stat = RPC_CANTENCODEARGS;
		mtx_lock(&ct->ct_lock);
		goto out;
	}
	mreq->m_pkthdr.len = m_length(mreq, NULL);

	/*
	 * Prepend a record marker containing the packet length.
	 */
	M_PREPEND(mreq, sizeof(uint32_t), M_WAITOK);
	/* High bit set marks this fragment as the last of the record. */
	*mtod(mreq, uint32_t *) =
		htonl(0x80000000 | (mreq->m_pkthdr.len - sizeof(uint32_t)));

	cr->cr_xid = xid;
	mtx_lock(&ct->ct_lock);
	/*
	 * Check to see if the other end has already started to close down
	 * the connection. The upcall will have set ct_error.re_status
	 * to RPC_CANTRECV if this is the case.
	 * If the other end starts to close down the connection after this
	 * point, it will be detected later when cr_error is checked,
	 * since the request is in the ct_pending queue.
	 */
	if (ct->ct_error.re_status == RPC_CANTRECV) {
		if (errp != &ct->ct_error) {
			errp->re_errno = ct->ct_error.re_errno;
			errp->re_status = RPC_CANTRECV;
		}
		stat = RPC_CANTRECV;
		goto out;
	}

	/* For TLS, wait for an upcall to be done, as required. */
	while ((ct->ct_rcvstate & (RPCRCVSTATE_NORMAL |
	    RPCRCVSTATE_NONAPPDATA)) == 0)
		msleep(&ct->ct_rcvstate, &ct->ct_lock, 0, "rpcrcvst", hz);

	/* Queue the request so the upcall can match the reply by XID. */
	TAILQ_INSERT_TAIL(&ct->ct_pending, cr, cr_link);
	mtx_unlock(&ct->ct_lock);

	if (ct->ct_sslrefno != 0) {
		/*
		 * Copy the mbuf chain to a chain of ext_pgs mbuf(s)
		 * as required by KERN_TLS.
		 */
		maxextsiz = TLS_MAX_MSG_SIZE_V10_2;
#ifdef KERN_TLS
		if (rpctls_getinfo(&maxlen, false, false))
			maxextsiz = min(maxextsiz, maxlen);
#endif
		mreq = _rpc_copym_into_ext_pgs(mreq, maxextsiz);
	}
	/*
	 * sosend consumes mreq.
	 */
	error = sosend(ct->ct_socket, NULL, NULL, mreq, NULL, 0, curthread);
	mreq = NULL;
	/*
	 * EMSGSIZE (or a non-interruptible ERESTART) means the send buffer
	 * was full: dequeue, wait for space, pause one tick and retry from
	 * call_again with a fresh XID.
	 */
	if (error == EMSGSIZE || (error == ERESTART &&
	    (ct->ct_waitflag & PCATCH) == 0 && trycnt-- > 0)) {
		SOCK_SENDBUF_LOCK(ct->ct_socket);
		sbwait(ct->ct_socket, SO_SND);
		SOCK_SENDBUF_UNLOCK(ct->ct_socket);
		AUTH_VALIDATE(auth, xid, NULL, NULL);
		mtx_lock(&ct->ct_lock);
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		/* Sleep for 1 clock tick before trying the sosend() again. */
		mtx_unlock(&ct->ct_lock);
		pause("rpclpsnd", 1);
		mtx_lock(&ct->ct_lock);
		goto call_again;
	}

	/* Prepare the reply template; cr_verf receives the verifier bytes. */
	reply_msg.acpted_rply.ar_verf.oa_flavor = AUTH_NULL;
	reply_msg.acpted_rply.ar_verf.oa_base = cr->cr_verf;
	reply_msg.acpted_rply.ar_verf.oa_length = 0;
	reply_msg.acpted_rply.ar_results.where = NULL;
	reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;

	mtx_lock(&ct->ct_lock);
	if (error) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		errp->re_errno = error;
		errp->re_status = stat = RPC_CANTSEND;
		goto out;
	}

	/*
	 * Check to see if we got an upcall while waiting for the
	 * lock. In both these cases, the request has been removed
	 * from ct->ct_pending.
	 */
	if (cr->cr_error) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		errp->re_errno = cr->cr_error;
		errp->re_status = stat = RPC_CANTRECV;
		goto out;
	}
	if (cr->cr_mrep) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		goto got_reply;
	}

	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		errp->re_status = stat = RPC_TIMEDOUT;
		goto out;
	}

	/* Sleep until the upcall delivers the reply or the timeout fires. */
	error = msleep(cr, &ct->ct_lock, ct->ct_waitflag, ct->ct_waitchan,
	    tvtohz(&timeout));

	TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);

	if (error) {
		/*
		 * The sleep returned an error so our request is still
		 * on the list. Turn the error code into an
		 * appropriate client status.
		 */
		errp->re_errno = error;
		switch (error) {
		case EINTR:
			stat = RPC_INTR;
			break;
		case EWOULDBLOCK:
			stat = RPC_TIMEDOUT;
			break;
		default:
			stat = RPC_CANTRECV;
		}
		errp->re_status = stat;
		goto out;
	} else {
		/*
		 * We were woken up by the upcall.  If the
		 * upcall had a receive error, report that,
		 * otherwise we have a reply.
		 */
		if (cr->cr_error) {
			errp->re_errno = cr->cr_error;
			errp->re_status = stat = RPC_CANTRECV;
			goto out;
		}
	}

got_reply:
	/*
	 * Now decode and validate the response. We need to drop the
	 * lock since xdr_replymsg may end up sleeping in malloc.
	 */
	mtx_unlock(&ct->ct_lock);

	if (ext && ext->rc_feedback)
		ext->rc_feedback(FEEDBACK_OK, proc, ext->rc_feedback_arg);

	xdrmbuf_create(&xdrs, cr->cr_mrep, XDR_DECODE);
	ok = xdr_replymsg(&xdrs, &reply_msg);
	/* The XDR stream now owns the reply mbufs. */
	cr->cr_mrep = NULL;

	if (ok) {
		if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
		    (reply_msg.acpted_rply.ar_stat == SUCCESS))
			errp->re_status = stat = RPC_SUCCESS;
		else
			stat = _seterr_reply(&reply_msg, errp);

		if (stat == RPC_SUCCESS) {
			results = xdrmbuf_getall(&xdrs);
			if (!AUTH_VALIDATE(auth, xid,
				&reply_msg.acpted_rply.ar_verf,
				&results)) {
				errp->re_status = stat = RPC_AUTHERROR;
				errp->re_why = AUTH_INVALIDRESP;
			} else {
				KASSERT(results,
				    ("auth validated but no result"));
				*resultsp = results;
			}
		}		/* end successful completion */
		/*
		 * If unsuccessful AND error is an authentication error
		 * then refresh credentials and try again, else break
		 */
		else if (stat == RPC_AUTHERROR)
			/* maybe our credentials need to be refreshed ... */
			if (nrefreshes > 0 &&
			    AUTH_REFRESH(auth, &reply_msg)) {
				nrefreshes--;
				XDR_DESTROY(&xdrs);
				mtx_lock(&ct->ct_lock);
				goto call_again;
			}
		/* end of unsuccessful completion */
	}	/* end of valid reply message */
	else {
		errp->re_status = stat = RPC_CANTDECODERES;
	}
	XDR_DESTROY(&xdrs);
	mtx_lock(&ct->ct_lock);
out:
	/* Common exit: always entered with ct_lock held. */
	mtx_assert(&ct->ct_lock, MA_OWNED);

	KASSERT(stat != RPC_SUCCESS || *resultsp,
	    ("RPC_SUCCESS without reply"));

	if (mreq)
		m_freem(mreq);
	if (cr->cr_mrep)
		m_freem(cr->cr_mrep);

	ct->ct_threads--;
	if (ct->ct_closing)
		wakeup(ct);

	mtx_unlock(&ct->ct_lock);

	/* On failure, let the auth layer release per-XID state. */
	if (auth && stat != RPC_SUCCESS)
		AUTH_VALIDATE(auth, xid, NULL, NULL);

	free(cr, M_RPC);

	return (stat);
}
599 
600 static void
601 clnt_vc_geterr(CLIENT *cl, struct rpc_err *errp)
602 {
603 	struct ct_data *ct = (struct ct_data *) cl->cl_private;
604 
605 	*errp = ct->ct_error;
606 }
607 
608 static bool_t
609 clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
610 {
611 	XDR xdrs;
612 	bool_t dummy;
613 
614 	xdrs.x_op = XDR_FREE;
615 	dummy = (*xdr_res)(&xdrs, res_ptr);
616 
617 	return (dummy);
618 }
619 
/*ARGSUSED*/
/* Abort is a no-op for connection-oriented transports. */
static void
clnt_vc_abort(CLIENT *cl)
{
}
625 
/*
 * Get/set control knobs on the client handle (timeouts, XID, program
 * and version fields of the cached call header, TLS state, etc.).
 *
 * All cases run under ct_lock; each early return must unlock first.
 * Returns TRUE on success, FALSE for unknown/unsupported requests.
 */
static bool_t
clnt_vc_control(CLIENT *cl, u_int request, void *info)
{
	struct ct_data *ct = (struct ct_data *)cl->cl_private;
	void *infop = info;
	SVCXPRT *xprt;
	uint64_t *p;
	int error;
	static u_int thrdnum = 0;	/* names successive krpctls kthreads */

	mtx_lock(&ct->ct_lock);

	/* Requests that do not need 'info' are handled first. */
	switch (request) {
	case CLSET_FD_CLOSE:
		ct->ct_closeit = TRUE;
		mtx_unlock(&ct->ct_lock);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		ct->ct_closeit = FALSE;
		mtx_unlock(&ct->ct_lock);
		return (TRUE);
	default:
		break;
	}

	/* for other requests which use info */
	if (info == NULL) {
		mtx_unlock(&ct->ct_lock);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			mtx_unlock(&ct->ct_lock);
			return (FALSE);
		}
		ct->ct_wait = *(struct timeval *)infop;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)infop = ct->ct_wait;
		break;
	case CLGET_SERVER_ADDR:
		(void) memcpy(info, &ct->ct_addr, (size_t)ct->ct_addr.ss_len);
		break;
	case CLGET_SVC_ADDR:
		/*
		 * Slightly different semantics to userland - we use
		 * sockaddr instead of netbuf.
		 */
		memcpy(info, &ct->ct_addr, ct->ct_addr.ss_len);
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		mtx_unlock(&ct->ct_lock);
		return (FALSE);
	case CLGET_XID:
		*(uint32_t *)info = ct->ct_xid;
		break;
	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		/* decrement by 1 as clnt_vc_call() increments once */
		ct->ct_xid = *(uint32_t *)info - 1;
		break;
	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(uint32_t *)info =
		    ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
		    4 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_VERS:
		*(uint32_t *)(void *)(ct->ct_mcallc +
		    4 * BYTES_PER_XDR_UNIT) =
		    htonl(*(uint32_t *)info);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(uint32_t *)info =
		    ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
		    3 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_PROG:
		*(uint32_t *)(void *)(ct->ct_mcallc +
		    3 * BYTES_PER_XDR_UNIT) =
		    htonl(*(uint32_t *)info);
		break;

	case CLSET_WAITCHAN:
		/* Wait-channel string used by msleep() in clnt_vc_call(). */
		ct->ct_waitchan = (const char *)info;
		break;

	case CLGET_WAITCHAN:
		*(const char **) info = ct->ct_waitchan;
		break;

	case CLSET_INTERRUPTIBLE:
		/* PCATCH makes the reply wait interruptible by signals. */
		if (*(int *) info)
			ct->ct_waitflag = PCATCH;
		else
			ct->ct_waitflag = 0;
		break;

	case CLGET_INTERRUPTIBLE:
		if (ct->ct_waitflag)
			*(int *) info = TRUE;
		else
			*(int *) info = FALSE;
		break;

	case CLSET_BACKCHANNEL:
		/* Attach a server transport for backchannel callbacks. */
		xprt = (SVCXPRT *)info;
		if (ct->ct_backchannelxprt == NULL) {
			SVC_ACQUIRE(xprt);
			xprt->xp_p2 = ct;
			if (ct->ct_sslrefno != 0)
				xprt->xp_tls = RPCTLS_FLAGS_HANDSHAKE;
			ct->ct_backchannelxprt = xprt;
		}
		break;

	case CLSET_TLS:
		/* info points at three uint64_t: sec, usec, refno. */
		p = (uint64_t *)info;
		ct->ct_sslsec = *p++;
		ct->ct_sslusec = *p++;
		ct->ct_sslrefno = *p;
		if (ct->ct_sslrefno != RPCTLS_REFNO_HANDSHAKE) {
			/* cl ref cnt is released by clnt_vc_dotlsupcall(). */
			CLNT_ACQUIRE(cl);
			mtx_unlock(&ct->ct_lock);
			/* Start the kthread that handles upcalls. */
			error = kthread_add(clnt_vc_dotlsupcall, cl,
			    NULL, NULL, 0, 0, "krpctls%u", thrdnum++);
			if (error != 0)
				panic("Can't add KRPC thread error %d", error);
		} else
			mtx_unlock(&ct->ct_lock);
		return (TRUE);

	case CLSET_BLOCKRCV:
		/* Toggle reception blocking during the TLS handshake. */
		if (*(int *) info) {
			ct->ct_rcvstate &= ~RPCRCVSTATE_NORMAL;
			ct->ct_rcvstate |= RPCRCVSTATE_TLSHANDSHAKE;
		} else {
			ct->ct_rcvstate &= ~RPCRCVSTATE_TLSHANDSHAKE;
			ct->ct_rcvstate |= RPCRCVSTATE_NORMAL;
		}
		break;

	default:
		mtx_unlock(&ct->ct_lock);
		return (FALSE);
	}

	mtx_unlock(&ct->ct_lock);
	return (TRUE);
}
793 
/*
 * Shut the connection down: clear the socket upcall, abort pending
 * requests with ESHUTDOWN and wait for all clnt_vc_call() threads to
 * drain.  Idempotent; concurrent closers wait for the first one.
 */
static void
clnt_vc_close(CLIENT *cl)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	struct ct_request *cr;

	mtx_lock(&ct->ct_lock);

	if (ct->ct_closed) {
		mtx_unlock(&ct->ct_lock);
		return;
	}

	/* Another thread is closing; wait for it to finish. */
	if (ct->ct_closing) {
		while (ct->ct_closing)
			msleep(ct, &ct->ct_lock, 0, "rpcclose", 0);
		KASSERT(ct->ct_closed, ("client should be closed"));
		mtx_unlock(&ct->ct_lock);
		return;
	}

	if (ct->ct_socket) {
		ct->ct_closing = TRUE;
		mtx_unlock(&ct->ct_lock);

		/* Remove the receive upcall and wait for in-flight ones. */
		SOCK_RECVBUF_LOCK(ct->ct_socket);
		if (ct->ct_socket->so_rcv.sb_upcall != NULL) {
			soupcall_clear(ct->ct_socket, SO_RCV);
			clnt_vc_upcallsdone(ct);
		}
		SOCK_RECVBUF_UNLOCK(ct->ct_socket);

		/*
		 * Abort any pending requests and wait until everyone
		 * has finished with clnt_vc_call.
		 */
		mtx_lock(&ct->ct_lock);
		TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
			cr->cr_xid = 0;
			cr->cr_error = ESHUTDOWN;
			wakeup(cr);
		}

		while (ct->ct_threads)
			msleep(ct, &ct->ct_lock, 0, "rpcclose", 0);
	}

	ct->ct_closing = FALSE;
	ct->ct_closed = TRUE;
	/* Wake the TLS upcall kthread, which sleeps on ct_sslrefno. */
	wakeup(&ct->ct_sslrefno);
	mtx_unlock(&ct->ct_lock);
	wakeup(ct);
}
847 
/*
 * Tear the client handle down completely: close the connection,
 * detach any backchannel transport, wait for the TLS upcall kthread
 * to exit, dispose of the socket (TLS handles do an rpctls disconnect
 * upcall first) and free all private data.
 */
static void
clnt_vc_destroy(CLIENT *cl)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	struct socket *so;
	SVCXPRT *xprt;
	uint32_t reterr;

	clnt_vc_close(cl);

	mtx_lock(&ct->ct_lock);
	xprt = ct->ct_backchannelxprt;
	ct->ct_backchannelxprt = NULL;
	if (xprt != NULL) {
		mtx_unlock(&ct->ct_lock);	/* To avoid a LOR. */
		sx_xlock(&xprt->xp_lock);
		mtx_lock(&ct->ct_lock);
		xprt->xp_p2 = NULL;
		sx_xunlock(&xprt->xp_lock);
		SVC_RELEASE(xprt);
	}

	/* Wait for the upcall kthread to terminate. */
	while ((ct->ct_rcvstate & RPCRCVSTATE_UPCALLTHREAD) != 0)
		msleep(&ct->ct_sslrefno, &ct->ct_lock, 0,
		    "clntvccl", hz);
	mtx_unlock(&ct->ct_lock);
	mtx_destroy(&ct->ct_lock);

	/* Only dispose of the socket if CLSET_FD_CLOSE was requested. */
	so = ct->ct_closeit ? ct->ct_socket : NULL;
	if (so) {
		if (ct->ct_sslrefno != 0) {
			/*
			 * If the TLS handshake is in progress, the upcall
			 * will fail, but the socket should be closed by the
			 * daemon, since the connect upcall has just failed.
			 */
			if (ct->ct_sslrefno != RPCTLS_REFNO_HANDSHAKE) {
				/*
				 * If the upcall fails, the socket has
				 * probably been closed via the rpctlscd
				 * daemon having crashed or been
				 * restarted, so ignore return stat.
				 */
				rpctls_cl_disconnect(ct->ct_sslsec,
				    ct->ct_sslusec, ct->ct_sslrefno,
				    &reterr);
			}
			/* Must sorele() to get rid of reference. */
			CURVNET_SET(so->so_vnet);
			sorele(so);
			CURVNET_RESTORE();
		} else {
			soshutdown(so, SHUT_WR);
			soclose(so);
		}
	}
	/* Free any partially assembled record and raw input data. */
	m_freem(ct->ct_record);
	m_freem(ct->ct_raw);
	mem_free(ct, sizeof(struct ct_data));
	if (cl->cl_netid && cl->cl_netid[0])
		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
	if (cl->cl_tp && cl->cl_tp[0])
		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
	mem_free(cl, sizeof(CLIENT));
}
914 
915 /*
916  * Make sure that the time is not garbage.   -1 value is disallowed.
917  * Note this is different from time_not_ok in clnt_dg.c
918  */
919 static bool_t
920 time_not_ok(struct timeval *t)
921 {
922 	return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
923 		t->tv_usec <= -1 || t->tv_usec > 1000000);
924 }
925 
926 int
927 clnt_vc_soupcall(struct socket *so, void *arg, int waitflag)
928 {
929 	struct ct_data *ct = (struct ct_data *) arg;
930 	struct uio uio;
931 	struct mbuf *m, *m2;
932 	struct ct_request *cr;
933 	int error, rcvflag, foundreq;
934 	uint32_t xid_plus_direction[2], header;
935 	SVCXPRT *xprt;
936 	struct cf_conn *cd;
937 	u_int rawlen;
938 	struct cmsghdr *cmsg;
939 	struct tls_get_record tgr;
940 
941 	/*
942 	 * RPC-over-TLS needs to block reception during
943 	 * upcalls since the upcall will be doing I/O on
944 	 * the socket via openssl library calls.
945 	 */
946 	mtx_lock(&ct->ct_lock);
947 	if ((ct->ct_rcvstate & (RPCRCVSTATE_NORMAL |
948 	    RPCRCVSTATE_NONAPPDATA)) == 0) {
949 		/* Mark that a socket upcall needs to be done. */
950 		if ((ct->ct_rcvstate & (RPCRCVSTATE_UPCALLNEEDED |
951 		    RPCRCVSTATE_UPCALLINPROG)) != 0)
952 			ct->ct_rcvstate |= RPCRCVSTATE_SOUPCALLNEEDED;
953 		mtx_unlock(&ct->ct_lock);
954 		return (SU_OK);
955 	}
956 	mtx_unlock(&ct->ct_lock);
957 
958 	/*
959 	 * If another thread is already here, it must be in
960 	 * soreceive(), so just return to avoid races with it.
961 	 * ct_upcallrefs is protected by the socket receive buffer lock
962 	 * which is held in this function, except when
963 	 * soreceive() is called.
964 	 */
965 	if (ct->ct_upcallrefs > 0)
966 		return (SU_OK);
967 	ct->ct_upcallrefs++;
968 
969 	/*
970 	 * Read as much as possible off the socket and link it
971 	 * onto ct_raw.
972 	 */
973 	for (;;) {
974 		uio.uio_resid = 1000000000;
975 		uio.uio_td = curthread;
976 		m2 = m = NULL;
977 		rcvflag = MSG_DONTWAIT | MSG_SOCALLBCK;
978 		if (ct->ct_sslrefno != 0 && (ct->ct_rcvstate &
979 		    RPCRCVSTATE_NORMAL) != 0)
980 			rcvflag |= MSG_TLSAPPDATA;
981 		SOCK_RECVBUF_UNLOCK(so);
982 		error = soreceive(so, NULL, &uio, &m, &m2, &rcvflag);
983 		SOCK_RECVBUF_LOCK(so);
984 
985 		if (error == EWOULDBLOCK) {
986 			/*
987 			 * We must re-test for readability after
988 			 * taking the lock to protect us in the case
989 			 * where a new packet arrives on the socket
990 			 * after our call to soreceive fails with
991 			 * EWOULDBLOCK.
992 			 */
993 			error = 0;
994 			if (!soreadable(so))
995 				break;
996 			continue;
997 		}
998 		if (error == 0 && m == NULL) {
999 			/*
1000 			 * We must have got EOF trying
1001 			 * to read from the stream.
1002 			 */
1003 			error = ECONNRESET;
1004 		}
1005 
1006 		/*
1007 		 * A return of ENXIO indicates that there is an
1008 		 * alert record at the head of the
1009 		 * socket's receive queue, for TLS connections.
1010 		 * This record needs to be handled in userland
1011 		 * via an SSL_read() call, so do an upcall to the daemon.
1012 		 */
1013 		if (ct->ct_sslrefno != 0 && error == ENXIO) {
1014 			/* Disable reception, marking an upcall needed. */
1015 			mtx_lock(&ct->ct_lock);
1016 			ct->ct_rcvstate |= RPCRCVSTATE_UPCALLNEEDED;
1017 			/*
			 * If an upcall is needed, wake up the kthread
1019 			 * that runs clnt_vc_dotlsupcall().
1020 			 */
1021 			wakeup(&ct->ct_sslrefno);
1022 			mtx_unlock(&ct->ct_lock);
1023 			break;
1024 		}
1025 		if (error != 0)
1026 			break;
1027 
1028 		/* Process any record header(s). */
1029 		if (m2 != NULL) {
1030 			cmsg = mtod(m2, struct cmsghdr *);
1031 			if (cmsg->cmsg_type == TLS_GET_RECORD &&
1032 			    cmsg->cmsg_len == CMSG_LEN(sizeof(tgr))) {
1033 				memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr));
1034 				/*
1035 				 * TLS_RLTYPE_ALERT records should be handled
1036 				 * since soreceive() would have returned
1037 				 * ENXIO.  Just throw any other
1038 				 * non-TLS_RLTYPE_APP records away.
1039 				 */
1040 				if (tgr.tls_type != TLS_RLTYPE_APP) {
1041 					m_freem(m);
1042 					m_free(m2);
1043 					mtx_lock(&ct->ct_lock);
1044 					ct->ct_rcvstate &=
1045 					    ~RPCRCVSTATE_NONAPPDATA;
1046 					ct->ct_rcvstate |= RPCRCVSTATE_NORMAL;
1047 					mtx_unlock(&ct->ct_lock);
1048 					continue;
1049 				}
1050 			}
1051 			m_free(m2);
1052 		}
1053 
1054 		if (ct->ct_raw != NULL)
1055 			m_last(ct->ct_raw)->m_next = m;
1056 		else
1057 			ct->ct_raw = m;
1058 	}
1059 	rawlen = m_length(ct->ct_raw, NULL);
1060 
1061 	/* Now, process as much of ct_raw as possible. */
1062 	for (;;) {
1063 		/*
1064 		 * If ct_record_resid is zero, we are waiting for a
1065 		 * record mark.
1066 		 */
1067 		if (ct->ct_record_resid == 0) {
1068 			if (rawlen < sizeof(uint32_t))
1069 				break;
1070 			m_copydata(ct->ct_raw, 0, sizeof(uint32_t),
1071 			    (char *)&header);
1072 			header = ntohl(header);
1073 			ct->ct_record_resid = header & 0x7fffffff;
1074 			ct->ct_record_eor = ((header & 0x80000000) != 0);
1075 			m_adj(ct->ct_raw, sizeof(uint32_t));
1076 			rawlen -= sizeof(uint32_t);
1077 		} else {
1078 			/*
1079 			 * Move as much of the record as possible to
1080 			 * ct_record.
1081 			 */
1082 			if (rawlen == 0)
1083 				break;
1084 			if (rawlen <= ct->ct_record_resid) {
1085 				if (ct->ct_record != NULL)
1086 					m_last(ct->ct_record)->m_next =
1087 					    ct->ct_raw;
1088 				else
1089 					ct->ct_record = ct->ct_raw;
1090 				ct->ct_raw = NULL;
1091 				ct->ct_record_resid -= rawlen;
1092 				rawlen = 0;
1093 			} else {
1094 				m = m_split(ct->ct_raw, ct->ct_record_resid,
1095 				    M_NOWAIT);
1096 				if (m == NULL)
1097 					break;
1098 				if (ct->ct_record != NULL)
1099 					m_last(ct->ct_record)->m_next =
1100 					    ct->ct_raw;
1101 				else
1102 					ct->ct_record = ct->ct_raw;
1103 				rawlen -= ct->ct_record_resid;
1104 				ct->ct_record_resid = 0;
1105 				ct->ct_raw = m;
1106 			}
1107 			if (ct->ct_record_resid > 0)
1108 				break;
1109 
1110 			/*
1111 			 * If we have the entire record, see if we can
1112 			 * match it to a request.
1113 			 */
1114 			if (ct->ct_record_eor) {
1115 				/*
1116 				 * The XID is in the first uint32_t of
1117 				 * the reply and the message direction
1118 				 * is the second one.
1119 				 */
1120 				if (ct->ct_record->m_len <
1121 				    sizeof(xid_plus_direction) &&
1122 				    m_length(ct->ct_record, NULL) <
1123 				    sizeof(xid_plus_direction)) {
1124 					/*
1125 					 * What to do now?
1126 					 * The data in the TCP stream is
1127 					 * corrupted such that there is no
1128 					 * valid RPC message to parse.
1129 					 * I think it best to close this
1130 					 * connection and allow
1131 					 * clnt_reconnect_call() to try
1132 					 * and establish a new one.
1133 					 */
1134 					printf("clnt_vc_soupcall: "
1135 					    "connection data corrupted\n");
1136 					error = ECONNRESET;
1137 					goto wakeup_all;
1138 				}
1139 				m_copydata(ct->ct_record, 0,
1140 				    sizeof(xid_plus_direction),
1141 				    (char *)xid_plus_direction);
1142 				xid_plus_direction[0] =
1143 				    ntohl(xid_plus_direction[0]);
1144 				xid_plus_direction[1] =
1145 				    ntohl(xid_plus_direction[1]);
1146 				/* Check message direction. */
1147 				if (xid_plus_direction[1] == CALL) {
1148 					/* This is a backchannel request. */
1149 					mtx_lock(&ct->ct_lock);
1150 					xprt = ct->ct_backchannelxprt;
1151 					if (xprt == NULL) {
1152 						mtx_unlock(&ct->ct_lock);
1153 						/* Just throw it away. */
1154 						m_freem(ct->ct_record);
1155 						ct->ct_record = NULL;
1156 					} else {
1157 						cd = (struct cf_conn *)
1158 						    xprt->xp_p1;
1159 						m2 = cd->mreq;
1160 						/*
1161 						 * The requests are chained
1162 						 * in the m_nextpkt list.
1163 						 */
1164 						while (m2 != NULL &&
1165 						    m2->m_nextpkt != NULL)
1166 							/* Find end of list. */
1167 							m2 = m2->m_nextpkt;
1168 						if (m2 != NULL)
1169 							m2->m_nextpkt =
1170 							    ct->ct_record;
1171 						else
1172 							cd->mreq =
1173 							    ct->ct_record;
1174 						ct->ct_record->m_nextpkt =
1175 						    NULL;
1176 						ct->ct_record = NULL;
1177 						xprt_active(xprt);
1178 						mtx_unlock(&ct->ct_lock);
1179 					}
1180 				} else {
1181 					mtx_lock(&ct->ct_lock);
1182 					foundreq = 0;
1183 					TAILQ_FOREACH(cr, &ct->ct_pending,
1184 					    cr_link) {
1185 						if (cr->cr_xid ==
1186 						    xid_plus_direction[0]) {
1187 							/*
1188 							 * This one
1189 							 * matches. We leave
1190 							 * the reply mbuf in
1191 							 * cr->cr_mrep. Set
1192 							 * the XID to zero so
1193 							 * that we will ignore
1194 							 * any duplicated
1195 							 * replies.
1196 							 */
1197 							cr->cr_xid = 0;
1198 							cr->cr_mrep =
1199 							    ct->ct_record;
1200 							cr->cr_error = 0;
1201 							foundreq = 1;
1202 							wakeup(cr);
1203 							break;
1204 						}
1205 					}
1206 					mtx_unlock(&ct->ct_lock);
1207 
1208 					if (!foundreq)
1209 						m_freem(ct->ct_record);
1210 					ct->ct_record = NULL;
1211 				}
1212 			}
1213 		}
1214 	}
1215 
1216 	if (error != 0) {
1217 	wakeup_all:
1218 		/*
1219 		 * This socket is broken, so mark that it cannot
1220 		 * receive and fail all RPCs waiting for a reply
1221 		 * on it, so that they will be retried on a new
1222 		 * TCP connection created by clnt_reconnect_X().
1223 		 */
1224 		mtx_lock(&ct->ct_lock);
1225 		ct->ct_error.re_status = RPC_CANTRECV;
1226 		ct->ct_error.re_errno = error;
1227 		TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
1228 			cr->cr_error = error;
1229 			wakeup(cr);
1230 		}
1231 		mtx_unlock(&ct->ct_lock);
1232 	}
1233 
1234 	ct->ct_upcallrefs--;
1235 	if (ct->ct_upcallrefs < 0)
1236 		panic("rpcvc upcall refcnt");
1237 	if (ct->ct_upcallrefs == 0)
1238 		wakeup(&ct->ct_upcallrefs);
1239 	return (SU_OK);
1240 }
1241 
1242 /*
1243  * Wait for all upcalls in progress to complete.
1244  */
1245 static void
1246 clnt_vc_upcallsdone(struct ct_data *ct)
1247 {
1248 
1249 	SOCK_RECVBUF_LOCK_ASSERT(ct->ct_socket);
1250 
1251 	while (ct->ct_upcallrefs > 0)
1252 		(void) msleep(&ct->ct_upcallrefs,
1253 		    SOCKBUF_MTX(&ct->ct_socket->so_rcv), 0, "rpcvcup", 0);
1254 }
1255 
1256 /*
1257  * Do a TLS upcall to the rpctlscd daemon, as required.
1258  * This function runs as a kthread.
1259  */
1260 static void
1261 clnt_vc_dotlsupcall(void *data)
1262 {
1263 	CLIENT *cl = (CLIENT *)data;
1264 	struct ct_data *ct = (struct ct_data *)cl->cl_private;
1265 	enum clnt_stat ret;
1266 	uint32_t reterr;
1267 
1268 	CURVNET_SET(ct->ct_socket->so_vnet);
1269 	mtx_lock(&ct->ct_lock);
1270 	ct->ct_rcvstate |= RPCRCVSTATE_UPCALLTHREAD;
1271 	while (!ct->ct_closed) {
1272 		if ((ct->ct_rcvstate & RPCRCVSTATE_UPCALLNEEDED) != 0) {
1273 			ct->ct_rcvstate &= ~RPCRCVSTATE_UPCALLNEEDED;
1274 			ct->ct_rcvstate |= RPCRCVSTATE_UPCALLINPROG;
1275 			if (ct->ct_sslrefno != 0 && ct->ct_sslrefno !=
1276 			    RPCTLS_REFNO_HANDSHAKE) {
1277 				mtx_unlock(&ct->ct_lock);
1278 				ret = rpctls_cl_handlerecord(ct->ct_sslsec,
1279 				    ct->ct_sslusec, ct->ct_sslrefno, &reterr);
1280 				mtx_lock(&ct->ct_lock);
1281 			}
1282 			ct->ct_rcvstate &= ~RPCRCVSTATE_UPCALLINPROG;
1283 			if (ret == RPC_SUCCESS && reterr == RPCTLSERR_OK)
1284 				ct->ct_rcvstate |= RPCRCVSTATE_NORMAL;
1285 			else
1286 				ct->ct_rcvstate |= RPCRCVSTATE_NONAPPDATA;
1287 			wakeup(&ct->ct_rcvstate);
1288 		}
1289 		if ((ct->ct_rcvstate & RPCRCVSTATE_SOUPCALLNEEDED) != 0) {
1290 			ct->ct_rcvstate &= ~RPCRCVSTATE_SOUPCALLNEEDED;
1291 			mtx_unlock(&ct->ct_lock);
1292 			SOCK_RECVBUF_LOCK(ct->ct_socket);
1293 			clnt_vc_soupcall(ct->ct_socket, ct, M_NOWAIT);
1294 			SOCK_RECVBUF_UNLOCK(ct->ct_socket);
1295 			mtx_lock(&ct->ct_lock);
1296 		}
1297 		msleep(&ct->ct_sslrefno, &ct->ct_lock, 0, "clntvcdu", hz);
1298 	}
1299 	ct->ct_rcvstate &= ~RPCRCVSTATE_UPCALLTHREAD;
1300 	wakeup(&ct->ct_sslrefno);
1301 	mtx_unlock(&ct->ct_lock);
1302 	CLNT_RELEASE(cl);
1303 	CURVNET_RESTORE();
1304 	kthread_exit();
1305 }
1306