xref: /freebsd/sys/rpc/clnt_vc.c (revision 7a7741af18d6c8a804cc643cb7ecda9d730c6aa6)
1 /*	$NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-3-Clause
5  *
6  * Copyright (c) 2009, Sun Microsystems, Inc.
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions are met:
11  * - Redistributions of source code must retain the above copyright notice,
12  *   this list of conditions and the following disclaimer.
13  * - Redistributions in binary form must reproduce the above copyright notice,
14  *   this list of conditions and the following disclaimer in the documentation
15  *   and/or other materials provided with the distribution.
16  * - Neither the name of Sun Microsystems, Inc. nor the names of its
17  *   contributors may be used to endorse or promote products derived
18  *   from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 /*
35  * clnt_tcp.c, Implements a TCP/IP based, client side RPC.
36  *
37  * Copyright (C) 1984, Sun Microsystems, Inc.
38  *
39  * TCP-based RPC supports 'batched calls'.
40  * A sequence of calls may be batched up in a send buffer.  The rpc call
41  * returns immediately to the client even though the call was not necessarily
42  * sent.  The batching occurs if the results' xdr routine is NULL (0) AND
43  * the rpc timeout value is zero (see clnt.h, rpc).
44  *
45  * Clients should NOT casually batch calls that in fact return results; that is,
46  * the server side should be aware that a call is batched and not produce any
47  * return message.  Batched calls that produce many result messages can
48  * deadlock (netlock) the client and the server....
49  *
50  * Now go hang yourself.
51  */
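/*
 * A minimal sketch of a batched-style call at this layer (illustrative only;
 * cl, proc and args are hypothetical names): issuing the call through
 * cl->cl_ops->cl_call() with a zero timeout makes clnt_vc_call() send the
 * request and return RPC_TIMEDOUT immediately instead of sleeping for a
 * reply.
 *
 *	struct timeval zero = { 0, 0 };
 *	struct mbuf *results = NULL;
 *	enum clnt_stat stat;
 *
 *	stat = cl->cl_ops->cl_call(cl, NULL, proc, args, &results, zero);
 *
 * RPC_TIMEDOUT is the expected status on this path; no reply is awaited.
 */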
52 
53 #include "opt_kern_tls.h"
54 
55 #include <sys/param.h>
56 #include <sys/systm.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/ktls.h>
60 #include <sys/lock.h>
61 #include <sys/malloc.h>
62 #include <sys/mbuf.h>
63 #include <sys/mutex.h>
64 #include <sys/pcpu.h>
65 #include <sys/proc.h>
66 #include <sys/protosw.h>
67 #include <sys/socket.h>
68 #include <sys/socketvar.h>
69 #include <sys/sx.h>
70 #include <sys/syslog.h>
71 #include <sys/time.h>
72 #include <sys/uio.h>
73 
74 #include <net/vnet.h>
75 
76 #include <netinet/tcp.h>
77 
78 #include <rpc/rpc.h>
79 #include <rpc/rpc_com.h>
80 #include <rpc/krpc.h>
81 #include <rpc/rpcsec_tls.h>
82 
83 struct cmessage {
84         struct cmsghdr cmsg;
85         struct cmsgcred cmcred;
86 };
87 
88 static enum clnt_stat clnt_vc_call(CLIENT *, struct rpc_callextra *,
89     rpcproc_t, struct mbuf *, struct mbuf **, struct timeval);
90 static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
91 static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *);
92 static void clnt_vc_abort(CLIENT *);
93 static bool_t clnt_vc_control(CLIENT *, u_int, void *);
94 static void clnt_vc_close(CLIENT *);
95 static void clnt_vc_destroy(CLIENT *);
96 static bool_t time_not_ok(struct timeval *);
97 static int clnt_vc_soupcall(struct socket *so, void *arg, int waitflag);
98 static void clnt_vc_dotlsupcall(void *data);
99 
100 static const struct clnt_ops clnt_vc_ops = {
101 	.cl_call =	clnt_vc_call,
102 	.cl_abort =	clnt_vc_abort,
103 	.cl_geterr =	clnt_vc_geterr,
104 	.cl_freeres =	clnt_vc_freeres,
105 	.cl_close =	clnt_vc_close,
106 	.cl_destroy =	clnt_vc_destroy,
107 	.cl_control =	clnt_vc_control
108 };
109 
110 static void clnt_vc_upcallsdone(struct ct_data *);
111 
112 /*
113  * Create a client handle for a connection.
114  * Default options are set, which the user can change using clnt_control().
115  * The rpc/vc package does buffering similar to stdio, so the client
116  * must pick send and receive buffer sizes; 0 => use the default.
117  * NB: the socket pointer is stored in the handle's private data (ct_socket).
118  * NB: rpch->cl_auth is set to null authentication.  The caller may wish to
119  * set this to something more useful.
120  *
121  * so should be an open socket; it is connected here if not already connected.
122  */
123 CLIENT *
124 clnt_vc_create(
125 	struct socket *so,		/* open socket */
126 	struct sockaddr *raddr,		/* server's address */
127 	const rpcprog_t prog,		/* program number */
128 	const rpcvers_t vers,		/* version number */
129 	size_t sendsz,			/* send buffer size */
130 	size_t recvsz,			/* receive buffer size */
131 	int intrflag)			/* interruptible */
132 {
133 	CLIENT *cl;			/* client handle */
134 	struct ct_data *ct = NULL;	/* per-connection private data */
135 	struct timeval now;
136 	struct rpc_msg call_msg;
137 	static uint32_t disrupt;
138 	struct __rpc_sockinfo si;
139 	XDR xdrs;
140 	int error, interrupted, one = 1, sleep_flag;
141 	struct sockopt sopt;
142 
143 	if (disrupt == 0)
144 		disrupt = (uint32_t)(long)raddr;
145 
146 	cl = (CLIENT *)mem_alloc(sizeof (*cl));
147 	ct = (struct ct_data *)mem_alloc(sizeof (*ct));
148 
149 	mtx_init(&ct->ct_lock, "ct->ct_lock", NULL, MTX_DEF);
150 	ct->ct_threads = 0;
151 	ct->ct_closing = FALSE;
152 	ct->ct_closed = FALSE;
153 	ct->ct_upcallrefs = 0;
154 	ct->ct_rcvstate = RPCRCVSTATE_NORMAL;
155 
156 	if ((so->so_state & SS_ISCONNECTED) == 0) {
157 		error = soconnect(so, raddr, curthread);
158 		SOCK_LOCK(so);
159 		interrupted = 0;
160 		sleep_flag = PSOCK;
161 		if (intrflag != 0)
162 			sleep_flag |= PCATCH;
163 		while ((so->so_state & SS_ISCONNECTING)
164 		    && so->so_error == 0) {
165 			error = msleep(&so->so_timeo, SOCK_MTX(so),
166 			    sleep_flag, "connec", 0);
167 			if (error) {
168 				if (error == EINTR || error == ERESTART)
169 					interrupted = 1;
170 				break;
171 			}
172 		}
173 		if (error == 0) {
174 			error = so->so_error;
175 			so->so_error = 0;
176 		}
177 		SOCK_UNLOCK(so);
178 		if (error) {
179 			if (!interrupted)
180 				so->so_state &= ~SS_ISCONNECTING;
181 			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
182 			rpc_createerr.cf_error.re_errno = error;
183 			goto err;
184 		}
185 	}
186 
187 	if (!__rpc_socket2sockinfo(so, &si)) {
188 		goto err;
189 	}
190 
191 	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
192 		bzero(&sopt, sizeof(sopt));
193 		sopt.sopt_dir = SOPT_SET;
194 		sopt.sopt_level = SOL_SOCKET;
195 		sopt.sopt_name = SO_KEEPALIVE;
196 		sopt.sopt_val = &one;
197 		sopt.sopt_valsize = sizeof(one);
198 		sosetopt(so, &sopt);
199 	}
200 
201 	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
202 		bzero(&sopt, sizeof(sopt));
203 		sopt.sopt_dir = SOPT_SET;
204 		sopt.sopt_level = IPPROTO_TCP;
205 		sopt.sopt_name = TCP_NODELAY;
206 		sopt.sopt_val = &one;
207 		sopt.sopt_valsize = sizeof(one);
208 		sosetopt(so, &sopt);
209 	}
210 
211 	ct->ct_closeit = FALSE;
212 
213 	/*
214 	 * Set up private data struct
215 	 */
216 	ct->ct_socket = so;
217 	ct->ct_wait.tv_sec = -1;
218 	ct->ct_wait.tv_usec = -1;
219 	memcpy(&ct->ct_addr, raddr, raddr->sa_len);
220 
221 	/*
222 	 * Initialize call message
223 	 */
224 	getmicrotime(&now);
225 	ct->ct_xid = ((uint32_t)++disrupt) ^ __RPC_GETXID(&now);
226 	call_msg.rm_xid = ct->ct_xid;
227 	call_msg.rm_direction = CALL;
228 	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
229 	call_msg.rm_call.cb_prog = (uint32_t)prog;
230 	call_msg.rm_call.cb_vers = (uint32_t)vers;
231 
232 	/*
233 	 * pre-serialize the static part of the call msg and stash it away
234 	 */
235 	xdrmem_create(&xdrs, ct->ct_mcallc, MCALL_MSG_SIZE,
236 	    XDR_ENCODE);
237 	if (! xdr_callhdr(&xdrs, &call_msg)) {
238 		if (ct->ct_closeit) {
239 			soclose(ct->ct_socket);
240 		}
241 		goto err;
242 	}
243 	ct->ct_mpos = XDR_GETPOS(&xdrs);
244 	XDR_DESTROY(&xdrs);
245 	ct->ct_waitchan = "rpcrecv";
246 	ct->ct_waitflag = 0;
247 
248 	/*
249 	 * Create a client handle which uses xdrmbuf for serialization
250 	 * and authnone for authentication.
251 	 */
252 	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
253 	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
254 	error = soreserve(ct->ct_socket, sendsz, recvsz);
255 	if (error != 0) {
256 		if (ct->ct_closeit) {
257 			soclose(ct->ct_socket);
258 		}
259 		goto err;
260 	}
261 	cl->cl_refs = 1;
262 	cl->cl_ops = &clnt_vc_ops;
263 	cl->cl_private = ct;
264 	cl->cl_auth = authnone_create();
265 
266 	SOCK_RECVBUF_LOCK(ct->ct_socket);
267 	soupcall_set(ct->ct_socket, SO_RCV, clnt_vc_soupcall, ct);
268 	SOCK_RECVBUF_UNLOCK(ct->ct_socket);
269 
270 	ct->ct_raw = NULL;
271 	ct->ct_record = NULL;
272 	ct->ct_record_resid = 0;
273 	ct->ct_sslrefno = 0;
274 	TAILQ_INIT(&ct->ct_pending);
275 	return (cl);
276 
277 err:
278 	mtx_destroy(&ct->ct_lock);
279 	mem_free(ct, sizeof (struct ct_data));
280 	mem_free(cl, sizeof (CLIENT));
281 
282 	return ((CLIENT *)NULL);
283 }
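/*
 * A minimal usage sketch for clnt_vc_create() (illustrative only; NFS_PROG
 * and NFS_VER3 stand in for any program/version pair, error handling is
 * abbreviated, and <netinet/in.h> is assumed for sockaddr_in):
 *
 *	struct socket *so;
 *	struct sockaddr_in sin;
 *	CLIENT *cl;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
 *	    curthread->td_ucred, curthread);
 *	if (error != 0)
 *		return (error);
 *	memset(&sin, 0, sizeof(sin));
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_family = AF_INET;
 *	sin.sin_port = htons(2049);
 *	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *	cl = clnt_vc_create(so, (struct sockaddr *)&sin, NFS_PROG, NFS_VER3,
 *	    0, 0, 1);
 *	if (cl == NULL) {
 *		soclose(so);
 *		return (rpc_createerr.cf_error.re_errno);
 *	}
 *
 * Passing 0, 0 selects the default send/receive buffer sizes and 1 makes the
 * connect interruptible.  cl->cl_auth starts out as authnone_create(); the
 * caller may replace it with stronger credentials.
 */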
284 
285 static enum clnt_stat
286 clnt_vc_call(
287 	CLIENT		*cl,		/* client handle */
288 	struct rpc_callextra *ext,	/* call metadata */
289 	rpcproc_t	proc,		/* procedure number */
290 	struct mbuf	*args,		/* pointer to args */
291 	struct mbuf	**resultsp,	/* pointer to results */
292 	struct timeval	utimeout)
293 {
294 	struct ct_data *ct = (struct ct_data *) cl->cl_private;
295 	AUTH *auth;
296 	struct rpc_err *errp;
297 	enum clnt_stat stat;
298 	XDR xdrs;
299 	struct rpc_msg reply_msg;
300 	bool_t ok;
301 	int nrefreshes = 2;		/* number of times to refresh cred */
302 	struct timeval timeout;
303 	uint32_t xid;
304 	struct mbuf *mreq = NULL, *results;
305 	struct ct_request *cr;
306 	int error, maxextsiz, trycnt;
307 #ifdef KERN_TLS
308 	u_int maxlen;
309 #endif
310 
311 	cr = malloc(sizeof(struct ct_request), M_RPC, M_WAITOK);
312 
313 	mtx_lock(&ct->ct_lock);
314 
315 	if (ct->ct_closing || ct->ct_closed) {
316 		mtx_unlock(&ct->ct_lock);
317 		free(cr, M_RPC);
318 		return (RPC_CANTSEND);
319 	}
320 	ct->ct_threads++;
321 
322 	if (ext) {
323 		auth = ext->rc_auth;
324 		errp = &ext->rc_err;
325 	} else {
326 		auth = cl->cl_auth;
327 		errp = &ct->ct_error;
328 	}
329 
330 	cr->cr_mrep = NULL;
331 	cr->cr_error = 0;
332 
333 	if (ct->ct_wait.tv_usec == -1) {
334 		timeout = utimeout;	/* use supplied timeout */
335 	} else {
336 		timeout = ct->ct_wait;	/* use default timeout */
337 	}
338 
339 	/*
340 	 * After 15sec of looping, allow it to return RPC_CANTSEND, which will
341 	 * cause the clnt_reconnect layer to create a new TCP connection.
342 	 */
343 	trycnt = 15 * hz;
344 call_again:
345 	mtx_assert(&ct->ct_lock, MA_OWNED);
346 	if (ct->ct_closing || ct->ct_closed) {
347 		ct->ct_threads--;
348 		wakeup(ct);
349 		mtx_unlock(&ct->ct_lock);
350 		free(cr, M_RPC);
351 		return (RPC_CANTSEND);
352 	}
353 
354 	ct->ct_xid++;
355 	xid = ct->ct_xid;
356 
357 	mtx_unlock(&ct->ct_lock);
358 
359 	/*
360 	 * Leave space to prepend the record mark.
361 	 */
362 	mreq = m_gethdr(M_WAITOK, MT_DATA);
363 	mreq->m_data += sizeof(uint32_t);
364 	KASSERT(ct->ct_mpos + sizeof(uint32_t) <= MHLEN,
365 	    ("RPC header too big"));
366 	bcopy(ct->ct_mcallc, mreq->m_data, ct->ct_mpos);
367 	mreq->m_len = ct->ct_mpos;
368 
369 	/*
370 	 * The XID is the first thing in the request.
371 	 */
372 	*mtod(mreq, uint32_t *) = htonl(xid);
373 
374 	xdrmbuf_create(&xdrs, mreq, XDR_ENCODE);
375 
376 	errp->re_status = stat = RPC_SUCCESS;
377 
378 	if ((! XDR_PUTINT32(&xdrs, &proc)) ||
379 	    (! AUTH_MARSHALL(auth, xid, &xdrs,
380 		m_copym(args, 0, M_COPYALL, M_WAITOK)))) {
381 		errp->re_status = stat = RPC_CANTENCODEARGS;
382 		mtx_lock(&ct->ct_lock);
383 		goto out;
384 	}
385 	mreq->m_pkthdr.len = m_length(mreq, NULL);
386 
387 	/*
388 	 * Prepend a record marker containing the packet length.
389 	 */
390 	M_PREPEND(mreq, sizeof(uint32_t), M_WAITOK);
391 	*mtod(mreq, uint32_t *) =
392 		htonl(0x80000000 | (mreq->m_pkthdr.len - sizeof(uint32_t)));
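	/*
	 * This 32-bit record mark follows the RPC record marking standard
	 * (RFC 5531): the high-order bit means "last fragment" and the low
	 * 31 bits carry the fragment length, so a 100-byte call is preceded
	 * by htonl(0x80000000 | 100).  clnt_vc_soupcall() parses the same
	 * format when reassembling replies.
	 */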
393 
394 	cr->cr_xid = xid;
395 	mtx_lock(&ct->ct_lock);
396 	/*
397 	 * Check to see if the other end has already started to close down
398 	 * the connection. The upcall will have set ct_error.re_status
399 	 * to RPC_CANTRECV if this is the case.
400 	 * If the other end starts to close down the connection after this
401 	 * point, it will be detected later when cr_error is checked,
402 	 * since the request is in the ct_pending queue.
403 	 */
404 	if (ct->ct_error.re_status == RPC_CANTRECV) {
405 		if (errp != &ct->ct_error) {
406 			errp->re_errno = ct->ct_error.re_errno;
407 			errp->re_status = RPC_CANTRECV;
408 		}
409 		stat = RPC_CANTRECV;
410 		goto out;
411 	}
412 
413 	/* For TLS, wait for an upcall to be done, as required. */
414 	while ((ct->ct_rcvstate & (RPCRCVSTATE_NORMAL |
415 	    RPCRCVSTATE_NONAPPDATA)) == 0)
416 		msleep(&ct->ct_rcvstate, &ct->ct_lock, 0, "rpcrcvst", hz);
417 
418 	TAILQ_INSERT_TAIL(&ct->ct_pending, cr, cr_link);
419 	mtx_unlock(&ct->ct_lock);
420 
421 	if (ct->ct_sslrefno != 0) {
422 		/*
423 		 * Copy the mbuf chain to a chain of ext_pgs mbuf(s)
424 		 * as required by KERN_TLS.
425 		 */
426 		maxextsiz = TLS_MAX_MSG_SIZE_V10_2;
427 #ifdef KERN_TLS
428 		if (rpctls_getinfo(&maxlen, false, false))
429 			maxextsiz = min(maxextsiz, maxlen);
430 #endif
431 		mreq = _rpc_copym_into_ext_pgs(mreq, maxextsiz);
432 	}
433 	/*
434 	 * sosend consumes mreq.
435 	 */
436 	error = sosend(ct->ct_socket, NULL, NULL, mreq, NULL, 0, curthread);
437 	mreq = NULL;
438 	if (error == EMSGSIZE || (error == ERESTART &&
439 	    (ct->ct_waitflag & PCATCH) == 0 && trycnt-- > 0)) {
440 		SOCK_SENDBUF_LOCK(ct->ct_socket);
441 		sbwait(ct->ct_socket, SO_SND);
442 		SOCK_SENDBUF_UNLOCK(ct->ct_socket);
443 		AUTH_VALIDATE(auth, xid, NULL, NULL);
444 		mtx_lock(&ct->ct_lock);
445 		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
446 		/* Sleep for 1 clock tick before trying the sosend() again. */
447 		mtx_unlock(&ct->ct_lock);
448 		pause("rpclpsnd", 1);
449 		mtx_lock(&ct->ct_lock);
450 		goto call_again;
451 	}
452 
453 	reply_msg.acpted_rply.ar_verf.oa_flavor = AUTH_NULL;
454 	reply_msg.acpted_rply.ar_verf.oa_base = cr->cr_verf;
455 	reply_msg.acpted_rply.ar_verf.oa_length = 0;
456 	reply_msg.acpted_rply.ar_results.where = NULL;
457 	reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
458 
459 	mtx_lock(&ct->ct_lock);
460 	if (error) {
461 		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
462 		errp->re_errno = error;
463 		errp->re_status = stat = RPC_CANTSEND;
464 		goto out;
465 	}
466 
467 	/*
468 	 * Check to see if we got an upcall while waiting for the
469 	 * lock.  In either case (an error or a reply), remove the
470 	 * request from ct->ct_pending.
471 	 */
472 	if (cr->cr_error) {
473 		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
474 		errp->re_errno = cr->cr_error;
475 		errp->re_status = stat = RPC_CANTRECV;
476 		goto out;
477 	}
478 	if (cr->cr_mrep) {
479 		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
480 		goto got_reply;
481 	}
482 
483 	/*
484 	 * Hack to provide rpc-based message passing: a zero timeout means do not wait for a reply.
485 	 */
486 	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
487 		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
488 		errp->re_status = stat = RPC_TIMEDOUT;
489 		goto out;
490 	}
491 
492 	error = msleep(cr, &ct->ct_lock, ct->ct_waitflag, ct->ct_waitchan,
493 	    tvtohz(&timeout));
494 
495 	TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
496 
497 	if (error) {
498 		/*
499 		 * The sleep returned an error, so no reply arrived for
500 		 * our request.  Turn the error code into an
501 		 * appropriate client status.
502 		 */
503 		errp->re_errno = error;
504 		switch (error) {
505 		case EINTR:
506 			stat = RPC_INTR;
507 			break;
508 		case EWOULDBLOCK:
509 			stat = RPC_TIMEDOUT;
510 			break;
511 		default:
512 			stat = RPC_CANTRECV;
513 		}
514 		errp->re_status = stat;
515 		goto out;
516 	} else {
517 		/*
518 		 * We were woken up by the upcall.  If the
519 		 * upcall had a receive error, report that,
520 		 * otherwise we have a reply.
521 		 */
522 		if (cr->cr_error) {
523 			errp->re_errno = cr->cr_error;
524 			errp->re_status = stat = RPC_CANTRECV;
525 			goto out;
526 		}
527 	}
528 
529 got_reply:
530 	/*
531 	 * Now decode and validate the response. We need to drop the
532 	 * lock since xdr_replymsg may end up sleeping in malloc.
533 	 */
534 	mtx_unlock(&ct->ct_lock);
535 
536 	if (ext && ext->rc_feedback)
537 		ext->rc_feedback(FEEDBACK_OK, proc, ext->rc_feedback_arg);
538 
539 	xdrmbuf_create(&xdrs, cr->cr_mrep, XDR_DECODE);
540 	ok = xdr_replymsg(&xdrs, &reply_msg);
541 	cr->cr_mrep = NULL;
542 
543 	if (ok) {
544 		if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
545 		    (reply_msg.acpted_rply.ar_stat == SUCCESS))
546 			errp->re_status = stat = RPC_SUCCESS;
547 		else
548 			stat = _seterr_reply(&reply_msg, errp);
549 
550 		if (stat == RPC_SUCCESS) {
551 			results = xdrmbuf_getall(&xdrs);
552 			if (!AUTH_VALIDATE(auth, xid,
553 				&reply_msg.acpted_rply.ar_verf,
554 				&results)) {
555 				errp->re_status = stat = RPC_AUTHERROR;
556 				errp->re_why = AUTH_INVALIDRESP;
557 			} else {
558 				KASSERT(results,
559 				    ("auth validated but no result"));
560 				*resultsp = results;
561 			}
562 		}		/* end successful completion */
563 		/*
564 		 * If unsuccessful AND error is an authentication error
565 		 * then refresh credentials and try again, else break
566 		 */
567 		else if (stat == RPC_AUTHERROR)
568 			/* maybe our credentials need to be refreshed ... */
569 			if (nrefreshes > 0 &&
570 			    AUTH_REFRESH(auth, &reply_msg)) {
571 				nrefreshes--;
572 				XDR_DESTROY(&xdrs);
573 				mtx_lock(&ct->ct_lock);
574 				goto call_again;
575 			}
576 		/* end of unsuccessful completion */
577 	}	/* end of valid reply message */
578 	else {
579 		errp->re_status = stat = RPC_CANTDECODERES;
580 	}
581 	XDR_DESTROY(&xdrs);
582 	mtx_lock(&ct->ct_lock);
583 out:
584 	mtx_assert(&ct->ct_lock, MA_OWNED);
585 
586 	KASSERT(stat != RPC_SUCCESS || *resultsp,
587 	    ("RPC_SUCCESS without reply"));
588 
589 	if (mreq)
590 		m_freem(mreq);
591 	if (cr->cr_mrep)
592 		m_freem(cr->cr_mrep);
593 
594 	ct->ct_threads--;
595 	if (ct->ct_closing)
596 		wakeup(ct);
597 
598 	mtx_unlock(&ct->ct_lock);
599 
600 	if (auth && stat != RPC_SUCCESS)
601 		AUTH_VALIDATE(auth, xid, NULL, NULL);
602 
603 	free(cr, M_RPC);
604 
605 	return (stat);
606 }
607 
608 static void
609 clnt_vc_geterr(CLIENT *cl, struct rpc_err *errp)
610 {
611 	struct ct_data *ct = (struct ct_data *) cl->cl_private;
612 
613 	*errp = ct->ct_error;
614 }
615 
616 static bool_t
617 clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
618 {
619 	XDR xdrs;
620 	bool_t dummy;
621 
622 	xdrs.x_op = XDR_FREE;
623 	dummy = (*xdr_res)(&xdrs, res_ptr);
624 
625 	return (dummy);
626 }
627 
628 /*ARGSUSED*/
629 static void
630 clnt_vc_abort(CLIENT *cl)
631 {
632 }
633 
634 static bool_t
635 clnt_vc_control(CLIENT *cl, u_int request, void *info)
636 {
637 	struct ct_data *ct = (struct ct_data *)cl->cl_private;
638 	void *infop = info;
639 	SVCXPRT *xprt;
640 	uint64_t *p;
641 	int error;
642 	static u_int thrdnum = 0;
643 
644 	mtx_lock(&ct->ct_lock);
645 
646 	switch (request) {
647 	case CLSET_FD_CLOSE:
648 		ct->ct_closeit = TRUE;
649 		mtx_unlock(&ct->ct_lock);
650 		return (TRUE);
651 	case CLSET_FD_NCLOSE:
652 		ct->ct_closeit = FALSE;
653 		mtx_unlock(&ct->ct_lock);
654 		return (TRUE);
655 	default:
656 		break;
657 	}
658 
659 	/* for other requests which use info */
660 	if (info == NULL) {
661 		mtx_unlock(&ct->ct_lock);
662 		return (FALSE);
663 	}
664 	switch (request) {
665 	case CLSET_TIMEOUT:
666 		if (time_not_ok((struct timeval *)info)) {
667 			mtx_unlock(&ct->ct_lock);
668 			return (FALSE);
669 		}
670 		ct->ct_wait = *(struct timeval *)infop;
671 		break;
672 	case CLGET_TIMEOUT:
673 		*(struct timeval *)infop = ct->ct_wait;
674 		break;
675 	case CLGET_SERVER_ADDR:
676 		(void) memcpy(info, &ct->ct_addr, (size_t)ct->ct_addr.ss_len);
677 		break;
678 	case CLGET_SVC_ADDR:
679 		/*
680 		 * Slightly different semantics to userland - we use
681 		 * sockaddr instead of netbuf.
682 		 */
683 		memcpy(info, &ct->ct_addr, ct->ct_addr.ss_len);
684 		break;
685 	case CLSET_SVC_ADDR:		/* set to new address */
686 		mtx_unlock(&ct->ct_lock);
687 		return (FALSE);
688 	case CLGET_XID:
689 		*(uint32_t *)info = ct->ct_xid;
690 		break;
691 	case CLSET_XID:
692 		/* This will set the xid of the NEXT call */
693 		/* decrement by 1 as clnt_vc_call() increments once */
694 		ct->ct_xid = *(uint32_t *)info - 1;
695 		break;
696 	case CLGET_VERS:
697 		/*
698 		 * This RELIES on the information that, in the call body,
699 		 * the version number field is the fifth field from the
700 		 * beginning of the RPC header. MUST be changed if the
701 		 * call_struct is changed
702 		 */
703 		*(uint32_t *)info =
704 		    ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
705 		    4 * BYTES_PER_XDR_UNIT));
706 		break;
707 
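	/*
	 * Layout note: xdr_callhdr() pre-serialized the call header as
	 * xid, direction, rpcvers, prog, vers -- one 4-byte XDR unit each --
	 * so the program number lives at offset 3 * BYTES_PER_XDR_UNIT and
	 * the version at 4 * BYTES_PER_XDR_UNIT within ct_mcallc.  The
	 * CLGET/CLSET_VERS and CLGET/CLSET_PROG cases rely on this layout.
	 */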
708 	case CLSET_VERS:
709 		*(uint32_t *)(void *)(ct->ct_mcallc +
710 		    4 * BYTES_PER_XDR_UNIT) =
711 		    htonl(*(uint32_t *)info);
712 		break;
713 
714 	case CLGET_PROG:
715 		/*
716 		 * This RELIES on the information that, in the call body,
717 		 * the program number field is the fourth field from the
718 		 * beginning of the RPC header. MUST be changed if the
719 		 * call_struct is changed
720 		 */
721 		*(uint32_t *)info =
722 		    ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
723 		    3 * BYTES_PER_XDR_UNIT));
724 		break;
725 
726 	case CLSET_PROG:
727 		*(uint32_t *)(void *)(ct->ct_mcallc +
728 		    3 * BYTES_PER_XDR_UNIT) =
729 		    htonl(*(uint32_t *)info);
730 		break;
731 
732 	case CLSET_WAITCHAN:
733 		ct->ct_waitchan = (const char *)info;
734 		break;
735 
736 	case CLGET_WAITCHAN:
737 		*(const char **) info = ct->ct_waitchan;
738 		break;
739 
740 	case CLSET_INTERRUPTIBLE:
741 		if (*(int *) info)
742 			ct->ct_waitflag = PCATCH;
743 		else
744 			ct->ct_waitflag = 0;
745 		break;
746 
747 	case CLGET_INTERRUPTIBLE:
748 		if (ct->ct_waitflag)
749 			*(int *) info = TRUE;
750 		else
751 			*(int *) info = FALSE;
752 		break;
753 
754 	case CLSET_BACKCHANNEL:
755 		xprt = (SVCXPRT *)info;
756 		if (ct->ct_backchannelxprt == NULL) {
757 			SVC_ACQUIRE(xprt);
758 			xprt->xp_p2 = ct;
759 			if (ct->ct_sslrefno != 0)
760 				xprt->xp_tls = RPCTLS_FLAGS_HANDSHAKE;
761 			ct->ct_backchannelxprt = xprt;
762 		}
763 		break;
764 
765 	case CLSET_TLS:
766 		p = (uint64_t *)info;
767 		ct->ct_sslsec = *p++;
768 		ct->ct_sslusec = *p++;
769 		ct->ct_sslrefno = *p;
770 		if (ct->ct_sslrefno != RPCTLS_REFNO_HANDSHAKE) {
771 			/* cl ref cnt is released by clnt_vc_dotlsupcall(). */
772 			CLNT_ACQUIRE(cl);
773 			mtx_unlock(&ct->ct_lock);
774 			/* Start the kthread that handles upcalls. */
775 			error = kthread_add(clnt_vc_dotlsupcall, cl,
776 			    NULL, NULL, 0, 0, "krpctls%u", thrdnum++);
777 			if (error != 0)
778 				panic("Can't add KRPC thread error %d", error);
779 		} else
780 			mtx_unlock(&ct->ct_lock);
781 		return (TRUE);
782 
783 	case CLSET_BLOCKRCV:
784 		if (*(int *) info) {
785 			ct->ct_rcvstate &= ~RPCRCVSTATE_NORMAL;
786 			ct->ct_rcvstate |= RPCRCVSTATE_TLSHANDSHAKE;
787 		} else {
788 			ct->ct_rcvstate &= ~RPCRCVSTATE_TLSHANDSHAKE;
789 			ct->ct_rcvstate |= RPCRCVSTATE_NORMAL;
790 		}
791 		break;
792 
793 	default:
794 		mtx_unlock(&ct->ct_lock);
795 		return (FALSE);
796 	}
797 
798 	mtx_unlock(&ct->ct_lock);
799 	return (TRUE);
800 }
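/*
 * An illustrative (hypothetical) caller-side sketch of the control knobs
 * handled above, using the generic CLNT_CONTROL() macro, which dispatches to
 * clnt_vc_control() for this transport:
 *
 *	struct timeval tv = { 30, 0 };
 *	int one = 1;
 *
 *	CLNT_CONTROL(cl, CLSET_TIMEOUT, &tv);
 *	CLNT_CONTROL(cl, CLSET_WAITCHAN, "nfsreq");
 *	CLNT_CONTROL(cl, CLSET_INTERRUPTIBLE, &one);
 */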
801 
802 static void
803 clnt_vc_close(CLIENT *cl)
804 {
805 	struct ct_data *ct = (struct ct_data *) cl->cl_private;
806 	struct ct_request *cr;
807 
808 	mtx_lock(&ct->ct_lock);
809 
810 	if (ct->ct_closed) {
811 		mtx_unlock(&ct->ct_lock);
812 		return;
813 	}
814 
815 	if (ct->ct_closing) {
816 		while (ct->ct_closing)
817 			msleep(ct, &ct->ct_lock, 0, "rpcclose", 0);
818 		KASSERT(ct->ct_closed, ("client should be closed"));
819 		mtx_unlock(&ct->ct_lock);
820 		return;
821 	}
822 
823 	if (ct->ct_socket) {
824 		ct->ct_closing = TRUE;
825 		mtx_unlock(&ct->ct_lock);
826 
827 		SOCK_RECVBUF_LOCK(ct->ct_socket);
828 		if (ct->ct_socket->so_rcv.sb_upcall != NULL) {
829 			soupcall_clear(ct->ct_socket, SO_RCV);
830 			clnt_vc_upcallsdone(ct);
831 		}
832 		SOCK_RECVBUF_UNLOCK(ct->ct_socket);
833 
834 		/*
835 		 * Abort any pending requests and wait until everyone
836 		 * has finished with clnt_vc_call.
837 		 */
838 		mtx_lock(&ct->ct_lock);
839 		TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
840 			cr->cr_xid = 0;
841 			cr->cr_error = ESHUTDOWN;
842 			wakeup(cr);
843 		}
844 
845 		while (ct->ct_threads)
846 			msleep(ct, &ct->ct_lock, 0, "rpcclose", 0);
847 	}
848 
849 	ct->ct_closing = FALSE;
850 	ct->ct_closed = TRUE;
851 	wakeup(&ct->ct_sslrefno);
852 	mtx_unlock(&ct->ct_lock);
853 	wakeup(ct);
854 }
855 
856 static void
857 clnt_vc_destroy(CLIENT *cl)
858 {
859 	struct ct_data *ct = (struct ct_data *) cl->cl_private;
860 	struct socket *so = NULL;
861 	SVCXPRT *xprt;
862 	uint32_t reterr;
863 
864 	clnt_vc_close(cl);
865 
866 	mtx_lock(&ct->ct_lock);
867 	xprt = ct->ct_backchannelxprt;
868 	ct->ct_backchannelxprt = NULL;
869 	if (xprt != NULL) {
870 		mtx_unlock(&ct->ct_lock);	/* To avoid a LOR. */
871 		sx_xlock(&xprt->xp_lock);
872 		mtx_lock(&ct->ct_lock);
873 		xprt->xp_p2 = NULL;
874 		sx_xunlock(&xprt->xp_lock);
875 		SVC_RELEASE(xprt);
876 	}
877 
878 	if (ct->ct_socket) {
879 		if (ct->ct_closeit) {
880 			so = ct->ct_socket;
881 		}
882 	}
883 
884 	/* Wait for the upcall kthread to terminate. */
885 	while ((ct->ct_rcvstate & RPCRCVSTATE_UPCALLTHREAD) != 0)
886 		msleep(&ct->ct_sslrefno, &ct->ct_lock, 0,
887 		    "clntvccl", hz);
888 	mtx_unlock(&ct->ct_lock);
889 
890 	mtx_destroy(&ct->ct_lock);
891 	if (so) {
892 		if (ct->ct_sslrefno != 0) {
893 			/*
894 			 * If the TLS handshake is in progress, the upcall
895 			 * will fail, but the socket should be closed by the
896 			 * daemon, since the connect upcall has just failed.
897 			 */
898 			if (ct->ct_sslrefno != RPCTLS_REFNO_HANDSHAKE) {
899 				/*
900 				 * If the upcall fails, the socket has
901 				 * probably been closed via the rpctlscd
902 				 * daemon having crashed or been
903 				 * restarted, so ignore return stat.
904 				 */
905 				rpctls_cl_disconnect(ct->ct_sslsec,
906 				    ct->ct_sslusec, ct->ct_sslrefno,
907 				    &reterr);
908 			}
909 			/* Must sorele() to get rid of reference. */
910 			CURVNET_SET(so->so_vnet);
911 			sorele(so);
912 			CURVNET_RESTORE();
913 		} else {
914 			soshutdown(so, SHUT_WR);
915 			soclose(so);
916 		}
917 	}
918 	m_freem(ct->ct_record);
919 	m_freem(ct->ct_raw);
920 	mem_free(ct, sizeof(struct ct_data));
921 	if (cl->cl_netid && cl->cl_netid[0])
922 		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
923 	if (cl->cl_tp && cl->cl_tp[0])
924 		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
925 	mem_free(cl, sizeof(CLIENT));
926 }
927 
928 /*
929  * Make sure that the time is not garbage.  A value of -1 is disallowed.
930  * Note this is different from time_not_ok() in clnt_dg.c.
931  */
932 static bool_t
933 time_not_ok(struct timeval *t)
934 {
935 	return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
936 		t->tv_usec <= -1 || t->tv_usec > 1000000);
937 }
938 
939 int
940 clnt_vc_soupcall(struct socket *so, void *arg, int waitflag)
941 {
942 	struct ct_data *ct = (struct ct_data *) arg;
943 	struct uio uio;
944 	struct mbuf *m, *m2;
945 	struct ct_request *cr;
946 	int error, rcvflag, foundreq;
947 	uint32_t xid_plus_direction[2], header;
948 	SVCXPRT *xprt;
949 	struct cf_conn *cd;
950 	u_int rawlen;
951 	struct cmsghdr *cmsg;
952 	struct tls_get_record tgr;
953 
954 	/*
955 	 * RPC-over-TLS needs to block reception during
956 	 * upcalls since the upcall will be doing I/O on
957 	 * the socket via openssl library calls.
958 	 */
959 	mtx_lock(&ct->ct_lock);
960 	if ((ct->ct_rcvstate & (RPCRCVSTATE_NORMAL |
961 	    RPCRCVSTATE_NONAPPDATA)) == 0) {
962 		/* Mark that a socket upcall needs to be done. */
963 		if ((ct->ct_rcvstate & (RPCRCVSTATE_UPCALLNEEDED |
964 		    RPCRCVSTATE_UPCALLINPROG)) != 0)
965 			ct->ct_rcvstate |= RPCRCVSTATE_SOUPCALLNEEDED;
966 		mtx_unlock(&ct->ct_lock);
967 		return (SU_OK);
968 	}
969 	mtx_unlock(&ct->ct_lock);
970 
971 	/*
972 	 * If another thread is already here, it must be in
973 	 * soreceive(), so just return to avoid races with it.
974 	 * ct_upcallrefs is protected by the socket receive buffer lock
975 	 * which is held in this function, except when
976 	 * soreceive() is called.
977 	 */
978 	if (ct->ct_upcallrefs > 0)
979 		return (SU_OK);
980 	ct->ct_upcallrefs++;
981 
982 	/*
983 	 * Read as much as possible off the socket and link it
984 	 * onto ct_raw.
985 	 */
986 	for (;;) {
987 		uio.uio_resid = 1000000000;
988 		uio.uio_td = curthread;
989 		m2 = m = NULL;
990 		rcvflag = MSG_DONTWAIT | MSG_SOCALLBCK;
991 		if (ct->ct_sslrefno != 0 && (ct->ct_rcvstate &
992 		    RPCRCVSTATE_NORMAL) != 0)
993 			rcvflag |= MSG_TLSAPPDATA;
994 		SOCK_RECVBUF_UNLOCK(so);
995 		error = soreceive(so, NULL, &uio, &m, &m2, &rcvflag);
996 		SOCK_RECVBUF_LOCK(so);
997 
998 		if (error == EWOULDBLOCK) {
999 			/*
1000 			 * We must re-test for readability after
1001 			 * taking the lock to protect us in the case
1002 			 * where a new packet arrives on the socket
1003 			 * after our call to soreceive fails with
1004 			 * EWOULDBLOCK.
1005 			 */
1006 			error = 0;
1007 			if (!soreadable(so))
1008 				break;
1009 			continue;
1010 		}
1011 		if (error == 0 && m == NULL) {
1012 			/*
1013 			 * We must have got EOF trying
1014 			 * to read from the stream.
1015 			 */
1016 			error = ECONNRESET;
1017 		}
1018 
1019 		/*
1020 		 * A return of ENXIO indicates that there is an
1021 		 * alert record at the head of the
1022 		 * socket's receive queue, for TLS connections.
1023 		 * This record needs to be handled in userland
1024 		 * via an SSL_read() call, so do an upcall to the daemon.
1025 		 */
1026 		if (ct->ct_sslrefno != 0 && error == ENXIO) {
1027 			/* Disable reception, marking an upcall needed. */
1028 			mtx_lock(&ct->ct_lock);
1029 			ct->ct_rcvstate |= RPCRCVSTATE_UPCALLNEEDED;
1030 			/*
1031 			 * If an upcall is needed, wake up the kthread
1032 			 * that runs clnt_vc_dotlsupcall().
1033 			 */
1034 			wakeup(&ct->ct_sslrefno);
1035 			mtx_unlock(&ct->ct_lock);
1036 			break;
1037 		}
1038 		if (error != 0)
1039 			break;
1040 
1041 		/* Process the TLS record type control message, if any. */
1042 		if (m2 != NULL) {
1043 			cmsg = mtod(m2, struct cmsghdr *);
1044 			if (cmsg->cmsg_type == TLS_GET_RECORD &&
1045 			    cmsg->cmsg_len == CMSG_LEN(sizeof(tgr))) {
1046 				memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr));
1047 				/*
1048 				 * TLS_RLTYPE_ALERT records should already
1049 				 * have been handled, since soreceive() would
1050 				 * have returned ENXIO for them.  Just throw
1051 				 * any other non-TLS_RLTYPE_APP records away.
1052 				 */
1053 				if (tgr.tls_type != TLS_RLTYPE_APP) {
1054 					m_freem(m);
1055 					m_free(m2);
1056 					mtx_lock(&ct->ct_lock);
1057 					ct->ct_rcvstate &=
1058 					    ~RPCRCVSTATE_NONAPPDATA;
1059 					ct->ct_rcvstate |= RPCRCVSTATE_NORMAL;
1060 					mtx_unlock(&ct->ct_lock);
1061 					continue;
1062 				}
1063 			}
1064 			m_free(m2);
1065 		}
1066 
1067 		if (ct->ct_raw != NULL)
1068 			m_last(ct->ct_raw)->m_next = m;
1069 		else
1070 			ct->ct_raw = m;
1071 	}
1072 	rawlen = m_length(ct->ct_raw, NULL);
1073 
1074 	/* Now, process as much of ct_raw as possible. */
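	/*
	 * ct_record_resid counts how many bytes of the current record
	 * fragment are still expected and ct_record_eor remembers whether
	 * the record mark had its last-fragment bit set, so a reply that
	 * arrives across several soreceive() calls (or several fragments)
	 * keeps accumulating in ct_record until resid reaches zero with
	 * eor true.
	 */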
1075 	for (;;) {
1076 		/*
1077 		 * If ct_record_resid is zero, we are waiting for a
1078 		 * record mark.
1079 		 */
1080 		if (ct->ct_record_resid == 0) {
1081 			if (rawlen < sizeof(uint32_t))
1082 				break;
1083 			m_copydata(ct->ct_raw, 0, sizeof(uint32_t),
1084 			    (char *)&header);
1085 			header = ntohl(header);
1086 			ct->ct_record_resid = header & 0x7fffffff;
1087 			ct->ct_record_eor = ((header & 0x80000000) != 0);
1088 			m_adj(ct->ct_raw, sizeof(uint32_t));
1089 			rawlen -= sizeof(uint32_t);
1090 		} else {
1091 			/*
1092 			 * Move as much of the record as possible to
1093 			 * ct_record.
1094 			 */
1095 			if (rawlen == 0)
1096 				break;
1097 			if (rawlen <= ct->ct_record_resid) {
1098 				if (ct->ct_record != NULL)
1099 					m_last(ct->ct_record)->m_next =
1100 					    ct->ct_raw;
1101 				else
1102 					ct->ct_record = ct->ct_raw;
1103 				ct->ct_raw = NULL;
1104 				ct->ct_record_resid -= rawlen;
1105 				rawlen = 0;
1106 			} else {
1107 				m = m_split(ct->ct_raw, ct->ct_record_resid,
1108 				    M_NOWAIT);
1109 				if (m == NULL)
1110 					break;
1111 				if (ct->ct_record != NULL)
1112 					m_last(ct->ct_record)->m_next =
1113 					    ct->ct_raw;
1114 				else
1115 					ct->ct_record = ct->ct_raw;
1116 				rawlen -= ct->ct_record_resid;
1117 				ct->ct_record_resid = 0;
1118 				ct->ct_raw = m;
1119 			}
1120 			if (ct->ct_record_resid > 0)
1121 				break;
1122 
1123 			/*
1124 			 * If we have the entire record, see if we can
1125 			 * match it to a request.
1126 			 */
1127 			if (ct->ct_record_eor) {
1128 				/*
1129 				 * The XID is in the first uint32_t of
1130 				 * the reply and the message direction
1131 				 * is the second one.
1132 				 */
1133 				if (ct->ct_record->m_len <
1134 				    sizeof(xid_plus_direction) &&
1135 				    m_length(ct->ct_record, NULL) <
1136 				    sizeof(xid_plus_direction)) {
1137 					/*
1138 					 * The data in the TCP stream is
1139 					 * corrupted such that there is no
1140 					 * valid RPC message to parse.
1141 					 * The best we can do is close
1142 					 * this connection and allow
1143 					 * clnt_reconnect_call() to try
1144 					 * to establish a new
1145 					 * connection.
1146 					 */
1147 					printf("clnt_vc_soupcall: "
1148 					    "connection data corrupted\n");
1149 					error = ECONNRESET;
1150 					goto wakeup_all;
1151 				}
1152 				m_copydata(ct->ct_record, 0,
1153 				    sizeof(xid_plus_direction),
1154 				    (char *)xid_plus_direction);
1155 				xid_plus_direction[0] =
1156 				    ntohl(xid_plus_direction[0]);
1157 				xid_plus_direction[1] =
1158 				    ntohl(xid_plus_direction[1]);
1159 				/* Check message direction. */
1160 				if (xid_plus_direction[1] == CALL) {
1161 					/* This is a backchannel request. */
1162 					mtx_lock(&ct->ct_lock);
1163 					xprt = ct->ct_backchannelxprt;
1164 					if (xprt == NULL) {
1165 						mtx_unlock(&ct->ct_lock);
1166 						/* Just throw it away. */
1167 						m_freem(ct->ct_record);
1168 						ct->ct_record = NULL;
1169 					} else {
1170 						cd = (struct cf_conn *)
1171 						    xprt->xp_p1;
1172 						m2 = cd->mreq;
1173 						/*
1174 						 * The requests are chained
1175 						 * in the m_nextpkt list.
1176 						 */
1177 						while (m2 != NULL &&
1178 						    m2->m_nextpkt != NULL)
1179 							/* Find end of list. */
1180 							m2 = m2->m_nextpkt;
1181 						if (m2 != NULL)
1182 							m2->m_nextpkt =
1183 							    ct->ct_record;
1184 						else
1185 							cd->mreq =
1186 							    ct->ct_record;
1187 						ct->ct_record->m_nextpkt =
1188 						    NULL;
1189 						ct->ct_record = NULL;
1190 						xprt_active(xprt);
1191 						mtx_unlock(&ct->ct_lock);
1192 					}
1193 				} else {
1194 					mtx_lock(&ct->ct_lock);
1195 					foundreq = 0;
1196 					TAILQ_FOREACH(cr, &ct->ct_pending,
1197 					    cr_link) {
1198 						if (cr->cr_xid ==
1199 						    xid_plus_direction[0]) {
1200 							/*
1201 							 * This one
1202 							 * matches. We leave
1203 							 * the reply mbuf in
1204 							 * cr->cr_mrep. Set
1205 							 * the XID to zero so
1206 							 * that we will ignore
1207 							 * any duplicated
1208 							 * replies.
1209 							 */
1210 							cr->cr_xid = 0;
1211 							cr->cr_mrep =
1212 							    ct->ct_record;
1213 							cr->cr_error = 0;
1214 							foundreq = 1;
1215 							wakeup(cr);
1216 							break;
1217 						}
1218 					}
1219 					mtx_unlock(&ct->ct_lock);
1220 
1221 					if (!foundreq)
1222 						m_freem(ct->ct_record);
1223 					ct->ct_record = NULL;
1224 				}
1225 			}
1226 		}
1227 	}
1228 
1229 	if (error != 0) {
1230 	wakeup_all:
1231 		/*
1232 		 * This socket is broken, so mark that it cannot
1233 		 * receive and fail all RPCs waiting for a reply
1234 		 * on it, so that they will be retried on a new
1235 		 * TCP connection created by clnt_reconnect_X().
1236 		 */
1237 		mtx_lock(&ct->ct_lock);
1238 		ct->ct_error.re_status = RPC_CANTRECV;
1239 		ct->ct_error.re_errno = error;
1240 		TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
1241 			cr->cr_error = error;
1242 			wakeup(cr);
1243 		}
1244 		mtx_unlock(&ct->ct_lock);
1245 	}
1246 
1247 	ct->ct_upcallrefs--;
1248 	if (ct->ct_upcallrefs < 0)
1249 		panic("rpcvc upcall refcnt");
1250 	if (ct->ct_upcallrefs == 0)
1251 		wakeup(&ct->ct_upcallrefs);
1252 	return (SU_OK);
1253 }
1254 
1255 /*
1256  * Wait for all upcalls in progress to complete.
1257  */
1258 static void
1259 clnt_vc_upcallsdone(struct ct_data *ct)
1260 {
1261 
1262 	SOCK_RECVBUF_LOCK_ASSERT(ct->ct_socket);
1263 
1264 	while (ct->ct_upcallrefs > 0)
1265 		(void) msleep(&ct->ct_upcallrefs,
1266 		    SOCKBUF_MTX(&ct->ct_socket->so_rcv), 0, "rpcvcup", 0);
1267 }
1268 
1269 /*
1270  * Do a TLS upcall to the rpctlscd daemon, as required.
1271  * This function runs as a kthread.
1272  */
1273 static void
1274 clnt_vc_dotlsupcall(void *data)
1275 {
1276 	CLIENT *cl = (CLIENT *)data;
1277 	struct ct_data *ct = (struct ct_data *)cl->cl_private;
1278 	enum clnt_stat ret;
1279 	uint32_t reterr;
1280 
1281 	mtx_lock(&ct->ct_lock);
1282 	ct->ct_rcvstate |= RPCRCVSTATE_UPCALLTHREAD;
1283 	while (!ct->ct_closed) {
1284 		if ((ct->ct_rcvstate & RPCRCVSTATE_UPCALLNEEDED) != 0) {
1285 			ct->ct_rcvstate &= ~RPCRCVSTATE_UPCALLNEEDED;
1286 			ct->ct_rcvstate |= RPCRCVSTATE_UPCALLINPROG;
1287 			if (ct->ct_sslrefno != 0 && ct->ct_sslrefno !=
1288 			    RPCTLS_REFNO_HANDSHAKE) {
1289 				mtx_unlock(&ct->ct_lock);
1290 				ret = rpctls_cl_handlerecord(ct->ct_sslsec,
1291 				    ct->ct_sslusec, ct->ct_sslrefno, &reterr);
1292 				mtx_lock(&ct->ct_lock);
1293 			}
1294 			ct->ct_rcvstate &= ~RPCRCVSTATE_UPCALLINPROG;
1295 			if (ret == RPC_SUCCESS && reterr == RPCTLSERR_OK)
1296 				ct->ct_rcvstate |= RPCRCVSTATE_NORMAL;
1297 			else
1298 				ct->ct_rcvstate |= RPCRCVSTATE_NONAPPDATA;
1299 			wakeup(&ct->ct_rcvstate);
1300 		}
1301 		if ((ct->ct_rcvstate & RPCRCVSTATE_SOUPCALLNEEDED) != 0) {
1302 			ct->ct_rcvstate &= ~RPCRCVSTATE_SOUPCALLNEEDED;
1303 			mtx_unlock(&ct->ct_lock);
1304 			SOCK_RECVBUF_LOCK(ct->ct_socket);
1305 			clnt_vc_soupcall(ct->ct_socket, ct, M_NOWAIT);
1306 			SOCK_RECVBUF_UNLOCK(ct->ct_socket);
1307 			mtx_lock(&ct->ct_lock);
1308 		}
1309 		msleep(&ct->ct_sslrefno, &ct->ct_lock, 0, "clntvcdu", hz);
1310 	}
1311 	ct->ct_rcvstate &= ~RPCRCVSTATE_UPCALLTHREAD;
1312 	wakeup(&ct->ct_sslrefno);
1313 	mtx_unlock(&ct->ct_lock);
1314 	CLNT_RELEASE(cl);
1315 	kthread_exit();
1316 }
1317