/*	$NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * clnt_tcp.c, Implements a TCP/IP based, client side RPC.
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 *
 * TCP based RPC supports 'batched calls'.
 * A sequence of calls may be batched-up in a send buffer.  The rpc call
 * returns immediately to the client even though the call was not necessarily
 * sent.  The batching occurs if the results' xdr routine is NULL (0) AND
 * the rpc timeout value is zero (see clnt.h, rpc).
 *
 * Clients should NOT casually batch calls that in fact return results; that
 * is, the server side should be aware that a call is batched and not produce
 * any return message.  Batched calls that produce many result messages can
 * deadlock (netlock) the client and the server....
 *
 * Now go hang yourself.
 */
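
/*
 * A minimal sketch of the record-mark framing used on the wire (the
 * variable names here are illustrative only): each fragment written by
 * clnt_vc_call() is preceded by a 4-byte mark whose high bit flags the
 * last fragment of a record and whose low 31 bits give the fragment
 * length, and clnt_vc_soupcall() decodes it the same way:
 *
 *	uint32_t mark = htonl(0x80000000 | (uint32_t)len);
 *	...
 *	uint32_t header = ntohl(mark);
 *	int last_frag = (header & 0x80000000) != 0;
 *	uint32_t frag_len = header & 0x7fffffff;
 */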

#include "opt_kern_tls.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/syslog.h>
#include <sys/time.h>
#include <sys/uio.h>

#include <net/vnet.h>

#include <netinet/tcp.h>

#include <rpc/rpc.h>
#include <rpc/rpc_com.h>
#include <rpc/krpc.h>
#include <rpc/rpcsec_tls.h>

struct cmessage {
	struct cmsghdr cmsg;
	struct cmsgcred cmcred;
};

static enum clnt_stat clnt_vc_call(CLIENT *, struct rpc_callextra *,
    rpcproc_t, struct mbuf *, struct mbuf **, struct timeval);
static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *);
static void clnt_vc_abort(CLIENT *);
static bool_t clnt_vc_control(CLIENT *, u_int, void *);
static void clnt_vc_close(CLIENT *);
static void clnt_vc_destroy(CLIENT *);
static bool_t time_not_ok(struct timeval *);
static int clnt_vc_soupcall(struct socket *so, void *arg, int waitflag);
static void clnt_vc_dotlsupcall(void *data);

static const struct clnt_ops clnt_vc_ops = {
	.cl_call =	clnt_vc_call,
	.cl_abort =	clnt_vc_abort,
	.cl_geterr =	clnt_vc_geterr,
	.cl_freeres =	clnt_vc_freeres,
	.cl_close =	clnt_vc_close,
	.cl_destroy =	clnt_vc_destroy,
	.cl_control =	clnt_vc_control
};

static void clnt_vc_upcallsdone(struct ct_data *);

/*
 * Create a client handle for a connection.
 * Default options are set, which the user can change using clnt_control().
 * The rpc/vc package does buffering similar to stdio, so the client
 * must pick send and receive buffer sizes, 0 => use the default.
 * NB: fd is copied into a private area.
 * NB: The rpch->cl_auth is set to null authentication.  Caller may wish to
 *     set this to something more useful.
 *
 * fd should be an open socket
 */
CLIENT *
clnt_vc_create(
	struct socket *so,		/* open socket */
	struct sockaddr *raddr,		/* server's address */
	const rpcprog_t prog,		/* program number */
	const rpcvers_t vers,		/* version number */
	size_t sendsz,			/* buffer send size */
	size_t recvsz,			/* buffer recv size */
	int intrflag)			/* interruptible */
{
	CLIENT *cl;			/* client handle */
	struct ct_data *ct = NULL;	/* client handle */
	struct timeval now;
	struct rpc_msg call_msg;
	static uint32_t disrupt;
	struct __rpc_sockinfo si;
	XDR xdrs;
	int error, interrupted, one = 1, sleep_flag;
	struct sockopt sopt;

	if (disrupt == 0)
		disrupt = (uint32_t)(long)raddr;

	cl = (CLIENT *)mem_alloc(sizeof (*cl));
	ct = (struct ct_data *)mem_alloc(sizeof (*ct));

	mtx_init(&ct->ct_lock, "ct->ct_lock", NULL, MTX_DEF);
	ct->ct_threads = 0;
	ct->ct_closing = FALSE;
	ct->ct_closed = FALSE;
	ct->ct_upcallrefs = 0;
	ct->ct_rcvstate = RPCRCVSTATE_NORMAL;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = soconnect(so, raddr, curthread);
		SOCK_LOCK(so);
		interrupted = 0;
		sleep_flag = PSOCK;
		if (intrflag != 0)
			sleep_flag |= PCATCH;
		while ((so->so_state & SS_ISCONNECTING)
		    && so->so_error == 0) {
			error = msleep(&so->so_timeo, SOCK_MTX(so),
			    sleep_flag, "connec", 0);
			if (error) {
				if (error == EINTR || error == ERESTART)
					interrupted = 1;
				break;
			}
		}
		if (error == 0) {
			error = so->so_error;
			so->so_error = 0;
		}
		SOCK_UNLOCK(so);
		if (error) {
			if (!interrupted)
				so->so_state &= ~SS_ISCONNECTING;
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = error;
			goto err;
		}
	}

	if (!__rpc_socket2sockinfo(so, &si)) {
		goto err;
	}

	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = SOL_SOCKET;
		sopt.sopt_name = SO_KEEPALIVE;
		sopt.sopt_val = &one;
		sopt.sopt_valsize = sizeof(one);
		sosetopt(so, &sopt);
	}

	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = IPPROTO_TCP;
		sopt.sopt_name = TCP_NODELAY;
		sopt.sopt_val = &one;
		sopt.sopt_valsize = sizeof(one);
		sosetopt(so, &sopt);
	}

	ct->ct_closeit = FALSE;

	/*
	 * Set up private data struct
	 */
	ct->ct_socket = so;
	ct->ct_wait.tv_sec = -1;
	ct->ct_wait.tv_usec = -1;
	memcpy(&ct->ct_addr, raddr, raddr->sa_len);

	/*
	 * Initialize call message
	 */
	getmicrotime(&now);
	ct->ct_xid = ((uint32_t)++disrupt) ^ __RPC_GETXID(&now);
	call_msg.rm_xid = ct->ct_xid;
	call_msg.rm_direction = CALL;
	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
	call_msg.rm_call.cb_prog = (uint32_t)prog;
	call_msg.rm_call.cb_vers = (uint32_t)vers;

	/*
	 * pre-serialize the static part of the call msg and stash it away
	 */
	xdrmem_create(&xdrs, ct->ct_mcallc, MCALL_MSG_SIZE,
	    XDR_ENCODE);
	if (! xdr_callhdr(&xdrs, &call_msg)) {
		if (ct->ct_closeit) {
			soclose(ct->ct_socket);
		}
		goto err;
	}
	ct->ct_mpos = XDR_GETPOS(&xdrs);
	XDR_DESTROY(&xdrs);
	ct->ct_waitchan = "rpcrecv";
	ct->ct_waitflag = 0;

	/*
	 * Create a client handle which uses xdrrec for serialization
	 * and authnone for authentication.
	 */
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	error = soreserve(ct->ct_socket, sendsz, recvsz);
	if (error != 0) {
		if (ct->ct_closeit) {
			soclose(ct->ct_socket);
		}
		goto err;
	}
	cl->cl_refs = 1;
	cl->cl_ops = &clnt_vc_ops;
	cl->cl_private = ct;
	cl->cl_auth = authnone_create();

	SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
	soupcall_set(ct->ct_socket, SO_RCV, clnt_vc_soupcall, ct);
	SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);

	ct->ct_raw = NULL;
	ct->ct_record = NULL;
	ct->ct_record_resid = 0;
	ct->ct_sslrefno = 0;
	TAILQ_INIT(&ct->ct_pending);
	return (cl);

err:
	mtx_destroy(&ct->ct_lock);
	mem_free(ct, sizeof (struct ct_data));
	mem_free(cl, sizeof (CLIENT));

	return ((CLIENT *)NULL);
}

static enum clnt_stat
clnt_vc_call(
	CLIENT *cl,			/* client handle */
	struct rpc_callextra *ext,	/* call metadata */
	rpcproc_t proc,			/* procedure number */
	struct mbuf *args,		/* pointer to args */
	struct mbuf **resultsp,		/* pointer to results */
	struct timeval utimeout)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	AUTH *auth;
	struct rpc_err *errp;
	enum clnt_stat stat;
	XDR xdrs;
	struct rpc_msg reply_msg;
	bool_t ok;
	int nrefreshes = 2;		/* number of times to refresh cred */
	struct timeval timeout;
	uint32_t xid;
	struct mbuf *mreq = NULL, *results;
	struct ct_request *cr;
	int error, maxextsiz, trycnt;
#ifdef KERN_TLS
	u_int maxlen;
#endif

	cr = malloc(sizeof(struct ct_request), M_RPC, M_WAITOK);

	mtx_lock(&ct->ct_lock);

	if (ct->ct_closing || ct->ct_closed) {
		mtx_unlock(&ct->ct_lock);
		free(cr, M_RPC);
		return (RPC_CANTSEND);
	}
	ct->ct_threads++;

	if (ext) {
		auth = ext->rc_auth;
		errp = &ext->rc_err;
	} else {
		auth = cl->cl_auth;
		errp = &ct->ct_error;
	}

	cr->cr_mrep = NULL;
	cr->cr_error = 0;

	if (ct->ct_wait.tv_usec == -1) {
		timeout = utimeout;	/* use supplied timeout */
	} else {
		timeout = ct->ct_wait;	/* use default timeout */
	}

	/*
	 * After 15sec of looping, allow it to return RPC_CANTSEND, which will
	 * cause the clnt_reconnect layer to create a new TCP connection.
	 */
	trycnt = 15 * hz;
call_again:
	mtx_assert(&ct->ct_lock, MA_OWNED);
	if (ct->ct_closing || ct->ct_closed) {
		ct->ct_threads--;
		wakeup(ct);
		mtx_unlock(&ct->ct_lock);
		free(cr, M_RPC);
		return (RPC_CANTSEND);
	}

	ct->ct_xid++;
	xid = ct->ct_xid;

	mtx_unlock(&ct->ct_lock);

	/*
	 * Leave space to pre-pend the record mark.
	 */
	mreq = m_gethdr(M_WAITOK, MT_DATA);
	mreq->m_data += sizeof(uint32_t);
	KASSERT(ct->ct_mpos + sizeof(uint32_t) <= MHLEN,
	    ("RPC header too big"));
	bcopy(ct->ct_mcallc, mreq->m_data, ct->ct_mpos);
	mreq->m_len = ct->ct_mpos;

	/*
	 * The XID is the first thing in the request.
	 */
	*mtod(mreq, uint32_t *) = htonl(xid);

	xdrmbuf_create(&xdrs, mreq, XDR_ENCODE);

	errp->re_status = stat = RPC_SUCCESS;

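	/*
	 * Marshal the procedure number and the argument mbufs after the
	 * pre-serialized call header.  AUTH_MARSHALL() is given a copy
	 * (m_copym()) of the args chain, so the caller's original chain
	 * is left intact and can be reused if the call is retried.
	 */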
	if ((! XDR_PUTINT32(&xdrs, &proc)) ||
	    (! AUTH_MARSHALL(auth, xid, &xdrs,
		m_copym(args, 0, M_COPYALL, M_WAITOK)))) {
		errp->re_status = stat = RPC_CANTENCODEARGS;
		mtx_lock(&ct->ct_lock);
		goto out;
	}
	mreq->m_pkthdr.len = m_length(mreq, NULL);

	/*
	 * Prepend a record marker containing the packet length.
	 */
	M_PREPEND(mreq, sizeof(uint32_t), M_WAITOK);
	*mtod(mreq, uint32_t *) =
	    htonl(0x80000000 | (mreq->m_pkthdr.len - sizeof(uint32_t)));

	cr->cr_xid = xid;
	mtx_lock(&ct->ct_lock);
	/*
	 * Check to see if the other end has already started to close down
	 * the connection.  The upcall will have set ct_error.re_status
	 * to RPC_CANTRECV if this is the case.
	 * If the other end starts to close down the connection after this
	 * point, it will be detected later when cr_error is checked,
	 * since the request is in the ct_pending queue.
	 */
	if (ct->ct_error.re_status == RPC_CANTRECV) {
		if (errp != &ct->ct_error) {
			errp->re_errno = ct->ct_error.re_errno;
			errp->re_status = RPC_CANTRECV;
		}
		stat = RPC_CANTRECV;
		goto out;
	}

	/* For TLS, wait for an upcall to be done, as required. */
	while ((ct->ct_rcvstate & (RPCRCVSTATE_NORMAL |
	    RPCRCVSTATE_NONAPPDATA)) == 0)
		msleep(&ct->ct_rcvstate, &ct->ct_lock, 0, "rpcrcvst", hz);

	TAILQ_INSERT_TAIL(&ct->ct_pending, cr, cr_link);
	mtx_unlock(&ct->ct_lock);

	if (ct->ct_sslrefno != 0) {
		/*
		 * Copy the mbuf chain to a chain of ext_pgs mbuf(s)
		 * as required by KERN_TLS.
		 */
		maxextsiz = TLS_MAX_MSG_SIZE_V10_2;
#ifdef KERN_TLS
		if (rpctls_getinfo(&maxlen, false, false))
			maxextsiz = min(maxextsiz, maxlen);
#endif
		mreq = _rpc_copym_into_ext_pgs(mreq, maxextsiz);
	}
	/*
	 * sosend consumes mreq.
	 */
	error = sosend(ct->ct_socket, NULL, NULL, mreq, NULL, 0, curthread);
	mreq = NULL;
	if (error == EMSGSIZE || (error == ERESTART &&
	    (ct->ct_waitflag & PCATCH) == 0 && trycnt-- > 0)) {
		SOCKBUF_LOCK(&ct->ct_socket->so_snd);
		sbwait(ct->ct_socket, SO_SND);
		SOCKBUF_UNLOCK(&ct->ct_socket->so_snd);
		AUTH_VALIDATE(auth, xid, NULL, NULL);
		mtx_lock(&ct->ct_lock);
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		/* Sleep for 1 clock tick before trying the sosend() again. */
		mtx_unlock(&ct->ct_lock);
		pause("rpclpsnd", 1);
		mtx_lock(&ct->ct_lock);
		goto call_again;
	}

	reply_msg.acpted_rply.ar_verf.oa_flavor = AUTH_NULL;
	reply_msg.acpted_rply.ar_verf.oa_base = cr->cr_verf;
	reply_msg.acpted_rply.ar_verf.oa_length = 0;
	reply_msg.acpted_rply.ar_results.where = NULL;
	reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;

	mtx_lock(&ct->ct_lock);
	if (error) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		errp->re_errno = error;
		errp->re_status = stat = RPC_CANTSEND;
		goto out;
	}

	/*
	 * Check to see if we got an upcall while waiting for the
	 * lock.  In both these cases, the request is removed from
	 * ct->ct_pending below.
	 */
	if (cr->cr_error) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		errp->re_errno = cr->cr_error;
		errp->re_status = stat = RPC_CANTRECV;
		goto out;
	}
	if (cr->cr_mrep) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		goto got_reply;
	}

	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		errp->re_status = stat = RPC_TIMEDOUT;
		goto out;
	}

	error = msleep(cr, &ct->ct_lock, ct->ct_waitflag, ct->ct_waitchan,
	    tvtohz(&timeout));

	TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);

	if (error) {
		/*
		 * The sleep returned an error, so our request has not
		 * been matched to a reply.  Turn the error code into an
		 * appropriate client status.
		 */
		errp->re_errno = error;
		switch (error) {
		case EINTR:
			stat = RPC_INTR;
			break;
		case EWOULDBLOCK:
			stat = RPC_TIMEDOUT;
			break;
		default:
			stat = RPC_CANTRECV;
		}
		errp->re_status = stat;
		goto out;
	} else {
		/*
		 * We were woken up by the upcall.  If the
		 * upcall had a receive error, report that,
		 * otherwise we have a reply.
		 */
		if (cr->cr_error) {
			errp->re_errno = cr->cr_error;
			errp->re_status = stat = RPC_CANTRECV;
			goto out;
		}
	}

got_reply:
	/*
	 * Now decode and validate the response.  We need to drop the
	 * lock since xdr_replymsg may end up sleeping in malloc.
	 */
	mtx_unlock(&ct->ct_lock);

	if (ext && ext->rc_feedback)
		ext->rc_feedback(FEEDBACK_OK, proc, ext->rc_feedback_arg);

	xdrmbuf_create(&xdrs, cr->cr_mrep, XDR_DECODE);
	ok = xdr_replymsg(&xdrs, &reply_msg);
	cr->cr_mrep = NULL;

	if (ok) {
		if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
		    (reply_msg.acpted_rply.ar_stat == SUCCESS))
			errp->re_status = stat = RPC_SUCCESS;
		else
			stat = _seterr_reply(&reply_msg, errp);

		if (stat == RPC_SUCCESS) {
			results = xdrmbuf_getall(&xdrs);
			if (!AUTH_VALIDATE(auth, xid,
			    &reply_msg.acpted_rply.ar_verf,
			    &results)) {
				errp->re_status = stat = RPC_AUTHERROR;
				errp->re_why = AUTH_INVALIDRESP;
			} else {
				KASSERT(results,
				    ("auth validated but no result"));
				*resultsp = results;
			}
		}		/* end successful completion */
		/*
		 * If unsuccessful AND error is an authentication error
		 * then refresh credentials and try again, else break
		 */
		else if (stat == RPC_AUTHERROR)
			/* maybe our credentials need to be refreshed ... */
			if (nrefreshes > 0 &&
			    AUTH_REFRESH(auth, &reply_msg)) {
				nrefreshes--;
				XDR_DESTROY(&xdrs);
				mtx_lock(&ct->ct_lock);
				goto call_again;
			}
			/* end of unsuccessful completion */
	}	/* end of valid reply message */
	else {
		errp->re_status = stat = RPC_CANTDECODERES;
	}
	XDR_DESTROY(&xdrs);
	mtx_lock(&ct->ct_lock);
out:
	mtx_assert(&ct->ct_lock, MA_OWNED);

	KASSERT(stat != RPC_SUCCESS || *resultsp,
	    ("RPC_SUCCESS without reply"));

	if (mreq)
		m_freem(mreq);
	if (cr->cr_mrep)
		m_freem(cr->cr_mrep);

	ct->ct_threads--;
	if (ct->ct_closing)
		wakeup(ct);

	mtx_unlock(&ct->ct_lock);

	if (auth && stat != RPC_SUCCESS)
		AUTH_VALIDATE(auth, xid, NULL, NULL);

	free(cr, M_RPC);

	return (stat);
}

static void
clnt_vc_geterr(CLIENT *cl, struct rpc_err *errp)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;

	*errp = ct->ct_error;
}

static bool_t
clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
{
	XDR xdrs;
	bool_t dummy;

	xdrs.x_op = XDR_FREE;
	dummy = (*xdr_res)(&xdrs, res_ptr);

	return (dummy);
}

/*ARGSUSED*/
static void
clnt_vc_abort(CLIENT *cl)
{
}

static bool_t
clnt_vc_control(CLIENT *cl, u_int request, void *info)
{
	struct ct_data *ct = (struct ct_data *)cl->cl_private;
	void *infop = info;
	SVCXPRT *xprt;
	uint64_t *p;
	int error;
	static u_int thrdnum = 0;

	mtx_lock(&ct->ct_lock);

	switch (request) {
	case CLSET_FD_CLOSE:
		ct->ct_closeit = TRUE;
		mtx_unlock(&ct->ct_lock);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		ct->ct_closeit = FALSE;
		mtx_unlock(&ct->ct_lock);
		return (TRUE);
	default:
		break;
	}

	/* for other requests which use info */
	if (info == NULL) {
		mtx_unlock(&ct->ct_lock);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			mtx_unlock(&ct->ct_lock);
			return (FALSE);
		}
		ct->ct_wait = *(struct timeval *)infop;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)infop = ct->ct_wait;
		break;
	case CLGET_SERVER_ADDR:
		(void) memcpy(info, &ct->ct_addr, (size_t)ct->ct_addr.ss_len);
		break;
	case CLGET_SVC_ADDR:
		/*
		 * Slightly different semantics to userland - we use
		 * sockaddr instead of netbuf.
		 */
		memcpy(info, &ct->ct_addr, ct->ct_addr.ss_len);
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		mtx_unlock(&ct->ct_lock);
		return (FALSE);
	case CLGET_XID:
		*(uint32_t *)info = ct->ct_xid;
		break;
	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		/* decrement by 1 as clnt_vc_call() increments once */
		ct->ct_xid = *(uint32_t *)info - 1;
		break;
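
	/*
	 * The pre-serialized call header in ct_mcallc was built by
	 * xdr_callhdr() and holds, in order, one XDR unit each for the
	 * xid, the message direction, the RPC version, the program
	 * number and the version number.  The CLGET/CLSET_VERS and
	 * CLGET/CLSET_PROG cases below index into it directly, which is
	 * why they depend on that layout.
	 */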
	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header.  MUST be changed if the
		 * call_struct is changed.
		 */
		*(uint32_t *)info =
		    ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
		    4 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_VERS:
		*(uint32_t *)(void *)(ct->ct_mcallc +
		    4 * BYTES_PER_XDR_UNIT) =
		    htonl(*(uint32_t *)info);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header.  MUST be changed if the
		 * call_struct is changed.
		 */
		*(uint32_t *)info =
		    ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
		    3 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_PROG:
		*(uint32_t *)(void *)(ct->ct_mcallc +
		    3 * BYTES_PER_XDR_UNIT) =
		    htonl(*(uint32_t *)info);
		break;

	case CLSET_WAITCHAN:
		ct->ct_waitchan = (const char *)info;
		break;

	case CLGET_WAITCHAN:
		*(const char **) info = ct->ct_waitchan;
		break;

	case CLSET_INTERRUPTIBLE:
		if (*(int *) info)
			ct->ct_waitflag = PCATCH;
		else
			ct->ct_waitflag = 0;
		break;

	case CLGET_INTERRUPTIBLE:
		if (ct->ct_waitflag)
			*(int *) info = TRUE;
		else
			*(int *) info = FALSE;
		break;

	case CLSET_BACKCHANNEL:
		xprt = (SVCXPRT *)info;
		if (ct->ct_backchannelxprt == NULL) {
			xprt->xp_p2 = ct;
			if (ct->ct_sslrefno != 0)
				xprt->xp_tls = RPCTLS_FLAGS_HANDSHAKE;
			ct->ct_backchannelxprt = xprt;
		}
		break;

	case CLSET_TLS:
		p = (uint64_t *)info;
		ct->ct_sslsec = *p++;
		ct->ct_sslusec = *p++;
		ct->ct_sslrefno = *p;
		if (ct->ct_sslrefno != RPCTLS_REFNO_HANDSHAKE) {
			mtx_unlock(&ct->ct_lock);
			/* Start the kthread that handles upcalls. */
			error = kthread_add(clnt_vc_dotlsupcall, ct,
			    NULL, NULL, 0, 0, "krpctls%u", thrdnum++);
			if (error != 0)
				panic("Can't add KRPC thread error %d", error);
		} else
			mtx_unlock(&ct->ct_lock);
		return (TRUE);

	case CLSET_BLOCKRCV:
		if (*(int *) info) {
			ct->ct_rcvstate &= ~RPCRCVSTATE_NORMAL;
			ct->ct_rcvstate |= RPCRCVSTATE_TLSHANDSHAKE;
		} else {
			ct->ct_rcvstate &= ~RPCRCVSTATE_TLSHANDSHAKE;
			ct->ct_rcvstate |= RPCRCVSTATE_NORMAL;
		}
		break;

	default:
		mtx_unlock(&ct->ct_lock);
		return (FALSE);
	}

	mtx_unlock(&ct->ct_lock);
	return (TRUE);
}

static void
clnt_vc_close(CLIENT *cl)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	struct ct_request *cr;

	mtx_lock(&ct->ct_lock);

	if (ct->ct_closed) {
		mtx_unlock(&ct->ct_lock);
		return;
	}

	if (ct->ct_closing) {
		while (ct->ct_closing)
			msleep(ct, &ct->ct_lock, 0, "rpcclose", 0);
		KASSERT(ct->ct_closed, ("client should be closed"));
		mtx_unlock(&ct->ct_lock);
		return;
	}

	if (ct->ct_socket) {
		ct->ct_closing = TRUE;
		mtx_unlock(&ct->ct_lock);

		SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
		if (ct->ct_socket->so_rcv.sb_upcall != NULL) {
			soupcall_clear(ct->ct_socket, SO_RCV);
			clnt_vc_upcallsdone(ct);
		}
		SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);

		/*
		 * Abort any pending requests and wait until everyone
		 * has finished with clnt_vc_call.
		 */
		mtx_lock(&ct->ct_lock);
		TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
			cr->cr_xid = 0;
			cr->cr_error = ESHUTDOWN;
			wakeup(cr);
		}

		while (ct->ct_threads)
			msleep(ct, &ct->ct_lock, 0, "rpcclose", 0);
	}

	ct->ct_closing = FALSE;
	ct->ct_closed = TRUE;
	wakeup(&ct->ct_sslrefno);
	mtx_unlock(&ct->ct_lock);
	wakeup(ct);
}

static void
clnt_vc_destroy(CLIENT *cl)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	struct socket *so = NULL;
	SVCXPRT *xprt;
	uint32_t reterr;

	clnt_vc_close(cl);

	mtx_lock(&ct->ct_lock);
	xprt = ct->ct_backchannelxprt;
	ct->ct_backchannelxprt = NULL;
	if (xprt != NULL) {
		mtx_unlock(&ct->ct_lock);	/* To avoid a LOR. */
		sx_xlock(&xprt->xp_lock);
		mtx_lock(&ct->ct_lock);
		xprt->xp_p2 = NULL;
		sx_xunlock(&xprt->xp_lock);
	}

	if (ct->ct_socket) {
		if (ct->ct_closeit) {
			so = ct->ct_socket;
		}
	}

	/* Wait for the upcall kthread to terminate. */
	while ((ct->ct_rcvstate & RPCRCVSTATE_UPCALLTHREAD) != 0)
		msleep(&ct->ct_sslrefno, &ct->ct_lock, 0,
		    "clntvccl", hz);
	mtx_unlock(&ct->ct_lock);

	mtx_destroy(&ct->ct_lock);
	if (so) {
		if (ct->ct_sslrefno != 0) {
			/*
			 * If the TLS handshake is in progress, the upcall
			 * will fail, but the socket should be closed by the
			 * daemon, since the connect upcall has just failed.
			 */
			if (ct->ct_sslrefno != RPCTLS_REFNO_HANDSHAKE) {
				/*
				 * If the upcall fails, the socket has
				 * probably been closed via the rpctlscd
				 * daemon having crashed or been
				 * restarted, so ignore return stat.
				 */
				rpctls_cl_disconnect(ct->ct_sslsec,
				    ct->ct_sslusec, ct->ct_sslrefno,
				    &reterr);
			}
			/* Must sorele() to get rid of reference. */
			CURVNET_SET(so->so_vnet);
			sorele(so);
			CURVNET_RESTORE();
		} else {
			soshutdown(so, SHUT_WR);
			soclose(so);
		}
	}
	m_freem(ct->ct_record);
	m_freem(ct->ct_raw);
	mem_free(ct, sizeof(struct ct_data));
	if (cl->cl_netid && cl->cl_netid[0])
		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
	if (cl->cl_tp && cl->cl_tp[0])
		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
	mem_free(cl, sizeof(CLIENT));
}

/*
 * Make sure that the time is not garbage.  -1 value is disallowed.
 * Note this is different from time_not_ok in clnt_dg.c
 */
static bool_t
time_not_ok(struct timeval *t)
{
	return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
	    t->tv_usec <= -1 || t->tv_usec > 1000000);
}

int
clnt_vc_soupcall(struct socket *so, void *arg, int waitflag)
{
	struct ct_data *ct = (struct ct_data *) arg;
	struct uio uio;
	struct mbuf *m, *m2;
	struct ct_request *cr;
	int error, rcvflag, foundreq;
	uint32_t xid_plus_direction[2], header;
	SVCXPRT *xprt;
	struct cf_conn *cd;
	u_int rawlen;
	struct cmsghdr *cmsg;
	struct tls_get_record tgr;

	/*
	 * RPC-over-TLS needs to block reception during
	 * upcalls since the upcall will be doing I/O on
	 * the socket via openssl library calls.
	 */
	mtx_lock(&ct->ct_lock);
	if ((ct->ct_rcvstate & (RPCRCVSTATE_NORMAL |
	    RPCRCVSTATE_NONAPPDATA)) == 0) {
		/* Mark that a socket upcall needs to be done. */
		if ((ct->ct_rcvstate & (RPCRCVSTATE_UPCALLNEEDED |
		    RPCRCVSTATE_UPCALLINPROG)) != 0)
			ct->ct_rcvstate |= RPCRCVSTATE_SOUPCALLNEEDED;
		mtx_unlock(&ct->ct_lock);
		return (SU_OK);
	}
	mtx_unlock(&ct->ct_lock);

	/*
	 * If another thread is already here, it must be in
	 * soreceive(), so just return to avoid races with it.
	 * ct_upcallrefs is protected by the SOCKBUF_LOCK(),
	 * which is held in this function, except when
	 * soreceive() is called.
	 */
	if (ct->ct_upcallrefs > 0)
		return (SU_OK);
	ct->ct_upcallrefs++;

	/*
	 * Read as much as possible off the socket and link it
	 * onto ct_raw.
	 */
	for (;;) {
		uio.uio_resid = 1000000000;
		uio.uio_td = curthread;
		m2 = m = NULL;
		rcvflag = MSG_DONTWAIT | MSG_SOCALLBCK;
		if (ct->ct_sslrefno != 0 && (ct->ct_rcvstate &
		    RPCRCVSTATE_NORMAL) != 0)
			rcvflag |= MSG_TLSAPPDATA;
		SOCKBUF_UNLOCK(&so->so_rcv);
		error = soreceive(so, NULL, &uio, &m, &m2, &rcvflag);
		SOCKBUF_LOCK(&so->so_rcv);

		if (error == EWOULDBLOCK) {
			/*
			 * We must re-test for readability after
			 * taking the lock to protect us in the case
			 * where a new packet arrives on the socket
			 * after our call to soreceive fails with
			 * EWOULDBLOCK.
			 */
			error = 0;
			if (!soreadable(so))
				break;
			continue;
		}
		if (error == 0 && m == NULL) {
			/*
			 * We must have got EOF trying
			 * to read from the stream.
			 */
			error = ECONNRESET;
		}

		/*
		 * A return of ENXIO indicates that there is an
		 * alert record at the head of the
		 * socket's receive queue, for TLS connections.
		 * This record needs to be handled in userland
		 * via an SSL_read() call, so do an upcall to the daemon.
		 */
		if (ct->ct_sslrefno != 0 && error == ENXIO) {
			/* Disable reception, marking an upcall needed. */
			mtx_lock(&ct->ct_lock);
			ct->ct_rcvstate |= RPCRCVSTATE_UPCALLNEEDED;
			/*
			 * If an upcall is needed, wake up the kthread
			 * that runs clnt_vc_dotlsupcall().
			 */
			wakeup(&ct->ct_sslrefno);
			mtx_unlock(&ct->ct_lock);
			break;
		}
		if (error != 0)
			break;

		/* Process any record header(s). */
		if (m2 != NULL) {
			cmsg = mtod(m2, struct cmsghdr *);
			if (cmsg->cmsg_type == TLS_GET_RECORD &&
			    cmsg->cmsg_len == CMSG_LEN(sizeof(tgr))) {
				memcpy(&tgr, CMSG_DATA(cmsg), sizeof(tgr));
				/*
				 * TLS_RLTYPE_ALERT records are handled via
				 * the ENXIO return from soreceive() above.
				 * Just throw any other non-TLS_RLTYPE_APP
				 * records away.
				 */
				if (tgr.tls_type != TLS_RLTYPE_APP) {
					m_freem(m);
					m_free(m2);
					mtx_lock(&ct->ct_lock);
					ct->ct_rcvstate &=
					    ~RPCRCVSTATE_NONAPPDATA;
					ct->ct_rcvstate |= RPCRCVSTATE_NORMAL;
					mtx_unlock(&ct->ct_lock);
					continue;
				}
			}
			m_free(m2);
		}

		if (ct->ct_raw != NULL)
			m_last(ct->ct_raw)->m_next = m;
		else
			ct->ct_raw = m;
	}
	rawlen = m_length(ct->ct_raw, NULL);
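
	/*
	 * ct_raw accumulates the raw bytes read above; the loop below
	 * peels record-mark framed fragments off it into ct_record,
	 * using ct_record_resid to count the bytes still missing from
	 * the current fragment and ct_record_eor to remember whether
	 * that fragment ends an RPC record.
	 */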

	/* Now, process as much of ct_raw as possible. */
	for (;;) {
		/*
		 * If ct_record_resid is zero, we are waiting for a
		 * record mark.
		 */
		if (ct->ct_record_resid == 0) {
			if (rawlen < sizeof(uint32_t))
				break;
			m_copydata(ct->ct_raw, 0, sizeof(uint32_t),
			    (char *)&header);
			header = ntohl(header);
			ct->ct_record_resid = header & 0x7fffffff;
			ct->ct_record_eor = ((header & 0x80000000) != 0);
			m_adj(ct->ct_raw, sizeof(uint32_t));
			rawlen -= sizeof(uint32_t);
		} else {
			/*
			 * Move as much of the record as possible to
			 * ct_record.
			 */
			if (rawlen == 0)
				break;
			if (rawlen <= ct->ct_record_resid) {
				if (ct->ct_record != NULL)
					m_last(ct->ct_record)->m_next =
					    ct->ct_raw;
				else
					ct->ct_record = ct->ct_raw;
				ct->ct_raw = NULL;
				ct->ct_record_resid -= rawlen;
				rawlen = 0;
			} else {
				m = m_split(ct->ct_raw, ct->ct_record_resid,
				    M_NOWAIT);
				if (m == NULL)
					break;
				if (ct->ct_record != NULL)
					m_last(ct->ct_record)->m_next =
					    ct->ct_raw;
				else
					ct->ct_record = ct->ct_raw;
				rawlen -= ct->ct_record_resid;
				ct->ct_record_resid = 0;
				ct->ct_raw = m;
			}
			if (ct->ct_record_resid > 0)
				break;

			/*
			 * If we have the entire record, see if we can
			 * match it to a request.
			 */
			if (ct->ct_record_eor) {
				/*
				 * The XID is in the first uint32_t of
				 * the reply and the message direction
				 * is the second one.
				 */
				if (ct->ct_record->m_len <
				    sizeof(xid_plus_direction) &&
				    m_length(ct->ct_record, NULL) <
				    sizeof(xid_plus_direction)) {
					/*
					 * What to do now?
					 * The data in the TCP stream is
					 * corrupted such that there is no
					 * valid RPC message to parse.
					 * I think it best to close this
					 * connection and allow
					 * clnt_reconnect_call() to try
					 * and establish a new one.
					 */
					printf("clnt_vc_soupcall: "
					    "connection data corrupted\n");
					error = ECONNRESET;
					goto wakeup_all;
				}
				m_copydata(ct->ct_record, 0,
				    sizeof(xid_plus_direction),
				    (char *)xid_plus_direction);
				xid_plus_direction[0] =
				    ntohl(xid_plus_direction[0]);
				xid_plus_direction[1] =
				    ntohl(xid_plus_direction[1]);
				/* Check message direction. */
				if (xid_plus_direction[1] == CALL) {
					/* This is a backchannel request. */
					mtx_lock(&ct->ct_lock);
					xprt = ct->ct_backchannelxprt;
					if (xprt == NULL) {
						mtx_unlock(&ct->ct_lock);
						/* Just throw it away. */
						m_freem(ct->ct_record);
						ct->ct_record = NULL;
					} else {
						cd = (struct cf_conn *)
						    xprt->xp_p1;
						m2 = cd->mreq;
						/*
						 * The requests are chained
						 * in the m_nextpkt list.
						 */
						while (m2 != NULL &&
						    m2->m_nextpkt != NULL)
							/* Find end of list. */
							m2 = m2->m_nextpkt;
						if (m2 != NULL)
							m2->m_nextpkt =
							    ct->ct_record;
						else
							cd->mreq =
							    ct->ct_record;
						ct->ct_record->m_nextpkt =
						    NULL;
						ct->ct_record = NULL;
						xprt_active(xprt);
						mtx_unlock(&ct->ct_lock);
					}
				} else {
					mtx_lock(&ct->ct_lock);
					foundreq = 0;
					TAILQ_FOREACH(cr, &ct->ct_pending,
					    cr_link) {
						if (cr->cr_xid ==
						    xid_plus_direction[0]) {
							/*
							 * This one
							 * matches.  We leave
							 * the reply mbuf in
							 * cr->cr_mrep.  Set
							 * the XID to zero so
							 * that we will ignore
							 * any duplicated
							 * replies.
							 */
							cr->cr_xid = 0;
							cr->cr_mrep =
							    ct->ct_record;
							cr->cr_error = 0;
							foundreq = 1;
							wakeup(cr);
							break;
						}
					}
					mtx_unlock(&ct->ct_lock);

					if (!foundreq)
						m_freem(ct->ct_record);
					ct->ct_record = NULL;
				}
			}
		}
	}

	if (error != 0) {
wakeup_all:
		/*
		 * This socket is broken, so mark that it cannot
		 * receive and fail all RPCs waiting for a reply
		 * on it, so that they will be retried on a new
		 * TCP connection created by clnt_reconnect_X().
		 */
		mtx_lock(&ct->ct_lock);
		ct->ct_error.re_status = RPC_CANTRECV;
		ct->ct_error.re_errno = error;
		TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
			cr->cr_error = error;
			wakeup(cr);
		}
		mtx_unlock(&ct->ct_lock);
	}

	ct->ct_upcallrefs--;
	if (ct->ct_upcallrefs < 0)
		panic("rpcvc upcall refcnt");
	if (ct->ct_upcallrefs == 0)
		wakeup(&ct->ct_upcallrefs);
	return (SU_OK);
}

/*
 * Wait for all upcalls in progress to complete.
 */
static void
clnt_vc_upcallsdone(struct ct_data *ct)
{

	SOCKBUF_LOCK_ASSERT(&ct->ct_socket->so_rcv);

	while (ct->ct_upcallrefs > 0)
		(void) msleep(&ct->ct_upcallrefs,
		    SOCKBUF_MTX(&ct->ct_socket->so_rcv), 0, "rpcvcup", 0);
}

/*
 * Do a TLS upcall to the rpctlscd daemon, as required.
 * This function runs as a kthread.
 */
static void
clnt_vc_dotlsupcall(void *data)
{
	struct ct_data *ct = (struct ct_data *)data;
	enum clnt_stat ret;
	uint32_t reterr;

	mtx_lock(&ct->ct_lock);
	ct->ct_rcvstate |= RPCRCVSTATE_UPCALLTHREAD;
	while (!ct->ct_closed) {
		if ((ct->ct_rcvstate & RPCRCVSTATE_UPCALLNEEDED) != 0) {
			ct->ct_rcvstate &= ~RPCRCVSTATE_UPCALLNEEDED;
			ct->ct_rcvstate |= RPCRCVSTATE_UPCALLINPROG;
			if (ct->ct_sslrefno != 0 && ct->ct_sslrefno !=
			    RPCTLS_REFNO_HANDSHAKE) {
				mtx_unlock(&ct->ct_lock);
				ret = rpctls_cl_handlerecord(ct->ct_sslsec,
				    ct->ct_sslusec, ct->ct_sslrefno, &reterr);
				mtx_lock(&ct->ct_lock);
			}
			ct->ct_rcvstate &= ~RPCRCVSTATE_UPCALLINPROG;
			if (ret == RPC_SUCCESS && reterr == RPCTLSERR_OK)
				ct->ct_rcvstate |= RPCRCVSTATE_NORMAL;
			else
				ct->ct_rcvstate |= RPCRCVSTATE_NONAPPDATA;
			wakeup(&ct->ct_rcvstate);
		}
		if ((ct->ct_rcvstate & RPCRCVSTATE_SOUPCALLNEEDED) != 0) {
			ct->ct_rcvstate &= ~RPCRCVSTATE_SOUPCALLNEEDED;
			mtx_unlock(&ct->ct_lock);
			SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
			clnt_vc_soupcall(ct->ct_socket, ct, M_NOWAIT);
			SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);
			mtx_lock(&ct->ct_lock);
		}
		msleep(&ct->ct_sslrefno, &ct->ct_lock, 0, "clntvcdu", hz);
	}
	ct->ct_rcvstate &= ~RPCRCVSTATE_UPCALLTHREAD;
	wakeup(&ct->ct_sslrefno);
	mtx_unlock(&ct->ct_lock);
	kthread_exit();
}
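
/*
 * A rough usage sketch (assumptions: the consumer already has a
 * connected "so"/"raddr" pair, and the CLNT_CALL_MBUF() and
 * CLNT_DESTROY() macros from <rpc/clnt.h> dispatch to clnt_vc_call()
 * and clnt_vc_destroy() via clnt_vc_ops above; most consumers go
 * through the clnt_reconnect layer rather than calling
 * clnt_vc_create() directly):
 *
 *	CLIENT *cl;
 *	struct mbuf *args, *results;
 *	struct timeval timo = { 25, 0 };
 *	enum clnt_stat stat;
 *
 *	cl = clnt_vc_create(so, raddr, prog, vers, 0, 0, 1);
 *	if (cl != NULL) {
 *		stat = CLNT_CALL_MBUF(cl, NULL, proc, args, &results, timo);
 *		if (stat == RPC_SUCCESS)
 *			m_freem(results);
 *		CLNT_DESTROY(cl);
 *	}
 */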