/*	$NetBSD: clnt_vc.c,v 1.4 2000/07/14 08:40:42 fvdl Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)clnt_tcp.c 1.37 87/10/05 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)clnt_tcp.c	2.2 88/08/01 4.0 RPCSRC";
static char sccsid3[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * clnt_tcp.c, Implements a TCP/IP based, client side RPC.
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 *
 * TCP based RPC supports 'batched calls'.
 * A sequence of calls may be batched-up in a send buffer.  The rpc call
 * returns immediately to the client even though the call was not necessarily
 * sent.  The batching occurs if the results' xdr routine is NULL (0) AND
 * the rpc timeout value is zero (see clnt.h, rpc).
 *
 * Clients should NOT casually batch calls that in fact return results; that
 * is, the server side should be aware that a call is batched and not produce
 * any return message.  Batched calls that produce many result messages can
 * deadlock (netlock) the client and the server....
 *
 * Now go hang yourself.
 */
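
/*
 * Illustrative sketch (not compiled): the framing used on the TCP stream
 * below is RPC record marking.  Every record is preceded by a 32-bit mark
 * whose high-order bit is set on the last fragment of the record and whose
 * low-order 31 bits give the fragment length in bytes, excluding the mark
 * itself.  clnt_vc_call() builds such a mark before sending a request and
 * clnt_vc_soupcall() decodes one while reassembling replies.  The helper
 * names below are hypothetical and exist only to show the bit layout.
 */
#if 0
static uint32_t
rpcrm_encode(uint32_t fraglen, int lastfrag)
{

	/* High bit flags the final fragment; low 31 bits carry the length. */
	return (htonl((lastfrag ? 0x80000000 : 0) | (fraglen & 0x7fffffff)));
}

static void
rpcrm_decode(uint32_t mark, uint32_t *fraglenp, int *lastfragp)
{

	mark = ntohl(mark);
	*fraglenp = mark & 0x7fffffff;
	*lastfragp = (mark & 0x80000000) != 0;
}
#endif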

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/syslog.h>
#include <sys/time.h>
#include <sys/uio.h>

#include <net/vnet.h>

#include <netinet/tcp.h>

#include <rpc/rpc.h>
#include <rpc/rpc_com.h>
#include <rpc/krpc.h>

struct cmessage {
	struct cmsghdr cmsg;
	struct cmsgcred cmcred;
};

static enum clnt_stat clnt_vc_call(CLIENT *, struct rpc_callextra *,
    rpcproc_t, struct mbuf *, struct mbuf **, struct timeval);
static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, void *);
static void clnt_vc_abort(CLIENT *);
static bool_t clnt_vc_control(CLIENT *, u_int, void *);
static void clnt_vc_close(CLIENT *);
static void clnt_vc_destroy(CLIENT *);
static bool_t time_not_ok(struct timeval *);
static int clnt_vc_soupcall(struct socket *so, void *arg, int waitflag);

static struct clnt_ops clnt_vc_ops = {
	.cl_call =	clnt_vc_call,
	.cl_abort =	clnt_vc_abort,
	.cl_geterr =	clnt_vc_geterr,
	.cl_freeres =	clnt_vc_freeres,
	.cl_close =	clnt_vc_close,
	.cl_destroy =	clnt_vc_destroy,
	.cl_control =	clnt_vc_control
};

static void clnt_vc_upcallsdone(struct ct_data *);

static int	fake_wchan;

/*
 * Create a client handle for a connection.
 * Default options are set, which the user can change using clnt_control().
 * The rpc/vc package does buffering similar to stdio, so the client
 * must pick send and receive buffer sizes, 0 => use the default.
 * NB: the socket is stored in the client's private data.
 * NB: rpch->cl_auth is set to null authentication.  The caller may wish to
 * set it to something more useful.
 *
 * The socket should be open; if it is not yet connected, clnt_vc_create()
 * will connect it to raddr.
 */
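
/*
 * Illustrative sketch (not compiled): one way a kernel consumer might hand
 * a newly created TCP socket to clnt_vc_create().  The function name, the
 * program/version arguments and the abbreviated error handling are
 * hypothetical; only socreate() and clnt_vc_create() are real interfaces.
 */
#if 0
static CLIENT *
example_vc_client(struct sockaddr *raddr, rpcprog_t prog, rpcvers_t vers,
    struct thread *td)
{
	struct socket *so;
	CLIENT *cl;
	int error;

	error = socreate(raddr->sa_family, &so, SOCK_STREAM, IPPROTO_TCP,
	    td->td_ucred, td);
	if (error != 0)
		return (NULL);

	/* Zero buffer sizes select the transport defaults. */
	cl = clnt_vc_create(so, raddr, prog, vers, 0, 0, 1);
	if (cl == NULL) {
		/* ct_closeit defaults to FALSE, so the caller cleans up. */
		soclose(so);
	}
	return (cl);
}
#endif
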
CLIENT *
clnt_vc_create(
	struct socket *so,		/* open socket */
	struct sockaddr *raddr,		/* server's address */
	const rpcprog_t prog,		/* program number */
	const rpcvers_t vers,		/* version number */
	size_t sendsz,			/* buffer send size */
	size_t recvsz,			/* buffer recv size */
	int intrflag)			/* interruptible */
{
	CLIENT *cl;			/* client handle */
	struct ct_data *ct = NULL;	/* client private data */
	struct timeval now;
	struct rpc_msg call_msg;
	static uint32_t disrupt;
	struct __rpc_sockinfo si;
	XDR xdrs;
	int error, interrupted, one = 1, sleep_flag;
	struct sockopt sopt;

	if (disrupt == 0)
		disrupt = (uint32_t)(long)raddr;

	cl = (CLIENT *)mem_alloc(sizeof (*cl));
	ct = (struct ct_data *)mem_alloc(sizeof (*ct));

	mtx_init(&ct->ct_lock, "ct->ct_lock", NULL, MTX_DEF);
	ct->ct_threads = 0;
	ct->ct_closing = FALSE;
	ct->ct_closed = FALSE;
	ct->ct_upcallrefs = 0;

	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
		error = soconnect(so, raddr, curthread);
		SOCK_LOCK(so);
		interrupted = 0;
		sleep_flag = PSOCK;
		if (intrflag != 0)
			sleep_flag |= PCATCH;
		while ((so->so_state & SS_ISCONNECTING)
		    && so->so_error == 0) {
			error = msleep(&so->so_timeo, SOCK_MTX(so),
			    sleep_flag, "connec", 0);
			if (error) {
				if (error == EINTR || error == ERESTART)
					interrupted = 1;
				break;
			}
		}
		if (error == 0) {
			error = so->so_error;
			so->so_error = 0;
		}
		SOCK_UNLOCK(so);
		if (error) {
			if (!interrupted)
				so->so_state &= ~SS_ISCONNECTING;
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = error;
			goto err;
		}
	}

	if (!__rpc_socket2sockinfo(so, &si)) {
		goto err;
	}

	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = SOL_SOCKET;
		sopt.sopt_name = SO_KEEPALIVE;
		sopt.sopt_val = &one;
		sopt.sopt_valsize = sizeof(one);
		sosetopt(so, &sopt);
	}

	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = IPPROTO_TCP;
		sopt.sopt_name = TCP_NODELAY;
		sopt.sopt_val = &one;
		sopt.sopt_valsize = sizeof(one);
		sosetopt(so, &sopt);
	}

	ct->ct_closeit = FALSE;

	/*
	 * Set up private data struct
	 */
	ct->ct_socket = so;
	ct->ct_wait.tv_sec = -1;
	ct->ct_wait.tv_usec = -1;
	memcpy(&ct->ct_addr, raddr, raddr->sa_len);

	/*
	 * Initialize call message
	 */
	getmicrotime(&now);
	ct->ct_xid = ((uint32_t)++disrupt) ^ __RPC_GETXID(&now);
	call_msg.rm_xid = ct->ct_xid;
	call_msg.rm_direction = CALL;
	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
	call_msg.rm_call.cb_prog = (uint32_t)prog;
	call_msg.rm_call.cb_vers = (uint32_t)vers;

	/*
	 * pre-serialize the static part of the call msg and stash it away
	 */
	xdrmem_create(&xdrs, ct->ct_mcallc, MCALL_MSG_SIZE,
	    XDR_ENCODE);
	if (! xdr_callhdr(&xdrs, &call_msg)) {
		if (ct->ct_closeit) {
			soclose(ct->ct_socket);
		}
		goto err;
	}
	ct->ct_mpos = XDR_GETPOS(&xdrs);
	XDR_DESTROY(&xdrs);
	ct->ct_waitchan = "rpcrecv";
	ct->ct_waitflag = 0;

	/*
	 * Create a client handle which uses mbuf-based XDR for serialization
	 * and authnone for authentication.
	 */
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	error = soreserve(ct->ct_socket, sendsz, recvsz);
	if (error != 0) {
		if (ct->ct_closeit) {
			soclose(ct->ct_socket);
		}
		goto err;
	}
	cl->cl_refs = 1;
	cl->cl_ops = &clnt_vc_ops;
	cl->cl_private = ct;
	cl->cl_auth = authnone_create();

	SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
	soupcall_set(ct->ct_socket, SO_RCV, clnt_vc_soupcall, ct);
	SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);

	ct->ct_raw = NULL;
	ct->ct_record = NULL;
	ct->ct_record_resid = 0;
	TAILQ_INIT(&ct->ct_pending);
	return (cl);

err:
	mtx_destroy(&ct->ct_lock);
	mem_free(ct, sizeof (struct ct_data));
	mem_free(cl, sizeof (CLIENT));

	return ((CLIENT *)NULL);
}

static enum clnt_stat
clnt_vc_call(
	CLIENT		*cl,		/* client handle */
	struct rpc_callextra *ext,	/* call metadata */
	rpcproc_t	proc,		/* procedure number */
	struct mbuf	*args,		/* pointer to args */
	struct mbuf	**resultsp,	/* pointer to results */
	struct timeval	utimeout)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	AUTH *auth;
	struct rpc_err *errp;
	enum clnt_stat stat;
	XDR xdrs;
	struct rpc_msg reply_msg;
	bool_t ok;
	int nrefreshes = 2;		/* number of times to refresh cred */
	struct timeval timeout;
	uint32_t xid;
	struct mbuf *mreq = NULL, *results;
	struct ct_request *cr;
	int error, trycnt;

	cr = malloc(sizeof(struct ct_request), M_RPC, M_WAITOK);

	mtx_lock(&ct->ct_lock);

	if (ct->ct_closing || ct->ct_closed) {
		mtx_unlock(&ct->ct_lock);
		free(cr, M_RPC);
		return (RPC_CANTSEND);
	}
	ct->ct_threads++;

	if (ext) {
		auth = ext->rc_auth;
		errp = &ext->rc_err;
	} else {
		auth = cl->cl_auth;
		errp = &ct->ct_error;
	}

	cr->cr_mrep = NULL;
	cr->cr_error = 0;

	if (ct->ct_wait.tv_usec == -1) {
		timeout = utimeout;	/* use supplied timeout */
	} else {
		timeout = ct->ct_wait;	/* use default timeout */
	}

	/*
	 * After 15 seconds of looping, allow it to return RPC_CANTSEND,
	 * which will cause the clnt_reconnect layer to create a new TCP
	 * connection.
	 */
	trycnt = 15 * hz;
call_again:
	mtx_assert(&ct->ct_lock, MA_OWNED);
	if (ct->ct_closing || ct->ct_closed) {
		ct->ct_threads--;
		wakeup(ct);
		mtx_unlock(&ct->ct_lock);
		free(cr, M_RPC);
		return (RPC_CANTSEND);
	}

	ct->ct_xid++;
	xid = ct->ct_xid;

	mtx_unlock(&ct->ct_lock);

	/*
	 * Leave space to prepend the record mark.
	 */
	mreq = m_gethdr(M_WAITOK, MT_DATA);
	mreq->m_data += sizeof(uint32_t);
	KASSERT(ct->ct_mpos + sizeof(uint32_t) <= MHLEN,
	    ("RPC header too big"));
	bcopy(ct->ct_mcallc, mreq->m_data, ct->ct_mpos);
	mreq->m_len = ct->ct_mpos;

	/*
	 * The XID is the first thing in the request.
	 */
	*mtod(mreq, uint32_t *) = htonl(xid);

	xdrmbuf_create(&xdrs, mreq, XDR_ENCODE);

	errp->re_status = stat = RPC_SUCCESS;

	if ((! XDR_PUTINT32(&xdrs, &proc)) ||
	    (! AUTH_MARSHALL(auth, xid, &xdrs,
		m_copym(args, 0, M_COPYALL, M_WAITOK)))) {
		errp->re_status = stat = RPC_CANTENCODEARGS;
		mtx_lock(&ct->ct_lock);
		goto out;
	}
	mreq->m_pkthdr.len = m_length(mreq, NULL);

	/*
	 * Prepend a record marker containing the packet length.
	 */
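	/*
	 * The mark is the RPC record marking word: the high-order bit is
	 * set because the request is sent as a single (and therefore last)
	 * fragment, and the low-order 31 bits hold the fragment length,
	 * which does not include the mark itself.
	 */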
	M_PREPEND(mreq, sizeof(uint32_t), M_WAITOK);
	*mtod(mreq, uint32_t *) =
	    htonl(0x80000000 | (mreq->m_pkthdr.len - sizeof(uint32_t)));

	cr->cr_xid = xid;
	mtx_lock(&ct->ct_lock);
	/*
	 * Check to see if the other end has already started to close down
	 * the connection. The upcall will have set ct_error.re_status
	 * to RPC_CANTRECV if this is the case.
	 * If the other end starts to close down the connection after this
	 * point, it will be detected later when cr_error is checked,
	 * since the request is in the ct_pending queue.
	 */
	if (ct->ct_error.re_status == RPC_CANTRECV) {
		if (errp != &ct->ct_error) {
			errp->re_errno = ct->ct_error.re_errno;
			errp->re_status = RPC_CANTRECV;
		}
		stat = RPC_CANTRECV;
		goto out;
	}
	TAILQ_INSERT_TAIL(&ct->ct_pending, cr, cr_link);
	mtx_unlock(&ct->ct_lock);

	/*
	 * sosend consumes mreq.
	 */
	error = sosend(ct->ct_socket, NULL, NULL, mreq, NULL, 0, curthread);
	mreq = NULL;
	if (error == EMSGSIZE || (error == ERESTART &&
	    (ct->ct_waitflag & PCATCH) == 0 && trycnt-- > 0)) {
		SOCKBUF_LOCK(&ct->ct_socket->so_snd);
		sbwait(&ct->ct_socket->so_snd);
		SOCKBUF_UNLOCK(&ct->ct_socket->so_snd);
		AUTH_VALIDATE(auth, xid, NULL, NULL);
		mtx_lock(&ct->ct_lock);
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		/* Sleep for 1 clock tick before trying the sosend() again. */
		msleep(&fake_wchan, &ct->ct_lock, 0, "rpclpsnd", 1);
		goto call_again;
	}

	reply_msg.acpted_rply.ar_verf.oa_flavor = AUTH_NULL;
	reply_msg.acpted_rply.ar_verf.oa_base = cr->cr_verf;
	reply_msg.acpted_rply.ar_verf.oa_length = 0;
	reply_msg.acpted_rply.ar_results.where = NULL;
	reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;

	mtx_lock(&ct->ct_lock);
	if (error) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		errp->re_errno = error;
		errp->re_status = stat = RPC_CANTSEND;
		goto out;
	}

	/*
	 * Check to see if we got an upcall while waiting for the
	 * lock.  In both cases, the request must be removed from
	 * ct->ct_pending.
	 */
	if (cr->cr_error) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		errp->re_errno = cr->cr_error;
		errp->re_status = stat = RPC_CANTRECV;
		goto out;
	}
	if (cr->cr_mrep) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		goto got_reply;
	}

	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
		errp->re_status = stat = RPC_TIMEDOUT;
		goto out;
	}

	error = msleep(cr, &ct->ct_lock, ct->ct_waitflag, ct->ct_waitchan,
	    tvtohz(&timeout));

	TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);

	if (error) {
		/*
		 * The sleep returned an error; turn the error code into
		 * an appropriate client status.
		 */
		errp->re_errno = error;
		switch (error) {
		case EINTR:
			stat = RPC_INTR;
			break;
		case EWOULDBLOCK:
			stat = RPC_TIMEDOUT;
			break;
		default:
			stat = RPC_CANTRECV;
		}
		errp->re_status = stat;
		goto out;
	} else {
		/*
		 * We were woken up by the upcall.  If the
		 * upcall had a receive error, report that,
		 * otherwise we have a reply.
		 */
		if (cr->cr_error) {
			errp->re_errno = cr->cr_error;
			errp->re_status = stat = RPC_CANTRECV;
			goto out;
		}
	}

got_reply:
	/*
	 * Now decode and validate the response. We need to drop the
	 * lock since xdr_replymsg may end up sleeping in malloc.
	 */
	mtx_unlock(&ct->ct_lock);

	if (ext && ext->rc_feedback)
		ext->rc_feedback(FEEDBACK_OK, proc, ext->rc_feedback_arg);

	xdrmbuf_create(&xdrs, cr->cr_mrep, XDR_DECODE);
	ok = xdr_replymsg(&xdrs, &reply_msg);
	cr->cr_mrep = NULL;

	if (ok) {
		if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
		    (reply_msg.acpted_rply.ar_stat == SUCCESS))
			errp->re_status = stat = RPC_SUCCESS;
		else
			stat = _seterr_reply(&reply_msg, errp);

		if (stat == RPC_SUCCESS) {
			results = xdrmbuf_getall(&xdrs);
			if (!AUTH_VALIDATE(auth, xid,
			    &reply_msg.acpted_rply.ar_verf,
			    &results)) {
				errp->re_status = stat = RPC_AUTHERROR;
				errp->re_why = AUTH_INVALIDRESP;
			} else {
				KASSERT(results,
				    ("auth validated but no result"));
				*resultsp = results;
			}
		}		/* end successful completion */
		/*
		 * If unsuccessful AND error is an authentication error
		 * then refresh credentials and try again, else break
		 */
		else if (stat == RPC_AUTHERROR)
			/* maybe our credentials need to be refreshed ... */
			if (nrefreshes > 0 &&
			    AUTH_REFRESH(auth, &reply_msg)) {
				nrefreshes--;
				XDR_DESTROY(&xdrs);
				mtx_lock(&ct->ct_lock);
				goto call_again;
			}
			/* end of unsuccessful completion */
	}	/* end of valid reply message */
	else {
		errp->re_status = stat = RPC_CANTDECODERES;
	}
	XDR_DESTROY(&xdrs);
	mtx_lock(&ct->ct_lock);
out:
	mtx_assert(&ct->ct_lock, MA_OWNED);

	KASSERT(stat != RPC_SUCCESS || *resultsp,
	    ("RPC_SUCCESS without reply"));

	if (mreq)
		m_freem(mreq);
	if (cr->cr_mrep)
		m_freem(cr->cr_mrep);

	ct->ct_threads--;
	if (ct->ct_closing)
		wakeup(ct);

	mtx_unlock(&ct->ct_lock);

	if (auth && stat != RPC_SUCCESS)
		AUTH_VALIDATE(auth, xid, NULL, NULL);

	free(cr, M_RPC);

	return (stat);
}

static void
clnt_vc_geterr(CLIENT *cl, struct rpc_err *errp)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;

	*errp = ct->ct_error;
}

static bool_t
clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
{
	XDR xdrs;
	bool_t dummy;

	xdrs.x_op = XDR_FREE;
	dummy = (*xdr_res)(&xdrs, res_ptr);

	return (dummy);
}

/*ARGSUSED*/
static void
clnt_vc_abort(CLIENT *cl)
{
}

static bool_t
clnt_vc_control(CLIENT *cl, u_int request, void *info)
{
	struct ct_data *ct = (struct ct_data *)cl->cl_private;
	void *infop = info;
	SVCXPRT *xprt;

	mtx_lock(&ct->ct_lock);

	switch (request) {
	case CLSET_FD_CLOSE:
		ct->ct_closeit = TRUE;
		mtx_unlock(&ct->ct_lock);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		ct->ct_closeit = FALSE;
		mtx_unlock(&ct->ct_lock);
		return (TRUE);
	default:
		break;
	}

	/* for other requests which use info */
	if (info == NULL) {
		mtx_unlock(&ct->ct_lock);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)info)) {
			mtx_unlock(&ct->ct_lock);
			return (FALSE);
		}
		ct->ct_wait = *(struct timeval *)infop;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)infop = ct->ct_wait;
		break;
	case CLGET_SERVER_ADDR:
		(void) memcpy(info, &ct->ct_addr, (size_t)ct->ct_addr.ss_len);
		break;
	case CLGET_SVC_ADDR:
		/*
		 * Slightly different semantics to userland - we use
		 * sockaddr instead of netbuf.
		 */
		memcpy(info, &ct->ct_addr, ct->ct_addr.ss_len);
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		mtx_unlock(&ct->ct_lock);
		return (FALSE);
	case CLGET_XID:
		*(uint32_t *)info = ct->ct_xid;
		break;
	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		/* decrement by 1 as clnt_vc_call() increments once */
		ct->ct_xid = *(uint32_t *)info - 1;
		break;
	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(uint32_t *)info =
		    ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
		    4 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_VERS:
		*(uint32_t *)(void *)(ct->ct_mcallc +
		    4 * BYTES_PER_XDR_UNIT) =
		    htonl(*(uint32_t *)info);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		*(uint32_t *)info =
		    ntohl(*(uint32_t *)(void *)(ct->ct_mcallc +
		    3 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_PROG:
		*(uint32_t *)(void *)(ct->ct_mcallc +
		    3 * BYTES_PER_XDR_UNIT) =
		    htonl(*(uint32_t *)info);
		break;

	case CLSET_WAITCHAN:
		ct->ct_waitchan = (const char *)info;
		break;

	case CLGET_WAITCHAN:
		*(const char **) info = ct->ct_waitchan;
		break;

	case CLSET_INTERRUPTIBLE:
		if (*(int *) info)
			ct->ct_waitflag = PCATCH;
		else
			ct->ct_waitflag = 0;
		break;

	case CLGET_INTERRUPTIBLE:
		if (ct->ct_waitflag)
			*(int *) info = TRUE;
		else
			*(int *) info = FALSE;
		break;

	case CLSET_BACKCHANNEL:
		xprt = (SVCXPRT *)info;
		if (ct->ct_backchannelxprt == NULL) {
			xprt->xp_p2 = ct;
			ct->ct_backchannelxprt = xprt;
		}
		break;

	default:
		mtx_unlock(&ct->ct_lock);
		return (FALSE);
	}

	mtx_unlock(&ct->ct_lock);
	return (TRUE);
}

static void
clnt_vc_close(CLIENT *cl)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	struct ct_request *cr;

	mtx_lock(&ct->ct_lock);

	if (ct->ct_closed) {
		mtx_unlock(&ct->ct_lock);
		return;
	}

	if (ct->ct_closing) {
		while (ct->ct_closing)
			msleep(ct, &ct->ct_lock, 0, "rpcclose", 0);
		KASSERT(ct->ct_closed, ("client should be closed"));
		mtx_unlock(&ct->ct_lock);
		return;
	}

	if (ct->ct_socket) {
		ct->ct_closing = TRUE;
		mtx_unlock(&ct->ct_lock);

		SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
		soupcall_clear(ct->ct_socket, SO_RCV);
		clnt_vc_upcallsdone(ct);
		SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);

		/*
		 * Abort any pending requests and wait until everyone
		 * has finished with clnt_vc_call.
		 */
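		/*
		 * The upcall never removes entries from ct_pending; setting
		 * cr_error here makes each thread sleeping in clnt_vc_call()
		 * wake up, observe the error and fail its call with
		 * RPC_CANTRECV.
		 */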
		mtx_lock(&ct->ct_lock);
		TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
			cr->cr_xid = 0;
			cr->cr_error = ESHUTDOWN;
			wakeup(cr);
		}

		while (ct->ct_threads)
			msleep(ct, &ct->ct_lock, 0, "rpcclose", 0);
	}

	ct->ct_closing = FALSE;
	ct->ct_closed = TRUE;
	mtx_unlock(&ct->ct_lock);
	wakeup(ct);
}

static void
clnt_vc_destroy(CLIENT *cl)
{
	struct ct_data *ct = (struct ct_data *) cl->cl_private;
	struct socket *so = NULL;
	SVCXPRT *xprt;

	clnt_vc_close(cl);

	mtx_lock(&ct->ct_lock);
	xprt = ct->ct_backchannelxprt;
	ct->ct_backchannelxprt = NULL;
	if (xprt != NULL) {
		mtx_unlock(&ct->ct_lock);	/* To avoid a LOR. */
		sx_xlock(&xprt->xp_lock);
		mtx_lock(&ct->ct_lock);
		xprt->xp_p2 = NULL;
		sx_xunlock(&xprt->xp_lock);
	}

	if (ct->ct_socket) {
		if (ct->ct_closeit) {
			so = ct->ct_socket;
		}
	}

	mtx_unlock(&ct->ct_lock);

	mtx_destroy(&ct->ct_lock);
	if (so) {
		soshutdown(so, SHUT_WR);
		soclose(so);
	}
	m_freem(ct->ct_record);
	m_freem(ct->ct_raw);
	mem_free(ct, sizeof(struct ct_data));
	if (cl->cl_netid && cl->cl_netid[0])
		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
	if (cl->cl_tp && cl->cl_tp[0])
		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
	mem_free(cl, sizeof(CLIENT));
}

/*
 * Make sure that the time is not garbage.  -1 value is disallowed.
 * Note this is different from time_not_ok in clnt_dg.c
 */
static bool_t
time_not_ok(struct timeval *t)
{
	return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
	    t->tv_usec <= -1 || t->tv_usec > 1000000);
}

int
clnt_vc_soupcall(struct socket *so, void *arg, int waitflag)
{
	struct ct_data *ct = (struct ct_data *) arg;
	struct uio uio;
	struct mbuf *m, *m2;
	struct ct_request *cr;
	int error, rcvflag, foundreq;
	uint32_t xid_plus_direction[2], header;
	SVCXPRT *xprt;
	struct cf_conn *cd;
	u_int rawlen;

	/*
	 * If another thread is already here, it must be in
	 * soreceive(), so just return to avoid races with it.
	 * ct_upcallrefs is protected by the SOCKBUF_LOCK(),
	 * which is held in this function, except when
	 * soreceive() is called.
	 */
	if (ct->ct_upcallrefs > 0)
		return (SU_OK);
	ct->ct_upcallrefs++;

	/*
	 * Read as much as possible off the socket and link it
	 * onto ct_raw.
	 */
	for (;;) {
		uio.uio_resid = 1000000000;
		uio.uio_td = curthread;
		m2 = m = NULL;
		rcvflag = MSG_DONTWAIT | MSG_SOCALLBCK;
		SOCKBUF_UNLOCK(&so->so_rcv);
		error = soreceive(so, NULL, &uio, &m, NULL, &rcvflag);
		SOCKBUF_LOCK(&so->so_rcv);

		if (error == EWOULDBLOCK) {
			/*
			 * We must re-test for readability after
			 * taking the lock to protect us in the case
			 * where a new packet arrives on the socket
			 * after our call to soreceive fails with
			 * EWOULDBLOCK.
			 */
			error = 0;
			if (!soreadable(so))
				break;
			continue;
		}
		if (error == 0 && m == NULL) {
			/*
			 * We must have got EOF trying
			 * to read from the stream.
			 */
			error = ECONNRESET;
		}
		if (error != 0)
			break;

		if (ct->ct_raw != NULL)
			m_last(ct->ct_raw)->m_next = m;
		else
			ct->ct_raw = m;
	}
	rawlen = m_length(ct->ct_raw, NULL);

	/* Now, process as much of ct_raw as possible. */
	for (;;) {
		/*
		 * If ct_record_resid is zero, we are waiting for a
		 * record mark.
		 */
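		/*
		 * The record mark is a 32-bit word in network byte order:
		 * the high-order bit marks the last fragment of a record
		 * and the low-order 31 bits give the number of fragment
		 * bytes that follow the mark.
		 */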
		if (ct->ct_record_resid == 0) {
			if (rawlen < sizeof(uint32_t))
				break;
			m_copydata(ct->ct_raw, 0, sizeof(uint32_t),
			    (char *)&header);
			header = ntohl(header);
			ct->ct_record_resid = header & 0x7fffffff;
			ct->ct_record_eor = ((header & 0x80000000) != 0);
			m_adj(ct->ct_raw, sizeof(uint32_t));
			rawlen -= sizeof(uint32_t);
		} else {
			/*
			 * Move as much of the record as possible to
			 * ct_record.
			 */
			if (rawlen == 0)
				break;
			if (rawlen <= ct->ct_record_resid) {
				if (ct->ct_record != NULL)
					m_last(ct->ct_record)->m_next =
					    ct->ct_raw;
				else
					ct->ct_record = ct->ct_raw;
				ct->ct_raw = NULL;
				ct->ct_record_resid -= rawlen;
				rawlen = 0;
			} else {
				m = m_split(ct->ct_raw, ct->ct_record_resid,
				    M_NOWAIT);
				if (m == NULL)
					break;
				if (ct->ct_record != NULL)
					m_last(ct->ct_record)->m_next =
					    ct->ct_raw;
				else
					ct->ct_record = ct->ct_raw;
				rawlen -= ct->ct_record_resid;
				ct->ct_record_resid = 0;
				ct->ct_raw = m;
			}
			if (ct->ct_record_resid > 0)
				break;

			/*
			 * If we have the entire record, see if we can
			 * match it to a request.
			 */
			if (ct->ct_record_eor) {
				/*
				 * The XID is in the first uint32_t of
				 * the reply and the message direction
				 * is the second one.
				 */
				if (ct->ct_record->m_len <
				    sizeof(xid_plus_direction) &&
				    m_length(ct->ct_record, NULL) <
				    sizeof(xid_plus_direction)) {
					/*
					 * What to do now?
					 * The data in the TCP stream is
					 * corrupted such that there is no
					 * valid RPC message to parse.
					 * I think it best to close this
					 * connection and allow
					 * clnt_reconnect_call() to try
					 * and establish a new one.
					 */
					printf("clnt_vc_soupcall: "
					    "connection data corrupted\n");
					error = ECONNRESET;
					goto wakeup_all;
				}
				m_copydata(ct->ct_record, 0,
				    sizeof(xid_plus_direction),
				    (char *)xid_plus_direction);
				xid_plus_direction[0] =
				    ntohl(xid_plus_direction[0]);
				xid_plus_direction[1] =
				    ntohl(xid_plus_direction[1]);
				/* Check message direction. */
				if (xid_plus_direction[1] == CALL) {
					/* This is a backchannel request. */
					mtx_lock(&ct->ct_lock);
					xprt = ct->ct_backchannelxprt;
					if (xprt == NULL) {
						mtx_unlock(&ct->ct_lock);
						/* Just throw it away. */
						m_freem(ct->ct_record);
						ct->ct_record = NULL;
					} else {
						cd = (struct cf_conn *)
						    xprt->xp_p1;
						m2 = cd->mreq;
						/*
						 * The requests are chained
						 * in the m_nextpkt list.
						 */
						while (m2 != NULL &&
						    m2->m_nextpkt != NULL)
							/* Find end of list. */
							m2 = m2->m_nextpkt;
						if (m2 != NULL)
							m2->m_nextpkt =
							    ct->ct_record;
						else
							cd->mreq =
							    ct->ct_record;
						ct->ct_record->m_nextpkt =
						    NULL;
						ct->ct_record = NULL;
						xprt_active(xprt);
						mtx_unlock(&ct->ct_lock);
					}
				} else {
					mtx_lock(&ct->ct_lock);
					foundreq = 0;
					TAILQ_FOREACH(cr, &ct->ct_pending,
					    cr_link) {
						if (cr->cr_xid ==
						    xid_plus_direction[0]) {
							/*
							 * This one
							 * matches.  We leave
							 * the reply mbuf in
							 * cr->cr_mrep.  Set
							 * the XID to zero so
							 * that we will ignore
							 * any duplicated
							 * replies.
							 */
							cr->cr_xid = 0;
							cr->cr_mrep =
							    ct->ct_record;
							cr->cr_error = 0;
							foundreq = 1;
							wakeup(cr);
							break;
						}
					}
					mtx_unlock(&ct->ct_lock);

					if (!foundreq)
						m_freem(ct->ct_record);
					ct->ct_record = NULL;
				}
			}
		}
	}

	if (error != 0) {
	wakeup_all:
		/*
		 * This socket is broken, so mark that it cannot
		 * receive and fail all RPCs waiting for a reply
		 * on it, so that they will be retried on a new
		 * TCP connection created by clnt_reconnect_X().
		 */
		mtx_lock(&ct->ct_lock);
		ct->ct_error.re_status = RPC_CANTRECV;
		ct->ct_error.re_errno = error;
		TAILQ_FOREACH(cr, &ct->ct_pending, cr_link) {
			cr->cr_error = error;
			wakeup(cr);
		}
		mtx_unlock(&ct->ct_lock);
	}

	ct->ct_upcallrefs--;
	if (ct->ct_upcallrefs < 0)
		panic("rpcvc upcall refcnt");
	if (ct->ct_upcallrefs == 0)
		wakeup(&ct->ct_upcallrefs);
	return (SU_OK);
}

/*
 * Wait for all upcalls in progress to complete.
 */
static void
clnt_vc_upcallsdone(struct ct_data *ct)
{

	SOCKBUF_LOCK_ASSERT(&ct->ct_socket->so_rcv);

	while (ct->ct_upcallrefs > 0)
		(void) msleep(&ct->ct_upcallrefs,
		    SOCKBUF_MTX(&ct->ct_socket->so_rcv), 0, "rpcvcup", 0);
}
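
/*
 * Illustrative sketch (not compiled): how a consumer might tune a handle
 * returned by clnt_vc_create() using the requests handled in
 * clnt_vc_control() above.  This assumes the usual CLNT_CONTROL() and
 * CLNT_DESTROY() macros from <rpc/clnt.h>; the timeout value chosen here
 * is arbitrary and the function name is hypothetical.
 */
#if 0
static void
example_vc_tuning(CLIENT *cl)
{
	struct timeval tv;
	int one = 1;

	/* Have the socket closed when the handle is destroyed. */
	CLNT_CONTROL(cl, CLSET_FD_CLOSE, NULL);

	/* Allow waits for replies to be interrupted by signals. */
	CLNT_CONTROL(cl, CLSET_INTERRUPTIBLE, &one);

	/* Use a 30 second default timeout instead of the per-call value. */
	tv.tv_sec = 30;
	tv.tv_usec = 0;
	CLNT_CONTROL(cl, CLSET_TIMEOUT, &tv);

	/* ... issue calls through the cl_call method or a higher layer ... */

	CLNT_DESTROY(cl);
}
#endif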