/*	$NetBSD: svc_vc.c,v 1.7 2000/08/03 00:01:53 fvdl Exp $	*/

/*
 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
 * unrestricted use provided that this legend is included on all tape
 * media and as a part of the software program in whole or part.  Users
 * may copy or modify Sun RPC without charge, but are not authorized
 * to license or distribute it to anyone else except as part of a product or
 * program developed by the user.
 *
 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
 *
 * Sun RPC is provided with no support and without any obligation on the
 * part of Sun Microsystems, Inc. to assist in its use, correction,
 * modification or enhancement.
 *
 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
 * OR ANY PART THEREOF.
 *
 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
 * or profits or other special, indirect and consequential damages, even if
 * Sun has been advised of the possibility of such damages.
 *
 * Sun Microsystems, Inc.
 * 2550 Garcia Avenue
 * Mountain View, California  94043
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)svc_tcp.c 1.21 87/08/11 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)svc_tcp.c	2.2 88/08/01 4.0 RPCSRC";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * svc_vc.c, server side for connection-oriented RPC.
 *
 * Actually implements two flavors of transporter -
 * a TCP rendezvouser (a listener and connection establisher)
 * and a record/TCP stream.
 */
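
/*
 * The rendezvous transport wraps the listening socket: the socket
 * upcall marks it active, and svc_vc_rendezvous_recv() accepts any
 * pending connection, wrapping the new socket in a per-connection
 * transport (svc_vc_ops) via svc_vc_create_conn().  The per-connection
 * transport then reassembles record-marked RPC requests from the byte
 * stream in svc_vc_recv() and sends replies in svc_vc_reply().
 */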

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <net/vnet.h>

#include <netinet/tcp.h>

#include <rpc/rpc.h>

#include <rpc/rpc_com.h>

#include <security/mac/mac_framework.h>

static bool_t svc_vc_rendezvous_recv(SVCXPRT *, struct rpc_msg *,
    struct sockaddr **, struct mbuf **);
static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *);
static void svc_vc_rendezvous_destroy(SVCXPRT *);
static bool_t svc_vc_null(void);
static void svc_vc_destroy(SVCXPRT *);
static enum xprt_stat svc_vc_stat(SVCXPRT *);
static bool_t svc_vc_recv(SVCXPRT *, struct rpc_msg *,
    struct sockaddr **, struct mbuf **);
static bool_t svc_vc_reply(SVCXPRT *, struct rpc_msg *,
    struct sockaddr *, struct mbuf *);
static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in);
static bool_t svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq,
    void *in);
static SVCXPRT *svc_vc_create_conn(SVCPOOL *pool, struct socket *so,
    struct sockaddr *raddr);
static int svc_vc_accept(struct socket *head, struct socket **sop);
static int svc_vc_soupcall(struct socket *so, void *arg, int waitflag);

static struct xp_ops svc_vc_rendezvous_ops = {
	.xp_recv = svc_vc_rendezvous_recv,
	.xp_stat = svc_vc_rendezvous_stat,
	.xp_reply = (bool_t (*)(SVCXPRT *, struct rpc_msg *,
	    struct sockaddr *, struct mbuf *))svc_vc_null,
	.xp_destroy = svc_vc_rendezvous_destroy,
	.xp_control = svc_vc_rendezvous_control
};

static struct xp_ops svc_vc_ops = {
	.xp_recv = svc_vc_recv,
	.xp_stat = svc_vc_stat,
	.xp_reply = svc_vc_reply,
	.xp_destroy = svc_vc_destroy,
	.xp_control = svc_vc_control
};

struct cf_conn {	/* kept in xprt->xp_p1 for actual connection */
	enum xprt_stat strm_stat;
	struct mbuf *mpending;	/* unparsed data read from the socket */
	struct mbuf *mreq;	/* current record being built from mpending */
	uint32_t resid;		/* number of bytes needed for fragment */
	bool_t eor;		/* reading last fragment of current record */
};

/*
 * Usage:
 *	xprt = svc_vc_create(pool, sock, send_buf_size, recv_buf_size);
 *
 * Creates, registers, and returns an (RPC) TCP-based transporter.
 * Once *xprt is initialized, it is registered as a transporter
 * (see svc.h, xprt_register).  This routine returns
 * NULL if a problem occurred.
 *
 * The socket passed in is expected to be bound, but
 * not yet connected.
 *
 * Since streams do buffered I/O similar to stdio, the caller can specify
 * how big the send and receive buffers are via the sendsize and recvsize
 * parameters; 0 => use the system default.
 */
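
#if 0
/*
 * Hypothetical caller, sketch only - not compiled.  Shows how a kernel
 * RPC service might hand a bound (but not connected, and not yet
 * listening - svc_vc_create() calls solisten() itself) socket to
 * svc_vc_create().  The helper name and the pool name "example" are
 * illustrative, and error handling is abbreviated.
 */
static SVCXPRT *
example_create_tcp_transport(struct sockaddr *bindaddr, struct thread *td)
{
	SVCPOOL *pool;
	struct socket *so;
	SVCXPRT *xprt;
	int error;

	pool = svcpool_create("example", NULL);
	error = socreate(bindaddr->sa_family, &so, SOCK_STREAM, 0,
	    td->td_ucred, td);
	if (error)
		return (NULL);
	error = sobind(so, bindaddr, td);
	if (error) {
		soclose(so);
		return (NULL);
	}
	/* 0, 0 => use the system default buffer sizes. */
	xprt = svc_vc_create(pool, so, 0, 0);
	if (xprt == NULL)
		soclose(so);
	return (xprt);
}
#endif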

SVCXPRT *
svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
    size_t recvsize)
{
	SVCXPRT *xprt;
	struct sockaddr *sa;
	int error;

	if (so->so_state & SS_ISCONNECTED) {
		error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
		if (error)
			return (NULL);
		xprt = svc_vc_create_conn(pool, so, sa);
		free(sa, M_SONAME);
		return (xprt);
	}

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = NULL;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_vc_rendezvous_ops;

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	if (error) {
		CURVNET_RESTORE();
		goto cleanup_svc_vc_create;
	}

	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	solisten(so, SOMAXCONN, curthread);
	CURVNET_RESTORE();

	SOCKBUF_LOCK(&so->so_rcv);
	xprt->xp_upcallset = 1;
	soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
	SOCKBUF_UNLOCK(&so->so_rcv);

	return (xprt);
cleanup_svc_vc_create:
	if (xprt)
		svc_xprt_free(xprt);
	return (NULL);
}

/*
 * Create a new transport for a socket obtained via soaccept().
 */
SVCXPRT *
svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
{
	SVCXPRT *xprt = NULL;
	struct cf_conn *cd = NULL;
	struct sockaddr *sa = NULL;
	struct sockopt opt;
	int one = 1;
	int error;

	bzero(&opt, sizeof(struct sockopt));
	opt.sopt_dir = SOPT_SET;
	opt.sopt_level = SOL_SOCKET;
	opt.sopt_name = SO_KEEPALIVE;
	opt.sopt_val = &one;
	opt.sopt_valsize = sizeof(one);
	CURVNET_SET(so->so_vnet);
	error = sosetopt(so, &opt);
	if (error) {
		CURVNET_RESTORE();
		return (NULL);
	}

	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
		bzero(&opt, sizeof(struct sockopt));
		opt.sopt_dir = SOPT_SET;
		opt.sopt_level = IPPROTO_TCP;
		opt.sopt_name = TCP_NODELAY;
		opt.sopt_val = &one;
		opt.sopt_valsize = sizeof(one);
		error = sosetopt(so, &opt);
		if (error) {
			CURVNET_RESTORE();
			return (NULL);
		}
	}
	CURVNET_RESTORE();

	cd = mem_alloc(sizeof(*cd));
	cd->strm_stat = XPRT_IDLE;

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = cd;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_vc_ops;

	/*
	 * See http://www.connectathon.org/talks96/nfstcp.pdf - client
	 * has a 5 minute timer, server has a 6 minute timer.
	 */
	xprt->xp_idletimeout = 6 * 60;

	memcpy(&xprt->xp_rtaddr, raddr, raddr->sa_len);

	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	if (error)
		goto cleanup_svc_vc_create;

	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	SOCKBUF_LOCK(&so->so_rcv);
	xprt->xp_upcallset = 1;
	soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * Throw the transport into the active list in case it already
	 * has some data buffered.
	 */
	sx_xlock(&xprt->xp_lock);
	xprt_active(xprt);
	sx_xunlock(&xprt->xp_lock);

	return (xprt);
cleanup_svc_vc_create:
	if (xprt) {
		mem_free(xprt, sizeof(*xprt));
	}
	if (cd)
		mem_free(cd, sizeof(*cd));
	return (NULL);
}
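
/*
 * Note that svc_vc_create_conn() is reached on two paths: directly
 * from svc_vc_create() when the caller hands in an already-connected
 * socket, and from svc_vc_rendezvous_recv() below for each socket
 * accepted on a listening transport.
 */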

/*
 * This does all of the accept except the final call to soaccept. The
 * caller will call soaccept after dropping its locks (soaccept may
 * call malloc).
 */
int
svc_vc_accept(struct socket *head, struct socket **sop)
{
	int error = 0;
	struct socket *so;

	if ((head->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto done;
	}
#ifdef MAC
	error = mac_socket_check_accept(curthread->td_ucred, head);
	if (error != 0)
		goto done;
#endif
	ACCEPT_LOCK();
	if (TAILQ_EMPTY(&head->so_comp)) {
		ACCEPT_UNLOCK();
		error = EWOULDBLOCK;
		goto done;
	}
	so = TAILQ_FIRST(&head->so_comp);
	KASSERT(!(so->so_qstate & SQ_INCOMP), ("svc_vc_accept: so SQ_INCOMP"));
	KASSERT(so->so_qstate & SQ_COMP, ("svc_vc_accept: so not SQ_COMP"));

	/*
	 * Before changing the flags on the socket, we have to bump the
	 * reference count.  Otherwise, if the protocol calls sofree(),
	 * the socket will be released due to a zero refcount.
	 * XXX might not need soref() since this is simpler than kern_accept.
	 */
	SOCK_LOCK(so);			/* soref() and so_state update */
	soref(so);			/* file descriptor reference */

	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;
	so->so_state |= (head->so_state & SS_NBIO);
	so->so_qstate &= ~SQ_COMP;
	so->so_head = NULL;

	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	*sop = so;

	/* connection has been removed from the listen queue */
	KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
done:
	return (error);
}

/*ARGSUSED*/
static bool_t
svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct socket *so = NULL;
	struct sockaddr *sa = NULL;
	int error;
	SVCXPRT *new_xprt;

	/*
	 * The socket upcall calls xprt_active() which will eventually
	 * cause the server to call us here.  We attempt to accept a
	 * connection from the socket and turn it into a new
	 * transport.  If the accept fails, we have drained all pending
	 * connections so we call xprt_inactive().
	 */
	sx_xlock(&xprt->xp_lock);

	error = svc_vc_accept(xprt->xp_socket, &so);

	if (error == EWOULDBLOCK) {
		/*
		 * We must re-test for new connections after taking
		 * the lock to protect us in the case where a new
		 * connection arrives after our call to accept fails
		 * with EWOULDBLOCK.  The pool lock protects us from
		 * racing the upcall after our TAILQ_EMPTY() call
		 * returns false.
		 */
		ACCEPT_LOCK();
		mtx_lock(&xprt->xp_pool->sp_lock);
		if (TAILQ_EMPTY(&xprt->xp_socket->so_comp))
			xprt_inactive_locked(xprt);
		mtx_unlock(&xprt->xp_pool->sp_lock);
		ACCEPT_UNLOCK();
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	if (error) {
		SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
		if (xprt->xp_upcallset) {
			xprt->xp_upcallset = 0;
			soupcall_clear(xprt->xp_socket, SO_RCV);
		}
		SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
		xprt_inactive(xprt);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	sx_xunlock(&xprt->xp_lock);

	sa = NULL;
	error = soaccept(so, &sa);

	if (error) {
		/*
		 * XXX not sure if we need to call sofree or soclose here.
		 */
		if (sa)
			free(sa, M_SONAME);
		return (FALSE);
	}

	/*
	 * svc_vc_create_conn will call xprt_register - we don't need
	 * to do anything with the new connection except dereference it.
	 */
	new_xprt = svc_vc_create_conn(xprt->xp_pool, so, sa);
	if (!new_xprt) {
		soclose(so);
	} else {
		SVC_RELEASE(new_xprt);
	}

	free(sa, M_SONAME);

	return (FALSE); /* there is never an rpc msg to be processed */
}

/*ARGSUSED*/
static enum xprt_stat
svc_vc_rendezvous_stat(SVCXPRT *xprt)
{

	return (XPRT_IDLE);
}

static void
svc_vc_destroy_common(SVCXPRT *xprt)
{
	SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
	if (xprt->xp_upcallset) {
		xprt->xp_upcallset = 0;
		soupcall_clear(xprt->xp_socket, SO_RCV);
	}
	SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);

	sx_destroy(&xprt->xp_lock);
	if (xprt->xp_socket)
		(void)soclose(xprt->xp_socket);

	if (xprt->xp_netid)
		(void) mem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1);
	svc_xprt_free(xprt);
}

static void
svc_vc_rendezvous_destroy(SVCXPRT *xprt)
{

	svc_vc_destroy_common(xprt);
}

static void
svc_vc_destroy(SVCXPRT *xprt)
{
	struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;

	svc_vc_destroy_common(xprt);

	if (cd->mreq)
		m_freem(cd->mreq);
	if (cd->mpending)
		m_freem(cd->mpending);
	mem_free(cd, sizeof(*cd));
}

/*ARGSUSED*/
static bool_t
svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in)
{
	return (FALSE);
}

static bool_t
svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq, void *in)
{

	return (FALSE);
}

static enum xprt_stat
svc_vc_stat(SVCXPRT *xprt)
{
	struct cf_conn *cd;
	struct mbuf *m;
	size_t n;

	cd = (struct cf_conn *)(xprt->xp_p1);

	if (cd->strm_stat == XPRT_DIED)
		return (XPRT_DIED);

	/*
	 * Return XPRT_MOREREQS if we have buffered data and we are
	 * mid-record or if we have enough data for a record
	 * marker.  Since this is only a hint, we read mpending and
	 * resid outside the lock.  We do need to take the lock if we
	 * have to traverse the mbuf chain.
	 */
	if (cd->mpending) {
		if (cd->resid)
			return (XPRT_MOREREQS);
		n = 0;
		sx_xlock(&xprt->xp_lock);
		m = cd->mpending;
		while (m && n < sizeof(uint32_t)) {
			n += m->m_len;
			m = m->m_next;
		}
		sx_xunlock(&xprt->xp_lock);
		if (n >= sizeof(uint32_t))
			return (XPRT_MOREREQS);
	}

	if (soreadable(xprt->xp_socket))
		return (XPRT_MOREREQS);

	return (XPRT_IDLE);
}

static bool_t
svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
	struct uio uio;
	struct mbuf *m;
	XDR xdrs;
	int error, rcvflag;

	/*
	 * Serialise access to the socket and our own record parsing
	 * state.
	 */
	sx_xlock(&xprt->xp_lock);

	for (;;) {
		/*
		 * If we have an mbuf chain in cd->mpending, try to parse a
		 * record from it, leaving the result in cd->mreq.  If we don't
		 * have a complete record, leave the partial result in
		 * cd->mreq and try to read more from the socket.
		 */
		if (cd->mpending) {
			/*
			 * If cd->resid is non-zero, we have part of the
			 * record already, otherwise we are expecting a record
			 * marker.
			 */
			if (!cd->resid) {
				/*
				 * See if there is enough data buffered to
				 * make up a record marker.  Make sure we can
				 * handle the case where the record marker is
				 * split across more than one mbuf.
				 */
				size_t n = 0;
				uint32_t header;

				m = cd->mpending;
				while (n < sizeof(uint32_t) && m) {
					n += m->m_len;
					m = m->m_next;
				}
				if (n < sizeof(uint32_t))
					goto readmore;
				if (cd->mpending->m_len < sizeof(uint32_t))
					cd->mpending = m_pullup(cd->mpending,
					    sizeof(uint32_t));
				memcpy(&header, mtod(cd->mpending, uint32_t *),
				    sizeof(header));
				header = ntohl(header);
				cd->eor = (header & 0x80000000) != 0;
				cd->resid = header & 0x7fffffff;
				m_adj(cd->mpending, sizeof(uint32_t));
			}

			/*
			 * Start pulling off mbufs from cd->mpending
			 * until we either have a complete record or
			 * we run out of data.  We use m_split to pull
			 * data - it will pull as much as possible and
			 * split the last mbuf if necessary.
			 */
			while (cd->mpending && cd->resid) {
				m = cd->mpending;
				if (cd->mpending->m_next
				    || cd->mpending->m_len > cd->resid)
					cd->mpending = m_split(cd->mpending,
					    cd->resid, M_WAIT);
				else
					cd->mpending = NULL;
				if (cd->mreq)
					m_last(cd->mreq)->m_next = m;
				else
					cd->mreq = m;
				while (m) {
					cd->resid -= m->m_len;
					m = m->m_next;
				}
			}

			/*
			 * If cd->resid is zero now, we have managed to
			 * receive a record fragment from the stream.  Check
			 * for the end-of-record mark to see if we need more.
			 */
			if (cd->resid == 0) {
				if (!cd->eor)
					continue;

				/*
				 * Success - we have a complete record in
				 * cd->mreq.
				 */
				xdrmbuf_create(&xdrs, cd->mreq, XDR_DECODE);
				cd->mreq = NULL;
				sx_xunlock(&xprt->xp_lock);

				if (! xdr_callmsg(&xdrs, msg)) {
					XDR_DESTROY(&xdrs);
					return (FALSE);
				}

				*addrp = NULL;
				*mp = xdrmbuf_getall(&xdrs);
				XDR_DESTROY(&xdrs);

				return (TRUE);
			}
		}

readmore:
		/*
		 * The socket upcall calls xprt_active() which will eventually
		 * cause the server to call us here.  We attempt to
		 * read as much as possible from the socket and put
		 * the result in cd->mpending.  If the read fails,
		 * we have drained both cd->mpending and the socket so
		 * we can call xprt_inactive().
		 */
		uio.uio_resid = 1000000000;
		uio.uio_td = curthread;
		m = NULL;
		rcvflag = MSG_DONTWAIT;
		CURVNET_SET(xprt->xp_socket->so_vnet);
		error = soreceive(xprt->xp_socket, NULL, &uio, &m, NULL,
		    &rcvflag);
		CURVNET_RESTORE();

		if (error == EWOULDBLOCK) {
			/*
			 * We must re-test for readability after
			 * taking the lock to protect us in the case
			 * where a new packet arrives on the socket
			 * after our call to soreceive fails with
			 * EWOULDBLOCK.  The pool lock protects us from
			 * racing the upcall after our soreadable()
			 * call returns false.
			 */
			mtx_lock(&xprt->xp_pool->sp_lock);
			if (!soreadable(xprt->xp_socket))
				xprt_inactive_locked(xprt);
			mtx_unlock(&xprt->xp_pool->sp_lock);
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (error) {
			SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
			if (xprt->xp_upcallset) {
				xprt->xp_upcallset = 0;
				soupcall_clear(xprt->xp_socket, SO_RCV);
			}
			SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
			xprt_inactive(xprt);
			cd->strm_stat = XPRT_DIED;
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (!m) {
			/*
			 * EOF - the other end has closed the socket.
			 */
			xprt_inactive(xprt);
			cd->strm_stat = XPRT_DIED;
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (cd->mpending)
			m_last(cd->mpending)->m_next = m;
		else
			cd->mpending = m;
	}
}

static bool_t
svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr *addr, struct mbuf *m)
{
	XDR xdrs;
	struct mbuf *mrep;
	bool_t stat = TRUE;
	int error;

	/*
	 * Leave space for record mark.
	 */
	MGETHDR(mrep, M_WAIT, MT_DATA);
	mrep->m_len = 0;
	mrep->m_data += sizeof(uint32_t);

	xdrmbuf_create(&xdrs, mrep, XDR_ENCODE);

	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
	    msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		if (!xdr_replymsg(&xdrs, msg))
			stat = FALSE;
		else
			xdrmbuf_append(&xdrs, m);
	} else {
		stat = xdr_replymsg(&xdrs, msg);
	}

	if (stat) {
		m_fixhdr(mrep);

		/*
		 * Prepend a record marker containing the reply length.
		 */
		M_PREPEND(mrep, sizeof(uint32_t), M_WAIT);
		*mtod(mrep, uint32_t *) =
		    htonl(0x80000000 | (mrep->m_pkthdr.len
			- sizeof(uint32_t)));
		error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL,
		    0, curthread);
		if (!error) {
			stat = TRUE;
		}
	} else {
		m_freem(mrep);
	}

	XDR_DESTROY(&xdrs);
	xprt->xp_p2 = NULL;

	return (stat);
}

static bool_t
svc_vc_null(void)
{

	return (FALSE);
}

static int
svc_vc_soupcall(struct socket *so, void *arg, int waitflag)
{
	SVCXPRT *xprt = (SVCXPRT *) arg;

	xprt_active(xprt);
	return (SU_OK);
}

#if 0
/*
 * Get the effective UID of the sending process.  Used by rpcbind, keyserv
 * and rpc.yppasswdd on AF_LOCAL.
 */
int
__rpc_get_local_uid(SVCXPRT *transp, uid_t *uid) {
	int sock, ret;
	gid_t egid;
	uid_t euid;
	struct sockaddr *sa;

	sock = transp->xp_fd;
	sa = (struct sockaddr *)transp->xp_rtaddr;
	if (sa->sa_family == AF_LOCAL) {
		ret = getpeereid(sock, &euid, &egid);
		if (ret == 0)
			*uid = euid;
		return (ret);
	} else
		return (-1);
}
#endif
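
#if 0
/*
 * Illustrative sketch only - not compiled.  Shows the record-mark
 * arithmetic that svc_vc_recv() and svc_vc_reply() perform inline:
 * each fragment on a connection-oriented transport is preceded by a
 * 32-bit big-endian word whose top bit flags the last fragment of a
 * record and whose low 31 bits give the fragment length (see the
 * record marking standard, RFC 5531).
 */
static uint32_t
rm_encode(uint32_t fraglen, bool_t lastfrag)
{

	return (htonl((lastfrag ? 0x80000000 : 0) | (fraglen & 0x7fffffff)));
}

static void
rm_decode(uint32_t mark, uint32_t *fraglen, bool_t *lastfrag)
{
	uint32_t header = ntohl(mark);

	*lastfrag = (header & 0x80000000) != 0;
	*fraglen = header & 0x7fffffff;
}
#endif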