/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */

/*
 * Portions of this source code were derived from Berkeley
 * 4.3 BSD under license from the Regents of the University of
 * California.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Server side for Connection Oriented RPC.
 *
 * Actually implements two flavors of transporter -
 * a rendezvouser (a listener and connection establisher)
 * and a record stream.
 */

#include "mt.h"
#include "rpc_mt.h"
#include <stdio.h>
#include <stdlib.h>
#include <rpc/rpc.h>
#include <sys/types.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/mkdev.h>
#include <sys/poll.h>
#include <syslog.h>
#include <rpc/nettype.h>
#include <tiuser.h>
#include <string.h>
#include <stropts.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/timod.h>
#include <limits.h>

#ifndef MIN
#define	MIN(a, b)	(((a) < (b)) ? (a) : (b))
#endif

#define	CLEANUP_SIZE	1024

extern int nsvc_xdrs;
extern int __rpc_connmaxrec;
extern int __rpc_irtimeout;

extern SVCXPRT **svc_xports;
extern int __td_setnodelay(int);
extern bool_t __xdrrec_getbytes_nonblock(XDR *, enum xprt_stat *);
extern bool_t __xdrrec_set_conn_nonblock(XDR *, uint32_t);
extern int _t_do_ioctl(int, char *, int, int, int *);
extern int __rpc_legal_connmaxrec(int);
/* Structure used to initialize SVC_XP_AUTH(xprt).svc_ah_ops. */
extern struct svc_auth_ops svc_auth_any_ops;
extern void __xprt_unregister_private(const SVCXPRT *, bool_t);

static struct xp_ops *svc_vc_ops(void);
static struct xp_ops *svc_vc_rendezvous_ops(void);
static void svc_vc_destroy(SVCXPRT *);
static bool_t svc_vc_nonblock(SVCXPRT *, SVCXPRT *);
static int read_vc(SVCXPRT *, caddr_t, int);
static int write_vc(SVCXPRT *, caddr_t, int);
static SVCXPRT *makefd_xprt(int, uint_t, uint_t, t_scalar_t, char *);
static bool_t fd_is_dead(int);
static void update_nonblock_timestamps(SVCXPRT *);

struct cf_rendezvous {	/* kept in xprt->xp_p1 for rendezvouser */
	uint_t sendsize;
	uint_t recvsize;
	struct t_call *t_call;
	struct t_bind *t_bind;
	t_scalar_t cf_tsdu;
	char *cf_cache;
	int tcp_flag;
	int tcp_keepalive;
	int cf_connmaxrec;
};

struct cf_conn {	/* kept in xprt->xp_p1 for actual connection */
	uint_t sendsize;
	uint_t recvsize;
	enum xprt_stat strm_stat;
	uint32_t x_id;
	t_scalar_t cf_tsdu;
	XDR xdrs;
	char *cf_cache;
	char verf_body[MAX_AUTH_BYTES];
	bool_t cf_conn_nonblock;
	time_t cf_conn_nonblock_timestamp;
};

static int t_rcvall(int, char *, int);
static int t_rcvnonblock(SVCXPRT *, caddr_t, int);
static void svc_timeout_nonblock_xprt_and_LRU(bool_t);

extern int __xdrrec_setfirst(XDR *);
extern int __xdrrec_resetfirst(XDR *);
extern int __is_xdrrec_first(XDR *);

void __svc_nisplus_enable_timestamps(void);
void __svc_timeout_nonblock_xprt(void);

/*
 * This is intended as a performance improvement on the old string handling
 * stuff by moving read-only data into the text segment.
 * Format = <routine> : <error>
 */

static const char errstring[] = " %s : %s";

/* Routine names */

static const char svc_vc_create_str[] = "svc_vc_create";
static const char svc_fd_create_str[] = "svc_fd_create";
static const char makefd_xprt_str[] = "svc_vc_create: makefd_xprt ";
static const char rendezvous_request_str[] = "rendezvous_request";
static const char svc_vc_fderr[] =
	"fd > FD_SETSIZE; Use rpc_control(RPC_SVC_USE_POLLFD,...);";
static const char do_accept_str[] = "do_accept";

/* error messages */

static const char no_mem_str[] = "out of memory";
static const char no_tinfo_str[] = "could not get transport information";
static const char no_fcntl_getfl_str[] = "could not get status flags and modes";
static const char no_nonblock_str[] = "could not set transport non-blocking";

/*
 * Records a timestamp when data comes in on a descriptor. This is
 * only used if timestamps are enabled with __svc_nisplus_enable_timestamps().
 */
static long *timestamps;
static int ntimestamps;		/* keep track of how many timestamps */
static mutex_t timestamp_lock = DEFAULTMUTEX;

/*
 * Used to determine whether the time-out logic should be executed.
 */
static bool_t check_nonblock_timestamps = FALSE;

void
svc_vc_xprtfree(SVCXPRT *xprt)
{
	/* LINTED pointer alignment */
	SVCXPRT_EXT *xt = xprt ? SVCEXT(xprt) : NULL;
	struct cf_rendezvous *r = xprt ?
	    /* LINTED pointer alignment */
	    (struct cf_rendezvous *)xprt->xp_p1 : NULL;

	if (!xprt)
		return;

	if (xprt->xp_tp)
		free(xprt->xp_tp);
	if (xprt->xp_netid)
		free(xprt->xp_netid);
	if (xt && (xt->parent == NULL)) {
		if (xprt->xp_ltaddr.buf)
			free(xprt->xp_ltaddr.buf);
		if (xprt->xp_rtaddr.buf)
			free(xprt->xp_rtaddr.buf);
	}
	if (r) {
		if (r->t_call)
			(void) t_free((char *)r->t_call, T_CALL);
		if (r->t_bind)
			(void) t_free((char *)r->t_bind, T_BIND);
		free(r);
	}
	svc_xprt_free(xprt);
}

/*
 * Usage:
 *	xprt = svc_vc_create(fd, sendsize, recvsize);
 * Since connection streams do buffered I/O similar to stdio, the caller
 * can specify how big the send and receive buffers are. If recvsize
 * or sendsize is 0, defaults will be chosen.
 * fd should be open and bound.
 */
SVCXPRT *
svc_vc_create_private(int fd, uint_t sendsize, uint_t recvsize)
{
	struct cf_rendezvous *r;
	SVCXPRT *xprt;
	struct t_info tinfo;

	if (RPC_FD_NOTIN_FDSET(fd)) {
		errno = EBADF;
		t_errno = TBADF;
		(void) syslog(LOG_ERR, errstring, svc_vc_create_str,
		    svc_vc_fderr);
		return (NULL);
	}
	if ((xprt = svc_xprt_alloc()) == NULL) {
		(void) syslog(LOG_ERR, errstring,
		    svc_vc_create_str, no_mem_str);
		return (NULL);
	}
	/* LINTED pointer alignment */
	svc_flags(xprt) |= SVC_RENDEZVOUS;

	r = calloc(1, sizeof (*r));
	if (r == NULL) {
		(void) syslog(LOG_ERR, errstring,
		    svc_vc_create_str, no_mem_str);
		svc_vc_xprtfree(xprt);
		return (NULL);
	}
	if (t_getinfo(fd, &tinfo) == -1) {
		char errorstr[100];

		__tli_sys_strerror(errorstr, sizeof (errorstr),
		    t_errno, errno);
		(void) syslog(LOG_ERR, "%s : %s : %s",
		    svc_vc_create_str, no_tinfo_str, errorstr);
		free(r);
		svc_vc_xprtfree(xprt);
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	r->sendsize = __rpc_get_t_size((int)sendsize, tinfo.tsdu);
	r->recvsize = __rpc_get_t_size((int)recvsize, tinfo.tsdu);
	if ((r->sendsize == 0) || (r->recvsize == 0)) {
		syslog(LOG_ERR,
		    "svc_vc_create: transport does not support "
		    "data transfer");
		free(r);
		svc_vc_xprtfree(xprt);
		return (NULL);
	}

	/* LINTED pointer alignment */
	r->t_call = (struct t_call *)t_alloc(fd, T_CALL, T_ADDR | T_OPT);
	if (r->t_call == NULL) {
		(void) syslog(LOG_ERR, errstring,
		    svc_vc_create_str, no_mem_str);
		free(r);
		svc_vc_xprtfree(xprt);
		return (NULL);
	}

	/* LINTED pointer alignment */
	r->t_bind = (struct t_bind *)t_alloc(fd, T_BIND, T_ADDR);
	if (r->t_bind == NULL) {
		(void) syslog(LOG_ERR, errstring,
		    svc_vc_create_str, no_mem_str);
		(void) t_free((char *)r->t_call, T_CALL);
		free(r);
		svc_vc_xprtfree(xprt);
		return (NULL);
	}

	r->cf_tsdu = tinfo.tsdu;
	r->tcp_flag = FALSE;
	r->tcp_keepalive = FALSE;
	r->cf_connmaxrec = __rpc_connmaxrec;
	xprt->xp_fd = fd;
	xprt->xp_p1 = (caddr_t)r;
	xprt->xp_p2 = NULL;
	xprt->xp_verf = _null_auth;
	xprt->xp_ops = svc_vc_rendezvous_ops();
	/* LINTED pointer alignment */
	SVC_XP_AUTH(xprt).svc_ah_ops = svc_auth_any_ops;
	/* LINTED pointer alignment */
	SVC_XP_AUTH(xprt).svc_ah_private = NULL;

	return (xprt);
}

SVCXPRT *
svc_vc_create(const int fd, const uint_t sendsize, const uint_t recvsize)
{
	SVCXPRT *xprt;

	if ((xprt = svc_vc_create_private(fd, sendsize, recvsize)) != NULL)
		xprt_register(xprt);
	return (xprt);
}

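/*
 * Illustrative caller-side sketch only (hypothetical code, not part of this
 * module), assuming a TLI endpoint that the caller has already opened and
 * bound; the device name "/dev/tcp" and the error handling are assumptions:
 *
 *	int fd = t_open("/dev/tcp", O_RDWR, NULL);
 *	if (fd >= 0 && t_bind(fd, NULL, NULL) != -1) {
 *		SVCXPRT *xprt = svc_vc_create(fd, 0, 0);
 *		if (xprt == NULL)
 *			(void) t_close(fd);
 *	}
 *
 * Passing 0 for sendsize/recvsize lets __rpc_get_t_size() pick defaults
 * based on the transport's TSDU size, as noted in the Usage comment above.
 */
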
SVCXPRT *
svc_vc_xprtcopy(SVCXPRT *parent)
{
	SVCXPRT *xprt;
	struct cf_rendezvous *r, *pr;
	int fd = parent->xp_fd;

	if ((xprt = svc_xprt_alloc()) == NULL)
		return (NULL);

	/* LINTED pointer alignment */
	SVCEXT(xprt)->parent = parent;
	/* LINTED pointer alignment */
	SVCEXT(xprt)->flags = SVCEXT(parent)->flags;

	xprt->xp_fd = fd;
	xprt->xp_ops = svc_vc_rendezvous_ops();
	if (parent->xp_tp) {
		xprt->xp_tp = (char *)strdup(parent->xp_tp);
		if (xprt->xp_tp == NULL) {
			syslog(LOG_ERR, "svc_vc_xprtcopy: strdup failed");
			svc_vc_xprtfree(xprt);
			return (NULL);
		}
	}
	if (parent->xp_netid) {
		xprt->xp_netid = (char *)strdup(parent->xp_netid);
		if (xprt->xp_netid == NULL) {
			syslog(LOG_ERR, "svc_vc_xprtcopy: strdup failed");
			if (xprt->xp_tp)
				free(xprt->xp_tp);
			svc_vc_xprtfree(xprt);
			return (NULL);
		}
	}

	/*
	 * can share both local and remote address
	 */
	xprt->xp_ltaddr = parent->xp_ltaddr;
	xprt->xp_rtaddr = parent->xp_rtaddr; /* XXX - not used for rendezvous */
	xprt->xp_type = parent->xp_type;
	xprt->xp_verf = parent->xp_verf;

	if ((r = calloc(1, sizeof (*r))) == NULL) {
		svc_vc_xprtfree(xprt);
		return (NULL);
	}
	xprt->xp_p1 = (caddr_t)r;
	/* LINTED pointer alignment */
	pr = (struct cf_rendezvous *)parent->xp_p1;
	r->sendsize = pr->sendsize;
	r->recvsize = pr->recvsize;
	r->cf_tsdu = pr->cf_tsdu;
	r->cf_cache = pr->cf_cache;
	r->tcp_flag = pr->tcp_flag;
	r->tcp_keepalive = pr->tcp_keepalive;
	r->cf_connmaxrec = pr->cf_connmaxrec;
	/* LINTED pointer alignment */
	r->t_call = (struct t_call *)t_alloc(fd, T_CALL, T_ADDR | T_OPT);
	if (r->t_call == NULL) {
		svc_vc_xprtfree(xprt);
		return (NULL);
	}
	/* LINTED pointer alignment */
	r->t_bind = (struct t_bind *)t_alloc(fd, T_BIND, T_ADDR);
	if (r->t_bind == NULL) {
		svc_vc_xprtfree(xprt);
		return (NULL);
	}

	return (xprt);
}

/*
 * XXX : Used for setting flag to indicate that this is TCP
 */

/*ARGSUSED*/
int
__svc_vc_setflag(SVCXPRT *xprt, int flag)
{
	struct cf_rendezvous *r;

	/* LINTED pointer alignment */
	r = (struct cf_rendezvous *)xprt->xp_p1;
	r->tcp_flag = TRUE;
	return (1);
}

/*
 * used for the actual connection.
 */
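/*
 * Illustrative sketch only (hypothetical caller, not part of this module):
 * svc_fd_create() is normally handed a descriptor that is already connected,
 * for example one returned by t_accept() or inherited from a listener such
 * as inetd; the name "connfd" below is an assumption:
 *
 *	SVCXPRT *xprt = svc_fd_create(connfd, 0, 0);
 *	if (xprt == NULL)
 *		syslog(LOG_ERR, "svc_fd_create failed");
 *
 * As with svc_vc_create(), zero buffer sizes select transport defaults.
 */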
SVCXPRT *
svc_fd_create_private(int fd, uint_t sendsize, uint_t recvsize)
{
	struct t_info tinfo;
	SVCXPRT *dummy;
	struct netbuf tres = {0};

	if (RPC_FD_NOTIN_FDSET(fd)) {
		errno = EBADF;
		t_errno = TBADF;
		(void) syslog(LOG_ERR, errstring,
		    svc_fd_create_str, svc_vc_fderr);
		return (NULL);
	}
	if (t_getinfo(fd, &tinfo) == -1) {
		char errorstr[100];

		__tli_sys_strerror(errorstr, sizeof (errorstr),
		    t_errno, errno);
		(void) syslog(LOG_ERR, "%s : %s : %s",
		    svc_fd_create_str, no_tinfo_str, errorstr);
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	sendsize = __rpc_get_t_size((int)sendsize, tinfo.tsdu);
	recvsize = __rpc_get_t_size((int)recvsize, tinfo.tsdu);
	if ((sendsize == 0) || (recvsize == 0)) {
		syslog(LOG_ERR, errstring, svc_fd_create_str,
		    "transport does not support data transfer");
		return (NULL);
	}
	dummy = makefd_xprt(fd, sendsize, recvsize, tinfo.tsdu, NULL);
	/* NULL signifies no dup cache */
	/* Assign the local bind address */
	if (t_getname(fd, &tres, LOCALNAME) == -1)
		tres.len = 0;
	dummy->xp_ltaddr = tres;
	/* Fill in type of service */
	dummy->xp_type = tinfo.servtype;
	return (dummy);
}

SVCXPRT *
svc_fd_create(const int fd, const uint_t sendsize, const uint_t recvsize)
{
	SVCXPRT *xprt;

	if ((xprt = svc_fd_create_private(fd, sendsize, recvsize)) != NULL)
		xprt_register(xprt);
	return (xprt);
}

void
svc_fd_xprtfree(SVCXPRT *xprt)
{
	/* LINTED pointer alignment */
	SVCXPRT_EXT *xt = xprt ? SVCEXT(xprt) : NULL;
	/* LINTED pointer alignment */
	struct cf_conn *cd = xprt ? (struct cf_conn *)xprt->xp_p1 : NULL;

	if (!xprt)
		return;

	if (xprt->xp_tp)
		free(xprt->xp_tp);
	if (xprt->xp_netid)
		free(xprt->xp_netid);
	if (xt && (xt->parent == NULL)) {
		if (xprt->xp_ltaddr.buf)
			free(xprt->xp_ltaddr.buf);
		if (xprt->xp_rtaddr.buf)
			free(xprt->xp_rtaddr.buf);
	}
	if (cd) {
		XDR_DESTROY(&(cd->xdrs));
		free(cd);
	}
	if (xt && (xt->parent == NULL) && xprt->xp_p2) {
		/* LINTED pointer alignment */
		free(((struct netbuf *)xprt->xp_p2)->buf);
		free(xprt->xp_p2);
	}
	svc_xprt_free(xprt);
}

static SVCXPRT *
makefd_xprt(int fd, uint_t sendsize, uint_t recvsize, t_scalar_t tsdu,
    char *cache)
{
	SVCXPRT *xprt;
	struct cf_conn *cd;

	xprt = svc_xprt_alloc();
	if (xprt == NULL) {
		(void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
		return (NULL);
	}
	/* LINTED pointer alignment */
	svc_flags(xprt) |= SVC_CONNECTION;

	cd = malloc(sizeof (struct cf_conn));
	if (cd == NULL) {
		(void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
		svc_fd_xprtfree(xprt);
		return (NULL);
	}
	cd->sendsize = sendsize;
	cd->recvsize = recvsize;
	cd->strm_stat = XPRT_IDLE;
	cd->cf_tsdu = tsdu;
	cd->cf_cache = cache;
	cd->cf_conn_nonblock = FALSE;
	cd->cf_conn_nonblock_timestamp = 0;
	cd->xdrs.x_ops = NULL;
	xdrrec_create(&(cd->xdrs), sendsize, 0, (caddr_t)xprt,
	    (int(*)())NULL, (int(*)(void *, char *, int))write_vc);
	if (cd->xdrs.x_ops == NULL) {
		(void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
		free(cd);
		svc_fd_xprtfree(xprt);
		return (NULL);
	}

	(void) rw_wrlock(&svc_fd_lock);
	if (svc_xdrs == NULL) {
		svc_xdrs = calloc(FD_INCREMENT, sizeof (XDR *));
		if (svc_xdrs == NULL) {
			(void) syslog(LOG_ERR, errstring, makefd_xprt_str,
			    no_mem_str);
			XDR_DESTROY(&(cd->xdrs));
			free(cd);
			svc_fd_xprtfree(xprt);
			(void) rw_unlock(&svc_fd_lock);
			return (NULL);
		}
		nsvc_xdrs = FD_INCREMENT;
	}

	while (fd >= nsvc_xdrs) {
		XDR **tmp_xdrs = svc_xdrs;
		tmp_xdrs = realloc(svc_xdrs,
		    sizeof (XDR *) * (nsvc_xdrs + FD_INCREMENT));
		if (tmp_xdrs == NULL) {
			(void) syslog(LOG_ERR, errstring, makefd_xprt_str,
			    no_mem_str);
			XDR_DESTROY(&(cd->xdrs));
			free(cd);
			svc_fd_xprtfree(xprt);
			(void) rw_unlock(&svc_fd_lock);
			return (NULL);
		}

		svc_xdrs = tmp_xdrs;
		/* initialize the new entries beyond the old array to 0 */
		(void) memset(&svc_xdrs[nsvc_xdrs], 0,
		    sizeof (XDR *) * FD_INCREMENT);
		nsvc_xdrs += FD_INCREMENT;
	}

	if (svc_xdrs[fd] != NULL) {
		XDR_DESTROY(svc_xdrs[fd]);
	} else if ((svc_xdrs[fd] = malloc(sizeof (XDR))) == NULL) {
		(void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
		XDR_DESTROY(&(cd->xdrs));
		free(cd);
		svc_fd_xprtfree(xprt);
		(void) rw_unlock(&svc_fd_lock);
		return (NULL);
	}
	(void) memset(svc_xdrs[fd], 0, sizeof (XDR));
	xdrrec_create(svc_xdrs[fd], 0, recvsize, (caddr_t)xprt,
	    (int(*)(void *, char *, int))read_vc, (int(*)())NULL);
	if (svc_xdrs[fd]->x_ops == NULL) {
		free(svc_xdrs[fd]);
		svc_xdrs[fd] = NULL;
		XDR_DESTROY(&(cd->xdrs));
		free(cd);
		svc_fd_xprtfree(xprt);
		(void) rw_unlock(&svc_fd_lock);
		return (NULL);
	}
	(void) rw_unlock(&svc_fd_lock);

	xprt->xp_p1 = (caddr_t)cd;
	xprt->xp_p2 = NULL;
	xprt->xp_verf.oa_base = cd->verf_body;
	xprt->xp_ops = svc_vc_ops();	/* truly deals with calls */
	xprt->xp_fd = fd;
	return (xprt);
}

SVCXPRT *
svc_fd_xprtcopy(SVCXPRT *parent)
{
	SVCXPRT *xprt;
	struct cf_conn *cd, *pcd;

	if ((xprt = svc_xprt_alloc()) == NULL)
		return (NULL);

	/* LINTED pointer alignment */
	SVCEXT(xprt)->parent = parent;
	/* LINTED pointer alignment */
	SVCEXT(xprt)->flags = SVCEXT(parent)->flags;

	xprt->xp_fd = parent->xp_fd;
	xprt->xp_ops = svc_vc_ops();
	if (parent->xp_tp) {
		xprt->xp_tp = (char *)strdup(parent->xp_tp);
		if (xprt->xp_tp == NULL) {
			syslog(LOG_ERR, "svc_fd_xprtcopy: strdup failed");
			svc_fd_xprtfree(xprt);
			return (NULL);
		}
	}
	if (parent->xp_netid) {
		xprt->xp_netid = (char *)strdup(parent->xp_netid);
		if (xprt->xp_netid == NULL) {
			syslog(LOG_ERR, "svc_fd_xprtcopy: strdup failed");
			if (xprt->xp_tp)
				free(xprt->xp_tp);
			svc_fd_xprtfree(xprt);
			return (NULL);
		}
	}
	/*
	 * share local and remote addresses with parent
	 */
	xprt->xp_ltaddr = parent->xp_ltaddr;
	xprt->xp_rtaddr = parent->xp_rtaddr;
	xprt->xp_type = parent->xp_type;

	if ((cd = malloc(sizeof (struct cf_conn))) == NULL) {
		svc_fd_xprtfree(xprt);
		return (NULL);
	}
	/* LINTED pointer alignment */
	pcd = (struct cf_conn *)parent->xp_p1;
	cd->sendsize = pcd->sendsize;
	cd->recvsize = pcd->recvsize;
	cd->strm_stat = pcd->strm_stat;
	cd->x_id = pcd->x_id;
	cd->cf_tsdu = pcd->cf_tsdu;
	cd->cf_cache = pcd->cf_cache;
	cd->cf_conn_nonblock = pcd->cf_conn_nonblock;
	cd->cf_conn_nonblock_timestamp = pcd->cf_conn_nonblock_timestamp;
	cd->xdrs.x_ops = NULL;
	xdrrec_create(&(cd->xdrs), cd->sendsize, 0, (caddr_t)xprt,
	    (int(*)())NULL, (int(*)(void *, char *, int))write_vc);
	if (cd->xdrs.x_ops == NULL) {
		free(cd);
		svc_fd_xprtfree(xprt);
		return (NULL);
	}
	xprt->xp_verf.oa_base = cd->verf_body;
	xprt->xp_p1 = (char *)cd;
	xprt->xp_p2 = parent->xp_p2;	/* shared */

	return (xprt);
}

/*
 * This routine is called by svc_getreqset() when a packet is received.
 * The listener process creates another end point on which the actual
 * connection is carried. It returns FALSE to indicate that it was
 * not an RPC packet (falsely though), but as a side effect creates
 * another endpoint which is also registered, which then always
 * has a request ready to be served.
 */
/* ARGSUSED1 */
static bool_t
rendezvous_request(SVCXPRT *xprt, struct rpc_msg *msg)
{
	struct cf_rendezvous *r;
	char *tpname = NULL;
	char devbuf[256];
	static void do_accept();

	/* LINTED pointer alignment */
	r = (struct cf_rendezvous *)xprt->xp_p1;

again:
	switch (t_look(xprt->xp_fd)) {
	case T_DISCONNECT:
		(void) t_rcvdis(xprt->xp_fd, NULL);
		return (FALSE);

	case T_LISTEN:

		if (t_listen(xprt->xp_fd, r->t_call) == -1) {
			if ((t_errno == TSYSERR) && (errno == EINTR))
				goto again;

			if (t_errno == TLOOK) {
				if (t_look(xprt->xp_fd) == T_DISCONNECT)
					(void) t_rcvdis(xprt->xp_fd, NULL);
			}
			return (FALSE);
		}
		break;
	default:
		return (FALSE);
	}
	/*
	 * Now create another endpoint, and accept the connection
	 * on it.
	 */

	if (xprt->xp_tp) {
		tpname = xprt->xp_tp;
	} else {
		/*
		 * If xprt->xp_tp is NULL, then try to extract the
		 * transport protocol information from the transport
		 * protocol corresponding to xprt->xp_fd
		 */
		struct netconfig *nconf;
		tpname = devbuf;
		if ((nconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type))
		    == NULL) {
			(void) syslog(LOG_ERR, errstring,
			    rendezvous_request_str,
			    "no suitable transport");
			goto err;
		}
		(void) strcpy(tpname, nconf->nc_device);
		freenetconfigent(nconf);
	}

	do_accept(xprt->xp_fd, tpname, xprt->xp_netid, r->t_call, r);

err:
	return (FALSE); /* there is never an rpc msg to be processed */
}

static void
do_accept(int srcfd, char *tpname, char *netid, struct t_call *tcp,
    struct cf_rendezvous *r)
{
	int destfd;
	struct t_call t_call;
	struct t_call *tcp2 = NULL;
	struct t_info tinfo;
	SVCXPRT *xprt = NULL;
	SVCXPRT *xprt_srcfd = NULL;
	char *option, *option_ret;
	struct opthdr *opt;
	struct t_optmgmt optreq, optret;
	int *p_optval;

	destfd = t_open(tpname, O_RDWR, &tinfo);
	if (check_nonblock_timestamps) {
		if (destfd == -1 && t_errno == TSYSERR && errno == EMFILE) {
			/*
			 * Since there are nonblocking connection xprts and
			 * too many open files, the LRU connection xprt should
			 * get destroyed in case an attacker has been creating
			 * many connections.
			 */
			(void) mutex_lock(&svc_mutex);
			svc_timeout_nonblock_xprt_and_LRU(TRUE);
			(void) mutex_unlock(&svc_mutex);
			destfd = t_open(tpname, O_RDWR, &tinfo);
		} else {
			/*
			 * Destroy/timeout all nonblock connection xprts
			 * that have not had recent activity.
			 * Do not destroy LRU xprt unless there are
			 * too many open files.
			 */
			(void) mutex_lock(&svc_mutex);
			svc_timeout_nonblock_xprt_and_LRU(FALSE);
			(void) mutex_unlock(&svc_mutex);
		}
	}
	if (destfd == -1) {
		char errorstr[100];

		__tli_sys_strerror(errorstr, sizeof (errorstr), t_errno,
		    errno);
		(void) syslog(LOG_ERR, "%s : %s : %s", do_accept_str,
		    "can't open connection", errorstr);
		(void) t_snddis(srcfd, tcp);
		return;
	}
	if (RPC_FD_NOTIN_FDSET(destfd)) {
		(void) syslog(LOG_ERR, errstring, do_accept_str,
		    svc_vc_fderr);
		(void) t_close(destfd);
		(void) t_snddis(srcfd, tcp);
		errno = EBADF;
		t_errno = TBADF;
		return;
	}
	(void) fcntl(destfd, F_SETFD, 1);	/* make it "close on exec" */
	if ((tinfo.servtype != T_COTS) && (tinfo.servtype != T_COTS_ORD)) {
		/* Not a connection oriented mode */
		(void) syslog(LOG_ERR, errstring, do_accept_str,
		    "do_accept: illegal transport");
		(void) t_close(destfd);
		(void) t_snddis(srcfd, tcp);
		return;
	}


	if (t_bind(destfd, NULL, r->t_bind) == -1) {
		char errorstr[100];

		__tli_sys_strerror(errorstr, sizeof (errorstr), t_errno,
		    errno);
		(void) syslog(LOG_ERR, " %s : %s : %s", do_accept_str,
		    "t_bind failed", errorstr);
		(void) t_close(destfd);
		(void) t_snddis(srcfd, tcp);
		return;
	}

	if (r->tcp_flag)	/* if TCP, set NODELAY flag */
		(void) __td_setnodelay(destfd);

	/*
	 * This connection is not listening, hence no need to set
	 * the qlen.
	 */

	/*
	 * XXX: The local transport chokes on its own listen
	 * options so we zero them for now
	 */
	t_call = *tcp;
	t_call.opt.len = 0;
	t_call.opt.maxlen = 0;
	t_call.opt.buf = NULL;

	while (t_accept(srcfd, destfd, &t_call) == -1) {
		char errorstr[100];

		switch (t_errno) {
		case TLOOK:
again:
			switch (t_look(srcfd)) {
			case T_CONNECT:
			case T_DATA:
			case T_EXDATA:
				/* this should not happen */
				break;

			case T_DISCONNECT:
				(void) t_rcvdis(srcfd, NULL);
				break;

			case T_LISTEN:
				if (tcp2 == NULL)
					/* LINTED pointer alignment */
					tcp2 = (struct t_call *)t_alloc(srcfd,
					    T_CALL, T_ADDR | T_OPT);
				if (tcp2 == NULL) {

					(void) t_close(destfd);
					(void) t_snddis(srcfd, tcp);
					syslog(LOG_ERR, errstring,
					    do_accept_str, no_mem_str);
					return;
					/* NOTREACHED */
				}
				if (t_listen(srcfd, tcp2) == -1) {
					switch (t_errno) {
					case TSYSERR:
						if (errno == EINTR)
							goto again;
						break;

					case TLOOK:
						goto again;
					}
					(void) t_free((char *)tcp2, T_CALL);
					(void) t_close(destfd);
					(void) t_snddis(srcfd, tcp);
					return;
					/* NOTREACHED */
				}
				do_accept(srcfd, tpname, netid, tcp2, r);
				break;

			case T_ORDREL:
				(void) t_rcvrel(srcfd);
				(void) t_sndrel(srcfd);
				break;
			}
			if (tcp2) {
				(void) t_free((char *)tcp2, T_CALL);
				tcp2 = NULL;
			}
			break;

		case TBADSEQ:
			/*
			 * This can happen if the remote side has
			 * disconnected before the connection is
			 * accepted. In this case, a disconnect
			 * should not be sent on srcfd (important!
			 * the listening fd will be hosed otherwise!).
			 * This error is not logged since this is an
			 * operational situation that is recoverable.
			 */
			(void) t_close(destfd);
			return;
			/* NOTREACHED */

		case TOUTSTATE:
			/*
			 * This can happen if the t_rcvdis() or t_rcvrel()/
			 * t_sndrel() put srcfd into the T_IDLE state.
			 */
			if (t_getstate(srcfd) == T_IDLE) {
				(void) t_close(destfd);
				(void) t_snddis(srcfd, tcp);
				return;
			}
			/* else FALL THROUGH TO */
		default:
			__tli_sys_strerror(errorstr, sizeof (errorstr),
			    t_errno, errno);
			(void) syslog(LOG_ERR,
			    "cannot accept connection: %s (current state %d)",
			    errorstr, t_getstate(srcfd));
			(void) t_close(destfd);
			(void) t_snddis(srcfd, tcp);
			return;
			/* NOTREACHED */
		}
	}

	if (r->tcp_flag && r->tcp_keepalive) {
		option = malloc(sizeof (struct opthdr) + sizeof (int));
		option_ret = malloc(sizeof (struct opthdr) + sizeof (int));
		if (option && option_ret) {
			/* LINTED pointer cast */
			opt = (struct opthdr *)option;
			opt->level = SOL_SOCKET;
			opt->name = SO_KEEPALIVE;
			opt->len = sizeof (int);
			p_optval = (int *)(opt + 1);
			*p_optval = SO_KEEPALIVE;
			optreq.opt.maxlen = optreq.opt.len =
			    sizeof (struct opthdr) + sizeof (int);
			optreq.opt.buf = (char *)option;
			optreq.flags = T_NEGOTIATE;
			optret.opt.maxlen = sizeof (struct opthdr)
			    + sizeof (int);
			optret.opt.buf = (char *)option_ret;
			(void) t_optmgmt(destfd, &optreq, &optret);
			free(option);
			free(option_ret);
		} else {
			if (option)
				free(option);
			if (option_ret)
				free(option_ret);
		}
	}


	/*
	 * make a new transporter
	 */
	xprt = makefd_xprt(destfd, r->sendsize, r->recvsize, r->cf_tsdu,
	    r->cf_cache);
	if (xprt == NULL) {
		/*
		 * makefd_xprt() returns a NULL xprt only when
		 * it's out of memory.
		 */
		goto memerr;
	}

	/*
	 * Copy the new local and remote bind information
	 */

	xprt->xp_rtaddr.len = tcp->addr.len;
	xprt->xp_rtaddr.maxlen = tcp->addr.len;
	if ((xprt->xp_rtaddr.buf = malloc(tcp->addr.len)) == NULL)
		goto memerr;
	(void) memcpy(xprt->xp_rtaddr.buf, tcp->addr.buf, tcp->addr.len);

	if (strcmp(netid, "tcp") == 0) {
		xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_in);
		if ((xprt->xp_ltaddr.buf =
		    malloc(xprt->xp_ltaddr.maxlen)) == NULL)
			goto memerr;
		if (t_getname(destfd, &xprt->xp_ltaddr, LOCALNAME) < 0) {
			(void) syslog(LOG_ERR,
			    "do_accept: t_getname for tcp failed!");
			goto xprt_err;
		}
	} else if (strcmp(netid, "tcp6") == 0) {
		xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_in6);
		if ((xprt->xp_ltaddr.buf =
		    malloc(xprt->xp_ltaddr.maxlen)) == NULL)
			goto memerr;
		if (t_getname(destfd, &xprt->xp_ltaddr, LOCALNAME) < 0) {
			(void) syslog(LOG_ERR,
			    "do_accept: t_getname for tcp6 failed!");
			goto xprt_err;
		}
	}

	xprt->xp_tp = strdup(tpname);
	xprt->xp_netid = strdup(netid);
	if ((xprt->xp_tp == NULL) ||
	    (xprt->xp_netid == NULL)) {
		goto memerr;
	}
	if (tcp->opt.len > 0) {
		struct netbuf *netptr;

		xprt->xp_p2 = malloc(sizeof (struct netbuf));

		if (xprt->xp_p2 != NULL) {
			/* LINTED pointer alignment */
			netptr = (struct netbuf *)xprt->xp_p2;

			netptr->len = tcp->opt.len;
			netptr->maxlen = tcp->opt.len;
			if ((netptr->buf = malloc(tcp->opt.len)) == NULL)
				goto memerr;
			(void) memcpy(netptr->buf, tcp->opt.buf, tcp->opt.len);
		} else
			goto memerr;
	}
	/* (void) ioctl(destfd, I_POP, NULL); */

	/*
	 * If a nonblocked connection fd has been requested,
	 * perform the necessary operations.
	 */
	xprt_srcfd = svc_xports[srcfd];
	/* LINTED pointer cast */
	if (((struct cf_rendezvous *)(xprt_srcfd->xp_p1))->cf_connmaxrec) {
		if (!svc_vc_nonblock(xprt_srcfd, xprt))
			goto xprt_err;
	}

	/*
	 * Copy the callback declared for the service to the current
	 * connection
	 */
	xprt->xp_closeclnt = xprt_srcfd->xp_closeclnt;
	xprt_register(xprt);

	return;

memerr:
	(void) syslog(LOG_ERR, errstring, do_accept_str, no_mem_str);
xprt_err:
	if (xprt)
		svc_vc_destroy(xprt);
	(void) t_close(destfd);
}

/*
 * This routine performs the necessary fcntl() operations to create
 * a nonblocked connection fd.
 * It also adjusts the sizes and allocates the buffer
 * for the nonblocked operations, and updates the associated
 * timestamp field in struct cf_conn for timeout bookkeeping.
 */
static bool_t
svc_vc_nonblock(SVCXPRT *xprt_rendezvous, SVCXPRT *xprt_conn)
{
	int nn;
	int fdconn = xprt_conn->xp_fd;
	struct cf_rendezvous *r =
	    /* LINTED pointer cast */
	    (struct cf_rendezvous *)xprt_rendezvous->xp_p1;
	/* LINTED pointer cast */
	struct cf_conn *cd = (struct cf_conn *)xprt_conn->xp_p1;
	uint32_t maxrecsz;

	if ((nn = fcntl(fdconn, F_GETFL, 0)) < 0) {
		(void) syslog(LOG_ERR, "%s : %s : %m", do_accept_str,
		    no_fcntl_getfl_str);
		return (FALSE);
	}

	if (fcntl(fdconn, F_SETFL, nn|O_NONBLOCK) != 0) {
		(void) syslog(LOG_ERR, "%s : %s : %m", do_accept_str,
		    no_nonblock_str);
		return (FALSE);
	}

	cd->cf_conn_nonblock = TRUE;
	/*
	 * If the max fragment size has not been set via
	 * rpc_control(), use the default.
	 */
	if ((maxrecsz = r->cf_connmaxrec) == 0)
		maxrecsz = r->recvsize;
	/* Set XDR stream to use non-blocking semantics. */
	if (__xdrrec_set_conn_nonblock(svc_xdrs[fdconn], maxrecsz)) {
		check_nonblock_timestamps = TRUE;
		update_nonblock_timestamps(xprt_conn);
		return (TRUE);
	}
	return (FALSE);
}

/* ARGSUSED */
static enum xprt_stat
rendezvous_stat(SVCXPRT *xprt)
{
	return (XPRT_IDLE);
}

static void
svc_vc_destroy(SVCXPRT *xprt)
{
	(void) mutex_lock(&svc_mutex);
	_svc_vc_destroy_private(xprt, TRUE);
	(void) svc_timeout_nonblock_xprt_and_LRU(FALSE);
	(void) mutex_unlock(&svc_mutex);
}

void
_svc_vc_destroy_private(SVCXPRT *xprt, bool_t lock_not_held)
{
	if (svc_mt_mode != RPC_SVC_MT_NONE) {
		/* LINTED pointer alignment */
		if (SVCEXT(xprt)->parent)
			/* LINTED pointer alignment */
			xprt = SVCEXT(xprt)->parent;
		/* LINTED pointer alignment */
		svc_flags(xprt) |= SVC_DEFUNCT;
		/* LINTED pointer alignment */
		if (SVCEXT(xprt)->refcnt > 0)
			return;
	}

	if (xprt->xp_closeclnt != NULL) {
		svc_errorhandler_t cb = xprt->xp_closeclnt;

		/*
		 * Reset the pointer here to avoid reentrance on the same
		 * SVCXPRT handle.
		 */
		xprt->xp_closeclnt = NULL;
		cb(xprt, (xprt->xp_rtaddr.len != 0));
	}

	__xprt_unregister_private(xprt, lock_not_held);
	(void) t_close(xprt->xp_fd);

	(void) mutex_lock(&timestamp_lock);
	if (timestamps && xprt->xp_fd < ntimestamps) {
		timestamps[xprt->xp_fd] = 0;
	}
	(void) mutex_unlock(&timestamp_lock);

	if (svc_mt_mode != RPC_SVC_MT_NONE) {
		svc_xprt_destroy(xprt);
	} else {
		/* LINTED pointer alignment */
		if (svc_type(xprt) == SVC_RENDEZVOUS)
			svc_vc_xprtfree(xprt);
		else
			svc_fd_xprtfree(xprt);
	}
}

/*ARGSUSED*/
static bool_t
svc_vc_control(SVCXPRT *xprt, const uint_t rq, void *in)
{
	switch (rq) {
	case SVCSET_RECVERRHANDLER:
		xprt->xp_closeclnt = (svc_errorhandler_t)in;
		return (TRUE);
	case SVCGET_RECVERRHANDLER:
		*(svc_errorhandler_t *)in = xprt->xp_closeclnt;
		return (TRUE);
	case SVCGET_XID:
		if (xprt->xp_p1 == NULL)
			return (FALSE);
		/* LINTED pointer alignment */
		*(uint32_t *)in = ((struct cf_conn *)(xprt->xp_p1))->x_id;
		return (TRUE);
	default:
		return (FALSE);
	}
}

static bool_t
rendezvous_control(SVCXPRT *xprt, const uint_t rq, void *in)
{
	struct cf_rendezvous *r;
	int tmp;

	switch (rq) {
	case SVCSET_RECVERRHANDLER:
		xprt->xp_closeclnt = (svc_errorhandler_t)in;
		return (TRUE);
	case SVCGET_RECVERRHANDLER:
		*(svc_errorhandler_t *)in = xprt->xp_closeclnt;
		return (TRUE);
	case SVCSET_KEEPALIVE:
		/* LINTED pointer cast */
		r = (struct cf_rendezvous *)xprt->xp_p1;
		if (r->tcp_flag) {
			r->tcp_keepalive = (int)(intptr_t)in;
			return (TRUE);
		}
		return (FALSE);
	case SVCSET_CONNMAXREC:
		/*
		 * Override the default maximum record size, set via
		 * rpc_control(), for this connection. Only appropriate
		 * for connection oriented transports, but is ignored for
		 * the connectionless case, so no need to check the
		 * connection type here.
		 */
		/* LINTED pointer cast */
		r = (struct cf_rendezvous *)xprt->xp_p1;
		tmp = __rpc_legal_connmaxrec(*(int *)in);
		if (r != 0 && tmp >= 0) {
			r->cf_connmaxrec = tmp;
			return (TRUE);
		}
		return (FALSE);
	case SVCGET_CONNMAXREC:
		/* LINTED pointer cast */
		r = (struct cf_rendezvous *)xprt->xp_p1;
		if (r != 0) {
			*(int *)in = r->cf_connmaxrec;
			return (TRUE);
		}
		return (FALSE);
	case SVCGET_XID:	/* fall through for now */
	default:
		return (FALSE);
	}
}

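/*
 * Illustrative sketch only (hypothetical server code, not part of this
 * module): the per-listener maximum record size handled by
 * rendezvous_control() above can be adjusted through svc_control() on the
 * rendezvous transport handle; the value 64 KB below is an assumption:
 *
 *	int maxrec = 64 * 1024;
 *	if (!svc_control(xprt, SVCSET_CONNMAXREC, &maxrec))
 *		syslog(LOG_ERR, "could not set SVCSET_CONNMAXREC");
 *
 * The value is validated by __rpc_legal_connmaxrec() and is picked up by
 * connections accepted later in do_accept()/svc_vc_nonblock().
 */
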
/*
 * All read operations time out after 35 seconds.
 * A timeout is fatal for the connection.
 * update_timestamps() is used by nisplus operations,
 * update_nonblock_timestamps() is used for nonblocked
 * connection fds.
 */
#define	WAIT_PER_TRY	35000	/* milliseconds */

static void
update_timestamps(int fd)
{
	(void) mutex_lock(&timestamp_lock);
	if (timestamps) {
		struct timeval tv;

		(void) gettimeofday(&tv, NULL);
		while (fd >= ntimestamps) {
			long *tmp_timestamps = timestamps;

			/* allocate more timestamps */
			tmp_timestamps = realloc(timestamps,
			    sizeof (long) *
			    (ntimestamps + FD_INCREMENT));
			if (tmp_timestamps == NULL) {
				(void) mutex_unlock(&timestamp_lock);
				syslog(LOG_ERR,
				    "update_timestamps: out of memory");
				return;
			}

			timestamps = tmp_timestamps;
			(void) memset(&timestamps[ntimestamps], 0,
			    sizeof (long) * FD_INCREMENT);
			ntimestamps += FD_INCREMENT;
		}
		timestamps[fd] = tv.tv_sec;
	}
	(void) mutex_unlock(&timestamp_lock);
}

static void
update_nonblock_timestamps(SVCXPRT *xprt_conn)
{
	struct timeval tv;
	/* LINTED pointer cast */
	struct cf_conn *cd = (struct cf_conn *)xprt_conn->xp_p1;

	(void) gettimeofday(&tv, NULL);
	cd->cf_conn_nonblock_timestamp = tv.tv_sec;
}

/*
 * Reads data from the vc connection.
 * Any error is fatal and the connection is closed.
 * (And a read of zero bytes is a half closed stream => error.)
 */
static int
read_vc(SVCXPRT *xprt, caddr_t buf, int len)
{
	int fd = xprt->xp_fd;
	XDR *xdrs = svc_xdrs[fd];
	struct pollfd pfd;
	int ret;

	/*
	 * Make sure the connection is not already dead.
	 */
	/* LINTED pointer alignment */
	if (svc_failed(xprt))
		return (-1);

	/* LINTED pointer cast */
	if (((struct cf_conn *)(xprt->xp_p1))->cf_conn_nonblock) {
		/*
		 * For nonblocked reads, only update the
		 * timestamps to record the activity so the
		 * connection will not be timed out.
		 * Up to "len" bytes are requested.
		 * If fewer than "len" bytes are received, the
		 * connection is poll()ed again.
		 * The poll() for the connection fd is performed
		 * in the main poll() so that all outstanding fds
		 * are polled rather than just the vc connection.
		 * Polling on only the vc connection until the entire
		 * fragment has been read can be exploited in
		 * a Denial of Service Attack such as telnet <host> 111.
		 */
		if ((len = t_rcvnonblock(xprt, buf, len)) >= 0) {
			if (len > 0) {
				update_timestamps(fd);
				update_nonblock_timestamps(xprt);
			}
			return (len);
		}
		goto fatal_err;
	}

	if (!__is_xdrrec_first(xdrs)) {

		pfd.fd = fd;
		pfd.events = MASKVAL;

		do {
			if ((ret = poll(&pfd, 1, WAIT_PER_TRY)) <= 0) {
				/*
				 * If errno is EINTR, ERESTART, or EAGAIN
				 * ignore error and repeat poll
				 */
				if (ret < 0 && (errno == EINTR ||
				    errno == ERESTART || errno == EAGAIN))
					continue;
				goto fatal_err;
			}
		} while (pfd.revents == 0);
		if (pfd.revents & POLLNVAL)
			goto fatal_err;
	}
	(void) __xdrrec_resetfirst(xdrs);
	if ((len = t_rcvall(fd, buf, len)) > 0) {
		update_timestamps(fd);
		return (len);
	}

fatal_err:
	/* LINTED pointer alignment */
	((struct cf_conn *)(xprt->xp_p1))->strm_stat = XPRT_DIED;
	/* LINTED pointer alignment */
	svc_flags(xprt) |= SVC_FAILED;
	return (-1);
}

/*
 * Requests up to "len" bytes of data.
 * Returns the number of bytes actually received, or an error indication.
 */
static int
t_rcvnonblock(SVCXPRT *xprt, caddr_t buf, int len)
{
	int fd = xprt->xp_fd;
	int flag;
	int res;

	res = t_rcv(fd, buf, (unsigned)len, &flag);
	if (res == -1) {
		switch (t_errno) {
		case TLOOK:
			switch (t_look(fd)) {
			case T_DISCONNECT:
				(void) t_rcvdis(fd, NULL);
				break;
			case T_ORDREL:
				(void) t_rcvrel(fd);
				(void) t_sndrel(fd);
				break;
			default:
				break;
			}
			break;
		case TNODATA:
			/*
			 * Either poll() lied, or the xprt/fd was closed and
			 * re-opened under our feet. Return 0, so that we go
			 * back to waiting for data.
			 */
			res = 0;
			break;
		/* Should handle TBUFOVFLW TSYSERR ? */
		default:
			break;
		}
	}
	return (res);
}

/*
 * Time out nonblocked connection fds.
 * If there has been no activity on the fd for __rpc_irtimeout
 * seconds, time out the fd by destroying its xprt.
 * If the caller gets an EMFILE error, the caller may also request
 * that the least busy xprt gets destroyed as well.
 * svc_thr_mutex is held when this is called.
 * svc_mutex is held when this is called.
 */
static void
svc_timeout_nonblock_xprt_and_LRU(bool_t destroy_lru)
{
	SVCXPRT *xprt;
	SVCXPRT *dead_xprt[CLEANUP_SIZE];
	SVCXPRT *candidate_xprt = NULL;
	struct cf_conn *cd;
	int i, fd_idx = 0, dead_idx = 0;
	struct timeval now;
	time_t lasttime, maxctime = 0;
	extern rwlock_t svc_fd_lock;

	if (!check_nonblock_timestamps)
		return;

	(void) gettimeofday(&now, NULL);
	if (svc_xports == NULL)
		return;
	/*
	 * Hold svc_fd_lock to protect
	 * svc_xports, svc_maxpollfd, svc_max_pollfd
	 */
	(void) rw_wrlock(&svc_fd_lock);
	for (;;) {
		/*
		 * Time out up to CLEANUP_SIZE connection fds per
		 * iteration of the outer loop
		 */
		for (dead_idx = 0; fd_idx < svc_max_pollfd; fd_idx++) {
			if ((xprt = svc_xports[fd_idx]) == NULL) {
				continue;
			}
			/* Only look at connection fds */
			/* LINTED pointer cast */
			if (svc_type(xprt) != SVC_CONNECTION) {
				continue;
			}
			/* LINTED pointer cast */
			cd = (struct cf_conn *)xprt->xp_p1;
			if (!cd->cf_conn_nonblock)
				continue;
			lasttime = now.tv_sec - cd->cf_conn_nonblock_timestamp;
			if (lasttime >= __rpc_irtimeout &&
			    __rpc_irtimeout != 0) {
				/* Enter in timedout/dead array */
				dead_xprt[dead_idx++] = xprt;
				if (dead_idx >= CLEANUP_SIZE)
					break;
			} else
				if (lasttime > maxctime) {
					/* Possible LRU xprt */
					candidate_xprt = xprt;
					maxctime = lasttime;
				}
		}

		for (i = 0; i < dead_idx; i++) {
			/* Still holding svc_fd_lock */
			_svc_vc_destroy_private(dead_xprt[i], FALSE);
		}

		/*
		 * If all the nonblocked fds have been checked, we're done.
		 */
		if (fd_idx++ >= svc_max_pollfd)
			break;
	}
	if ((destroy_lru) && (candidate_xprt != NULL)) {
		_svc_vc_destroy_private(candidate_xprt, FALSE);
	}
	(void) rw_unlock(&svc_fd_lock);
}
/*
 * Receive the required bytes of data, even if it is fragmented.
 */
static int
t_rcvall(int fd, char *buf, int len)
{
	int flag;
	int final = 0;
	int res;

	do {
		res = t_rcv(fd, buf, (unsigned)len, &flag);
		if (res == -1) {
			if (t_errno == TLOOK) {
				switch (t_look(fd)) {
				case T_DISCONNECT:
					(void) t_rcvdis(fd, NULL);
					break;
				case T_ORDREL:
					(void) t_rcvrel(fd);
					(void) t_sndrel(fd);
					break;
				default:
					break;
				}
			}
			break;
		}
		final += res;
		buf += res;
		len -= res;
	} while (len && (flag & T_MORE));
	return (res == -1 ? -1 : final);
}

/*
 * Writes data to the vc connection.
 * Any error is fatal and the connection is closed.
 */
static int
write_vc(SVCXPRT *xprt, caddr_t buf, int len)
{
	int i, cnt;
	int flag;
	int maxsz;
	int nonblock;
	struct pollfd pfd;

	/* LINTED pointer alignment */
	maxsz = ((struct cf_conn *)(xprt->xp_p1))->cf_tsdu;
	/* LINTED pointer cast */
	nonblock = ((struct cf_conn *)(xprt->xp_p1))->cf_conn_nonblock;
	if (nonblock && maxsz <= 0)
		maxsz = len;
	if ((maxsz == 0) || (maxsz == -1)) {
		if ((len = t_snd(xprt->xp_fd, buf, (unsigned)len,
		    (int)0)) == -1) {
			if (t_errno == TLOOK) {
				switch (t_look(xprt->xp_fd)) {
				case T_DISCONNECT:
					(void) t_rcvdis(xprt->xp_fd, NULL);
					break;
				case T_ORDREL:
					(void) t_rcvrel(xprt->xp_fd);
					(void) t_sndrel(xprt->xp_fd);
					break;
				default:
					break;
				}
			}
			/* LINTED pointer alignment */
			((struct cf_conn *)(xprt->xp_p1))->strm_stat
			    = XPRT_DIED;
			/* LINTED pointer alignment */
			svc_flags(xprt) |= SVC_FAILED;
		}
		return (len);
	}

	/*
	 * Setup for polling. We want to be able to write normal
	 * data to the transport
	 */
	pfd.fd = xprt->xp_fd;
	pfd.events = POLLWRNORM;

	/*
	 * This is for those transports which have a max size for data,
	 * and for the non-blocking case, where t_snd() may send less
	 * than requested.
	 */
	for (cnt = len, i = 0; cnt > 0; cnt -= i, buf += i) {
		flag = cnt > maxsz ? T_MORE : 0;
		if ((i = t_snd(xprt->xp_fd, buf,
		    (unsigned)MIN(cnt, maxsz), flag)) == -1) {
			if (t_errno == TLOOK) {
				switch (t_look(xprt->xp_fd)) {
				case T_DISCONNECT:
					(void) t_rcvdis(xprt->xp_fd, NULL);
					break;
				case T_ORDREL:
					(void) t_rcvrel(xprt->xp_fd);
					break;
				default:
					break;
				}
			} else if (t_errno == TFLOW) {
				/* Try again */
				i = 0;
				/* Wait till we can write to the transport */
				do {
					if (poll(&pfd, 1, WAIT_PER_TRY) < 0) {
						/*
						 * If errno is ERESTART or
						 * EAGAIN, ignore the error
						 * and repeat the poll
						 */
						if (errno == ERESTART ||
						    errno == EAGAIN)
							continue;
						else
							goto fatal_err;
					}
				} while (pfd.revents == 0);
				if (pfd.revents & (POLLNVAL | POLLERR |
				    POLLHUP))
					goto fatal_err;
				continue;
			}
fatal_err:
			/* LINTED pointer alignment */
			((struct cf_conn *)(xprt->xp_p1))->strm_stat
			    = XPRT_DIED;
			/* LINTED pointer alignment */
			svc_flags(xprt) |= SVC_FAILED;
			return (-1);
		}
	}
	return (len);
}

static enum xprt_stat
svc_vc_stat(SVCXPRT *xprt)
{
	/* LINTED pointer alignment */
	SVCXPRT *parent = SVCEXT(xprt)->parent ? SVCEXT(xprt)->parent : xprt;

	/* LINTED pointer alignment */
	if (svc_failed(parent) || svc_failed(xprt))
		return (XPRT_DIED);
	if (!xdrrec_eof(svc_xdrs[xprt->xp_fd]))
		return (XPRT_MOREREQS);
	/*
	 * xdrrec_eof could have noticed that the connection is dead, so
	 * check status again.
	 */
	/* LINTED pointer alignment */
	if (svc_failed(parent) || svc_failed(xprt))
		return (XPRT_DIED);
	return (XPRT_IDLE);
}


static bool_t
svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg)
{
	/* LINTED pointer alignment */
	struct cf_conn *cd = (struct cf_conn *)(xprt->xp_p1);
	XDR *xdrs = svc_xdrs[xprt->xp_fd];

	xdrs->x_op = XDR_DECODE;

	if (cd->cf_conn_nonblock) {
		/* Get the next input */
		if (!__xdrrec_getbytes_nonblock(xdrs, &cd->strm_stat)) {
			/*
			 * The entire record has not been received.
			 * If the xprt has died, pass it along in svc_flags.
			 * Return FALSE; for a nonblocked vc connection,
			 * xdr_callmsg() is called only after the entire
			 * record has been received. For a blocked vc
			 * connection, the data is received on the fly as it
			 * is being processed through the xdr routines.
			 */
			if (cd->strm_stat == XPRT_DIED)
				/* LINTED pointer cast */
				svc_flags(xprt) |= SVC_FAILED;
			return (FALSE);
		}
	} else {
		if (!xdrrec_skiprecord(xdrs))
			return (FALSE);
		(void) __xdrrec_setfirst(xdrs);
	}

	if (xdr_callmsg(xdrs, msg)) {
		cd->x_id = msg->rm_xid;
		return (TRUE);
	}

	/*
	 * If a non-blocking connection, drop it when message decode fails.
	 * We are either under attack, or we're talking to a broken client.
	 */
	if (cd->cf_conn_nonblock) {
		/* LINTED pointer cast */
		svc_flags(xprt) |= SVC_FAILED;
	}

	return (FALSE);
}

static bool_t
svc_vc_getargs(SVCXPRT *xprt, xdrproc_t xdr_args, caddr_t args_ptr)
{
	bool_t dummy;

	/* LINTED pointer alignment */
	dummy = SVCAUTH_UNWRAP(&SVC_XP_AUTH(xprt), svc_xdrs[xprt->xp_fd],
	    xdr_args, args_ptr);
	if (svc_mt_mode != RPC_SVC_MT_NONE)
		svc_args_done(xprt);
	return (dummy);
}

static bool_t
svc_vc_freeargs(SVCXPRT *xprt, xdrproc_t xdr_args, caddr_t args_ptr)
{
	/* LINTED pointer alignment */
	XDR *xdrs = &(((struct cf_conn *)(xprt->xp_p1))->xdrs);

	xdrs->x_op = XDR_FREE;
	return ((*xdr_args)(xdrs, args_ptr));
}

static bool_t
svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg)
{
	/* LINTED pointer alignment */
	struct cf_conn *cd = (struct cf_conn *)(xprt->xp_p1);
	XDR *xdrs = &(cd->xdrs);
	bool_t stat = FALSE;
	xdrproc_t xdr_results;
	caddr_t xdr_location;
	bool_t has_args;

#ifdef __lock_lint
	(void) mutex_lock(&svc_send_mutex(SVCEXT(xprt)->parent));
#else
	if (svc_mt_mode != RPC_SVC_MT_NONE)
		/* LINTED pointer alignment */
		(void) mutex_lock(&svc_send_mutex(SVCEXT(xprt)->parent));
#endif

	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
	    msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		has_args = TRUE;
		xdr_results = msg->acpted_rply.ar_results.proc;
		xdr_location = msg->acpted_rply.ar_results.where;
		msg->acpted_rply.ar_results.proc = xdr_void;
		msg->acpted_rply.ar_results.where = NULL;
	} else
		has_args = FALSE;

	xdrs->x_op = XDR_ENCODE;
	msg->rm_xid = cd->x_id;
	/* LINTED pointer alignment */
	if (xdr_replymsg(xdrs, msg) && (!has_args || SVCAUTH_WRAP(
	    &SVC_XP_AUTH(xprt), xdrs, xdr_results, xdr_location))) {
		stat = TRUE;
	}
	(void) xdrrec_endofrecord(xdrs, TRUE);

#ifdef __lock_lint
	(void) mutex_unlock(&svc_send_mutex(SVCEXT(xprt)->parent));
#else
	if (svc_mt_mode != RPC_SVC_MT_NONE)
		/* LINTED pointer alignment */
		(void) mutex_unlock(&svc_send_mutex(SVCEXT(xprt)->parent));
#endif

	return (stat);
}

static struct xp_ops *
svc_vc_ops(void)
{
	static struct xp_ops ops;
	extern mutex_t ops_lock;

	/* VARIABLES PROTECTED BY ops_lock: ops */

	(void) mutex_lock(&ops_lock);
	if (ops.xp_recv == NULL) {
		ops.xp_recv = svc_vc_recv;
		ops.xp_stat = svc_vc_stat;
		ops.xp_getargs = svc_vc_getargs;
		ops.xp_reply = svc_vc_reply;
		ops.xp_freeargs = svc_vc_freeargs;
		ops.xp_destroy = svc_vc_destroy;
		ops.xp_control = svc_vc_control;
	}
	(void) mutex_unlock(&ops_lock);
	return (&ops);
}

static struct xp_ops *
svc_vc_rendezvous_ops(void)
{
	static struct xp_ops ops;
	extern mutex_t ops_lock;

	(void) mutex_lock(&ops_lock);
	if (ops.xp_recv == NULL) {
		ops.xp_recv = rendezvous_request;
		ops.xp_stat = rendezvous_stat;
		ops.xp_getargs = (bool_t (*)())abort;
		ops.xp_reply = (bool_t (*)())abort;
		ops.xp_freeargs = (bool_t (*)())abort;
		ops.xp_destroy = svc_vc_destroy;
		ops.xp_control = rendezvous_control;
	}
	(void) mutex_unlock(&ops_lock);
	return (&ops);
}

/*
 * PRIVATE RPC INTERFACE
 *
 * This is a hack to let NIS+ clean up connections that have already been
 * closed. This problem arises because rpc.nisd forks a child to handle
 * existing connections when it does checkpointing. The child may close
 * some of these connections. But the descriptors still stay open in the
 * parent, and because TLI descriptors don't support a persistent EOF
 * condition (like sockets do), the parent will never detect that these
 * descriptors are dead.
 *
 * The following internal procedure, __svc_nisplus_fdcleanup_hack(), should
 * be removed as soon as rpc.nisd is rearchitected to do the right thing.
 * This procedure should not find its way into any header files.
 *
 * This procedure should be called only when rpc.nisd knows that there
 * are no children servicing clients.
 */

static bool_t
fd_is_dead(int fd)
{
	struct T_info_ack inforeq;
	int retval;

	inforeq.PRIM_type = T_INFO_REQ;
	if (!_t_do_ioctl(fd, (caddr_t)&inforeq, sizeof (struct T_info_req),
	    TI_GETINFO, &retval))
		return (TRUE);
	if (retval != (int)sizeof (struct T_info_ack))
		return (TRUE);

	switch (inforeq.CURRENT_state) {
	case TS_UNBND:
	case TS_IDLE:
		return (TRUE);
	default:
		break;
	}
	return (FALSE);
}

void
__svc_nisplus_fdcleanup_hack(void)
{
	SVCXPRT *xprt;
	SVCXPRT *dead_xprt[CLEANUP_SIZE];
	int i, fd_idx = 0, dead_idx = 0;

	if (svc_xports == NULL)
		return;
	for (;;) {
		(void) rw_wrlock(&svc_fd_lock);
		for (dead_idx = 0; fd_idx < svc_max_pollfd; fd_idx++) {
			if ((xprt = svc_xports[fd_idx]) == NULL)
				continue;
			/* LINTED pointer alignment */
			if (svc_type(xprt) != SVC_CONNECTION)
				continue;
			if (fd_is_dead(fd_idx)) {
				dead_xprt[dead_idx++] = xprt;
				if (dead_idx >= CLEANUP_SIZE)
					break;
			}
		}

		for (i = 0; i < dead_idx; i++) {
			/* Still holding svc_fd_lock */
			_svc_vc_destroy_private(dead_xprt[i], FALSE);
		}
		(void) rw_unlock(&svc_fd_lock);
		if (fd_idx++ >= svc_max_pollfd)
			return;
	}
}

void
__svc_nisplus_enable_timestamps(void)
{
	(void) mutex_lock(&timestamp_lock);
	if (!timestamps) {
		timestamps = calloc(FD_INCREMENT, sizeof (long));
		if (timestamps != NULL)
			ntimestamps = FD_INCREMENT;
		else {
			(void) mutex_unlock(&timestamp_lock);
			syslog(LOG_ERR,
			    "__svc_nisplus_enable_timestamps: "
			    "out of memory");
			return;
		}
	}
	(void) mutex_unlock(&timestamp_lock);
}

void
__svc_nisplus_purge_since(long since)
{
	SVCXPRT *xprt;
	SVCXPRT *dead_xprt[CLEANUP_SIZE];
	int i, fd_idx = 0, dead_idx = 0;

	if (svc_xports == NULL)
		return;
	for (;;) {
		(void) rw_wrlock(&svc_fd_lock);
		(void) mutex_lock(&timestamp_lock);
		for (dead_idx = 0; fd_idx < svc_max_pollfd; fd_idx++) {
			if ((xprt = svc_xports[fd_idx]) == NULL) {
				continue;
			}
			/* LINTED pointer cast */
			if (svc_type(xprt) != SVC_CONNECTION) {
				continue;
			}
			if (fd_idx >= ntimestamps) {
				break;
			}
			if (timestamps[fd_idx] &&
			    timestamps[fd_idx] < since) {
				dead_xprt[dead_idx++] = xprt;
				if (dead_idx >= CLEANUP_SIZE)
					break;
			}
		}
		(void) mutex_unlock(&timestamp_lock);

		for (i = 0; i < dead_idx; i++) {
			/* Still holding svc_fd_lock */
			_svc_vc_destroy_private(dead_xprt[i], FALSE);
		}
		(void) rw_unlock(&svc_fd_lock);
		if (fd_idx++ >= svc_max_pollfd)
			return;
	}
}

/*
 * Dup cache wrapper functions for vc requests. The set of dup
 * functions was written with the view that they may be expanded
 * during creation of a generic svc_vc_enablecache routine
 * which would have a size based cache, rather than a time based cache.
 * The real work is done in generic svc.c
 */
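/*
 * Illustrative sketch only (hypothetical dispatch code, not part of this
 * module) of how these private wrappers are intended to be sequenced once a
 * cache has been attached to the rendezvous xprt with
 * __svc_vc_dupcache_init(); the DUP_* status names come from the generic
 * dup cache in svc.c and are shown here only for illustration:
 *
 *	caddr_t resp;
 *	uint_t respsz;
 *
 *	switch (__svc_vc_dup(req, &resp, &respsz)) {
 *	case DUP_NEW:
 *		... service the request, then record the reply ...
 *		(void) __svc_vc_dupdone(req, resp, respsz, DUP_DONE);
 *		break;
 *	case DUP_DONE:
 *		... resend the cached reply found in resp/respsz ...
 *		break;
 *	}
 */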
bool_t
__svc_vc_dupcache_init(SVCXPRT *xprt, void *condition, int basis)
{
	return (__svc_dupcache_init(condition, basis,
	    /* LINTED pointer alignment */
	    &(((struct cf_rendezvous *)xprt->xp_p1)->cf_cache)));
}

int
__svc_vc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz)
{
	return (__svc_dup(req, resp_buf, resp_bufsz,
	    /* LINTED pointer alignment */
	    ((struct cf_conn *)req->rq_xprt->xp_p1)->cf_cache));
}

int
__svc_vc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
    int status)
{
	return (__svc_dupdone(req, resp_buf, resp_bufsz, status,
	    /* LINTED pointer alignment */
	    ((struct cf_conn *)req->rq_xprt->xp_p1)->cf_cache));
}