/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T
 * All Rights Reserved
 */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Implements a kernel based, client side RPC.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/ddi.h>
#include <sys/tiuser.h>
#include <sys/tihdr.h>
#include <sys/t_kuser.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/kstat.h>
#include <sys/t_lock.h>
#include <sys/cmn_err.h>
#include <sys/conf.h>
#include <sys/disp.h>
#include <sys/taskq.h>
#include <sys/list.h>
#include <sys/atomic.h>
#include <sys/zone.h>
#include <netinet/in.h>
#include <rpc/types.h>
#include <rpc/xdr.h>
#include <rpc/auth.h>
#include <rpc/clnt.h>
#include <rpc/rpc_msg.h>

static enum clnt_stat clnt_clts_kcallit(CLIENT *, rpcproc_t, xdrproc_t,
    caddr_t, xdrproc_t, caddr_t, struct timeval);
static void clnt_clts_kabort(CLIENT *);
static void clnt_clts_kerror(CLIENT *, struct rpc_err *);
static bool_t clnt_clts_kfreeres(CLIENT *, xdrproc_t, caddr_t);
static bool_t clnt_clts_kcontrol(CLIENT *, int, char *);
static void clnt_clts_kdestroy(CLIENT *);
static int clnt_clts_ksettimers(CLIENT *, struct rpc_timers *,
    struct rpc_timers *, int, void (*)(), caddr_t, uint32_t);

/*
 * Operations vector for CLTS based RPC
 */
static struct clnt_ops clts_ops = {
	clnt_clts_kcallit,	/* do rpc call */
	clnt_clts_kabort,	/* abort call */
	clnt_clts_kerror,	/* return error status */
	clnt_clts_kfreeres,	/* free results */
	clnt_clts_kdestroy,	/* destroy rpc handle */
	clnt_clts_kcontrol,	/* the ioctl() of rpc */
	clnt_clts_ksettimers	/* set retry timers */
};

/*
 * Endpoint for CLTS (INET, INET6, loopback, etc.)
 */
typedef struct endpnt_type {
	struct endpnt_type *e_next;	/* pointer to next endpoint type */
	list_t		e_pool;		/* list of available endpoints */
	list_t		e_ilist;	/* list of idle endpoints */
	struct endpnt	*e_pcurr;	/* pointer to current endpoint */
	char		e_protofmly[KNC_STRSIZE];	/* protocol family */
	dev_t		e_rdev;		/* device */
	kmutex_t	e_plock;	/* pool lock */
	kmutex_t	e_ilock;	/* idle list lock */
	timeout_id_t	e_itimer;	/* timer to dispatch the taskq */
	uint_t		e_cnt;		/* number of endpoints in the pool */
	zoneid_t	e_zoneid;	/* zoneid of endpoint type */
	kcondvar_t	e_async_cv;	/* cv for asynchronous reap threads */
	uint_t		e_async_count;	/* count of asynchronous reap threads */
} endpnt_type_t;

typedef struct endpnt {
	list_node_t	e_node;		/* link to the pool */
	list_node_t	e_idle;		/* link to the idle list */
	endpnt_type_t	*e_type;	/* back pointer to endpoint type */
	TIUSER		*e_tiptr;	/* pointer to transport endpoint */
	queue_t		*e_wq;		/* write queue */
	uint_t		e_flags;	/* endpoint flags */
	uint_t		e_ref;		/* ref count on endpoint */
	kcondvar_t	e_cv;		/* condition variable */
	kmutex_t	e_lock;		/* protects cv and flags */
	time_t		e_itime;	/* time when rele'd */
} endpnt_t;

#define	ENDPNT_ESTABLISHED	0x1	/* endpoint is established */
#define	ENDPNT_WAITING		0x2	/* thread waiting for endpoint */
#define	ENDPNT_BOUND		0x4	/* endpoint is bound */
#define	ENDPNT_STALE		0x8	/* endpoint is dead */
#define	ENDPNT_ONIDLE		0x10	/* endpoint is on the idle list */

static krwlock_t	endpnt_type_lock;	/* protects endpnt_type_list */
static endpnt_type_t	*endpnt_type_list = NULL; /* list of CLTS endpoints */
static struct kmem_cache	*endpnt_cache;	/* cache of endpnt_t's */
static taskq_t		*endpnt_taskq;		/* endpnt_t reaper thread */
static bool_t		taskq_created;		/* flag for endpnt_taskq */
static kmutex_t		endpnt_taskq_lock;	/* taskq lock */
static zone_key_t	endpnt_destructor_key;

#define	DEFAULT_ENDPOINT_REAP_INTERVAL 60	/* 1 minute */
#define	DEFAULT_INTERVAL_SHIFT 30		/* 30 seconds */

/*
 * Endpoint tunables
 */
static int	clnt_clts_max_endpoints = -1;
static int	clnt_clts_hash_size = DEFAULT_HASH_SIZE;
static time_t	clnt_clts_endpoint_reap_interval = -1;
static clock_t	clnt_clts_taskq_dispatch_interval;

/*
 * Response completion hash queue
 */
static call_table_t *clts_call_ht;

/*
 * Routines for the endpoint manager
 */
static struct endpnt_type *endpnt_type_create(struct knetconfig *);
static void endpnt_type_free(struct endpnt_type *);
static int check_endpnt(struct endpnt *, struct endpnt **);
static struct endpnt *endpnt_get(struct knetconfig *, int);
static void endpnt_rele(struct endpnt *);
static void endpnt_reap_settimer(endpnt_type_t *);
static void endpnt_reap(endpnt_type_t *);
static void endpnt_reap_dispatch(void *);
static void endpnt_reclaim(zoneid_t);


/*
 * Request dispatching function.
 */
static int clnt_clts_dispatch_send(queue_t *q, mblk_t *, struct netbuf *addr,
    calllist_t *, uint_t);

/*
 * The size of the preserialized RPC header information.
 */
#define	CKU_HDRSIZE	20
/*
 * The initial allocation size.  It is small to reduce space requirements.
 */
#define	CKU_INITSIZE	2048
/*
 * The size of additional allocations, if required.  It is larger to
 * reduce the number of actual allocations.
 */
#define	CKU_ALLOCSIZE	8192
/*
 * Private data per rpc handle.  This structure is allocated by
 * clnt_clts_kcreate, and freed by clnt_clts_kdestroy.
 */
struct cku_private {
	CLIENT			 cku_client;	/* client handle */
	int			 cku_retrys;	/* request retries */
	calllist_t		 cku_call;
	struct endpnt		*cku_endpnt;	/* open end point */
	struct knetconfig	 cku_config;
	struct netbuf		 cku_addr;	/* remote address */
	struct rpc_err		 cku_err;	/* error status */
	XDR			 cku_outxdr;	/* xdr stream for output */
	XDR			 cku_inxdr;	/* xdr stream for input */
	char			 cku_rpchdr[CKU_HDRSIZE + 4]; /* rpc header */
	struct cred		*cku_cred;	/* credentials */
	struct rpc_timers	*cku_timers;	/* for estimating RTT */
	struct rpc_timers	*cku_timeall;	/* for estimating RTT */
	void			 (*cku_feedback)(int, int, caddr_t);
						/* ptr to feedback rtn */
	caddr_t			 cku_feedarg;	/* argument for feedback func */
	uint32_t		 cku_xid;	/* current XID */
	bool_t			 cku_bcast;	/* RPC broadcast hint */
	int			 cku_useresvport; /* Use reserved port */
	struct rpc_clts_client	*cku_stats;	/* counters for the zone */
};

static const struct rpc_clts_client {
	kstat_named_t	rccalls;
	kstat_named_t	rcbadcalls;
	kstat_named_t	rcretrans;
	kstat_named_t	rcbadxids;
	kstat_named_t	rctimeouts;
	kstat_named_t	rcnewcreds;
	kstat_named_t	rcbadverfs;
	kstat_named_t	rctimers;
	kstat_named_t	rcnomem;
	kstat_named_t	rccantsend;
} clts_rcstat_tmpl = {
	{ "calls",	KSTAT_DATA_UINT64 },
	{ "badcalls",	KSTAT_DATA_UINT64 },
	{ "retrans",	KSTAT_DATA_UINT64 },
	{ "badxids",	KSTAT_DATA_UINT64 },
	{ "timeouts",	KSTAT_DATA_UINT64 },
	{ "newcreds",	KSTAT_DATA_UINT64 },
	{ "badverfs",	KSTAT_DATA_UINT64 },
	{ "timers",	KSTAT_DATA_UINT64 },
	{ "nomem",	KSTAT_DATA_UINT64 },
	{ "cantsend",	KSTAT_DATA_UINT64 },
};

static uint_t clts_rcstat_ndata =
	sizeof (clts_rcstat_tmpl) / sizeof (kstat_named_t);

#define	RCSTAT_INCR(s, x)	\
	atomic_add_64(&(s)->x.value.ui64, 1)

#define	ptoh(p)		(&((p)->cku_client))
#define	htop(h)		((struct cku_private *)((h)->cl_private))

/*
 * Times to retry
 */
#define	SNDTRIES	4
#define	REFRESHES	2	/* authentication refreshes */

/*
 * The following is used to determine the global default behavior for
 * CLTS when binding to a local port.
 *
 * If the value is set to 1 the default will be to select a reserved
 * (aka privileged) port, if the value is zero the default will be to
 * use non-reserved ports.  Users of kRPC may override this by using
 * CLNT_CONTROL() and CLSET_BINDRESVPORT.
 */
static int clnt_clts_do_bindresvport = 1;

#define	BINDRESVPORT_RETRIES 5
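/*
 * For illustration, a kRPC consumer could override the default on a
 * per-handle basis roughly as follows.  This is a sketch only; the
 * "client" handle and the choice of disabling reserved ports are
 * assumptions, not code from this file:
 *
 *	int resv = 0;
 *
 *	if (!CLNT_CONTROL(client, CLSET_BINDRESVPORT, (char *)&resv))
 *		RPCLOG0(1, "failed to clear bindresvport\n");
 */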
void
clnt_clts_stats_init(zoneid_t zoneid, struct rpc_clts_client **statsp)
{
	kstat_t *ksp;
	kstat_named_t *knp;

	knp = rpcstat_zone_init_common(zoneid, "unix", "rpc_clts_client",
	    (const kstat_named_t *)&clts_rcstat_tmpl,
	    sizeof (clts_rcstat_tmpl));
	/*
	 * Backwards compatibility for old kstat clients
	 */
	ksp = kstat_create_zone("unix", 0, "rpc_client", "rpc",
	    KSTAT_TYPE_NAMED, clts_rcstat_ndata,
	    KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_WRITABLE, zoneid);
	if (ksp) {
		ksp->ks_data = knp;
		kstat_install(ksp);
	}
	*statsp = (struct rpc_clts_client *)knp;
}

void
clnt_clts_stats_fini(zoneid_t zoneid, struct rpc_clts_client **statsp)
{
	rpcstat_zone_fini_common(zoneid, "unix", "rpc_clts_client");
	kstat_delete_byname_zone("unix", 0, "rpc_client", zoneid);
	kmem_free(*statsp, sizeof (clts_rcstat_tmpl));
}

/*
 * Create an rpc handle for a clts rpc connection.
 * Allocates space for the handle structure and the private data.
 */
/* ARGSUSED */
int
clnt_clts_kcreate(struct knetconfig *config, struct netbuf *addr,
	rpcprog_t pgm, rpcvers_t vers, int retrys, struct cred *cred,
	CLIENT **cl)
{
	CLIENT *h;
	struct cku_private *p;
	struct rpc_msg call_msg;
	int error;
	int plen;

	if (cl == NULL)
		return (EINVAL);

	*cl = NULL;
	error = 0;

	p = kmem_zalloc(sizeof (*p), KM_SLEEP);

	h = ptoh(p);

	/* handle */
	h->cl_ops = &clts_ops;
	h->cl_private = (caddr_t)p;
	h->cl_auth = authkern_create();

	/* call message, just used to pre-serialize below */
	call_msg.rm_xid = 0;
	call_msg.rm_direction = CALL;
	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
	call_msg.rm_call.cb_prog = pgm;
	call_msg.rm_call.cb_vers = vers;

	/* private */
	clnt_clts_kinit(h, addr, retrys, cred);

	xdrmem_create(&p->cku_outxdr, p->cku_rpchdr, CKU_HDRSIZE, XDR_ENCODE);

	/* pre-serialize call message header */
	if (!xdr_callhdr(&p->cku_outxdr, &call_msg)) {
		error = EINVAL;		/* XXX */
		goto bad;
	}

	p->cku_config.knc_rdev = config->knc_rdev;
	p->cku_config.knc_semantics = config->knc_semantics;
	plen = strlen(config->knc_protofmly) + 1;
	p->cku_config.knc_protofmly = kmem_alloc(plen, KM_SLEEP);
	bcopy(config->knc_protofmly, p->cku_config.knc_protofmly, plen);
	p->cku_useresvport = -1; /* value has not been set */

	cv_init(&p->cku_call.call_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&p->cku_call.call_lock, NULL, MUTEX_DEFAULT, NULL);

	*cl = h;
	return (0);

bad:
	auth_destroy(h->cl_auth);
	kmem_free(p->cku_addr.buf, addr->maxlen);
	kmem_free(p, sizeof (struct cku_private));

	return (error);
}
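/*
 * A minimal usage sketch for the creation path above.  Illustrative
 * only: the knetconfig/netbuf setup, the NFS program constants
 * (NFS_PROGRAM, NFS_VERSION, RFS_NULL) and the error handling are
 * assumptions, not code from this file:
 *
 *	CLIENT *client;
 *	int error;
 *
 *	error = clnt_clts_kcreate(&config, &addr, NFS_PROGRAM,
 *	    NFS_VERSION, retries, CRED(), &client);
 *	if (error == 0) {
 *		status = CLNT_CALL(client, RFS_NULL, xdr_void, NULL,
 *		    xdr_void, NULL, timeout);
 *		CLNT_DESTROY(client);
 *	}
 */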
void
clnt_clts_kinit(CLIENT *h, struct netbuf *addr, int retrys, cred_t *cred)
{
	/* LINTED pointer alignment */
	struct cku_private *p = htop(h);
	struct rpcstat *rsp;

	rsp = zone_getspecific(rpcstat_zone_key, curproc->p_zone);
	ASSERT(rsp != NULL);

	p->cku_retrys = retrys;

	if (p->cku_addr.maxlen < addr->len) {
		if (p->cku_addr.maxlen != 0 && p->cku_addr.buf != NULL)
			kmem_free(p->cku_addr.buf, p->cku_addr.maxlen);

		p->cku_addr.buf = kmem_zalloc(addr->maxlen, KM_SLEEP);
		p->cku_addr.maxlen = addr->maxlen;
	}

	p->cku_addr.len = addr->len;
	bcopy(addr->buf, p->cku_addr.buf, addr->len);

	p->cku_cred = cred;
	p->cku_xid = 0;
	p->cku_timers = NULL;
	p->cku_timeall = NULL;
	p->cku_feedback = NULL;
	p->cku_bcast = FALSE;
	p->cku_call.call_xid = 0;
	p->cku_call.call_hash = 0;
	p->cku_call.call_notified = FALSE;
	p->cku_call.call_next = NULL;
	p->cku_call.call_prev = NULL;
	p->cku_call.call_reply = NULL;
	p->cku_call.call_wq = NULL;
	p->cku_stats = rsp->rpc_clts_client;
}

/*
 * Set the timers.  Return current retransmission timeout.
 */
static int
clnt_clts_ksettimers(CLIENT *h, struct rpc_timers *t, struct rpc_timers *all,
	int minimum, void (*feedback)(int, int, caddr_t), caddr_t arg,
	uint32_t xid)
{
	/* LINTED pointer alignment */
	struct cku_private *p = htop(h);
	int value;

	p->cku_feedback = feedback;
	p->cku_feedarg = arg;
	p->cku_timers = t;
	p->cku_timeall = all;
	if (xid)
		p->cku_xid = xid;
	value = all->rt_rtxcur;
	value += t->rt_rtxcur;
	if (value < minimum)
		return (minimum);
	RCSTAT_INCR(p->cku_stats, rctimers);
	return (value);
}

/*
 * Time out back off function.  tim is in HZ
 */
#define	MAXTIMO	(20 * hz)
#define	backoff(tim)	(((tim) < MAXTIMO) ? dobackoff(tim) : (tim))
#define	dobackoff(tim)	((((tim) << 1) > MAXTIMO) ? MAXTIMO : ((tim) << 1))

#define	RETRY_POLL_TIMO	30
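/*
 * For example, with hz == 100 and an initial timeout of 2 * hz ticks,
 * repeated applications of backoff() yield 400, 800, 1600, and then
 * 2000 ticks (4, 8, 16, and 20 seconds), since dobackoff() clamps the
 * doubled value at MAXTIMO.  The numbers are illustrative only; hz is
 * platform-dependent.
 */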
/*
 * Call remote procedure.
 * Most of the work of rpc is done here.  We serialize what is left
 * of the header (some was pre-serialized in the handle), serialize
 * the arguments, and send it off.  We wait for a reply or a time out.
 * Timeout causes an immediate return, other packet problems may cause
 * a retry on the receive.  When a good packet is received we deserialize
 * it, and check verification.  A bad reply code will cause one retry
 * with full (longhand) credentials.
 */
enum clnt_stat
clnt_clts_kcallit_addr(CLIENT *h, rpcproc_t procnum, xdrproc_t xdr_args,
	caddr_t argsp, xdrproc_t xdr_results, caddr_t resultsp,
	struct timeval wait, struct netbuf *sin)
{
	/* LINTED pointer alignment */
	struct cku_private *p = htop(h);
	XDR *xdrs;
	int stries = p->cku_retrys;
	int refreshes = REFRESHES;	/* number of times to refresh cred */
	int round_trip;			/* time the RPC */
	int error;
	int hdrsz;
	mblk_t *mp;
	mblk_t *mpdup;
	mblk_t *resp = NULL;
	mblk_t *tmp;
	calllist_t *call = &p->cku_call;
	clock_t timout = 0;
	bool_t interrupted;
	enum clnt_stat status;
	struct rpc_msg reply_msg;
	enum clnt_stat re_status;
	endpnt_t *endpt;

	RCSTAT_INCR(p->cku_stats, rccalls);

	RPCLOG(2, "clnt_clts_kcallit_addr: wait.tv_sec: %ld\n", wait.tv_sec);
	RPCLOG(2, "clnt_clts_kcallit_addr: wait.tv_usec: %ld\n", wait.tv_usec);

	timout = TIMEVAL_TO_TICK(&wait);

	if (p->cku_xid == 0) {
		p->cku_xid = alloc_xid();
		if (p->cku_endpnt != NULL)
			endpnt_rele(p->cku_endpnt);
		p->cku_endpnt = NULL;
	}

	mpdup = NULL;
call_again:

	if (mpdup == NULL) {

		while ((mp = allocb(CKU_INITSIZE, BPRI_LO)) == NULL) {
			if (strwaitbuf(CKU_INITSIZE, BPRI_LO)) {
				p->cku_err.re_status = RPC_SYSTEMERROR;
				p->cku_err.re_errno = ENOSR;
				goto done;
			}
		}

		xdrs = &p->cku_outxdr;
		xdrmblk_init(xdrs, mp, XDR_ENCODE, CKU_ALLOCSIZE);

		if (h->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
			/*
			 * Copy in the preserialized RPC header
			 * information.
			 */
			bcopy(p->cku_rpchdr, mp->b_rptr, CKU_HDRSIZE);

			/*
			 * transaction id is the 1st thing in the output
			 * buffer.
			 */
			/* LINTED pointer alignment */
			(*(uint32_t *)(mp->b_rptr)) = p->cku_xid;

			/* Skip the preserialized stuff. */
			XDR_SETPOS(xdrs, CKU_HDRSIZE);

			/* Serialize dynamic stuff into the output buffer. */
			if ((!XDR_PUTINT32(xdrs, (int32_t *)&procnum)) ||
			    (!AUTH_MARSHALL(h->cl_auth, xdrs, p->cku_cred)) ||
			    (!(*xdr_args)(xdrs, argsp))) {
				freemsg(mp);
				p->cku_err.re_status = RPC_CANTENCODEARGS;
				p->cku_err.re_errno = EIO;
				goto done;
			}
		} else {
			uint32_t *uproc = (uint32_t *)
			    &p->cku_rpchdr[CKU_HDRSIZE];
			IXDR_PUT_U_INT32(uproc, procnum);

			(*(uint32_t *)(&p->cku_rpchdr[0])) = p->cku_xid;
			XDR_SETPOS(xdrs, 0);

			/* Serialize the procedure number and the arguments. */
			if (!AUTH_WRAP(h->cl_auth, (caddr_t)p->cku_rpchdr,
			    CKU_HDRSIZE+4, xdrs, xdr_args, argsp)) {
				freemsg(mp);
				p->cku_err.re_status = RPC_CANTENCODEARGS;
				p->cku_err.re_errno = EIO;
				goto done;
			}
		}
	} else
		mp = mpdup;

	mpdup = dupmsg(mp);
	if (mpdup == NULL) {
		freemsg(mp);
		p->cku_err.re_status = RPC_SYSTEMERROR;
		p->cku_err.re_errno = ENOSR;
		goto done;
	}

	/*
	 * Grab an endpnt only if the endpoint is NULL.  We could be retrying
	 * the request and in this case we want to go through the same
	 * source port, so that the duplicate request cache may detect a
	 * retry.
	 */

	if (p->cku_endpnt == NULL)
		p->cku_endpnt = endpnt_get(&p->cku_config, p->cku_useresvport);

	if (p->cku_endpnt == NULL) {
		freemsg(mp);
		p->cku_err.re_status = RPC_SYSTEMERROR;
		p->cku_err.re_errno = ENOSR;
		goto done;
	}

	round_trip = lbolt;

	error = clnt_clts_dispatch_send(p->cku_endpnt->e_wq, mp,
	    &p->cku_addr, call, p->cku_xid);

	if (error != 0) {
		freemsg(mp);
		p->cku_err.re_status = RPC_CANTSEND;
		p->cku_err.re_errno = error;
		RCSTAT_INCR(p->cku_stats, rccantsend);
		goto done1;
	}

	RPCLOG(64, "clnt_clts_kcallit_addr: sent call for xid 0x%x\n",
	    p->cku_xid);

	/*
	 * There are two reasons for which we go back to tryread.
	 *
	 * a) In case the status is RPC_PROCUNAVAIL and we sent out a
	 *    broadcast we should not get any invalid messages with the
	 *    RPC_PROCUNAVAIL error back.  Some broken RPC implementations
	 *    send them and for this we have to ignore them (as we would
	 *    have never received them) and look for another message
	 *    which might contain the valid response because we don't know
	 *    how many broken implementations are in the network.  So we
	 *    are going to loop until
	 *    - we received a valid response
	 *    - we have processed all invalid responses and
	 *	got a time out when we try to receive again a
	 *	message.
	 *
	 * b) We will jump back to tryread also in case we failed
	 *    within the AUTH_VALIDATE.  In this case we should move
	 *    on and loop until we received a valid response or we
	 *    have processed all responses with broken authentication
	 *    and we got a time out when we try to receive a message.
	 */
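	/*
	 * Note on the wait loop below: cv_timedwait() returns -1 on
	 * timeout and never returns 0, while cv_timedwait_sig()
	 * additionally returns 0 when the wait is interrupted by a
	 * signal.  The cv_wait_ret == 0 test is therefore the
	 * interrupt case, and is only reachable when cl_nosignal is
	 * not set.
	 */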
tryread:
	mutex_enter(&call->call_lock);
	interrupted = FALSE;
	if (call->call_notified == FALSE) {
		klwp_t *lwp = ttolwp(curthread);
		clock_t cv_wait_ret = 1; /* init to > 0 */
		clock_t cv_timout = timout;

		if (lwp != NULL)
			lwp->lwp_nostop++;

		cv_timout += lbolt;

		if (h->cl_nosignal)
			while ((cv_wait_ret =
			    cv_timedwait(&call->call_cv,
			    &call->call_lock, cv_timout)) > 0 &&
			    call->call_notified == FALSE);
		else
			while ((cv_wait_ret =
			    cv_timedwait_sig(&call->call_cv,
			    &call->call_lock, cv_timout)) > 0 &&
			    call->call_notified == FALSE);

		if (cv_wait_ret == 0)
			interrupted = TRUE;

		if (lwp != NULL)
			lwp->lwp_nostop--;
	}
	resp = call->call_reply;
	call->call_reply = NULL;
	status = call->call_status;
	/*
	 * We have to reset call_notified here.  In case we have
	 * to do a retry (e.g. in case we got a RPC_PROCUNAVAIL
	 * error) we need to set this to false to ensure that
	 * we will wait for the next message.  When the next message
	 * is going to arrive the function clnt_clts_dispatch_notify
	 * will set this to true again.
	 */
	call->call_notified = FALSE;
	mutex_exit(&call->call_lock);

	if (status == RPC_TIMEDOUT) {
		if (interrupted) {
			/*
			 * We got interrupted, bail out
			 */
			p->cku_err.re_status = RPC_INTR;
			p->cku_err.re_errno = EINTR;
			goto done1;
		} else {
			/*
			 * It's possible that our response arrived
			 * right after we timed out.  Check to see
			 * if it has arrived before we remove the
			 * calllist from the dispatch queue.
			 */
			mutex_enter(&call->call_lock);
			if (call->call_notified == TRUE) {
				resp = call->call_reply;
				call->call_reply = NULL;
				mutex_exit(&call->call_lock);
				RPCLOG(8, "clnt_clts_kcallit_addr: "
				    "response received for request "
				    "w/xid 0x%x after timeout\n",
				    p->cku_xid);
				goto getresponse;
			}
			mutex_exit(&call->call_lock);

			RPCLOG(8, "clnt_clts_kcallit_addr: "
			    "request w/xid 0x%x timedout "
			    "waiting for reply\n", p->cku_xid);
#if 0 /* XXX not yet */
			/*
			 * Timeout may be due to a dead gateway.  Send
			 * an ioctl downstream advising deletion of
			 * route when we reach the half-way point to
			 * timing out.
			 */
			if (stries == p->cku_retrys/2) {
				t_kadvise(p->cku_endpnt->e_tiptr,
				    (uchar_t *)p->cku_addr.buf,
				    p->cku_addr.len);
			}
#endif /* not yet */
			p->cku_err.re_status = RPC_TIMEDOUT;
			p->cku_err.re_errno = ETIMEDOUT;
			RCSTAT_INCR(p->cku_stats, rctimeouts);
			goto done1;
		}
	}
getresponse:
	/*
	 * Check to see if a response arrived.  If one is present then
	 * proceed to process the response.  Otherwise fall through to
	 * retry or retransmit the request.  This is probably not the
	 * optimal thing to do, but since we are most likely dealing
	 * with an unreliable transport it is the safe thing to do.
	 */
	if (resp == NULL) {
		p->cku_err.re_status = RPC_CANTRECV;
		p->cku_err.re_errno = EIO;
		goto done1;
	}

	/*
	 * Prepare the message for further processing.  We need to remove
	 * the datagram header and copy the source address if necessary.  No
	 * need to verify the header since rpcmod took care of that.
	 */
	/*
	 * Copy the source address if the caller has supplied a netbuf.
	 */
	if (sin != NULL) {
		union T_primitives *pptr;

		pptr = (union T_primitives *)resp->b_rptr;
		bcopy(resp->b_rptr + pptr->unitdata_ind.SRC_offset, sin->buf,
		    pptr->unitdata_ind.SRC_length);
		sin->len = pptr->unitdata_ind.SRC_length;
	}

	/*
	 * Pop off the datagram header.
	 */
	hdrsz = resp->b_wptr - resp->b_rptr;
	if ((resp->b_wptr - (resp->b_rptr + hdrsz)) == 0) {
		tmp = resp;
		resp = resp->b_cont;
		tmp->b_cont = NULL;
		freeb(tmp);
	} else {
		unsigned char *ud_off = resp->b_rptr;
		resp->b_rptr += hdrsz;
		tmp = dupb(resp);
		if (tmp == NULL) {
			p->cku_err.re_status = RPC_SYSTEMERROR;
			p->cku_err.re_errno = ENOSR;
			freemsg(resp);
			goto done1;
		}
		tmp->b_cont = resp->b_cont;
		resp->b_rptr = ud_off;
		freeb(resp);
		resp = tmp;
	}

	round_trip = lbolt - round_trip;
	/*
	 * Van Jacobson timer algorithm here, only if NOT a retransmission.
	 */
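	/*
	 * For reference: rt_srtt is kept scaled by 8 and rt_deviate by 4,
	 * so the shifts below implement srtt += (rtt - srtt) / 8 and
	 * deviate += (|rtt - srtt| - deviate) / 4 in fixed point.  The
	 * resulting retransmit timeout, ((srtt >> 2) + deviate) >> 1,
	 * works out to SRTT + 2 * DEVIATE in unscaled units.
	 */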
	if (p->cku_timers != NULL && stries == p->cku_retrys) {
		int rt;

		rt = round_trip;
		rt -= (p->cku_timers->rt_srtt >> 3);
		p->cku_timers->rt_srtt += rt;
		if (rt < 0)
			rt = -rt;
		rt -= (p->cku_timers->rt_deviate >> 2);
		p->cku_timers->rt_deviate += rt;
		p->cku_timers->rt_rtxcur =
		    (clock_t)((p->cku_timers->rt_srtt >> 2) +
		    p->cku_timers->rt_deviate) >> 1;

		rt = round_trip;
		rt -= (p->cku_timeall->rt_srtt >> 3);
		p->cku_timeall->rt_srtt += rt;
		if (rt < 0)
			rt = -rt;
		rt -= (p->cku_timeall->rt_deviate >> 2);
		p->cku_timeall->rt_deviate += rt;
		p->cku_timeall->rt_rtxcur =
		    (clock_t)((p->cku_timeall->rt_srtt >> 2) +
		    p->cku_timeall->rt_deviate) >> 1;
		if (p->cku_feedback != NULL) {
			(*p->cku_feedback)(FEEDBACK_OK, procnum,
			    p->cku_feedarg);
		}
	}

	/*
	 * Process reply
	 */
	xdrs = &(p->cku_inxdr);
	xdrmblk_init(xdrs, resp, XDR_DECODE, 0);

	reply_msg.rm_direction = REPLY;
	reply_msg.rm_reply.rp_stat = MSG_ACCEPTED;
	reply_msg.acpted_rply.ar_stat = SUCCESS;
	reply_msg.acpted_rply.ar_verf = _null_auth;
	/*
	 * xdr_results will be done in AUTH_UNWRAP.
	 */
	reply_msg.acpted_rply.ar_results.where = NULL;
	reply_msg.acpted_rply.ar_results.proc = xdr_void;

	/*
	 * Decode and validate the response.
	 */
	if (!xdr_replymsg(xdrs, &reply_msg)) {
		p->cku_err.re_status = RPC_CANTDECODERES;
		p->cku_err.re_errno = EIO;
		(void) xdr_rpc_free_verifier(xdrs, &reply_msg);
		goto done1;
	}

	_seterr_reply(&reply_msg, &(p->cku_err));

	re_status = p->cku_err.re_status;
	if (re_status == RPC_SUCCESS) {
		/*
		 * Reply is good, check auth.
		 */
		if (!AUTH_VALIDATE(h->cl_auth,
		    &reply_msg.acpted_rply.ar_verf)) {
			p->cku_err.re_status = RPC_AUTHERROR;
			p->cku_err.re_why = AUTH_INVALIDRESP;
			RCSTAT_INCR(p->cku_stats, rcbadverfs);
			(void) xdr_rpc_free_verifier(xdrs, &reply_msg);
			goto tryread;
		}
		if (!AUTH_UNWRAP(h->cl_auth, xdrs, xdr_results, resultsp)) {
			p->cku_err.re_status = RPC_CANTDECODERES;
			p->cku_err.re_errno = EIO;
		}
		(void) xdr_rpc_free_verifier(xdrs, &reply_msg);
		goto done1;
	}
	/* set errno in case we can't recover */
	if (re_status != RPC_VERSMISMATCH &&
	    re_status != RPC_AUTHERROR &&
	    re_status != RPC_PROGVERSMISMATCH)
		p->cku_err.re_errno = EIO;
	/*
	 * Determine whether or not we're doing an RPC
	 * broadcast.  Some server implementations don't
	 * follow RFC 1050, section 7.4.2 in that they
	 * don't remain silent when they see a proc
	 * they don't support.  Therefore we keep trying
	 * to receive on RPC_PROCUNAVAIL, hoping to get
	 * a valid response from a compliant server.
	 */
	if (re_status == RPC_PROCUNAVAIL && p->cku_bcast) {
		(void) xdr_rpc_free_verifier(xdrs, &reply_msg);
		goto tryread;
	}
	if (re_status == RPC_AUTHERROR) {
		/*
		 * Maybe our credentials need to be refreshed
		 */
		if (refreshes > 0 &&
		    AUTH_REFRESH(h->cl_auth, &reply_msg, p->cku_cred)) {
			/*
			 * The credential is refreshed.  Try the request again.
			 * Even if stries == 0, we still retry as long as
			 * refreshes > 0.  This prevents a soft authentication
			 * error turning into a hard one at an upper level.
			 */
			refreshes--;
			RCSTAT_INCR(p->cku_stats, rcbadcalls);
			RCSTAT_INCR(p->cku_stats, rcnewcreds);

			(void) xdr_rpc_free_verifier(xdrs, &reply_msg);
			freemsg(mpdup);
			call_table_remove(call);
			mutex_enter(&call->call_lock);
			if (call->call_reply != NULL) {
				freemsg(call->call_reply);
				call->call_reply = NULL;
			}
			mutex_exit(&call->call_lock);

			freemsg(resp);
			mpdup = NULL;
			goto call_again;
		}
		/*
		 * We have used the client handle to do an AUTH_REFRESH
		 * and the RPC status may be set to RPC_SUCCESS;
		 * Let's make sure to set it to RPC_AUTHERROR.
		 */
		p->cku_err.re_status = RPC_CANTDECODERES;

		/*
		 * Map recoverable and unrecoverable
		 * authentication errors to appropriate errno
		 */
		switch (p->cku_err.re_why) {
		case AUTH_TOOWEAK:
			/*
			 * Could be an nfsportmon failure, set
			 * useresvport and try again.
			 */
			if (p->cku_useresvport != 1) {
				p->cku_useresvport = 1;
				(void) xdr_rpc_free_verifier(xdrs, &reply_msg);
				freemsg(mpdup);

				call_table_remove(call);
				mutex_enter(&call->call_lock);
				if (call->call_reply != NULL) {
					freemsg(call->call_reply);
					call->call_reply = NULL;
				}
				mutex_exit(&call->call_lock);

				freemsg(resp);
				mpdup = NULL;
				endpt = p->cku_endpnt;
				if (endpt->e_tiptr != NULL) {
					mutex_enter(&endpt->e_lock);
					endpt->e_flags &= ~ENDPNT_BOUND;
					(void) t_kclose(endpt->e_tiptr, 1);
					endpt->e_tiptr = NULL;
					mutex_exit(&endpt->e_lock);

				}

				p->cku_xid = alloc_xid();
				endpnt_rele(p->cku_endpnt);
				p->cku_endpnt = NULL;
				goto call_again;
			}
			/* FALLTHRU */
		case AUTH_BADCRED:
		case AUTH_BADVERF:
		case AUTH_INVALIDRESP:
		case AUTH_FAILED:
		case RPCSEC_GSS_NOCRED:
		case RPCSEC_GSS_FAILED:
			p->cku_err.re_errno = EACCES;
			break;
		case AUTH_REJECTEDCRED:
		case AUTH_REJECTEDVERF:
		default:
			p->cku_err.re_errno = EIO;
			break;
		}
		RPCLOG(1, "clnt_clts_kcallit : authentication failed "
		    "with RPC_AUTHERROR of type %d\n",
		    p->cku_err.re_why);
	}

	(void) xdr_rpc_free_verifier(xdrs, &reply_msg);

done1:
	call_table_remove(call);
	mutex_enter(&call->call_lock);
	if (call->call_reply != NULL) {
		freemsg(call->call_reply);
		call->call_reply = NULL;
	}
	mutex_exit(&call->call_lock);
	RPCLOG(64, "clnt_clts_kcallit_addr: xid 0x%x taken off dispatch list",
	    p->cku_xid);

done:
	if (resp != NULL) {
		freemsg(resp);
		resp = NULL;
	}

	if ((p->cku_err.re_status != RPC_SUCCESS) &&
	    (p->cku_err.re_status != RPC_INTR) &&
	    (p->cku_err.re_status != RPC_UDERROR) &&
	    !IS_UNRECOVERABLE_RPC(p->cku_err.re_status)) {
		if (p->cku_feedback != NULL && stries == p->cku_retrys) {
			(*p->cku_feedback)(FEEDBACK_REXMIT1, procnum,
			    p->cku_feedarg);
		}

		timout = backoff(timout);
		if (p->cku_timeall != (struct rpc_timers *)0)
			p->cku_timeall->rt_rtxcur = timout;

		if (p->cku_err.re_status == RPC_SYSTEMERROR ||
		    p->cku_err.re_status == RPC_CANTSEND) {
			/*
			 * Errors due to lack of resources, wait a bit
			 * and try again.
			 */
			(void) delay(hz/10);
			/* (void) sleep((caddr_t)&lbolt, PZERO-4); */
		}
		if (stries-- > 0) {
			RCSTAT_INCR(p->cku_stats, rcretrans);
			goto call_again;
		}
	}

	if (mpdup != NULL)
		freemsg(mpdup);

	if (p->cku_err.re_status != RPC_SUCCESS) {
		RCSTAT_INCR(p->cku_stats, rcbadcalls);
	}

	/*
	 * Allow the endpoint to be held by the client handle in case this
	 * RPC was not successful.  A retry may occur at a higher level and
	 * in this case we may want to send the request over the same
	 * source port.
	 */
	if (p->cku_err.re_status == RPC_SUCCESS && p->cku_endpnt != NULL) {
		endpnt_rele(p->cku_endpnt);
		p->cku_endpnt = NULL;
	}

	return (p->cku_err.re_status);
}

static enum clnt_stat
clnt_clts_kcallit(CLIENT *h, rpcproc_t procnum, xdrproc_t xdr_args,
	caddr_t argsp, xdrproc_t xdr_results, caddr_t resultsp,
	struct timeval wait)
{
	return (clnt_clts_kcallit_addr(h, procnum, xdr_args, argsp,
	    xdr_results, resultsp, wait, NULL));
}

/*
 * Return error info on this handle.
 */
static void
clnt_clts_kerror(CLIENT *h, struct rpc_err *err)
{
	/* LINTED pointer alignment */
	struct cku_private *p = htop(h);

	*err = p->cku_err;
}

static bool_t
clnt_clts_kfreeres(CLIENT *h, xdrproc_t xdr_res, caddr_t res_ptr)
{
	/* LINTED pointer alignment */
	struct cku_private *p = htop(h);
	XDR *xdrs;

	xdrs = &(p->cku_outxdr);
	xdrs->x_op = XDR_FREE;
	return ((*xdr_res)(xdrs, res_ptr));
}

/*ARGSUSED*/
static void
clnt_clts_kabort(CLIENT *h)
{
}

static bool_t
clnt_clts_kcontrol(CLIENT *h, int cmd, char *arg)
{
	/* LINTED pointer alignment */
	struct cku_private *p = htop(h);

	switch (cmd) {
	case CLSET_XID:
		p->cku_xid = *((uint32_t *)arg);
		return (TRUE);

	case CLGET_XID:
		*((uint32_t *)arg) = p->cku_xid;
		return (TRUE);

	case CLSET_BCAST:
		p->cku_bcast = *((uint32_t *)arg);
		return (TRUE);

	case CLGET_BCAST:
		*((uint32_t *)arg) = p->cku_bcast;
		return (TRUE);

	case CLSET_BINDRESVPORT:
		if (arg == NULL)
			return (FALSE);

		if (*(int *)arg != 1 && *(int *)arg != 0)
			return (FALSE);

		p->cku_useresvport = *(int *)arg;

		return (TRUE);

	case CLGET_BINDRESVPORT:
		if (arg == NULL)
			return (FALSE);

		*(int *)arg = p->cku_useresvport;

		return (TRUE);

	default:
		return (FALSE);
	}
}

/*
 * Destroy rpc handle.
 * Frees the space used for output buffer, private data, and handle
 * structure, and the file pointer/TLI data on last reference.
 */
static void
clnt_clts_kdestroy(CLIENT *h)
{
	/* LINTED pointer alignment */
	struct cku_private *p = htop(h);
	calllist_t *call = &p->cku_call;

	int plen;

	RPCLOG(8, "clnt_clts_kdestroy h: %p\n", (void *)h);
	RPCLOG(8, "clnt_clts_kdestroy h: xid=0x%x\n", p->cku_xid);

	if (p->cku_endpnt != NULL)
		endpnt_rele(p->cku_endpnt);

	cv_destroy(&call->call_cv);
	mutex_destroy(&call->call_lock);

	plen = strlen(p->cku_config.knc_protofmly) + 1;
	kmem_free(p->cku_config.knc_protofmly, plen);
	kmem_free(p->cku_addr.buf, p->cku_addr.maxlen);
	kmem_free(p, sizeof (*p));
}

/*
 * The connectionless (CLTS) kRPC endpoint management subsystem.
 *
 * Because endpoints are potentially shared among threads making RPC calls,
 * they are managed in a pool according to type (endpnt_type_t).  Each
 * endpnt_type_t points to a list of usable endpoints through the e_pool
 * field, which is of type list_t.  list_t is a doubly-linked list.
 * The number of endpoints in the pool is stored in the e_cnt field of
 * endpnt_type_t and the endpoints are reference counted using the e_ref field
 * in the endpnt_t structure.
 *
 * As an optimization, endpoints that have no references are also linked
 * to an idle list via e_ilist which is also of type list_t.  When a thread
 * calls endpnt_get() to obtain a transport endpoint, the idle list is first
 * consulted and if such an endpoint exists, it is removed from the idle list
 * and returned to the caller.
 *
 * If the idle list is empty, then a check is made to see if more endpoints
 * can be created.  If so, we proceed and create a new endpoint which is added
 * to the pool and returned to the caller.  If we have reached the limit and
 * cannot make a new endpoint then one is returned to the caller via round-
 * robin policy.
 *
 * When an endpoint is placed on the idle list by a thread calling
 * endpnt_rele(), it is timestamped and then a reaper taskq is scheduled to
 * be dispatched if one hasn't already been.  When the timer fires, the
 * taskq traverses the idle list and checks to see which endpoints are
 * eligible to be closed.  It determines this by checking if the timestamp
 * when the endpoint was released has exceeded the threshold for how long
 * it should stay alive.
 *
 * endpnt_t structures remain persistent until the memory reclaim callback,
 * endpnt_reclaim(), is invoked.
 *
 * Here is an example of how the data structures would be laid out by the
 * subsystem:
 *
 *	endpnt_type_t
 *
 *	   loopback		                  inet
 *	_______________	                 ______________
 *	| e_next        |----------------------->| e_next       |---->>
 *	| e_pool        |<---+                   | e_pool       |<----+
 *	| e_ilist       |<---+--+                | e_ilist      |<----+--+
 *   +->| e_pcurr       |----+--+--+          +->| e_pcurr      |-----+--+--+
 *   |  | ...           |    |  |  |          |  | ...          |     |  |  |
 *   |  | e_itimer (90) |    |  |  |          |  | e_itimer (0) |     |  |  |
 *   |  | e_cnt (1)     |    |  |  |          |  | e_cnt (3)    |     |  |  |
 *   |  +---------------+    |  |  |          |  +--------------+     |  |  |
 *   |                       |  |  |          |                       |  |  |
 *   |  endpnt_t             |  |  |          |                       |  |  |
 *   |  ____________         |  |  |          |  ____________         |  |  |
 *   |  | e_node    |<-------+  |  |          |  | e_node    |<-------+  |  |
 *   |  | e_idle    |<----------+  |          |  | e_idle    |           |  |
 *   +--| e_type    |<-------------+          +--| e_type    |           |  |
 *      | e_tiptr   |                            | e_tiptr   |           |  |
 *      | ...       |                            | ...       |           |  |
 *      | e_lock    |                            | e_lock    |           |  |
 *      | ...       |                            | ...       |           |  |
 *      | e_ref (0) |                            | e_ref (2) |           |  |
 *      | e_itime   |                            | e_itime   |           |  |
 *      +-----------+                            +-----------+           |  |
 *                                                                       |  |
 *                                                                       |  |
 *                                               ____________            |  |
 *                                               | e_node    |<----------+  |
 *                                               | e_idle    |<----------+--+
 *                                            +--| e_type    |           |
 *                                            |  | e_tiptr   |           |
 *                                            |  | ...       |           |
 *                                            |  | e_lock    |           |
 *                                            |  | ...       |           |
 *                                            |  | e_ref (0) |           |
 *                                            |  | e_itime   |           |
 *                                            |  +-----------+           |
 *                                            |                          |
 *                                            |                          |
 *                                            |  ____________            |
 *                                            |  | e_node    |<----------+
 *                                            |  | e_idle    |
 *                                            +--| e_type    |
 *                                               | e_tiptr   |
 *                                               | ...       |
 *                                               | e_lock    |
 *                                               | ...       |
 *                                               | e_ref (1) |
 *                                               | e_itime   |
 *                                               +-----------+
 *
 * Endpoint locking strategy:
 *
 * The following functions manipulate lists which hold the endpoint and the
 * endpoints themselves:
 *
 * endpnt_get()/check_endpnt()/endpnt_rele()/endpnt_reap()/do_endpnt_reclaim()
 *
 * Lock description follows:
 *
 * endpnt_type_lock:  Global reader/writer lock which protects accesses to the
 *		      endpnt_type_list.
 *
 * e_plock:	      Lock defined in the endpnt_type_t.  It is intended to
 *		      protect accesses to the pool of endpoints (e_pool) for
 *		      a given endpnt_type_t.
 *
 * e_ilock:	      Lock defined in endpnt_type_t.  It is intended to protect
 *		      accesses to the idle list (e_ilist) of available
 *		      endpoints for a given endpnt_type_t.  It also protects
 *		      access to the e_itimer, e_async_cv, and e_async_count
 *		      fields in endpnt_type_t.
 *
 * e_lock:	      Lock defined in the endpnt structure.  It is intended to
 *		      protect flags, cv, and ref count.
 *
 * The order goes as follows so as not to induce deadlock.
 *
 * endpnt_type_lock -> e_plock -> e_ilock -> e_lock
 *
 * Interaction with Zones and shutting down:
 *
 * endpnt_type_ts are uniquely identified by the (e_zoneid, e_rdev, e_protofmly)
 * tuple, which means that a zone may not reuse another zone's idle endpoints
 * without first doing a t_kclose().
 *
 * A zone's endpnt_type_ts are destroyed when a zone is shut down; e_async_cv
 * and e_async_count are used to keep track of the threads in endpnt_taskq
 * trying to reap endpnt_ts in the endpnt_type_t.
 */
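/*
 * A minimal sketch of the calling pattern, mirroring how
 * clnt_clts_kcallit_addr() above uses the subsystem (the knetconfig
 * setup and error handling are assumed, not taken from this file):
 *
 *	struct endpnt *ep;
 *
 *	ep = endpnt_get(&config, useresvport);
 *	if (ep != NULL) {
 *		... transmit via ep->e_wq ...
 *		endpnt_rele(ep);
 *	}
 *
 * A caller may legally hold the endpoint across calls (as the client
 * handle does for retries); the reference count is what keeps the
 * reaper away.
 */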
/*
 * Allocate and initialize an endpnt_type_t
 */
static struct endpnt_type *
endpnt_type_create(struct knetconfig *config)
{
	struct endpnt_type *etype;

	/*
	 * Allocate a new endpoint type to hang a list of
	 * endpoints off of it.
	 */
	etype = kmem_alloc(sizeof (struct endpnt_type), KM_SLEEP);
	etype->e_next = NULL;
	etype->e_pcurr = NULL;
	etype->e_itimer = 0;
	etype->e_cnt = 0;

	(void) strncpy(etype->e_protofmly, config->knc_protofmly, KNC_STRSIZE);
	mutex_init(&etype->e_plock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&etype->e_ilock, NULL, MUTEX_DEFAULT, NULL);
	etype->e_rdev = config->knc_rdev;
	etype->e_zoneid = getzoneid();
	etype->e_async_count = 0;
	cv_init(&etype->e_async_cv, NULL, CV_DEFAULT, NULL);

	list_create(&etype->e_pool, sizeof (endpnt_t),
	    offsetof(endpnt_t, e_node));
	list_create(&etype->e_ilist, sizeof (endpnt_t),
	    offsetof(endpnt_t, e_idle));

	/*
	 * Check to see if we need to create a taskq for endpoint
	 * reaping
	 */
	mutex_enter(&endpnt_taskq_lock);
	if (taskq_created == FALSE) {
		taskq_created = TRUE;
		mutex_exit(&endpnt_taskq_lock);
		ASSERT(endpnt_taskq == NULL);
		endpnt_taskq = taskq_create("clts_endpnt_taskq", 1,
		    minclsyspri, 200, INT_MAX, 0);
	} else
		mutex_exit(&endpnt_taskq_lock);

	return (etype);
}

/*
 * Free an endpnt_type_t
 */
static void
endpnt_type_free(struct endpnt_type *etype)
{
	mutex_destroy(&etype->e_plock);
	mutex_destroy(&etype->e_ilock);
	list_destroy(&etype->e_pool);
	list_destroy(&etype->e_ilist);
	kmem_free(etype, sizeof (endpnt_type_t));
}

/*
 * Check the endpoint to ensure that it is suitable for use.
 *
 * Possible return values:
 *
 * return (1) - Endpoint is established, but needs to be re-opened.
 * return (0) && *newp == NULL - Endpoint is established, but unusable.
 * return (0) && *newp != NULL - Endpoint is established and usable.
 */
static int
check_endpnt(struct endpnt *endp, struct endpnt **newp)
{
	*newp = endp;

	mutex_enter(&endp->e_lock);
	ASSERT(endp->e_ref >= 1);

	/*
	 * The first condition we check for is if the endpoint has been
	 * allocated, but is unusable either because it has been closed or
	 * has been marked stale.  Only *one* thread will be allowed to
	 * execute the then clause.  This is enforced because the first thread
	 * to check this condition will clear the flags, so that subsequent
	 * thread(s) checking this endpoint will move on.
	 */
	if ((endp->e_flags & ENDPNT_ESTABLISHED) &&
	    (!(endp->e_flags & ENDPNT_BOUND) ||
	    (endp->e_flags & ENDPNT_STALE))) {
		/*
		 * Clear the flags here since they will be
		 * set again by this thread.  They need to be
		 * individually cleared because we want to maintain
		 * the state for ENDPNT_ONIDLE.
		 */
		endp->e_flags &= ~(ENDPNT_ESTABLISHED |
		    ENDPNT_WAITING | ENDPNT_BOUND | ENDPNT_STALE);
		mutex_exit(&endp->e_lock);
		return (1);
	}

	/*
	 * The second condition is meant for any thread that is waiting for
	 * an endpoint to become established.  It will cv_wait() until
	 * the condition for the endpoint has been changed to ENDPNT_BOUND or
	 * ENDPNT_STALE.
	 */
	while (!(endp->e_flags & ENDPNT_BOUND) &&
	    !(endp->e_flags & ENDPNT_STALE)) {
		endp->e_flags |= ENDPNT_WAITING;
		cv_wait(&endp->e_cv, &endp->e_lock);
	}

	ASSERT(endp->e_flags & ENDPNT_ESTABLISHED);

	/*
	 * The last case we check for is if the endpoint has been marked
	 * stale.  If this is the case then set *newp to NULL and return,
	 * so that the caller is notified of the error and can take
	 * appropriate action.
	 */
	if (endp->e_flags & ENDPNT_STALE) {
		endp->e_ref--;
		*newp = NULL;
	}
	mutex_exit(&endp->e_lock);
	return (0);
}

#ifdef DEBUG
/*
 * Provide a fault injection setting to test error conditions.
 */
static int endpnt_get_return_null = 0;
#endif

/*
 * Returns a handle (struct endpnt *) to an open and bound endpoint
 * specified by the knetconfig passed in.  Returns NULL if no valid endpoint
 * can be obtained.
 */
static struct endpnt *
endpnt_get(struct knetconfig *config, int useresvport)
{
	struct endpnt_type	*n_etype = NULL;
	struct endpnt_type	*np = NULL;
	struct endpnt		*new = NULL;
	struct endpnt		*endp = NULL;
	struct endpnt		*next = NULL;
	TIUSER			*tiptr = NULL;
	int			rtries = BINDRESVPORT_RETRIES;
	int			i = 0;
	int			error;
	int			retval;
	zoneid_t		zoneid = getzoneid();

	RPCLOG(1, "endpnt_get: protofmly %s, ", config->knc_protofmly);
	RPCLOG(1, "rdev %ld\n", config->knc_rdev);

#ifdef DEBUG
	/*
	 * Inject fault if desired.  Pretend we have a stale endpoint
	 * and return NULL.
	 */
	if (endpnt_get_return_null > 0) {
		endpnt_get_return_null--;
		return (NULL);
	}
#endif
	rw_enter(&endpnt_type_lock, RW_READER);

top:
	for (np = endpnt_type_list; np != NULL; np = np->e_next)
		if ((np->e_zoneid == zoneid) &&
		    (np->e_rdev == config->knc_rdev) &&
		    (strcmp(np->e_protofmly,
		    config->knc_protofmly) == 0))
			break;

	if (np == NULL && n_etype != NULL) {
		ASSERT(rw_write_held(&endpnt_type_lock));

		/*
		 * Link the endpoint type onto the list
		 */
		n_etype->e_next = endpnt_type_list;
		endpnt_type_list = n_etype;
		np = n_etype;
		n_etype = NULL;
	}

	if (np == NULL) {
		/*
		 * The logic here is that we were unable to find an
		 * endpnt_type_t that matched our criteria, so we allocate a
		 * new one.  Because kmem_alloc() needs to be called with
		 * KM_SLEEP, we drop our locks so that we don't induce
		 * deadlock.  After allocating and initializing the
		 * endpnt_type_t, we reacquire the lock and go back to check
		 * if this entry needs to be added to the list.  Since we do
		 * some operations without any locking other threads may
		 * have been looking for the same endpnt_type_t and gone
		 * through this code path.  We check for this case and allow
		 * one thread to link its endpnt_type_t to the list and the
		 * other threads will simply free theirs.
		 */
		rw_exit(&endpnt_type_lock);
		n_etype = endpnt_type_create(config);

		/*
		 * We need to reacquire the lock with RW_WRITER here so that
		 * we can safely link the new endpoint type onto the list.
		 */
		rw_enter(&endpnt_type_lock, RW_WRITER);
		goto top;
	}

	rw_exit(&endpnt_type_lock);
	/*
	 * If n_etype is not NULL, then another thread was able to
	 * insert an endpnt_type_t of this type onto the list before
	 * we did.  Go ahead and free ours.
	 */
	if (n_etype != NULL)
		endpnt_type_free(n_etype);

	mutex_enter(&np->e_ilock);
	/*
	 * The algorithm to hand out endpoints is to first
	 * give out those that are idle if such endpoints
	 * exist.  Otherwise, create a new one if we haven't
	 * reached the max threshold.  Finally, we give out
	 * endpoints in a pseudo LRU fashion (round-robin).
	 *
	 * Note:  The idle list is merely a hint of those endpoints
	 * that should be idle.  There exists a window after the
	 * endpoint is released and before it is linked back onto the
	 * idle list where a thread could get a reference to it and
	 * use it.  This is okay, since the reference counts will
	 * still be consistent.
	 */
	if ((endp = (endpnt_t *)list_head(&np->e_ilist)) != NULL) {
		timeout_id_t t_id = 0;

		mutex_enter(&endp->e_lock);
		endp->e_ref++;
		endp->e_itime = 0;
		endp->e_flags &= ~ENDPNT_ONIDLE;
		mutex_exit(&endp->e_lock);

		/*
		 * Pop the endpoint off the idle list and hand it off
		 */
		list_remove(&np->e_ilist, endp);

		if (np->e_itimer != 0) {
			t_id = np->e_itimer;
			np->e_itimer = 0;
		}
		mutex_exit(&np->e_ilock);
		/*
		 * Reset the idle timer if it has been set
		 */
		if (t_id != (timeout_id_t)0)
			(void) untimeout(t_id);

		if (check_endpnt(endp, &new) == 0)
			return (new);
	} else if (np->e_cnt >= clnt_clts_max_endpoints) {
		/*
		 * There are no idle endpoints currently, so
		 * create a new one if we have not reached the maximum or
		 * hand one out in round-robin.
		 */
		mutex_exit(&np->e_ilock);
		mutex_enter(&np->e_plock);
		endp = np->e_pcurr;
		mutex_enter(&endp->e_lock);
		endp->e_ref++;
		mutex_exit(&endp->e_lock);

		ASSERT(endp != NULL);
		/*
		 * Advance the pointer to the next eligible endpoint, if
		 * necessary.
		 */
		if (np->e_cnt > 1) {
			next = (endpnt_t *)list_next(&np->e_pool, np->e_pcurr);
			if (next == NULL)
				next = (endpnt_t *)list_head(&np->e_pool);
			np->e_pcurr = next;
		}

		mutex_exit(&np->e_plock);

		/*
		 * We need to check to see if this endpoint is bound or
		 * not.  If it is in progress then just wait until
		 * the set up is complete
		 */
		if (check_endpnt(endp, &new) == 0)
			return (new);
	} else {
		mutex_exit(&np->e_ilock);
		mutex_enter(&np->e_plock);

		/*
		 * Allocate a new endpoint to use.  If we can't allocate any
		 * more memory then use one that is already established if any
		 * such endpoints exist.
		 */
		new = kmem_cache_alloc(endpnt_cache, KM_NOSLEEP);
		if (new == NULL) {
			RPCLOG0(1, "endpnt_get: kmem_cache_alloc failed\n");
			/*
			 * Try to recover by using an existing endpoint.
			 */
			if (np->e_cnt <= 0) {
				mutex_exit(&np->e_plock);
				return (NULL);
			}
			endp = np->e_pcurr;
			if ((next = list_next(&np->e_pool, np->e_pcurr)) !=
			    NULL)
				np->e_pcurr = next;
			ASSERT(endp != NULL);
			mutex_enter(&endp->e_lock);
			endp->e_ref++;
			mutex_exit(&endp->e_lock);
			mutex_exit(&np->e_plock);

			if (check_endpnt(endp, &new) == 0)
				return (new);
		} else {
			/*
			 * Partially init an endpoint structure and put
			 * it on the list, so that other interested threads
			 * know that one is being created
			 */
			bzero(new, sizeof (struct endpnt));

			cv_init(&new->e_cv, NULL, CV_DEFAULT, NULL);
			mutex_init(&new->e_lock, NULL, MUTEX_DEFAULT, NULL);
			new->e_ref = 1;
			new->e_type = np;

			/*
			 * Link the endpoint into the pool.
			 */
			list_insert_head(&np->e_pool, new);
			np->e_cnt++;
			if (np->e_pcurr == NULL)
				np->e_pcurr = new;
			mutex_exit(&np->e_plock);
		}
	}

	/*
	 * The transport should be opened with sufficient privs
	 */
	error = t_kopen(NULL, config->knc_rdev, FREAD|FWRITE|FNDELAY, &tiptr,
	    kcred);
	if (error) {
		RPCLOG(1, "endpnt_get: t_kopen: %d\n", error);
		goto bad;
	}

	new->e_tiptr = tiptr;
	rpc_poptimod(tiptr->fp->f_vnode);

	/*
	 * Allow the kernel to push the module on behalf of the user.
	 */
	error = strioctl(tiptr->fp->f_vnode, I_PUSH, (intptr_t)"rpcmod", 0,
	    K_TO_K, kcred, &retval);
	if (error) {
		RPCLOG(1, "endpnt_get: kstr_push on rpcmod failed %d\n", error);
		goto bad;
	}

	error = strioctl(tiptr->fp->f_vnode, RPC_CLIENT, 0, 0, K_TO_K,
	    kcred, &retval);
	if (error) {
		RPCLOG(1, "endpnt_get: strioctl failed %d\n", error);
		goto bad;
	}

	/*
	 * Connectionless data flow should bypass the stream head.
	 */
	new->e_wq = tiptr->fp->f_vnode->v_stream->sd_wrq->q_next;

	error = strioctl(tiptr->fp->f_vnode, I_PUSH, (intptr_t)"timod", 0,
	    K_TO_K, kcred, &retval);
	if (error) {
		RPCLOG(1, "endpnt_get: kstr_push on timod failed %d\n", error);
		goto bad;
	}

	/*
	 * Attempt to bind the endpoint.  If we fail then propagate the
	 * error back to the calling subsystem, so that it can be handled
	 * appropriately.
	 * If the caller has not specified reserved port usage then
	 * take the system default.
	 */
	if (useresvport == -1)
		useresvport = clnt_clts_do_bindresvport;

	if (useresvport &&
	    (strcmp(config->knc_protofmly, NC_INET) == 0 ||
	    strcmp(config->knc_protofmly, NC_INET6) == 0)) {

		while ((error =
		    bindresvport(new->e_tiptr, NULL, NULL, FALSE)) != 0) {
			RPCLOG(1,
			    "endpnt_get: bindresvport error %d\n",
			    error);
			if (error != EPROTO) {
				if (rtries-- <= 0)
					goto bad;

				delay(hz << i++);
				continue;
			}

			(void) t_kclose(new->e_tiptr, 1);
			/*
			 * reopen with all privileges
			 */
			error = t_kopen(NULL, config->knc_rdev,
			    FREAD|FWRITE|FNDELAY,
			    &new->e_tiptr, kcred);
			if (error) {
				RPCLOG(1, "endpnt_get: t_kopen: %d\n", error);
				new->e_tiptr = NULL;
				goto bad;
			}
		}
	} else if ((error = t_kbind(new->e_tiptr, NULL, NULL)) != 0) {
		RPCLOG(1, "endpnt_get: t_kbind failed: %d\n", error);
		goto bad;
	}

	/*
	 * Set the flags and notify any waiters that we have an established
	 * endpoint.
	 */
	mutex_enter(&new->e_lock);
	new->e_flags |= ENDPNT_ESTABLISHED;
	new->e_flags |= ENDPNT_BOUND;
	if (new->e_flags & ENDPNT_WAITING) {
		cv_broadcast(&new->e_cv);
		new->e_flags &= ~ENDPNT_WAITING;
	}
	mutex_exit(&new->e_lock);

	return (new);

bad:
	ASSERT(new != NULL);
	/*
	 * mark this endpoint as stale and notify any threads waiting
	 * on this endpoint that it will be going away.
	 */
	mutex_enter(&new->e_lock);
	if (new->e_ref > 0) {
		new->e_flags |= ENDPNT_ESTABLISHED;
		new->e_flags |= ENDPNT_STALE;
		if (new->e_flags & ENDPNT_WAITING) {
			cv_broadcast(&new->e_cv);
			new->e_flags &= ~ENDPNT_WAITING;
		}
	}
	new->e_ref--;
	new->e_tiptr = NULL;
	mutex_exit(&new->e_lock);

	/*
	 * If there was a transport endpoint opened, then close it.
	 */
	if (tiptr != NULL)
		(void) t_kclose(tiptr, 1);

	return (NULL);
}

/*
 * Release a reference to the endpoint
 */
static void
endpnt_rele(struct endpnt *sp)
{
	mutex_enter(&sp->e_lock);
	ASSERT(sp->e_ref > 0);
	sp->e_ref--;
	/*
	 * If the ref count is zero, then start the idle timer and link
	 * the endpoint onto the idle list.
	 */
	if (sp->e_ref == 0) {
		sp->e_itime = gethrestime_sec();

		/*
		 * Check to see if the endpoint is already linked to the idle
		 * list, so that we don't try to reinsert it.
		 */
		if (sp->e_flags & ENDPNT_ONIDLE) {
			mutex_exit(&sp->e_lock);
			mutex_enter(&sp->e_type->e_ilock);
			endpnt_reap_settimer(sp->e_type);
			mutex_exit(&sp->e_type->e_ilock);
			return;
		}

		sp->e_flags |= ENDPNT_ONIDLE;
		mutex_exit(&sp->e_lock);
		mutex_enter(&sp->e_type->e_ilock);
		list_insert_tail(&sp->e_type->e_ilist, sp);
		endpnt_reap_settimer(sp->e_type);
		mutex_exit(&sp->e_type->e_ilock);
	} else
		mutex_exit(&sp->e_lock);
}

static void
endpnt_reap_settimer(endpnt_type_t *etp)
{
	if (etp->e_itimer == (timeout_id_t)0)
		etp->e_itimer = timeout(endpnt_reap_dispatch, (void *)etp,
		    clnt_clts_taskq_dispatch_interval);
}

static void
endpnt_reap_dispatch(void *a)
{
	endpnt_type_t *etp = a;

	/*
	 * The idle timer has fired, so dispatch the taskq to close the
	 * endpoint.
	 */
	if (taskq_dispatch(endpnt_taskq, (task_func_t *)endpnt_reap, etp,
	    TQ_NOSLEEP) == NULL)
		return;
	mutex_enter(&etp->e_ilock);
	etp->e_async_count++;
	mutex_exit(&etp->e_ilock);
}
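/*
 * To summarize the reap pipeline above: endpnt_rele() timestamps an
 * idle endpoint and calls endpnt_reap_settimer(); when the timeout
 * fires, endpnt_reap_dispatch() hands endpnt_reap() off to
 * endpnt_taskq, so that the t_kclose() work happens in taskq context
 * rather than in the timeout (callout) context.
 */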
/*
 * Traverse the idle list and close those endpoints that have reached their
 * timeout interval.
 */
static void
endpnt_reap(endpnt_type_t *etp)
{
	struct endpnt *e;
	struct endpnt *next_node = NULL;

	mutex_enter(&etp->e_ilock);
	e = list_head(&etp->e_ilist);
	while (e != NULL) {
		next_node = list_next(&etp->e_ilist, e);

		mutex_enter(&e->e_lock);
		if (e->e_ref > 0) {
			mutex_exit(&e->e_lock);
			e = next_node;
			continue;
		}

		ASSERT(e->e_ref == 0);
		if (e->e_itime > 0 &&
		    (e->e_itime + clnt_clts_endpoint_reap_interval) <
		    gethrestime_sec()) {
			e->e_flags &= ~ENDPNT_BOUND;
			(void) t_kclose(e->e_tiptr, 1);
			e->e_tiptr = NULL;
			e->e_itime = 0;
		}
		mutex_exit(&e->e_lock);
		e = next_node;
	}
	etp->e_itimer = 0;
	if (--etp->e_async_count == 0)
		cv_signal(&etp->e_async_cv);
	mutex_exit(&etp->e_ilock);
}

static void
endpnt_reclaim(zoneid_t zoneid)
{
	struct endpnt_type *np;
	struct endpnt *e;
	struct endpnt *next_node = NULL;
	list_t free_list;
	int rcnt = 0;

	list_create(&free_list, sizeof (endpnt_t), offsetof(endpnt_t, e_node));

	RPCLOG0(1, "endpnt_reclaim: reclaim callback started\n");
	rw_enter(&endpnt_type_lock, RW_READER);
	for (np = endpnt_type_list; np != NULL; np = np->e_next) {
		if (zoneid != ALL_ZONES && zoneid != np->e_zoneid)
			continue;

		mutex_enter(&np->e_plock);
		RPCLOG(1, "endpnt_reclaim: protofmly %s, ",
		    np->e_protofmly);
		RPCLOG(1, "rdev %ld\n", np->e_rdev);
		RPCLOG(1, "endpnt_reclaim: found %d endpoint(s)\n",
		    np->e_cnt);

		if (np->e_cnt == 0) {
			mutex_exit(&np->e_plock);
			continue;
		}

		/*
		 * The nice thing about maintaining an idle list is that if
		 * there are any endpoints to reclaim, they are going to be
		 * on this list.  Just go through and reap the ones that
		 * have ref counts of zero.
		 */
		mutex_enter(&np->e_ilock);
		e = list_head(&np->e_ilist);
		while (e != NULL) {
			next_node = list_next(&np->e_ilist, e);
			mutex_enter(&e->e_lock);
			if (e->e_ref > 0) {
				mutex_exit(&e->e_lock);
				e = next_node;
				continue;
			}
			ASSERT(e->e_ref == 0);
			mutex_exit(&e->e_lock);

			list_remove(&np->e_ilist, e);
			list_remove(&np->e_pool, e);
			list_insert_head(&free_list, e);

			rcnt++;
			np->e_cnt--;
			e = next_node;
		}
		mutex_exit(&np->e_ilock);
		/*
		 * Reset the current pointer to be safe
		 */
		if ((e = (struct endpnt *)list_head(&np->e_pool)) != NULL)
			np->e_pcurr = e;
		else {
			ASSERT(np->e_cnt == 0);
			np->e_pcurr = NULL;
		}

		mutex_exit(&np->e_plock);
	}
	rw_exit(&endpnt_type_lock);

	while ((e = list_head(&free_list)) != NULL) {
		list_remove(&free_list, e);
		if (e->e_tiptr != NULL)
			(void) t_kclose(e->e_tiptr, 1);

		cv_destroy(&e->e_cv);
		mutex_destroy(&e->e_lock);
		kmem_cache_free(endpnt_cache, e);
	}
	list_destroy(&free_list);
	RPCLOG(1, "endpnt_reclaim: reclaimed %d endpoint(s)\n", rcnt);
}
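/*
 * Note that endpnt_reclaim() above moves eligible endpoints onto a
 * private free_list while the type and list locks are held, and only
 * calls t_kclose() after all of the locks have been dropped;
 * t_kclose() can block, so it must not be called with the list locks
 * held.
 */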
/*
 * Endpoint reclaim zones destructor callback routine.
 *
 * After reclaiming any cached entries, we basically go through the endpnt_type
 * list, canceling outstanding timeouts and freeing data structures.
 */
/* ARGSUSED */
static void
endpnt_destructor(zoneid_t zoneid, void *a)
{
	struct endpnt_type **npp;
	struct endpnt_type *np;
	struct endpnt_type *free_list = NULL;
	timeout_id_t t_id = 0;
	extern void clcleanup_zone(zoneid_t);
	extern void clcleanup4_zone(zoneid_t);

	/* Make sure NFS client handles are released. */
	clcleanup_zone(zoneid);
	clcleanup4_zone(zoneid);

	endpnt_reclaim(zoneid);
	/*
	 * We don't need to be holding on to any locks across the call to
	 * endpnt_reclaim() and the code below; we know that no-one can
	 * be holding open connections for this zone (all processes and kernel
	 * threads are gone), so nothing could be adding anything to the list.
	 */
	rw_enter(&endpnt_type_lock, RW_WRITER);
	npp = &endpnt_type_list;
	while ((np = *npp) != NULL) {
		if (np->e_zoneid != zoneid) {
			npp = &np->e_next;
			continue;
		}
		mutex_enter(&np->e_plock);
		mutex_enter(&np->e_ilock);
		/*
		 * Reset t_id on each pass so a timer id left over from a
		 * previous iteration is not untimeout()'ed a second time.
		 */
		t_id = 0;
		if (np->e_itimer != 0) {
			t_id = np->e_itimer;
			np->e_itimer = 0;
		}
		ASSERT(np->e_cnt == 0);
		ASSERT(list_head(&np->e_pool) == NULL);
		ASSERT(list_head(&np->e_ilist) == NULL);

		mutex_exit(&np->e_ilock);
		mutex_exit(&np->e_plock);

		/*
		 * untimeout() any outstanding timers that have not yet fired.
		 */
		if (t_id != (timeout_id_t)0)
			(void) untimeout(t_id);
		*npp = np->e_next;
		np->e_next = free_list;
		free_list = np;
	}
	rw_exit(&endpnt_type_lock);

	while (free_list != NULL) {
		np = free_list;
		free_list = free_list->e_next;
		/*
		 * Wait for threads in endpnt_taskq trying to reap endpnt_t's
		 * in the endpnt_type_t.
		 */
		mutex_enter(&np->e_ilock);
		while (np->e_async_count > 0)
			cv_wait(&np->e_async_cv, &np->e_ilock);
		cv_destroy(&np->e_async_cv);
		mutex_destroy(&np->e_plock);
		mutex_destroy(&np->e_ilock);
		list_destroy(&np->e_pool);
		list_destroy(&np->e_ilist);
		kmem_free(np, sizeof (endpnt_type_t));
	}
}

/*
 * Endpoint reclaim kmem callback routine.
 */
/* ARGSUSED */
static void
endpnt_repossess(void *a)
{
	/*
	 * Reclaim idle endpnt_t's from all zones.
	 */
	if (endpnt_taskq != NULL)
		(void) taskq_dispatch(endpnt_taskq,
		    (task_func_t *)endpnt_reclaim, (void *)ALL_ZONES,
		    TQ_NOSLEEP);
}

/*
 * RPC request dispatch routine.  Constructs a datagram message and wraps it
 * around the RPC request to pass downstream.
 */
static int
clnt_clts_dispatch_send(queue_t *q, mblk_t *mp, struct netbuf *addr,
	calllist_t *cp, uint_t xid)
{
	mblk_t *bp;
	int msgsz;
	struct T_unitdata_req *udreq;

	/*
	 * Set up the call record.
	 */
	cp->call_wq = q;
	cp->call_xid = xid;
	cp->call_status = RPC_TIMEDOUT;
	cp->call_notified = FALSE;
	RPCLOG(64,
	    "clnt_clts_dispatch_send: putting xid 0x%x on "
	    "dispatch list\n", xid);
	cp->call_hash = call_hash(xid, clnt_clts_hash_size);
	cp->call_bucket = &clts_call_ht[cp->call_hash];
	call_table_enter(cp);

	/*
	 * Construct the datagram
	 */
	msgsz = (int)TUNITDATAREQSZ;
	while (!(bp = allocb(msgsz + addr->len, BPRI_LO))) {
		if (strwaitbuf(msgsz + addr->len, BPRI_LO))
			return (ENOSR);
	}

	udreq = (struct T_unitdata_req *)bp->b_wptr;
	udreq->PRIM_type = T_UNITDATA_REQ;
	udreq->DEST_length = addr->len;

	if (addr->len) {
		bcopy(addr->buf, bp->b_wptr + msgsz, addr->len);
		udreq->DEST_offset = (t_scalar_t)msgsz;
		msgsz += addr->len;
	} else
		udreq->DEST_offset = 0;
	udreq->OPT_length = 0;
	udreq->OPT_offset = 0;

	bp->b_datap->db_type = M_PROTO;
	bp->b_wptr += msgsz;

	/*
	 * Link the datagram header with the actual data
	 */
	linkb(bp, mp);

	/*
	 * Send downstream.
	 */
	put(cp->call_wq, bp);

	return (0);
}
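/*
 * The message built above is a standard TPI T_UNITDATA_REQ: one M_PROTO
 * mblk holding the request header immediately followed by the
 * destination address, with the caller's serialized RPC request linked
 * behind it.  Schematically:
 *
 *	bp (M_PROTO): [ struct T_unitdata_req | destination address ]
 *	    b_cont -> mp: [ serialized RPC call ]
 *
 * DEST_offset gives the byte offset of the address from b_rptr, which
 * is TUNITDATAREQSZ whenever an address is present.
 */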
/*
 * RPC response delivery routine.  Deliver the response to the waiting
 * thread by matching the xid.
 */
void
clnt_clts_dispatch_notify(mblk_t *mp, int resp_off, zoneid_t zoneid)
{
	calllist_t *e = NULL;
	call_table_t *chtp;
	uint32_t xid;
	uint_t hash;
	unsigned char *hdr_offset;
	mblk_t *resp;

	/*
	 * If the RPC response is not contained in the same mblk as the
	 * datagram header, then move to the next mblk.
	 */
	hdr_offset = mp->b_rptr;
	resp = mp;
	if ((mp->b_wptr - (mp->b_rptr + resp_off)) == 0)
		resp = mp->b_cont;
	else
		resp->b_rptr += resp_off;

	ASSERT(resp != NULL);

	if ((IS_P2ALIGNED(resp->b_rptr, sizeof (uint32_t))) &&
	    (resp->b_wptr - resp->b_rptr) >= sizeof (xid))
		xid = *((uint32_t *)resp->b_rptr);
	else {
		int i = 0;
		unsigned char *p = (unsigned char *)&xid;
		unsigned char *rptr;
		mblk_t *tmp = resp;

		/*
		 * Copy the xid, byte-by-byte into xid.
		 */
		while (tmp) {
			rptr = tmp->b_rptr;
			while (rptr < tmp->b_wptr) {
				*p++ = *rptr++;
				if (++i >= sizeof (xid))
					goto done_xid_copy;
			}
			tmp = tmp->b_cont;
		}

		/*
		 * If we got here, we ran out of mblk space before the
		 * xid could be copied.
		 */
		ASSERT(tmp == NULL && i < sizeof (xid));

		RPCLOG0(1,
		    "clnt_dispatch_notify(clts): message less than "
		    "size of xid\n");

		freemsg(mp);
		return;
	}

done_xid_copy:

	/*
	 * Reset the read pointer back to the beginning of the protocol
	 * header if we moved it.
	 */
	if (mp->b_rptr != hdr_offset)
		mp->b_rptr = hdr_offset;

	hash = call_hash(xid, clnt_clts_hash_size);
	chtp = &clts_call_ht[hash];
	/* call_table_find returns with the hash bucket locked */
	call_table_find(chtp, xid, e);

	if (e != NULL) {
		mutex_enter(&e->call_lock);
		/*
		 * Found a thread waiting for this reply.
		 */
		if (e->call_reply) {
			RPCLOG(8,
			    "clnt_dispatch_notify (clts): discarding old "
			    "reply for xid 0x%x\n",
			    xid);
			freemsg(e->call_reply);
		}
		e->call_notified = TRUE;
		e->call_reply = mp;
		e->call_status = RPC_SUCCESS;
		cv_signal(&e->call_cv);
		mutex_exit(&e->call_lock);
		mutex_exit(&chtp->ct_lock);
	} else {
		zone_t *zone;
		struct rpcstat *rpcstat;

		mutex_exit(&chtp->ct_lock);
		RPCLOG(8, "clnt_dispatch_notify (clts): no caller for reply "
		    "0x%x\n", xid);
		freemsg(mp);
		/*
		 * This is unfortunate, but we need to lookup the zone so we
		 * can increment its "rcbadxids" counter.
		 */
		zone = zone_find_by_id(zoneid);
		if (zone == NULL) {
			/*
			 * The zone went away...
			 */
			return;
		}
		rpcstat = zone_getspecific(rpcstat_zone_key, zone);
		if (zone_status_get(zone) >= ZONE_IS_SHUTTING_DOWN) {
			/*
			 * Not interested
			 */
			zone_rele(zone);
			return;
		}
		RCSTAT_INCR(rpcstat->rpc_clts_client, rcbadxids);
		zone_rele(zone);
	}
}
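/*
 * clnt_clts_dispatch_notify() is the receive-side half of the rendezvous
 * set up by clnt_clts_dispatch_send().  A minimal sketch of the sending
 * side (the real caller is clnt_clts_kcallit(); the locals here are
 * illustrative only, and a real caller would use cv_timedwait() so the
 * RPC timeout is honored):
 *
 *	calllist_t call;
 *
 *	error = clnt_clts_dispatch_send(wq, mp, &addr, &call, xid);
 *	mutex_enter(&call.call_lock);
 *	while (!call.call_notified)
 *		cv_wait(&call.call_cv, &call.call_lock);
 *	mutex_exit(&call.call_lock);
 *	reply = call.call_reply;
 *
 * The reply's xid is hashed with the same call_hash() used at send time
 * and matched against clts_call_ht; the byte-by-byte copy above handles
 * replies whose first four bytes are unaligned or split across chained
 * mblks.
 */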
/*
 * Init routine.  Called when rpcmod is loaded.
 */
void
clnt_clts_init(void)
{
	endpnt_cache = kmem_cache_create("clnt_clts_endpnt_cache",
	    sizeof (struct endpnt), 0, NULL, NULL, endpnt_repossess, NULL,
	    NULL, 0);

	rw_init(&endpnt_type_lock, NULL, RW_DEFAULT, NULL);

	/*
	 * Perform simple bounds checking to make sure that the setting is
	 * reasonable.
	 */
	if (clnt_clts_max_endpoints <= 0) {
		if (clnt_clts_do_bindresvport)
			clnt_clts_max_endpoints = RESERVED_PORTSPACE;
		else
			clnt_clts_max_endpoints = NONRESERVED_PORTSPACE;
	}

	if (clnt_clts_do_bindresvport &&
	    clnt_clts_max_endpoints > RESERVED_PORTSPACE)
		clnt_clts_max_endpoints = RESERVED_PORTSPACE;
	else if (clnt_clts_max_endpoints > NONRESERVED_PORTSPACE)
		clnt_clts_max_endpoints = NONRESERVED_PORTSPACE;

	if (clnt_clts_hash_size < DEFAULT_MIN_HASH_SIZE)
		clnt_clts_hash_size = DEFAULT_MIN_HASH_SIZE;

	/*
	 * Defer creating the taskq until rpcmod gets pushed.  If we are
	 * in diskless boot mode, rpcmod will get loaded early even before
	 * thread_create() is available.
	 */
	endpnt_taskq = NULL;
	taskq_created = FALSE;
	mutex_init(&endpnt_taskq_lock, NULL, MUTEX_DEFAULT, NULL);

	if (clnt_clts_endpoint_reap_interval < DEFAULT_ENDPOINT_REAP_INTERVAL)
		clnt_clts_endpoint_reap_interval =
		    DEFAULT_ENDPOINT_REAP_INTERVAL;

	/*
	 * Dispatch the taskq at an interval which is offset from the
	 * interval that the endpoints should be reaped.
	 */
	clnt_clts_taskq_dispatch_interval =
	    (clnt_clts_endpoint_reap_interval + DEFAULT_INTERVAL_SHIFT) * hz;

	/*
	 * Initialize the completion queue
	 */
	clts_call_ht = call_table_init(clnt_clts_hash_size);
	/*
	 * Initialize the zone destructor callback.
	 */
	zone_key_create(&endpnt_destructor_key, NULL, NULL, endpnt_destructor);
}

void
clnt_clts_fini(void)
{
	(void) zone_key_delete(endpnt_destructor_key);
}
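/*
 * With the default tunables, clnt_clts_init() arms the reap machinery
 * as follows: an endpoint must sit idle for at least
 * DEFAULT_ENDPOINT_REAP_INTERVAL (60) seconds before it is eligible for
 * reaping, and the reaper dispatch timer fires every
 * (60 + DEFAULT_INTERVAL_SHIFT) * hz = 90 * hz clock ticks, i.e. 90
 * seconds.  Since the timer is armed when an endpoint is released, each
 * endpoint spends at least one full reap interval on the idle list
 * before it can be closed.
 */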