/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved.
 */

/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * Server-side remote procedure call interface.
 *
 * Master transport handle (SVCMASTERXPRT).
 * The master transport handle structure is shared among service
 * threads processing events on the transport. Some fields in the
 * master structure are protected by locks:
 * - xp_req_lock protects the request queue:
 *	xp_req_head, xp_req_tail
 * - xp_thread_lock protects the thread (clone) counts:
 *	xp_threads, xp_detached_threads, xp_wq
 * Each master transport is registered to exactly one thread pool.
 *
 * Clone transport handle (SVCXPRT)
 * The clone transport handle structure is a per-service-thread handle
 * to the transport. The structure carries all the fields/buffers used
 * for request processing. A service thread or, in other words, a clone
 * structure, can be linked to an arbitrary master structure to process
 * requests on this transport. The master handle keeps track of reference
 * counts of threads (clones) linked to it. A service thread can switch
 * to another transport by unlinking its clone handle from the current
 * transport and linking to a new one. Switching is relatively inexpensive
 * but it involves locking (master's xprt->xp_thread_lock).
 *
 * Pools.
 * A pool represents a kernel RPC service (NFS, Lock Manager, etc.).
 * Transports related to the service are registered to the service pool.
 * Service threads can switch between different transports in the pool.
 * Thus, each service has its own pool of service threads. The maximum
 * number of threads in a pool is pool->p_maxthreads. This limit allows
 * us to restrict resource usage by the service. Some fields are protected
 * by locks:
 * - p_req_lock protects several counts and flags:
 *	p_reqs, p_walkers, p_asleep, p_drowsy, p_req_cv
 * - p_thread_lock governs other thread counts:
 *	p_threads, p_detached_threads, p_reserved_threads, p_closing
 *
 * In addition, each pool contains a doubly-linked list of transports,
 * an `xprt-ready' queue and a creator thread (see below). Threads in
 * the pool share some other parameters such as stack size and
 * polling timeout.
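 *
 * A rough picture (illustrative only, not a normative layout) of how
 * the pieces described above relate to each other:
 *
 *	SVCPOOL (one per service)
 *	  +- doubly-linked list of master transports (SVCMASTERXPRT),
 *	  |	each with its own request queue
 *	  +- `xprt-ready' queue of hints for svc_poll()
 *	  +- creator thread (kernel side of service thread creation)
 *	  +- service threads, each holding a clone handle (SVCXPRT)
 *		linked to one of the master transports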
 *
 * Pools are initialized through the svc_pool_create() function called from
 * the nfssys() system call. However, thread creation must be done by
 * the userland agent. This is done by using the SVCPOOL_WAIT and
 * SVCPOOL_RUN arguments to nfssys(), which call svc_wait() and
 * svc_do_run(), respectively. Once the pool has been initialized,
 * the userland process must set up a `creator' thread. This thread
 * should park itself in the kernel by calling svc_wait(). If
 * svc_wait() returns successfully, it should fork off a new worker
 * thread, which then calls svc_do_run() in order to get work. When
 * that thread is complete, svc_do_run() will return, and the user
 * program should call thr_exit(). (An illustrative sketch of this
 * protocol follows this comment block.)
 *
 * When we try to register a new pool and there is an old pool with
 * the same id in the doubly linked pool list (this happens when we kill
 * and restart nfsd or lockd), then we unlink the old pool from the list
 * and mark its state as `closing'. After that the transports can still
 * process requests but new transports won't be registered. When all the
 * transports and service threads associated with the pool are gone the
 * creator thread (see below) will clean up the pool structure and exit.
 *
 * svc_queuereq() and svc_run().
 * The kernel RPC server is interrupt driven. The svc_queuereq() interrupt
 * routine is called to deliver an RPC request. The service threads
 * loop in svc_run(). The interrupt function queues a request on the
 * transport's queue and makes sure that the request is serviced.
 * It may either wake up one of the sleeping threads, or ask for a new
 * thread to be created, or, if the previous request is just being picked
 * up, do nothing. In the last case the service thread that is picking up
 * the previous request will wake up or create the next thread. After a
 * service thread processes a request and sends a reply it returns to
 * svc_run() and svc_run() calls svc_poll() to find new input.
 *
 * There is no longer an "inconsistent" but "safe" optimization in the
 * svc_queuereq() code. This "inconsistent" state was leading to
 * inconsistencies between the actual number of requests and the value
 * of p_reqs (the total number of requests). Because of this, hangs were
 * occurring in svc_poll() where p_reqs was greater than one and no
 * requests were found on the request queues.
 *
 * svc_poll().
 * In order to avoid unnecessary locking, which causes performance
 * problems, we always look for a pending request on the current transport.
 * If there is none we take a hint from the pool's `xprt-ready' queue.
 * If the queue had an overflow we switch to the `drain' mode, checking
 * each transport in the pool's transport list. Once we find a
 * master transport handle with a pending request we latch the request
 * lock on this transport and return to svc_run(). If the request
 * belongs to a transport different from the one the service thread is
 * linked to we need to unlink and link again.
 *
 * A service thread goes to sleep when there are no pending
 * requests on the transports registered with the pool.
 * All the pool's threads sleep on the same condition variable.
 * If a thread has been sleeping for too long a period of time
 * (by default 5 seconds) it wakes up and exits. Also when a transport
 * is closing sleeping threads wake up to unlink from this transport.
 *
 * The `xprt-ready' queue.
 * If a service thread finds no request on the transport it is currently
 * linked to, it will look for another transport with a pending request.
 * To make this search more efficient each pool has an `xprt-ready' queue.
 * The queue is a FIFO. When the interrupt routine queues a request it also
 * inserts a pointer to the transport into the `xprt-ready' queue. A
 * thread looking for a transport with a pending request can pop a
 * transport and check for a request. The request may already be gone
 * since it could have been taken by a thread linked to that transport.
 * In such a case we try the next hint. The `xprt-ready' queue has a
 * fixed size (by default 256 nodes). If it overflows svc_poll() has to
 * switch to the less efficient but safe `drain' mode and walk through
 * the pool's transport list.
 *
 * Both the svc_poll() loop and the `xprt-ready' queue are optimized
 * for the peak load case, that is, for the situation when the queue is
 * not empty, there are always a few pending requests, and a service
 * thread which has just processed a request does not go to sleep but
 * immediately picks up the next request.
 *
 * Thread creator.
 * Each pool has a thread creator associated with it. The creator thread
 * sleeps on a condition variable and waits for a signal to create a
 * service thread. The actual thread creation is done in userland by
 * the method described in "Pools" above.
 *
 * Signaling threads should turn on the `creator signaled' flag, and
 * can avoid sending signals when the flag is on. The flag is cleared
 * when the thread is created.
 *
 * When the pool is in closing state (i.e. it has been already unregistered
 * from the pool list) the last thread on the last transport in the pool
 * should turn the p_creator_exit flag on. The creator thread will
 * clean up the pool structure and exit.
 *
 * Thread reservation; Detaching service threads.
 * A service thread can detach itself to block for an extended amount
 * of time. However, to keep the service active we need to guarantee
 * at least pool->p_redline non-detached threads that can process incoming
 * requests. Thus, the maximum number of detached and reserved threads is
 * p->p_maxthreads - p->p_redline. A service thread should first acquire
 * a reservation, and if the reservation was granted it can detach itself.
 * If a reservation was granted but the thread does not detach itself
 * it should cancel the reservation before it returns to svc_run().
 */
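
/*
 * Illustrative sketch of the userland `creator' protocol described in
 * the "Pools" section above. This code is not part of this module; the
 * _nfssys() wrapper name, thread flags, and other details are
 * assumptions made for the example only.
 *
 *	static void *
 *	worker(void *arg)
 *	{
 *		int id = *(int *)arg;
 *
 *		(void) _nfssys(SVCPOOL_RUN, &id);	// calls svc_do_run()
 *		thr_exit(NULL);
 *		return (NULL);
 *	}
 *
 *	static void *
 *	creator(void *arg)
 *	{
 *		int id = *(int *)arg;
 *
 *		// park in the kernel; each successful return is a request
 *		// from svc_thread_creator() for one more service thread
 *		while (_nfssys(SVCPOOL_WAIT, &id) == 0)	// calls svc_wait()
 *			(void) thr_create(NULL, 0, worker, &id,
 *			    THR_BOUND | THR_DETACHED, NULL);
 *		thr_exit(NULL);		// pool is closing
 *		return (NULL);
 *	}
 */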
182 */ 183 184 #include <sys/param.h> 185 #include <sys/types.h> 186 #include <rpc/types.h> 187 #include <sys/socket.h> 188 #include <sys/time.h> 189 #include <sys/tiuser.h> 190 #include <sys/t_kuser.h> 191 #include <netinet/in.h> 192 #include <rpc/xdr.h> 193 #include <rpc/auth.h> 194 #include <rpc/clnt.h> 195 #include <rpc/rpc_msg.h> 196 #include <rpc/svc.h> 197 #include <sys/proc.h> 198 #include <sys/user.h> 199 #include <sys/stream.h> 200 #include <sys/strsubr.h> 201 #include <sys/tihdr.h> 202 #include <sys/debug.h> 203 #include <sys/cmn_err.h> 204 #include <sys/file.h> 205 #include <sys/systm.h> 206 #include <sys/callb.h> 207 #include <sys/vtrace.h> 208 #include <sys/zone.h> 209 #include <nfs/nfs.h> 210 #include <sys/tsol/label_macro.h> 211 212 #define RQCRED_SIZE 400 /* this size is excessive */ 213 214 /* 215 * Defines for svc_poll() 216 */ 217 #define SVC_EXPRTGONE ((SVCMASTERXPRT *)1) /* Transport is closing */ 218 #define SVC_ETIMEDOUT ((SVCMASTERXPRT *)2) /* Timeout */ 219 #define SVC_EINTR ((SVCMASTERXPRT *)3) /* Interrupted by signal */ 220 221 /* 222 * Default stack size for service threads. 223 */ 224 #define DEFAULT_SVC_RUN_STKSIZE (0) /* default kernel stack */ 225 226 int svc_default_stksize = DEFAULT_SVC_RUN_STKSIZE; 227 228 /* 229 * Default polling timeout for service threads. 230 * Multiplied by hz when used. 231 */ 232 #define DEFAULT_SVC_POLL_TIMEOUT (5) /* seconds */ 233 234 clock_t svc_default_timeout = DEFAULT_SVC_POLL_TIMEOUT; 235 236 /* 237 * Size of the `xprt-ready' queue. 238 */ 239 #define DEFAULT_SVC_QSIZE (256) /* qnodes */ 240 241 size_t svc_default_qsize = DEFAULT_SVC_QSIZE; 242 243 /* 244 * Default limit for the number of service threads. 245 */ 246 #define DEFAULT_SVC_MAXTHREADS (INT16_MAX) 247 248 int svc_default_maxthreads = DEFAULT_SVC_MAXTHREADS; 249 250 /* 251 * Maximum number of requests from the same transport (in `drain' mode). 252 */ 253 #define DEFAULT_SVC_MAX_SAME_XPRT (8) 254 255 int svc_default_max_same_xprt = DEFAULT_SVC_MAX_SAME_XPRT; 256 257 258 /* 259 * Default `Redline' of non-detached threads. 260 * Total number of detached and reserved threads in an RPC server 261 * thread pool is limited to pool->p_maxthreads - svc_redline. 262 */ 263 #define DEFAULT_SVC_REDLINE (1) 264 265 int svc_default_redline = DEFAULT_SVC_REDLINE; 266 267 /* 268 * A node for the `xprt-ready' queue. 269 * See below. 270 */ 271 struct __svcxprt_qnode { 272 __SVCXPRT_QNODE *q_next; 273 SVCMASTERXPRT *q_xprt; 274 }; 275 276 /* 277 * Global SVC variables (private). 278 */ 279 struct svc_globals { 280 SVCPOOL *svc_pools; 281 kmutex_t svc_plock; 282 }; 283 284 /* 285 * Debug variable to check for rdma based 286 * transport startup and cleanup. Contorlled 287 * through /etc/system. Off by default. 288 */ 289 int rdma_check = 0; 290 291 /* 292 * Authentication parameters list. 293 */ 294 static caddr_t rqcred_head; 295 static kmutex_t rqcred_lock; 296 297 /* 298 * Pointers to transport specific `rele' routines in rpcmod (set from rpcmod). 299 */ 300 void (*rpc_rele)(queue_t *, mblk_t *) = NULL; 301 void (*mir_rele)(queue_t *, mblk_t *) = NULL; 302 303 /* ARGSUSED */ 304 void 305 rpc_rdma_rele(queue_t *q, mblk_t *mp) 306 { 307 } 308 void (*rdma_rele)(queue_t *, mblk_t *) = rpc_rdma_rele; 309 310 311 /* 312 * This macro picks which `rele' routine to use, based on the transport type. 313 */ 314 #define RELE_PROC(xprt) \ 315 ((xprt)->xp_type == T_RDMA ? rdma_rele : \ 316 (((xprt)->xp_type == T_CLTS) ? 
	(((xprt)->xp_type == T_CLTS) ? rpc_rele : mir_rele))

/*
 * If true, then keep quiet about version mismatch.
 * This macro is for broadcast RPC only. We have no broadcast RPC in
 * kernel now but one may define a flag in the transport structure
 * and redefine this macro.
 */
#define	version_keepquiet(xprt)	(FALSE)

/*
 * ZSD key used to retrieve zone-specific svc globals
 */
static zone_key_t svc_zone_key;

static void svc_callout_free(SVCMASTERXPRT *);
static void svc_xprt_qinit(SVCPOOL *, size_t);
static void svc_xprt_qdestroy(SVCPOOL *);
static void svc_thread_creator(SVCPOOL *);
static void svc_creator_signal(SVCPOOL *);
static void svc_creator_signalexit(SVCPOOL *);
static void svc_pool_unregister(struct svc_globals *, SVCPOOL *);
static int svc_run(SVCPOOL *);

/* ARGSUSED */
static void *
svc_zoneinit(zoneid_t zoneid)
{
	struct svc_globals *svc;

	svc = kmem_alloc(sizeof (*svc), KM_SLEEP);
	mutex_init(&svc->svc_plock, NULL, MUTEX_DEFAULT, NULL);
	svc->svc_pools = NULL;
	return (svc);
}

/* ARGSUSED */
static void
svc_zoneshutdown(zoneid_t zoneid, void *arg)
{
	struct svc_globals *svc = arg;
	SVCPOOL *pool;

	mutex_enter(&svc->svc_plock);
	while ((pool = svc->svc_pools) != NULL) {
		svc_pool_unregister(svc, pool);
	}
	mutex_exit(&svc->svc_plock);
}

/* ARGSUSED */
static void
svc_zonefini(zoneid_t zoneid, void *arg)
{
	struct svc_globals *svc = arg;

	ASSERT(svc->svc_pools == NULL);
	mutex_destroy(&svc->svc_plock);
	kmem_free(svc, sizeof (*svc));
}

/*
 * Global SVC init routine.
 * Initialize global generic and transport type specific structures
 * used by the kernel RPC server side. This routine is called only
 * once when the module is being loaded.
 */
void
svc_init()
{
	zone_key_create(&svc_zone_key, svc_zoneinit, svc_zoneshutdown,
	    svc_zonefini);
	svc_cots_init();
	svc_clts_init();
}

/*
 * Destroy the SVCPOOL structure.
 */
static void
svc_pool_cleanup(SVCPOOL *pool)
{
	ASSERT(pool->p_threads + pool->p_detached_threads == 0);
	ASSERT(pool->p_lcount == 0);
	ASSERT(pool->p_closing);

	/*
	 * Call the user supplied shutdown function. This is done
	 * here so the user of the pool will be able to clean up
	 * service related resources.
	 */
	if (pool->p_shutdown != NULL)
		(pool->p_shutdown)();

	/* Destroy `xprt-ready' queue */
	svc_xprt_qdestroy(pool);

	/* Destroy transport list */
	rw_destroy(&pool->p_lrwlock);

	/* Destroy locks and condition variables */
	mutex_destroy(&pool->p_thread_lock);
	mutex_destroy(&pool->p_req_lock);
	cv_destroy(&pool->p_req_cv);

	/* Destroy creator's locks and condition variables */
	mutex_destroy(&pool->p_creator_lock);
	cv_destroy(&pool->p_creator_cv);
	mutex_destroy(&pool->p_user_lock);
	cv_destroy(&pool->p_user_cv);

	/* Free pool structure */
	kmem_free(pool, sizeof (SVCPOOL));
}
434 */ 435 static bool_t 436 svc_pool_tryexit(SVCPOOL *pool) 437 { 438 ASSERT(MUTEX_HELD(&pool->p_thread_lock)); 439 ASSERT(pool->p_closing); 440 441 if (pool->p_threads + pool->p_detached_threads == 0) { 442 rw_enter(&pool->p_lrwlock, RW_READER); 443 if (pool->p_lcount == 0) { 444 /* 445 * Release the locks before sending a signal. 446 */ 447 rw_exit(&pool->p_lrwlock); 448 mutex_exit(&pool->p_thread_lock); 449 450 /* 451 * Notify the creator thread to clean up and exit 452 * 453 * NOTICE: No references to the pool beyond this point! 454 * The pool is being destroyed. 455 */ 456 ASSERT(!MUTEX_HELD(&pool->p_thread_lock)); 457 svc_creator_signalexit(pool); 458 459 return (TRUE); 460 } 461 rw_exit(&pool->p_lrwlock); 462 } 463 464 ASSERT(MUTEX_HELD(&pool->p_thread_lock)); 465 return (FALSE); 466 } 467 468 /* 469 * Find a pool with a given id. 470 */ 471 static SVCPOOL * 472 svc_pool_find(struct svc_globals *svc, int id) 473 { 474 SVCPOOL *pool; 475 476 ASSERT(MUTEX_HELD(&svc->svc_plock)); 477 478 /* 479 * Search the list for a pool with a matching id 480 * and register the transport handle with that pool. 481 */ 482 for (pool = svc->svc_pools; pool; pool = pool->p_next) 483 if (pool->p_id == id) 484 return (pool); 485 486 return (NULL); 487 } 488 489 /* 490 * PSARC 2003/523 Contract Private Interface 491 * svc_do_run 492 * Changes must be reviewed by Solaris File Sharing 493 * Changes must be communicated to contract-2003-523@sun.com 494 */ 495 int 496 svc_do_run(int id) 497 { 498 SVCPOOL *pool; 499 int err = 0; 500 struct svc_globals *svc; 501 502 svc = zone_getspecific(svc_zone_key, curproc->p_zone); 503 mutex_enter(&svc->svc_plock); 504 505 pool = svc_pool_find(svc, id); 506 507 mutex_exit(&svc->svc_plock); 508 509 if (pool == NULL) 510 return (ENOENT); 511 512 /* 513 * Increment counter of pool threads now 514 * that a thread has been created. 515 */ 516 mutex_enter(&pool->p_thread_lock); 517 pool->p_threads++; 518 mutex_exit(&pool->p_thread_lock); 519 520 /* Give work to the new thread. */ 521 err = svc_run(pool); 522 523 return (err); 524 } 525 526 /* 527 * Unregister a pool from the pool list. 528 * Set the closing state. If all the transports and service threads 529 * are already gone signal the creator thread to clean up and exit. 530 */ 531 static void 532 svc_pool_unregister(struct svc_globals *svc, SVCPOOL *pool) 533 { 534 SVCPOOL *next = pool->p_next; 535 SVCPOOL *prev = pool->p_prev; 536 537 ASSERT(MUTEX_HELD(&svc->svc_plock)); 538 539 /* Remove from the list */ 540 if (pool == svc->svc_pools) 541 svc->svc_pools = next; 542 if (next) 543 next->p_prev = prev; 544 if (prev) 545 prev->p_next = next; 546 pool->p_next = pool->p_prev = NULL; 547 548 /* 549 * Offline the pool. Mark the pool as closing. 550 * If there are no transports in this pool notify 551 * the creator thread to clean it up and exit. 552 */ 553 mutex_enter(&pool->p_thread_lock); 554 if (pool->p_offline != NULL) 555 (pool->p_offline)(); 556 pool->p_closing = TRUE; 557 if (svc_pool_tryexit(pool)) 558 return; 559 mutex_exit(&pool->p_thread_lock); 560 } 561 562 /* 563 * Register a pool with a given id in the global doubly linked pool list. 564 * - if there is a pool with the same id in the list then unregister it 565 * - insert the new pool into the list. 566 */ 567 static void 568 svc_pool_register(struct svc_globals *svc, SVCPOOL *pool, int id) 569 { 570 SVCPOOL *old_pool; 571 572 /* 573 * If there is a pool with the same id then remove it from 574 * the list and mark the pool as closing. 
575 */ 576 mutex_enter(&svc->svc_plock); 577 578 if (old_pool = svc_pool_find(svc, id)) 579 svc_pool_unregister(svc, old_pool); 580 581 /* Insert into the doubly linked list */ 582 pool->p_id = id; 583 pool->p_next = svc->svc_pools; 584 pool->p_prev = NULL; 585 if (svc->svc_pools) 586 svc->svc_pools->p_prev = pool; 587 svc->svc_pools = pool; 588 589 mutex_exit(&svc->svc_plock); 590 } 591 592 /* 593 * Initialize a newly created pool structure 594 */ 595 static int 596 svc_pool_init(SVCPOOL *pool, uint_t maxthreads, uint_t redline, 597 uint_t qsize, uint_t timeout, uint_t stksize, uint_t max_same_xprt) 598 { 599 klwp_t *lwp = ttolwp(curthread); 600 601 ASSERT(pool); 602 603 if (maxthreads == 0) 604 maxthreads = svc_default_maxthreads; 605 if (redline == 0) 606 redline = svc_default_redline; 607 if (qsize == 0) 608 qsize = svc_default_qsize; 609 if (timeout == 0) 610 timeout = svc_default_timeout; 611 if (stksize == 0) 612 stksize = svc_default_stksize; 613 if (max_same_xprt == 0) 614 max_same_xprt = svc_default_max_same_xprt; 615 616 if (maxthreads < redline) 617 return (EINVAL); 618 619 /* Allocate and initialize the `xprt-ready' queue */ 620 svc_xprt_qinit(pool, qsize); 621 622 /* Initialize doubly-linked xprt list */ 623 rw_init(&pool->p_lrwlock, NULL, RW_DEFAULT, NULL); 624 625 /* 626 * Setting lwp_childstksz on the current lwp so that 627 * descendants of this lwp get the modified stacksize, if 628 * it is defined. It is important that either this lwp or 629 * one of its descendants do the actual servicepool thread 630 * creation to maintain the stacksize inheritance. 631 */ 632 if (lwp != NULL) 633 lwp->lwp_childstksz = stksize; 634 635 /* Initialize thread limits, locks and condition variables */ 636 pool->p_maxthreads = maxthreads; 637 pool->p_redline = redline; 638 pool->p_timeout = timeout * hz; 639 pool->p_stksize = stksize; 640 pool->p_max_same_xprt = max_same_xprt; 641 mutex_init(&pool->p_thread_lock, NULL, MUTEX_DEFAULT, NULL); 642 mutex_init(&pool->p_req_lock, NULL, MUTEX_DEFAULT, NULL); 643 cv_init(&pool->p_req_cv, NULL, CV_DEFAULT, NULL); 644 645 /* Initialize userland creator */ 646 pool->p_user_exit = FALSE; 647 pool->p_signal_create_thread = FALSE; 648 pool->p_user_waiting = FALSE; 649 mutex_init(&pool->p_user_lock, NULL, MUTEX_DEFAULT, NULL); 650 cv_init(&pool->p_user_cv, NULL, CV_DEFAULT, NULL); 651 652 /* Initialize the creator and start the creator thread */ 653 pool->p_creator_exit = FALSE; 654 mutex_init(&pool->p_creator_lock, NULL, MUTEX_DEFAULT, NULL); 655 cv_init(&pool->p_creator_cv, NULL, CV_DEFAULT, NULL); 656 657 (void) zthread_create(NULL, pool->p_stksize, svc_thread_creator, 658 pool, 0, minclsyspri); 659 660 return (0); 661 } 662 663 /* 664 * PSARC 2003/523 Contract Private Interface 665 * svc_pool_create 666 * Changes must be reviewed by Solaris File Sharing 667 * Changes must be communicated to contract-2003-523@sun.com 668 * 669 * Create an kernel RPC server-side thread/transport pool. 670 * 671 * This is public interface for creation of a server RPC thread pool 672 * for a given service provider. Transports registered with the pool's id 673 * will be served by a pool's threads. This function is called from the 674 * nfssys() system call. 675 */ 676 int 677 svc_pool_create(struct svcpool_args *args) 678 { 679 SVCPOOL *pool; 680 int error; 681 struct svc_globals *svc; 682 683 /* 684 * Caller should check credentials in a way appropriate 685 * in the context of the call. 
686 */ 687 688 svc = zone_getspecific(svc_zone_key, curproc->p_zone); 689 /* Allocate a new pool */ 690 pool = kmem_zalloc(sizeof (SVCPOOL), KM_SLEEP); 691 692 /* 693 * Initialize the pool structure and create a creator thread. 694 */ 695 error = svc_pool_init(pool, args->maxthreads, args->redline, 696 args->qsize, args->timeout, args->stksize, args->max_same_xprt); 697 698 if (error) { 699 kmem_free(pool, sizeof (SVCPOOL)); 700 return (error); 701 } 702 703 /* Register the pool with the global pool list */ 704 svc_pool_register(svc, pool, args->id); 705 706 return (0); 707 } 708 709 int 710 svc_pool_control(int id, int cmd, void *arg) 711 { 712 SVCPOOL *pool; 713 struct svc_globals *svc; 714 715 svc = zone_getspecific(svc_zone_key, curproc->p_zone); 716 717 switch (cmd) { 718 case SVCPSET_SHUTDOWN_PROC: 719 /* 720 * Search the list for a pool with a matching id 721 * and register the transport handle with that pool. 722 */ 723 mutex_enter(&svc->svc_plock); 724 725 if ((pool = svc_pool_find(svc, id)) == NULL) { 726 mutex_exit(&svc->svc_plock); 727 return (ENOENT); 728 } 729 /* 730 * Grab the transport list lock before releasing the 731 * pool list lock 732 */ 733 rw_enter(&pool->p_lrwlock, RW_WRITER); 734 mutex_exit(&svc->svc_plock); 735 736 pool->p_shutdown = *((void (*)())arg); 737 738 rw_exit(&pool->p_lrwlock); 739 740 return (0); 741 case SVCPSET_UNREGISTER_PROC: 742 /* 743 * Search the list for a pool with a matching id 744 * and register the unregister callback handle with that pool. 745 */ 746 mutex_enter(&svc->svc_plock); 747 748 if ((pool = svc_pool_find(svc, id)) == NULL) { 749 mutex_exit(&svc->svc_plock); 750 return (ENOENT); 751 } 752 /* 753 * Grab the transport list lock before releasing the 754 * pool list lock 755 */ 756 rw_enter(&pool->p_lrwlock, RW_WRITER); 757 mutex_exit(&svc->svc_plock); 758 759 pool->p_offline = *((void (*)())arg); 760 761 rw_exit(&pool->p_lrwlock); 762 763 return (0); 764 default: 765 return (EINVAL); 766 } 767 } 768 769 /* 770 * Pool's transport list manipulation routines. 771 * - svc_xprt_register() 772 * - svc_xprt_unregister() 773 * 774 * svc_xprt_register() is called from svc_tli_kcreate() to 775 * insert a new master transport handle into the doubly linked 776 * list of server transport handles (one list per pool). 777 * 778 * The list is used by svc_poll(), when it operates in `drain' 779 * mode, to search for a next transport with a pending request. 780 */ 781 782 int 783 svc_xprt_register(SVCMASTERXPRT *xprt, int id) 784 { 785 SVCMASTERXPRT *prev, *next; 786 SVCPOOL *pool; 787 struct svc_globals *svc; 788 789 svc = zone_getspecific(svc_zone_key, curproc->p_zone); 790 /* 791 * Search the list for a pool with a matching id 792 * and register the transport handle with that pool. 793 */ 794 mutex_enter(&svc->svc_plock); 795 796 if ((pool = svc_pool_find(svc, id)) == NULL) { 797 mutex_exit(&svc->svc_plock); 798 return (ENOENT); 799 } 800 801 /* Grab the transport list lock before releasing the pool list lock */ 802 rw_enter(&pool->p_lrwlock, RW_WRITER); 803 mutex_exit(&svc->svc_plock); 804 805 /* Don't register new transports when the pool is in closing state */ 806 if (pool->p_closing) { 807 rw_exit(&pool->p_lrwlock); 808 return (EBUSY); 809 } 810 811 /* 812 * Initialize xp_pool to point to the pool. 813 * We don't want to go through the pool list every time. 814 */ 815 xprt->xp_pool = pool; 816 817 /* 818 * Insert a transport handle into the list. 819 * The list head points to the most recently inserted transport. 
820 */ 821 if (pool->p_lhead == NULL) 822 pool->p_lhead = xprt->xp_prev = xprt->xp_next = xprt; 823 else { 824 next = pool->p_lhead; 825 prev = pool->p_lhead->xp_prev; 826 827 xprt->xp_next = next; 828 xprt->xp_prev = prev; 829 830 pool->p_lhead = prev->xp_next = next->xp_prev = xprt; 831 } 832 833 /* Increment the transports count */ 834 pool->p_lcount++; 835 836 rw_exit(&pool->p_lrwlock); 837 return (0); 838 } 839 840 /* 841 * Called from svc_xprt_cleanup() to remove a master transport handle 842 * from the pool's list of server transports (when a transport is 843 * being destroyed). 844 */ 845 void 846 svc_xprt_unregister(SVCMASTERXPRT *xprt) 847 { 848 SVCPOOL *pool = xprt->xp_pool; 849 850 /* 851 * Unlink xprt from the list. 852 * If the list head points to this xprt then move it 853 * to the next xprt or reset to NULL if this is the last 854 * xprt in the list. 855 */ 856 rw_enter(&pool->p_lrwlock, RW_WRITER); 857 858 if (xprt == xprt->xp_next) 859 pool->p_lhead = NULL; 860 else { 861 SVCMASTERXPRT *next = xprt->xp_next; 862 SVCMASTERXPRT *prev = xprt->xp_prev; 863 864 next->xp_prev = prev; 865 prev->xp_next = next; 866 867 if (pool->p_lhead == xprt) 868 pool->p_lhead = next; 869 } 870 871 xprt->xp_next = xprt->xp_prev = NULL; 872 873 /* Decrement list count */ 874 pool->p_lcount--; 875 876 rw_exit(&pool->p_lrwlock); 877 } 878 879 static void 880 svc_xprt_qdestroy(SVCPOOL *pool) 881 { 882 mutex_destroy(&pool->p_qend_lock); 883 kmem_free(pool->p_qbody, pool->p_qsize * sizeof (__SVCXPRT_QNODE)); 884 } 885 886 /* 887 * Initialize an `xprt-ready' queue for a given pool. 888 */ 889 static void 890 svc_xprt_qinit(SVCPOOL *pool, size_t qsize) 891 { 892 int i; 893 894 pool->p_qsize = qsize; 895 pool->p_qbody = kmem_zalloc(pool->p_qsize * sizeof (__SVCXPRT_QNODE), 896 KM_SLEEP); 897 898 for (i = 0; i < pool->p_qsize - 1; i++) 899 pool->p_qbody[i].q_next = &(pool->p_qbody[i+1]); 900 901 pool->p_qbody[pool->p_qsize-1].q_next = &(pool->p_qbody[0]); 902 pool->p_qtop = &(pool->p_qbody[0]); 903 pool->p_qend = &(pool->p_qbody[0]); 904 905 mutex_init(&pool->p_qend_lock, NULL, MUTEX_DEFAULT, NULL); 906 } 907 908 /* 909 * Called from the svc_queuereq() interrupt routine to queue 910 * a hint for svc_poll() which transport has a pending request. 911 * - insert a pointer to xprt into the xprt-ready queue (FIFO) 912 * - if the xprt-ready queue is full turn the overflow flag on. 913 * 914 * NOTICE: pool->p_qtop is protected by the the pool's request lock 915 * and the caller (svc_queuereq()) must hold the lock. 916 */ 917 static void 918 svc_xprt_qput(SVCPOOL *pool, SVCMASTERXPRT *xprt) 919 { 920 ASSERT(MUTEX_HELD(&pool->p_req_lock)); 921 922 /* If the overflow flag is there is nothing we can do */ 923 if (pool->p_qoverflow) 924 return; 925 926 /* If the queue is full turn the overflow flag on and exit */ 927 if (pool->p_qtop->q_next == pool->p_qend) { 928 mutex_enter(&pool->p_qend_lock); 929 if (pool->p_qtop->q_next == pool->p_qend) { 930 pool->p_qoverflow = TRUE; 931 mutex_exit(&pool->p_qend_lock); 932 return; 933 } 934 mutex_exit(&pool->p_qend_lock); 935 } 936 937 /* Insert a hint and move pool->p_qtop */ 938 pool->p_qtop->q_xprt = xprt; 939 pool->p_qtop = pool->p_qtop->q_next; 940 } 941 942 /* 943 * Called from svc_poll() to get a hint which transport has a 944 * pending request. Returns a pointer to a transport or NULL if the 945 * `xprt-ready' queue is empty. 
946 * 947 * Since we do not acquire the pool's request lock while checking if 948 * the queue is empty we may miss a request that is just being delivered. 949 * However this is ok since svc_poll() will retry again until the 950 * count indicates that there are pending requests for this pool. 951 */ 952 static SVCMASTERXPRT * 953 svc_xprt_qget(SVCPOOL *pool) 954 { 955 SVCMASTERXPRT *xprt; 956 957 mutex_enter(&pool->p_qend_lock); 958 do { 959 /* 960 * If the queue is empty return NULL. 961 * Since we do not acquire the pool's request lock which 962 * protects pool->p_qtop this is not exact check. However, 963 * this is safe - if we miss a request here svc_poll() 964 * will retry again. 965 */ 966 if (pool->p_qend == pool->p_qtop) { 967 mutex_exit(&pool->p_qend_lock); 968 return (NULL); 969 } 970 971 /* Get a hint and move pool->p_qend */ 972 xprt = pool->p_qend->q_xprt; 973 pool->p_qend = pool->p_qend->q_next; 974 975 /* Skip fields deleted by svc_xprt_qdelete() */ 976 } while (xprt == NULL); 977 mutex_exit(&pool->p_qend_lock); 978 979 return (xprt); 980 } 981 982 /* 983 * Delete all the references to a transport handle that 984 * is being destroyed from the xprt-ready queue. 985 * Deleted pointers are replaced with NULLs. 986 */ 987 static void 988 svc_xprt_qdelete(SVCPOOL *pool, SVCMASTERXPRT *xprt) 989 { 990 __SVCXPRT_QNODE *q = pool->p_qend; 991 __SVCXPRT_QNODE *qtop = pool->p_qtop; 992 993 /* 994 * Delete all the references to xprt between the current 995 * position of pool->p_qend and current pool->p_qtop. 996 */ 997 for (;;) { 998 if (q->q_xprt == xprt) 999 q->q_xprt = NULL; 1000 if (q == qtop) 1001 return; 1002 q = q->q_next; 1003 } 1004 } 1005 1006 /* 1007 * Destructor for a master server transport handle. 1008 * - if there are no more non-detached threads linked to this transport 1009 * then, if requested, call xp_closeproc (we don't wait for detached 1010 * threads linked to this transport to complete). 1011 * - if there are no more threads linked to this 1012 * transport then 1013 * a) remove references to this transport from the xprt-ready queue 1014 * b) remove a reference to this transport from the pool's transport list 1015 * c) call a transport specific `destroy' function 1016 * d) cancel remaining thread reservations. 1017 * 1018 * NOTICE: Caller must hold the transport's thread lock. 1019 */ 1020 static void 1021 svc_xprt_cleanup(SVCMASTERXPRT *xprt, bool_t detached) 1022 { 1023 ASSERT(MUTEX_HELD(&xprt->xp_thread_lock)); 1024 ASSERT(xprt->xp_wq == NULL); 1025 1026 /* 1027 * If called from the last non-detached thread 1028 * it should call the closeproc on this transport. 1029 */ 1030 if (!detached && xprt->xp_threads == 0 && xprt->xp_closeproc) { 1031 (*(xprt->xp_closeproc)) (xprt); 1032 } 1033 1034 if (xprt->xp_threads + xprt->xp_detached_threads > 0) 1035 mutex_exit(&xprt->xp_thread_lock); 1036 else { 1037 /* Remove references to xprt from the `xprt-ready' queue */ 1038 svc_xprt_qdelete(xprt->xp_pool, xprt); 1039 1040 /* Unregister xprt from the pool's transport list */ 1041 svc_xprt_unregister(xprt); 1042 svc_callout_free(xprt); 1043 SVC_DESTROY(xprt); 1044 } 1045 } 1046 1047 /* 1048 * Find a dispatch routine for a given prog/vers pair. 1049 * This function is called from svc_getreq() to search the callout 1050 * table for an entry with a matching RPC program number `prog' 1051 * and a version range that covers `vers'. 

/*
 * Find a dispatch routine for a given prog/vers pair.
 * This function is called from svc_getreq() to search the callout
 * table for an entry with a matching RPC program number `prog'
 * and a version range that covers `vers'.
 * - if it finds a matching entry it returns a pointer to the
 *   dispatch routine
 * - otherwise it returns NULL and fills `vers_min' and `vers_max'
 *   with, respectively, the lowest and the highest version
 *   supported for the program `prog'
 */
static SVC_DISPATCH *
svc_callout_find(SVCXPRT *xprt, rpcprog_t prog, rpcvers_t vers,
    rpcvers_t *vers_min, rpcvers_t *vers_max)
{
	SVC_CALLOUT_TABLE *sct = xprt->xp_sct;
	int i;

	*vers_min = ~(rpcvers_t)0;
	*vers_max = 0;

	for (i = 0; i < sct->sct_size; i++) {
		SVC_CALLOUT *sc = &sct->sct_sc[i];

		if (prog == sc->sc_prog) {
			if (vers >= sc->sc_versmin && vers <= sc->sc_versmax)
				return (sc->sc_dispatch);

			if (*vers_max < sc->sc_versmax)
				*vers_max = sc->sc_versmax;
			if (*vers_min > sc->sc_versmin)
				*vers_min = sc->sc_versmin;
		}
	}

	return (NULL);
}

/*
 * Optionally free the callout table allocated for this transport by
 * the service provider.
 */
static void
svc_callout_free(SVCMASTERXPRT *xprt)
{
	SVC_CALLOUT_TABLE *sct = xprt->xp_sct;

	if (sct->sct_free) {
		kmem_free(sct->sct_sc, sct->sct_size * sizeof (SVC_CALLOUT));
		kmem_free(sct, sizeof (SVC_CALLOUT_TABLE));
	}
}

/*
 * Send a reply to an RPC request
 *
 * PSARC 2003/523 Contract Private Interface
 * svc_sendreply
 * Changes must be reviewed by Solaris File Sharing
 * Changes must be communicated to contract-2003-523@sun.com
 */
bool_t
svc_sendreply(const SVCXPRT *clone_xprt, const xdrproc_t xdr_results,
    const caddr_t xdr_location)
{
	struct rpc_msg rply;

	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = clone_xprt->xp_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = xdr_location;
	rply.acpted_rply.ar_results.proc = xdr_results;

	return (SVC_REPLY((SVCXPRT *)clone_xprt, &rply));
}

/*
 * No procedure error reply
 *
 * PSARC 2003/523 Contract Private Interface
 * svcerr_noproc
 * Changes must be reviewed by Solaris File Sharing
 * Changes must be communicated to contract-2003-523@sun.com
 */
void
svcerr_noproc(const SVCXPRT *clone_xprt)
{
	struct rpc_msg rply;

	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = clone_xprt->xp_verf;
	rply.acpted_rply.ar_stat = PROC_UNAVAIL;
	SVC_FREERES((SVCXPRT *)clone_xprt);
	SVC_REPLY((SVCXPRT *)clone_xprt, &rply);
}

/*
 * Can't decode arguments error reply
 *
 * PSARC 2003/523 Contract Private Interface
 * svcerr_decode
 * Changes must be reviewed by Solaris File Sharing
 * Changes must be communicated to contract-2003-523@sun.com
 */
void
svcerr_decode(const SVCXPRT *clone_xprt)
{
	struct rpc_msg rply;

	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = clone_xprt->xp_verf;
	rply.acpted_rply.ar_stat = GARBAGE_ARGS;
	SVC_FREERES((SVCXPRT *)clone_xprt);
	SVC_REPLY((SVCXPRT *)clone_xprt, &rply);
}

/*
 * Some system error
 */
void
svcerr_systemerr(const SVCXPRT *clone_xprt)
{
	struct rpc_msg rply;

	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = clone_xprt->xp_verf;
	rply.acpted_rply.ar_stat = SYSTEM_ERR;
	SVC_FREERES((SVCXPRT *)clone_xprt);
	SVC_REPLY((SVCXPRT *)clone_xprt, &rply);
}

/*
 * Authentication error reply
 */
void
svcerr_auth(const SVCXPRT *clone_xprt, const enum auth_stat why)
{
	struct rpc_msg rply;

	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_DENIED;
	rply.rjcted_rply.rj_stat = AUTH_ERROR;
	rply.rjcted_rply.rj_why = why;
	SVC_FREERES((SVCXPRT *)clone_xprt);
	SVC_REPLY((SVCXPRT *)clone_xprt, &rply);
}

/*
 * Authentication too weak error reply
 */
void
svcerr_weakauth(const SVCXPRT *clone_xprt)
{
	svcerr_auth((SVCXPRT *)clone_xprt, AUTH_TOOWEAK);
}

/*
 * Authentication error; bad credentials
 */
void
svcerr_badcred(const SVCXPRT *clone_xprt)
{
	struct rpc_msg rply;

	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_DENIED;
	rply.rjcted_rply.rj_stat = AUTH_ERROR;
	rply.rjcted_rply.rj_why = AUTH_BADCRED;
	SVC_FREERES((SVCXPRT *)clone_xprt);
	SVC_REPLY((SVCXPRT *)clone_xprt, &rply);
}

/*
 * Program unavailable error reply
 *
 * PSARC 2003/523 Contract Private Interface
 * svcerr_noprog
 * Changes must be reviewed by Solaris File Sharing
 * Changes must be communicated to contract-2003-523@sun.com
 */
void
svcerr_noprog(const SVCXPRT *clone_xprt)
{
	struct rpc_msg rply;

	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = clone_xprt->xp_verf;
	rply.acpted_rply.ar_stat = PROG_UNAVAIL;
	SVC_FREERES((SVCXPRT *)clone_xprt);
	SVC_REPLY((SVCXPRT *)clone_xprt, &rply);
}

/*
 * Program version mismatch error reply
 *
 * PSARC 2003/523 Contract Private Interface
 * svcerr_progvers
 * Changes must be reviewed by Solaris File Sharing
 * Changes must be communicated to contract-2003-523@sun.com
 */
void
svcerr_progvers(const SVCXPRT *clone_xprt,
    const rpcvers_t low_vers, const rpcvers_t high_vers)
{
	struct rpc_msg rply;

	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = clone_xprt->xp_verf;
	rply.acpted_rply.ar_stat = PROG_MISMATCH;
	rply.acpted_rply.ar_vers.low = low_vers;
	rply.acpted_rply.ar_vers.high = high_vers;
	SVC_FREERES((SVCXPRT *)clone_xprt);
	SVC_REPLY((SVCXPRT *)clone_xprt, &rply);
}
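
/*
 * Illustrative only: how a dispatch routine found through the callout
 * table typically uses svc_sendreply() and the svcerr_*() helpers
 * above. Everything named example_* is a made-up assumption for the
 * sketch:
 *
 *	static void
 *	example_dispatch(struct svc_req *req, SVCXPRT *xprt)
 *	{
 *		int res;
 *
 *		switch (req->rq_proc) {
 *		case 0:			// NULL procedure
 *			(void) svc_sendreply(xprt, xdr_void, NULL);
 *			break;
 *		case 1:
 *			res = example_work(req);
 *			if (!svc_sendreply(xprt, xdr_int, (caddr_t)&res))
 *				cmn_err(CE_WARN, "example: sendreply failed");
 *			break;
 *		default:
 *			svcerr_noproc(xprt);	// unknown procedure number
 *			break;
 *		}
 *	}
 */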

/*
 * Get server side input from some transport.
 *
 * Statement of authentication parameters management:
 * This function owns and manages all authentication parameters, specifically
 * the "raw" parameters (msg.rm_call.cb_cred and msg.rm_call.cb_verf) and
 * the "cooked" credentials (rqst->rq_clntcred).
 * However, this function does not know the structure of the cooked
 * credentials, so it makes the following assumptions:
 *   a) the structure is contiguous (no pointers), and
 *   b) the cred structure size does not exceed RQCRED_SIZE bytes.
 * In all events, all three parameters are freed upon exit from this routine.
 * The storage is trivially managed on the call stack in user land, but
 * is malloced in kernel land.
 *
 * Note: the xprt's xp_svc_lock is not held while the service's dispatch
 * routine is running.  If we decide to implement svc_unregister(), we'll
 * need to decide whether it's okay for a thread to unregister a service
 * while a request is being processed.  If we decide that this is a
 * problem, we can probably use some sort of reference counting scheme to
 * keep the callout entry from going away until the request has completed.
 */
static void
svc_getreq(
	SVCXPRT *clone_xprt,	/* clone transport handle */
	mblk_t *mp)
{
	struct rpc_msg msg;
	struct svc_req r;
	char *cred_area;	/* too big to allocate on call stack */

	TRACE_0(TR_FAC_KRPC, TR_SVC_GETREQ_START,
	    "svc_getreq_start:");

	ASSERT(clone_xprt->xp_master != NULL);
	ASSERT(!is_system_labeled() || DB_CRED(mp) != NULL ||
	    mp->b_datap->db_type != M_DATA);

	/*
	 * First, allocate the authentication parameters' storage.
	 */
	mutex_enter(&rqcred_lock);
	if (rqcred_head) {
		cred_area = rqcred_head;

		/* LINTED pointer alignment */
		rqcred_head = *(caddr_t *)rqcred_head;
		mutex_exit(&rqcred_lock);
	} else {
		mutex_exit(&rqcred_lock);
		cred_area = kmem_alloc(2 * MAX_AUTH_BYTES + RQCRED_SIZE,
		    KM_SLEEP);
	}
	msg.rm_call.cb_cred.oa_base = cred_area;
	msg.rm_call.cb_verf.oa_base = &(cred_area[MAX_AUTH_BYTES]);
	r.rq_clntcred = &(cred_area[2 * MAX_AUTH_BYTES]);

	/*
	 * The underlying transport recv routine may modify mblk data
	 * and make it difficult to extract the label afterwards. So
	 * get the label from the raw mblk data now.
	 */
	if (is_system_labeled()) {
		mblk_t *lmp;

		r.rq_label = kmem_alloc(sizeof (bslabel_t), KM_SLEEP);
		if (DB_CRED(mp) != NULL)
			lmp = mp;
		else {
			ASSERT(mp->b_cont != NULL);
			lmp = mp->b_cont;
			ASSERT(DB_CRED(lmp) != NULL);
		}
		bcopy(label2bslabel(crgetlabel(DB_CRED(lmp))), r.rq_label,
		    sizeof (bslabel_t));
	} else {
		r.rq_label = NULL;
	}

	/*
	 * Now receive a message from the transport.
	 */
	if (SVC_RECV(clone_xprt, mp, &msg)) {
		void (*dispatchroutine) (struct svc_req *, SVCXPRT *);
		rpcvers_t vers_min;
		rpcvers_t vers_max;
		bool_t no_dispatch;
		enum auth_stat why;

		/*
		 * Find the registered program and call its
		 * dispatch routine.
		 */
		r.rq_xprt = clone_xprt;
		r.rq_prog = msg.rm_call.cb_prog;
		r.rq_vers = msg.rm_call.cb_vers;
		r.rq_proc = msg.rm_call.cb_proc;
		r.rq_cred = msg.rm_call.cb_cred;

		/*
		 * First authenticate the message.
		 */
		TRACE_0(TR_FAC_KRPC, TR_SVC_GETREQ_AUTH_START,
		    "svc_getreq_auth_start:");
		if ((why = sec_svc_msg(&r, &msg, &no_dispatch)) != AUTH_OK) {
			TRACE_1(TR_FAC_KRPC, TR_SVC_GETREQ_AUTH_END,
			    "svc_getreq_auth_end:(%S)", "failed");
			svcerr_auth(clone_xprt, why);
			/*
			 * Free the arguments.
			 */
			(void) SVC_FREEARGS(clone_xprt, NULL, NULL);
		} else if (no_dispatch) {
			/*
			 * XXX - when bug id 4053736 is done, remove
			 * the SVC_FREEARGS() call.
			 */
			(void) SVC_FREEARGS(clone_xprt, NULL, NULL);
		} else {
			TRACE_1(TR_FAC_KRPC, TR_SVC_GETREQ_AUTH_END,
			    "svc_getreq_auth_end:(%S)", "good");

			dispatchroutine = svc_callout_find(clone_xprt,
			    r.rq_prog, r.rq_vers, &vers_min, &vers_max);

			if (dispatchroutine) {
				(*dispatchroutine) (&r, clone_xprt);
			} else {
				/*
				 * If we got here, the program or version
				 * is not served ...
1398 */ 1399 if (vers_max == 0 || 1400 version_keepquiet(clone_xprt)) 1401 svcerr_noprog(clone_xprt); 1402 else 1403 svcerr_progvers(clone_xprt, vers_min, 1404 vers_max); 1405 1406 /* 1407 * Free the arguments. For successful calls 1408 * this is done by the dispatch routine. 1409 */ 1410 (void) SVC_FREEARGS(clone_xprt, NULL, NULL); 1411 /* Fall through to ... */ 1412 } 1413 /* 1414 * Call cleanup procedure for RPCSEC_GSS. 1415 * This is a hack since there is currently no 1416 * op, such as SVC_CLEANAUTH. rpc_gss_cleanup 1417 * should only be called for a non null proc. 1418 * Null procs in RPC GSS are overloaded to 1419 * provide context setup and control. The main 1420 * purpose of rpc_gss_cleanup is to decrement the 1421 * reference count associated with the cached 1422 * GSS security context. We should never get here 1423 * for an RPCSEC_GSS null proc since *no_dispatch 1424 * would have been set to true from sec_svc_msg above. 1425 */ 1426 if (r.rq_cred.oa_flavor == RPCSEC_GSS) 1427 rpc_gss_cleanup(clone_xprt); 1428 } 1429 } 1430 1431 if (r.rq_label != NULL) 1432 kmem_free(r.rq_label, sizeof (bslabel_t)); 1433 1434 /* 1435 * Free authentication parameters' storage 1436 */ 1437 mutex_enter(&rqcred_lock); 1438 /* LINTED pointer alignment */ 1439 *(caddr_t *)cred_area = rqcred_head; 1440 rqcred_head = cred_area; 1441 mutex_exit(&rqcred_lock); 1442 } 1443 1444 /* 1445 * Allocate new clone transport handle. 1446 */ 1447 static SVCXPRT * 1448 svc_clone_init(void) 1449 { 1450 SVCXPRT *clone_xprt; 1451 1452 clone_xprt = kmem_zalloc(sizeof (SVCXPRT), KM_SLEEP); 1453 clone_xprt->xp_cred = crget(); 1454 return (clone_xprt); 1455 } 1456 1457 /* 1458 * Free memory allocated by svc_clone_init. 1459 */ 1460 static void 1461 svc_clone_free(SVCXPRT *clone_xprt) 1462 { 1463 /* Fre credentials from crget() */ 1464 if (clone_xprt->xp_cred) 1465 crfree(clone_xprt->xp_cred); 1466 kmem_free(clone_xprt, sizeof (SVCXPRT)); 1467 } 1468 1469 /* 1470 * Link a per-thread clone transport handle to a master 1471 * - increment a thread reference count on the master 1472 * - copy some of the master's fields to the clone 1473 * - call a transport specific clone routine. 1474 */ 1475 static void 1476 svc_clone_link(SVCMASTERXPRT *xprt, SVCXPRT *clone_xprt) 1477 { 1478 cred_t *cred = clone_xprt->xp_cred; 1479 1480 ASSERT(cred); 1481 1482 /* 1483 * Bump up master's thread count. 1484 * Linking a per-thread clone transport handle to a master 1485 * associates a service thread with the master. 1486 */ 1487 mutex_enter(&xprt->xp_thread_lock); 1488 xprt->xp_threads++; 1489 mutex_exit(&xprt->xp_thread_lock); 1490 1491 /* Clear everything */ 1492 bzero(clone_xprt, sizeof (SVCXPRT)); 1493 1494 /* Set pointer to the master transport stucture */ 1495 clone_xprt->xp_master = xprt; 1496 1497 /* Structure copy of all the common fields */ 1498 clone_xprt->xp_xpc = xprt->xp_xpc; 1499 1500 /* Restore per-thread fields (xp_cred) */ 1501 clone_xprt->xp_cred = cred; 1502 1503 1504 /* 1505 * NOTICE: There is no transport-type specific code now. 1506 * If you want to add a transport-type specific cloning code 1507 * add one more operation (e.g. xp_clone()) to svc_ops, 1508 * implement it for each transport type, and call it here 1509 * through an appropriate macro (e.g. SVC_CLONE()). 
1510 */ 1511 } 1512 1513 /* 1514 * Unlink a non-detached clone transport handle from a master 1515 * - decrement a thread reference count on the master 1516 * - if the transport is closing (xp_wq is NULL) call svc_xprt_cleanup(); 1517 * if this is the last non-detached/absolute thread on this transport 1518 * then it will close/destroy the transport 1519 * - call transport specific function to destroy the clone handle 1520 * - clear xp_master to avoid recursion. 1521 */ 1522 static void 1523 svc_clone_unlink(SVCXPRT *clone_xprt) 1524 { 1525 SVCMASTERXPRT *xprt = clone_xprt->xp_master; 1526 1527 /* This cannot be a detached thread */ 1528 ASSERT(!clone_xprt->xp_detached); 1529 ASSERT(xprt->xp_threads > 0); 1530 1531 /* Decrement a reference count on the transport */ 1532 mutex_enter(&xprt->xp_thread_lock); 1533 xprt->xp_threads--; 1534 1535 /* svc_xprt_cleanup() unlocks xp_thread_lock or destroys xprt */ 1536 if (xprt->xp_wq) 1537 mutex_exit(&xprt->xp_thread_lock); 1538 else 1539 svc_xprt_cleanup(xprt, FALSE); 1540 1541 /* Call a transport specific clone `destroy' function */ 1542 SVC_CLONE_DESTROY(clone_xprt); 1543 1544 /* Clear xp_master */ 1545 clone_xprt->xp_master = NULL; 1546 } 1547 1548 /* 1549 * Unlink a detached clone transport handle from a master 1550 * - decrement the thread count on the master 1551 * - if the transport is closing (xp_wq is NULL) call svc_xprt_cleanup(); 1552 * if this is the last thread on this transport then it will destroy 1553 * the transport. 1554 * - call a transport specific function to destroy the clone handle 1555 * - clear xp_master to avoid recursion. 1556 */ 1557 static void 1558 svc_clone_unlinkdetached(SVCXPRT *clone_xprt) 1559 { 1560 SVCMASTERXPRT *xprt = clone_xprt->xp_master; 1561 1562 /* This must be a detached thread */ 1563 ASSERT(clone_xprt->xp_detached); 1564 ASSERT(xprt->xp_detached_threads > 0); 1565 ASSERT(xprt->xp_threads + xprt->xp_detached_threads > 0); 1566 1567 /* Grab xprt->xp_thread_lock and decrement link counts */ 1568 mutex_enter(&xprt->xp_thread_lock); 1569 xprt->xp_detached_threads--; 1570 1571 /* svc_xprt_cleanup() unlocks xp_thread_lock or destroys xprt */ 1572 if (xprt->xp_wq) 1573 mutex_exit(&xprt->xp_thread_lock); 1574 else 1575 svc_xprt_cleanup(xprt, TRUE); 1576 1577 /* Call transport specific clone `destroy' function */ 1578 SVC_CLONE_DESTROY(clone_xprt); 1579 1580 /* Clear xp_master */ 1581 clone_xprt->xp_master = NULL; 1582 } 1583 1584 /* 1585 * Try to exit a non-detached service thread 1586 * - check if there are enough threads left 1587 * - if this thread (ie its clone transport handle) are linked 1588 * to a master transport then unlink it 1589 * - free the clone structure 1590 * - return to userland for thread exit 1591 * 1592 * If this is the last non-detached or the last thread on this 1593 * transport then the call to svc_clone_unlink() will, respectively, 1594 * close and/or destroy the transport. 
1595 */ 1596 static void 1597 svc_thread_exit(SVCPOOL *pool, SVCXPRT *clone_xprt) 1598 { 1599 if (clone_xprt->xp_master) 1600 svc_clone_unlink(clone_xprt); 1601 svc_clone_free(clone_xprt); 1602 1603 mutex_enter(&pool->p_thread_lock); 1604 pool->p_threads--; 1605 if (pool->p_closing && svc_pool_tryexit(pool)) 1606 /* return - thread exit will be handled at user level */ 1607 return; 1608 mutex_exit(&pool->p_thread_lock); 1609 1610 /* return - thread exit will be handled at user level */ 1611 } 1612 1613 /* 1614 * Exit a detached service thread that returned to svc_run 1615 * - decrement the `detached thread' count for the pool 1616 * - unlink the detached clone transport handle from the master 1617 * - free the clone structure 1618 * - return to userland for thread exit 1619 * 1620 * If this is the last thread on this transport then the call 1621 * to svc_clone_unlinkdetached() will destroy the transport. 1622 */ 1623 static void 1624 svc_thread_exitdetached(SVCPOOL *pool, SVCXPRT *clone_xprt) 1625 { 1626 /* This must be a detached thread */ 1627 ASSERT(clone_xprt->xp_master); 1628 ASSERT(clone_xprt->xp_detached); 1629 ASSERT(!MUTEX_HELD(&pool->p_thread_lock)); 1630 1631 svc_clone_unlinkdetached(clone_xprt); 1632 svc_clone_free(clone_xprt); 1633 1634 mutex_enter(&pool->p_thread_lock); 1635 1636 ASSERT(pool->p_reserved_threads >= 0); 1637 ASSERT(pool->p_detached_threads > 0); 1638 1639 pool->p_detached_threads--; 1640 if (pool->p_closing && svc_pool_tryexit(pool)) 1641 /* return - thread exit will be handled at user level */ 1642 return; 1643 mutex_exit(&pool->p_thread_lock); 1644 1645 /* return - thread exit will be handled at user level */ 1646 } 1647 1648 /* 1649 * PSARC 2003/523 Contract Private Interface 1650 * svc_wait 1651 * Changes must be reviewed by Solaris File Sharing 1652 * Changes must be communicated to contract-2003-523@sun.com 1653 */ 1654 int 1655 svc_wait(int id) 1656 { 1657 SVCPOOL *pool; 1658 int err = 0; 1659 struct svc_globals *svc; 1660 1661 svc = zone_getspecific(svc_zone_key, curproc->p_zone); 1662 mutex_enter(&svc->svc_plock); 1663 pool = svc_pool_find(svc, id); 1664 mutex_exit(&svc->svc_plock); 1665 1666 if (pool == NULL) 1667 return (ENOENT); 1668 1669 mutex_enter(&pool->p_user_lock); 1670 1671 /* Check if there's already a user thread waiting on this pool */ 1672 if (pool->p_user_waiting) { 1673 mutex_exit(&pool->p_user_lock); 1674 return (EBUSY); 1675 } 1676 1677 pool->p_user_waiting = TRUE; 1678 1679 /* Go to sleep, waiting for the signaled flag. */ 1680 while (!pool->p_signal_create_thread && !pool->p_user_exit) { 1681 if (cv_wait_sig(&pool->p_user_cv, &pool->p_user_lock) == 0) { 1682 /* Interrupted, return to handle exit or signal */ 1683 pool->p_user_waiting = FALSE; 1684 pool->p_signal_create_thread = FALSE; 1685 mutex_exit(&pool->p_user_lock); 1686 1687 /* 1688 * Thread has been interrupted and therefore 1689 * the service daemon is leaving as well so 1690 * let's go ahead and remove the service 1691 * pool at this time. 1692 */ 1693 mutex_enter(&svc->svc_plock); 1694 svc_pool_unregister(svc, pool); 1695 mutex_exit(&svc->svc_plock); 1696 1697 return (EINTR); 1698 } 1699 } 1700 1701 pool->p_signal_create_thread = FALSE; 1702 pool->p_user_waiting = FALSE; 1703 1704 /* 1705 * About to exit the service pool. Set return value 1706 * to let the userland code know our intent. Signal 1707 * svc_thread_creator() so that it can clean up the 1708 * pool structure. 
1709 */ 1710 if (pool->p_user_exit) { 1711 err = ECANCELED; 1712 cv_signal(&pool->p_user_cv); 1713 } 1714 1715 mutex_exit(&pool->p_user_lock); 1716 1717 /* Return to userland with error code, for possible thread creation. */ 1718 return (err); 1719 } 1720 1721 /* 1722 * `Service threads' creator thread. 1723 * The creator thread waits for a signal to create new thread. 1724 */ 1725 static void 1726 svc_thread_creator(SVCPOOL *pool) 1727 { 1728 callb_cpr_t cpr_info; /* CPR info for the creator thread */ 1729 1730 CALLB_CPR_INIT(&cpr_info, &pool->p_creator_lock, callb_generic_cpr, 1731 "svc_thread_creator"); 1732 1733 for (;;) { 1734 mutex_enter(&pool->p_creator_lock); 1735 1736 /* Check if someone set the exit flag */ 1737 if (pool->p_creator_exit) 1738 break; 1739 1740 /* Clear the `signaled' flag and go asleep */ 1741 pool->p_creator_signaled = FALSE; 1742 1743 CALLB_CPR_SAFE_BEGIN(&cpr_info); 1744 cv_wait(&pool->p_creator_cv, &pool->p_creator_lock); 1745 CALLB_CPR_SAFE_END(&cpr_info, &pool->p_creator_lock); 1746 1747 /* Check if someone signaled to exit */ 1748 if (pool->p_creator_exit) 1749 break; 1750 1751 mutex_exit(&pool->p_creator_lock); 1752 1753 mutex_enter(&pool->p_thread_lock); 1754 1755 /* 1756 * When the pool is in closing state and all the transports 1757 * are gone the creator should not create any new threads. 1758 */ 1759 if (pool->p_closing) { 1760 rw_enter(&pool->p_lrwlock, RW_READER); 1761 if (pool->p_lcount == 0) { 1762 rw_exit(&pool->p_lrwlock); 1763 mutex_exit(&pool->p_thread_lock); 1764 continue; 1765 } 1766 rw_exit(&pool->p_lrwlock); 1767 } 1768 1769 /* 1770 * Create a new service thread now. 1771 */ 1772 ASSERT(pool->p_reserved_threads >= 0); 1773 ASSERT(pool->p_detached_threads >= 0); 1774 1775 if (pool->p_threads + pool->p_detached_threads < 1776 pool->p_maxthreads) { 1777 /* 1778 * Signal the service pool wait thread 1779 * only if it hasn't already been signaled. 1780 */ 1781 mutex_enter(&pool->p_user_lock); 1782 if (pool->p_signal_create_thread == FALSE) { 1783 pool->p_signal_create_thread = TRUE; 1784 cv_signal(&pool->p_user_cv); 1785 } 1786 mutex_exit(&pool->p_user_lock); 1787 1788 } 1789 1790 mutex_exit(&pool->p_thread_lock); 1791 } 1792 1793 /* 1794 * Pool is closed. Cleanup and exit. 1795 */ 1796 1797 /* Signal userland creator thread that it can stop now. */ 1798 mutex_enter(&pool->p_user_lock); 1799 pool->p_user_exit = TRUE; 1800 cv_broadcast(&pool->p_user_cv); 1801 mutex_exit(&pool->p_user_lock); 1802 1803 /* Wait for svc_wait() to be done with the pool */ 1804 mutex_enter(&pool->p_user_lock); 1805 while (pool->p_user_waiting) { 1806 CALLB_CPR_SAFE_BEGIN(&cpr_info); 1807 cv_wait(&pool->p_user_cv, &pool->p_user_lock); 1808 CALLB_CPR_SAFE_END(&cpr_info, &pool->p_creator_lock); 1809 } 1810 mutex_exit(&pool->p_user_lock); 1811 1812 CALLB_CPR_EXIT(&cpr_info); 1813 svc_pool_cleanup(pool); 1814 zthread_exit(); 1815 } 1816 1817 /* 1818 * If the creator thread is idle signal it to create 1819 * a new service thread. 1820 */ 1821 static void 1822 svc_creator_signal(SVCPOOL *pool) 1823 { 1824 mutex_enter(&pool->p_creator_lock); 1825 if (pool->p_creator_signaled == FALSE) { 1826 pool->p_creator_signaled = TRUE; 1827 cv_signal(&pool->p_creator_cv); 1828 } 1829 mutex_exit(&pool->p_creator_lock); 1830 } 1831 1832 /* 1833 * Notify the creator thread to clean up and exit. 

/*
 * Notify the creator thread to clean up and exit.
 */
static void
svc_creator_signalexit(SVCPOOL *pool)
{
	mutex_enter(&pool->p_creator_lock);
	pool->p_creator_exit = TRUE;
	cv_signal(&pool->p_creator_cv);
	mutex_exit(&pool->p_creator_lock);
}

/*
 * Polling part of svc_run().
 * - search for a transport with a pending request
 * - when one is found then latch the request lock and return to svc_run()
 * - if there is no request, go to sleep and wait for a signal
 * - handle two exceptions:
 *   a) the current transport is closing
 *   b) a timeout waiting for a new request
 *   in both cases return to svc_run()
 */
static SVCMASTERXPRT *
svc_poll(SVCPOOL *pool, SVCMASTERXPRT *xprt, SVCXPRT *clone_xprt)
{
	/*
	 * The main loop iterates until
	 * a) we find a pending request,
	 * b) we detect that the current transport is closing, or
	 * c) we time out waiting for a new request.
	 */
	for (;;) {
		SVCMASTERXPRT *next;
		clock_t timeleft;

		/*
		 * Step 1.
		 * Check if there is a pending request on the current
		 * transport handle so that we can avoid re-linking the
		 * clone handle to a different master. If there is one,
		 * decrement the `pending-requests' count for the pool
		 * and return to svc_run().
		 *
		 * We also need to prevent starvation. If requests keep
		 * arriving on the selected transport all the time, the
		 * service threads will never switch to another transport,
		 * and with a limited number of service threads some
		 * transports may never be serviced. To prevent such a
		 * scenario we pick up at most pool->p_max_same_xprt
		 * requests from the same transport and then take a hint
		 * from the xprt-ready queue or walk the transport list.
		 */
		if (xprt && xprt->xp_req_head && (!pool->p_qoverflow ||
		    clone_xprt->xp_same_xprt++ < pool->p_max_same_xprt)) {
			mutex_enter(&xprt->xp_req_lock);
			if (xprt->xp_req_head) {
				mutex_enter(&pool->p_req_lock);
				pool->p_reqs--;
				if (pool->p_reqs == 0)
					pool->p_qoverflow = FALSE;
				mutex_exit(&pool->p_req_lock);

				return (xprt);
			}
			mutex_exit(&xprt->xp_req_lock);
		}
		clone_xprt->xp_same_xprt = 0;

		/*
		 * Step 2.
		 * If there is no request on the current transport try to
		 * find another transport with a pending request.
		 */
		mutex_enter(&pool->p_req_lock);
		pool->p_walkers++;
		mutex_exit(&pool->p_req_lock);

		/*
		 * Make sure that transports will not be destroyed just
		 * while we are checking them.
		 */
		rw_enter(&pool->p_lrwlock, RW_READER);

		for (;;) {
			SVCMASTERXPRT *hint;

			/*
			 * Get the next transport from the xprt-ready queue.
			 * This is only a hint. There is no guarantee that
			 * the transport still has a pending request since
			 * it could have been picked up by another thread
			 * in step 1.
			 *
			 * If the transport has a pending request then keep
			 * it locked. Decrement the pool's `pending-requests'
			 * and `walking-threads' counts, and return
			 * to svc_run().
			 */
			hint = svc_xprt_qget(pool);

			if (hint && hint->xp_req_head) {
				mutex_enter(&hint->xp_req_lock);
				if (hint->xp_req_head) {
					rw_exit(&pool->p_lrwlock);

					mutex_enter(&pool->p_req_lock);
					pool->p_reqs--;
					if (pool->p_reqs == 0)
						pool->p_qoverflow = FALSE;
					pool->p_walkers--;
					mutex_exit(&pool->p_req_lock);

					return (hint);
				}
				mutex_exit(&hint->xp_req_lock);
			}

			/*
			 * If there was no hint in the xprt-ready queue then
			 * - if there are fewer pending requests than polling
			 *   threads, go to sleep
			 * - otherwise, check if the xprt-ready queue
			 *   overflowed; if so, break out of this loop and
			 *   switch to the `drain' mode
			 */
			if (hint == NULL) {
				if (pool->p_reqs < pool->p_walkers) {
					mutex_enter(&pool->p_req_lock);
					if (pool->p_reqs < pool->p_walkers)
						goto sleep;
					mutex_exit(&pool->p_req_lock);
				}
				if (pool->p_qoverflow) {
					break;
				}
			}
		}

		/*
		 * If there was an overflow in the xprt-ready queue then we
		 * need to switch to the `drain' mode, i.e. walk through the
		 * pool's transport list and search for a transport with a
		 * pending request. If we manage to drain all the pending
		 * requests then we can clear the overflow flag. This will
		 * switch svc_poll() back to taking hints from the xprt-ready
		 * queue (which is generally more efficient).
		 *
		 * If there are no registered transports simply go to sleep.
		 */
		if (xprt == NULL && pool->p_lhead == NULL) {
			mutex_enter(&pool->p_req_lock);
			goto sleep;
		}

		/*
		 * `Walk' through the pool's list of master server
		 * transport handles. Continue to loop until there are
		 * fewer pending requests than walking threads.
		 */
		next = xprt ? xprt->xp_next : pool->p_lhead;

		for (;;) {
			/*
			 * Check if there is a request on this transport.
			 *
			 * Since blocking on a locked mutex is very expensive,
			 * check for a request without the lock first. We may
			 * miss a request that is just being delivered, but
			 * that will cost at most one more full walk through
			 * the list.
			 */
			if (next->xp_req_head) {
				/*
				 * Check again, now with a lock.
				 */
				mutex_enter(&next->xp_req_lock);
				if (next->xp_req_head) {
					rw_exit(&pool->p_lrwlock);

					mutex_enter(&pool->p_req_lock);
					pool->p_reqs--;
					if (pool->p_reqs == 0)
						pool->p_qoverflow = FALSE;
					pool->p_walkers--;
					mutex_exit(&pool->p_req_lock);

					return (next);
				}
				mutex_exit(&next->xp_req_lock);
			}

			/*
			 * Continue to `walk' through the pool's
			 * transport list until there are fewer requests
			 * than walkers. Check this condition without
			 * a lock first to avoid contention on a mutex.
			 */
			if (pool->p_reqs < pool->p_walkers) {
				/* Check again, now with the lock. */
				mutex_enter(&pool->p_req_lock);
				if (pool->p_reqs < pool->p_walkers)
					break;	/* goto sleep */
				mutex_exit(&pool->p_req_lock);
			}

			next = next->xp_next;
		}

	sleep:
		/*
		 * No work to do. Stop the `walk' and go to sleep.
		 * Decrement the `walking-threads' count for the pool.
		 */
		pool->p_walkers--;
		rw_exit(&pool->p_lrwlock);

		/*
		 * Count us as asleep, mark this thread as safe
		 * for suspend and wait for a request.
		 */
		pool->p_asleep++;
		timeleft = cv_timedwait_sig(&pool->p_req_cv, &pool->p_req_lock,
		    pool->p_timeout + lbolt);

		/*
		 * If the drowsy flag is on this means that
		 * someone has signaled a wakeup. In such a case
		 * the `asleep-threads' count has already been updated
		 * so just clear the flag.
		 *
		 * If the drowsy flag is off then we need to update
		 * the `asleep-threads' count.
		 */
		if (pool->p_drowsy) {
			pool->p_drowsy = FALSE;
			/*
			 * If the thread is here because it timed out,
			 * it is time to do some more work instead of
			 * returning SVC_ETIMEDOUT.
			 */
			if (timeleft == -1)
				timeleft = 1;
		} else {
			pool->p_asleep--;
		}
		mutex_exit(&pool->p_req_lock);

		/*
		 * If we received a signal while waiting for a
		 * request, inform svc_run(), so that we can return
		 * to user level and exit.
		 */
		if (timeleft == 0)
			return (SVC_EINTR);

		/*
		 * If the current transport is gone then notify
		 * svc_run() to unlink from it.
		 */
		if (xprt && xprt->xp_wq == NULL)
			return (SVC_EXPRTGONE);

		/*
		 * If we have timed out waiting for a request inform
		 * svc_run() that we probably don't need this thread.
		 */
		if (timeleft == -1)
			return (SVC_ETIMEDOUT);
	}
}
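
/*
 * svc_poll() above leans on one lock-avoidance idiom throughout: peek
 * at a shared field without the lock, and only take the mutex and
 * re-check when the peek looks promising. The shape of the pattern,
 * extracted here for clarity:
 *
 *	if (xprt->xp_req_head) {
 *		mutex_enter(&xprt->xp_req_lock);
 *		if (xprt->xp_req_head) {
 *			... consume the request with the lock held ...
 *		}
 *		mutex_exit(&xprt->xp_req_lock);
 *	}
 *
 * A stale peek costs at most one wasted lock/unlock or one more pass
 * over the list; correctness never depends on the unlocked read.
 */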

/*
 * Main loop of the kernel RPC server
 * - wait for input (find a transport with a pending request)
 * - dequeue the request
 * - call a registered server routine to process the request
 *
 * There can be many threads running concurrently in this loop
 * on the same or on different transports.
 */
static int
svc_run(SVCPOOL *pool)
{
	SVCMASTERXPRT *xprt = NULL;	/* master transport handle */
	SVCXPRT *clone_xprt;		/* clone for this thread */
	proc_t *p = ttoproc(curthread);

	/* Allocate a clone transport handle for this thread */
	clone_xprt = svc_clone_init();

	/*
	 * The loop iterates until the thread has been idle for
	 * too long or the transport is gone.
	 */
	for (;;) {
		SVCMASTERXPRT *next;
		mblk_t *mp;

		TRACE_0(TR_FAC_KRPC, TR_SVC_RUN, "svc_run");

		/*
		 * If the process is exiting/killed, return
		 * immediately without processing any more
		 * requests.
		 */
		if (p->p_flag & (SEXITING | SKILLED)) {
			svc_thread_exit(pool, clone_xprt);
			return (EINTR);
		}

		/* Find a transport with a pending request */
		next = svc_poll(pool, xprt, clone_xprt);

		/*
		 * If svc_poll() finds a transport with a request
		 * it latches xp_req_lock on it. Therefore we need
		 * to dequeue the request and release the lock as
		 * soon as possible.
		 */
		ASSERT(next != NULL &&
		    (next == SVC_EXPRTGONE ||
		    next == SVC_ETIMEDOUT ||
		    next == SVC_EINTR ||
		    MUTEX_HELD(&next->xp_req_lock)));

		/* Oops! The current transport is closing. Unlink now */
		if (next == SVC_EXPRTGONE) {
			svc_clone_unlink(clone_xprt);
			xprt = NULL;
			continue;
		}

		/* Oops! Timeout while waiting for a request. Exit */
		if (next == SVC_ETIMEDOUT) {
			svc_thread_exit(pool, clone_xprt);
			return (0);
		}

		/*
		 * Interrupted by a signal while waiting for a
		 * request. Return to userspace and exit.
		 */
		if (next == SVC_EINTR) {
			svc_thread_exit(pool, clone_xprt);
			return (EINTR);
		}

		/*
		 * Dequeue the request and release the request lock
		 * on this transport (latched by svc_poll()).
		 */
		mp = next->xp_req_head;
		next->xp_req_head = mp->b_next;
		mp->b_next = (mblk_t *)0;

		TRACE_2(TR_FAC_KRPC, TR_NFSFP_QUE_REQ_DEQ,
		    "rpc_que_req_deq:pool %p mp %p", pool, mp);
		mutex_exit(&next->xp_req_lock);

		/*
		 * If this is a new request on the current transport then
		 * the clone structure is already properly initialized.
		 * Otherwise, if the request is on a different transport,
		 * unlink from the current master and link to
		 * the one we got a request on.
		 */
		if (next != xprt) {
			if (xprt)
				svc_clone_unlink(clone_xprt);
			svc_clone_link(next, clone_xprt);
			xprt = next;
		}

		/*
		 * If there are more requests and req_cv hasn't
		 * been signaled yet then wake up one more thread now.
		 *
		 * We avoid signaling req_cv until the most recently
		 * signaled thread wakes up and gets CPU to clear
		 * the `drowsy' flag.
		 */
		if (!(pool->p_drowsy || pool->p_reqs <= pool->p_walkers ||
		    pool->p_asleep == 0)) {
			mutex_enter(&pool->p_req_lock);

			if (pool->p_drowsy ||
			    pool->p_reqs <= pool->p_walkers ||
			    pool->p_asleep == 0)
				mutex_exit(&pool->p_req_lock);
			else {
				pool->p_asleep--;
				pool->p_drowsy = TRUE;

				cv_signal(&pool->p_req_cv);
				mutex_exit(&pool->p_req_lock);
			}
		}

		/*
		 * If there are no asleep/signaled threads, we are
		 * still below the pool->p_maxthreads limit, and no thread
		 * is currently being created, then signal the creator
		 * for one more service thread.
		 *
		 * The asleep and drowsy checks are not protected
		 * by a lock since it hurts performance and a wrong
		 * decision is not essential.
		 */
		if (pool->p_asleep == 0 && !pool->p_drowsy &&
		    pool->p_threads + pool->p_detached_threads <
		    pool->p_maxthreads)
			svc_creator_signal(pool);

		/*
		 * Process the request.
		 */
		svc_getreq(clone_xprt, mp);

		/* If thread had a reservation it should have been canceled */
		ASSERT(!clone_xprt->xp_reserved);

		/*
		 * If the clone is marked detached then exit.
		 * The rpcmod slot has already been released
		 * when we detached this thread.
		 */
		if (clone_xprt->xp_detached) {
			svc_thread_exitdetached(pool, clone_xprt);
			return (0);
		}

		/*
		 * Release our reference on the rpcmod
		 * slot attached to xp_wq->q_ptr.
		 */
		(*RELE_PROC(xprt)) (clone_xprt->xp_wq, NULL);
	}
	/* NOTREACHED */
}
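
/*
 * A note on the `drowsy' handshake used by svc_run() above and by
 * svc_queuereq() below: p_drowsy is set when a sleeping thread has
 * been signaled but has not yet run, and while it is set no further
 * cv_signal() is issued. The woken thread clears the flag in
 * svc_poll() once it gets CPU, re-arming the wakeup path. The
 * unlocked reads of p_drowsy and p_asleep on the fast path are
 * deliberate: a stale value only costs an extra or a slightly
 * delayed wakeup, while the request accounting itself (p_reqs) is
 * always updated under p_req_lock.
 */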

/*
 * Flush any pending requests for the queue and
 * free the associated mblks.
 */
void
svc_queueclean(queue_t *q)
{
	SVCMASTERXPRT *xprt = ((void **) q->q_ptr)[0];
	mblk_t *mp;
	SVCPOOL *pool;

	/*
	 * Clean up the requests.
	 */
	mutex_enter(&xprt->xp_req_lock);
	pool = xprt->xp_pool;
	while ((mp = xprt->xp_req_head) != NULL) {
		/* remove the request from the list and decrement p_reqs */
		xprt->xp_req_head = mp->b_next;
		mutex_enter(&pool->p_req_lock);
		mp->b_next = (mblk_t *)0;
		pool->p_reqs--;
		mutex_exit(&pool->p_req_lock);
		(*RELE_PROC(xprt)) (xprt->xp_wq, mp);
	}
	mutex_exit(&xprt->xp_req_lock);
}

/*
 * This routine is called by rpcmod to inform kernel RPC that a
 * queue is closing. It is called after all the requests have been
 * picked up (that is, after all the slots on the queue have
 * been released by kernel RPC). It is also guaranteed that no more
 * requests will be delivered on this transport.
 *
 * - clear xp_wq to mark the master server transport handle as closing
 * - if there are no more threads on this transport close/destroy it
 * - otherwise, broadcast to threads sleeping in svc_poll(); the last
 *   thread will close/destroy the transport.
 */
void
svc_queueclose(queue_t *q)
{
	SVCMASTERXPRT *xprt = ((void **) q->q_ptr)[0];

	if (xprt == NULL) {
		/*
		 * If there is no master xprt associated with this stream,
		 * then there is nothing to do. This happens regularly
		 * with connection-oriented listening streams created by
		 * nfsd.
		 */
		return;
	}

	mutex_enter(&xprt->xp_thread_lock);

	ASSERT(xprt->xp_req_head == NULL);
	ASSERT(xprt->xp_wq != NULL);

	xprt->xp_wq = NULL;

	if (xprt->xp_threads == 0) {
		SVCPOOL *pool = xprt->xp_pool;

		/*
		 * svc_xprt_cleanup() destroys the transport
		 * or releases the transport thread lock.
		 */
		svc_xprt_cleanup(xprt, FALSE);

		mutex_enter(&pool->p_thread_lock);

		/*
		 * If the pool is in the closing state and this was
		 * the last transport in the pool then signal the creator
		 * thread to clean up and exit.
		 */
		if (pool->p_closing && svc_pool_tryexit(pool)) {
			return;
		}
		mutex_exit(&pool->p_thread_lock);
	} else {
		/*
		 * Wake up threads sleeping in svc_poll() so that they
		 * unlink from the transport.
		 */
		mutex_enter(&xprt->xp_pool->p_req_lock);
		cv_broadcast(&xprt->xp_pool->p_req_cv);
		mutex_exit(&xprt->xp_pool->p_req_lock);

		/*
		 * NOTICE: No references to the master transport structure
		 * beyond this point!
		 */
		mutex_exit(&xprt->xp_thread_lock);
	}
}

/*
 * Interrupt `request delivery' routine called from rpcmod
 * - put a request at the tail of the transport request queue
 * - insert a hint for svc_poll() into the xprt-ready queue
 * - increment the `pending-requests' count for the pool
 * - wake up a thread sleeping in svc_poll() if necessary
 * - if all the threads are running, ask the creator for a new one.
 */
void
svc_queuereq(queue_t *q, mblk_t *mp)
{
	SVCMASTERXPRT *xprt = ((void **) q->q_ptr)[0];
	SVCPOOL *pool = xprt->xp_pool;

	TRACE_0(TR_FAC_KRPC, TR_SVC_QUEUEREQ_START, "svc_queuereq_start");

	ASSERT(!is_system_labeled() || DB_CRED(mp) != NULL ||
	    mp->b_datap->db_type != M_DATA);
	/*
	 * Step 1.
	 * Grab the transport's request lock and the pool's request lock
	 * so that putting the request at the tail of the transport's
	 * request queue, possibly inserting a hint into the xprt-ready
	 * queue, and incrementing the pending-request count appear
	 * atomic.
	 */
	mutex_enter(&xprt->xp_req_lock);
	mutex_enter(&pool->p_req_lock);
	if (xprt->xp_req_head == NULL)
		xprt->xp_req_head = mp;
	else
		xprt->xp_req_tail->b_next = mp;
	xprt->xp_req_tail = mp;

	/*
	 * Step 2.
	 * Insert a hint into the xprt-ready queue, increment
	 * the `pending-requests' count for the pool, and wake up
	 * a thread sleeping in svc_poll() if necessary.
	 */

	/* Insert pointer to this transport into the xprt-ready queue */
	svc_xprt_qput(pool, xprt);

	/* Increment the `pending-requests' count for the pool */
	pool->p_reqs++;

	TRACE_2(TR_FAC_KRPC, TR_NFSFP_QUE_REQ_ENQ,
	    "rpc_que_req_enq:pool %p mp %p", pool, mp);

	/*
	 * If there are more requests and req_cv hasn't
	 * been signaled yet then wake up one more thread now.
	 *
	 * We avoid signaling req_cv until the most recently
	 * signaled thread wakes up and gets CPU to clear
	 * the `drowsy' flag.
	 */
	if (pool->p_drowsy || pool->p_reqs <= pool->p_walkers ||
	    pool->p_asleep == 0) {
		mutex_exit(&pool->p_req_lock);
	} else {
		pool->p_drowsy = TRUE;
		pool->p_asleep--;

		/*
		 * Signal wakeup and drop the request lock.
		 */
		cv_signal(&pool->p_req_cv);
		mutex_exit(&pool->p_req_lock);
	}
	mutex_exit(&xprt->xp_req_lock);

	/*
	 * Step 3.
	 * If there are no asleep/signaled threads, we are
	 * still below the pool->p_maxthreads limit, and no thread is
	 * currently being created, then signal the creator
	 * for one more service thread.
	 *
	 * The asleep and drowsy checks are not protected
	 * by a lock since it hurts performance and a wrong
	 * decision is not essential.
	 */
	if (pool->p_asleep == 0 && !pool->p_drowsy &&
	    pool->p_threads + pool->p_detached_threads < pool->p_maxthreads)
		svc_creator_signal(pool);

	TRACE_1(TR_FAC_KRPC, TR_SVC_QUEUEREQ_END,
	    "svc_queuereq_end:(%S)", "end");
}
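
/*
 * Lock ordering in the request paths above: svc_queuereq(),
 * svc_queueclean() and the dequeue paths in svc_poll() all nest
 * p_req_lock inside xp_req_lock, never the other way around, so a
 * transport's request queue and the pool-wide p_reqs count can be
 * updated consistently without risking deadlock.
 */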

/*
 * Reserve a service thread so that it can be detached later.
 * This reservation is required to make sure that when the thread
 * tries to detach itself the total number of detached threads does
 * not exceed pool->p_maxthreads - pool->p_redline (i.e. that at
 * least pool->p_redline threads can remain non-detached). For
 * example, with p_maxthreads == 20 and p_redline == 1, at most 19
 * threads can be reserved or detached at any time.
 *
 * If the thread does not detach itself later, it should cancel the
 * reservation before returning to svc_run().
 *
 * - check if there is room for more reserved/detached threads
 * - if so, then increment the `reserved threads' count for the pool
 * - mark the thread as reserved (set the xp_reserved flag in the
 *   clone transport handle for this thread)
 * - return 1 if the reservation succeeded, or 0 if it failed.
 */
int
svc_reserve_thread(SVCXPRT *clone_xprt)
{
	SVCPOOL *pool = clone_xprt->xp_master->xp_pool;

	/* Recursive reservations are not allowed */
	ASSERT(!clone_xprt->xp_reserved);
	ASSERT(!clone_xprt->xp_detached);

	/* Check pool counts if there is room for reservation */
	mutex_enter(&pool->p_thread_lock);
	if (pool->p_reserved_threads + pool->p_detached_threads >=
	    pool->p_maxthreads - pool->p_redline) {
		mutex_exit(&pool->p_thread_lock);
		return (0);
	}
	pool->p_reserved_threads++;
	mutex_exit(&pool->p_thread_lock);

	/* Mark the thread (clone handle) as reserved */
	clone_xprt->xp_reserved = TRUE;

	return (1);
}

/*
 * Cancel a reservation for a thread.
 * - decrement the `reserved threads' count for the pool
 * - clear the flag in the clone transport handle for this thread.
 */
void
svc_unreserve_thread(SVCXPRT *clone_xprt)
{
	SVCPOOL *pool = clone_xprt->xp_master->xp_pool;

	/* Thread must have a reservation */
	ASSERT(clone_xprt->xp_reserved);
	ASSERT(!clone_xprt->xp_detached);

	/* Decrement global count */
	mutex_enter(&pool->p_thread_lock);
	pool->p_reserved_threads--;
	mutex_exit(&pool->p_thread_lock);

	/* Clear reservation flag */
	clone_xprt->xp_reserved = FALSE;
}

/*
 * Detach a thread from its transport, so that it can block for an
 * extended time. Because the transport can be closed after the thread
 * is detached, the thread should have already sent off a reply if it
 * was going to send one.
 *
 * - decrement the `non-detached threads' count and increment the
 *   `detached threads' count for the transport
 * - decrement the `non-detached threads' and `reserved threads'
 *   counts and increment the `detached threads' count for the pool
 * - release the rpcmod slot
 * - mark the clone (thread) as detached.
 *
 * No need to return a pointer to the thread's CPR information, since
 * the thread has a userland identity.
 *
 * NOTICE: a thread must not detach itself without making a prior
 * reservation through svc_reserve_thread().
 */
callb_cpr_t *
svc_detach_thread(SVCXPRT *clone_xprt)
{
	SVCMASTERXPRT *xprt = clone_xprt->xp_master;
	SVCPOOL *pool = xprt->xp_pool;

	/* Thread must have a reservation */
	ASSERT(clone_xprt->xp_reserved);
	ASSERT(!clone_xprt->xp_detached);

	/* Bookkeeping for this transport */
	mutex_enter(&xprt->xp_thread_lock);
	xprt->xp_threads--;
	xprt->xp_detached_threads++;
	mutex_exit(&xprt->xp_thread_lock);

	/* Bookkeeping for the pool */
	mutex_enter(&pool->p_thread_lock);
	pool->p_threads--;
	pool->p_reserved_threads--;
	pool->p_detached_threads++;
	mutex_exit(&pool->p_thread_lock);

	/* Release an rpcmod slot for this request */
	(*RELE_PROC(xprt)) (clone_xprt->xp_wq, NULL);

	/* Mark the clone (thread) as detached */
	clone_xprt->xp_reserved = FALSE;
	clone_xprt->xp_detached = TRUE;

	return (NULL);
}
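
/*
 * A sketch of how a service routine is expected to combine the three
 * calls above when it may block for an extended time. Illustrative
 * only: need_to_block and slow_work() stand in for service-specific
 * logic and are not part of this file.
 *
 *	if (svc_reserve_thread(clone_xprt) == 0)
 *		return;		(no room left; must not detach)
 *
 *	if (!need_to_block) {
 *		svc_unreserve_thread(clone_xprt);
 *	} else {
 *		(send the reply first; the transport may close)
 *		(void) svc_detach_thread(clone_xprt);
 *		slow_work();
 *	}
 *
 * After svc_detach_thread() the rpcmod slot has been released; when
 * the routine returns, svc_run() sees xp_detached set and exits the
 * thread via svc_thread_exitdetached().
 */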

/*
 * This routine is responsible for extracting the RDMA plugin master
 * XPRTs, unregistering them from the SVCPOOL and initiating
 * plugin-specific cleanup. It is passed a list/group of rdma
 * transport records that are active in a given registered or
 * unregistered kRPC thread pool. It shuts down all active rdma
 * transports in that pool. If the thread active on the transport
 * happens to be the last thread for that pool, it will signal the
 * creator thread to clean up the pool and destroy the xprt in
 * svc_queueclose().
 */
void
rdma_stop(rdma_xprt_group_t *rdma_xprts)
{
	SVCMASTERXPRT *xprt;
	rdma_xprt_record_t *curr_rec;
	queue_t *q;
	mblk_t *mp;
	int i, rtg_count;
	SVCPOOL *pool;

	if (rdma_xprts->rtg_count == 0)
		return;

	rtg_count = rdma_xprts->rtg_count;

	for (i = 0; i < rtg_count; i++) {
		curr_rec = rdma_xprts->rtg_listhead;
		rdma_xprts->rtg_listhead = curr_rec->rtr_next;
		rdma_xprts->rtg_count--;
		curr_rec->rtr_next = NULL;
		xprt = curr_rec->rtr_xprt_ptr;
		q = xprt->xp_wq;
		svc_rdma_kstop(xprt);

		mutex_enter(&xprt->xp_req_lock);
		pool = xprt->xp_pool;
		while ((mp = xprt->xp_req_head) != NULL) {
			/*
			 * remove the request from the list and
			 * decrement p_reqs
			 */
			xprt->xp_req_head = mp->b_next;
			mutex_enter(&pool->p_req_lock);
			mp->b_next = (mblk_t *)0;
			pool->p_reqs--;
			mutex_exit(&pool->p_req_lock);
			if (mp) {
				rdma_recv_data_t *rdp = (rdma_recv_data_t *)
				    mp->b_rptr;
				RDMA_BUF_FREE(rdp->conn, &rdp->rpcmsg);
				RDMA_REL_CONN(rdp->conn);
				freemsg(mp);
			}
		}
		mutex_exit(&xprt->xp_req_lock);
		svc_queueclose(q);
#ifdef DEBUG
		if (rdma_check)
			cmn_err(CE_NOTE, "rdma_stop: Exited svc_queueclose\n");
#endif
		/*
		 * Free the rdma transport record for the expunged rdma
		 * based master transport handle.
		 */
		kmem_free(curr_rec, sizeof (rdma_xprt_record_t));
		if (!rdma_xprts->rtg_listhead)
			break;
	}
}