Lines Matching defs:pool

54  *   Each master transport is registered to exactly one thread pool.
68 * A pool represents a kernel RPC service (NFS, Lock Manager, etc.).
69 * Transports related to the service are registered to the service pool.
70 * Service threads can switch between different transports in the pool.
71 * Thus, each service has its own pool of service threads. The maximum
72 * number of threads in a pool is pool->p_maxthreads. This limit allows
80 * In addition, each pool contains a doubly-linked list of transports,
82 * the pool share some other parameters such as stack size and
89 * svc_do_run(), respectively. Once the pool has been initialized,
97 * When we try to register a new pool and there is an old pool with
98 * the same id in the doubly linked pool list (this happens when we kill
99 * and restart nfsd or lockd), then we unlink the old pool from the list
102 * transports and service threads associated with the pool are gone the
103 * creator thread (see below) will clean up the pool structure and exit.
120 * If there is none we take a hint from the pool's `xprt-ready' queue.
122 * each transport in the pool's transport list. Once we find a
129 * requests on the transports registered on the pool's transport list.
130 * All the pool's threads sleep on the same condition variable.
138 * this search more efficient each pool has an `xprt-ready' queue.
146 * less efficient but safe `drain' mode and walk through the pool's
156 * Each pool has a thread creator associated with it. The creator thread
165 * When the pool is in the closing state (i.e. it has already been unregistered
166 * from the pool list) the last thread on the last transport in the pool
168 * clean up the pool structure and exit.
173 * at least pool->p_redline non-detached threads that can process incoming
257 * thread pool is limited to pool->p_maxthreads - svc_redline.
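
For orientation, the SVCPOOL fields referenced throughout this listing can be gathered into a rough sketch. This is reconstructed from the matches below rather than copied from rpc/svc.h, so the grouping and the omitted members are guesses; the synchronization types are the kernel's kmutex_t, kcondvar_t, and krwlock_t.

    struct __svcpool {                    /* sketch only; see rpc/svc.h */
        int           p_id;               /* pool id (NFS, Lock Manager, ...) */
        SVCPOOL       *p_next, *p_prev;   /* global pool list linkage */

        /* Worker-thread accounting, protected by p_thread_lock. */
        kmutex_t      p_thread_lock;
        int           p_threads;          /* attached service threads */
        int           p_detached_threads;
        int           p_reserved_threads;
        uint_t        p_maxthreads;       /* upper bound on threads */
        uint_t        p_redline;          /* non-detached threads kept free */
        bool_t        p_closing;          /* pool unregistered, draining */

        /* Transport list, protected by p_lrwlock. */
        krwlock_t     p_lrwlock;
        SVCMASTERXPRT *p_lhead;           /* circular list of transports */
        int           p_lcount;

        /* Pending-request state, protected by p_req_lock. */
        kmutex_t      p_req_lock;
        kcondvar_t    p_req_cv;           /* idle threads sleep here */
        int           p_reqs;             /* queued requests */
        size_t        p_size;             /* queued bytes (flow control) */
        int           p_walkers;          /* threads scanning the list */
        int           p_asleep;           /* threads blocked on p_req_cv */
        bool_t        p_drowsy;           /* a wakeup is already in flight */

        /*
         * Plus: the `xprt-ready' queue (p_q*), the creator and svc_wait()
         * handshake fields (p_creator_*, p_user_*), the p_shutdown and
         * p_offline callbacks, p_timeout, p_stksize, and p_max_same_xprt.
         */
    };
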
358 SVCPOOL *pool;
361 while ((pool = svc->svc_pools) != NULL) {
362 svc_pool_unregister(svc, pool);
397 svc_pool_cleanup(SVCPOOL *pool)
399 ASSERT(pool->p_threads + pool->p_detached_threads == 0);
400 ASSERT(pool->p_lcount == 0);
401 ASSERT(pool->p_closing);
405 * here so the user of the pool will be able to clean up
408 if (pool->p_shutdown != NULL)
409 (pool->p_shutdown)();
412 svc_xprt_qdestroy(pool);
415 rw_destroy(&pool->p_lrwlock);
418 mutex_destroy(&pool->p_thread_lock);
419 mutex_destroy(&pool->p_req_lock);
420 cv_destroy(&pool->p_req_cv);
423 mutex_destroy(&pool->p_creator_lock);
424 cv_destroy(&pool->p_creator_cv);
425 mutex_destroy(&pool->p_user_lock);
426 cv_destroy(&pool->p_user_cv);
428 /* Free pool structure */
429 kmem_free(pool, sizeof (SVCPOOL));
437 svc_pool_tryexit(SVCPOOL *pool)
439 ASSERT(MUTEX_HELD(&pool->p_thread_lock));
440 ASSERT(pool->p_closing);
442 if (pool->p_threads + pool->p_detached_threads == 0) {
443 rw_enter(&pool->p_lrwlock, RW_READER);
444 if (pool->p_lcount == 0) {
448 rw_exit(&pool->p_lrwlock);
449 mutex_exit(&pool->p_thread_lock);
454 * NOTICE: No references to the pool beyond this point!
455 * The pool is being destroyed.
457 ASSERT(!MUTEX_HELD(&pool->p_thread_lock));
458 svc_creator_signalexit(pool);
462 rw_exit(&pool->p_lrwlock);
465 ASSERT(MUTEX_HELD(&pool->p_thread_lock));
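
Note the division of labor implied by the NOTICE above: the last service thread never frees the pool itself, since its own stack and the thread lock it holds are still live. svc_pool_tryexit() only verifies that no threads (p_threads + p_detached_threads) and no registered transports (p_lcount) remain, then delegates via svc_creator_signalexit(); the creator thread runs svc_pool_cleanup() from a safe context, which is why no references to the pool are allowed past that point.
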
470 * Find a pool with a given id.
475 SVCPOOL *pool;
480 * Search the list for a pool with a matching id
481 * and return a pointer to it, or NULL if there is no match.
483 for (pool = svc->svc_pools; pool; pool = pool->p_next)
484 if (pool->p_id == id)
485 return (pool);
499 SVCPOOL *pool;
506 pool = svc_pool_find(svc, id);
510 if (pool == NULL)
514 * Increment counter of pool threads now
517 mutex_enter(&pool->p_thread_lock);
518 pool->p_threads++;
519 mutex_exit(&pool->p_thread_lock);
522 err = svc_run(pool);
528 * Unregister a pool from the pool list.
533 svc_pool_unregister(struct svc_globals *svc, SVCPOOL *pool)
535 SVCPOOL *next = pool->p_next;
536 SVCPOOL *prev = pool->p_prev;
541 if (pool == svc->svc_pools)
547 pool->p_next = pool->p_prev = NULL;
550 * Offline the pool. Mark the pool as closing.
551 * If there are no transports in this pool notify
554 mutex_enter(&pool->p_thread_lock);
555 if (pool->p_offline != NULL)
556 (pool->p_offline)();
557 pool->p_closing = TRUE;
558 if (svc_pool_tryexit(pool))
560 mutex_exit(&pool->p_thread_lock);
564 * Register a pool with a given id in the global doubly linked pool list.
565 * - if there is a pool with the same id in the list then unregister it
566 * - insert the new pool into the list.
569 svc_pool_register(struct svc_globals *svc, SVCPOOL *pool, int id)
574 * If there is a pool with the same id then remove it from
575 * the list and mark the pool as closing.
583 pool->p_id = id;
584 pool->p_next = svc->svc_pools;
585 pool->p_prev = NULL;
587 svc->svc_pools->p_prev = pool;
588 svc->svc_pools = pool;
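
Only the lines matching `pool' appear above, so the empty-list guard between lines 585 and 587 is elided. Below is a user-space miniature of the same head insertion with that guard restored, on the assumption that one exists in the source; the struct and function names here are illustrative, not the kernel's.

    #include <stdio.h>
    #include <stddef.h>

    struct pool {
        int          p_id;
        struct pool  *p_next;
        struct pool  *p_prev;
    };

    /* Insert `pool' at the head of a NULL-terminated doubly linked list. */
    static void
    pool_register(struct pool **head, struct pool *pool, int id)
    {
        pool->p_id = id;
        pool->p_next = *head;
        pool->p_prev = NULL;
        if (*head != NULL)          /* the guard elided from the match */
            (*head)->p_prev = pool;
        *head = pool;
    }

    int
    main(void)
    {
        struct pool *head = NULL;
        struct pool nfs, lockmgr;

        pool_register(&head, &nfs, 1);
        pool_register(&head, &lockmgr, 2);

        for (struct pool *p = head; p != NULL; p = p->p_next)
            printf("pool id %d\n", p->p_id);
        return (0);
    }
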
594 * Initialize a newly created pool structure
597 svc_pool_init(SVCPOOL *pool, uint_t maxthreads, uint_t redline,
602 ASSERT(pool);
621 svc_xprt_qinit(pool, qsize);
624 rw_init(&pool->p_lrwlock, NULL, RW_DEFAULT, NULL);
637 pool->p_maxthreads = maxthreads;
638 pool->p_redline = redline;
639 pool->p_timeout = timeout * hz;
640 pool->p_stksize = stksize;
641 pool->p_max_same_xprt = max_same_xprt;
642 mutex_init(&pool->p_thread_lock, NULL, MUTEX_DEFAULT, NULL);
643 mutex_init(&pool->p_req_lock, NULL, MUTEX_DEFAULT, NULL);
644 cv_init(&pool->p_req_cv, NULL, CV_DEFAULT, NULL);
647 pool->p_user_exit = FALSE;
648 pool->p_signal_create_thread = FALSE;
649 pool->p_user_waiting = FALSE;
650 mutex_init(&pool->p_user_lock, NULL, MUTEX_DEFAULT, NULL);
651 cv_init(&pool->p_user_cv, NULL, CV_DEFAULT, NULL);
654 pool->p_creator_exit = FALSE;
655 mutex_init(&pool->p_creator_lock, NULL, MUTEX_DEFAULT, NULL);
656 cv_init(&pool->p_creator_cv, NULL, CV_DEFAULT, NULL);
658 (void) zthread_create(NULL, pool->p_stksize, svc_thread_creator,
659 pool, 0, minclsyspri);
670 * Create a kernel RPC server-side thread/transport pool.
672 * This is the public interface for creating a server RPC thread pool
673 * for a given service provider. Transports registered with the pool's id
674 * will be served by the pool's threads. This function is called from the
680 SVCPOOL *pool;
690 /* Allocate a new pool */
691 pool = kmem_zalloc(sizeof (SVCPOOL), KM_SLEEP);
694 * Initialize the pool structure and create a creator thread.
696 error = svc_pool_init(pool, args->maxthreads, args->redline,
700 kmem_free(pool, sizeof (SVCPOOL));
704 /* Register the pool with the global pool list */
705 svc_pool_register(svc, pool, args->id);
713 SVCPOOL *pool;
721 * Search the list for a pool with a matching id
722 * and register the shutdown callback with that pool.
726 if ((pool = svc_pool_find(svc, id)) == NULL) {
732 * pool list lock
734 rw_enter(&pool->p_lrwlock, RW_WRITER);
737 pool->p_shutdown = *((void (*)())arg);
739 rw_exit(&pool->p_lrwlock);
744 * Search the list for a pool with a matching id
745 * and register the unregister (`offline') callback with that pool.
749 if ((pool = svc_pool_find(svc, id)) == NULL) {
755 * pool list lock
757 rw_enter(&pool->p_lrwlock, RW_WRITER);
760 pool->p_offline = *((void (*)())arg);
762 rw_exit(&pool->p_lrwlock);
777 * list of server transport handles (one list per pool).
787 SVCPOOL *pool;
792 * Search the list for a pool with a matching id
793 * and register the transport handle with that pool.
797 if ((pool = svc_pool_find(svc, id)) == NULL) {
802 /* Grab the transport list lock before releasing the pool list lock */
803 rw_enter(&pool->p_lrwlock, RW_WRITER);
806 /* Don't register new transports when the pool is in closing state */
807 if (pool->p_closing) {
808 rw_exit(&pool->p_lrwlock);
813 * Initialize xp_pool to point to the pool.
814 * We don't want to go through the pool list every time.
816 xprt->xp_pool = pool;
822 if (pool->p_lhead == NULL)
823 pool->p_lhead = xprt->xp_prev = xprt->xp_next = xprt;
825 next = pool->p_lhead;
826 prev = pool->p_lhead->xp_prev;
831 pool->p_lhead = prev->xp_next = next->xp_prev = xprt;
835 pool->p_lcount++;
837 rw_exit(&pool->p_lrwlock);
843 * from the pool's list of server transports (when a transport is
849 SVCPOOL *pool = xprt->xp_pool;
857 rw_enter(&pool->p_lrwlock, RW_WRITER);
860 pool->p_lhead = NULL;
868 if (pool->p_lhead == xprt)
869 pool->p_lhead = next;
875 pool->p_lcount--;
877 rw_exit(&pool->p_lrwlock);
881 svc_xprt_qdestroy(SVCPOOL *pool)
883 mutex_destroy(&pool->p_qend_lock);
884 kmem_free(pool->p_qbody, pool->p_qsize * sizeof (__SVCXPRT_QNODE));
888 * Initialize an `xprt-ready' queue for a given pool.
891 svc_xprt_qinit(SVCPOOL *pool, size_t qsize)
895 pool->p_qsize = qsize;
896 pool->p_qbody = kmem_zalloc(pool->p_qsize * sizeof (__SVCXPRT_QNODE),
899 for (i = 0; i < pool->p_qsize - 1; i++)
900 pool->p_qbody[i].q_next = &(pool->p_qbody[i+1]);
902 pool->p_qbody[pool->p_qsize-1].q_next = &(pool->p_qbody[0]);
903 pool->p_qtop = &(pool->p_qbody[0]);
904 pool->p_qend = &(pool->p_qbody[0]);
906 mutex_init(&pool->p_qend_lock, NULL, MUTEX_DEFAULT, NULL);
915 * NOTICE: pool->p_qtop is protected by the pool's request lock
919 svc_xprt_qput(SVCPOOL *pool, SVCMASTERXPRT *xprt)
921 ASSERT(MUTEX_HELD(&pool->p_req_lock));
924 if (pool->p_qoverflow)
928 if (pool->p_qtop->q_next == pool->p_qend) {
929 mutex_enter(&pool->p_qend_lock);
930 if (pool->p_qtop->q_next == pool->p_qend) {
931 pool->p_qoverflow = TRUE;
932 mutex_exit(&pool->p_qend_lock);
935 mutex_exit(&pool->p_qend_lock);
938 /* Insert a hint and move pool->p_qtop */
939 pool->p_qtop->q_xprt = xprt;
940 pool->p_qtop = pool->p_qtop->q_next;
948 * Since we do not acquire the pool's request lock while checking if
951 * count indicates that there are pending requests for this pool.
954 svc_xprt_qget(SVCPOOL *pool)
958 mutex_enter(&pool->p_qend_lock);
962 * Since we do not acquire the pool's request lock which
963 * protects pool->p_qtop, this is not an exact check. However,
967 if (pool->p_qend == pool->p_qtop) {
968 mutex_exit(&pool->p_qend_lock);
972 /* Get a hint and move pool->p_qend */
973 xprt = pool->p_qend->q_xprt;
974 pool->p_qend = pool->p_qend->q_next;
978 mutex_exit(&pool->p_qend_lock);
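
Taken together, svc_xprt_qinit(), svc_xprt_qput(), and svc_xprt_qget() implement a fixed-size, deliberately lossy ring of hints: qput() (under p_req_lock) advances p_qtop, latching p_qoverflow instead of blocking when the ring fills, while qget() (under p_qend_lock only) advances p_qend and tolerates races with qput(), because a stale or missed hint merely sends a thread into the slower transport-list walk. A user-space miniature under those assumptions, using array indices in place of the kernel's q_next links and ignoring locking:

    #include <stdio.h>
    #include <stdbool.h>

    #define QSIZE   8               /* ring holds at most QSIZE - 1 hints */

    static void *q_body[QSIZE];
    static int  q_top, q_end;       /* producer / consumer positions */
    static bool q_overflow;

    /* Producer: store a hint, or latch overflow if the ring is full. */
    static void
    q_put(void *hint)
    {
        if (q_overflow)
            return;                 /* lossy: stay in overflow mode */
        if ((q_top + 1) % QSIZE == q_end) {
            q_overflow = true;      /* consumers fall back to a full walk */
            return;
        }
        q_body[q_top] = hint;
        q_top = (q_top + 1) % QSIZE;
    }

    /* Consumer: take a hint if one is available, else NULL. */
    static void *
    q_get(void)
    {
        void *hint;

        if (q_end == q_top)         /* inexact without the producer lock */
            return (NULL);
        hint = q_body[q_end];
        q_end = (q_end + 1) % QSIZE;
        return (hint);
    }

    int
    main(void)
    {
        int xprt = 42;              /* stand-in for a SVCMASTERXPRT */

        q_put(&xprt);
        printf("hint: %p, then: %p\n", q_get(), q_get());
        return (0);
    }
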
989 svc_xprt_qdelete(SVCPOOL *pool, SVCMASTERXPRT *xprt)
993 mutex_enter(&pool->p_req_lock);
994 for (q = pool->p_qend; q != pool->p_qtop; q = q->q_next) {
998 mutex_exit(&pool->p_req_lock);
1009 * b) remove a reference to this transport from the pool's transport list
1035 /* Unregister xprt from the pool's transport list */
1582 svc_thread_exit(SVCPOOL *pool, SVCXPRT *clone_xprt)
1588 mutex_enter(&pool->p_thread_lock);
1589 pool->p_threads--;
1590 if (pool->p_closing && svc_pool_tryexit(pool))
1593 mutex_exit(&pool->p_thread_lock);
1600 * - decrement the `detached thread' count for the pool
1609 svc_thread_exitdetached(SVCPOOL *pool, SVCXPRT *clone_xprt)
1614 ASSERT(!MUTEX_HELD(&pool->p_thread_lock));
1619 mutex_enter(&pool->p_thread_lock);
1621 ASSERT(pool->p_reserved_threads >= 0);
1622 ASSERT(pool->p_detached_threads > 0);
1624 pool->p_detached_threads--;
1625 if (pool->p_closing && svc_pool_tryexit(pool))
1628 mutex_exit(&pool->p_thread_lock);
1642 SVCPOOL *pool;
1648 pool = svc_pool_find(svc, id);
1651 if (pool == NULL)
1654 mutex_enter(&pool->p_user_lock);
1656 /* Check if there's already a user thread waiting on this pool */
1657 if (pool->p_user_waiting) {
1658 mutex_exit(&pool->p_user_lock);
1662 pool->p_user_waiting = TRUE;
1665 while (!pool->p_signal_create_thread && !pool->p_user_exit) {
1666 if (cv_wait_sig(&pool->p_user_cv, &pool->p_user_lock) == 0) {
1668 pool->p_user_waiting = FALSE;
1669 pool->p_signal_create_thread = FALSE;
1670 mutex_exit(&pool->p_user_lock);
1676 * pool at this time.
1679 svc_pool_unregister(svc, pool);
1686 pool->p_signal_create_thread = FALSE;
1687 pool->p_user_waiting = FALSE;
1690 * About to exit the service pool. Set return value
1693 * pool structure.
1695 if (pool->p_user_exit) {
1697 cv_signal(&pool->p_user_cv);
1700 mutex_exit(&pool->p_user_lock);
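
These fragments are the user-level half of a handshake with the creator thread below: svc_wait() parks one process-supplied thread per pool on p_user_cv. When the creator wants another service thread it sets p_signal_create_thread and signals p_user_cv; svc_wait() then returns so that userland can donate a fresh thread back through svc_do_run(). p_user_exit closes the same loop when the pool is being torn down, and p_user_waiting lets the creator wait for svc_wait() to be done with the pool before freeing it.
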
1711 svc_thread_creator(SVCPOOL *pool)
1715 CALLB_CPR_INIT(&cpr_info, &pool->p_creator_lock, callb_generic_cpr,
1719 mutex_enter(&pool->p_creator_lock);
1722 if (pool->p_creator_exit)
1726 pool->p_creator_signaled = FALSE;
1729 cv_wait(&pool->p_creator_cv, &pool->p_creator_lock);
1730 CALLB_CPR_SAFE_END(&cpr_info, &pool->p_creator_lock);
1733 if (pool->p_creator_exit)
1736 mutex_exit(&pool->p_creator_lock);
1738 mutex_enter(&pool->p_thread_lock);
1741 * When the pool is in closing state and all the transports
1744 if (pool->p_closing) {
1745 rw_enter(&pool->p_lrwlock, RW_READER);
1746 if (pool->p_lcount == 0) {
1747 rw_exit(&pool->p_lrwlock);
1748 mutex_exit(&pool->p_thread_lock);
1751 rw_exit(&pool->p_lrwlock);
1757 ASSERT(pool->p_reserved_threads >= 0);
1758 ASSERT(pool->p_detached_threads >= 0);
1760 if (pool->p_threads + pool->p_detached_threads <
1761 pool->p_maxthreads) {
1763 * Signal the service pool wait thread
1766 mutex_enter(&pool->p_user_lock);
1767 if (pool->p_signal_create_thread == FALSE) {
1768 pool->p_signal_create_thread = TRUE;
1769 cv_signal(&pool->p_user_cv);
1771 mutex_exit(&pool->p_user_lock);
1775 mutex_exit(&pool->p_thread_lock);
1783 mutex_enter(&pool->p_user_lock);
1784 pool->p_user_exit = TRUE;
1785 cv_broadcast(&pool->p_user_cv);
1786 mutex_exit(&pool->p_user_lock);
1788 /* Wait for svc_wait() to be done with the pool */
1789 mutex_enter(&pool->p_user_lock);
1790 while (pool->p_user_waiting) {
1792 cv_wait(&pool->p_user_cv, &pool->p_user_lock);
1793 CALLB_CPR_SAFE_END(&cpr_info, &pool->p_creator_lock);
1795 mutex_exit(&pool->p_user_lock);
1798 svc_pool_cleanup(pool);
1807 svc_creator_signal(SVCPOOL *pool)
1809 mutex_enter(&pool->p_creator_lock);
1810 if (pool->p_creator_signaled == FALSE) {
1811 pool->p_creator_signaled = TRUE;
1812 cv_signal(&pool->p_creator_cv);
1814 mutex_exit(&pool->p_creator_lock);
1821 svc_creator_signalexit(SVCPOOL *pool)
1823 mutex_enter(&pool->p_creator_lock);
1824 pool->p_creator_exit = TRUE;
1825 cv_signal(&pool->p_creator_cv);
1826 mutex_exit(&pool->p_creator_lock);
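
svc_creator_signal() is a wakeup-coalescing idiom: p_creator_signaled is tested and set under p_creator_lock, so a burst of callers costs at most one cv_signal until the creator re-arms the flag at the top of its loop (line 1726). Below is a pthreads miniature of the same idiom; the names are illustrative, and like the kernel version it tolerates a signal that lands while the worker is busy (the next caller simply signals again).

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
    static bool signaled, exiting;

    /* Many callers; wakeups are coalesced through `signaled'. */
    static void
    creator_signal(void)
    {
        pthread_mutex_lock(&lock);
        if (!signaled) {
            signaled = true;
            pthread_cond_signal(&cv);
        }
        pthread_mutex_unlock(&lock);
    }

    static void *
    creator(void *arg)
    {
        (void) arg;
        pthread_mutex_lock(&lock);
        for (;;) {
            if (exiting)
                break;
            signaled = false;       /* re-arm before sleeping */
            while (!signaled && !exiting)
                pthread_cond_wait(&cv, &lock);
            if (exiting)
                break;
            pthread_mutex_unlock(&lock);
            printf("creator: would spawn a service thread\n");
            pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
        return (NULL);
    }

    int
    main(void)
    {
        pthread_t tid;

        pthread_create(&tid, NULL, creator, NULL);
        sleep(1);                   /* let the creator block on cv */
        creator_signal();
        creator_signal();           /* coalesced with the first */
        pthread_mutex_lock(&lock);
        exiting = true;
        pthread_cond_signal(&cv);
        pthread_mutex_unlock(&lock);
        pthread_join(tid, NULL);
        return (0);
    }
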
1840 svc_poll(SVCPOOL *pool, SVCMASTERXPRT *xprt, SVCXPRT *clone_xprt)
1857 * the pool and return to svc_run().
1865 * pool->p_max_same_xprt requests from the same transport
1869 if (xprt && xprt->xp_req_head && (!pool->p_qoverflow ||
1870 clone_xprt->xp_same_xprt++ < pool->p_max_same_xprt)) {
1883 mutex_enter(&pool->p_req_lock);
1884 pool->p_walkers++;
1885 mutex_exit(&pool->p_req_lock);
1891 rw_enter(&pool->p_lrwlock, RW_READER);
1904 * the pool and `walking-threads' counts, and return
1907 hint = svc_xprt_qget(pool);
1912 rw_exit(&pool->p_lrwlock);
1914 mutex_enter(&pool->p_req_lock);
1915 pool->p_walkers--;
1916 mutex_exit(&pool->p_req_lock);
1932 if (pool->p_reqs < pool->p_walkers) {
1933 mutex_enter(&pool->p_req_lock);
1934 if (pool->p_reqs < pool->p_walkers)
1936 mutex_exit(&pool->p_req_lock);
1938 if (pool->p_qoverflow) {
1947 * pool's transport list and search for a transport with a
1955 if (xprt == NULL && pool->p_lhead == NULL) {
1956 mutex_enter(&pool->p_req_lock);
1961 * `Walk' through the pool's list of master server
1965 next = xprt ? xprt->xp_next : pool->p_lhead;
1982 rw_exit(&pool->p_lrwlock);
1984 mutex_enter(&pool->p_req_lock);
1985 pool->p_walkers--;
1986 mutex_exit(&pool->p_req_lock);
1994 * Continue to `walk' through the pool's
1999 if (pool->p_reqs < pool->p_walkers) {
2001 mutex_enter(&pool->p_req_lock);
2002 if (pool->p_reqs < pool->p_walkers)
2004 mutex_exit(&pool->p_req_lock);
2013 * Decrement the `walking-threads' count for the pool.
2015 pool->p_walkers--;
2016 rw_exit(&pool->p_lrwlock);
2022 pool->p_asleep++;
2023 timeleft = cv_reltimedwait_sig(&pool->p_req_cv,
2024 &pool->p_req_lock, pool->p_timeout, TR_CLOCK_TICK);
2035 if (pool->p_drowsy) {
2036 pool->p_drowsy = FALSE;
2045 pool->p_asleep--;
2047 mutex_exit(&pool->p_req_lock);
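
Read together, the svc_poll() fragments above follow the three stages promised by the block comment at the top of the file: (1) stay on the current transport while it has queued requests and the thread has not exceeded pool->p_max_same_xprt; (2) otherwise register as a `walker', try a hint from the xprt-ready queue, and if the queue is empty or overflowed walk the circular transport list under p_lrwlock; (3) if nothing is found, sleep on p_req_cv for up to pool->p_timeout ticks before returning to svc_run(), where an idle thread may exit.
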
2102 * The max number of threads working on the pool is roughly pool->p_maxthreads.
2103 * Every thread could handle up to pool->p_max_same_xprt requests from one
2105 * If all the threads in the pool are working on one transport, they will handle
2106 * no more than enough_reqs (pool->p_maxthreads * pool->p_max_same_xprt)
2117 * We want to prevent a particular pool from exhausting memory, so once the
2118 * total length of queued requests for the whole pool reaches the high
2123 * should be enough. We should also consider that up to pool->p_maxthreads
2124 * threads for the pool might work on large requests (this is not counted for
2145 * particular pool might grow up to 2 * the high watermark.
2151 * or once the total memory consumption for the whole pool falls below the low
2158 SVCPOOL *pool = xprt->xp_pool;
2160 int enough_reqs = pool->p_maxthreads * pool->p_max_same_xprt;
2177 * If this pool uses over 20% of memory and this transport is
2180 if (pool->p_size >= totalmem / 5 &&
2181 xprt->xp_size >= totalmem / 5 / pool->p_lcount)
2194 * If this pool still uses over 16% of memory and this transport is
2197 if (pool->p_size >= totalmem / 6 &&
2198 xprt->xp_size >= totalmem / 5 / pool->p_lcount / 2)
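
The two thresholds visible above form a hysteresis band: flow control engages when the pool holds roughly 20% of memory (totalmem / 5) and the transport holds its full share, but the second, looser test keeps it engaged until usage falls well below that, so a transport does not flap at the boundary. A user-space miniature of just these two checks; the `throttled' state parameter is my framing of the two-test structure rather than a field from the source, and the real svc_flowcontrol() also weighs queued request counts against enough_reqs, which is elided from this match.

    #include <stddef.h>
    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Decide whether to throttle a transport, given queued bytes for the
     * whole pool and for this transport; `throttled' is the transport's
     * current state, which makes the band sticky.
     */
    static bool
    flow_deny(size_t pool_size, size_t xprt_size, size_t totalmem,
        unsigned lcount, bool throttled)
    {
        if (!throttled) {
            /* Engage: pool over 20% of memory, transport over its share. */
            return (pool_size >= totalmem / 5 &&
                xprt_size >= totalmem / 5 / lcount);
        }
        /* Stay engaged: pool still over ~16%, transport over half a share. */
        return (pool_size >= totalmem / 6 &&
            xprt_size >= totalmem / 5 / lcount / 2);
    }

    int
    main(void)
    {
        size_t totalmem = 1000;

        /* 250 of 1000 bytes queued, 150 on one of two transports: deny. */
        printf("%d\n", flow_deny(250, 150, totalmem, 2, false));
        /* Down to 170 pool / 60 xprt: still inside the sticky band. */
        printf("%d\n", flow_deny(170, 60, totalmem, 2, true));
        return (0);
    }
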
2216 svc_run(SVCPOOL *pool)
2243 svc_thread_exit(pool, clone_xprt);
2248 next = svc_poll(pool, xprt, clone_xprt);
2271 svc_thread_exit(pool, clone_xprt);
2280 svc_thread_exit(pool, clone_xprt);
2293 mutex_enter(&pool->p_req_lock);
2294 pool->p_reqs--;
2295 if (pool->p_reqs == 0)
2296 pool->p_qoverflow = FALSE;
2297 pool->p_size -= size;
2298 mutex_exit(&pool->p_req_lock);
2307 "rpc_que_req_deq:pool %p mp %p", pool, mp);
2332 if (!(pool->p_drowsy || pool->p_reqs <= pool->p_walkers ||
2333 pool->p_asleep == 0)) {
2334 mutex_enter(&pool->p_req_lock);
2336 if (pool->p_drowsy || pool->p_reqs <= pool->p_walkers ||
2337 pool->p_asleep == 0)
2338 mutex_exit(&pool->p_req_lock);
2340 pool->p_asleep--;
2341 pool->p_drowsy = TRUE;
2343 cv_signal(&pool->p_req_cv);
2344 mutex_exit(&pool->p_req_lock);
2350 * still below pool->p_maxthreads limit, and no thread is
2358 if (pool->p_asleep == 0 && !pool->p_drowsy &&
2359 pool->p_threads + pool->p_detached_threads <
2360 pool->p_maxthreads)
2361 svc_creator_signal(pool);
2377 svc_thread_exitdetached(pool, clone_xprt);
2404 SVCPOOL *pool;
2410 pool = xprt->xp_pool;
2418 mutex_enter(&pool->p_req_lock);
2419 pool->p_reqs -= xprt->xp_reqs;
2420 pool->p_size -= xprt->xp_size;
2421 mutex_exit(&pool->p_req_lock);
2465 SVCPOOL *pool = xprt->xp_pool;
2473 mutex_enter(&pool->p_thread_lock);
2476 * If the pool is in closing state and this was
2477 * the last transport in the pool then signal the creator
2480 if (pool->p_closing && svc_pool_tryexit(pool)) {
2483 mutex_exit(&pool->p_thread_lock);
2510 * - increment the `pending-requests' count for the pool
2519 SVCPOOL *pool = xprt->xp_pool;
2530 * pool's request lock so that when we put
2543 mutex_enter(&pool->p_req_lock);
2558 svc_xprt_qput(pool, xprt);
2561 pool->p_reqs++;
2566 pool->p_size += size;
2573 "rpc_que_req_enq:pool %p mp %p", pool, mp);
2583 if (pool->p_drowsy || pool->p_reqs <= pool->p_walkers ||
2584 pool->p_asleep == 0) {
2585 mutex_exit(&pool->p_req_lock);
2587 pool->p_drowsy = TRUE;
2588 pool->p_asleep--;
2593 cv_signal(&pool->p_req_cv);
2594 mutex_exit(&pool->p_req_lock);
2601 * still below pool->p_maxthreads limit, and no thread is
2609 if (pool->p_asleep == 0 && !pool->p_drowsy &&
2610 pool->p_threads + pool->p_detached_threads < pool->p_maxthreads)
2611 svc_creator_signal(pool);
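
Both here and in the svc_run() path above, p_drowsy marks that a wakeup is already in flight: a waker signals p_req_cv only when there are more requests than walking threads, at least one thread is asleep, and no earlier wakeup is still pending; it then sets p_drowsy and decrements p_asleep on the sleeper's behalf. The woken thread clears p_drowsy (line 2036), re-opening the gate. This bounds each enqueue to at most one cv_signal and avoids waking a herd of threads for a single request.
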
2623 * pool->p_maxthreads - pool->p_redline (i.e. that we can have
2624 * at least pool->p_redline non-detached threads).
2630 * - if so, then increment the `reserved threads' count for the pool
2638 SVCPOOL *pool = clone_xprt->xp_master->xp_pool;
2644 /* Check pool counts if there is room for reservation */
2645 mutex_enter(&pool->p_thread_lock);
2646 if (pool->p_reserved_threads + pool->p_detached_threads >=
2647 pool->p_maxthreads - pool->p_redline) {
2648 mutex_exit(&pool->p_thread_lock);
2651 pool->p_reserved_threads++;
2652 mutex_exit(&pool->p_thread_lock);
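
Concretely: with pool->p_maxthreads = 16 and pool->p_redline = 2, the check above refuses a reservation once p_reserved_threads + p_detached_threads reaches 14, so at least two non-detached threads always remain available to pick up incoming requests.
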
2662 * - decrement the `reserved threads' count for the pool
2668 SVCPOOL *pool = clone_xprt->xp_master->xp_pool;
2675 mutex_enter(&pool->p_thread_lock);
2676 pool->p_reserved_threads--;
2677 mutex_exit(&pool->p_thread_lock);
2692 * counts and increment the `detached threads' count for the pool
2706 SVCPOOL *pool = xprt->xp_pool;
2719 /* Bookkeeping for the pool */
2720 mutex_enter(&pool->p_thread_lock);
2721 pool->p_threads--;
2722 pool->p_reserved_threads--;
2723 pool->p_detached_threads++;
2724 mutex_exit(&pool->p_thread_lock);
2745 * active in a given registered or unregistered kRPC thread pool. It shuts
2746 * down all active rdma transports in that pool. If the thread active on the transport
2747 * happens to be the last thread for that pool, it will signal the creator thread
2748 * to clean up the pool and destroy the xprt in svc_queueclose().
2758 SVCPOOL *pool;
2775 pool = xprt->xp_pool;
2787 mutex_enter(&pool->p_req_lock);
2788 pool->p_reqs -= xprt->xp_reqs;
2789 pool->p_size -= xprt->xp_size;
2790 mutex_exit(&pool->p_req_lock);