/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved.
 */

/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * Server-side remote procedure call interface.
 *
 * Master transport handle (SVCMASTERXPRT).
 * The master transport handle structure is shared among service
 * threads processing events on the transport. Some fields in the
 * master structure are protected by locks:
 * - xp_req_lock protects the request queue:
 *	xp_req_head, xp_req_tail, xp_reqs, xp_size, xp_full, xp_enable
 * - xp_thread_lock protects the thread (clone) counts:
 *	xp_threads, xp_detached_threads, xp_wq
 * Each master transport is registered to exactly one thread pool.
 *
 * Clone transport handle (SVCXPRT).
 * The clone transport handle structure is a per-service-thread handle
 * to the transport. The structure carries all the fields/buffers used
 * for request processing. A service thread or, in other words, a clone
 * structure, can be linked to an arbitrary master structure to process
 * requests on this transport. The master handle keeps track of reference
 * counts of threads (clones) linked to it. A service thread can switch
 * to another transport by unlinking its clone handle from the current
 * transport and linking to a new one. Switching is relatively inexpensive
 * but it involves locking (the master's xprt->xp_thread_lock).
 *
 * Pools.
 * A pool represents a kernel RPC service (NFS, Lock Manager, etc.).
 * Transports related to the service are registered to the service pool.
 * Service threads can switch between different transports in the pool.
 * Thus, each service has its own pool of service threads. The maximum
 * number of threads in a pool is pool->p_maxthreads. This limit allows
 * resource usage by the service to be restricted. Some fields are protected
 * by locks:
 * - p_req_lock protects several counts and flags:
 *	p_reqs, p_size, p_walkers, p_asleep, p_drowsy, p_req_cv
 * - p_thread_lock governs other thread counts:
 *	p_threads, p_detached_threads, p_reserved_threads, p_closing
 *
 * In addition, each pool contains a doubly-linked list of transports,
 * an `xprt-ready' queue and a creator thread (see below). Threads in
 * the pool share some other parameters such as stack size and
 * polling timeout.
 *
 * Pools are initialized through the svc_pool_create() function called from
 * the nfssys() system call. However, thread creation must be done by
 * the userland agent. This is done by using SVCPOOL_WAIT and
 * SVCPOOL_RUN arguments to nfssys(), which call svc_wait() and
 * svc_do_run(), respectively. Once the pool has been initialized,
 * the userland process must set up a 'creator' thread. This thread
 * should park itself in the kernel by calling svc_wait(). If
 * svc_wait() returns successfully, it should fork off a new worker
 * thread, which then calls svc_do_run() in order to get work. When
 * that thread is complete, svc_do_run() will return, and the user
 * program should call thr_exit().
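 *
 * The following is an illustrative sketch (not part of the kernel code in
 * this file) of how a userland daemon might drive that creator/worker
 * cycle. The nfssys() wrapper and the thr_create() usage are assumptions
 * made for the example; only the SVCPOOL_WAIT/svc_wait() and
 * SVCPOOL_RUN/svc_do_run() pairing comes directly from the description
 * above.
 *
 *	// creator thread: park in the kernel, spawn a worker per wakeup
 *	void *
 *	creator(void *arg)
 *	{
 *		int id = *(int *)arg;
 *
 *		while (nfssys(SVCPOOL_WAIT, &id) == 0)	// calls svc_wait()
 *			(void) thr_create(NULL, 0, worker, &id,
 *			    THR_DETACHED, NULL);
 *		return (NULL);
 *	}
 *
 *	// worker thread: becomes a kernel RPC service thread
 *	void *
 *	worker(void *arg)
 *	{
 *		(void) nfssys(SVCPOOL_RUN, arg);	// calls svc_do_run()
 *		thr_exit(NULL);
 *		return (NULL);
 *	}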
 *
 * When we try to register a new pool and there is an old pool with
 * the same id in the doubly linked pool list (this happens when we kill
 * and restart nfsd or lockd), then we unlink the old pool from the list
 * and mark its state as `closing'. After that the transports can still
 * process requests but new transports won't be registered. When all the
 * transports and service threads associated with the pool are gone the
 * creator thread (see below) will clean up the pool structure and exit.
 *
 * svc_queuereq() and svc_run().
 * The kernel RPC server is interrupt driven. The svc_queuereq() interrupt
 * routine is called to deliver an RPC request. The service threads
 * loop in svc_run(). The interrupt function queues a request on the
 * transport's queue and makes sure that the request is serviced.
 * It may either wake up one of the sleeping threads, or ask for a new thread
 * to be created, or, if the previous request is just being picked up, do
 * nothing. In the last case the service thread that is picking up the
 * previous request will wake up or create the next thread. After a service
 * thread processes a request and sends a reply it returns to svc_run()
 * and svc_run() calls svc_poll() to find new input.
 *
 * svc_poll().
 * In order to avoid unnecessary locking, which causes performance
 * problems, we always look for a pending request on the current transport.
 * If there is none we take a hint from the pool's `xprt-ready' queue.
 * If the queue has overflowed we switch to the `drain' mode, checking
 * each transport in the pool's transport list. Once we find a
 * master transport handle with a pending request we latch the request
 * lock on this transport and return to svc_run(). If the request
 * belongs to a transport different from the one the service thread is
 * linked to, we need to unlink and link again.
 *
 * A service thread goes to sleep when there are no pending
 * requests on the transports registered with the pool.
 * All the pool's threads sleep on the same condition variable.
 * If a thread has been sleeping for too long a period of time
 * (by default 5 seconds) it wakes up and exits. Also, when a transport
 * is closing, sleeping threads wake up to unlink from this transport.
 *
 * The `xprt-ready' queue.
 * If a service thread finds no request on the transport it is currently
 * linked to, it will look for another transport with a pending request.
 * To make this search more efficient each pool has an `xprt-ready' queue.
 * The queue is a FIFO. When the interrupt routine queues a request it also
 * inserts a pointer to the transport into the `xprt-ready' queue. A
 * thread looking for a transport with a pending request can pop a
 * transport off the queue and check it for a request. The request may
 * already be gone, since it could have been taken by a thread linked to
 * that transport. In such a case we try the next hint. The `xprt-ready'
 * queue has a fixed size (by default 256 nodes). If it overflows,
 * svc_poll() has to switch to the less efficient but safe `drain' mode
 * and walk through the pool's transport list.
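 *
 * As a worked example of the hint queue (implemented by svc_xprt_qput()
 * and svc_xprt_qget() below): the qnodes form a circular list in which
 * p_qtop advances as hints are produced and p_qend advances as they are
 * consumed. The queue is empty when p_qend == p_qtop and full when
 * p_qtop->q_next == p_qend, so a queue of the default 256 nodes can hold
 * at most 255 outstanding hints before the overflow flag forces the
 * `drain' mode described above.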
 *
 * Both the svc_poll() loop and the `xprt-ready' queue are optimized
 * for the peak load case, that is, for the situation when the queue is not
 * empty, there are always a few pending requests, and a service
 * thread which has just processed a request does not go to sleep but
 * immediately picks up the next request.
 *
 * Thread creator.
 * Each pool has a thread creator associated with it. The creator thread
 * sleeps on a condition variable and waits for a signal to create a
 * service thread. The actual thread creation is done in userland by
 * the method described in "Pools" above.
 *
 * Signaling threads should turn on the `creator signaled' flag, and
 * can avoid sending signals when the flag is on. The flag is cleared
 * when the thread is created.
 *
 * When the pool is in the closing state (i.e. it has already been
 * unregistered from the pool list) the last thread on the last transport
 * in the pool should turn the p_creator_exit flag on. The creator thread
 * will clean up the pool structure and exit.
 *
 * Thread reservation; Detaching service threads.
 * A service thread can detach itself to block for an extended amount
 * of time. However, to keep the service active we need to guarantee
 * at least pool->p_redline non-detached threads that can process incoming
 * requests. Thus, the maximum number of detached and reserved threads is
 * pool->p_maxthreads - pool->p_redline. A service thread should first
 * acquire a reservation, and if the reservation was granted it can detach
 * itself. If a reservation was granted but the thread does not detach
 * itself it should cancel the reservation before it returns to svc_run().
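 *
 * As a concrete example of that arithmetic, with the defaults defined
 * below (p_maxthreads = INT16_MAX, p_redline = 1) at most INT16_MAX - 1
 * threads may be reserved or detached at any one time, leaving at least
 * one non-detached thread available to pick up incoming requests.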
 */

#include <sys/param.h>
#include <sys/types.h>
#include <rpc/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/tiuser.h>
#include <sys/t_kuser.h>
#include <netinet/in.h>
#include <rpc/xdr.h>
#include <rpc/auth.h>
#include <rpc/clnt.h>
#include <rpc/rpc_msg.h>
#include <rpc/svc.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/strsun.h>
#include <sys/tihdr.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/file.h>
#include <sys/systm.h>
#include <sys/callb.h>
#include <sys/vtrace.h>
#include <sys/zone.h>
#include <nfs/nfs.h>
#include <sys/tsol/label_macro.h>

/*
 * Defines for svc_poll()
 */
#define SVC_EXPRTGONE ((SVCMASTERXPRT *)1)	/* Transport is closing */
#define SVC_ETIMEDOUT ((SVCMASTERXPRT *)2)	/* Timeout */
#define SVC_EINTR ((SVCMASTERXPRT *)3)		/* Interrupted by signal */

/*
 * Default stack size for service threads.
 */
#define DEFAULT_SVC_RUN_STKSIZE (0)	/* default kernel stack */

int svc_default_stksize = DEFAULT_SVC_RUN_STKSIZE;

/*
 * Default polling timeout for service threads.
 * Multiplied by hz when used.
 */
#define DEFAULT_SVC_POLL_TIMEOUT (5)	/* seconds */

clock_t svc_default_timeout = DEFAULT_SVC_POLL_TIMEOUT;

/*
 * Size of the `xprt-ready' queue.
 */
#define DEFAULT_SVC_QSIZE (256)		/* qnodes */

size_t svc_default_qsize = DEFAULT_SVC_QSIZE;

/*
 * Default limit for the number of service threads.
 */
#define DEFAULT_SVC_MAXTHREADS (INT16_MAX)

int svc_default_maxthreads = DEFAULT_SVC_MAXTHREADS;

/*
 * Maximum number of requests from the same transport (in `drain' mode).
 */
#define DEFAULT_SVC_MAX_SAME_XPRT (8)

int svc_default_max_same_xprt = DEFAULT_SVC_MAX_SAME_XPRT;

/*
 * Default `Redline' of non-detached threads.
 * Total number of detached and reserved threads in an RPC server
 * thread pool is limited to pool->p_maxthreads - svc_redline.
 */
#define DEFAULT_SVC_REDLINE (1)

int svc_default_redline = DEFAULT_SVC_REDLINE;

/*
 * A node for the `xprt-ready' queue.
 * See below.
 */
struct __svcxprt_qnode {
	__SVCXPRT_QNODE *q_next;
	SVCMASTERXPRT *q_xprt;
};

/*
 * Global SVC variables (private).
 */
struct svc_globals {
	SVCPOOL *svc_pools;
	kmutex_t svc_plock;
};

/*
 * Debug variable to check for rdma based
 * transport startup and cleanup. Controlled
 * through /etc/system. Off by default.
 */
int rdma_check = 0;

/*
 * This allows disabling flow control in svc_queuereq().
 */
volatile int svc_flowcontrol_disable = 0;

/*
 * Authentication parameters list.
 */
static caddr_t rqcred_head;
static kmutex_t rqcred_lock;

/*
 * Pointers to transport specific `rele' routines in rpcmod (set from rpcmod).
 */
void (*rpc_rele)(queue_t *, mblk_t *, bool_t) = NULL;
void (*mir_rele)(queue_t *, mblk_t *, bool_t) = NULL;

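/*
 * Default no-op `rele' routine for RDMA transports; rdma_rele points here
 * (presumably until an RDMA transport module installs its own routine),
 * so RELE_PROC() below always resolves to a callable function.
 */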
/* ARGSUSED */
void
rpc_rdma_rele(queue_t *q, mblk_t *mp, bool_t enable)
{
}
void (*rdma_rele)(queue_t *, mblk_t *, bool_t) = rpc_rdma_rele;


/*
 * This macro picks which `rele' routine to use, based on the transport type.
 */
#define RELE_PROC(xprt) \
	((xprt)->xp_type == T_RDMA ? rdma_rele : \
	(((xprt)->xp_type == T_CLTS) ? rpc_rele : mir_rele))

/*
 * If true, then keep quiet about version mismatch.
 * This macro is for broadcast RPC only. We have no broadcast RPC in
 * kernel now but one may define a flag in the transport structure
 * and redefine this macro.
 */
#define version_keepquiet(xprt) (FALSE)

/*
 * ZSD key used to retrieve zone-specific svc globals
 */
static zone_key_t svc_zone_key;

static void svc_callout_free(SVCMASTERXPRT *);
static void svc_xprt_qinit(SVCPOOL *, size_t);
static void svc_xprt_qdestroy(SVCPOOL *);
static void svc_thread_creator(SVCPOOL *);
static void svc_creator_signal(SVCPOOL *);
static void svc_creator_signalexit(SVCPOOL *);
static void svc_pool_unregister(struct svc_globals *, SVCPOOL *);
static int svc_run(SVCPOOL *);

/* ARGSUSED */
static void *
svc_zoneinit(zoneid_t zoneid)
{
	struct svc_globals *svc;

	svc = kmem_alloc(sizeof (*svc), KM_SLEEP);
	mutex_init(&svc->svc_plock, NULL, MUTEX_DEFAULT, NULL);
	svc->svc_pools = NULL;
	return (svc);
}

/* ARGSUSED */
static void
svc_zoneshutdown(zoneid_t zoneid, void *arg)
{
	struct svc_globals *svc = arg;
	SVCPOOL *pool;

	mutex_enter(&svc->svc_plock);
	while ((pool = svc->svc_pools) != NULL) {
		svc_pool_unregister(svc, pool);
	}
	mutex_exit(&svc->svc_plock);
}

/* ARGSUSED */
static void
svc_zonefini(zoneid_t zoneid, void *arg)
{
	struct svc_globals *svc = arg;

	ASSERT(svc->svc_pools == NULL);
	mutex_destroy(&svc->svc_plock);
	kmem_free(svc, sizeof (*svc));
}

/*
 * Global SVC init routine.
 * Initialize global generic and transport type specific structures
 * used by the kernel RPC server side. This routine is called only
 * once when the module is being loaded.
 */
void
svc_init()
{
	zone_key_create(&svc_zone_key, svc_zoneinit, svc_zoneshutdown,
	    svc_zonefini);
	svc_cots_init();
	svc_clts_init();
}

/*
 * Destroy the SVCPOOL structure.
 */
static void
svc_pool_cleanup(SVCPOOL *pool)
{
	ASSERT(pool->p_threads + pool->p_detached_threads == 0);
	ASSERT(pool->p_lcount == 0);
	ASSERT(pool->p_closing);

	/*
	 * Call the user supplied shutdown function. This is done
	 * here so the user of the pool will be able to clean up
	 * service-related resources.
	 */
	if (pool->p_shutdown != NULL)
		(pool->p_shutdown)();

	/* Destroy `xprt-ready' queue */
	svc_xprt_qdestroy(pool);

	/* Destroy transport list */
	rw_destroy(&pool->p_lrwlock);

	/* Destroy locks and condition variables */
	mutex_destroy(&pool->p_thread_lock);
	mutex_destroy(&pool->p_req_lock);
	cv_destroy(&pool->p_req_cv);

	/* Destroy creator's locks and condition variables */
	mutex_destroy(&pool->p_creator_lock);
	cv_destroy(&pool->p_creator_cv);
	mutex_destroy(&pool->p_user_lock);
	cv_destroy(&pool->p_user_cv);

	/* Free pool structure */
	kmem_free(pool, sizeof (SVCPOOL));
}

/*
 * If all the transports and service threads are already gone
 * signal the creator thread to clean up and exit.
 */
static bool_t
svc_pool_tryexit(SVCPOOL *pool)
{
	ASSERT(MUTEX_HELD(&pool->p_thread_lock));
	ASSERT(pool->p_closing);

	if (pool->p_threads + pool->p_detached_threads == 0) {
		rw_enter(&pool->p_lrwlock, RW_READER);
		if (pool->p_lcount == 0) {
			/*
			 * Release the locks before sending a signal.
			 */
			rw_exit(&pool->p_lrwlock);
			mutex_exit(&pool->p_thread_lock);

			/*
			 * Notify the creator thread to clean up and exit
			 *
			 * NOTICE: No references to the pool beyond this point!
			 * The pool is being destroyed.
			 */
			ASSERT(!MUTEX_HELD(&pool->p_thread_lock));
			svc_creator_signalexit(pool);

			return (TRUE);
		}
		rw_exit(&pool->p_lrwlock);
	}

	ASSERT(MUTEX_HELD(&pool->p_thread_lock));
	return (FALSE);
}

/*
 * Find a pool with a given id.
 */
static SVCPOOL *
svc_pool_find(struct svc_globals *svc, int id)
{
	SVCPOOL *pool;

	ASSERT(MUTEX_HELD(&svc->svc_plock));

	/*
	 * Search the list for a pool with a matching id.
	 */
	for (pool = svc->svc_pools; pool; pool = pool->p_next)
		if (pool->p_id == id)
			return (pool);

	return (NULL);
}

/*
 * PSARC 2003/523 Contract Private Interface
 * svc_do_run
 * Changes must be reviewed by Solaris File Sharing
 * Changes must be communicated to contract-2003-523@sun.com
 */
int
svc_do_run(int id)
{
	SVCPOOL *pool;
	int err = 0;
	struct svc_globals *svc;

	svc = zone_getspecific(svc_zone_key, curproc->p_zone);
	mutex_enter(&svc->svc_plock);

	pool = svc_pool_find(svc, id);

	mutex_exit(&svc->svc_plock);

	if (pool == NULL)
		return (ENOENT);

	/*
	 * Increment counter of pool threads now
	 * that a thread has been created.
	 */
	mutex_enter(&pool->p_thread_lock);
	pool->p_threads++;
	mutex_exit(&pool->p_thread_lock);

	/* Give work to the new thread. */
	err = svc_run(pool);

	return (err);
}

/*
 * Unregister a pool from the pool list.
 * Set the closing state. If all the transports and service threads
 * are already gone signal the creator thread to clean up and exit.
 */
static void
svc_pool_unregister(struct svc_globals *svc, SVCPOOL *pool)
{
	SVCPOOL *next = pool->p_next;
	SVCPOOL *prev = pool->p_prev;

	ASSERT(MUTEX_HELD(&svc->svc_plock));

	/* Remove from the list */
	if (pool == svc->svc_pools)
		svc->svc_pools = next;
	if (next)
		next->p_prev = prev;
	if (prev)
		prev->p_next = next;
	pool->p_next = pool->p_prev = NULL;

	/*
	 * Offline the pool. Mark the pool as closing.
	 * If there are no transports in this pool notify
	 * the creator thread to clean it up and exit.
	 */
	mutex_enter(&pool->p_thread_lock);
	if (pool->p_offline != NULL)
		(pool->p_offline)();
	pool->p_closing = TRUE;
	if (svc_pool_tryexit(pool))
		return;
	mutex_exit(&pool->p_thread_lock);
}

/*
 * Register a pool with a given id in the global doubly linked pool list.
 * - if there is a pool with the same id in the list then unregister it
 * - insert the new pool into the list.
 */
static void
svc_pool_register(struct svc_globals *svc, SVCPOOL *pool, int id)
{
	SVCPOOL *old_pool;

	/*
	 * If there is a pool with the same id then remove it from
	 * the list and mark the pool as closing.
	 */
	mutex_enter(&svc->svc_plock);

	if (old_pool = svc_pool_find(svc, id))
		svc_pool_unregister(svc, old_pool);

	/* Insert into the doubly linked list */
	pool->p_id = id;
	pool->p_next = svc->svc_pools;
	pool->p_prev = NULL;
	if (svc->svc_pools)
		svc->svc_pools->p_prev = pool;
	svc->svc_pools = pool;

	mutex_exit(&svc->svc_plock);
}

/*
 * Initialize a newly created pool structure
 */
static int
svc_pool_init(SVCPOOL *pool, uint_t maxthreads, uint_t redline,
    uint_t qsize, uint_t timeout, uint_t stksize, uint_t max_same_xprt)
{
	klwp_t *lwp = ttolwp(curthread);

	ASSERT(pool);

	if (maxthreads == 0)
		maxthreads = svc_default_maxthreads;
	if (redline == 0)
		redline = svc_default_redline;
	if (qsize == 0)
		qsize = svc_default_qsize;
	if (timeout == 0)
		timeout = svc_default_timeout;
	if (stksize == 0)
		stksize = svc_default_stksize;
	if (max_same_xprt == 0)
		max_same_xprt = svc_default_max_same_xprt;

	if (maxthreads < redline)
		return (EINVAL);

	/* Allocate and initialize the `xprt-ready' queue */
	svc_xprt_qinit(pool, qsize);

	/* Initialize doubly-linked xprt list */
	rw_init(&pool->p_lrwlock, NULL, RW_DEFAULT, NULL);

	/*
	 * Setting lwp_childstksz on the current lwp so that
	 * descendants of this lwp get the modified stacksize, if
	 * it is defined. It is important that either this lwp or
	 * one of its descendants do the actual servicepool thread
	 * creation to maintain the stacksize inheritance.
	 */
	if (lwp != NULL)
		lwp->lwp_childstksz = stksize;

	/* Initialize thread limits, locks and condition variables */
	pool->p_maxthreads = maxthreads;
	pool->p_redline = redline;
	pool->p_timeout = timeout * hz;
	pool->p_stksize = stksize;
	pool->p_max_same_xprt = max_same_xprt;
	mutex_init(&pool->p_thread_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&pool->p_req_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pool->p_req_cv, NULL, CV_DEFAULT, NULL);

	/* Initialize userland creator */
	pool->p_user_exit = FALSE;
	pool->p_signal_create_thread = FALSE;
	pool->p_user_waiting = FALSE;
	mutex_init(&pool->p_user_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pool->p_user_cv, NULL, CV_DEFAULT, NULL);

	/* Initialize the creator and start the creator thread */
	pool->p_creator_exit = FALSE;
	mutex_init(&pool->p_creator_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pool->p_creator_cv, NULL, CV_DEFAULT, NULL);

	(void) zthread_create(NULL, pool->p_stksize, svc_thread_creator,
	    pool, 0, minclsyspri);

	return (0);
}

/*
 * PSARC 2003/523 Contract Private Interface
 * svc_pool_create
 * Changes must be reviewed by Solaris File Sharing
 * Changes must be communicated to contract-2003-523@sun.com
 *
 * Create a kernel RPC server-side thread/transport pool.
 *
 * This is the public interface for creating a server RPC thread pool
 * for a given service provider. Transports registered with the pool's id
 * will be served by the pool's threads. This function is called from the
 * nfssys() system call.
 */
int
svc_pool_create(struct svcpool_args *args)
{
	SVCPOOL *pool;
	int error;
	struct svc_globals *svc;

	/*
	 * Caller should check credentials in a way appropriate
	 * in the context of the call.
	 */

	svc = zone_getspecific(svc_zone_key, curproc->p_zone);
	/* Allocate a new pool */
	pool = kmem_zalloc(sizeof (SVCPOOL), KM_SLEEP);

	/*
	 * Initialize the pool structure and create a creator thread.
	 */
	error = svc_pool_init(pool, args->maxthreads, args->redline,
	    args->qsize, args->timeout, args->stksize, args->max_same_xprt);

	if (error) {
		kmem_free(pool, sizeof (SVCPOOL));
		return (error);
	}

	/* Register the pool with the global pool list */
	svc_pool_register(svc, pool, args->id);

	return (0);
}

int
svc_pool_control(int id, int cmd, void *arg)
{
	SVCPOOL *pool;
	struct svc_globals *svc;

	svc = zone_getspecific(svc_zone_key, curproc->p_zone);

	switch (cmd) {
	case SVCPSET_SHUTDOWN_PROC:
		/*
		 * Search the list for a pool with a matching id
		 * and register the shutdown procedure with that pool.
		 */
		mutex_enter(&svc->svc_plock);

		if ((pool = svc_pool_find(svc, id)) == NULL) {
			mutex_exit(&svc->svc_plock);
			return (ENOENT);
		}
		/*
		 * Grab the transport list lock before releasing the
		 * pool list lock
		 */
		rw_enter(&pool->p_lrwlock, RW_WRITER);
		mutex_exit(&svc->svc_plock);

		pool->p_shutdown = *((void (*)())arg);

		rw_exit(&pool->p_lrwlock);

		return (0);
	case SVCPSET_UNREGISTER_PROC:
		/*
		 * Search the list for a pool with a matching id
		 * and register the unregister callback handle with that pool.
		 */
		mutex_enter(&svc->svc_plock);

		if ((pool = svc_pool_find(svc, id)) == NULL) {
			mutex_exit(&svc->svc_plock);
			return (ENOENT);
		}
		/*
		 * Grab the transport list lock before releasing the
		 * pool list lock
		 */
		rw_enter(&pool->p_lrwlock, RW_WRITER);
		mutex_exit(&svc->svc_plock);

		pool->p_offline = *((void (*)())arg);

		rw_exit(&pool->p_lrwlock);

		return (0);
	default:
		return (EINVAL);
	}
}

/*
 * Pool's transport list manipulation routines.
 * - svc_xprt_register()
 * - svc_xprt_unregister()
 *
 * svc_xprt_register() is called from svc_tli_kcreate() to
 * insert a new master transport handle into the doubly linked
 * list of server transport handles (one list per pool).
 *
 * The list is used by svc_poll(), when it operates in `drain'
 * mode, to search for a next transport with a pending request.
 */

int
svc_xprt_register(SVCMASTERXPRT *xprt, int id)
{
	SVCMASTERXPRT *prev, *next;
	SVCPOOL *pool;
	struct svc_globals *svc;

	svc = zone_getspecific(svc_zone_key, curproc->p_zone);
	/*
	 * Search the list for a pool with a matching id
	 * and register the transport handle with that pool.
	 */
	mutex_enter(&svc->svc_plock);

	if ((pool = svc_pool_find(svc, id)) == NULL) {
		mutex_exit(&svc->svc_plock);
		return (ENOENT);
	}

	/* Grab the transport list lock before releasing the pool list lock */
	rw_enter(&pool->p_lrwlock, RW_WRITER);
	mutex_exit(&svc->svc_plock);

	/* Don't register new transports when the pool is in closing state */
	if (pool->p_closing) {
		rw_exit(&pool->p_lrwlock);
		return (EBUSY);
	}

	/*
	 * Initialize xp_pool to point to the pool.
	 * We don't want to go through the pool list every time.
	 */
	xprt->xp_pool = pool;

	/*
	 * Insert a transport handle into the list.
	 * The list head points to the most recently inserted transport.
	 */
	if (pool->p_lhead == NULL)
		pool->p_lhead = xprt->xp_prev = xprt->xp_next = xprt;
	else {
		next = pool->p_lhead;
		prev = pool->p_lhead->xp_prev;

		xprt->xp_next = next;
		xprt->xp_prev = prev;

		pool->p_lhead = prev->xp_next = next->xp_prev = xprt;
	}

	/* Increment the transports count */
	pool->p_lcount++;

	rw_exit(&pool->p_lrwlock);
	return (0);
}

/*
 * Called from svc_xprt_cleanup() to remove a master transport handle
 * from the pool's list of server transports (when a transport is
 * being destroyed).
 */
void
svc_xprt_unregister(SVCMASTERXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	/*
	 * Unlink xprt from the list.
	 * If the list head points to this xprt then move it
	 * to the next xprt or reset to NULL if this is the last
	 * xprt in the list.
	 */
	rw_enter(&pool->p_lrwlock, RW_WRITER);

	if (xprt == xprt->xp_next)
		pool->p_lhead = NULL;
	else {
		SVCMASTERXPRT *next = xprt->xp_next;
		SVCMASTERXPRT *prev = xprt->xp_prev;

		next->xp_prev = prev;
		prev->xp_next = next;

		if (pool->p_lhead == xprt)
			pool->p_lhead = next;
	}

	xprt->xp_next = xprt->xp_prev = NULL;

	/* Decrement list count */
	pool->p_lcount--;

	rw_exit(&pool->p_lrwlock);
}

static void
svc_xprt_qdestroy(SVCPOOL *pool)
{
	mutex_destroy(&pool->p_qend_lock);
	kmem_free(pool->p_qbody, pool->p_qsize * sizeof (__SVCXPRT_QNODE));
}

/*
 * Initialize an `xprt-ready' queue for a given pool.
 */
static void
svc_xprt_qinit(SVCPOOL *pool, size_t qsize)
{
	int i;

	pool->p_qsize = qsize;
	pool->p_qbody = kmem_zalloc(pool->p_qsize * sizeof (__SVCXPRT_QNODE),
	    KM_SLEEP);

	for (i = 0; i < pool->p_qsize - 1; i++)
		pool->p_qbody[i].q_next = &(pool->p_qbody[i+1]);

	pool->p_qbody[pool->p_qsize-1].q_next = &(pool->p_qbody[0]);
	pool->p_qtop = &(pool->p_qbody[0]);
	pool->p_qend = &(pool->p_qbody[0]);

	mutex_init(&pool->p_qend_lock, NULL, MUTEX_DEFAULT, NULL);
}

/*
 * Called from the svc_queuereq() interrupt routine to queue
 * a hint for svc_poll() which transport has a pending request.
 * - insert a pointer to xprt into the xprt-ready queue (FIFO)
 * - if the xprt-ready queue is full turn the overflow flag on.
 *
 * NOTICE: pool->p_qtop is protected by the pool's request lock
 * and the caller (svc_queuereq()) must hold the lock.
 */
static void
svc_xprt_qput(SVCPOOL *pool, SVCMASTERXPRT *xprt)
{
	ASSERT(MUTEX_HELD(&pool->p_req_lock));

	/* If the overflow flag is on there is nothing we can do */
	if (pool->p_qoverflow)
		return;

	/* If the queue is full turn the overflow flag on and exit */
	if (pool->p_qtop->q_next == pool->p_qend) {
		mutex_enter(&pool->p_qend_lock);
		if (pool->p_qtop->q_next == pool->p_qend) {
			pool->p_qoverflow = TRUE;
			mutex_exit(&pool->p_qend_lock);
			return;
		}
		mutex_exit(&pool->p_qend_lock);
	}

	/* Insert a hint and move pool->p_qtop */
	pool->p_qtop->q_xprt = xprt;
	pool->p_qtop = pool->p_qtop->q_next;
}
9427c478bd9Sstevel@tonic-gate
9437c478bd9Sstevel@tonic-gate /*
9447c478bd9Sstevel@tonic-gate * Called from svc_poll() to get a hint which transport has a
9457c478bd9Sstevel@tonic-gate * pending request. Returns a pointer to a transport or NULL if the
9467c478bd9Sstevel@tonic-gate * `xprt-ready' queue is empty.
9477c478bd9Sstevel@tonic-gate *
9487c478bd9Sstevel@tonic-gate * Since we do not acquire the pool's request lock while checking if
9497c478bd9Sstevel@tonic-gate * the queue is empty we may miss a request that is just being delivered.
9507c478bd9Sstevel@tonic-gate * However this is ok since svc_poll() will retry again until the
9517c478bd9Sstevel@tonic-gate * count indicates that there are pending requests for this pool.
9527c478bd9Sstevel@tonic-gate */
9537c478bd9Sstevel@tonic-gate static SVCMASTERXPRT *
svc_xprt_qget(SVCPOOL * pool)9547c478bd9Sstevel@tonic-gate svc_xprt_qget(SVCPOOL *pool)
9557c478bd9Sstevel@tonic-gate {
9567c478bd9Sstevel@tonic-gate SVCMASTERXPRT *xprt;
9577c478bd9Sstevel@tonic-gate
9587c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_qend_lock);
9597c478bd9Sstevel@tonic-gate do {
9607c478bd9Sstevel@tonic-gate /*
9617c478bd9Sstevel@tonic-gate * If the queue is empty return NULL.
9627c478bd9Sstevel@tonic-gate * Since we do not acquire the pool's request lock which
9637c478bd9Sstevel@tonic-gate * protects pool->p_qtop this is not exact check. However,
9647c478bd9Sstevel@tonic-gate * this is safe - if we miss a request here svc_poll()
9657c478bd9Sstevel@tonic-gate * will retry again.
9667c478bd9Sstevel@tonic-gate */
9677c478bd9Sstevel@tonic-gate if (pool->p_qend == pool->p_qtop) {
9687c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_qend_lock);
9697c478bd9Sstevel@tonic-gate return (NULL);
9707c478bd9Sstevel@tonic-gate }
9717c478bd9Sstevel@tonic-gate
9727c478bd9Sstevel@tonic-gate /* Get a hint and move pool->p_qend */
9737c478bd9Sstevel@tonic-gate xprt = pool->p_qend->q_xprt;
9747c478bd9Sstevel@tonic-gate pool->p_qend = pool->p_qend->q_next;
9757c478bd9Sstevel@tonic-gate
9767c478bd9Sstevel@tonic-gate /* Skip entries deleted by svc_xprt_qdelete() */
9777c478bd9Sstevel@tonic-gate } while (xprt == NULL);
9787c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_qend_lock);
9797c478bd9Sstevel@tonic-gate
9807c478bd9Sstevel@tonic-gate return (xprt);
9817c478bd9Sstevel@tonic-gate }
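
/*
 * Illustrative sketch (simplified, with details assumed): svc_queuereq()
 * is the producer for this `xprt-ready' queue and svc_poll() below is
 * the consumer.  The expected usage is roughly
 *
 *	mutex_enter(&pool->p_req_lock);
 *	svc_xprt_qput(pool, xprt);		(may only set p_qoverflow)
 *	pool->p_reqs++;				(pending-request count)
 *	cv_signal(&pool->p_req_cv);		(wake an idle service thread)
 *	mutex_exit(&pool->p_req_lock);
 *
 *	hint = svc_xprt_qget(pool);		(consumer; NULL when empty)
 *
 * The real svc_queuereq() does more bookkeeping (flow control, thread
 * creation); only the queue usage is sketched here.
 */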
9827c478bd9Sstevel@tonic-gate
9837c478bd9Sstevel@tonic-gate /*
9847c478bd9Sstevel@tonic-gate * Delete all the references to a transport handle that
9857c478bd9Sstevel@tonic-gate * is being destroyed from the xprt-ready queue.
9867c478bd9Sstevel@tonic-gate * Deleted pointers are replaced with NULLs.
9877c478bd9Sstevel@tonic-gate */
9887c478bd9Sstevel@tonic-gate static void
9897c478bd9Sstevel@tonic-gate svc_xprt_qdelete(SVCPOOL *pool, SVCMASTERXPRT *xprt)
9907c478bd9Sstevel@tonic-gate {
9918cc2da61SMarcel Telka __SVCXPRT_QNODE *q;
9927c478bd9Sstevel@tonic-gate
9938cc2da61SMarcel Telka mutex_enter(&pool->p_req_lock);
9948cc2da61SMarcel Telka for (q = pool->p_qend; q != pool->p_qtop; q = q->q_next) {
9957c478bd9Sstevel@tonic-gate if (q->q_xprt == xprt)
9967c478bd9Sstevel@tonic-gate q->q_xprt = NULL;
9977c478bd9Sstevel@tonic-gate }
9988cc2da61SMarcel Telka mutex_exit(&pool->p_req_lock);
9997c478bd9Sstevel@tonic-gate }
10007c478bd9Sstevel@tonic-gate
10017c478bd9Sstevel@tonic-gate /*
10027c478bd9Sstevel@tonic-gate * Destructor for a master server transport handle.
10037c478bd9Sstevel@tonic-gate * - if there are no more non-detached threads linked to this transport
10047c478bd9Sstevel@tonic-gate * then, if requested, call xp_closeproc (we don't wait for detached
10057c478bd9Sstevel@tonic-gate * threads linked to this transport to complete).
10067c478bd9Sstevel@tonic-gate * - if there are no more threads linked to this
10077c478bd9Sstevel@tonic-gate * transport then
10087c478bd9Sstevel@tonic-gate * a) remove references to this transport from the xprt-ready queue
10097c478bd9Sstevel@tonic-gate * b) remove a reference to this transport from the pool's transport list
10107c478bd9Sstevel@tonic-gate * c) call a transport specific `destroy' function
10117c478bd9Sstevel@tonic-gate * d) cancel remaining thread reservations.
10127c478bd9Sstevel@tonic-gate *
10137c478bd9Sstevel@tonic-gate * NOTICE: Caller must hold the transport's thread lock.
10147c478bd9Sstevel@tonic-gate */
10157c478bd9Sstevel@tonic-gate static void
10167c478bd9Sstevel@tonic-gate svc_xprt_cleanup(SVCMASTERXPRT *xprt, bool_t detached)
10177c478bd9Sstevel@tonic-gate {
10187c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&xprt->xp_thread_lock));
10197c478bd9Sstevel@tonic-gate ASSERT(xprt->xp_wq == NULL);
10207c478bd9Sstevel@tonic-gate
10217c478bd9Sstevel@tonic-gate /*
10227c478bd9Sstevel@tonic-gate * If called from the last non-detached thread
10237c478bd9Sstevel@tonic-gate * it should call the closeproc on this transport.
10247c478bd9Sstevel@tonic-gate */
10257c478bd9Sstevel@tonic-gate if (!detached && xprt->xp_threads == 0 && xprt->xp_closeproc) {
10267c478bd9Sstevel@tonic-gate (*(xprt->xp_closeproc)) (xprt);
10277c478bd9Sstevel@tonic-gate }
10287c478bd9Sstevel@tonic-gate
10297c478bd9Sstevel@tonic-gate if (xprt->xp_threads + xprt->xp_detached_threads > 0)
10307c478bd9Sstevel@tonic-gate mutex_exit(&xprt->xp_thread_lock);
10317c478bd9Sstevel@tonic-gate else {
10327c478bd9Sstevel@tonic-gate /* Remove references to xprt from the `xprt-ready' queue */
10337c478bd9Sstevel@tonic-gate svc_xprt_qdelete(xprt->xp_pool, xprt);
10347c478bd9Sstevel@tonic-gate
10357c478bd9Sstevel@tonic-gate /* Unregister xprt from the pool's transport list */
10367c478bd9Sstevel@tonic-gate svc_xprt_unregister(xprt);
10377c478bd9Sstevel@tonic-gate svc_callout_free(xprt);
10387c478bd9Sstevel@tonic-gate SVC_DESTROY(xprt);
10397c478bd9Sstevel@tonic-gate }
10407c478bd9Sstevel@tonic-gate }
10417c478bd9Sstevel@tonic-gate
10427c478bd9Sstevel@tonic-gate /*
10437c478bd9Sstevel@tonic-gate * Find a dispatch routine for a given prog/vers pair.
10447c478bd9Sstevel@tonic-gate * This function is called from svc_getreq() to search the callout
10457c478bd9Sstevel@tonic-gate * table for an entry with a matching RPC program number `prog'
10467c478bd9Sstevel@tonic-gate * and a version range that covers `vers'.
10477c478bd9Sstevel@tonic-gate * - if it finds a matching entry it returns a pointer to the dispatch routine
10487c478bd9Sstevel@tonic-gate * - otherwise it returns NULL and fills `vers_min' and `vers_max' with,
10497c478bd9Sstevel@tonic-gate * respectively, the lowest and the highest version
10507c478bd9Sstevel@tonic-gate * supported for the program `prog'.
10517c478bd9Sstevel@tonic-gate */
10527c478bd9Sstevel@tonic-gate static SVC_DISPATCH *
10537c478bd9Sstevel@tonic-gate svc_callout_find(SVCXPRT *xprt, rpcprog_t prog, rpcvers_t vers,
10547c478bd9Sstevel@tonic-gate rpcvers_t *vers_min, rpcvers_t *vers_max)
10557c478bd9Sstevel@tonic-gate {
10567c478bd9Sstevel@tonic-gate SVC_CALLOUT_TABLE *sct = xprt->xp_sct;
10577c478bd9Sstevel@tonic-gate int i;
10587c478bd9Sstevel@tonic-gate
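/* Start with an empty (inverted) version range; entries for `prog' widen it */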
10597c478bd9Sstevel@tonic-gate *vers_min = ~(rpcvers_t)0;
10607c478bd9Sstevel@tonic-gate *vers_max = 0;
10617c478bd9Sstevel@tonic-gate
10627c478bd9Sstevel@tonic-gate for (i = 0; i < sct->sct_size; i++) {
10637c478bd9Sstevel@tonic-gate SVC_CALLOUT *sc = &sct->sct_sc[i];
10647c478bd9Sstevel@tonic-gate
10657c478bd9Sstevel@tonic-gate if (prog == sc->sc_prog) {
10667c478bd9Sstevel@tonic-gate if (vers >= sc->sc_versmin && vers <= sc->sc_versmax)
10677c478bd9Sstevel@tonic-gate return (sc->sc_dispatch);
10687c478bd9Sstevel@tonic-gate
10697c478bd9Sstevel@tonic-gate if (*vers_max < sc->sc_versmax)
10707c478bd9Sstevel@tonic-gate *vers_max = sc->sc_versmax;
10717c478bd9Sstevel@tonic-gate if (*vers_min > sc->sc_versmin)
10727c478bd9Sstevel@tonic-gate *vers_min = sc->sc_versmin;
10737c478bd9Sstevel@tonic-gate }
10747c478bd9Sstevel@tonic-gate }
10757c478bd9Sstevel@tonic-gate
10767c478bd9Sstevel@tonic-gate return (NULL);
10777c478bd9Sstevel@tonic-gate }
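
/*
 * Illustrative sketch (hypothetical provider; struct layout assumed from
 * the sc_ and sct_ members used above): a service registering RPC
 * program 100099, versions 1-2, might hand the transport a callout
 * table like
 *
 *	static SVC_CALLOUT example_sc[] = {
 *		{ 100099, 1, 2, example_dispatch }
 *	};
 *	static SVC_CALLOUT_TABLE example_sct = {
 *		sizeof (example_sc) / sizeof (example_sc[0]),
 *		FALSE,				(sct_free: table is static)
 *		example_sc
 *	};
 *
 * With sct_free set to FALSE svc_callout_free() below leaves the table
 * alone; a provider that kmem_alloc()s both structures would set it to
 * TRUE so they are freed with the transport.
 */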
10787c478bd9Sstevel@tonic-gate
10797c478bd9Sstevel@tonic-gate /*
10807c478bd9Sstevel@tonic-gate * Optionally free callout table allocated for this transport by
10817c478bd9Sstevel@tonic-gate * the service provider.
10827c478bd9Sstevel@tonic-gate */
10837c478bd9Sstevel@tonic-gate static void
10847c478bd9Sstevel@tonic-gate svc_callout_free(SVCMASTERXPRT *xprt)
10857c478bd9Sstevel@tonic-gate {
10867c478bd9Sstevel@tonic-gate SVC_CALLOUT_TABLE *sct = xprt->xp_sct;
10877c478bd9Sstevel@tonic-gate
10887c478bd9Sstevel@tonic-gate if (sct->sct_free) {
10897c478bd9Sstevel@tonic-gate kmem_free(sct->sct_sc, sct->sct_size * sizeof (SVC_CALLOUT));
10907c478bd9Sstevel@tonic-gate kmem_free(sct, sizeof (SVC_CALLOUT_TABLE));
10917c478bd9Sstevel@tonic-gate }
10927c478bd9Sstevel@tonic-gate }
10937c478bd9Sstevel@tonic-gate
10947c478bd9Sstevel@tonic-gate /*
10957c478bd9Sstevel@tonic-gate * Send a reply to an RPC request
10967c478bd9Sstevel@tonic-gate *
10977c478bd9Sstevel@tonic-gate * PSARC 2003/523 Contract Private Interface
10987c478bd9Sstevel@tonic-gate * svc_sendreply
10997c478bd9Sstevel@tonic-gate * Changes must be reviewed by Solaris File Sharing
11007c478bd9Sstevel@tonic-gate * Changes must be communicated to contract-2003-523@sun.com
11017c478bd9Sstevel@tonic-gate */
11027c478bd9Sstevel@tonic-gate bool_t
11037c478bd9Sstevel@tonic-gate svc_sendreply(const SVCXPRT *clone_xprt, const xdrproc_t xdr_results,
11047c478bd9Sstevel@tonic-gate const caddr_t xdr_location)
11057c478bd9Sstevel@tonic-gate {
11067c478bd9Sstevel@tonic-gate struct rpc_msg rply;
11077c478bd9Sstevel@tonic-gate
11087c478bd9Sstevel@tonic-gate rply.rm_direction = REPLY;
11097c478bd9Sstevel@tonic-gate rply.rm_reply.rp_stat = MSG_ACCEPTED;
11107c478bd9Sstevel@tonic-gate rply.acpted_rply.ar_verf = clone_xprt->xp_verf;
11117c478bd9Sstevel@tonic-gate rply.acpted_rply.ar_stat = SUCCESS;
11127c478bd9Sstevel@tonic-gate rply.acpted_rply.ar_results.where = xdr_location;
11137c478bd9Sstevel@tonic-gate rply.acpted_rply.ar_results.proc = xdr_results;
11147c478bd9Sstevel@tonic-gate
11157c478bd9Sstevel@tonic-gate return (SVC_REPLY((SVCXPRT *)clone_xprt, &rply));
11167c478bd9Sstevel@tonic-gate }
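
/*
 * Usage sketch (hypothetical dispatch and XDR routines, not part of
 * this file): a dispatch routine would typically reply with
 *
 *	static void
 *	example_dispatch(struct svc_req *req, SVCXPRT *xprt)
 *	{
 *		struct example_res res;
 *
 *		... decode the arguments and fill in `res' ...
 *		if (!svc_sendreply(xprt, xdr_example_res, (caddr_t)&res))
 *			svcerr_systemerr(xprt);
 *	}
 *
 * The svcerr_*() routines below cover the various error replies.
 */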
11177c478bd9Sstevel@tonic-gate
11187c478bd9Sstevel@tonic-gate /*
11197c478bd9Sstevel@tonic-gate * No procedure error reply
11207c478bd9Sstevel@tonic-gate *
11217c478bd9Sstevel@tonic-gate * PSARC 2003/523 Contract Private Interface
11227c478bd9Sstevel@tonic-gate * svcerr_noproc
11237c478bd9Sstevel@tonic-gate * Changes must be reviewed by Solaris File Sharing
11247c478bd9Sstevel@tonic-gate * Changes must be communicated to contract-2003-523@sun.com
11257c478bd9Sstevel@tonic-gate */
11267c478bd9Sstevel@tonic-gate void
11277c478bd9Sstevel@tonic-gate svcerr_noproc(const SVCXPRT *clone_xprt)
11287c478bd9Sstevel@tonic-gate {
11297c478bd9Sstevel@tonic-gate struct rpc_msg rply;
11307c478bd9Sstevel@tonic-gate
11317c478bd9Sstevel@tonic-gate rply.rm_direction = REPLY;
11327c478bd9Sstevel@tonic-gate rply.rm_reply.rp_stat = MSG_ACCEPTED;
11337c478bd9Sstevel@tonic-gate rply.acpted_rply.ar_verf = clone_xprt->xp_verf;
11347c478bd9Sstevel@tonic-gate rply.acpted_rply.ar_stat = PROC_UNAVAIL;
11357c478bd9Sstevel@tonic-gate SVC_FREERES((SVCXPRT *)clone_xprt);
11367c478bd9Sstevel@tonic-gate SVC_REPLY((SVCXPRT *)clone_xprt, &rply);
11377c478bd9Sstevel@tonic-gate }
11387c478bd9Sstevel@tonic-gate
11397c478bd9Sstevel@tonic-gate /*
11407c478bd9Sstevel@tonic-gate * Can't decode arguments error reply
11417c478bd9Sstevel@tonic-gate *
11427c478bd9Sstevel@tonic-gate * PSARC 2003/523 Contract Private Interface
11437c478bd9Sstevel@tonic-gate * svcerr_decode
11447c478bd9Sstevel@tonic-gate * Changes must be reviewed by Solaris File Sharing
11457c478bd9Sstevel@tonic-gate * Changes must be communicated to contract-2003-523@sun.com
11467c478bd9Sstevel@tonic-gate */
11477c478bd9Sstevel@tonic-gate void
11487c478bd9Sstevel@tonic-gate svcerr_decode(const SVCXPRT *clone_xprt)
11497c478bd9Sstevel@tonic-gate {
11507c478bd9Sstevel@tonic-gate struct rpc_msg rply;
11517c478bd9Sstevel@tonic-gate
11527c478bd9Sstevel@tonic-gate rply.rm_direction = REPLY;
11537c478bd9Sstevel@tonic-gate rply.rm_reply.rp_stat = MSG_ACCEPTED;
11547c478bd9Sstevel@tonic-gate rply.acpted_rply.ar_verf = clone_xprt->xp_verf;
11557c478bd9Sstevel@tonic-gate rply.acpted_rply.ar_stat = GARBAGE_ARGS;
11567c478bd9Sstevel@tonic-gate SVC_FREERES((SVCXPRT *)clone_xprt);
11577c478bd9Sstevel@tonic-gate SVC_REPLY((SVCXPRT *)clone_xprt, &rply);
11587c478bd9Sstevel@tonic-gate }
11597c478bd9Sstevel@tonic-gate
11607c478bd9Sstevel@tonic-gate /*
11617c478bd9Sstevel@tonic-gate * Some system error
11627c478bd9Sstevel@tonic-gate */
11637c478bd9Sstevel@tonic-gate void
11647c478bd9Sstevel@tonic-gate svcerr_systemerr(const SVCXPRT *clone_xprt)
11657c478bd9Sstevel@tonic-gate {
11667c478bd9Sstevel@tonic-gate struct rpc_msg rply;
11677c478bd9Sstevel@tonic-gate
11687c478bd9Sstevel@tonic-gate rply.rm_direction = REPLY;
11697c478bd9Sstevel@tonic-gate rply.rm_reply.rp_stat = MSG_ACCEPTED;
11707c478bd9Sstevel@tonic-gate rply.acpted_rply.ar_verf = clone_xprt->xp_verf;
11717c478bd9Sstevel@tonic-gate rply.acpted_rply.ar_stat = SYSTEM_ERR;
11727c478bd9Sstevel@tonic-gate SVC_FREERES((SVCXPRT *)clone_xprt);
11737c478bd9Sstevel@tonic-gate SVC_REPLY((SVCXPRT *)clone_xprt, &rply);
11747c478bd9Sstevel@tonic-gate }
11757c478bd9Sstevel@tonic-gate
11767c478bd9Sstevel@tonic-gate /*
11777c478bd9Sstevel@tonic-gate * Authentication error reply
11787c478bd9Sstevel@tonic-gate */
11797c478bd9Sstevel@tonic-gate void
11807c478bd9Sstevel@tonic-gate svcerr_auth(const SVCXPRT *clone_xprt, const enum auth_stat why)
11817c478bd9Sstevel@tonic-gate {
11827c478bd9Sstevel@tonic-gate struct rpc_msg rply;
11837c478bd9Sstevel@tonic-gate
11847c478bd9Sstevel@tonic-gate rply.rm_direction = REPLY;
11857c478bd9Sstevel@tonic-gate rply.rm_reply.rp_stat = MSG_DENIED;
11867c478bd9Sstevel@tonic-gate rply.rjcted_rply.rj_stat = AUTH_ERROR;
11877c478bd9Sstevel@tonic-gate rply.rjcted_rply.rj_why = why;
11887c478bd9Sstevel@tonic-gate SVC_FREERES((SVCXPRT *)clone_xprt);
11897c478bd9Sstevel@tonic-gate SVC_REPLY((SVCXPRT *)clone_xprt, &rply);
11907c478bd9Sstevel@tonic-gate }
11917c478bd9Sstevel@tonic-gate
11927c478bd9Sstevel@tonic-gate /*
11937c478bd9Sstevel@tonic-gate * Authentication too weak error reply
11947c478bd9Sstevel@tonic-gate */
11957c478bd9Sstevel@tonic-gate void
11967c478bd9Sstevel@tonic-gate svcerr_weakauth(const SVCXPRT *clone_xprt)
11977c478bd9Sstevel@tonic-gate {
11987c478bd9Sstevel@tonic-gate svcerr_auth((SVCXPRT *)clone_xprt, AUTH_TOOWEAK);
11997c478bd9Sstevel@tonic-gate }
12007c478bd9Sstevel@tonic-gate
12017c478bd9Sstevel@tonic-gate /*
12022e9d26a4Srmesta * Authentication error; bad credentials
12032e9d26a4Srmesta */
12042e9d26a4Srmesta void
12052e9d26a4Srmesta svcerr_badcred(const SVCXPRT *clone_xprt)
12062e9d26a4Srmesta {
12072e9d26a4Srmesta struct rpc_msg rply;
12082e9d26a4Srmesta
12092e9d26a4Srmesta rply.rm_direction = REPLY;
12102e9d26a4Srmesta rply.rm_reply.rp_stat = MSG_DENIED;
12112e9d26a4Srmesta rply.rjcted_rply.rj_stat = AUTH_ERROR;
12122e9d26a4Srmesta rply.rjcted_rply.rj_why = AUTH_BADCRED;
12132e9d26a4Srmesta SVC_FREERES((SVCXPRT *)clone_xprt);
12142e9d26a4Srmesta SVC_REPLY((SVCXPRT *)clone_xprt, &rply);
12152e9d26a4Srmesta }
12162e9d26a4Srmesta
12172e9d26a4Srmesta /*
12187c478bd9Sstevel@tonic-gate * Program unavailable error reply
12197c478bd9Sstevel@tonic-gate *
12207c478bd9Sstevel@tonic-gate * PSARC 2003/523 Contract Private Interface
12217c478bd9Sstevel@tonic-gate * svcerr_noprog
12227c478bd9Sstevel@tonic-gate * Changes must be reviewed by Solaris File Sharing
12237c478bd9Sstevel@tonic-gate * Changes must be communicated to contract-2003-523@sun.com
12247c478bd9Sstevel@tonic-gate */
12257c478bd9Sstevel@tonic-gate void
12267c478bd9Sstevel@tonic-gate svcerr_noprog(const SVCXPRT *clone_xprt)
12277c478bd9Sstevel@tonic-gate {
12287c478bd9Sstevel@tonic-gate struct rpc_msg rply;
12297c478bd9Sstevel@tonic-gate
12307c478bd9Sstevel@tonic-gate rply.rm_direction = REPLY;
12317c478bd9Sstevel@tonic-gate rply.rm_reply.rp_stat = MSG_ACCEPTED;
12327c478bd9Sstevel@tonic-gate rply.acpted_rply.ar_verf = clone_xprt->xp_verf;
12337c478bd9Sstevel@tonic-gate rply.acpted_rply.ar_stat = PROG_UNAVAIL;
12347c478bd9Sstevel@tonic-gate SVC_FREERES((SVCXPRT *)clone_xprt);
12357c478bd9Sstevel@tonic-gate SVC_REPLY((SVCXPRT *)clone_xprt, &rply);
12367c478bd9Sstevel@tonic-gate }
12377c478bd9Sstevel@tonic-gate
12387c478bd9Sstevel@tonic-gate /*
12397c478bd9Sstevel@tonic-gate * Program version mismatch error reply
12407c478bd9Sstevel@tonic-gate *
12417c478bd9Sstevel@tonic-gate * PSARC 2003/523 Contract Private Interface
12427c478bd9Sstevel@tonic-gate * svcerr_progvers
12437c478bd9Sstevel@tonic-gate * Changes must be reviewed by Solaris File Sharing
12447c478bd9Sstevel@tonic-gate * Changes must be communicated to contract-2003-523@sun.com
12457c478bd9Sstevel@tonic-gate */
12467c478bd9Sstevel@tonic-gate void
12477c478bd9Sstevel@tonic-gate svcerr_progvers(const SVCXPRT *clone_xprt,
12487c478bd9Sstevel@tonic-gate const rpcvers_t low_vers, const rpcvers_t high_vers)
12497c478bd9Sstevel@tonic-gate {
12507c478bd9Sstevel@tonic-gate struct rpc_msg rply;
12517c478bd9Sstevel@tonic-gate
12527c478bd9Sstevel@tonic-gate rply.rm_direction = REPLY;
12537c478bd9Sstevel@tonic-gate rply.rm_reply.rp_stat = MSG_ACCEPTED;
12547c478bd9Sstevel@tonic-gate rply.acpted_rply.ar_verf = clone_xprt->xp_verf;
12557c478bd9Sstevel@tonic-gate rply.acpted_rply.ar_stat = PROG_MISMATCH;
12567c478bd9Sstevel@tonic-gate rply.acpted_rply.ar_vers.low = low_vers;
12577c478bd9Sstevel@tonic-gate rply.acpted_rply.ar_vers.high = high_vers;
12587c478bd9Sstevel@tonic-gate SVC_FREERES((SVCXPRT *)clone_xprt);
12597c478bd9Sstevel@tonic-gate SVC_REPLY((SVCXPRT *)clone_xprt, &rply);
12607c478bd9Sstevel@tonic-gate }
12617c478bd9Sstevel@tonic-gate
12627c478bd9Sstevel@tonic-gate /*
12637c478bd9Sstevel@tonic-gate * Get server side input from some transport.
12647c478bd9Sstevel@tonic-gate *
12657c478bd9Sstevel@tonic-gate * Statement of authentication parameters management:
12667c478bd9Sstevel@tonic-gate * This function owns and manages all authentication parameters, specifically
12677c478bd9Sstevel@tonic-gate * the "raw" parameters (msg.rm_call.cb_cred and msg.rm_call.cb_verf) and
12687c478bd9Sstevel@tonic-gate * the "cooked" credentials (rqst->rq_clntcred).
12697c478bd9Sstevel@tonic-gate * However, this function does not know the structure of the cooked
12707c478bd9Sstevel@tonic-gate * credentials, so it makes the following assumptions:
12717c478bd9Sstevel@tonic-gate * a) the structure is contiguous (no pointers), and
12727c478bd9Sstevel@tonic-gate * b) the cred structure size does not exceed RQCRED_SIZE bytes.
12737c478bd9Sstevel@tonic-gate * In all events, all three parameters are freed upon exit from this routine.
12747c478bd9Sstevel@tonic-gate * The storage is trivially managed on the call stack in user land, but
12757c478bd9Sstevel@tonic-gate * is malloced in kernel land.
12767c478bd9Sstevel@tonic-gate *
12777c478bd9Sstevel@tonic-gate * Note: the xprt's xp_svc_lock is not held while the service's dispatch
12787c478bd9Sstevel@tonic-gate * routine is running. If we decide to implement svc_unregister(), we'll
12797c478bd9Sstevel@tonic-gate * need to decide whether it's okay for a thread to unregister a service
12807c478bd9Sstevel@tonic-gate * while a request is being processed. If we decide that this is a
12817c478bd9Sstevel@tonic-gate * problem, we can probably use some sort of reference counting scheme to
12827c478bd9Sstevel@tonic-gate * keep the callout entry from going away until the request has completed.
12837c478bd9Sstevel@tonic-gate */
12847c478bd9Sstevel@tonic-gate static void
12857c478bd9Sstevel@tonic-gate svc_getreq(
12867c478bd9Sstevel@tonic-gate SVCXPRT *clone_xprt, /* clone transport handle */
12877c478bd9Sstevel@tonic-gate mblk_t *mp)
12887c478bd9Sstevel@tonic-gate {
12897c478bd9Sstevel@tonic-gate struct rpc_msg msg;
12907c478bd9Sstevel@tonic-gate struct svc_req r;
12917c478bd9Sstevel@tonic-gate char *cred_area; /* too big to allocate on call stack */
12927c478bd9Sstevel@tonic-gate
12937c478bd9Sstevel@tonic-gate TRACE_0(TR_FAC_KRPC, TR_SVC_GETREQ_START,
12947c478bd9Sstevel@tonic-gate "svc_getreq_start:");
12957c478bd9Sstevel@tonic-gate
12967c478bd9Sstevel@tonic-gate ASSERT(clone_xprt->xp_master != NULL);
1297de8c4a14SErik Nordmark ASSERT(!is_system_labeled() || msg_getcred(mp, NULL) != NULL ||
129845916cd2Sjpk mp->b_datap->db_type != M_DATA);
12997c478bd9Sstevel@tonic-gate
13007c478bd9Sstevel@tonic-gate /*
13017c478bd9Sstevel@tonic-gate * Firstly, allocate the authentication parameters' storage
13027c478bd9Sstevel@tonic-gate */
13037c478bd9Sstevel@tonic-gate mutex_enter(&rqcred_lock);
13047c478bd9Sstevel@tonic-gate if (rqcred_head) {
13057c478bd9Sstevel@tonic-gate cred_area = rqcred_head;
13067c478bd9Sstevel@tonic-gate
13077c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
13087c478bd9Sstevel@tonic-gate rqcred_head = *(caddr_t *)rqcred_head;
13097c478bd9Sstevel@tonic-gate mutex_exit(&rqcred_lock);
13107c478bd9Sstevel@tonic-gate } else {
13117c478bd9Sstevel@tonic-gate mutex_exit(&rqcred_lock);
13127c478bd9Sstevel@tonic-gate cred_area = kmem_alloc(2 * MAX_AUTH_BYTES + RQCRED_SIZE,
13137c478bd9Sstevel@tonic-gate KM_SLEEP);
13147c478bd9Sstevel@tonic-gate }
13157c478bd9Sstevel@tonic-gate msg.rm_call.cb_cred.oa_base = cred_area;
13167c478bd9Sstevel@tonic-gate msg.rm_call.cb_verf.oa_base = &(cred_area[MAX_AUTH_BYTES]);
13177c478bd9Sstevel@tonic-gate r.rq_clntcred = &(cred_area[2 * MAX_AUTH_BYTES]);
13187c478bd9Sstevel@tonic-gate
13197c478bd9Sstevel@tonic-gate /*
132045916cd2Sjpk * The underlying transport recv routine may modify the mblk data
132145916cd2Sjpk * and make it difficult to extract the label afterwards, so
132245916cd2Sjpk * get the label from the raw mblk data now.
132345916cd2Sjpk */
132445916cd2Sjpk if (is_system_labeled()) {
1325de8c4a14SErik Nordmark cred_t *cr;
132645916cd2Sjpk
132745916cd2Sjpk r.rq_label = kmem_alloc(sizeof (bslabel_t), KM_SLEEP);
1328de8c4a14SErik Nordmark cr = msg_getcred(mp, NULL);
1329de8c4a14SErik Nordmark ASSERT(cr != NULL);
1330de8c4a14SErik Nordmark
1331de8c4a14SErik Nordmark bcopy(label2bslabel(crgetlabel(cr)), r.rq_label,
133245916cd2Sjpk sizeof (bslabel_t));
133345916cd2Sjpk } else {
133445916cd2Sjpk r.rq_label = NULL;
133545916cd2Sjpk }
133645916cd2Sjpk
133745916cd2Sjpk /*
13387c478bd9Sstevel@tonic-gate * Now receive a message from the transport.
13397c478bd9Sstevel@tonic-gate */
13407c478bd9Sstevel@tonic-gate if (SVC_RECV(clone_xprt, mp, &msg)) {
13417c478bd9Sstevel@tonic-gate void (*dispatchroutine) (struct svc_req *, SVCXPRT *);
13427c478bd9Sstevel@tonic-gate rpcvers_t vers_min;
13437c478bd9Sstevel@tonic-gate rpcvers_t vers_max;
13447c478bd9Sstevel@tonic-gate bool_t no_dispatch;
13457c478bd9Sstevel@tonic-gate enum auth_stat why;
13467c478bd9Sstevel@tonic-gate
13477c478bd9Sstevel@tonic-gate /*
13487c478bd9Sstevel@tonic-gate * Find the registered program and call its
13497c478bd9Sstevel@tonic-gate * dispatch routine.
13507c478bd9Sstevel@tonic-gate */
13517c478bd9Sstevel@tonic-gate r.rq_xprt = clone_xprt;
13527c478bd9Sstevel@tonic-gate r.rq_prog = msg.rm_call.cb_prog;
13537c478bd9Sstevel@tonic-gate r.rq_vers = msg.rm_call.cb_vers;
13547c478bd9Sstevel@tonic-gate r.rq_proc = msg.rm_call.cb_proc;
13557c478bd9Sstevel@tonic-gate r.rq_cred = msg.rm_call.cb_cred;
13567c478bd9Sstevel@tonic-gate
13577c478bd9Sstevel@tonic-gate /*
13587c478bd9Sstevel@tonic-gate * First authenticate the message.
13597c478bd9Sstevel@tonic-gate */
13607c478bd9Sstevel@tonic-gate TRACE_0(TR_FAC_KRPC, TR_SVC_GETREQ_AUTH_START,
13617c478bd9Sstevel@tonic-gate "svc_getreq_auth_start:");
13627c478bd9Sstevel@tonic-gate if ((why = sec_svc_msg(&r, &msg, &no_dispatch)) != AUTH_OK) {
13637c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_KRPC, TR_SVC_GETREQ_AUTH_END,
13647c478bd9Sstevel@tonic-gate "svc_getreq_auth_end:(%S)", "failed");
13657c478bd9Sstevel@tonic-gate svcerr_auth(clone_xprt, why);
13667c478bd9Sstevel@tonic-gate /*
13677c478bd9Sstevel@tonic-gate * Free the arguments.
13687c478bd9Sstevel@tonic-gate */
13697c478bd9Sstevel@tonic-gate (void) SVC_FREEARGS(clone_xprt, NULL, NULL);
13707c478bd9Sstevel@tonic-gate } else if (no_dispatch) {
13717c478bd9Sstevel@tonic-gate /*
13727c478bd9Sstevel@tonic-gate * XXX - when bug id 4053736 is done, remove
13737c478bd9Sstevel@tonic-gate * the SVC_FREEARGS() call.
13747c478bd9Sstevel@tonic-gate */
13757c478bd9Sstevel@tonic-gate (void) SVC_FREEARGS(clone_xprt, NULL, NULL);
13767c478bd9Sstevel@tonic-gate } else {
13777c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_KRPC, TR_SVC_GETREQ_AUTH_END,
13787c478bd9Sstevel@tonic-gate "svc_getreq_auth_end:(%S)", "good");
13797c478bd9Sstevel@tonic-gate
13807c478bd9Sstevel@tonic-gate dispatchroutine = svc_callout_find(clone_xprt,
13817c478bd9Sstevel@tonic-gate r.rq_prog, r.rq_vers, &vers_min, &vers_max);
13827c478bd9Sstevel@tonic-gate
13837c478bd9Sstevel@tonic-gate if (dispatchroutine) {
13847c478bd9Sstevel@tonic-gate (*dispatchroutine) (&r, clone_xprt);
13857c478bd9Sstevel@tonic-gate } else {
13867c478bd9Sstevel@tonic-gate /*
13877c478bd9Sstevel@tonic-gate * If we got here, the program or version
13887c478bd9Sstevel@tonic-gate * is not served ...
13897c478bd9Sstevel@tonic-gate */
13907c478bd9Sstevel@tonic-gate if (vers_max == 0 ||
13917c478bd9Sstevel@tonic-gate version_keepquiet(clone_xprt))
13927c478bd9Sstevel@tonic-gate svcerr_noprog(clone_xprt);
13937c478bd9Sstevel@tonic-gate else
13947c478bd9Sstevel@tonic-gate svcerr_progvers(clone_xprt, vers_min,
13957c478bd9Sstevel@tonic-gate vers_max);
13967c478bd9Sstevel@tonic-gate
13977c478bd9Sstevel@tonic-gate /*
13987c478bd9Sstevel@tonic-gate * Free the arguments. For successful calls
13997c478bd9Sstevel@tonic-gate * this is done by the dispatch routine.
14007c478bd9Sstevel@tonic-gate */
14017c478bd9Sstevel@tonic-gate (void) SVC_FREEARGS(clone_xprt, NULL, NULL);
14027c478bd9Sstevel@tonic-gate /* Fall through to ... */
14037c478bd9Sstevel@tonic-gate }
14047c478bd9Sstevel@tonic-gate /*
14057c478bd9Sstevel@tonic-gate * Call cleanup procedure for RPCSEC_GSS.
14067c478bd9Sstevel@tonic-gate * This is a hack since there is currently no
14077c478bd9Sstevel@tonic-gate * op, such as SVC_CLEANAUTH. rpc_gss_cleanup
14087c478bd9Sstevel@tonic-gate * should only be called for a non-null proc.
14097c478bd9Sstevel@tonic-gate * Null procs in RPC GSS are overloaded to
14107c478bd9Sstevel@tonic-gate * provide context setup and control. The main
14117c478bd9Sstevel@tonic-gate * purpose of rpc_gss_cleanup is to decrement the
14127c478bd9Sstevel@tonic-gate * reference count associated with the cached
14137c478bd9Sstevel@tonic-gate * GSS security context. We should never get here
14147c478bd9Sstevel@tonic-gate * for an RPCSEC_GSS null proc since *no_dispatch
14157c478bd9Sstevel@tonic-gate * would have been set to true from sec_svc_msg above.
14167c478bd9Sstevel@tonic-gate */
14177c478bd9Sstevel@tonic-gate if (r.rq_cred.oa_flavor == RPCSEC_GSS)
14187c478bd9Sstevel@tonic-gate rpc_gss_cleanup(clone_xprt);
14197c478bd9Sstevel@tonic-gate }
14207c478bd9Sstevel@tonic-gate }
14217c478bd9Sstevel@tonic-gate
142245916cd2Sjpk if (r.rq_label != NULL)
142345916cd2Sjpk kmem_free(r.rq_label, sizeof (bslabel_t));
142445916cd2Sjpk
14257c478bd9Sstevel@tonic-gate /*
14267c478bd9Sstevel@tonic-gate * Free authentication parameters' storage
14277c478bd9Sstevel@tonic-gate */
14287c478bd9Sstevel@tonic-gate mutex_enter(&rqcred_lock);
14297c478bd9Sstevel@tonic-gate /* LINTED pointer alignment */
14307c478bd9Sstevel@tonic-gate *(caddr_t *)cred_area = rqcred_head;
14317c478bd9Sstevel@tonic-gate rqcred_head = cred_area;
14327c478bd9Sstevel@tonic-gate mutex_exit(&rqcred_lock);
14337c478bd9Sstevel@tonic-gate }
14347c478bd9Sstevel@tonic-gate
14357c478bd9Sstevel@tonic-gate /*
14367c478bd9Sstevel@tonic-gate * Allocate a new clone transport handle.
14377c478bd9Sstevel@tonic-gate */
1438bfd8310aSGlenn Barry SVCXPRT *
14397c478bd9Sstevel@tonic-gate svc_clone_init(void)
14407c478bd9Sstevel@tonic-gate {
14417c478bd9Sstevel@tonic-gate SVCXPRT *clone_xprt;
14427c478bd9Sstevel@tonic-gate
14437c478bd9Sstevel@tonic-gate clone_xprt = kmem_zalloc(sizeof (SVCXPRT), KM_SLEEP);
14447c478bd9Sstevel@tonic-gate clone_xprt->xp_cred = crget();
14457c478bd9Sstevel@tonic-gate return (clone_xprt);
14467c478bd9Sstevel@tonic-gate }
14477c478bd9Sstevel@tonic-gate
14487c478bd9Sstevel@tonic-gate /*
14497c478bd9Sstevel@tonic-gate * Free memory allocated by svc_clone_init.
14507c478bd9Sstevel@tonic-gate */
1451bfd8310aSGlenn Barry void
14527c478bd9Sstevel@tonic-gate svc_clone_free(SVCXPRT *clone_xprt)
14537c478bd9Sstevel@tonic-gate {
14547c478bd9Sstevel@tonic-gate /* Free credentials from crget() */
14557c478bd9Sstevel@tonic-gate if (clone_xprt->xp_cred)
14567c478bd9Sstevel@tonic-gate crfree(clone_xprt->xp_cred);
14577c478bd9Sstevel@tonic-gate kmem_free(clone_xprt, sizeof (SVCXPRT));
14587c478bd9Sstevel@tonic-gate }
14597c478bd9Sstevel@tonic-gate
14607c478bd9Sstevel@tonic-gate /*
14617c478bd9Sstevel@tonic-gate * Link a per-thread clone transport handle to a master
14627c478bd9Sstevel@tonic-gate * - increment a thread reference count on the master
14637c478bd9Sstevel@tonic-gate * - copy some of the master's fields to the clone
14647c478bd9Sstevel@tonic-gate * - call a transport specific clone routine.
14657c478bd9Sstevel@tonic-gate */
1466bfd8310aSGlenn Barry void
146760536ef9SKaren Rochford svc_clone_link(SVCMASTERXPRT *xprt, SVCXPRT *clone_xprt, SVCXPRT *clone_xprt2)
14687c478bd9Sstevel@tonic-gate {
14697c478bd9Sstevel@tonic-gate cred_t *cred = clone_xprt->xp_cred;
14707c478bd9Sstevel@tonic-gate
14717c478bd9Sstevel@tonic-gate ASSERT(cred);
14727c478bd9Sstevel@tonic-gate
14737c478bd9Sstevel@tonic-gate /*
14747c478bd9Sstevel@tonic-gate * Bump up master's thread count.
14757c478bd9Sstevel@tonic-gate * Linking a per-thread clone transport handle to a master
14767c478bd9Sstevel@tonic-gate * associates a service thread with the master.
14777c478bd9Sstevel@tonic-gate */
14787c478bd9Sstevel@tonic-gate mutex_enter(&xprt->xp_thread_lock);
14797c478bd9Sstevel@tonic-gate xprt->xp_threads++;
14807c478bd9Sstevel@tonic-gate mutex_exit(&xprt->xp_thread_lock);
14817c478bd9Sstevel@tonic-gate
14827c478bd9Sstevel@tonic-gate /* Clear everything */
14837c478bd9Sstevel@tonic-gate bzero(clone_xprt, sizeof (SVCXPRT));
14847c478bd9Sstevel@tonic-gate
14857c478bd9Sstevel@tonic-gate /* Set pointer to the master transport structure */
14867c478bd9Sstevel@tonic-gate clone_xprt->xp_master = xprt;
14877c478bd9Sstevel@tonic-gate
14887c478bd9Sstevel@tonic-gate /* Structure copy of all the common fields */
14897c478bd9Sstevel@tonic-gate clone_xprt->xp_xpc = xprt->xp_xpc;
14907c478bd9Sstevel@tonic-gate
14917c478bd9Sstevel@tonic-gate /* Restore per-thread fields (xp_cred) */
14927c478bd9Sstevel@tonic-gate clone_xprt->xp_cred = cred;
14937c478bd9Sstevel@tonic-gate
149460536ef9SKaren Rochford if (clone_xprt2)
149560536ef9SKaren Rochford SVC_CLONE_XPRT(clone_xprt2, clone_xprt);
14967c478bd9Sstevel@tonic-gate }
14977c478bd9Sstevel@tonic-gate
14987c478bd9Sstevel@tonic-gate /*
14997c478bd9Sstevel@tonic-gate * Unlink a non-detached clone transport handle from a master
15007c478bd9Sstevel@tonic-gate * - decrement a thread reference count on the master
15017c478bd9Sstevel@tonic-gate * - if the transport is closing (xp_wq is NULL) call svc_xprt_cleanup();
15027c478bd9Sstevel@tonic-gate * if this is the last non-detached thread, or the last thread overall,
15037c478bd9Sstevel@tonic-gate * on this transport then it will, respectively, close and/or destroy it
15047c478bd9Sstevel@tonic-gate * - call transport specific function to destroy the clone handle
15057c478bd9Sstevel@tonic-gate * - clear xp_master to avoid recursion.
15067c478bd9Sstevel@tonic-gate */
1507bfd8310aSGlenn Barry void
15087c478bd9Sstevel@tonic-gate svc_clone_unlink(SVCXPRT *clone_xprt)
15097c478bd9Sstevel@tonic-gate {
15107c478bd9Sstevel@tonic-gate SVCMASTERXPRT *xprt = clone_xprt->xp_master;
15117c478bd9Sstevel@tonic-gate
15127c478bd9Sstevel@tonic-gate /* This cannot be a detached thread */
15137c478bd9Sstevel@tonic-gate ASSERT(!clone_xprt->xp_detached);
15147c478bd9Sstevel@tonic-gate ASSERT(xprt->xp_threads > 0);
15157c478bd9Sstevel@tonic-gate
15167c478bd9Sstevel@tonic-gate /* Decrement a reference count on the transport */
15177c478bd9Sstevel@tonic-gate mutex_enter(&xprt->xp_thread_lock);
15187c478bd9Sstevel@tonic-gate xprt->xp_threads--;
15197c478bd9Sstevel@tonic-gate
15207c478bd9Sstevel@tonic-gate /* svc_xprt_cleanup() unlocks xp_thread_lock or destroys xprt */
15217c478bd9Sstevel@tonic-gate if (xprt->xp_wq)
15227c478bd9Sstevel@tonic-gate mutex_exit(&xprt->xp_thread_lock);
15237c478bd9Sstevel@tonic-gate else
15247c478bd9Sstevel@tonic-gate svc_xprt_cleanup(xprt, FALSE);
15257c478bd9Sstevel@tonic-gate
15267c478bd9Sstevel@tonic-gate /* Call a transport specific clone `destroy' function */
15277c478bd9Sstevel@tonic-gate SVC_CLONE_DESTROY(clone_xprt);
15287c478bd9Sstevel@tonic-gate
15297c478bd9Sstevel@tonic-gate /* Clear xp_master */
15307c478bd9Sstevel@tonic-gate clone_xprt->xp_master = NULL;
15317c478bd9Sstevel@tonic-gate }
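
/*
 * Life-cycle sketch (simplified; the actual caller is svc_run() and its
 * helpers, not shown here): a non-detached service thread is expected
 * to use its clone handle roughly as
 *
 *	SVCXPRT *clone_xprt = svc_clone_init();
 *	...
 *	svc_clone_link(xprt, clone_xprt, NULL);
 *	... process requests on clone_xprt ...
 *	svc_clone_unlink(clone_xprt);
 *	svc_clone_free(clone_xprt);
 *
 * svc_thread_exit() below combines the unlink and free steps.
 */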
15327c478bd9Sstevel@tonic-gate
15337c478bd9Sstevel@tonic-gate /*
15347c478bd9Sstevel@tonic-gate * Unlink a detached clone transport handle from a master
15357c478bd9Sstevel@tonic-gate * - decrement the thread count on the master
15367c478bd9Sstevel@tonic-gate * - if the transport is closing (xp_wq is NULL) call svc_xprt_cleanup();
15377c478bd9Sstevel@tonic-gate * if this is the last thread on this transport then it will destroy
15387c478bd9Sstevel@tonic-gate * the transport.
15397c478bd9Sstevel@tonic-gate * - call a transport specific function to destroy the clone handle
15407c478bd9Sstevel@tonic-gate * - clear xp_master to avoid recursion.
15417c478bd9Sstevel@tonic-gate */
15427c478bd9Sstevel@tonic-gate static void
15437c478bd9Sstevel@tonic-gate svc_clone_unlinkdetached(SVCXPRT *clone_xprt)
15447c478bd9Sstevel@tonic-gate {
15457c478bd9Sstevel@tonic-gate SVCMASTERXPRT *xprt = clone_xprt->xp_master;
15467c478bd9Sstevel@tonic-gate
15477c478bd9Sstevel@tonic-gate /* This must be a detached thread */
15487c478bd9Sstevel@tonic-gate ASSERT(clone_xprt->xp_detached);
15497c478bd9Sstevel@tonic-gate ASSERT(xprt->xp_detached_threads > 0);
15507c478bd9Sstevel@tonic-gate ASSERT(xprt->xp_threads + xprt->xp_detached_threads > 0);
15517c478bd9Sstevel@tonic-gate
15527c478bd9Sstevel@tonic-gate /* Grab xprt->xp_thread_lock and decrement link counts */
15537c478bd9Sstevel@tonic-gate mutex_enter(&xprt->xp_thread_lock);
15547c478bd9Sstevel@tonic-gate xprt->xp_detached_threads--;
15557c478bd9Sstevel@tonic-gate
15567c478bd9Sstevel@tonic-gate /* svc_xprt_cleanup() unlocks xp_thread_lock or destroys xprt */
15577c478bd9Sstevel@tonic-gate if (xprt->xp_wq)
15587c478bd9Sstevel@tonic-gate mutex_exit(&xprt->xp_thread_lock);
15597c478bd9Sstevel@tonic-gate else
15607c478bd9Sstevel@tonic-gate svc_xprt_cleanup(xprt, TRUE);
15617c478bd9Sstevel@tonic-gate
15627c478bd9Sstevel@tonic-gate /* Call transport specific clone `destroy' function */
15637c478bd9Sstevel@tonic-gate SVC_CLONE_DESTROY(clone_xprt);
15647c478bd9Sstevel@tonic-gate
15657c478bd9Sstevel@tonic-gate /* Clear xp_master */
15667c478bd9Sstevel@tonic-gate clone_xprt->xp_master = NULL;
15677c478bd9Sstevel@tonic-gate }
15687c478bd9Sstevel@tonic-gate
15697c478bd9Sstevel@tonic-gate /*
15707c478bd9Sstevel@tonic-gate * Try to exit a non-detached service thread
15717c478bd9Sstevel@tonic-gate * - check if there are enough threads left
15727c478bd9Sstevel@tonic-gate * - if this thread (i.e. its clone transport handle) is linked
15737c478bd9Sstevel@tonic-gate * to a master transport then unlink it
15747c478bd9Sstevel@tonic-gate * - free the clone structure
15757c478bd9Sstevel@tonic-gate * - return to userland for thread exit
15767c478bd9Sstevel@tonic-gate *
15777c478bd9Sstevel@tonic-gate * If this is the last non-detached or the last thread on this
15787c478bd9Sstevel@tonic-gate * transport then the call to svc_clone_unlink() will, respectively,
15797c478bd9Sstevel@tonic-gate * close and/or destroy the transport.
15807c478bd9Sstevel@tonic-gate */
15817c478bd9Sstevel@tonic-gate static void
15827c478bd9Sstevel@tonic-gate svc_thread_exit(SVCPOOL *pool, SVCXPRT *clone_xprt)
15837c478bd9Sstevel@tonic-gate {
15847c478bd9Sstevel@tonic-gate if (clone_xprt->xp_master)
15857c478bd9Sstevel@tonic-gate svc_clone_unlink(clone_xprt);
15867c478bd9Sstevel@tonic-gate svc_clone_free(clone_xprt);
15877c478bd9Sstevel@tonic-gate
15887c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_thread_lock);
15897c478bd9Sstevel@tonic-gate pool->p_threads--;
15907c478bd9Sstevel@tonic-gate if (pool->p_closing && svc_pool_tryexit(pool))
15917c478bd9Sstevel@tonic-gate /* return - thread exit will be handled at user level */
15927c478bd9Sstevel@tonic-gate return;
15937c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_thread_lock);
15947c478bd9Sstevel@tonic-gate
15957c478bd9Sstevel@tonic-gate /* return - thread exit will be handled at user level */
15967c478bd9Sstevel@tonic-gate }
15977c478bd9Sstevel@tonic-gate
15987c478bd9Sstevel@tonic-gate /*
15997c478bd9Sstevel@tonic-gate * Exit a detached service thread that returned to svc_run
16007c478bd9Sstevel@tonic-gate * - decrement the `detached thread' count for the pool
16017c478bd9Sstevel@tonic-gate * - unlink the detached clone transport handle from the master
16027c478bd9Sstevel@tonic-gate * - free the clone structure
16037c478bd9Sstevel@tonic-gate * - return to userland for thread exit
16047c478bd9Sstevel@tonic-gate *
16057c478bd9Sstevel@tonic-gate * If this is the last thread on this transport then the call
16067c478bd9Sstevel@tonic-gate * to svc_clone_unlinkdetached() will destroy the transport.
16077c478bd9Sstevel@tonic-gate */
16087c478bd9Sstevel@tonic-gate static void
16097c478bd9Sstevel@tonic-gate svc_thread_exitdetached(SVCPOOL *pool, SVCXPRT *clone_xprt)
16107c478bd9Sstevel@tonic-gate {
16117c478bd9Sstevel@tonic-gate /* This must be a detached thread */
16127c478bd9Sstevel@tonic-gate ASSERT(clone_xprt->xp_master);
16137c478bd9Sstevel@tonic-gate ASSERT(clone_xprt->xp_detached);
16147c478bd9Sstevel@tonic-gate ASSERT(!MUTEX_HELD(&pool->p_thread_lock));
16157c478bd9Sstevel@tonic-gate
16167c478bd9Sstevel@tonic-gate svc_clone_unlinkdetached(clone_xprt);
16177c478bd9Sstevel@tonic-gate svc_clone_free(clone_xprt);
16187c478bd9Sstevel@tonic-gate
16197c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_thread_lock);
16207c478bd9Sstevel@tonic-gate
16217c478bd9Sstevel@tonic-gate ASSERT(pool->p_reserved_threads >= 0);
16227c478bd9Sstevel@tonic-gate ASSERT(pool->p_detached_threads > 0);
16237c478bd9Sstevel@tonic-gate
16247c478bd9Sstevel@tonic-gate pool->p_detached_threads--;
16257c478bd9Sstevel@tonic-gate if (pool->p_closing && svc_pool_tryexit(pool))
16267c478bd9Sstevel@tonic-gate /* return - thread exit will be handled at user level */
16277c478bd9Sstevel@tonic-gate return;
16287c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_thread_lock);
16297c478bd9Sstevel@tonic-gate
16307c478bd9Sstevel@tonic-gate /* return - thread exit will be handled at user level */
16317c478bd9Sstevel@tonic-gate }
16327c478bd9Sstevel@tonic-gate
16337c478bd9Sstevel@tonic-gate /*
16347c478bd9Sstevel@tonic-gate * PSARC 2003/523 Contract Private Interface
16357c478bd9Sstevel@tonic-gate * svc_wait
16367c478bd9Sstevel@tonic-gate * Changes must be reviewed by Solaris File Sharing
16377c478bd9Sstevel@tonic-gate * Changes must be communicated to contract-2003-523@sun.com
16387c478bd9Sstevel@tonic-gate */
16397c478bd9Sstevel@tonic-gate int
16407c478bd9Sstevel@tonic-gate svc_wait(int id)
16417c478bd9Sstevel@tonic-gate {
16427c478bd9Sstevel@tonic-gate SVCPOOL *pool;
16437c478bd9Sstevel@tonic-gate int err = 0;
16447c478bd9Sstevel@tonic-gate struct svc_globals *svc;
16457c478bd9Sstevel@tonic-gate
16467c478bd9Sstevel@tonic-gate svc = zone_getspecific(svc_zone_key, curproc->p_zone);
16477c478bd9Sstevel@tonic-gate mutex_enter(&svc->svc_plock);
16487c478bd9Sstevel@tonic-gate pool = svc_pool_find(svc, id);
16497c478bd9Sstevel@tonic-gate mutex_exit(&svc->svc_plock);
16507c478bd9Sstevel@tonic-gate
16517c478bd9Sstevel@tonic-gate if (pool == NULL)
16527c478bd9Sstevel@tonic-gate return (ENOENT);
16537c478bd9Sstevel@tonic-gate
16547c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_user_lock);
16557c478bd9Sstevel@tonic-gate
16567c478bd9Sstevel@tonic-gate /* Check if there's already a user thread waiting on this pool */
16577c478bd9Sstevel@tonic-gate if (pool->p_user_waiting) {
16587c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_user_lock);
16597c478bd9Sstevel@tonic-gate return (EBUSY);
16607c478bd9Sstevel@tonic-gate }
16617c478bd9Sstevel@tonic-gate
16627c478bd9Sstevel@tonic-gate pool->p_user_waiting = TRUE;
16637c478bd9Sstevel@tonic-gate
16647c478bd9Sstevel@tonic-gate /* Go to sleep, waiting for the signaled flag. */
16657c478bd9Sstevel@tonic-gate while (!pool->p_signal_create_thread && !pool->p_user_exit) {
16667c478bd9Sstevel@tonic-gate if (cv_wait_sig(&pool->p_user_cv, &pool->p_user_lock) == 0) {
16677c478bd9Sstevel@tonic-gate /* Interrupted, return to handle exit or signal */
16687c478bd9Sstevel@tonic-gate pool->p_user_waiting = FALSE;
16697c478bd9Sstevel@tonic-gate pool->p_signal_create_thread = FALSE;
16707c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_user_lock);
16717c478bd9Sstevel@tonic-gate
16727c478bd9Sstevel@tonic-gate /*
16737c478bd9Sstevel@tonic-gate * Thread has been interrupted and therefore
16747c478bd9Sstevel@tonic-gate * the service daemon is leaving as well so
16757c478bd9Sstevel@tonic-gate * let's go ahead and remove the service
16767c478bd9Sstevel@tonic-gate * pool at this time.
16777c478bd9Sstevel@tonic-gate */
16787c478bd9Sstevel@tonic-gate mutex_enter(&svc->svc_plock);
16797c478bd9Sstevel@tonic-gate svc_pool_unregister(svc, pool);
16807c478bd9Sstevel@tonic-gate mutex_exit(&svc->svc_plock);
16817c478bd9Sstevel@tonic-gate
16827c478bd9Sstevel@tonic-gate return (EINTR);
16837c478bd9Sstevel@tonic-gate }
16847c478bd9Sstevel@tonic-gate }
16857c478bd9Sstevel@tonic-gate
16867c478bd9Sstevel@tonic-gate pool->p_signal_create_thread = FALSE;
16877c478bd9Sstevel@tonic-gate pool->p_user_waiting = FALSE;
16887c478bd9Sstevel@tonic-gate
16897c478bd9Sstevel@tonic-gate /*
16907c478bd9Sstevel@tonic-gate * About to exit the service pool. Set return value
16917c478bd9Sstevel@tonic-gate * to let the userland code know our intent. Signal
16927c478bd9Sstevel@tonic-gate * svc_thread_creator() so that it can clean up the
16937c478bd9Sstevel@tonic-gate * pool structure.
16947c478bd9Sstevel@tonic-gate */
16957c478bd9Sstevel@tonic-gate if (pool->p_user_exit) {
16967c478bd9Sstevel@tonic-gate err = ECANCELED;
16977c478bd9Sstevel@tonic-gate cv_signal(&pool->p_user_cv);
16987c478bd9Sstevel@tonic-gate }
16997c478bd9Sstevel@tonic-gate
17007c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_user_lock);
17017c478bd9Sstevel@tonic-gate
17027c478bd9Sstevel@tonic-gate /* Return to userland with error code, for possible thread creation. */
17037c478bd9Sstevel@tonic-gate return (err);
17047c478bd9Sstevel@tonic-gate }
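
/*
 * Sketch of the expected userland interplay (pseudocode, assumptions
 * only; the entry path into svc_wait() is outside this code): a service
 * daemon parks one thread here and creates a new service thread each
 * time svc_wait() returns zero:
 *
 *	for (;;) {
 *		error = svc_wait(poolid);
 *		if (error != 0)
 *			break;		(pool gone or daemon exiting)
 *		... create a thread that ends up in svc_run(poolid) ...
 *	}
 *
 * ENOENT, EBUSY, EINTR and ECANCELED all terminate the loop above.
 */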
17057c478bd9Sstevel@tonic-gate
17067c478bd9Sstevel@tonic-gate /*
17077c478bd9Sstevel@tonic-gate * `Service threads' creator thread.
17087c478bd9Sstevel@tonic-gate * The creator thread waits for a signal to create a new thread.
17097c478bd9Sstevel@tonic-gate */
17107c478bd9Sstevel@tonic-gate static void
17117c478bd9Sstevel@tonic-gate svc_thread_creator(SVCPOOL *pool)
17127c478bd9Sstevel@tonic-gate {
17137c478bd9Sstevel@tonic-gate callb_cpr_t cpr_info; /* CPR info for the creator thread */
17147c478bd9Sstevel@tonic-gate
17157c478bd9Sstevel@tonic-gate CALLB_CPR_INIT(&cpr_info, &pool->p_creator_lock, callb_generic_cpr,
17167c478bd9Sstevel@tonic-gate "svc_thread_creator");
17177c478bd9Sstevel@tonic-gate
17187c478bd9Sstevel@tonic-gate for (;;) {
17197c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_creator_lock);
17207c478bd9Sstevel@tonic-gate
17217c478bd9Sstevel@tonic-gate /* Check if someone set the exit flag */
17227c478bd9Sstevel@tonic-gate if (pool->p_creator_exit)
17237c478bd9Sstevel@tonic-gate break;
17247c478bd9Sstevel@tonic-gate
17257c478bd9Sstevel@tonic-gate /* Clear the `signaled' flag and go asleep */
17267c478bd9Sstevel@tonic-gate pool->p_creator_signaled = FALSE;
17277c478bd9Sstevel@tonic-gate
17287c478bd9Sstevel@tonic-gate CALLB_CPR_SAFE_BEGIN(&cpr_info);
17297c478bd9Sstevel@tonic-gate cv_wait(&pool->p_creator_cv, &pool->p_creator_lock);
17307c478bd9Sstevel@tonic-gate CALLB_CPR_SAFE_END(&cpr_info, &pool->p_creator_lock);
17317c478bd9Sstevel@tonic-gate
17327c478bd9Sstevel@tonic-gate /* Check if someone signaled to exit */
17337c478bd9Sstevel@tonic-gate if (pool->p_creator_exit)
17347c478bd9Sstevel@tonic-gate break;
17357c478bd9Sstevel@tonic-gate
17367c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_creator_lock);
17377c478bd9Sstevel@tonic-gate
17387c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_thread_lock);
17397c478bd9Sstevel@tonic-gate
17407c478bd9Sstevel@tonic-gate /*
17417c478bd9Sstevel@tonic-gate * When the pool is in closing state and all the transports
17427c478bd9Sstevel@tonic-gate * are gone the creator should not create any new threads.
17437c478bd9Sstevel@tonic-gate */
17447c478bd9Sstevel@tonic-gate if (pool->p_closing) {
17457c478bd9Sstevel@tonic-gate rw_enter(&pool->p_lrwlock, RW_READER);
17467c478bd9Sstevel@tonic-gate if (pool->p_lcount == 0) {
17477c478bd9Sstevel@tonic-gate rw_exit(&pool->p_lrwlock);
17487c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_thread_lock);
17497c478bd9Sstevel@tonic-gate continue;
17507c478bd9Sstevel@tonic-gate }
17517c478bd9Sstevel@tonic-gate rw_exit(&pool->p_lrwlock);
17527c478bd9Sstevel@tonic-gate }
17537c478bd9Sstevel@tonic-gate
17547c478bd9Sstevel@tonic-gate /*
17557c478bd9Sstevel@tonic-gate * Create a new service thread now.
17567c478bd9Sstevel@tonic-gate */
17577c478bd9Sstevel@tonic-gate ASSERT(pool->p_reserved_threads >= 0);
17587c478bd9Sstevel@tonic-gate ASSERT(pool->p_detached_threads >= 0);
17597c478bd9Sstevel@tonic-gate
17607c478bd9Sstevel@tonic-gate if (pool->p_threads + pool->p_detached_threads <
17617c478bd9Sstevel@tonic-gate pool->p_maxthreads) {
17627c478bd9Sstevel@tonic-gate /*
17637c478bd9Sstevel@tonic-gate * Signal the service pool wait thread
17647c478bd9Sstevel@tonic-gate * only if it hasn't already been signaled.
17657c478bd9Sstevel@tonic-gate */
17667c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_user_lock);
17677c478bd9Sstevel@tonic-gate if (pool->p_signal_create_thread == FALSE) {
17687c478bd9Sstevel@tonic-gate pool->p_signal_create_thread = TRUE;
17697c478bd9Sstevel@tonic-gate cv_signal(&pool->p_user_cv);
17707c478bd9Sstevel@tonic-gate }
17717c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_user_lock);
17727c478bd9Sstevel@tonic-gate
17737c478bd9Sstevel@tonic-gate }
17747c478bd9Sstevel@tonic-gate
17757c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_thread_lock);
17767c478bd9Sstevel@tonic-gate }
17777c478bd9Sstevel@tonic-gate
17787c478bd9Sstevel@tonic-gate /*
17797c478bd9Sstevel@tonic-gate * Pool is closed. Cleanup and exit.
17807c478bd9Sstevel@tonic-gate */
17817c478bd9Sstevel@tonic-gate
17827c478bd9Sstevel@tonic-gate /* Signal userland creator thread that it can stop now. */
17837c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_user_lock);
17847c478bd9Sstevel@tonic-gate pool->p_user_exit = TRUE;
17857c478bd9Sstevel@tonic-gate cv_broadcast(&pool->p_user_cv);
17867c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_user_lock);
17877c478bd9Sstevel@tonic-gate
17887c478bd9Sstevel@tonic-gate /* Wait for svc_wait() to be done with the pool */
17897c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_user_lock);
17907c478bd9Sstevel@tonic-gate while (pool->p_user_waiting) {
17917c478bd9Sstevel@tonic-gate CALLB_CPR_SAFE_BEGIN(&cpr_info);
17927c478bd9Sstevel@tonic-gate cv_wait(&pool->p_user_cv, &pool->p_user_lock);
17937c478bd9Sstevel@tonic-gate CALLB_CPR_SAFE_END(&cpr_info, &pool->p_creator_lock);
17947c478bd9Sstevel@tonic-gate }
17957c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_user_lock);
17967c478bd9Sstevel@tonic-gate
17977c478bd9Sstevel@tonic-gate CALLB_CPR_EXIT(&cpr_info);
17987c478bd9Sstevel@tonic-gate svc_pool_cleanup(pool);
17997c478bd9Sstevel@tonic-gate zthread_exit();
18007c478bd9Sstevel@tonic-gate }
18017c478bd9Sstevel@tonic-gate
18027c478bd9Sstevel@tonic-gate /*
18037c478bd9Sstevel@tonic-gate * If the creator thread is idle signal it to create
18047c478bd9Sstevel@tonic-gate * a new service thread.
18057c478bd9Sstevel@tonic-gate */
18067c478bd9Sstevel@tonic-gate static void
18077c478bd9Sstevel@tonic-gate svc_creator_signal(SVCPOOL *pool)
18087c478bd9Sstevel@tonic-gate {
18097c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_creator_lock);
18107c478bd9Sstevel@tonic-gate if (pool->p_creator_signaled == FALSE) {
18117c478bd9Sstevel@tonic-gate pool->p_creator_signaled = TRUE;
18127c478bd9Sstevel@tonic-gate cv_signal(&pool->p_creator_cv);
18137c478bd9Sstevel@tonic-gate }
18147c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_creator_lock);
18157c478bd9Sstevel@tonic-gate }
18167c478bd9Sstevel@tonic-gate
18177c478bd9Sstevel@tonic-gate /*
18187c478bd9Sstevel@tonic-gate * Notify the creator thread to clean up and exit.
18197c478bd9Sstevel@tonic-gate */
18207c478bd9Sstevel@tonic-gate static void
18217c478bd9Sstevel@tonic-gate svc_creator_signalexit(SVCPOOL *pool)
18227c478bd9Sstevel@tonic-gate {
18237c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_creator_lock);
18247c478bd9Sstevel@tonic-gate pool->p_creator_exit = TRUE;
18257c478bd9Sstevel@tonic-gate cv_signal(&pool->p_creator_cv);
18267c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_creator_lock);
18277c478bd9Sstevel@tonic-gate }
18287c478bd9Sstevel@tonic-gate
18297c478bd9Sstevel@tonic-gate /*
18307c478bd9Sstevel@tonic-gate * Polling part of the svc_run().
18317c478bd9Sstevel@tonic-gate * - search for a transport with a pending request
18327c478bd9Sstevel@tonic-gate * - when one is found then latch the request lock and return to svc_run()
18337c478bd9Sstevel@tonic-gate * - if there is no request go asleep and wait for a signal
18347c478bd9Sstevel@tonic-gate * - handle two exceptions:
18357c478bd9Sstevel@tonic-gate * a) current transport is closing
18367c478bd9Sstevel@tonic-gate * b) timeout waiting for a new request
18377c478bd9Sstevel@tonic-gate * in both cases return to svc_run()
18387c478bd9Sstevel@tonic-gate */
18397c478bd9Sstevel@tonic-gate static SVCMASTERXPRT *
18407c478bd9Sstevel@tonic-gate svc_poll(SVCPOOL *pool, SVCMASTERXPRT *xprt, SVCXPRT *clone_xprt)
18417c478bd9Sstevel@tonic-gate {
18427c478bd9Sstevel@tonic-gate /*
18437c478bd9Sstevel@tonic-gate * Main loop iterates until
18447c478bd9Sstevel@tonic-gate * a) we find a pending request,
18457c478bd9Sstevel@tonic-gate * b) we detect that the current transport is closing, or
18467c478bd9Sstevel@tonic-gate * c) we time out waiting for a new request.
18477c478bd9Sstevel@tonic-gate */
18487c478bd9Sstevel@tonic-gate for (;;) {
18497c478bd9Sstevel@tonic-gate SVCMASTERXPRT *next;
18507c478bd9Sstevel@tonic-gate clock_t timeleft;
18517c478bd9Sstevel@tonic-gate
18527c478bd9Sstevel@tonic-gate /*
18537c478bd9Sstevel@tonic-gate * Step 1.
18547c478bd9Sstevel@tonic-gate * Check if there is a pending request on the current
18557c478bd9Sstevel@tonic-gate * transport handle so that we can avoid cloning.
18567c478bd9Sstevel@tonic-gate * If so then decrement the `pending-request' count for
18577c478bd9Sstevel@tonic-gate * the pool and return to svc_run().
18587c478bd9Sstevel@tonic-gate *
18597c478bd9Sstevel@tonic-gate * We need to prevent a potential starvation. When
18607c478bd9Sstevel@tonic-gate * a selected transport has all pending requests coming in
18617c478bd9Sstevel@tonic-gate * all the time then the service threads will never switch to
18627c478bd9Sstevel@tonic-gate * another transport. With a limited number of service
18637c478bd9Sstevel@tonic-gate * threads some transports may never be serviced.
18647c478bd9Sstevel@tonic-gate * To prevent such a scenario we pick up at most
18657c478bd9Sstevel@tonic-gate * pool->p_max_same_xprt requests from the same transport
18667c478bd9Sstevel@tonic-gate * and then take a hint from the xprt-ready queue or walk
18677c478bd9Sstevel@tonic-gate * the transport list.
18687c478bd9Sstevel@tonic-gate */
18697c478bd9Sstevel@tonic-gate if (xprt && xprt->xp_req_head && (!pool->p_qoverflow ||
18707c478bd9Sstevel@tonic-gate clone_xprt->xp_same_xprt++ < pool->p_max_same_xprt)) {
18717c478bd9Sstevel@tonic-gate mutex_enter(&xprt->xp_req_lock);
1872*2695d4f4SMarcel Telka if (xprt->xp_req_head)
18737c478bd9Sstevel@tonic-gate return (xprt);
18747c478bd9Sstevel@tonic-gate mutex_exit(&xprt->xp_req_lock);
18757c478bd9Sstevel@tonic-gate }
18767c478bd9Sstevel@tonic-gate clone_xprt->xp_same_xprt = 0;
18777c478bd9Sstevel@tonic-gate
18787c478bd9Sstevel@tonic-gate /*
18797c478bd9Sstevel@tonic-gate * Step 2.
18807c478bd9Sstevel@tonic-gate * If there is no request on the current transport try to
18817c478bd9Sstevel@tonic-gate * find another transport with a pending request.
18827c478bd9Sstevel@tonic-gate */
18837c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_req_lock);
18847c478bd9Sstevel@tonic-gate pool->p_walkers++;
18857c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_req_lock);
18867c478bd9Sstevel@tonic-gate
18877c478bd9Sstevel@tonic-gate /*
18887c478bd9Sstevel@tonic-gate * Make sure that transports will not be destroyed just
18897c478bd9Sstevel@tonic-gate * while we are checking them.
18907c478bd9Sstevel@tonic-gate */
18917c478bd9Sstevel@tonic-gate rw_enter(&pool->p_lrwlock, RW_READER);
18927c478bd9Sstevel@tonic-gate
18937c478bd9Sstevel@tonic-gate for (;;) {
18947c478bd9Sstevel@tonic-gate SVCMASTERXPRT *hint;
18957c478bd9Sstevel@tonic-gate
18967c478bd9Sstevel@tonic-gate /*
18977c478bd9Sstevel@tonic-gate * Get the next transport from the xprt-ready queue.
18987c478bd9Sstevel@tonic-gate * This is a hint. There is no guarantee that the
18997c478bd9Sstevel@tonic-gate * transport still has a pending request since it
19007c478bd9Sstevel@tonic-gate * could be picked up by another thread in step 1.
19017c478bd9Sstevel@tonic-gate *
19027c478bd9Sstevel@tonic-gate * If the transport has a pending request then keep
19037c478bd9Sstevel@tonic-gate * it locked. Decrement the `pending-requests' for
19047c478bd9Sstevel@tonic-gate * the pool and `walking-threads' counts, and return
19057c478bd9Sstevel@tonic-gate * to svc_run().
19067c478bd9Sstevel@tonic-gate */
19077c478bd9Sstevel@tonic-gate hint = svc_xprt_qget(pool);
19087c478bd9Sstevel@tonic-gate
19097c478bd9Sstevel@tonic-gate if (hint && hint->xp_req_head) {
19107c478bd9Sstevel@tonic-gate mutex_enter(&hint->xp_req_lock);
19117c478bd9Sstevel@tonic-gate if (hint->xp_req_head) {
19127c478bd9Sstevel@tonic-gate rw_exit(&pool->p_lrwlock);
19137c478bd9Sstevel@tonic-gate
19147c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_req_lock);
19157c478bd9Sstevel@tonic-gate pool->p_walkers--;
19167c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_req_lock);
19177c478bd9Sstevel@tonic-gate
19187c478bd9Sstevel@tonic-gate return (hint);
19197c478bd9Sstevel@tonic-gate }
19207c478bd9Sstevel@tonic-gate mutex_exit(&hint->xp_req_lock);
19217c478bd9Sstevel@tonic-gate }
19227c478bd9Sstevel@tonic-gate
19237c478bd9Sstevel@tonic-gate /*
19247c478bd9Sstevel@tonic-gate * If there was no hint in the xprt-ready queue then
19257c478bd9Sstevel@tonic-gate * - if there are fewer pending requests than polling
19267c478bd9Sstevel@tonic-gate * threads go asleep
19277c478bd9Sstevel@tonic-gate * - otherwise check if there was an overflow in the
19287c478bd9Sstevel@tonic-gate * xprt-ready queue; if so, then we need to break
19297c478bd9Sstevel@tonic-gate * the `drain' mode
19307c478bd9Sstevel@tonic-gate */
19317c478bd9Sstevel@tonic-gate if (hint == NULL) {
19327c478bd9Sstevel@tonic-gate if (pool->p_reqs < pool->p_walkers) {
19337c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_req_lock);
19347c478bd9Sstevel@tonic-gate if (pool->p_reqs < pool->p_walkers)
19357c478bd9Sstevel@tonic-gate goto sleep;
19367c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_req_lock);
19377c478bd9Sstevel@tonic-gate }
19387c478bd9Sstevel@tonic-gate if (pool->p_qoverflow) {
19397c478bd9Sstevel@tonic-gate break;
19407c478bd9Sstevel@tonic-gate }
19417c478bd9Sstevel@tonic-gate }
19427c478bd9Sstevel@tonic-gate }
19437c478bd9Sstevel@tonic-gate
19447c478bd9Sstevel@tonic-gate /*
19457c478bd9Sstevel@tonic-gate * If there was an overflow in the xprt-ready queue then we
19467c478bd9Sstevel@tonic-gate * need to switch to the `drain' mode, i.e. walk through the
19477c478bd9Sstevel@tonic-gate * pool's transport list and search for a transport with a
19487c478bd9Sstevel@tonic-gate * pending request. If we manage to drain all the pending
19497c478bd9Sstevel@tonic-gate * requests then we can clear the overflow flag. This will
19507c478bd9Sstevel@tonic-gate * switch svc_poll() back to taking hints from the xprt-ready
19517c478bd9Sstevel@tonic-gate * queue (which is generally more efficient).
19527c478bd9Sstevel@tonic-gate *
19537c478bd9Sstevel@tonic-gate * If there are no registered transports simply go asleep.
19547c478bd9Sstevel@tonic-gate */
19557c478bd9Sstevel@tonic-gate if (xprt == NULL && pool->p_lhead == NULL) {
19567c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_req_lock);
19577c478bd9Sstevel@tonic-gate goto sleep;
19587c478bd9Sstevel@tonic-gate }
19597c478bd9Sstevel@tonic-gate
19607c478bd9Sstevel@tonic-gate /*
19617c478bd9Sstevel@tonic-gate * `Walk' through the pool's list of master server
19627c478bd9Sstevel@tonic-gate * transport handles. Continue to loop until there are fewer
19637c478bd9Sstevel@tonic-gate * pending requests than walking threads.
19647c478bd9Sstevel@tonic-gate */
19657c478bd9Sstevel@tonic-gate next = xprt ? xprt->xp_next : pool->p_lhead;
19667c478bd9Sstevel@tonic-gate
19677c478bd9Sstevel@tonic-gate for (;;) {
19687c478bd9Sstevel@tonic-gate /*
19697c478bd9Sstevel@tonic-gate * Check if there is a request on this transport.
19707c478bd9Sstevel@tonic-gate *
19717c478bd9Sstevel@tonic-gate * Since blocking on a locked mutex is very expensive
19727c478bd9Sstevel@tonic-gate * check for a request without a lock first. We may miss
19737c478bd9Sstevel@tonic-gate * a request that is just being delivered, but this will
19747c478bd9Sstevel@tonic-gate * cost at most one full walk through the list.
19757c478bd9Sstevel@tonic-gate */
19767c478bd9Sstevel@tonic-gate if (next->xp_req_head) {
19777c478bd9Sstevel@tonic-gate /*
19787c478bd9Sstevel@tonic-gate * Check again, now with a lock.
19797c478bd9Sstevel@tonic-gate */
19807c478bd9Sstevel@tonic-gate mutex_enter(&next->xp_req_lock);
19817c478bd9Sstevel@tonic-gate if (next->xp_req_head) {
19827c478bd9Sstevel@tonic-gate rw_exit(&pool->p_lrwlock);
19837c478bd9Sstevel@tonic-gate
19847c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_req_lock);
19857c478bd9Sstevel@tonic-gate pool->p_walkers--;
19867c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_req_lock);
19877c478bd9Sstevel@tonic-gate
19887c478bd9Sstevel@tonic-gate return (next);
19897c478bd9Sstevel@tonic-gate }
19907c478bd9Sstevel@tonic-gate mutex_exit(&next->xp_req_lock);
19917c478bd9Sstevel@tonic-gate }
19927c478bd9Sstevel@tonic-gate
19937c478bd9Sstevel@tonic-gate /*
19947c478bd9Sstevel@tonic-gate * Continue to `walk' through the pool's
19957c478bd9Sstevel@tonic-gate * transport list until there are fewer requests
19967c478bd9Sstevel@tonic-gate * than walkers. Check this condition without
19977c478bd9Sstevel@tonic-gate * a lock first to avoid contention on a mutex.
19987c478bd9Sstevel@tonic-gate */
19997c478bd9Sstevel@tonic-gate if (pool->p_reqs < pool->p_walkers) {
200007e75131Sgt29601 /* Check again, now with the lock. */
20017c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_req_lock);
20027c478bd9Sstevel@tonic-gate if (pool->p_reqs < pool->p_walkers)
20037c478bd9Sstevel@tonic-gate break; /* goto sleep */
20047c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_req_lock);
20057c478bd9Sstevel@tonic-gate }
20067c478bd9Sstevel@tonic-gate
20077c478bd9Sstevel@tonic-gate next = next->xp_next;
20087c478bd9Sstevel@tonic-gate }
20097c478bd9Sstevel@tonic-gate
20107c478bd9Sstevel@tonic-gate sleep:
20117c478bd9Sstevel@tonic-gate /*
20127c478bd9Sstevel@tonic-gate * No work to do. Stop the `walk' and go asleep.
20137c478bd9Sstevel@tonic-gate * Decrement the `walking-threads' count for the pool.
20147c478bd9Sstevel@tonic-gate */
20157c478bd9Sstevel@tonic-gate pool->p_walkers--;
20167c478bd9Sstevel@tonic-gate rw_exit(&pool->p_lrwlock);
20177c478bd9Sstevel@tonic-gate
20187c478bd9Sstevel@tonic-gate /*
20197c478bd9Sstevel@tonic-gate * Count us as asleep, mark this thread as safe
20207c478bd9Sstevel@tonic-gate * for suspend and wait for a request.
20217c478bd9Sstevel@tonic-gate */
20227c478bd9Sstevel@tonic-gate pool->p_asleep++;
2023d3d50737SRafael Vanoni timeleft = cv_reltimedwait_sig(&pool->p_req_cv,
2024d3d50737SRafael Vanoni &pool->p_req_lock, pool->p_timeout, TR_CLOCK_TICK);
20257c478bd9Sstevel@tonic-gate
20267c478bd9Sstevel@tonic-gate /*
20277c478bd9Sstevel@tonic-gate * If the drowsy flag is on this means that
20287c478bd9Sstevel@tonic-gate * someone has signaled a wakeup. In such a case
20297c478bd9Sstevel@tonic-gate * the `asleep-threads' count has already been updated,
20307c478bd9Sstevel@tonic-gate * so just clear the flag.
20317c478bd9Sstevel@tonic-gate *
20327c478bd9Sstevel@tonic-gate * If the drowsy flag is off then we need to update
20337c478bd9Sstevel@tonic-gate * the `asleep-threads' count.
20347c478bd9Sstevel@tonic-gate */
20357c478bd9Sstevel@tonic-gate if (pool->p_drowsy) {
20367c478bd9Sstevel@tonic-gate pool->p_drowsy = FALSE;
20377c478bd9Sstevel@tonic-gate /*
20387c478bd9Sstevel@tonic-gate * If the thread is here because it timed out,
20397c478bd9Sstevel@tonic-gate * instead of returning SVC_ETIMEDOUT, it is
20407c478bd9Sstevel@tonic-gate * time to do some more work.
20417c478bd9Sstevel@tonic-gate */
20427c478bd9Sstevel@tonic-gate if (timeleft == -1)
20437c478bd9Sstevel@tonic-gate timeleft = 1;
20447c478bd9Sstevel@tonic-gate } else {
20457c478bd9Sstevel@tonic-gate pool->p_asleep--;
20467c478bd9Sstevel@tonic-gate }
20477c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_req_lock);
20487c478bd9Sstevel@tonic-gate
20497c478bd9Sstevel@tonic-gate /*
20507c478bd9Sstevel@tonic-gate * If we received a signal while waiting for a
20517c478bd9Sstevel@tonic-gate * request, inform svc_run(), so that we can return
2052f7b93e0cSVallish Vaidyeshwara * to user level and exit.
20537c478bd9Sstevel@tonic-gate */
20547c478bd9Sstevel@tonic-gate if (timeleft == 0)
20557c478bd9Sstevel@tonic-gate return (SVC_EINTR);
20567c478bd9Sstevel@tonic-gate
20577c478bd9Sstevel@tonic-gate /*
20587c478bd9Sstevel@tonic-gate * If the current transport is gone then notify
20597c478bd9Sstevel@tonic-gate * svc_run() to unlink from it.
20607c478bd9Sstevel@tonic-gate */
20617c478bd9Sstevel@tonic-gate if (xprt && xprt->xp_wq == NULL)
20627c478bd9Sstevel@tonic-gate return (SVC_EXPRTGONE);
20637c478bd9Sstevel@tonic-gate
20647c478bd9Sstevel@tonic-gate /*
20657c478bd9Sstevel@tonic-gate * If we have timed out waiting for a request inform
20667c478bd9Sstevel@tonic-gate * svc_run() that we probably don't need this thread.
20677c478bd9Sstevel@tonic-gate */
20687c478bd9Sstevel@tonic-gate if (timeleft == -1)
20697c478bd9Sstevel@tonic-gate return (SVC_ETIMEDOUT);
20707c478bd9Sstevel@tonic-gate }
20717c478bd9Sstevel@tonic-gate }
20727c478bd9Sstevel@tonic-gate
20737c478bd9Sstevel@tonic-gate /*
2074*2695d4f4SMarcel Telka * Calculate the memory size used by a message.
2075*2695d4f4SMarcel Telka */
2076*2695d4f4SMarcel Telka static size_t
2077*2695d4f4SMarcel Telka svc_msgsize(mblk_t *mp)
2078*2695d4f4SMarcel Telka {
2079*2695d4f4SMarcel Telka size_t count = 0;
2080*2695d4f4SMarcel Telka
2081*2695d4f4SMarcel Telka for (; mp; mp = mp->b_cont)
2082*2695d4f4SMarcel Telka count += MBLKSIZE(mp);
2083*2695d4f4SMarcel Telka
2084*2695d4f4SMarcel Telka return (count);
2085*2695d4f4SMarcel Telka }
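
/*
 * Illustrative note (not part of the original source): svc_msgsize() sums
 * MBLKSIZE() over the b_cont chain, i.e. the allocated size of each data
 * block, not just the bytes between b_rptr and b_wptr.  For example, a
 * hypothetical three-block chain with 1024-, 2048- and 4096-byte data
 * blocks is accounted as 7168 bytes against xp_size/p_size, regardless of
 * how much of each block is actually filled.
 */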
2086*2695d4f4SMarcel Telka
2087*2695d4f4SMarcel Telka /*
2088*2695d4f4SMarcel Telka * svc_flowcontrol() attempts to turn the flow control on or off for the
2089*2695d4f4SMarcel Telka * transport.
2090*2695d4f4SMarcel Telka *
2091*2695d4f4SMarcel Telka * On input the xprt->xp_full determines whether the flow control is currently
2092*2695d4f4SMarcel Telka * off (FALSE) or on (TRUE). If it is off we do tests to see whether we should
2093*2695d4f4SMarcel Telka * turn it on, and vice versa.
2094*2695d4f4SMarcel Telka *
2095*2695d4f4SMarcel Telka * There are two conditions considered for the flow control. Both conditions
2096*2695d4f4SMarcel Telka * have the low and the high watermark. Once the high watermark is reached in
2097*2695d4f4SMarcel Telka * EITHER condition the flow control is turned on. For turning the flow
2098*2695d4f4SMarcel Telka * control off BOTH conditions must be below the low watermark.
2099*2695d4f4SMarcel Telka *
2100*2695d4f4SMarcel Telka * Condition #1 - Number of requests queued:
2101*2695d4f4SMarcel Telka *
2102*2695d4f4SMarcel Telka * The max number of threads working on the pool is roughly pool->p_maxthreads.
2103*2695d4f4SMarcel Telka * Every thread could handle up to pool->p_max_same_xprt requests from one
2104*2695d4f4SMarcel Telka * transport before it moves to another transport. See svc_poll() for details.
2105*2695d4f4SMarcel Telka * In case all threads in the pool are working on a transport they will handle
2106*2695d4f4SMarcel Telka * no more than enough_reqs (pool->p_maxthreads * pool->p_max_same_xprt)
2107*2695d4f4SMarcel Telka * requests in one shot from that transport. We are turning the flow control
2108*2695d4f4SMarcel Telka * on once the high watermark is reached for a transport so that the underlying
2109*2695d4f4SMarcel Telka * queue knows the rate of incoming requests is higher than we are able to
2110*2695d4f4SMarcel Telka * handle.
2111*2695d4f4SMarcel Telka *
2112*2695d4f4SMarcel Telka * The high watermark: 2 * enough_reqs
2113*2695d4f4SMarcel Telka * The low watermark: enough_reqs
2114*2695d4f4SMarcel Telka *
2115*2695d4f4SMarcel Telka * Condition #2 - Length of the data payload for the queued messages/requests:
2116*2695d4f4SMarcel Telka *
2117*2695d4f4SMarcel Telka * We want to prevent a particular pool from exhausting the memory, so once the
2118*2695d4f4SMarcel Telka * total length of queued requests for the whole pool reaches the high
2119*2695d4f4SMarcel Telka * watermark we start to turn on the flow control for significant memory
2120*2695d4f4SMarcel Telka * consumers (individual transports). To keep the implementation simple
2121*2695d4f4SMarcel Telka * enough, this condition is not exact, because we count only the data part of
2122*2695d4f4SMarcel Telka * the queued requests and we ignore the overhead. For our purposes this
2123*2695d4f4SMarcel Telka * should be enough. We should also consider that up to pool->p_maxthreads
2124*2695d4f4SMarcel Telka * threads for the pool might work on large requests (this is not counted for
2125*2695d4f4SMarcel Telka * this condition). We need to leave some space for the rest of the system and
2126*2695d4f4SMarcel Telka * other big memory consumers (like ZFS). Also, after the flow control is
2127*2695d4f4SMarcel Telka * turned on (on cots transports) we can start to accumulate a few megabytes in
2128*2695d4f4SMarcel Telka * queues for each transport.
2129*2695d4f4SMarcel Telka *
2130*2695d4f4SMarcel Telka * Usually, the big memory consumers are NFS WRITE requests, so we do not
2131*2695d4f4SMarcel Telka * expect to see this condition met for other than NFS pools.
2132*2695d4f4SMarcel Telka *
2133*2695d4f4SMarcel Telka * The high watermark: 1/5 of available memory
2134*2695d4f4SMarcel Telka * The low watermark: 1/6 of available memory
2135*2695d4f4SMarcel Telka *
2136*2695d4f4SMarcel Telka * Once the high watermark is reached we turn the flow control on only for
2137*2695d4f4SMarcel Telka * transports exceeding a per-transport memory limit. The per-transport
2138*2695d4f4SMarcel Telka * fraction of memory is calculated as:
2139*2695d4f4SMarcel Telka *
2140*2695d4f4SMarcel Telka * the high watermark / number of transports
2141*2695d4f4SMarcel Telka *
2142*2695d4f4SMarcel Telka * For transports with less than the per-transport fraction of memory consumed,
2143*2695d4f4SMarcel Telka * the flow control is not turned on, so they are not blocked by a few "hungry"
2144*2695d4f4SMarcel Telka * transports. Because of this, the total memory consumption for the
2145*2695d4f4SMarcel Telka * particular pool might grow up to 2 * the high watermark.
2146*2695d4f4SMarcel Telka *
2147*2695d4f4SMarcel Telka * The individual transports are unblocked once their consumption is below:
2148*2695d4f4SMarcel Telka *
2149*2695d4f4SMarcel Telka * per-transport fraction of memory / 2
2150*2695d4f4SMarcel Telka *
2151*2695d4f4SMarcel Telka * or once the total memory consumption for the whole pool falls below the low
2152*2695d4f4SMarcel Telka * watermark.
2153*2695d4f4SMarcel Telka *
2154*2695d4f4SMarcel Telka */
2155*2695d4f4SMarcel Telka static void
2156*2695d4f4SMarcel Telka svc_flowcontrol(SVCMASTERXPRT *xprt)
2157*2695d4f4SMarcel Telka {
2158*2695d4f4SMarcel Telka SVCPOOL *pool = xprt->xp_pool;
2159*2695d4f4SMarcel Telka size_t totalmem = ptob(physmem);
2160*2695d4f4SMarcel Telka int enough_reqs = pool->p_maxthreads * pool->p_max_same_xprt;
2161*2695d4f4SMarcel Telka
2162*2695d4f4SMarcel Telka ASSERT(MUTEX_HELD(&xprt->xp_req_lock));
2163*2695d4f4SMarcel Telka
2164*2695d4f4SMarcel Telka /* Should we turn the flow control on? */
2165*2695d4f4SMarcel Telka if (xprt->xp_full == FALSE) {
2166*2695d4f4SMarcel Telka /* Is flow control disabled? */
2167*2695d4f4SMarcel Telka if (svc_flowcontrol_disable != 0)
2168*2695d4f4SMarcel Telka return;
2169*2695d4f4SMarcel Telka
2170*2695d4f4SMarcel Telka /* Are there enough requests queued? */
2171*2695d4f4SMarcel Telka if (xprt->xp_reqs >= enough_reqs * 2) {
2172*2695d4f4SMarcel Telka xprt->xp_full = TRUE;
2173*2695d4f4SMarcel Telka return;
2174*2695d4f4SMarcel Telka }
2175*2695d4f4SMarcel Telka
2176*2695d4f4SMarcel Telka /*
2177*2695d4f4SMarcel Telka * If this pool uses over 20% of memory and this transport is
2178*2695d4f4SMarcel Telka * a significant memory consumer then we are full
2179*2695d4f4SMarcel Telka */
2180*2695d4f4SMarcel Telka if (pool->p_size >= totalmem / 5 &&
2181*2695d4f4SMarcel Telka xprt->xp_size >= totalmem / 5 / pool->p_lcount)
2182*2695d4f4SMarcel Telka xprt->xp_full = TRUE;
2183*2695d4f4SMarcel Telka
2184*2695d4f4SMarcel Telka return;
2185*2695d4f4SMarcel Telka }
2186*2695d4f4SMarcel Telka
2187*2695d4f4SMarcel Telka /* We might want to turn the flow control off */
2188*2695d4f4SMarcel Telka
2189*2695d4f4SMarcel Telka /* Do we still have enough requests? */
2190*2695d4f4SMarcel Telka if (xprt->xp_reqs > enough_reqs)
2191*2695d4f4SMarcel Telka return;
2192*2695d4f4SMarcel Telka
2193*2695d4f4SMarcel Telka /*
2194*2695d4f4SMarcel Telka * If this pool still uses over 16% of memory and this transport is
2195*2695d4f4SMarcel Telka * still a significant memory consumer then we are still full
2196*2695d4f4SMarcel Telka */
2197*2695d4f4SMarcel Telka if (pool->p_size >= totalmem / 6 &&
2198*2695d4f4SMarcel Telka xprt->xp_size >= totalmem / 5 / pool->p_lcount / 2)
2199*2695d4f4SMarcel Telka return;
2200*2695d4f4SMarcel Telka
2201*2695d4f4SMarcel Telka /* Turn the flow control off and make sure rpcmod is notified */
2202*2695d4f4SMarcel Telka xprt->xp_full = FALSE;
2203*2695d4f4SMarcel Telka xprt->xp_enable = TRUE;
2204*2695d4f4SMarcel Telka }
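
/*
 * Worked example for the watermarks above (illustrative only; the numbers
 * are hypothetical, not defaults taken from this file):
 *
 *	p_maxthreads = 256, p_max_same_xprt = 8
 *		enough_reqs          = 256 * 8 = 2048
 *		request high WM      = 2 * 2048 = 4096 queued requests
 *		request low WM       = 2048 queued requests
 *
 *	total physical memory (ptob(physmem)) = 8 GB, p_lcount = 4 transports
 *		pool high WM             = 8 GB / 5 = ~1.6 GB of queued data
 *		pool low WM              = 8 GB / 6 = ~1.33 GB of queued data
 *		per-transport fraction   = 1.6 GB / 4 = ~400 MB
 *		per-transport unblock at = ~200 MB
 *
 * With these numbers a transport is flow controlled once it has 4096 or
 * more requests queued, or once the pool holds at least ~1.6 GB of queued
 * data and the transport itself holds at least ~400 MB of it.
 */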
2205*2695d4f4SMarcel Telka
2206*2695d4f4SMarcel Telka /*
22077c478bd9Sstevel@tonic-gate * Main loop of the kernel RPC server
22087c478bd9Sstevel@tonic-gate * - wait for input (find a transport with a pending request).
22097c478bd9Sstevel@tonic-gate * - dequeue the request
22107c478bd9Sstevel@tonic-gate * - call a registered server routine to process the requests
22117c478bd9Sstevel@tonic-gate *
22127c478bd9Sstevel@tonic-gate * There can be many threads running concurrently in this loop
22137c478bd9Sstevel@tonic-gate * on the same or on different transports.
22147c478bd9Sstevel@tonic-gate */
22157c478bd9Sstevel@tonic-gate static int
22167c478bd9Sstevel@tonic-gate svc_run(SVCPOOL *pool)
22177c478bd9Sstevel@tonic-gate {
22187c478bd9Sstevel@tonic-gate SVCMASTERXPRT *xprt = NULL; /* master transport handle */
22197c478bd9Sstevel@tonic-gate SVCXPRT *clone_xprt; /* clone for this thread */
22207c478bd9Sstevel@tonic-gate proc_t *p = ttoproc(curthread);
22217c478bd9Sstevel@tonic-gate
22227c478bd9Sstevel@tonic-gate /* Allocate a clone transport handle for this thread */
22237c478bd9Sstevel@tonic-gate clone_xprt = svc_clone_init();
22247c478bd9Sstevel@tonic-gate
22257c478bd9Sstevel@tonic-gate /*
22267c478bd9Sstevel@tonic-gate * The loop iterates until the thread becomes
22277c478bd9Sstevel@tonic-gate * idle too long or the transport is gone.
22287c478bd9Sstevel@tonic-gate */
22297c478bd9Sstevel@tonic-gate for (;;) {
22307c478bd9Sstevel@tonic-gate SVCMASTERXPRT *next;
22317c478bd9Sstevel@tonic-gate mblk_t *mp;
2232*2695d4f4SMarcel Telka bool_t enable;
2233*2695d4f4SMarcel Telka size_t size;
22347c478bd9Sstevel@tonic-gate
22357c478bd9Sstevel@tonic-gate TRACE_0(TR_FAC_KRPC, TR_SVC_RUN, "svc_run");
22367c478bd9Sstevel@tonic-gate
22377c478bd9Sstevel@tonic-gate /*
22387c478bd9Sstevel@tonic-gate * If the process is exiting/killed, return
22397c478bd9Sstevel@tonic-gate * immediately without processing any more
22407c478bd9Sstevel@tonic-gate * requests.
22417c478bd9Sstevel@tonic-gate */
224297eda132Sraf if (p->p_flag & (SEXITING | SKILLED)) {
22437c478bd9Sstevel@tonic-gate svc_thread_exit(pool, clone_xprt);
2244f7b93e0cSVallish Vaidyeshwara return (EINTR);
22457c478bd9Sstevel@tonic-gate }
22467c478bd9Sstevel@tonic-gate
22477c478bd9Sstevel@tonic-gate /* Find a transport with a pending request */
22487c478bd9Sstevel@tonic-gate next = svc_poll(pool, xprt, clone_xprt);
22497c478bd9Sstevel@tonic-gate
22507c478bd9Sstevel@tonic-gate /*
22517c478bd9Sstevel@tonic-gate * If svc_poll() finds a transport with a request
22527c478bd9Sstevel@tonic-gate * it latches xp_req_lock on it. Therefore we need
22537c478bd9Sstevel@tonic-gate * to dequeue the request and release the lock as
22547c478bd9Sstevel@tonic-gate * soon as possible.
22557c478bd9Sstevel@tonic-gate */
22567c478bd9Sstevel@tonic-gate ASSERT(next != NULL &&
22577c478bd9Sstevel@tonic-gate (next == SVC_EXPRTGONE ||
22587c478bd9Sstevel@tonic-gate next == SVC_ETIMEDOUT ||
22597c478bd9Sstevel@tonic-gate next == SVC_EINTR ||
22607c478bd9Sstevel@tonic-gate MUTEX_HELD(&next->xp_req_lock)));
22617c478bd9Sstevel@tonic-gate
22627c478bd9Sstevel@tonic-gate /* Ooops! Current transport is closing. Unlink now */
22637c478bd9Sstevel@tonic-gate if (next == SVC_EXPRTGONE) {
22647c478bd9Sstevel@tonic-gate svc_clone_unlink(clone_xprt);
22657c478bd9Sstevel@tonic-gate xprt = NULL;
22667c478bd9Sstevel@tonic-gate continue;
22677c478bd9Sstevel@tonic-gate }
22687c478bd9Sstevel@tonic-gate
22697c478bd9Sstevel@tonic-gate /* Ooops! Timeout while waiting for a request. Exit */
22707c478bd9Sstevel@tonic-gate if (next == SVC_ETIMEDOUT) {
22717c478bd9Sstevel@tonic-gate svc_thread_exit(pool, clone_xprt);
22727c478bd9Sstevel@tonic-gate return (0);
22737c478bd9Sstevel@tonic-gate }
22747c478bd9Sstevel@tonic-gate
22757c478bd9Sstevel@tonic-gate /*
22767c478bd9Sstevel@tonic-gate * Interrupted by a signal while waiting for a
2277f7b93e0cSVallish Vaidyeshwara * request. Return to userspace and exit.
22787c478bd9Sstevel@tonic-gate */
22797c478bd9Sstevel@tonic-gate if (next == SVC_EINTR) {
22807c478bd9Sstevel@tonic-gate svc_thread_exit(pool, clone_xprt);
22817c478bd9Sstevel@tonic-gate return (EINTR);
22827c478bd9Sstevel@tonic-gate }
22837c478bd9Sstevel@tonic-gate
22847c478bd9Sstevel@tonic-gate /*
22857c478bd9Sstevel@tonic-gate * De-queue the request and release the request lock
22867c478bd9Sstevel@tonic-gate * on this transport (latched by svc_poll()).
22877c478bd9Sstevel@tonic-gate */
22887c478bd9Sstevel@tonic-gate mp = next->xp_req_head;
22897c478bd9Sstevel@tonic-gate next->xp_req_head = mp->b_next;
22907c478bd9Sstevel@tonic-gate mp->b_next = (mblk_t *)0;
2291*2695d4f4SMarcel Telka size = svc_msgsize(mp);
2292*2695d4f4SMarcel Telka
2293*2695d4f4SMarcel Telka mutex_enter(&pool->p_req_lock);
2294*2695d4f4SMarcel Telka pool->p_reqs--;
2295*2695d4f4SMarcel Telka if (pool->p_reqs == 0)
2296*2695d4f4SMarcel Telka pool->p_qoverflow = FALSE;
2297*2695d4f4SMarcel Telka pool->p_size -= size;
2298*2695d4f4SMarcel Telka mutex_exit(&pool->p_req_lock);
2299*2695d4f4SMarcel Telka
2300*2695d4f4SMarcel Telka next->xp_reqs--;
2301*2695d4f4SMarcel Telka next->xp_size -= size;
2302*2695d4f4SMarcel Telka
2303*2695d4f4SMarcel Telka if (next->xp_full)
2304*2695d4f4SMarcel Telka svc_flowcontrol(next);
23057c478bd9Sstevel@tonic-gate
23067c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_KRPC, TR_NFSFP_QUE_REQ_DEQ,
23077c478bd9Sstevel@tonic-gate "rpc_que_req_deq:pool %p mp %p", pool, mp);
23087c478bd9Sstevel@tonic-gate mutex_exit(&next->xp_req_lock);
23097c478bd9Sstevel@tonic-gate
23107c478bd9Sstevel@tonic-gate /*
23117c478bd9Sstevel@tonic-gate * If this is a new request on a current transport then
23127c478bd9Sstevel@tonic-gate * the clone structure is already properly initialized.
23137c478bd9Sstevel@tonic-gate * Otherwise, if the request is on a different transport,
23147c478bd9Sstevel@tonic-gate * unlink from the current master and link to
23157c478bd9Sstevel@tonic-gate * the one we got a request on.
23167c478bd9Sstevel@tonic-gate */
23177c478bd9Sstevel@tonic-gate if (next != xprt) {
23187c478bd9Sstevel@tonic-gate if (xprt)
23197c478bd9Sstevel@tonic-gate svc_clone_unlink(clone_xprt);
232060536ef9SKaren Rochford svc_clone_link(next, clone_xprt, NULL);
23217c478bd9Sstevel@tonic-gate xprt = next;
23227c478bd9Sstevel@tonic-gate }
23237c478bd9Sstevel@tonic-gate
23247c478bd9Sstevel@tonic-gate /*
23257c478bd9Sstevel@tonic-gate * If there are more requests and req_cv hasn't
23267c478bd9Sstevel@tonic-gate * been signaled yet then wake up one more thread now.
23277c478bd9Sstevel@tonic-gate *
23287c478bd9Sstevel@tonic-gate * We avoid signaling req_cv until the most recently
23297c478bd9Sstevel@tonic-gate * signaled thread wakes up and gets CPU to clear
23307c478bd9Sstevel@tonic-gate * the `drowsy' flag.
23317c478bd9Sstevel@tonic-gate */
23327c478bd9Sstevel@tonic-gate if (!(pool->p_drowsy || pool->p_reqs <= pool->p_walkers ||
23337c478bd9Sstevel@tonic-gate pool->p_asleep == 0)) {
23347c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_req_lock);
23357c478bd9Sstevel@tonic-gate
23367c478bd9Sstevel@tonic-gate if (pool->p_drowsy || pool->p_reqs <= pool->p_walkers ||
23377c478bd9Sstevel@tonic-gate pool->p_asleep == 0)
23387c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_req_lock);
23397c478bd9Sstevel@tonic-gate else {
23407c478bd9Sstevel@tonic-gate pool->p_asleep--;
23417c478bd9Sstevel@tonic-gate pool->p_drowsy = TRUE;
23427c478bd9Sstevel@tonic-gate
23437c478bd9Sstevel@tonic-gate cv_signal(&pool->p_req_cv);
23447c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_req_lock);
23457c478bd9Sstevel@tonic-gate }
23467c478bd9Sstevel@tonic-gate }
23477c478bd9Sstevel@tonic-gate
23487c478bd9Sstevel@tonic-gate /*
23497c478bd9Sstevel@tonic-gate * If there are no asleep/signaled threads, we are
23507c478bd9Sstevel@tonic-gate * still below pool->p_maxthreads limit, and no thread is
23517c478bd9Sstevel@tonic-gate * currently being created then signal the creator
23527c478bd9Sstevel@tonic-gate * for one more service thread.
23537c478bd9Sstevel@tonic-gate *
23547c478bd9Sstevel@tonic-gate * The asleep and drowsy checks are not protected
23557c478bd9Sstevel@tonic-gate * by a lock since it hurts performance and a wrong
23567c478bd9Sstevel@tonic-gate * decision is not essential.
23577c478bd9Sstevel@tonic-gate */
23587c478bd9Sstevel@tonic-gate if (pool->p_asleep == 0 && !pool->p_drowsy &&
23597c478bd9Sstevel@tonic-gate pool->p_threads + pool->p_detached_threads <
23607c478bd9Sstevel@tonic-gate pool->p_maxthreads)
23617c478bd9Sstevel@tonic-gate svc_creator_signal(pool);
23627c478bd9Sstevel@tonic-gate
23637c478bd9Sstevel@tonic-gate /*
23647c478bd9Sstevel@tonic-gate * Process the request.
23657c478bd9Sstevel@tonic-gate */
23667c478bd9Sstevel@tonic-gate svc_getreq(clone_xprt, mp);
23677c478bd9Sstevel@tonic-gate
23687c478bd9Sstevel@tonic-gate /* If thread had a reservation it should have been canceled */
23697c478bd9Sstevel@tonic-gate ASSERT(!clone_xprt->xp_reserved);
23707c478bd9Sstevel@tonic-gate
23717c478bd9Sstevel@tonic-gate /*
23727c478bd9Sstevel@tonic-gate * If the clone is marked detached then exit.
23737c478bd9Sstevel@tonic-gate * The rpcmod slot has already been released
23747c478bd9Sstevel@tonic-gate * when we detached this thread.
23757c478bd9Sstevel@tonic-gate */
23767c478bd9Sstevel@tonic-gate if (clone_xprt->xp_detached) {
23777c478bd9Sstevel@tonic-gate svc_thread_exitdetached(pool, clone_xprt);
23787c478bd9Sstevel@tonic-gate return (0);
23797c478bd9Sstevel@tonic-gate }
23807c478bd9Sstevel@tonic-gate
23817c478bd9Sstevel@tonic-gate /*
23827c478bd9Sstevel@tonic-gate * Release our reference on the rpcmod
23837c478bd9Sstevel@tonic-gate * slot attached to xp_wq->q_ptr.
23847c478bd9Sstevel@tonic-gate */
2385*2695d4f4SMarcel Telka mutex_enter(&xprt->xp_req_lock);
2386*2695d4f4SMarcel Telka enable = xprt->xp_enable;
2387*2695d4f4SMarcel Telka if (enable)
2388*2695d4f4SMarcel Telka xprt->xp_enable = FALSE;
2389*2695d4f4SMarcel Telka mutex_exit(&xprt->xp_req_lock);
2390*2695d4f4SMarcel Telka (*RELE_PROC(xprt)) (clone_xprt->xp_wq, NULL, enable);
23917c478bd9Sstevel@tonic-gate }
23927c478bd9Sstevel@tonic-gate /* NOTREACHED */
23937c478bd9Sstevel@tonic-gate }
23947c478bd9Sstevel@tonic-gate
23957c478bd9Sstevel@tonic-gate /*
23967c478bd9Sstevel@tonic-gate * Flush any pending requests for the queue and
2397*2695d4f4SMarcel Telka * free the associated mblks.
23987c478bd9Sstevel@tonic-gate */
23997c478bd9Sstevel@tonic-gate void
24007c478bd9Sstevel@tonic-gate svc_queueclean(queue_t *q)
24017c478bd9Sstevel@tonic-gate {
24027c478bd9Sstevel@tonic-gate SVCMASTERXPRT *xprt = ((void **) q->q_ptr)[0];
24037c478bd9Sstevel@tonic-gate mblk_t *mp;
240407e75131Sgt29601 SVCPOOL *pool;
24057c478bd9Sstevel@tonic-gate
24067c478bd9Sstevel@tonic-gate /*
24077c478bd9Sstevel@tonic-gate * clean up the requests
24087c478bd9Sstevel@tonic-gate */
24097c478bd9Sstevel@tonic-gate mutex_enter(&xprt->xp_req_lock);
241007e75131Sgt29601 pool = xprt->xp_pool;
24117c478bd9Sstevel@tonic-gate while ((mp = xprt->xp_req_head) != NULL) {
2412*2695d4f4SMarcel Telka /* remove the request from the list */
24137c478bd9Sstevel@tonic-gate xprt->xp_req_head = mp->b_next;
24147c478bd9Sstevel@tonic-gate mp->b_next = (mblk_t *)0;
2415*2695d4f4SMarcel Telka (*RELE_PROC(xprt)) (xprt->xp_wq, mp, FALSE);
24167c478bd9Sstevel@tonic-gate }
2417*2695d4f4SMarcel Telka
2418*2695d4f4SMarcel Telka mutex_enter(&pool->p_req_lock);
2419*2695d4f4SMarcel Telka pool->p_reqs -= xprt->xp_reqs;
2420*2695d4f4SMarcel Telka pool->p_size -= xprt->xp_size;
2421*2695d4f4SMarcel Telka mutex_exit(&pool->p_req_lock);
2422*2695d4f4SMarcel Telka
2423*2695d4f4SMarcel Telka xprt->xp_reqs = 0;
2424*2695d4f4SMarcel Telka xprt->xp_size = 0;
2425*2695d4f4SMarcel Telka xprt->xp_full = FALSE;
2426*2695d4f4SMarcel Telka xprt->xp_enable = FALSE;
24277c478bd9Sstevel@tonic-gate mutex_exit(&xprt->xp_req_lock);
24287c478bd9Sstevel@tonic-gate }
24297c478bd9Sstevel@tonic-gate
24307c478bd9Sstevel@tonic-gate /*
24317c478bd9Sstevel@tonic-gate * This routine is called by rpcmod to inform kernel RPC that a
24327c478bd9Sstevel@tonic-gate * queue is closing. It is called after all the requests have been
24337c478bd9Sstevel@tonic-gate * picked up (that is after all the slots on the queue have
24347c478bd9Sstevel@tonic-gate * been released by kernel RPC). It is also guaranteed that no more
24357c478bd9Sstevel@tonic-gate * requests will be delivered on this transport.
24367c478bd9Sstevel@tonic-gate *
24377c478bd9Sstevel@tonic-gate * - clear xp_wq to mark the master server transport handle as closing
24387c478bd9Sstevel@tonic-gate * - if there are no more threads on this transport close/destroy it
2439eaf32bf7SMarcel Telka * - otherwise, leave the linked threads to close/destroy the transport
2440eaf32bf7SMarcel Telka * later.
24417c478bd9Sstevel@tonic-gate */
24427c478bd9Sstevel@tonic-gate void
24437c478bd9Sstevel@tonic-gate svc_queueclose(queue_t *q)
24447c478bd9Sstevel@tonic-gate {
24457c478bd9Sstevel@tonic-gate SVCMASTERXPRT *xprt = ((void **) q->q_ptr)[0];
24467c478bd9Sstevel@tonic-gate
24477c478bd9Sstevel@tonic-gate if (xprt == NULL) {
24487c478bd9Sstevel@tonic-gate /*
24497c478bd9Sstevel@tonic-gate * If there is no master xprt associated with this stream,
24507c478bd9Sstevel@tonic-gate * then there is nothing to do. This happens regularly
24517c478bd9Sstevel@tonic-gate * with connection-oriented listening streams created by
24527c478bd9Sstevel@tonic-gate * nfsd.
24537c478bd9Sstevel@tonic-gate */
24547c478bd9Sstevel@tonic-gate return;
24557c478bd9Sstevel@tonic-gate }
24567c478bd9Sstevel@tonic-gate
24577c478bd9Sstevel@tonic-gate mutex_enter(&xprt->xp_thread_lock);
24587c478bd9Sstevel@tonic-gate
24597c478bd9Sstevel@tonic-gate ASSERT(xprt->xp_req_head == NULL);
24607c478bd9Sstevel@tonic-gate ASSERT(xprt->xp_wq != NULL);
24617c478bd9Sstevel@tonic-gate
24627c478bd9Sstevel@tonic-gate xprt->xp_wq = NULL;
24637c478bd9Sstevel@tonic-gate
24647c478bd9Sstevel@tonic-gate if (xprt->xp_threads == 0) {
24657c478bd9Sstevel@tonic-gate SVCPOOL *pool = xprt->xp_pool;
24667c478bd9Sstevel@tonic-gate
24677c478bd9Sstevel@tonic-gate /*
24687c478bd9Sstevel@tonic-gate * svc_xprt_cleanup() destroys the transport
24697c478bd9Sstevel@tonic-gate * or releases the transport thread lock
24707c478bd9Sstevel@tonic-gate */
24717c478bd9Sstevel@tonic-gate svc_xprt_cleanup(xprt, FALSE);
24727c478bd9Sstevel@tonic-gate
24737c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_thread_lock);
24747c478bd9Sstevel@tonic-gate
24757c478bd9Sstevel@tonic-gate /*
24767c478bd9Sstevel@tonic-gate * If the pool is in closing state and this was
24777c478bd9Sstevel@tonic-gate * the last transport in the pool then signal the creator
24787c478bd9Sstevel@tonic-gate * thread to clean up and exit.
24797c478bd9Sstevel@tonic-gate */
24807c478bd9Sstevel@tonic-gate if (pool->p_closing && svc_pool_tryexit(pool)) {
24817c478bd9Sstevel@tonic-gate return;
24827c478bd9Sstevel@tonic-gate }
24837c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_thread_lock);
24847c478bd9Sstevel@tonic-gate } else {
24857c478bd9Sstevel@tonic-gate /*
2486eaf32bf7SMarcel Telka * There are still some threads linked to the transport. They
2487eaf32bf7SMarcel Telka * are very likely sleeping in svc_poll(). We could wake up
2488eaf32bf7SMarcel Telka * them by broadcasting on the p_req_cv condition variable, but
2489eaf32bf7SMarcel Telka * that might give us a performance penalty if there are too
2490eaf32bf7SMarcel Telka * many sleeping threads.
2491eaf32bf7SMarcel Telka *
2492eaf32bf7SMarcel Telka * Instead, we do nothing here. The linked threads will unlink
2493eaf32bf7SMarcel Telka * themselves and destroy the transport once they are woken up
2494eaf32bf7SMarcel Telka * on timeout, or by a new request. There is no reason to hurry
2495eaf32bf7SMarcel Telka * up now with the thread wake up.
24967c478bd9Sstevel@tonic-gate */
24977c478bd9Sstevel@tonic-gate
24987c478bd9Sstevel@tonic-gate /*
24997c478bd9Sstevel@tonic-gate * NOTICE: No references to the master transport structure
25007c478bd9Sstevel@tonic-gate * beyond this point!
25017c478bd9Sstevel@tonic-gate */
25027c478bd9Sstevel@tonic-gate mutex_exit(&xprt->xp_thread_lock);
25037c478bd9Sstevel@tonic-gate }
25047c478bd9Sstevel@tonic-gate }
25057c478bd9Sstevel@tonic-gate
25067c478bd9Sstevel@tonic-gate /*
25077c478bd9Sstevel@tonic-gate * Interrupt `request delivery' routine called from rpcmod
25087c478bd9Sstevel@tonic-gate * - put a request at the tail of the transport request queue
25097c478bd9Sstevel@tonic-gate * - insert a hint for svc_poll() into the xprt-ready queue
25107c478bd9Sstevel@tonic-gate * - increment the `pending-requests' count for the pool
2511*2695d4f4SMarcel Telka * - handle flow control
25127c478bd9Sstevel@tonic-gate * - wake up a thread sleeping in svc_poll() if necessary
25137c478bd9Sstevel@tonic-gate * - if all the threads are running ask the creator for a new one.
25147c478bd9Sstevel@tonic-gate */
2515*2695d4f4SMarcel Telka bool_t
2516*2695d4f4SMarcel Telka svc_queuereq(queue_t *q, mblk_t *mp, bool_t flowcontrol)
25177c478bd9Sstevel@tonic-gate {
25187c478bd9Sstevel@tonic-gate SVCMASTERXPRT *xprt = ((void **) q->q_ptr)[0];
25197c478bd9Sstevel@tonic-gate SVCPOOL *pool = xprt->xp_pool;
2520*2695d4f4SMarcel Telka size_t size;
25217c478bd9Sstevel@tonic-gate
25227c478bd9Sstevel@tonic-gate TRACE_0(TR_FAC_KRPC, TR_SVC_QUEUEREQ_START, "svc_queuereq_start");
25237c478bd9Sstevel@tonic-gate
2524de8c4a14SErik Nordmark ASSERT(!is_system_labeled() || msg_getcred(mp, NULL) != NULL ||
252545916cd2Sjpk mp->b_datap->db_type != M_DATA);
252645916cd2Sjpk
25277c478bd9Sstevel@tonic-gate /*
25287c478bd9Sstevel@tonic-gate * Step 1.
252907e75131Sgt29601 * Grab the transport's request lock and the
253007e75131Sgt29601 * pool's request lock so that putting the request
25317c478bd9Sstevel@tonic-gate * at the tail of the transport's request queue,
253207e75131Sgt29601 * possibly putting the request on the xprt ready
253307e75131Sgt29601 * queue, and incrementing the pending request
253407e75131Sgt29601 * count all look atomic.
25357c478bd9Sstevel@tonic-gate */
25367c478bd9Sstevel@tonic-gate mutex_enter(&xprt->xp_req_lock);
2537*2695d4f4SMarcel Telka if (flowcontrol && xprt->xp_full) {
2538*2695d4f4SMarcel Telka mutex_exit(&xprt->xp_req_lock);
2539*2695d4f4SMarcel Telka
2540*2695d4f4SMarcel Telka return (FALSE);
2541*2695d4f4SMarcel Telka }
2542*2695d4f4SMarcel Telka ASSERT(xprt->xp_full == FALSE);
254307e75131Sgt29601 mutex_enter(&pool->p_req_lock);
25447c478bd9Sstevel@tonic-gate if (xprt->xp_req_head == NULL)
25457c478bd9Sstevel@tonic-gate xprt->xp_req_head = mp;
25467c478bd9Sstevel@tonic-gate else
25477c478bd9Sstevel@tonic-gate xprt->xp_req_tail->b_next = mp;
25487c478bd9Sstevel@tonic-gate xprt->xp_req_tail = mp;
25497c478bd9Sstevel@tonic-gate
25507c478bd9Sstevel@tonic-gate /*
25517c478bd9Sstevel@tonic-gate * Step 2.
255207e75131Sgt29601 * Insert a hint into the xprt-ready queue, increment
2553*2695d4f4SMarcel Telka * counters, handle flow control, and wake up
255407e75131Sgt29601 * a thread sleeping in svc_poll() if necessary.
25557c478bd9Sstevel@tonic-gate */
25567c478bd9Sstevel@tonic-gate
25577c478bd9Sstevel@tonic-gate /* Insert pointer to this transport into the xprt-ready queue */
25587c478bd9Sstevel@tonic-gate svc_xprt_qput(pool, xprt);
25597c478bd9Sstevel@tonic-gate
2560*2695d4f4SMarcel Telka /* Increment counters */
25617c478bd9Sstevel@tonic-gate pool->p_reqs++;
2562*2695d4f4SMarcel Telka xprt->xp_reqs++;
2563*2695d4f4SMarcel Telka
2564*2695d4f4SMarcel Telka size = svc_msgsize(mp);
2565*2695d4f4SMarcel Telka xprt->xp_size += size;
2566*2695d4f4SMarcel Telka pool->p_size += size;
2567*2695d4f4SMarcel Telka
2568*2695d4f4SMarcel Telka /* Handle flow control */
2569*2695d4f4SMarcel Telka if (flowcontrol)
2570*2695d4f4SMarcel Telka svc_flowcontrol(xprt);
25717c478bd9Sstevel@tonic-gate
25727c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_KRPC, TR_NFSFP_QUE_REQ_ENQ,
25737c478bd9Sstevel@tonic-gate "rpc_que_req_enq:pool %p mp %p", pool, mp);
25747c478bd9Sstevel@tonic-gate
25757c478bd9Sstevel@tonic-gate /*
25767c478bd9Sstevel@tonic-gate * If there are more requests and req_cv hasn't
25777c478bd9Sstevel@tonic-gate * been signaled yet then wake up one more thread now.
25787c478bd9Sstevel@tonic-gate *
25797c478bd9Sstevel@tonic-gate * We avoid signaling req_cv until the most recently
25807c478bd9Sstevel@tonic-gate * signaled thread wakes up and gets CPU to clear
25817c478bd9Sstevel@tonic-gate * the `drowsy' flag.
25827c478bd9Sstevel@tonic-gate */
25837c478bd9Sstevel@tonic-gate if (pool->p_drowsy || pool->p_reqs <= pool->p_walkers ||
25847c478bd9Sstevel@tonic-gate pool->p_asleep == 0) {
25857c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_req_lock);
25867c478bd9Sstevel@tonic-gate } else {
25877c478bd9Sstevel@tonic-gate pool->p_drowsy = TRUE;
25887c478bd9Sstevel@tonic-gate pool->p_asleep--;
25897c478bd9Sstevel@tonic-gate
25907c478bd9Sstevel@tonic-gate /*
25917c478bd9Sstevel@tonic-gate * Signal wakeup and drop the request lock.
25927c478bd9Sstevel@tonic-gate */
25937c478bd9Sstevel@tonic-gate cv_signal(&pool->p_req_cv);
25947c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_req_lock);
25957c478bd9Sstevel@tonic-gate }
259607e75131Sgt29601 mutex_exit(&xprt->xp_req_lock);
25977c478bd9Sstevel@tonic-gate
25987c478bd9Sstevel@tonic-gate /*
25997c478bd9Sstevel@tonic-gate * Step 3.
26007c478bd9Sstevel@tonic-gate * If there are no asleep/signaled threads, we are
26017c478bd9Sstevel@tonic-gate * still below pool->p_maxthreads limit, and no thread is
26027c478bd9Sstevel@tonic-gate * currently being created then signal the creator
26037c478bd9Sstevel@tonic-gate * for one more service thread.
26047c478bd9Sstevel@tonic-gate *
26057c478bd9Sstevel@tonic-gate * The asleep and drowsy checks are not protected
26067c478bd9Sstevel@tonic-gate * by a lock since it hurts performance and a wrong
26077c478bd9Sstevel@tonic-gate * decision is not essential.
26087c478bd9Sstevel@tonic-gate */
26097c478bd9Sstevel@tonic-gate if (pool->p_asleep == 0 && !pool->p_drowsy &&
26107c478bd9Sstevel@tonic-gate pool->p_threads + pool->p_detached_threads < pool->p_maxthreads)
26117c478bd9Sstevel@tonic-gate svc_creator_signal(pool);
26127c478bd9Sstevel@tonic-gate
26137c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_KRPC, TR_SVC_QUEUEREQ_END,
26147c478bd9Sstevel@tonic-gate "svc_queuereq_end:(%S)", "end");
2615*2695d4f4SMarcel Telka
2616*2695d4f4SMarcel Telka return (TRUE);
26177c478bd9Sstevel@tonic-gate }
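
/*
 * Illustrative sketch (not part of this file): how a transport module's
 * STREAMS service routine might feed requests into kernel RPC with
 * svc_queuereq().  The function name example_rq_svc() is hypothetical;
 * only the svc_queuereq() signature and return-value semantics come from
 * the code above, and getq()/putbq() are the standard STREAMS primitives.
 * If svc_queuereq() returns FALSE the transport is flow controlled and the
 * message was not consumed, so the caller puts it back and stops draining;
 * the queue is re-enabled later through the xp_enable / RELE_PROC() path.
 *
 *	static void
 *	example_rq_svc(queue_t *q)
 *	{
 *		mblk_t *mp;
 *
 *		while ((mp = getq(q)) != NULL) {
 *			if (!svc_queuereq(q, mp, TRUE)) {
 *				(void) putbq(q, mp);
 *				break;
 *			}
 *		}
 *	}
 */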
26187c478bd9Sstevel@tonic-gate
26197c478bd9Sstevel@tonic-gate /*
26207c478bd9Sstevel@tonic-gate * Reserve a service thread so that it can be detached later.
26217c478bd9Sstevel@tonic-gate * This reservation is required to make sure that when it tries to
26227c478bd9Sstevel@tonic-gate * detach itself the total number of detached threads does not exceed
26237c478bd9Sstevel@tonic-gate * pool->p_maxthreads - pool->p_redline (i.e. that we can have
26247c478bd9Sstevel@tonic-gate * up to pool->p_redline non-detached threads).
26257c478bd9Sstevel@tonic-gate *
26267c478bd9Sstevel@tonic-gate * If the thread does not detach itself later, it should cancel the
26277c478bd9Sstevel@tonic-gate * reservation before returning to svc_run().
26287c478bd9Sstevel@tonic-gate *
26297c478bd9Sstevel@tonic-gate * - check if there is room for more reserved/detached threads
26307c478bd9Sstevel@tonic-gate * - if so, then increment the `reserved threads' count for the pool
26317c478bd9Sstevel@tonic-gate * - mark the thread as reserved (setting the flag in the clone transport
26327c478bd9Sstevel@tonic-gate * handle for this thread)
26337c478bd9Sstevel@tonic-gate * - returns 1 if the reservation succeeded, 0 if it failed.
26347c478bd9Sstevel@tonic-gate */
26357c478bd9Sstevel@tonic-gate int
26367c478bd9Sstevel@tonic-gate svc_reserve_thread(SVCXPRT *clone_xprt)
26377c478bd9Sstevel@tonic-gate {
26387c478bd9Sstevel@tonic-gate SVCPOOL *pool = clone_xprt->xp_master->xp_pool;
26397c478bd9Sstevel@tonic-gate
26407c478bd9Sstevel@tonic-gate /* Recursive reservations are not allowed */
26417c478bd9Sstevel@tonic-gate ASSERT(!clone_xprt->xp_reserved);
26427c478bd9Sstevel@tonic-gate ASSERT(!clone_xprt->xp_detached);
26437c478bd9Sstevel@tonic-gate
26447c478bd9Sstevel@tonic-gate /* Check pool counts if there is room for reservation */
26457c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_thread_lock);
26467c478bd9Sstevel@tonic-gate if (pool->p_reserved_threads + pool->p_detached_threads >=
26477c478bd9Sstevel@tonic-gate pool->p_maxthreads - pool->p_redline) {
26487c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_thread_lock);
26497c478bd9Sstevel@tonic-gate return (0);
26507c478bd9Sstevel@tonic-gate }
26517c478bd9Sstevel@tonic-gate pool->p_reserved_threads++;
26527c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_thread_lock);
26537c478bd9Sstevel@tonic-gate
26547c478bd9Sstevel@tonic-gate /* Mark the thread (clone handle) as reserved */
26557c478bd9Sstevel@tonic-gate clone_xprt->xp_reserved = TRUE;
26567c478bd9Sstevel@tonic-gate
26577c478bd9Sstevel@tonic-gate return (1);
26587c478bd9Sstevel@tonic-gate }
26597c478bd9Sstevel@tonic-gate
26607c478bd9Sstevel@tonic-gate /*
26617c478bd9Sstevel@tonic-gate * Cancel a reservation for a thread.
26627c478bd9Sstevel@tonic-gate * - decrement the `reserved threads' count for the pool
26637c478bd9Sstevel@tonic-gate * - clear the flag in the clone transport handle for this thread.
26647c478bd9Sstevel@tonic-gate */
26657c478bd9Sstevel@tonic-gate void
26667c478bd9Sstevel@tonic-gate svc_unreserve_thread(SVCXPRT *clone_xprt)
26677c478bd9Sstevel@tonic-gate {
26687c478bd9Sstevel@tonic-gate SVCPOOL *pool = clone_xprt->xp_master->xp_pool;
26697c478bd9Sstevel@tonic-gate
26707c478bd9Sstevel@tonic-gate /* Thread must have a reservation */
26717c478bd9Sstevel@tonic-gate ASSERT(clone_xprt->xp_reserved);
26727c478bd9Sstevel@tonic-gate ASSERT(!clone_xprt->xp_detached);
26737c478bd9Sstevel@tonic-gate
26747c478bd9Sstevel@tonic-gate /* Decrement global count */
26757c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_thread_lock);
26767c478bd9Sstevel@tonic-gate pool->p_reserved_threads--;
26777c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_thread_lock);
26787c478bd9Sstevel@tonic-gate
26797c478bd9Sstevel@tonic-gate /* Clear reservation flag */
26807c478bd9Sstevel@tonic-gate clone_xprt->xp_reserved = FALSE;
26817c478bd9Sstevel@tonic-gate }
26827c478bd9Sstevel@tonic-gate
26837c478bd9Sstevel@tonic-gate /*
26847c478bd9Sstevel@tonic-gate * Detach a thread from its transport, so that it can block for an
26857c478bd9Sstevel@tonic-gate * extended time. Because the transport can be closed after the thread is
26867c478bd9Sstevel@tonic-gate * detached, the thread should have already sent off a reply if it was
26877c478bd9Sstevel@tonic-gate * going to send one.
26887c478bd9Sstevel@tonic-gate *
26897c478bd9Sstevel@tonic-gate * - decrement `non-detached threads' count and increment `detached threads'
26907c478bd9Sstevel@tonic-gate * counts for the transport
26917c478bd9Sstevel@tonic-gate * - decrement the `non-detached threads' and `reserved threads'
26927c478bd9Sstevel@tonic-gate * counts and increment the `detached threads' count for the pool
26937c478bd9Sstevel@tonic-gate * - release the rpcmod slot
26947c478bd9Sstevel@tonic-gate * - mark the clone (thread) as detached.
26957c478bd9Sstevel@tonic-gate *
26967c478bd9Sstevel@tonic-gate * No need to return a pointer to the thread's CPR information, since
26977c478bd9Sstevel@tonic-gate * the thread has a userland identity.
26987c478bd9Sstevel@tonic-gate *
26997c478bd9Sstevel@tonic-gate * NOTICE: a thread must not detach itself without making a prior reservation
27007c478bd9Sstevel@tonic-gate * through svc_thread_reserve().
27017c478bd9Sstevel@tonic-gate */
27027c478bd9Sstevel@tonic-gate callb_cpr_t *
27037c478bd9Sstevel@tonic-gate svc_detach_thread(SVCXPRT *clone_xprt)
27047c478bd9Sstevel@tonic-gate {
27057c478bd9Sstevel@tonic-gate SVCMASTERXPRT *xprt = clone_xprt->xp_master;
27067c478bd9Sstevel@tonic-gate SVCPOOL *pool = xprt->xp_pool;
2707*2695d4f4SMarcel Telka bool_t enable;
27087c478bd9Sstevel@tonic-gate
27097c478bd9Sstevel@tonic-gate /* Thread must have a reservation */
27107c478bd9Sstevel@tonic-gate ASSERT(clone_xprt->xp_reserved);
27117c478bd9Sstevel@tonic-gate ASSERT(!clone_xprt->xp_detached);
27127c478bd9Sstevel@tonic-gate
27137c478bd9Sstevel@tonic-gate /* Bookkeeping for this transport */
27147c478bd9Sstevel@tonic-gate mutex_enter(&xprt->xp_thread_lock);
27157c478bd9Sstevel@tonic-gate xprt->xp_threads--;
27167c478bd9Sstevel@tonic-gate xprt->xp_detached_threads++;
27177c478bd9Sstevel@tonic-gate mutex_exit(&xprt->xp_thread_lock);
27187c478bd9Sstevel@tonic-gate
27197c478bd9Sstevel@tonic-gate /* Bookkeeping for the pool */
27207c478bd9Sstevel@tonic-gate mutex_enter(&pool->p_thread_lock);
27217c478bd9Sstevel@tonic-gate pool->p_threads--;
27227c478bd9Sstevel@tonic-gate pool->p_reserved_threads--;
27237c478bd9Sstevel@tonic-gate pool->p_detached_threads++;
27247c478bd9Sstevel@tonic-gate mutex_exit(&pool->p_thread_lock);
27257c478bd9Sstevel@tonic-gate
27267c478bd9Sstevel@tonic-gate /* Release an rpcmod slot for this request */
2727*2695d4f4SMarcel Telka mutex_enter(&xprt->xp_req_lock);
2728*2695d4f4SMarcel Telka enable = xprt->xp_enable;
2729*2695d4f4SMarcel Telka if (enable)
2730*2695d4f4SMarcel Telka xprt->xp_enable = FALSE;
2731*2695d4f4SMarcel Telka mutex_exit(&xprt->xp_req_lock);
2732*2695d4f4SMarcel Telka (*RELE_PROC(xprt)) (clone_xprt->xp_wq, NULL, enable);
27337c478bd9Sstevel@tonic-gate
27347c478bd9Sstevel@tonic-gate /* Mark the clone (thread) as detached */
27357c478bd9Sstevel@tonic-gate clone_xprt->xp_reserved = FALSE;
27367c478bd9Sstevel@tonic-gate clone_xprt->xp_detached = TRUE;
27377c478bd9Sstevel@tonic-gate
27387c478bd9Sstevel@tonic-gate return (NULL);
27397c478bd9Sstevel@tonic-gate }
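
/*
 * Illustrative sketch (not from the original source): the intended
 * reserve/detach protocol as seen from a service routine running under
 * svc_run().  Except for the svc_*_thread() calls, the names and the
 * branch condition below are hypothetical.
 *
 *	if (svc_reserve_thread(clone_xprt) == 0) {
 *		... no room under p_maxthreads - p_redline; handle the
 *		... request without ever detaching
 *	} else if (need_to_block_for_a_long_time) {
 *		(void) svc_detach_thread(clone_xprt);
 *		... the transport may now close underneath us; any reply
 *		... must already have been sent
 *	} else {
 *		svc_unreserve_thread(clone_xprt);
 *	}
 */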
27407c478bd9Sstevel@tonic-gate
27417c478bd9Sstevel@tonic-gate /*
27427c478bd9Sstevel@tonic-gate * This routine is responsible for extracting the RDMA plugin master XPRT,
27437c478bd9Sstevel@tonic-gate * unregistering it from the SVCPOOL and initiating plugin-specific cleanup.
27447c478bd9Sstevel@tonic-gate * It is passed a list/group of rdma transports as records which are
27457c478bd9Sstevel@tonic-gate * active in a given registered or unregistered kRPC thread pool. It shuts
27467c478bd9Sstevel@tonic-gate * down all active rdma transports in that pool. If the thread active on the
27477c478bd9Sstevel@tonic-gate * transport happens to be the last thread for that pool, it will signal the
27487c478bd9Sstevel@tonic-gate * creator thread to clean up the pool and destroy the xprt in svc_queueclose().
27497c478bd9Sstevel@tonic-gate */
27507c478bd9Sstevel@tonic-gate void
275151f34d4bSRajkumar Sivaprakasam rdma_stop(rdma_xprt_group_t *rdma_xprts)
27527c478bd9Sstevel@tonic-gate {
27537c478bd9Sstevel@tonic-gate SVCMASTERXPRT *xprt;
27547c478bd9Sstevel@tonic-gate rdma_xprt_record_t *curr_rec;
27557c478bd9Sstevel@tonic-gate queue_t *q;
27567c478bd9Sstevel@tonic-gate mblk_t *mp;
275751f34d4bSRajkumar Sivaprakasam int i, rtg_count;
275807e75131Sgt29601 SVCPOOL *pool;
27597c478bd9Sstevel@tonic-gate
276051f34d4bSRajkumar Sivaprakasam if (rdma_xprts->rtg_count == 0)
27617c478bd9Sstevel@tonic-gate return;
27627c478bd9Sstevel@tonic-gate
276351f34d4bSRajkumar Sivaprakasam rtg_count = rdma_xprts->rtg_count;
276451f34d4bSRajkumar Sivaprakasam
276551f34d4bSRajkumar Sivaprakasam for (i = 0; i < rtg_count; i++) {
276651f34d4bSRajkumar Sivaprakasam curr_rec = rdma_xprts->rtg_listhead;
276751f34d4bSRajkumar Sivaprakasam rdma_xprts->rtg_listhead = curr_rec->rtr_next;
276851f34d4bSRajkumar Sivaprakasam rdma_xprts->rtg_count--;
27697c478bd9Sstevel@tonic-gate curr_rec->rtr_next = NULL;
27707c478bd9Sstevel@tonic-gate xprt = curr_rec->rtr_xprt_ptr;
27717c478bd9Sstevel@tonic-gate q = xprt->xp_wq;
27727c478bd9Sstevel@tonic-gate svc_rdma_kstop(xprt);
27737c478bd9Sstevel@tonic-gate
27747c478bd9Sstevel@tonic-gate mutex_enter(&xprt->xp_req_lock);
277507e75131Sgt29601 pool = xprt->xp_pool;
27767c478bd9Sstevel@tonic-gate while ((mp = xprt->xp_req_head) != NULL) {
2777*2695d4f4SMarcel Telka rdma_recv_data_t *rdp = (rdma_recv_data_t *)mp->b_rptr;
2778*2695d4f4SMarcel Telka
2779*2695d4f4SMarcel Telka /* remove the request from the list */
27807c478bd9Sstevel@tonic-gate xprt->xp_req_head = mp->b_next;
27817c478bd9Sstevel@tonic-gate mp->b_next = (mblk_t *)0;
2782*2695d4f4SMarcel Telka
278351f34d4bSRajkumar Sivaprakasam RDMA_BUF_FREE(rdp->conn, &rdp->rpcmsg);
278451f34d4bSRajkumar Sivaprakasam RDMA_REL_CONN(rdp->conn);
27857c478bd9Sstevel@tonic-gate freemsg(mp);
27867c478bd9Sstevel@tonic-gate }
2787*2695d4f4SMarcel Telka mutex_enter(&pool->p_req_lock);
2788*2695d4f4SMarcel Telka pool->p_reqs -= xprt->xp_reqs;
2789*2695d4f4SMarcel Telka pool->p_size -= xprt->xp_size;
2790*2695d4f4SMarcel Telka mutex_exit(&pool->p_req_lock);
2791*2695d4f4SMarcel Telka xprt->xp_reqs = 0;
2792*2695d4f4SMarcel Telka xprt->xp_size = 0;
2793*2695d4f4SMarcel Telka xprt->xp_full = FALSE;
2794*2695d4f4SMarcel Telka xprt->xp_enable = FALSE;
27957c478bd9Sstevel@tonic-gate mutex_exit(&xprt->xp_req_lock);
27967c478bd9Sstevel@tonic-gate svc_queueclose(q);
27977c478bd9Sstevel@tonic-gate #ifdef DEBUG
27987c478bd9Sstevel@tonic-gate if (rdma_check)
27997c478bd9Sstevel@tonic-gate cmn_err(CE_NOTE, "rdma_stop: Exited svc_queueclose\n");
28007c478bd9Sstevel@tonic-gate #endif
28017c478bd9Sstevel@tonic-gate /*
28027c478bd9Sstevel@tonic-gate * Free the rdma transport record for the expunged rdma
28037c478bd9Sstevel@tonic-gate * based master transport handle.
28047c478bd9Sstevel@tonic-gate */
28057c478bd9Sstevel@tonic-gate kmem_free(curr_rec, sizeof (rdma_xprt_record_t));
280651f34d4bSRajkumar Sivaprakasam if (!rdma_xprts->rtg_listhead)
28077c478bd9Sstevel@tonic-gate break;
28087c478bd9Sstevel@tonic-gate }
28097c478bd9Sstevel@tonic-gate }
2810bfd8310aSGlenn Barry
2811bfd8310aSGlenn Barry
2812bfd8310aSGlenn Barry /*
2813bfd8310aSGlenn Barry * rpc_msg_dup/rpc_msg_free
2814bfd8310aSGlenn Barry * Currently only used by svc_rpcsec_gss.c but put in this file as it
2815bfd8310aSGlenn Barry * may be useful to others in the future.
2816bfd8310aSGlenn Barry * But future consumers should be careful because so far
2817bfd8310aSGlenn Barry * - only tested/used for call msgs (not reply)
2818bfd8310aSGlenn Barry * - only tested/used with call verf oa_length==0
2819bfd8310aSGlenn Barry */
2820bfd8310aSGlenn Barry struct rpc_msg *
2821bfd8310aSGlenn Barry rpc_msg_dup(struct rpc_msg *src)
2822bfd8310aSGlenn Barry {
2823bfd8310aSGlenn Barry struct rpc_msg *dst;
2824bfd8310aSGlenn Barry struct opaque_auth oa_src, oa_dst;
2825bfd8310aSGlenn Barry
2826bfd8310aSGlenn Barry dst = kmem_alloc(sizeof (*dst), KM_SLEEP);
2827bfd8310aSGlenn Barry
2828bfd8310aSGlenn Barry dst->rm_xid = src->rm_xid;
2829bfd8310aSGlenn Barry dst->rm_direction = src->rm_direction;
2830bfd8310aSGlenn Barry
2831bfd8310aSGlenn Barry dst->rm_call.cb_rpcvers = src->rm_call.cb_rpcvers;
2832bfd8310aSGlenn Barry dst->rm_call.cb_prog = src->rm_call.cb_prog;
2833bfd8310aSGlenn Barry dst->rm_call.cb_vers = src->rm_call.cb_vers;
2834bfd8310aSGlenn Barry dst->rm_call.cb_proc = src->rm_call.cb_proc;
2835bfd8310aSGlenn Barry
2836bfd8310aSGlenn Barry /* dup opaque auth call body cred */
2837bfd8310aSGlenn Barry oa_src = src->rm_call.cb_cred;
2838bfd8310aSGlenn Barry
2839bfd8310aSGlenn Barry oa_dst.oa_flavor = oa_src.oa_flavor;
2840bfd8310aSGlenn Barry oa_dst.oa_base = kmem_alloc(oa_src.oa_length, KM_SLEEP);
2841bfd8310aSGlenn Barry
2842bfd8310aSGlenn Barry bcopy(oa_src.oa_base, oa_dst.oa_base, oa_src.oa_length);
2843bfd8310aSGlenn Barry oa_dst.oa_length = oa_src.oa_length;
2844bfd8310aSGlenn Barry
2845bfd8310aSGlenn Barry dst->rm_call.cb_cred = oa_dst;
2846bfd8310aSGlenn Barry
2847bfd8310aSGlenn Barry /* dup or just alloc opaque auth call body verifier */
2848bfd8310aSGlenn Barry if (src->rm_call.cb_verf.oa_length > 0) {
2849bfd8310aSGlenn Barry oa_src = src->rm_call.cb_verf;
2850bfd8310aSGlenn Barry
2851bfd8310aSGlenn Barry oa_dst.oa_flavor = oa_src.oa_flavor;
2852bfd8310aSGlenn Barry oa_dst.oa_base = kmem_alloc(oa_src.oa_length, KM_SLEEP);
2853bfd8310aSGlenn Barry
2854bfd8310aSGlenn Barry bcopy(oa_src.oa_base, oa_dst.oa_base, oa_src.oa_length);
2855bfd8310aSGlenn Barry oa_dst.oa_length = oa_src.oa_length;
2856bfd8310aSGlenn Barry
2857bfd8310aSGlenn Barry dst->rm_call.cb_verf = oa_dst;
2858bfd8310aSGlenn Barry } else {
2859bfd8310aSGlenn Barry oa_dst.oa_flavor = -1; /* will be set later */
2860bfd8310aSGlenn Barry oa_dst.oa_base = kmem_alloc(MAX_AUTH_BYTES, KM_SLEEP);
2861bfd8310aSGlenn Barry
2862bfd8310aSGlenn Barry oa_dst.oa_length = 0; /* will be set later */
2863bfd8310aSGlenn Barry
2864bfd8310aSGlenn Barry dst->rm_call.cb_verf = oa_dst;
2865bfd8310aSGlenn Barry }
2866bfd8310aSGlenn Barry return (dst);
2867bfd8310aSGlenn Barry
2868bfd8310aSGlenn Barry error:
2869bfd8310aSGlenn Barry kmem_free(dst->rm_call.cb_cred.oa_base, dst->rm_call.cb_cred.oa_length);
2870bfd8310aSGlenn Barry kmem_free(dst, sizeof (*dst));
2871bfd8310aSGlenn Barry return (NULL);
2872bfd8310aSGlenn Barry }
2873bfd8310aSGlenn Barry
2874bfd8310aSGlenn Barry void
2875bfd8310aSGlenn Barry rpc_msg_free(struct rpc_msg **msg, int cb_verf_oa_length)
2876bfd8310aSGlenn Barry {
2877bfd8310aSGlenn Barry struct rpc_msg *m = *msg;
2878bfd8310aSGlenn Barry
2879bfd8310aSGlenn Barry kmem_free(m->rm_call.cb_cred.oa_base, m->rm_call.cb_cred.oa_length);
2880bfd8310aSGlenn Barry m->rm_call.cb_cred.oa_base = NULL;
2881bfd8310aSGlenn Barry m->rm_call.cb_cred.oa_length = 0;
2882bfd8310aSGlenn Barry
2883bfd8310aSGlenn Barry kmem_free(m->rm_call.cb_verf.oa_base, cb_verf_oa_length);
2884bfd8310aSGlenn Barry m->rm_call.cb_verf.oa_base = NULL;
2885bfd8310aSGlenn Barry m->rm_call.cb_verf.oa_length = 0;
2886bfd8310aSGlenn Barry
2887bfd8310aSGlenn Barry kmem_free(m, sizeof (*m));
2888bfd8310aSGlenn Barry m = NULL;
2889bfd8310aSGlenn Barry }
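
/*
 * Illustrative sketch (hypothetical usage, not taken from
 * svc_rpcsec_gss.c): duplicating a call message and releasing the copy.
 * When the source verifier had oa_length == 0, rpc_msg_dup() allocates
 * MAX_AUTH_BYTES for the copy's verifier buffer, so that is the length
 * that must be passed back to rpc_msg_free() in that case:
 *
 *	struct rpc_msg *copy = rpc_msg_dup(msg);
 *
 *	... use copy, possibly filling in cb_verf ...
 *
 *	rpc_msg_free(&copy, MAX_AUTH_BYTES);
 */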
2890