/*	$NetBSD: svc.c,v 1.21 2000/07/06 03:10:35 christos Exp $	*/

/*-
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)svc.c 1.44 88/02/08 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)svc.c	2.4 88/08/11 4.0 RPCSRC";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * svc.c, Server-side remote procedure call interface.
 *
 * There are two sets of procedures here.  The xprt routines are
 * for handling transport handles.  The svc routines handle the
 * list of service routines.
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/ucred.h>

#include <rpc/rpc.h>
#include <rpc/rpcb_clnt.h>
#include <rpc/replay.h>

#include <rpc/rpc_com.h>

#define SVC_VERSQUIET 0x0001		/* keep quiet about vers mismatch */
#define version_keepquiet(xp) (SVC_EXT(xp)->xp_flags & SVC_VERSQUIET)

static struct svc_callout *svc_find(SVCPOOL *pool, rpcprog_t, rpcvers_t,
    char *);
static void svc_new_thread(SVCPOOL *pool);
static void xprt_unregister_locked(SVCXPRT *xprt);
static void svc_change_space_used(SVCPOOL *pool, int delta);
static bool_t svc_request_space_available(SVCPOOL *pool);

/* ***************  SVCXPRT related stuff **************** */

static int svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS);
static int svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS);

/*
 * Create an idle service pool.  The pool is left in the SVCPOOL_INIT
 * state with a default of one service thread; threads are only started
 * once svc_run() is called.  If sysctl_base is non-NULL, sysctl nodes
 * for tuning and monitoring the pool are attached under it.
 */
SVCPOOL*
svcpool_create(const char *name, struct sysctl_oid_list *sysctl_base)
{
	SVCPOOL *pool;

	pool = malloc(sizeof(SVCPOOL), M_RPC, M_WAITOK|M_ZERO);

	mtx_init(&pool->sp_lock, "sp_lock", NULL, MTX_DEF);
	pool->sp_name = name;
	pool->sp_state = SVCPOOL_INIT;
	pool->sp_proc = NULL;
	TAILQ_INIT(&pool->sp_xlist);
	TAILQ_INIT(&pool->sp_active);
	TAILQ_INIT(&pool->sp_callouts);
	TAILQ_INIT(&pool->sp_lcallouts);
	LIST_INIT(&pool->sp_threads);
	LIST_INIT(&pool->sp_idlethreads);
	pool->sp_minthreads = 1;
	pool->sp_maxthreads = 1;
	pool->sp_threadcount = 0;

	/*
	 * Don't use more than a quarter of mbuf clusters or more than
	 * 45Mb buffering requests.
	 */
	pool->sp_space_high = nmbclusters * MCLBYTES / 4;
	if (pool->sp_space_high > 45 << 20)
		pool->sp_space_high = 45 << 20;
	pool->sp_space_low = 2 * pool->sp_space_high / 3;

	sysctl_ctx_init(&pool->sp_sysctl);
	if (sysctl_base) {
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "minthreads", CTLTYPE_INT | CTLFLAG_RW,
		    pool, 0, svcpool_minthread_sysctl, "I", "");
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "maxthreads", CTLTYPE_INT | CTLFLAG_RW,
		    pool, 0, svcpool_maxthread_sysctl, "I", "");
		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "threads", CTLFLAG_RD, &pool->sp_threadcount, 0, "");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used", CTLFLAG_RD,
		    &pool->sp_space_used, 0,
		    "Space in parsed but not handled requests.");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used_highest", CTLFLAG_RD,
		    &pool->sp_space_used_highest, 0,
		    "Highest space used since reboot.");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_high", CTLFLAG_RW,
		    &pool->sp_space_high, 0,
		    "Maximum space in parsed but not handled requests.");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_low", CTLFLAG_RW,
		    &pool->sp_space_low, 0,
		    "Low water mark for request space.");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttled", CTLFLAG_RD,
		    &pool->sp_space_throttled, 0,
		    "Whether nfs requests are currently throttled");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttle_count", CTLFLAG_RD,
		    &pool->sp_space_throttle_count, 0,
		    "Count of times throttling based on request space has occurred");
	}

	return pool;
}

/*
 * Tear down a service pool: unregister all transports and callouts,
 * release the replay cache (if any) and free the pool itself.
 */
void
svcpool_destroy(SVCPOOL *pool)
{
	SVCXPRT *xprt, *nxprt;
	struct svc_callout *s;
	struct svc_loss_callout *sl;
	struct svcxprt_list cleanup;

	TAILQ_INIT(&cleanup);
	mtx_lock(&pool->sp_lock);

	while (TAILQ_FIRST(&pool->sp_xlist)) {
		xprt = TAILQ_FIRST(&pool->sp_xlist);
		xprt_unregister_locked(xprt);
		TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
	}

	while ((s = TAILQ_FIRST(&pool->sp_callouts)) != NULL) {
		mtx_unlock(&pool->sp_lock);
		svc_unreg(pool, s->sc_prog, s->sc_vers);
		mtx_lock(&pool->sp_lock);
	}
	while ((sl = TAILQ_FIRST(&pool->sp_lcallouts)) != NULL) {
		mtx_unlock(&pool->sp_lock);
		svc_loss_unreg(pool, sl->slc_dispatch);
		mtx_lock(&pool->sp_lock);
	}
	mtx_unlock(&pool->sp_lock);

	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		SVC_RELEASE(xprt);
	}

	mtx_destroy(&pool->sp_lock);

	if (pool->sp_rcache)
		replay_freecache(pool->sp_rcache);

	sysctl_ctx_free(&pool->sp_sysctl);
	free(pool, M_RPC);
}

/*
 * Return TRUE if the pool is currently running, i.e. neither still
 * initialising nor closing down.
 */
static bool_t
svcpool_active(SVCPOOL *pool)
{
	enum svcpool_state state = pool->sp_state;

	if (state == SVCPOOL_INIT || state == SVCPOOL_CLOSING)
		return (FALSE);
	return (TRUE);
}

/*
 * Sysctl handler to set the minimum thread count on a pool
 */
static int
svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	int newminthreads, error, n;

	pool = oidp->oid_arg1;
	newminthreads = pool->sp_minthreads;
	error = sysctl_handle_int(oidp, &newminthreads, 0, req);
	if (error == 0 && newminthreads != pool->sp_minthreads) {
		if (newminthreads > pool->sp_maxthreads)
			return (EINVAL);
		mtx_lock(&pool->sp_lock);
		if (newminthreads > pool->sp_minthreads
		    && svcpool_active(pool)) {
			/*
			 * If the pool is running and we are
			 * increasing, create some more threads now.
			 */
			n = newminthreads - pool->sp_threadcount;
			if (n > 0) {
				mtx_unlock(&pool->sp_lock);
				while (n--)
					svc_new_thread(pool);
				mtx_lock(&pool->sp_lock);
			}
		}
		pool->sp_minthreads = newminthreads;
		mtx_unlock(&pool->sp_lock);
	}
	return (error);
}

/*
 * Sysctl handler to set the maximum thread count on a pool
 */
static int
svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	SVCTHREAD *st;
	int newmaxthreads, error;

	pool = oidp->oid_arg1;
	newmaxthreads = pool->sp_maxthreads;
	error = sysctl_handle_int(oidp, &newmaxthreads, 0, req);
	if (error == 0 && newmaxthreads != pool->sp_maxthreads) {
		if (newmaxthreads < pool->sp_minthreads)
			return (EINVAL);
		mtx_lock(&pool->sp_lock);
		if (newmaxthreads < pool->sp_maxthreads
		    && svcpool_active(pool)) {
			/*
			 * If the pool is running and we are
			 * decreasing, wake up some idle threads to
			 * encourage them to exit.
			 */
			LIST_FOREACH(st, &pool->sp_idlethreads, st_ilink)
				cv_signal(&st->st_cond);
		}
		pool->sp_maxthreads = newmaxthreads;
		mtx_unlock(&pool->sp_lock);
	}
	return (error);
}

/*
 * Activate a transport handle.
 */
void
xprt_register(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	SVC_ACQUIRE(xprt);
	mtx_lock(&pool->sp_lock);
	xprt->xp_registered = TRUE;
	xprt->xp_active = FALSE;
	TAILQ_INSERT_TAIL(&pool->sp_xlist, xprt, xp_link);
	mtx_unlock(&pool->sp_lock);
}

/*
 * De-activate a transport handle. Note: the locked version doesn't
 * release the transport - caller must do that after dropping the pool
 * lock.
 */
static void
xprt_unregister_locked(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_assert(&pool->sp_lock, MA_OWNED);
	KASSERT(xprt->xp_registered == TRUE,
	    ("xprt_unregister_locked: not registered"));
	xprt_inactive_locked(xprt);
	TAILQ_REMOVE(&pool->sp_xlist, xprt, xp_link);
	xprt->xp_registered = FALSE;
}

void
xprt_unregister(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_lock(&pool->sp_lock);
	if (xprt->xp_registered == FALSE) {
		/* Already unregistered by another thread */
		mtx_unlock(&pool->sp_lock);
		return;
	}
	xprt_unregister_locked(xprt);
	mtx_unlock(&pool->sp_lock);

	SVC_RELEASE(xprt);
}

/*
 * Attempt to assign a service thread to this transport.
 */
static int
xprt_assignthread(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;
	SVCTHREAD *st;

	mtx_assert(&pool->sp_lock, MA_OWNED);
	st = LIST_FIRST(&pool->sp_idlethreads);
	if (st) {
		LIST_REMOVE(st, st_ilink);
		st->st_idle = FALSE;
		SVC_ACQUIRE(xprt);
		xprt->xp_thread = st;
		st->st_xprt = xprt;
		cv_signal(&st->st_cond);
		return (TRUE);
	} else {
		/*
		 * See if we can create a new thread. The
		 * actual thread creation happens in
		 * svc_run_internal because our locking state
		 * is poorly defined (we are typically called
		 * from a socket upcall). Don't create more
		 * than one thread per second.
		 */
		if (pool->sp_state == SVCPOOL_ACTIVE
		    && pool->sp_lastcreatetime < time_uptime
		    && pool->sp_threadcount < pool->sp_maxthreads) {
			pool->sp_state = SVCPOOL_THREADWANTED;
		}
	}
	return (FALSE);
}

void
xprt_active(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_lock(&pool->sp_lock);

	if (!xprt->xp_registered) {
		/*
		 * Race with xprt_unregister - we lose.
		 */
		mtx_unlock(&pool->sp_lock);
		return;
	}

	if (!xprt->xp_active) {
		xprt->xp_active = TRUE;
		if (xprt->xp_thread == NULL) {
			if (!svc_request_space_available(pool) ||
			    !xprt_assignthread(xprt))
				TAILQ_INSERT_TAIL(&pool->sp_active, xprt,
				    xp_alink);
		}
	}

	mtx_unlock(&pool->sp_lock);
}

void
xprt_inactive_locked(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_assert(&pool->sp_lock, MA_OWNED);
	if (xprt->xp_active) {
		if (xprt->xp_thread == NULL)
			TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
		xprt->xp_active = FALSE;
	}
}

void
xprt_inactive(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_lock(&pool->sp_lock);
	xprt_inactive_locked(xprt);
	mtx_unlock(&pool->sp_lock);
}

/*
 * Variant of xprt_inactive() for use only when it is known that the
 * transport is assigned to a thread, for example within receive handlers.
 */
void
xprt_inactive_self(SVCXPRT *xprt)
{

	KASSERT(xprt->xp_thread != NULL,
	    ("xprt_inactive_self(%p) with NULL xp_thread", xprt));
	xprt->xp_active = FALSE;
}

/*
 * Add a service program to the callout list.
 * The dispatch routine will be called when an RPC request for this
 * program number comes in.
 */
bool_t
svc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
    void (*dispatch)(struct svc_req *, SVCXPRT *),
    const struct netconfig *nconf)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_callout *s;
	char *netid = NULL;
	int flag = 0;

	/* VARIABLES PROTECTED BY svc_lock: s, svc_head */

	if (xprt->xp_netid) {
		netid = strdup(xprt->xp_netid, M_RPC);
		flag = 1;
	} else if (nconf && nconf->nc_netid) {
		netid = strdup(nconf->nc_netid, M_RPC);
		flag = 1;
	} /* must have been created with svc_raw_create */
	if ((netid == NULL) && (flag == 1)) {
		return (FALSE);
	}

	mtx_lock(&pool->sp_lock);
	if ((s = svc_find(pool, prog, vers, netid)) != NULL) {
		if (netid)
			free(netid, M_RPC);
		if (s->sc_dispatch == dispatch)
			goto rpcb_it; /* he is registering another xprt */
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}
	s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT);
	if (s == NULL) {
		if (netid)
			free(netid, M_RPC);
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}

	s->sc_prog = prog;
	s->sc_vers = vers;
	s->sc_dispatch = dispatch;
	s->sc_netid = netid;
	TAILQ_INSERT_TAIL(&pool->sp_callouts, s, sc_link);

	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
		((SVCXPRT *) xprt)->xp_netid = strdup(netid, M_RPC);

rpcb_it:
	mtx_unlock(&pool->sp_lock);
	/* now register the information with the local binder service */
	if (nconf) {
		bool_t dummy;
		struct netconfig tnc;
		struct netbuf nb;
		tnc = *nconf;
		nb.buf = &xprt->xp_ltaddr;
		nb.len = xprt->xp_ltaddr.ss_len;
		dummy = rpcb_set(prog, vers, &tnc, &nb);
		return (dummy);
	}
	return (TRUE);
}

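/*
 * Typical usage, in outline: a service creates a pool, attaches one or
 * more transports to it, registers a dispatch routine and runs the
 * pool.  MYPROG, MYVERS, my_dispatch, nconf and the way the transport
 * is obtained are illustrative only; transports normally come from one
 * of the transport back-ends (for example the socket-based ones):
 *
 *	SVCPOOL *pool = svcpool_create("myservice", NULL);
 *	SVCXPRT *xprt = ...;		(e.g. from svc_vc_create())
 *	(void) svc_reg(xprt, MYPROG, MYVERS, my_dispatch, nconf);
 *	svc_run(pool);			(returns after svc_exit(pool))
 */
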
/*
 * Remove a service program from the callout list.
 */
void
svc_unreg(SVCPOOL *pool, const rpcprog_t prog, const rpcvers_t vers)
{
	struct svc_callout *s;

	/* unregister the information anyway */
	(void) rpcb_unset(prog, vers, NULL);
	mtx_lock(&pool->sp_lock);
	while ((s = svc_find(pool, prog, vers, NULL)) != NULL) {
		TAILQ_REMOVE(&pool->sp_callouts, s, sc_link);
		if (s->sc_netid)
			mem_free(s->sc_netid, sizeof (s->sc_netid) + 1);
		mem_free(s, sizeof (struct svc_callout));
	}
	mtx_unlock(&pool->sp_lock);
}

/*
 * Add a service connection loss program to the callout list.
 * The dispatch routine will be called when some transport in this
 * pool dies.
 */
bool_t
svc_loss_reg(SVCXPRT *xprt, void (*dispatch)(SVCXPRT *))
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_loss_callout *s;

	mtx_lock(&pool->sp_lock);
	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
		if (s->slc_dispatch == dispatch)
			break;
	}
	if (s != NULL) {
		mtx_unlock(&pool->sp_lock);
		return (TRUE);
	}
	s = malloc(sizeof (struct svc_loss_callout), M_RPC, M_NOWAIT);
	if (s == NULL) {
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}
	s->slc_dispatch = dispatch;
	TAILQ_INSERT_TAIL(&pool->sp_lcallouts, s, slc_link);
	mtx_unlock(&pool->sp_lock);
	return (TRUE);
}

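/*
 * Typical usage, in outline: a service that keeps per-connection state
 * registers a loss callout so the state can be torn down when the
 * transport dies (my_loss_handler is illustrative):
 *
 *	static void
 *	my_loss_handler(SVCXPRT *xprt)
 *	{
 *		(release whatever state is associated with xprt)
 *	}
 *
 *	(void) svc_loss_reg(xprt, my_loss_handler);
 */
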
/*
 * Remove a service connection loss program from the callout list.
 */
void
svc_loss_unreg(SVCPOOL *pool, void (*dispatch)(SVCXPRT *))
{
	struct svc_loss_callout *s;

	mtx_lock(&pool->sp_lock);
	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
		if (s->slc_dispatch == dispatch) {
			TAILQ_REMOVE(&pool->sp_lcallouts, s, slc_link);
			free(s, M_RPC);
			break;
		}
	}
	mtx_unlock(&pool->sp_lock);
}

/* ********************** CALLOUT list related stuff ************* */

/*
 * Search the callout list for a program number, return the callout
 * struct.
 */
static struct svc_callout *
svc_find(SVCPOOL *pool, rpcprog_t prog, rpcvers_t vers, char *netid)
{
	struct svc_callout *s;

	mtx_assert(&pool->sp_lock, MA_OWNED);
	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
		if (s->sc_prog == prog && s->sc_vers == vers
		    && (netid == NULL || s->sc_netid == NULL ||
			strcmp(netid, s->sc_netid) == 0))
			break;
	}

	return (s);
}

/* ******************* REPLY GENERATION ROUTINES  ************ */

/*
 * Common reply path: record the reply in the replay cache (if any),
 * let the authentication flavor wrap the body and hand the result to
 * the transport's reply method.
 */
static bool_t
svc_sendreply_common(struct svc_req *rqstp, struct rpc_msg *rply,
    struct mbuf *body)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	bool_t ok;

	if (rqstp->rq_args) {
		m_freem(rqstp->rq_args);
		rqstp->rq_args = NULL;
	}

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    rply, svc_getrpccaller(rqstp), body);

	if (!SVCAUTH_WRAP(&rqstp->rq_auth, &body))
		return (FALSE);

	ok = SVC_REPLY(xprt, rply, rqstp->rq_addr, body, &rqstp->rq_reply_seq);
	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	return (ok);
}

/*
 * Send a reply to an rpc request
 */
bool_t
svc_sendreply(struct svc_req *rqstp, xdrproc_t xdr_results, void * xdr_location)
{
	struct rpc_msg rply;
	struct mbuf *m;
	XDR xdrs;
	bool_t ok;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	m = m_getcl(M_WAITOK, MT_DATA, 0);
	xdrmbuf_create(&xdrs, m, XDR_ENCODE);
	ok = xdr_results(&xdrs, xdr_location);
	XDR_DESTROY(&xdrs);

	if (ok) {
		return (svc_sendreply_common(rqstp, &rply, m));
	} else {
		m_freem(m);
		return (FALSE);
	}
}

bool_t
svc_sendreply_mbuf(struct svc_req *rqstp, struct mbuf *m)
{
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	return (svc_sendreply_common(rqstp, &rply, m));
}

/*
 * No procedure error reply
 */
void
svcerr_noproc(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROC_UNAVAIL;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Can't decode args error reply
 */
void
svcerr_decode(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = GARBAGE_ARGS;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, (struct sockaddr *) &xprt->xp_rtaddr, NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Some system error
 */
void
svcerr_systemerr(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SYSTEM_ERR;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Authentication error reply
 */
void
svcerr_auth(struct svc_req *rqstp, enum auth_stat why)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_DENIED;
	rply.rjcted_rply.rj_stat = AUTH_ERROR;
	rply.rjcted_rply.rj_why = why;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Auth too weak error reply
 */
void
svcerr_weakauth(struct svc_req *rqstp)
{

	svcerr_auth(rqstp, AUTH_TOOWEAK);
}

/*
 * Program unavailable error reply
 */
void
svcerr_noprog(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_UNAVAIL;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Program version mismatch error reply
 */
void
svcerr_progvers(struct svc_req *rqstp, rpcvers_t low_vers, rpcvers_t high_vers)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_MISMATCH;
	rply.acpted_rply.ar_vers.low = (uint32_t)low_vers;
	rply.acpted_rply.ar_vers.high = (uint32_t)high_vers;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Allocate a new server transport structure. All fields are
 * initialized to zero and xp_p3 is initialized to point at an
 * extension structure to hold various flags and authentication
 * parameters.
 */
SVCXPRT *
svc_xprt_alloc(void)
{
	SVCXPRT *xprt;
	SVCXPRT_EXT *ext;

	xprt = mem_alloc(sizeof(SVCXPRT));
	memset(xprt, 0, sizeof(SVCXPRT));
	ext = mem_alloc(sizeof(SVCXPRT_EXT));
	memset(ext, 0, sizeof(SVCXPRT_EXT));
	xprt->xp_p3 = ext;
	refcount_init(&xprt->xp_refs, 1);

	return (xprt);
}

/*
 * Free a server transport structure.
 */
void
svc_xprt_free(SVCXPRT *xprt)
{

	mem_free(xprt->xp_p3, sizeof(SVCXPRT_EXT));
	mem_free(xprt, sizeof(SVCXPRT));
}

/* ******************* SERVER INPUT STUFF ******************* */

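/*
 * Overview of the input path: transports with pending data are queued
 * on sp_active and picked up by the worker threads in
 * svc_run_internal().  svc_getreq() reads and authenticates a request
 * (answering replays from the replay cache when one is configured) and
 * svc_executereq() dispatches it to the matching callout.  The memory
 * held by parsed-but-not-yet-executed requests is accounted through
 * svc_change_space_used() so that reading can be throttled when the
 * pool falls behind.
 */
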
/*
 * Read RPC requests from a transport and queue them to be
 * executed. We handle authentication and replay cache replies here.
 * Actually dispatching the RPC is deferred till svc_executereq.
 */
static enum xprt_stat
svc_getreq(SVCXPRT *xprt, struct svc_req **rqstp_ret)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_req *r;
	struct rpc_msg msg;
	struct mbuf *args;
	struct svc_loss_callout *s;
	enum xprt_stat stat;

	/* now receive msgs from xprt (support batch calls) */
	r = malloc(sizeof(*r), M_RPC, M_WAITOK|M_ZERO);

	msg.rm_call.cb_cred.oa_base = r->rq_credarea;
	msg.rm_call.cb_verf.oa_base = &r->rq_credarea[MAX_AUTH_BYTES];
	r->rq_clntcred = &r->rq_credarea[2*MAX_AUTH_BYTES];
	if (SVC_RECV(xprt, &msg, &r->rq_addr, &args)) {
		enum auth_stat why;

		/*
		 * Handle replays and authenticate before queuing the
		 * request to be executed.
		 */
		SVC_ACQUIRE(xprt);
		r->rq_xprt = xprt;
		if (pool->sp_rcache) {
			struct rpc_msg repmsg;
			struct mbuf *repbody;
			enum replay_state rs;
			rs = replay_find(pool->sp_rcache, &msg,
			    svc_getrpccaller(r), &repmsg, &repbody);
			switch (rs) {
			case RS_NEW:
				break;
			case RS_DONE:
				SVC_REPLY(xprt, &repmsg, r->rq_addr,
				    repbody, &r->rq_reply_seq);
				if (r->rq_addr) {
					free(r->rq_addr, M_SONAME);
					r->rq_addr = NULL;
				}
				m_freem(args);
				goto call_done;

			default:
				m_freem(args);
				goto call_done;
			}
		}

		r->rq_xid = msg.rm_xid;
		r->rq_prog = msg.rm_call.cb_prog;
		r->rq_vers = msg.rm_call.cb_vers;
		r->rq_proc = msg.rm_call.cb_proc;
		r->rq_size = sizeof(*r) + m_length(args, NULL);
		r->rq_args = args;
		if ((why = _authenticate(r, &msg)) != AUTH_OK) {
			/*
			 * RPCSEC_GSS uses this return code
			 * for requests that form part of its
			 * context establishment protocol and
			 * should not be dispatched to the
			 * application.
			 */
			if (why != RPCSEC_GSS_NODISPATCH)
				svcerr_auth(r, why);
			goto call_done;
		}

		if (!SVCAUTH_UNWRAP(&r->rq_auth, &r->rq_args)) {
			svcerr_decode(r);
			goto call_done;
		}

		/*
		 * Everything checks out, return request to caller.
		 */
		*rqstp_ret = r;
		r = NULL;
	}
call_done:
	if (r) {
		svc_freereq(r);
		r = NULL;
	}
	if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
		TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link)
			(*s->slc_dispatch)(xprt);
		xprt_unregister(xprt);
	}

	return (stat);
}

/*
 * Dispatch a parsed request to the callout registered for its program
 * and version, or generate the appropriate error reply if none matches.
 */
static void
svc_executereq(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	SVCPOOL *pool = xprt->xp_pool;
	int prog_found;
	rpcvers_t low_vers;
	rpcvers_t high_vers;
	struct svc_callout *s;

	/* now match message with a registered service */
	prog_found = FALSE;
	low_vers = (rpcvers_t) -1L;
	high_vers = (rpcvers_t) 0L;
	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
		if (s->sc_prog == rqstp->rq_prog) {
			if (s->sc_vers == rqstp->rq_vers) {
				/*
				 * We hand ownership of rqstp to the
				 * dispatch method - it must call
				 * svc_freereq.
				 */
				(*s->sc_dispatch)(rqstp, xprt);
				return;
			} /* found correct version */
			prog_found = TRUE;
			if (s->sc_vers < low_vers)
				low_vers = s->sc_vers;
			if (s->sc_vers > high_vers)
				high_vers = s->sc_vers;
		} /* found correct program */
	}

	/*
	 * if we got here, the program or version
	 * is not served ...
	 */
	if (prog_found)
		svcerr_progvers(rqstp, low_vers, high_vers);
	else
		svcerr_noprog(rqstp);

	svc_freereq(rqstp);
}

/*
 * Unregister transports which have been idle for longer than their
 * idle timeout.  Called with the pool lock held; the lock is dropped
 * while the stale transports are released.
 */
static void
svc_checkidle(SVCPOOL *pool)
{
	SVCXPRT *xprt, *nxprt;
	time_t timo;
	struct svcxprt_list cleanup;

	TAILQ_INIT(&cleanup);
	TAILQ_FOREACH_SAFE(xprt, &pool->sp_xlist, xp_link, nxprt) {
		/*
		 * Only some transports have idle timers. Don't time
		 * something out which is just waking up.
		 */
		if (!xprt->xp_idletimeout || xprt->xp_thread)
			continue;

		timo = xprt->xp_lastactive + xprt->xp_idletimeout;
		if (time_uptime > timo) {
			xprt_unregister_locked(xprt);
			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
		}
	}

	mtx_unlock(&pool->sp_lock);
	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		SVC_RELEASE(xprt);
	}
	mtx_lock(&pool->sp_lock);
}

/*
 * Hand as many waiting active transports as possible to idle threads.
 */
static void
svc_assign_waiting_sockets(SVCPOOL *pool)
{
	SVCXPRT *xprt;

	mtx_lock(&pool->sp_lock);
	while ((xprt = TAILQ_FIRST(&pool->sp_active)) != NULL) {
		if (xprt_assignthread(xprt))
			TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
		else
			break;
	}
	mtx_unlock(&pool->sp_lock);
}

/*
 * Account for a change in the space consumed by pending requests and
 * update the throttling state when the high or low water mark is
 * crossed.
 */
static void
svc_change_space_used(SVCPOOL *pool, int delta)
{
	unsigned int value;

	value = atomic_fetchadd_int(&pool->sp_space_used, delta) + delta;
	if (delta > 0) {
		if (value >= pool->sp_space_high && !pool->sp_space_throttled) {
			pool->sp_space_throttled = TRUE;
			pool->sp_space_throttle_count++;
		}
		if (value > pool->sp_space_used_highest)
			pool->sp_space_used_highest = value;
	} else {
		if (value < pool->sp_space_low && pool->sp_space_throttled) {
			pool->sp_space_throttled = FALSE;
			svc_assign_waiting_sockets(pool);
		}
	}
}

/*
 * Return TRUE if the pool may parse more requests, i.e. it is not
 * currently throttled on request space.
 */
static bool_t
svc_request_space_available(SVCPOOL *pool)
{

	if (pool->sp_space_throttled)
		return (FALSE);
	return (TRUE);
}

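/*
 * Main request-processing loop, run by the master thread and by every
 * worker thread in the pool.  Each thread sleeps until it is handed an
 * active transport (or finds one itself), drains that transport with
 * svc_getreq(), queues the resulting requests and then executes them.
 * Threads are created and retired here according to sp_minthreads,
 * sp_maxthreads and the current load.
 */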
static void
svc_run_internal(SVCPOOL *pool, bool_t ismaster)
{
	struct svc_reqlist reqs;
	SVCTHREAD *st, *stpref;
	SVCXPRT *xprt;
	enum xprt_stat stat;
	struct svc_req *rqstp;
	size_t sz;
	int error;

	st = mem_alloc(sizeof(*st));
	st->st_pool = pool;
	st->st_xprt = NULL;
	STAILQ_INIT(&st->st_reqs);
	cv_init(&st->st_cond, "rpcsvc");
	STAILQ_INIT(&reqs);

	mtx_lock(&pool->sp_lock);
	LIST_INSERT_HEAD(&pool->sp_threads, st, st_link);

	/*
	 * If we are a new thread which was spawned to cope with
	 * increased load, set the state back to SVCPOOL_ACTIVE.
	 */
	if (pool->sp_state == SVCPOOL_THREADSTARTING)
		pool->sp_state = SVCPOOL_ACTIVE;

	while (pool->sp_state != SVCPOOL_CLOSING) {
		/*
		 * Create new thread if requested.
		 */
		if (pool->sp_state == SVCPOOL_THREADWANTED) {
			pool->sp_state = SVCPOOL_THREADSTARTING;
			pool->sp_lastcreatetime = time_uptime;
			mtx_unlock(&pool->sp_lock);
			svc_new_thread(pool);
			mtx_lock(&pool->sp_lock);
			continue;
		}

		/*
		 * Check for idle transports once per second.
		 */
		if (time_uptime > pool->sp_lastidlecheck) {
			pool->sp_lastidlecheck = time_uptime;
			svc_checkidle(pool);
		}

		xprt = st->st_xprt;
		if (!xprt && STAILQ_EMPTY(&st->st_reqs)) {
			/*
			 * Enforce maxthreads count.
			 */
			if (pool->sp_threadcount > pool->sp_maxthreads)
				break;

			/*
			 * Before sleeping, see if we can find an
			 * active transport which isn't being serviced
			 * by a thread.
			 */
			if (svc_request_space_available(pool) &&
			    (xprt = TAILQ_FIRST(&pool->sp_active)) != NULL) {
				TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
				SVC_ACQUIRE(xprt);
				xprt->xp_thread = st;
				st->st_xprt = xprt;
				continue;
			}

			LIST_INSERT_HEAD(&pool->sp_idlethreads, st, st_ilink);
			st->st_idle = TRUE;
			if (ismaster || (!ismaster &&
			    pool->sp_threadcount > pool->sp_minthreads))
				error = cv_timedwait_sig(&st->st_cond,
				    &pool->sp_lock, 5 * hz);
			else
				error = cv_wait_sig(&st->st_cond,
				    &pool->sp_lock);
			if (st->st_idle) {
				LIST_REMOVE(st, st_ilink);
				st->st_idle = FALSE;
			}

			/*
			 * Reduce worker thread count when idle.
			 */
			if (error == EWOULDBLOCK) {
				if (!ismaster
				    && (pool->sp_threadcount
					> pool->sp_minthreads)
				    && !st->st_xprt
				    && STAILQ_EMPTY(&st->st_reqs))
					break;
			} else if (error) {
				mtx_unlock(&pool->sp_lock);
				svc_exit(pool);
				mtx_lock(&pool->sp_lock);
				break;
			}
			continue;
		}

		if (xprt) {
			/*
			 * Drain the transport socket and queue up any
			 * RPCs.
			 */
			xprt->xp_lastactive = time_uptime;
			do {
				mtx_unlock(&pool->sp_lock);
				if (!svc_request_space_available(pool))
					break;
				rqstp = NULL;
				stat = svc_getreq(xprt, &rqstp);
				if (rqstp) {
					svc_change_space_used(pool, rqstp->rq_size);
					/*
					 * See if the application has
					 * a preference for some other
					 * thread.
					 */
					stpref = st;
					if (pool->sp_assign)
						stpref = pool->sp_assign(st,
						    rqstp);
					else
						mtx_lock(&pool->sp_lock);

					rqstp->rq_thread = stpref;
					STAILQ_INSERT_TAIL(&stpref->st_reqs,
					    rqstp, rq_link);

					/*
					 * If we assigned the request
					 * to another thread, make
					 * sure it's awake and continue
					 * reading from the
					 * socket. Otherwise, try to
					 * find some other thread to
					 * read from the socket and
					 * execute the request
					 * immediately.
					 */
					if (stpref == st)
						break;
					if (stpref->st_idle) {
						LIST_REMOVE(stpref, st_ilink);
						stpref->st_idle = FALSE;
						cv_signal(&stpref->st_cond);
					}
				} else
					mtx_lock(&pool->sp_lock);
			} while (stat == XPRT_MOREREQS
			    && pool->sp_state != SVCPOOL_CLOSING);

			/*
			 * Move this transport to the end of the
			 * active list to ensure fairness when
			 * multiple transports are active. If this was
			 * the last queued request, svc_getreq will
			 * end up calling xprt_inactive to remove from
			 * the active list.
			 */
			xprt->xp_thread = NULL;
			st->st_xprt = NULL;
			if (xprt->xp_active) {
				if (!svc_request_space_available(pool) ||
				    !xprt_assignthread(xprt))
					TAILQ_INSERT_TAIL(&pool->sp_active,
					    xprt, xp_alink);
			}
			STAILQ_CONCAT(&reqs, &st->st_reqs);
			mtx_unlock(&pool->sp_lock);
			SVC_RELEASE(xprt);
		} else {
			STAILQ_CONCAT(&reqs, &st->st_reqs);
			mtx_unlock(&pool->sp_lock);
		}

		/*
		 * Execute what we have queued.
1253a9148abdSDoug Rabson 		 */
1254f8fb069dSAlexander Motin 		sz = 0;
1255f8fb069dSAlexander Motin 		while ((rqstp = STAILQ_FIRST(&reqs)) != NULL) {
1256f8fb069dSAlexander Motin 			STAILQ_REMOVE_HEAD(&reqs, rq_link);
1257f8fb069dSAlexander Motin 			sz += rqstp->rq_size;
1258a9148abdSDoug Rabson 			svc_executereq(rqstp);
1259a9148abdSDoug Rabson 		}
1260f8fb069dSAlexander Motin 		svc_change_space_used(pool, -sz);
1261f8fb069dSAlexander Motin 		mtx_lock(&pool->sp_lock);
1262a9148abdSDoug Rabson 	}
1263a9148abdSDoug Rabson 
1264a9148abdSDoug Rabson 	if (st->st_xprt) {
1265a9148abdSDoug Rabson 		xprt = st->st_xprt;
1266a9148abdSDoug Rabson 		st->st_xprt = NULL;
1267a9148abdSDoug Rabson 		SVC_RELEASE(xprt);
1268a9148abdSDoug Rabson 	}
1269a9148abdSDoug Rabson 
1270a9148abdSDoug Rabson 	KASSERT(STAILQ_EMPTY(&st->st_reqs), ("stray reqs on exit"));
1271a9148abdSDoug Rabson 	LIST_REMOVE(st, st_link);
1272a9148abdSDoug Rabson 	pool->sp_threadcount--;
1273a9148abdSDoug Rabson 
1274a9148abdSDoug Rabson 	mtx_unlock(&pool->sp_lock);
1275a9148abdSDoug Rabson 
1276a9148abdSDoug Rabson 	cv_destroy(&st->st_cond);
1277a9148abdSDoug Rabson 	mem_free(st, sizeof(*st));
1278a9148abdSDoug Rabson 
1279a9148abdSDoug Rabson 	if (!ismaster)
1280a9148abdSDoug Rabson 		wakeup(pool);
1281a9148abdSDoug Rabson }
1282a9148abdSDoug Rabson 
1283a9148abdSDoug Rabson static void
1284a9148abdSDoug Rabson svc_thread_start(void *arg)
1285a9148abdSDoug Rabson {
1286a9148abdSDoug Rabson 
1287a9148abdSDoug Rabson 	svc_run_internal((SVCPOOL *) arg, FALSE);
1288a9148abdSDoug Rabson 	kthread_exit();
1289a9148abdSDoug Rabson }
1290a9148abdSDoug Rabson 
1291a9148abdSDoug Rabson static void
1292a9148abdSDoug Rabson svc_new_thread(SVCPOOL *pool)
1293a9148abdSDoug Rabson {
1294a9148abdSDoug Rabson 	struct thread *td;
1295a9148abdSDoug Rabson 
1296a9148abdSDoug Rabson 	pool->sp_threadcount++;
1297a9148abdSDoug Rabson 	kthread_add(svc_thread_start, pool,
1298a9148abdSDoug Rabson 	    pool->sp_proc, &td, 0, 0,
1299a9148abdSDoug Rabson 	    "%s: service", pool->sp_name);
1300dfdcada3SDoug Rabson }
1301dfdcada3SDoug Rabson 
1302dfdcada3SDoug Rabson void
1303dfdcada3SDoug Rabson svc_run(SVCPOOL *pool)
1304dfdcada3SDoug Rabson {
1305a9148abdSDoug Rabson 	int i;
1306a9148abdSDoug Rabson 	struct proc *p;
1307a9148abdSDoug Rabson 	struct thread *td;
1308dfdcada3SDoug Rabson 
1309a9148abdSDoug Rabson 	p = curproc;
1310a9148abdSDoug Rabson 	td = curthread;
1311a9148abdSDoug Rabson 	snprintf(td->td_name, sizeof(td->td_name),
1312a9148abdSDoug Rabson 	    "%s: master", pool->sp_name);
1313a9148abdSDoug Rabson 	pool->sp_state = SVCPOOL_ACTIVE;
1314a9148abdSDoug Rabson 	pool->sp_proc = p;
1315a9148abdSDoug Rabson 	pool->sp_lastcreatetime = time_uptime;
1316a9148abdSDoug Rabson 	pool->sp_threadcount = 1;
1317dfdcada3SDoug Rabson 
1318a9148abdSDoug Rabson 	for (i = 1; i < pool->sp_minthreads; i++) {
1319a9148abdSDoug Rabson 		svc_new_thread(pool);
1320dfdcada3SDoug Rabson 	}
1321dfdcada3SDoug Rabson 
1322a9148abdSDoug Rabson 	svc_run_internal(pool, TRUE);
1323dfdcada3SDoug Rabson 
1324dfdcada3SDoug Rabson 	mtx_lock(&pool->sp_lock);
1325a9148abdSDoug Rabson 	while (pool->sp_threadcount > 0)
1326a9148abdSDoug Rabson 		msleep(pool, &pool->sp_lock, 0, "svcexit", 0);
1327dfdcada3SDoug Rabson 	mtx_unlock(&pool->sp_lock);
1328dfdcada3SDoug Rabson }
1329dfdcada3SDoug Rabson 
1330dfdcada3SDoug Rabson void
1331dfdcada3SDoug Rabson svc_exit(SVCPOOL *pool)
1332dfdcada3SDoug Rabson {
1333a9148abdSDoug Rabson 	SVCTHREAD *st;
1334a9148abdSDoug Rabson 
1335dfdcada3SDoug Rabson 	mtx_lock(&pool->sp_lock);
1336a9148abdSDoug Rabson 
1337db7cdfeeSAlexander Motin 	if (pool->sp_state != SVCPOOL_CLOSING) {
1338a9148abdSDoug Rabson 		pool->sp_state = SVCPOOL_CLOSING;
1339a9148abdSDoug Rabson 		LIST_FOREACH(st, &pool->sp_idlethreads, st_ilink)
1340a9148abdSDoug Rabson 			cv_signal(&st->st_cond);
1341db7cdfeeSAlexander Motin 	}
1342a9148abdSDoug Rabson 
1343dfdcada3SDoug Rabson 	mtx_unlock(&pool->sp_lock);
1344dfdcada3SDoug Rabson }
1345a9148abdSDoug Rabson 
1346a9148abdSDoug Rabson bool_t
1347a9148abdSDoug Rabson svc_getargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
1348a9148abdSDoug Rabson {
1349a9148abdSDoug Rabson 	struct mbuf *m;
1350a9148abdSDoug Rabson 	XDR xdrs;
1351a9148abdSDoug Rabson 	bool_t stat;
1352a9148abdSDoug Rabson 
1353a9148abdSDoug Rabson 	m = rqstp->rq_args;
1354a9148abdSDoug Rabson 	rqstp->rq_args = NULL;
1355a9148abdSDoug Rabson 
1356a9148abdSDoug Rabson 	xdrmbuf_create(&xdrs, m, XDR_DECODE);
1357a9148abdSDoug Rabson 	stat = xargs(&xdrs, args);
1358a9148abdSDoug Rabson 	XDR_DESTROY(&xdrs);
1359a9148abdSDoug Rabson 
1360a9148abdSDoug Rabson 	return (stat);
1361a9148abdSDoug Rabson }
1362a9148abdSDoug Rabson 
1363a9148abdSDoug Rabson bool_t
1364a9148abdSDoug Rabson svc_freeargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
1365a9148abdSDoug Rabson {
1366a9148abdSDoug Rabson 	XDR xdrs;
1367a9148abdSDoug Rabson 
1368a9148abdSDoug Rabson 	if (rqstp->rq_addr) {
1369a9148abdSDoug Rabson 		free(rqstp->rq_addr, M_SONAME);
1370a9148abdSDoug Rabson 		rqstp->rq_addr = NULL;
1371a9148abdSDoug Rabson 	}
1372a9148abdSDoug Rabson 
1373a9148abdSDoug Rabson 	xdrs.x_op = XDR_FREE;
1374a9148abdSDoug Rabson 	return (xargs(&xdrs, args));
1375a9148abdSDoug Rabson }
1376a9148abdSDoug Rabson 
1377a9148abdSDoug Rabson void
1378a9148abdSDoug Rabson svc_freereq(struct svc_req *rqstp)
1379a9148abdSDoug Rabson {
1380a9148abdSDoug Rabson 	SVCTHREAD *st;
1381a9148abdSDoug Rabson 	SVCPOOL *pool;
1382a9148abdSDoug Rabson 
1383a9148abdSDoug Rabson 	st = rqstp->rq_thread;
1384a9148abdSDoug Rabson 	if (st) {
1385f8fb069dSAlexander Motin 		pool = st->st_pool;
1386a9148abdSDoug Rabson 		if (pool->sp_done)
1387a9148abdSDoug Rabson 			pool->sp_done(st, rqstp);
1388a9148abdSDoug Rabson 	}
1389a9148abdSDoug Rabson 
1390a9148abdSDoug Rabson 	if (rqstp->rq_auth.svc_ah_ops)
1391a9148abdSDoug Rabson 		SVCAUTH_RELEASE(&rqstp->rq_auth);
1392a9148abdSDoug Rabson 
1393a9148abdSDoug Rabson 	if (rqstp->rq_xprt) {
1394a9148abdSDoug Rabson 		SVC_RELEASE(rqstp->rq_xprt);
1395a9148abdSDoug Rabson 	}
1396a9148abdSDoug Rabson 
1397a9148abdSDoug Rabson 	if (rqstp->rq_addr)
1398a9148abdSDoug Rabson 		free(rqstp->rq_addr, M_SONAME);
1399a9148abdSDoug Rabson 
1400a9148abdSDoug Rabson 	if (rqstp->rq_args)
1401a9148abdSDoug Rabson 		m_freem(rqstp->rq_args);
1402a9148abdSDoug Rabson 
1403a9148abdSDoug Rabson 	free(rqstp, M_RPC);
1404a9148abdSDoug Rabson }
1405
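/*
 * Illustrative usage sketch (editor's addition, not part of svc.c and
 * never compiled -- hence the #if 0): a minimal kernel RPC service
 * built on the interfaces above.  svcpool_create(), svc_run(),
 * svc_exit(), svc_getargs(), svc_freeargs() and svc_freereq() are the
 * routines defined in this file; the program/version numbers, the
 * dispatch routine and the transport setup are hypothetical, and the
 * svc_sendreply()/svcerr_*() calls are assumed to have their kernel
 * RPC signatures (taking the request rather than the transport).
 * Because svc_executereq() hands ownership of the request to the
 * dispatch routine, the routine must finish with svc_freereq().
 */
#if 0	/* example only */
#define	EXAMPLE_PROG	0x20000099	/* hypothetical program number */
#define	EXAMPLE_VERS	1

static void
example_dispatch(struct svc_req *rqstp, SVCXPRT *xprt)
{
	int arg;

	switch (rqstp->rq_proc) {
	case 0:
		/* NULL procedure: reply with a void result. */
		svc_sendreply(rqstp, (xdrproc_t)xdr_void, NULL);
		break;
	case 1:
		/* Echo one int: decode the argument from the request mbufs. */
		if (!svc_getargs(rqstp, (xdrproc_t)xdr_int, &arg)) {
			svcerr_decode(rqstp);
			break;
		}
		svc_sendreply(rqstp, (xdrproc_t)xdr_int, &arg);
		svc_freeargs(rqstp, (xdrproc_t)xdr_int, &arg);
		break;
	default:
		svcerr_noproc(rqstp);
		break;
	}

	/* Dispatch owns the request; release it when done. */
	svc_freereq(rqstp);
}

static void
example_service(void)
{
	SVCPOOL *pool;

	pool = svcpool_create("example", NULL);
	/*
	 * Transports would be created and registered here, e.g. a
	 * socket wrapped by svc_vc_create()/svc_dg_create() and then
	 * advertised with svc_reg(xprt, EXAMPLE_PROG, EXAMPLE_VERS,
	 * example_dispatch, nconf).
	 */
	svc_run(pool);			/* returns after svc_exit(pool) */
	svcpool_destroy(pool);
}
#endif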