/*	$NetBSD: svc.c,v 1.21 2000/07/06 03:10:35 christos Exp $	*/

/*
 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
 * unrestricted use provided that this legend is included on all tape
 * media and as a part of the software program in whole or part.  Users
 * may copy or modify Sun RPC without charge, but are not authorized
 * to license or distribute it to anyone else except as part of a product or
 * program developed by the user.
 *
 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
 *
 * Sun RPC is provided with no support and without any obligation on the
 * part of Sun Microsystems, Inc. to assist in its use, correction,
 * modification or enhancement.
 *
 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
 * OR ANY PART THEREOF.
 *
 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
 * or profits or other special, indirect and consequential damages, even if
 * Sun has been advised of the possibility of such damages.
 *
 * Sun Microsystems, Inc.
 * 2550 Garcia Avenue
 * Mountain View, California  94043
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)svc.c 1.44 88/02/08 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)svc.c	2.4 88/08/11 4.0 RPCSRC";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * svc.c, Server-side remote procedure call interface.
 *
 * There are two sets of procedures here.  The xprt routines are
 * for handling transport handles.  The svc routines handle the
 * list of service routines.
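 *
 * A pool of transports and service threads is represented by an
 * SVCPOOL: consumers create one with svcpool_create(), register
 * dispatch routines with svc_reg(), service requests by calling
 * svc_run(), and shut down with svc_exit() and svcpool_destroy().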
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <sys/ucred.h>

#include <rpc/rpc.h>
#include <rpc/rpcb_clnt.h>
#include <rpc/replay.h>

#include <rpc/rpc_com.h>

#define SVC_VERSQUIET 0x0001		/* keep quiet about vers mismatch */
#define version_keepquiet(xp) (SVC_EXT(xp)->xp_flags & SVC_VERSQUIET)

static struct svc_callout *svc_find(SVCPOOL *pool, rpcprog_t, rpcvers_t,
    char *);
static void svc_new_thread(SVCPOOL *pool);
static void xprt_unregister_locked(SVCXPRT *xprt);

/* ***************  SVCXPRT related stuff **************** */

static int svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS);
static int svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS);

SVCPOOL*
svcpool_create(const char *name, struct sysctl_oid_list *sysctl_base)
{
	SVCPOOL *pool;

	pool = malloc(sizeof(SVCPOOL), M_RPC, M_WAITOK|M_ZERO);

	mtx_init(&pool->sp_lock, "sp_lock", NULL, MTX_DEF);
	pool->sp_name = name;
	pool->sp_state = SVCPOOL_INIT;
	pool->sp_proc = NULL;
	TAILQ_INIT(&pool->sp_xlist);
	TAILQ_INIT(&pool->sp_active);
	TAILQ_INIT(&pool->sp_callouts);
	LIST_INIT(&pool->sp_threads);
	LIST_INIT(&pool->sp_idlethreads);
	pool->sp_minthreads = 1;
	pool->sp_maxthreads = 1;
	pool->sp_threadcount = 0;

	/*
	 * Don't use more than a quarter of mbuf clusters or more than
	 * 45Mb buffering requests.
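	 * The low-water mark is set at two-thirds of the high-water
	 * mark so that, once throttled, the pool is not unthrottled
	 * until a reasonable amount of request space has been freed.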
	 */
	pool->sp_space_high = nmbclusters * MCLBYTES / 4;
	if (pool->sp_space_high > 45 << 20)
		pool->sp_space_high = 45 << 20;
	pool->sp_space_low = 2 * pool->sp_space_high / 3;

	sysctl_ctx_init(&pool->sp_sysctl);
	if (sysctl_base) {
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "minthreads", CTLTYPE_INT | CTLFLAG_RW,
		    pool, 0, svcpool_minthread_sysctl, "I", "");
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "maxthreads", CTLTYPE_INT | CTLFLAG_RW,
		    pool, 0, svcpool_maxthread_sysctl, "I", "");
		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "threads", CTLFLAG_RD, &pool->sp_threadcount, 0, "");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used", CTLFLAG_RD,
		    &pool->sp_space_used, 0,
		    "Space in parsed but not handled requests.");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used_highest", CTLFLAG_RD,
		    &pool->sp_space_used_highest, 0,
		    "Highest space used since reboot.");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_high", CTLFLAG_RW,
		    &pool->sp_space_high, 0,
		    "Maximum space in parsed but not handled requests.");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_low", CTLFLAG_RW,
		    &pool->sp_space_low, 0,
		    "Low water mark for request space.");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttled", CTLFLAG_RD,
		    &pool->sp_space_throttled, 0,
		    "Whether nfs requests are currently throttled");

		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttle_count", CTLFLAG_RD,
		    &pool->sp_space_throttle_count, 0,
		    "Count of times throttling based on request space has occurred");
	}

	return pool;
}

void
svcpool_destroy(SVCPOOL *pool)
{
	SVCXPRT *xprt, *nxprt;
	struct svc_callout *s;
	struct svcxprt_list cleanup;

	TAILQ_INIT(&cleanup);
	mtx_lock(&pool->sp_lock);

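	/*
	 * Unregister each transport, collecting them on a local list so
	 * that they can be released once the pool lock is dropped.
	 */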
	while (TAILQ_FIRST(&pool->sp_xlist)) {
		xprt = TAILQ_FIRST(&pool->sp_xlist);
		xprt_unregister_locked(xprt);
		TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
	}

	while (TAILQ_FIRST(&pool->sp_callouts)) {
		s = TAILQ_FIRST(&pool->sp_callouts);
		mtx_unlock(&pool->sp_lock);
		svc_unreg(pool, s->sc_prog, s->sc_vers);
		mtx_lock(&pool->sp_lock);
	}
	mtx_unlock(&pool->sp_lock);

	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		SVC_RELEASE(xprt);
	}

	mtx_destroy(&pool->sp_lock);

	if (pool->sp_rcache)
		replay_freecache(pool->sp_rcache);

	sysctl_ctx_free(&pool->sp_sysctl);
	free(pool, M_RPC);
}

static bool_t
svcpool_active(SVCPOOL *pool)
{
	enum svcpool_state state = pool->sp_state;

	if (state == SVCPOOL_INIT || state == SVCPOOL_CLOSING)
		return (FALSE);
	return (TRUE);
}

/*
 * Sysctl handler to set the minimum thread count on a pool
 */
static int
svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	int newminthreads, error, n;

	pool = oidp->oid_arg1;
	newminthreads = pool->sp_minthreads;
	error = sysctl_handle_int(oidp, &newminthreads, 0, req);
	if (error == 0 && newminthreads != pool->sp_minthreads) {
		if (newminthreads > pool->sp_maxthreads)
			return (EINVAL);
		mtx_lock(&pool->sp_lock);
		if (newminthreads > pool->sp_minthreads
		    && svcpool_active(pool)) {
			/*
			 * If the pool is running and we are
			 * increasing, create some more threads now.
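			 * The pool lock is dropped while they are
			 * created.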
			 */
			n = newminthreads - pool->sp_threadcount;
			if (n > 0) {
				mtx_unlock(&pool->sp_lock);
				while (n--)
					svc_new_thread(pool);
				mtx_lock(&pool->sp_lock);
			}
		}
		pool->sp_minthreads = newminthreads;
		mtx_unlock(&pool->sp_lock);
	}
	return (error);
}

/*
 * Sysctl handler to set the maximum thread count on a pool
 */
static int
svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	SVCTHREAD *st;
	int newmaxthreads, error;

	pool = oidp->oid_arg1;
	newmaxthreads = pool->sp_maxthreads;
	error = sysctl_handle_int(oidp, &newmaxthreads, 0, req);
	if (error == 0 && newmaxthreads != pool->sp_maxthreads) {
		if (newmaxthreads < pool->sp_minthreads)
			return (EINVAL);
		mtx_lock(&pool->sp_lock);
		if (newmaxthreads < pool->sp_maxthreads
		    && svcpool_active(pool)) {
			/*
			 * If the pool is running and we are
			 * decreasing, wake up some idle threads to
			 * encourage them to exit.
			 */
			LIST_FOREACH(st, &pool->sp_idlethreads, st_ilink)
				cv_signal(&st->st_cond);
		}
		pool->sp_maxthreads = newmaxthreads;
		mtx_unlock(&pool->sp_lock);
	}
	return (error);
}

/*
 * Activate a transport handle.
 */
void
xprt_register(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_lock(&pool->sp_lock);
	xprt->xp_registered = TRUE;
	xprt->xp_active = FALSE;
	TAILQ_INSERT_TAIL(&pool->sp_xlist, xprt, xp_link);
	mtx_unlock(&pool->sp_lock);
}

/*
 * De-activate a transport handle. Note: the locked version doesn't
 * release the transport - caller must do that after dropping the pool
 * lock.
 */
static void
xprt_unregister_locked(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	KASSERT(xprt->xp_registered == TRUE,
	    ("xprt_unregister_locked: not registered"));
	if (xprt->xp_active) {
		TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
		xprt->xp_active = FALSE;
	}
	TAILQ_REMOVE(&pool->sp_xlist, xprt, xp_link);
	xprt->xp_registered = FALSE;
}

void
xprt_unregister(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_lock(&pool->sp_lock);
	if (xprt->xp_registered == FALSE) {
		/* Already unregistered by another thread */
		mtx_unlock(&pool->sp_lock);
		return;
	}
	xprt_unregister_locked(xprt);
	mtx_unlock(&pool->sp_lock);

	SVC_RELEASE(xprt);
}

static void
xprt_assignthread(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;
	SVCTHREAD *st;

	/*
	 * Attempt to assign a service thread to this
	 * transport.
	 */
	LIST_FOREACH(st, &pool->sp_idlethreads, st_ilink) {
		if (st->st_xprt == NULL && STAILQ_EMPTY(&st->st_reqs))
			break;
	}
	if (st) {
		SVC_ACQUIRE(xprt);
		xprt->xp_thread = st;
		st->st_xprt = xprt;
		cv_signal(&st->st_cond);
	} else {
		/*
		 * See if we can create a new thread. The
		 * actual thread creation happens in
		 * svc_run_internal because our locking state
		 * is poorly defined (we are typically called
		 * from a socket upcall). Don't create more
		 * than one thread per second.
		 */
		if (pool->sp_state == SVCPOOL_ACTIVE
		    && pool->sp_lastcreatetime < time_uptime
		    && pool->sp_threadcount < pool->sp_maxthreads) {
			pool->sp_state = SVCPOOL_THREADWANTED;
		}
	}
}

void
xprt_active(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_lock(&pool->sp_lock);

	if (!xprt->xp_registered) {
		/*
		 * Race with xprt_unregister - we lose.
		 */
		mtx_unlock(&pool->sp_lock);
		return;
	}

	if (!xprt->xp_active) {
		TAILQ_INSERT_TAIL(&pool->sp_active, xprt, xp_alink);
		xprt->xp_active = TRUE;
		xprt_assignthread(xprt);
	}

	mtx_unlock(&pool->sp_lock);
}

void
xprt_inactive_locked(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	if (xprt->xp_active) {
		TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
		xprt->xp_active = FALSE;
	}
}

void
xprt_inactive(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;

	mtx_lock(&pool->sp_lock);
	xprt_inactive_locked(xprt);
	mtx_unlock(&pool->sp_lock);
}

/*
 * Add a service program to the callout list.
 * The dispatch routine will be called when an RPC request for this
 * program number comes in.
 */
bool_t
svc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
    void (*dispatch)(struct svc_req *, SVCXPRT *),
    const struct netconfig *nconf)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_callout *s;
	char *netid = NULL;
	int flag = 0;

	/* VARIABLES PROTECTED BY svc_lock: s, svc_head */

	if (xprt->xp_netid) {
		netid = strdup(xprt->xp_netid, M_RPC);
		flag = 1;
	} else if (nconf && nconf->nc_netid) {
		netid = strdup(nconf->nc_netid, M_RPC);
		flag = 1;
	} /* must have been created with svc_raw_create */
	if ((netid == NULL) && (flag == 1)) {
		return (FALSE);
	}

	mtx_lock(&pool->sp_lock);
	if ((s = svc_find(pool, prog, vers, netid)) != NULL) {
		if (netid)
			free(netid, M_RPC);
		if (s->sc_dispatch == dispatch)
			goto rpcb_it; /* program is being registered on another xprt */
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}
	s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT);
	if (s == NULL) {
		if (netid)
			free(netid, M_RPC);
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}

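	/*
	 * Fill in the new callout and add it to the pool's callout list.
	 */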
	s->sc_prog = prog;
	s->sc_vers = vers;
	s->sc_dispatch = dispatch;
	s->sc_netid = netid;
	TAILQ_INSERT_TAIL(&pool->sp_callouts, s, sc_link);

	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
		((SVCXPRT *) xprt)->xp_netid = strdup(netid, M_RPC);

rpcb_it:
	mtx_unlock(&pool->sp_lock);
	/* now register the information with the local binder service */
	if (nconf) {
		bool_t dummy;
		struct netconfig tnc;
		struct netbuf nb;
		tnc = *nconf;
		nb.buf = &xprt->xp_ltaddr;
		nb.len = xprt->xp_ltaddr.ss_len;
		dummy = rpcb_set(prog, vers, &tnc, &nb);
		return (dummy);
	}
	return (TRUE);
}

/*
 * Remove a service program from the callout list.
 */
void
svc_unreg(SVCPOOL *pool, const rpcprog_t prog, const rpcvers_t vers)
{
	struct svc_callout *s;

	/* unregister the information anyway */
	(void) rpcb_unset(prog, vers, NULL);
	mtx_lock(&pool->sp_lock);
	while ((s = svc_find(pool, prog, vers, NULL)) != NULL) {
		TAILQ_REMOVE(&pool->sp_callouts, s, sc_link);
		if (s->sc_netid)
			mem_free(s->sc_netid, sizeof (s->sc_netid) + 1);
		mem_free(s, sizeof (struct svc_callout));
	}
	mtx_unlock(&pool->sp_lock);
}

/* ********************** CALLOUT list related stuff ************* */

/*
 * Search the callout list for a program number, return the callout
 * struct.
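 * The caller must hold the pool lock; a NULL netid argument matches
 * any registered netid.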
 */
static struct svc_callout *
svc_find(SVCPOOL *pool, rpcprog_t prog, rpcvers_t vers, char *netid)
{
	struct svc_callout *s;

	mtx_assert(&pool->sp_lock, MA_OWNED);
	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
		if (s->sc_prog == prog && s->sc_vers == vers
		    && (netid == NULL || s->sc_netid == NULL ||
			strcmp(netid, s->sc_netid) == 0))
			break;
	}

	return (s);
}

/* ******************* REPLY GENERATION ROUTINES  ************ */

static bool_t
svc_sendreply_common(struct svc_req *rqstp, struct rpc_msg *rply,
    struct mbuf *body)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	bool_t ok;

	if (rqstp->rq_args) {
		m_freem(rqstp->rq_args);
		rqstp->rq_args = NULL;
	}

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    rply, svc_getrpccaller(rqstp), body);

	if (!SVCAUTH_WRAP(&rqstp->rq_auth, &body))
		return (FALSE);

	ok = SVC_REPLY(xprt, rply, rqstp->rq_addr, body);
	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	return (ok);
}

/*
 * Send a reply to an rpc request
 */
bool_t
svc_sendreply(struct svc_req *rqstp, xdrproc_t xdr_results, void * xdr_location)
{
	struct rpc_msg rply;
	struct mbuf *m;
	XDR xdrs;
	bool_t ok;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	MGET(m, M_WAIT, MT_DATA);
	MCLGET(m, M_WAIT);
	m->m_len = 0;
	xdrmbuf_create(&xdrs, m, XDR_ENCODE);
	ok = xdr_results(&xdrs, xdr_location);
	XDR_DESTROY(&xdrs);

	if (ok) {
		return (svc_sendreply_common(rqstp, &rply, m));
	} else {
		m_freem(m);
		return (FALSE);
	}
}

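/*
 * Send a reply whose results have already been XDR encoded into an
 * mbuf chain.
 */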
bool_t
svc_sendreply_mbuf(struct svc_req *rqstp, struct mbuf *m)
{
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	return (svc_sendreply_common(rqstp, &rply, m));
}

/*
 * No procedure error reply
 */
void
svcerr_noproc(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROC_UNAVAIL;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Can't decode args error reply
 */
void
svcerr_decode(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = GARBAGE_ARGS;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, (struct sockaddr *) &xprt->xp_rtaddr, NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Some system error
 */
void
svcerr_systemerr(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SYSTEM_ERR;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Authentication error reply
 */
void
svcerr_auth(struct svc_req *rqstp, enum auth_stat why)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_DENIED;
	rply.rjcted_rply.rj_stat = AUTH_ERROR;
	rply.rjcted_rply.rj_why = why;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Auth too weak error reply
 */
void
svcerr_weakauth(struct svc_req *rqstp)
{

	svcerr_auth(rqstp, AUTH_TOOWEAK);
}

/*
 * Program unavailable error reply
 */
void
svcerr_noprog(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_UNAVAIL;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Program version mismatch error reply
 */
void
svcerr_progvers(struct svc_req *rqstp, rpcvers_t low_vers, rpcvers_t high_vers)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_MISMATCH;
	rply.acpted_rply.ar_vers.low = (uint32_t)low_vers;
	rply.acpted_rply.ar_vers.high = (uint32_t)high_vers;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Allocate a new server transport structure. All fields are
 * initialized to zero and xp_p3 is initialized to point at an
 * extension structure to hold various flags and authentication
 * parameters.
 */
SVCXPRT *
svc_xprt_alloc()
{
	SVCXPRT *xprt;
	SVCXPRT_EXT *ext;

	xprt = mem_alloc(sizeof(SVCXPRT));
	memset(xprt, 0, sizeof(SVCXPRT));
	ext = mem_alloc(sizeof(SVCXPRT_EXT));
	memset(ext, 0, sizeof(SVCXPRT_EXT));
	xprt->xp_p3 = ext;
	refcount_init(&xprt->xp_refs, 1);

	return (xprt);
}

/*
 * Free a server transport structure.
 */
void
svc_xprt_free(xprt)
	SVCXPRT *xprt;
{

	mem_free(xprt->xp_p3, sizeof(SVCXPRT_EXT));
	mem_free(xprt, sizeof(SVCXPRT));
}

/* ******************* SERVER INPUT STUFF ******************* */

/*
 * Read RPC requests from a transport and queue them to be
 * executed. We handle authentication and replay cache replies here.
 * Actually dispatching the RPC is deferred till svc_executereq.
 */
static enum xprt_stat
svc_getreq(SVCXPRT *xprt, struct svc_req **rqstp_ret)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_req *r;
	struct rpc_msg msg;
	struct mbuf *args;
	enum xprt_stat stat;

	/* now receive msgs from xprt (support batch calls) */
	r = malloc(sizeof(*r), M_RPC, M_WAITOK|M_ZERO);

	msg.rm_call.cb_cred.oa_base = r->rq_credarea;
	msg.rm_call.cb_verf.oa_base = &r->rq_credarea[MAX_AUTH_BYTES];
	r->rq_clntcred = &r->rq_credarea[2*MAX_AUTH_BYTES];
	if (SVC_RECV(xprt, &msg, &r->rq_addr, &args)) {
		enum auth_stat why;

		/*
		 * Handle replays and authenticate before queuing the
		 * request to be executed.
		 */
		SVC_ACQUIRE(xprt);
		r->rq_xprt = xprt;
		if (pool->sp_rcache) {
			struct rpc_msg repmsg;
			struct mbuf *repbody;
			enum replay_state rs;
			rs = replay_find(pool->sp_rcache, &msg,
			    svc_getrpccaller(r), &repmsg, &repbody);
			switch (rs) {
			case RS_NEW:
				break;
			case RS_DONE:
				SVC_REPLY(xprt, &repmsg, r->rq_addr,
				    repbody);
				if (r->rq_addr) {
					free(r->rq_addr, M_SONAME);
					r->rq_addr = NULL;
				}
				goto call_done;

			default:
				goto call_done;
			}
		}

		r->rq_xid = msg.rm_xid;
		r->rq_prog = msg.rm_call.cb_prog;
		r->rq_vers = msg.rm_call.cb_vers;
		r->rq_proc = msg.rm_call.cb_proc;
		r->rq_size = sizeof(*r) + m_length(args, NULL);
		r->rq_args = args;
		if ((why = _authenticate(r, &msg)) != AUTH_OK) {
			/*
			 * RPCSEC_GSS uses this return code
			 * for requests that form part of its
			 * context establishment protocol and
			 * should not be dispatched to the
			 * application.
			 */
			if (why != RPCSEC_GSS_NODISPATCH)
				svcerr_auth(r, why);
			goto call_done;
		}

		if (!SVCAUTH_UNWRAP(&r->rq_auth, &r->rq_args)) {
			svcerr_decode(r);
			goto call_done;
		}

		/*
		 * Everything checks out, return request to caller.
		 */
		*rqstp_ret = r;
		r = NULL;
	}
call_done:
	if (r) {
		svc_freereq(r);
		r = NULL;
	}
	if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
		xprt_unregister(xprt);
	}

	return (stat);
}

static void
svc_executereq(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	SVCPOOL *pool = xprt->xp_pool;
	int prog_found;
	rpcvers_t low_vers;
	rpcvers_t high_vers;
	struct svc_callout *s;

	/* now match message with a registered service */
	prog_found = FALSE;
	low_vers = (rpcvers_t) -1L;
	high_vers = (rpcvers_t) 0L;
	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
		if (s->sc_prog == rqstp->rq_prog) {
			if (s->sc_vers == rqstp->rq_vers) {
				/*
				 * We hand ownership of rqstp to the
				 * dispatch method - it must call
				 * svc_freereq.
				 */
				(*s->sc_dispatch)(rqstp, xprt);
				return;
			}  /* found correct version */
			prog_found = TRUE;
			if (s->sc_vers < low_vers)
				low_vers = s->sc_vers;
			if (s->sc_vers > high_vers)
				high_vers = s->sc_vers;
		}   /* found correct program */
	}

	/*
	 * if we got here, the program or version
	 * is not served ...
	 */
	if (prog_found)
		svcerr_progvers(rqstp, low_vers, high_vers);
	else
		svcerr_noprog(rqstp);

	svc_freereq(rqstp);
}

static void
svc_checkidle(SVCPOOL *pool)
{
	SVCXPRT *xprt, *nxprt;
	time_t timo;
	struct svcxprt_list cleanup;

	TAILQ_INIT(&cleanup);
	TAILQ_FOREACH_SAFE(xprt, &pool->sp_xlist, xp_link, nxprt) {
		/*
		 * Only some transports have idle timers. Don't time
		 * something out which is just waking up.
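		 * Transports that have exceeded their idle timeout are
		 * unregistered and collected for release once the pool
		 * lock has been dropped.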
		 */
		if (!xprt->xp_idletimeout || xprt->xp_thread)
			continue;

		timo = xprt->xp_lastactive + xprt->xp_idletimeout;
		if (time_uptime > timo) {
			xprt_unregister_locked(xprt);
			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
		}
	}

	mtx_unlock(&pool->sp_lock);
	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		SVC_RELEASE(xprt);
	}
	mtx_lock(&pool->sp_lock);

}

static void
svc_assign_waiting_sockets(SVCPOOL *pool)
{
	SVCXPRT *xprt;

	TAILQ_FOREACH(xprt, &pool->sp_active, xp_alink) {
		if (!xprt->xp_thread) {
			xprt_assignthread(xprt);
		}
	}
}

static bool_t
svc_request_space_available(SVCPOOL *pool)
{

	mtx_assert(&pool->sp_lock, MA_OWNED);

	if (pool->sp_space_throttled) {
		/*
		 * Below the low-water yet? If so, assign any waiting sockets.
		 */
		if (pool->sp_space_used < pool->sp_space_low) {
			pool->sp_space_throttled = FALSE;
			svc_assign_waiting_sockets(pool);
			return TRUE;
		}

		return FALSE;
	} else {
		if (pool->sp_space_used
		    >= pool->sp_space_high) {
			pool->sp_space_throttled = TRUE;
			pool->sp_space_throttle_count++;
			return FALSE;
		}

		return TRUE;
	}
}

static void
svc_run_internal(SVCPOOL *pool, bool_t ismaster)
{
	SVCTHREAD *st, *stpref;
	SVCXPRT *xprt;
	enum xprt_stat stat;
	struct svc_req *rqstp;
	int error;

	st = mem_alloc(sizeof(*st));
	st->st_xprt = NULL;
	STAILQ_INIT(&st->st_reqs);
	cv_init(&st->st_cond, "rpcsvc");

	mtx_lock(&pool->sp_lock);
	LIST_INSERT_HEAD(&pool->sp_threads, st, st_link);

	/*
	 * If we are a new thread which was spawned to cope with
	 * increased load, set the state back to SVCPOOL_ACTIVE.
	 */
	if (pool->sp_state == SVCPOOL_THREADSTARTING)
		pool->sp_state = SVCPOOL_ACTIVE;

	while (pool->sp_state != SVCPOOL_CLOSING) {
		/*
		 * Check for idle transports once per second.
		 */
		if (time_uptime > pool->sp_lastidlecheck) {
			pool->sp_lastidlecheck = time_uptime;
			svc_checkidle(pool);
		}

		xprt = st->st_xprt;
		if (!xprt && STAILQ_EMPTY(&st->st_reqs)) {
			/*
			 * Enforce maxthreads count.
			 */
			if (pool->sp_threadcount > pool->sp_maxthreads)
				break;

			/*
			 * Before sleeping, see if we can find an
			 * active transport which isn't being serviced
			 * by a thread.
			 */
			if (svc_request_space_available(pool)) {
				TAILQ_FOREACH(xprt, &pool->sp_active,
				    xp_alink) {
					if (!xprt->xp_thread) {
						SVC_ACQUIRE(xprt);
						xprt->xp_thread = st;
						st->st_xprt = xprt;
						break;
					}
				}
			}
			if (st->st_xprt)
				continue;

			LIST_INSERT_HEAD(&pool->sp_idlethreads, st, st_ilink);
			error = cv_timedwait_sig(&st->st_cond, &pool->sp_lock,
			    5 * hz);
			LIST_REMOVE(st, st_ilink);

			/*
			 * Reduce worker thread count when idle.
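			 * Only dynamically created threads in excess of
			 * the configured minimum exit here; the master
			 * thread never does.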
			 */
			if (error == EWOULDBLOCK) {
				if (!ismaster
				    && (pool->sp_threadcount
					> pool->sp_minthreads)
				    && !st->st_xprt
				    && STAILQ_EMPTY(&st->st_reqs))
					break;
			}
			if (error == EWOULDBLOCK)
				continue;
			if (error) {
				if (pool->sp_state != SVCPOOL_CLOSING) {
					mtx_unlock(&pool->sp_lock);
					svc_exit(pool);
					mtx_lock(&pool->sp_lock);
				}
				break;
			}

			if (pool->sp_state == SVCPOOL_THREADWANTED) {
				pool->sp_state = SVCPOOL_THREADSTARTING;
				pool->sp_lastcreatetime = time_uptime;
				mtx_unlock(&pool->sp_lock);
				svc_new_thread(pool);
				mtx_lock(&pool->sp_lock);
			}
			continue;
		}

		if (xprt) {
			/*
			 * Drain the transport socket and queue up any
			 * RPCs.
			 */
			xprt->xp_lastactive = time_uptime;
			stat = XPRT_IDLE;
			do {
				if (!svc_request_space_available(pool))
					break;
				rqstp = NULL;
				mtx_unlock(&pool->sp_lock);
				stat = svc_getreq(xprt, &rqstp);
				mtx_lock(&pool->sp_lock);
				if (rqstp) {
					/*
					 * See if the application has
					 * a preference for some other
					 * thread.
					 */
					stpref = st;
					if (pool->sp_assign)
						stpref = pool->sp_assign(st,
						    rqstp);

					pool->sp_space_used +=
						rqstp->rq_size;
					if (pool->sp_space_used
					    > pool->sp_space_used_highest)
						pool->sp_space_used_highest =
							pool->sp_space_used;
					rqstp->rq_thread = stpref;
					STAILQ_INSERT_TAIL(&stpref->st_reqs,
					    rqstp, rq_link);
					stpref->st_reqcount++;

					/*
					 * If we assigned the request
					 * to another thread, make
					 * sure it's awake and continue
					 * reading from the
					 * socket. Otherwise, try to
					 * find some other thread to
					 * read from the socket and
					 * execute the request
					 * immediately.
					 */
					if (stpref != st) {
						cv_signal(&stpref->st_cond);
						continue;
					} else {
						break;
					}
				}
			} while (stat == XPRT_MOREREQS
			    && pool->sp_state != SVCPOOL_CLOSING);

			/*
			 * Move this transport to the end of the
			 * active list to ensure fairness when
			 * multiple transports are active. If this was
			 * the last queued request, svc_getreq will
			 * end up calling xprt_inactive to remove from
			 * the active list.
			 */
			xprt->xp_thread = NULL;
			st->st_xprt = NULL;
			if (xprt->xp_active) {
				xprt_assignthread(xprt);
				TAILQ_REMOVE(&pool->sp_active, xprt, xp_alink);
				TAILQ_INSERT_TAIL(&pool->sp_active, xprt,
				    xp_alink);
			}
			mtx_unlock(&pool->sp_lock);
			SVC_RELEASE(xprt);
			mtx_lock(&pool->sp_lock);
		}

		/*
		 * Execute what we have queued.
		 */
		while ((rqstp = STAILQ_FIRST(&st->st_reqs)) != NULL) {
			size_t sz = rqstp->rq_size;
			mtx_unlock(&pool->sp_lock);
			svc_executereq(rqstp);
			mtx_lock(&pool->sp_lock);
			pool->sp_space_used -= sz;
		}
	}

	if (st->st_xprt) {
		xprt = st->st_xprt;
		st->st_xprt = NULL;
		SVC_RELEASE(xprt);
	}

	KASSERT(STAILQ_EMPTY(&st->st_reqs), ("stray reqs on exit"));
	LIST_REMOVE(st, st_link);
	pool->sp_threadcount--;

	mtx_unlock(&pool->sp_lock);

	cv_destroy(&st->st_cond);
	mem_free(st, sizeof(*st));

	if (!ismaster)
		wakeup(pool);
}

static void
svc_thread_start(void *arg)
{

	svc_run_internal((SVCPOOL *) arg, FALSE);
	kthread_exit();
}

static void
svc_new_thread(SVCPOOL *pool)
{
	struct thread *td;

	pool->sp_threadcount++;
	kthread_add(svc_thread_start, pool,
	    pool->sp_proc, &td, 0, 0,
	    "%s: service", pool->sp_name);
}

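/*
 * Run the RPC service.  The calling thread becomes the pool's master
 * thread; additional worker threads are created up to sp_minthreads
 * and svc_run() normally returns only after svc_exit() has been
 * called and all worker threads have finished.
 */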
void
svc_run(SVCPOOL *pool)
{
	int i;
	struct proc *p;
	struct thread *td;

	p = curproc;
	td = curthread;
	snprintf(td->td_name, sizeof(td->td_name),
	    "%s: master", pool->sp_name);
	pool->sp_state = SVCPOOL_ACTIVE;
	pool->sp_proc = p;
	pool->sp_lastcreatetime = time_uptime;
	pool->sp_threadcount = 1;

	for (i = 1; i < pool->sp_minthreads; i++) {
		svc_new_thread(pool);
	}

	svc_run_internal(pool, TRUE);

	mtx_lock(&pool->sp_lock);
	while (pool->sp_threadcount > 0)
		msleep(pool, &pool->sp_lock, 0, "svcexit", 0);
	mtx_unlock(&pool->sp_lock);
}

void
svc_exit(SVCPOOL *pool)
{
	SVCTHREAD *st;

	mtx_lock(&pool->sp_lock);

	pool->sp_state = SVCPOOL_CLOSING;
	LIST_FOREACH(st, &pool->sp_idlethreads, st_ilink)
		cv_signal(&st->st_cond);

	mtx_unlock(&pool->sp_lock);
}

bool_t
svc_getargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
{
	struct mbuf *m;
	XDR xdrs;
	bool_t stat;

	m = rqstp->rq_args;
	rqstp->rq_args = NULL;

	xdrmbuf_create(&xdrs, m, XDR_DECODE);
	stat = xargs(&xdrs, args);
	XDR_DESTROY(&xdrs);

	return (stat);
}

bool_t
svc_freeargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
{
	XDR xdrs;

	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	xdrs.x_op = XDR_FREE;
	return (xargs(&xdrs, args));
}

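/*
 * Release a request obtained from svc_getreq(), removing it from its
 * owning thread's queue and dropping the authentication, transport,
 * address and argument state that it still holds.
 */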
void
svc_freereq(struct svc_req *rqstp)
{
	SVCTHREAD *st;
	SVCXPRT *xprt;
	SVCPOOL *pool;

	st = rqstp->rq_thread;
	xprt = rqstp->rq_xprt;
	if (xprt)
		pool = xprt->xp_pool;
	else
		pool = NULL;
	if (st) {
		mtx_lock(&pool->sp_lock);
		KASSERT(rqstp == STAILQ_FIRST(&st->st_reqs),
		    ("Freeing request out of order"));
		STAILQ_REMOVE_HEAD(&st->st_reqs, rq_link);
		st->st_reqcount--;
		if (pool->sp_done)
			pool->sp_done(st, rqstp);
		mtx_unlock(&pool->sp_lock);
	}

	if (rqstp->rq_auth.svc_ah_ops)
		SVCAUTH_RELEASE(&rqstp->rq_auth);

	if (rqstp->rq_xprt) {
		SVC_RELEASE(rqstp->rq_xprt);
	}

	if (rqstp->rq_addr)
		free(rqstp->rq_addr, M_SONAME);

	if (rqstp->rq_args)
		m_freem(rqstp->rq_args);

	free(rqstp, M_RPC);
}