1dfdcada3SDoug Rabson /* $NetBSD: svc.c,v 1.21 2000/07/06 03:10:35 christos Exp $ */ 2dfdcada3SDoug Rabson 32e322d37SHiroki Sato /*- 42e322d37SHiroki Sato * Copyright (c) 2009, Sun Microsystems, Inc. 52e322d37SHiroki Sato * All rights reserved. 6dfdcada3SDoug Rabson * 72e322d37SHiroki Sato * Redistribution and use in source and binary forms, with or without 82e322d37SHiroki Sato * modification, are permitted provided that the following conditions are met: 92e322d37SHiroki Sato * - Redistributions of source code must retain the above copyright notice, 102e322d37SHiroki Sato * this list of conditions and the following disclaimer. 112e322d37SHiroki Sato * - Redistributions in binary form must reproduce the above copyright notice, 122e322d37SHiroki Sato * this list of conditions and the following disclaimer in the documentation 132e322d37SHiroki Sato * and/or other materials provided with the distribution. 142e322d37SHiroki Sato * - Neither the name of Sun Microsystems, Inc. nor the names of its 152e322d37SHiroki Sato * contributors may be used to endorse or promote products derived 162e322d37SHiroki Sato * from this software without specific prior written permission. 17dfdcada3SDoug Rabson * 182e322d37SHiroki Sato * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 192e322d37SHiroki Sato * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 202e322d37SHiroki Sato * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 212e322d37SHiroki Sato * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 222e322d37SHiroki Sato * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 232e322d37SHiroki Sato * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 242e322d37SHiroki Sato * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 252e322d37SHiroki Sato * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 262e322d37SHiroki Sato * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 272e322d37SHiroki Sato * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 282e322d37SHiroki Sato * POSSIBILITY OF SUCH DAMAGE. 29dfdcada3SDoug Rabson */ 30dfdcada3SDoug Rabson 31dfdcada3SDoug Rabson #if defined(LIBC_SCCS) && !defined(lint) 32dfdcada3SDoug Rabson static char *sccsid2 = "@(#)svc.c 1.44 88/02/08 Copyr 1984 Sun Micro"; 33dfdcada3SDoug Rabson static char *sccsid = "@(#)svc.c 2.4 88/08/11 4.0 RPCSRC"; 34dfdcada3SDoug Rabson #endif 35dfdcada3SDoug Rabson #include <sys/cdefs.h> 36dfdcada3SDoug Rabson __FBSDID("$FreeBSD$"); 37dfdcada3SDoug Rabson 38dfdcada3SDoug Rabson /* 39dfdcada3SDoug Rabson * svc.c, Server-side remote procedure call interface. 40dfdcada3SDoug Rabson * 41dfdcada3SDoug Rabson * There are two sets of procedures here. The xprt routines are 42dfdcada3SDoug Rabson * for handling transport handles. The svc routines handle the 43dfdcada3SDoug Rabson * list of service routines. 44dfdcada3SDoug Rabson * 45dfdcada3SDoug Rabson * Copyright (C) 1984, Sun Microsystems, Inc. 
46dfdcada3SDoug Rabson */ 47dfdcada3SDoug Rabson 48dfdcada3SDoug Rabson #include <sys/param.h> 49dfdcada3SDoug Rabson #include <sys/lock.h> 50dfdcada3SDoug Rabson #include <sys/kernel.h> 51a9148abdSDoug Rabson #include <sys/kthread.h> 52dfdcada3SDoug Rabson #include <sys/malloc.h> 53a9148abdSDoug Rabson #include <sys/mbuf.h> 54dfdcada3SDoug Rabson #include <sys/mutex.h> 55a9148abdSDoug Rabson #include <sys/proc.h> 56dfdcada3SDoug Rabson #include <sys/queue.h> 57a9148abdSDoug Rabson #include <sys/socketvar.h> 58dfdcada3SDoug Rabson #include <sys/systm.h> 59b563304cSAlexander Motin #include <sys/smp.h> 60d473bac7SAlexander Motin #include <sys/sx.h> 61dfdcada3SDoug Rabson #include <sys/ucred.h> 62dfdcada3SDoug Rabson 63dfdcada3SDoug Rabson #include <rpc/rpc.h> 64dfdcada3SDoug Rabson #include <rpc/rpcb_clnt.h> 65a9148abdSDoug Rabson #include <rpc/replay.h> 66dfdcada3SDoug Rabson 67ee31b83aSDoug Rabson #include <rpc/rpc_com.h> 68dfdcada3SDoug Rabson 69dfdcada3SDoug Rabson #define SVC_VERSQUIET 0x0001 /* keep quiet about vers mismatch */ 70a9148abdSDoug Rabson #define version_keepquiet(xp) (SVC_EXT(xp)->xp_flags & SVC_VERSQUIET) 71dfdcada3SDoug Rabson 72dfdcada3SDoug Rabson static struct svc_callout *svc_find(SVCPOOL *pool, rpcprog_t, rpcvers_t, 73dfdcada3SDoug Rabson char *); 74b563304cSAlexander Motin static void svc_new_thread(SVCGROUP *grp); 75a9148abdSDoug Rabson static void xprt_unregister_locked(SVCXPRT *xprt); 763c42b5bfSGarrett Wollman static void svc_change_space_used(SVCPOOL *pool, long delta); 77f8fb069dSAlexander Motin static bool_t svc_request_space_available(SVCPOOL *pool); 78dfdcada3SDoug Rabson 79dfdcada3SDoug Rabson /* *************** SVCXPRT related stuff **************** */ 80dfdcada3SDoug Rabson 81a9148abdSDoug Rabson static int svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS); 82a9148abdSDoug Rabson static int svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS); 83b563304cSAlexander Motin static int svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS); 
/*
 * Create and initialize an RPC service pool.  "name" is kept for
 * identification and "sysctl_base" (may be NULL) is the sysctl node
 * under which the pool's tunables and statistics are attached.
 * Returns the new pool; allocation sleeps (M_WAITOK) and cannot fail.
 */
SVCPOOL*
svcpool_create(const char *name, struct sysctl_oid_list *sysctl_base)
{
	SVCPOOL *pool;
	SVCGROUP *grp;
	int g;

	pool = malloc(sizeof(SVCPOOL), M_RPC, M_WAITOK|M_ZERO);

	mtx_init(&pool->sp_lock, "sp_lock", NULL, MTX_DEF);
	pool->sp_name = name;
	pool->sp_state = SVCPOOL_INIT;
	pool->sp_proc = NULL;
	TAILQ_INIT(&pool->sp_callouts);
	TAILQ_INIT(&pool->sp_lcallouts);
	pool->sp_minthreads = 1;
	pool->sp_maxthreads = 1;
	pool->sp_groupcount = 1;
	/*
	 * Initialize every group slot even though only sp_groupcount
	 * of them are in use; the count may be raised later.
	 */
	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_init(&grp->sg_lock, "sg_lock", NULL, MTX_DEF);
		grp->sg_pool = pool;
		grp->sg_state = SVCPOOL_ACTIVE;
		TAILQ_INIT(&grp->sg_xlist);
		TAILQ_INIT(&grp->sg_active);
		LIST_INIT(&grp->sg_idlethreads);
		grp->sg_minthreads = 1;
		grp->sg_maxthreads = 1;
	}

	/*
	 * Don't use more than a quarter of mbuf clusters.  Nota bene:
	 * nmbclusters is an int, but nmbclusters*MCLBYTES may overflow
	 * on LP64 architectures, so cast to u_long to avoid undefined
	 * behavior.  (ILP32 architectures cannot have nmbclusters
	 * large enough to overflow for other reasons.)
	 */
	pool->sp_space_high = (u_long)nmbclusters * MCLBYTES / 4;
	pool->sp_space_low = (pool->sp_space_high / 3) * 2;

	sysctl_ctx_init(&pool->sp_sysctl);
	if (sysctl_base) {
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "minthreads", CTLTYPE_INT | CTLFLAG_RW,
		    pool, 0, svcpool_minthread_sysctl, "I",
		    "Minimal number of threads");
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "maxthreads", CTLTYPE_INT | CTLFLAG_RW,
		    pool, 0, svcpool_maxthread_sysctl, "I",
		    "Maximal number of threads");
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "threads", CTLTYPE_INT | CTLFLAG_RD,
		    pool, 0, svcpool_threads_sysctl, "I",
		    "Current number of threads");
		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "groups", CTLFLAG_RD, &pool->sp_groupcount, 0,
		    "Number of thread groups");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used", CTLFLAG_RD,
		    &pool->sp_space_used,
		    "Space in parsed but not handled requests.");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used_highest", CTLFLAG_RD,
		    &pool->sp_space_used_highest,
		    "Highest space used since reboot.");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_high", CTLFLAG_RW,
		    &pool->sp_space_high,
		    "Maximum space in parsed but not handled requests.");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_low", CTLFLAG_RW,
		    &pool->sp_space_low,
		    "Low water mark for request space.");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttled", CTLFLAG_RD,
		    &pool->sp_space_throttled, 0,
		    "Whether nfs requests are currently throttled");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttle_count", CTLFLAG_RD,
		    &pool->sp_space_throttle_count, 0,
		    "Count of times throttling based on request space has occurred");
	}

	return pool;
}

/*
 * Tear down a service pool: unregister and release every transport in
 * every group, remove all callouts, then free locks, the replay cache
 * (if any), the sysctl context and the pool itself.  Caller must
 * ensure no service threads are still running on the pool.
 */
void
svcpool_destroy(SVCPOOL *pool)
{
	SVCGROUP *grp;
	SVCXPRT *xprt, *nxprt;
	struct svc_callout *s;
	struct svc_loss_callout *sl;
	struct svcxprt_list cleanup;
	int g;

	TAILQ_INIT(&cleanup);

	/*
	 * Collect the transports on a private list first; SVC_RELEASE
	 * must not be called while holding a group lock.
	 */
	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		while ((xprt = TAILQ_FIRST(&grp->sg_xlist)) != NULL) {
			xprt_unregister_locked(xprt);
			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
		}
		mtx_unlock(&grp->sg_lock);
	}
	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		SVC_RELEASE(xprt);
	}

	/*
	 * svc_unreg()/svc_loss_unreg() take sp_lock themselves, so the
	 * lock is dropped around each call.
	 */
	mtx_lock(&pool->sp_lock);
	while ((s = TAILQ_FIRST(&pool->sp_callouts)) != NULL) {
		mtx_unlock(&pool->sp_lock);
		svc_unreg(pool, s->sc_prog, s->sc_vers);
		mtx_lock(&pool->sp_lock);
	}
	while ((sl = TAILQ_FIRST(&pool->sp_lcallouts)) != NULL) {
		mtx_unlock(&pool->sp_lock);
		svc_loss_unreg(pool, sl->slc_dispatch);
		mtx_lock(&pool->sp_lock);
	}
	mtx_unlock(&pool->sp_lock);

	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_destroy(&grp->sg_lock);
	}
	mtx_destroy(&pool->sp_lock);

	if (pool->sp_rcache)
		replay_freecache(pool->sp_rcache);

	sysctl_ctx_free(&pool->sp_sysctl);
	free(pool, M_RPC);
}

/*
 * Sysctl handler to get the present thread count on a pool
 * (sum of the per-group thread counts).
 */
static int
svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	int threads, error, g;

	pool = oidp->oid_arg1;
	threads = 0;
	mtx_lock(&pool->sp_lock);
	for (g = 0; g < pool->sp_groupcount; g++)
		threads += pool->sp_groups[g].sg_threadcount;
	mtx_unlock(&pool->sp_lock);
	error = sysctl_handle_int(oidp, &threads, 0, req);
	return (error);
}

/*
 * Sysctl handler to set the minimum thread count on a pool.
 * Rejects values above sp_maxthreads with EINVAL; the per-group
 * minimum is the pool minimum divided evenly among groups, at least 1.
 */
static int
svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	int newminthreads, error, g;

	pool = oidp->oid_arg1;
	newminthreads = pool->sp_minthreads;
	error = sysctl_handle_int(oidp, &newminthreads, 0, req);
	if (error == 0 && newminthreads != pool->sp_minthreads) {
		if (newminthreads > pool->sp_maxthreads)
			return (EINVAL);
		mtx_lock(&pool->sp_lock);
		pool->sp_minthreads = newminthreads;
		for (g = 0; g < pool->sp_groupcount; g++) {
			pool->sp_groups[g].sg_minthreads = max(1,
			    pool->sp_minthreads / pool->sp_groupcount);
		}
		mtx_unlock(&pool->sp_lock);
	}
	return (error);
}

/*
 * Sysctl handler to set the maximum thread count on a pool.
 * Rejects values below sp_minthreads with EINVAL; the per-group
 * maximum is the pool maximum divided evenly among groups, at least 1.
 */
static int
svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS)
{
	SVCPOOL *pool;
	int newmaxthreads, error, g;

	pool = oidp->oid_arg1;
	newmaxthreads = pool->sp_maxthreads;
	error = sysctl_handle_int(oidp, &newmaxthreads, 0, req);
	if (error == 0 && newmaxthreads != pool->sp_maxthreads) {
		if (newmaxthreads < pool->sp_minthreads)
			return (EINVAL);
		mtx_lock(&pool->sp_lock);
		pool->sp_maxthreads = newmaxthreads;
		for (g = 0; g < pool->sp_groupcount; g++) {
			pool->sp_groups[g].sg_maxthreads = max(1,
			    pool->sp_maxthreads / pool->sp_groupcount);
		}
		mtx_unlock(&pool->sp_lock);
	}
	return (error);
}
/*
 * Activate a transport handle: take a reference on it, assign it to a
 * thread group round-robin (via the sp_nextgroup atomic counter) and
 * add it to that group's transport list.  The transport starts out
 * registered but inactive.
 */
void
xprt_register(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;
	SVCGROUP *grp;
	int g;

	/* Reference held by the group list; dropped in xprt_unregister(). */
	SVC_ACQUIRE(xprt);
	g = atomic_fetchadd_int(&pool->sp_nextgroup, 1) % pool->sp_groupcount;
	xprt->xp_group = grp = &pool->sp_groups[g];
	mtx_lock(&grp->sg_lock);
	xprt->xp_registered = TRUE;
	xprt->xp_active = FALSE;
	TAILQ_INSERT_TAIL(&grp->sg_xlist, xprt, xp_link);
	mtx_unlock(&grp->sg_lock);
}
/*
 * De-activate a transport handle.  Note: the locked version doesn't
 * release the transport - caller must do that after dropping the
 * group lock (the code asserts sg_lock, the per-group lock, is held).
 */
static void
xprt_unregister_locked(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_assert(&grp->sg_lock, MA_OWNED);
	KASSERT(xprt->xp_registered == TRUE,
	    ("xprt_unregister_locked: not registered"));
	/* Pull it off the active queue first, then off the group list. */
	xprt_inactive_locked(xprt);
	TAILQ_REMOVE(&grp->sg_xlist, xprt, xp_link);
	xprt->xp_registered = FALSE;
}

/*
 * Unlocked wrapper: tolerates losing a race with another thread that
 * already unregistered the transport, and drops the reference that
 * xprt_register() took.
 */
void
xprt_unregister(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_lock(&grp->sg_lock);
	if (xprt->xp_registered == FALSE) {
		/* Already unregistered by another thread */
		mtx_unlock(&grp->sg_lock);
		return;
	}
	xprt_unregister_locked(xprt);
	mtx_unlock(&grp->sg_lock);

	SVC_RELEASE(xprt);
}
/*
 * Attempt to assign a service thread to this transport.  Returns TRUE
 * if an idle thread was found and woken; otherwise may flag the group
 * as wanting a new thread and returns FALSE.  Called with sg_lock held.
 */
static int
xprt_assignthread(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;
	SVCTHREAD *st;

	mtx_assert(&grp->sg_lock, MA_OWNED);
	st = LIST_FIRST(&grp->sg_idlethreads);
	if (st) {
		/* Hand the transport (with a reference) to the thread. */
		LIST_REMOVE(st, st_ilink);
		SVC_ACQUIRE(xprt);
		xprt->xp_thread = st;
		st->st_xprt = xprt;
		cv_signal(&st->st_cond);
		return (TRUE);
	} else {
		/*
		 * See if we can create a new thread. The
		 * actual thread creation happens in
		 * svc_run_internal because our locking state
		 * is poorly defined (we are typically called
		 * from a socket upcall). Don't create more
		 * than one thread per second.
		 */
		if (grp->sg_state == SVCPOOL_ACTIVE
		    && grp->sg_lastcreatetime < time_uptime
		    && grp->sg_threadcount < grp->sg_maxthreads) {
			grp->sg_state = SVCPOOL_THREADWANTED;
		}
	}
	return (FALSE);
}

/*
 * Mark a transport as having work available.  If no thread owns it,
 * try to hand it to an idle thread; failing that (or when request
 * space is exhausted) queue it on the group's active list.
 */
void
xprt_active(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_lock(&grp->sg_lock);

	if (!xprt->xp_registered) {
		/*
		 * Race with xprt_unregister - we lose.
		 */
		mtx_unlock(&grp->sg_lock);
		return;
	}

	if (!xprt->xp_active) {
		xprt->xp_active = TRUE;
		if (xprt->xp_thread == NULL) {
			if (!svc_request_space_available(xprt->xp_pool) ||
			    !xprt_assignthread(xprt))
				TAILQ_INSERT_TAIL(&grp->sg_active, xprt,
				    xp_alink);
		}
	}

	mtx_unlock(&grp->sg_lock);
}

/*
 * Clear the active flag; only transports without an owning thread sit
 * on the sg_active queue, so only those are removed from it.
 * Called with sg_lock held.
 */
void
xprt_inactive_locked(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_assert(&grp->sg_lock, MA_OWNED);
	if (xprt->xp_active) {
		if (xprt->xp_thread == NULL)
			TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
		xprt->xp_active = FALSE;
	}
}

/* Unlocked wrapper around xprt_inactive_locked(). */
void
xprt_inactive(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_lock(&grp->sg_lock);
	xprt_inactive_locked(xprt);
	mtx_unlock(&grp->sg_lock);
}

/*
 * Variant of xprt_inactive() for use only when sure that port is
 * assigned to thread. For example, within receive handlers.
 * (No lock taken; the thread assignment guarantees it is not on
 * the sg_active queue.)
 */
void
xprt_inactive_self(SVCXPRT *xprt)
{

	KASSERT(xprt->xp_thread != NULL,
	    ("xprt_inactive_self(%p) with NULL xp_thread", xprt));
	xprt->xp_active = FALSE;
}
458dfdcada3SDoug Rabson */ 459dfdcada3SDoug Rabson bool_t 460dfdcada3SDoug Rabson svc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers, 461dfdcada3SDoug Rabson void (*dispatch)(struct svc_req *, SVCXPRT *), 462dfdcada3SDoug Rabson const struct netconfig *nconf) 463dfdcada3SDoug Rabson { 464dfdcada3SDoug Rabson SVCPOOL *pool = xprt->xp_pool; 465dfdcada3SDoug Rabson struct svc_callout *s; 466dfdcada3SDoug Rabson char *netid = NULL; 467dfdcada3SDoug Rabson int flag = 0; 468dfdcada3SDoug Rabson 469dfdcada3SDoug Rabson /* VARIABLES PROTECTED BY svc_lock: s, svc_head */ 470dfdcada3SDoug Rabson 471dfdcada3SDoug Rabson if (xprt->xp_netid) { 472dfdcada3SDoug Rabson netid = strdup(xprt->xp_netid, M_RPC); 473dfdcada3SDoug Rabson flag = 1; 474dfdcada3SDoug Rabson } else if (nconf && nconf->nc_netid) { 475dfdcada3SDoug Rabson netid = strdup(nconf->nc_netid, M_RPC); 476dfdcada3SDoug Rabson flag = 1; 477dfdcada3SDoug Rabson } /* must have been created with svc_raw_create */ 478dfdcada3SDoug Rabson if ((netid == NULL) && (flag == 1)) { 479dfdcada3SDoug Rabson return (FALSE); 480dfdcada3SDoug Rabson } 481dfdcada3SDoug Rabson 482dfdcada3SDoug Rabson mtx_lock(&pool->sp_lock); 483dfdcada3SDoug Rabson if ((s = svc_find(pool, prog, vers, netid)) != NULL) { 484dfdcada3SDoug Rabson if (netid) 485dfdcada3SDoug Rabson free(netid, M_RPC); 486dfdcada3SDoug Rabson if (s->sc_dispatch == dispatch) 487dfdcada3SDoug Rabson goto rpcb_it; /* he is registering another xptr */ 488dfdcada3SDoug Rabson mtx_unlock(&pool->sp_lock); 489dfdcada3SDoug Rabson return (FALSE); 490dfdcada3SDoug Rabson } 491dfdcada3SDoug Rabson s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT); 492dfdcada3SDoug Rabson if (s == NULL) { 493dfdcada3SDoug Rabson if (netid) 494dfdcada3SDoug Rabson free(netid, M_RPC); 495dfdcada3SDoug Rabson mtx_unlock(&pool->sp_lock); 496dfdcada3SDoug Rabson return (FALSE); 497dfdcada3SDoug Rabson } 498dfdcada3SDoug Rabson 499dfdcada3SDoug Rabson s->sc_prog = prog; 500dfdcada3SDoug 
Rabson s->sc_vers = vers; 501dfdcada3SDoug Rabson s->sc_dispatch = dispatch; 502dfdcada3SDoug Rabson s->sc_netid = netid; 503dfdcada3SDoug Rabson TAILQ_INSERT_TAIL(&pool->sp_callouts, s, sc_link); 504dfdcada3SDoug Rabson 505dfdcada3SDoug Rabson if ((xprt->xp_netid == NULL) && (flag == 1) && netid) 506dfdcada3SDoug Rabson ((SVCXPRT *) xprt)->xp_netid = strdup(netid, M_RPC); 507dfdcada3SDoug Rabson 508dfdcada3SDoug Rabson rpcb_it: 509dfdcada3SDoug Rabson mtx_unlock(&pool->sp_lock); 510dfdcada3SDoug Rabson /* now register the information with the local binder service */ 511dfdcada3SDoug Rabson if (nconf) { 512dfdcada3SDoug Rabson bool_t dummy; 513dfdcada3SDoug Rabson struct netconfig tnc; 514a9148abdSDoug Rabson struct netbuf nb; 515dfdcada3SDoug Rabson tnc = *nconf; 516a9148abdSDoug Rabson nb.buf = &xprt->xp_ltaddr; 517a9148abdSDoug Rabson nb.len = xprt->xp_ltaddr.ss_len; 518a9148abdSDoug Rabson dummy = rpcb_set(prog, vers, &tnc, &nb); 519dfdcada3SDoug Rabson return (dummy); 520dfdcada3SDoug Rabson } 521dfdcada3SDoug Rabson return (TRUE); 522dfdcada3SDoug Rabson } 523dfdcada3SDoug Rabson 524dfdcada3SDoug Rabson /* 525dfdcada3SDoug Rabson * Remove a service program from the callout list. 
526dfdcada3SDoug Rabson */ 527dfdcada3SDoug Rabson void 528dfdcada3SDoug Rabson svc_unreg(SVCPOOL *pool, const rpcprog_t prog, const rpcvers_t vers) 529dfdcada3SDoug Rabson { 530dfdcada3SDoug Rabson struct svc_callout *s; 531dfdcada3SDoug Rabson 532dfdcada3SDoug Rabson /* unregister the information anyway */ 533dfdcada3SDoug Rabson (void) rpcb_unset(prog, vers, NULL); 534dfdcada3SDoug Rabson mtx_lock(&pool->sp_lock); 535dfdcada3SDoug Rabson while ((s = svc_find(pool, prog, vers, NULL)) != NULL) { 536dfdcada3SDoug Rabson TAILQ_REMOVE(&pool->sp_callouts, s, sc_link); 537dfdcada3SDoug Rabson if (s->sc_netid) 538dfdcada3SDoug Rabson mem_free(s->sc_netid, sizeof (s->sc_netid) + 1); 539dfdcada3SDoug Rabson mem_free(s, sizeof (struct svc_callout)); 540dfdcada3SDoug Rabson } 541dfdcada3SDoug Rabson mtx_unlock(&pool->sp_lock); 542dfdcada3SDoug Rabson } 543dfdcada3SDoug Rabson 544d473bac7SAlexander Motin /* 545d473bac7SAlexander Motin * Add a service connection loss program to the callout list. 546d473bac7SAlexander Motin * The dispatch routine will be called when some port in ths pool die. 
547d473bac7SAlexander Motin */ 548d473bac7SAlexander Motin bool_t 549d473bac7SAlexander Motin svc_loss_reg(SVCXPRT *xprt, void (*dispatch)(SVCXPRT *)) 550d473bac7SAlexander Motin { 551d473bac7SAlexander Motin SVCPOOL *pool = xprt->xp_pool; 552d473bac7SAlexander Motin struct svc_loss_callout *s; 553d473bac7SAlexander Motin 554d473bac7SAlexander Motin mtx_lock(&pool->sp_lock); 555d473bac7SAlexander Motin TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) { 556d473bac7SAlexander Motin if (s->slc_dispatch == dispatch) 557d473bac7SAlexander Motin break; 558d473bac7SAlexander Motin } 559d473bac7SAlexander Motin if (s != NULL) { 560d473bac7SAlexander Motin mtx_unlock(&pool->sp_lock); 561d473bac7SAlexander Motin return (TRUE); 562d473bac7SAlexander Motin } 5638576dc00SAlexander Motin s = malloc(sizeof(struct svc_loss_callout), M_RPC, M_NOWAIT); 564d473bac7SAlexander Motin if (s == NULL) { 565d473bac7SAlexander Motin mtx_unlock(&pool->sp_lock); 566d473bac7SAlexander Motin return (FALSE); 567d473bac7SAlexander Motin } 568d473bac7SAlexander Motin s->slc_dispatch = dispatch; 569d473bac7SAlexander Motin TAILQ_INSERT_TAIL(&pool->sp_lcallouts, s, slc_link); 570d473bac7SAlexander Motin mtx_unlock(&pool->sp_lock); 571d473bac7SAlexander Motin return (TRUE); 572d473bac7SAlexander Motin } 573d473bac7SAlexander Motin 574d473bac7SAlexander Motin /* 575d473bac7SAlexander Motin * Remove a service connection loss program from the callout list. 
576d473bac7SAlexander Motin */ 577d473bac7SAlexander Motin void 578d473bac7SAlexander Motin svc_loss_unreg(SVCPOOL *pool, void (*dispatch)(SVCXPRT *)) 579d473bac7SAlexander Motin { 580d473bac7SAlexander Motin struct svc_loss_callout *s; 581d473bac7SAlexander Motin 582d473bac7SAlexander Motin mtx_lock(&pool->sp_lock); 583d473bac7SAlexander Motin TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) { 584d473bac7SAlexander Motin if (s->slc_dispatch == dispatch) { 585d473bac7SAlexander Motin TAILQ_REMOVE(&pool->sp_lcallouts, s, slc_link); 586d473bac7SAlexander Motin free(s, M_RPC); 587d473bac7SAlexander Motin break; 588d473bac7SAlexander Motin } 589d473bac7SAlexander Motin } 590d473bac7SAlexander Motin mtx_unlock(&pool->sp_lock); 591d473bac7SAlexander Motin } 592d473bac7SAlexander Motin 593dfdcada3SDoug Rabson /* ********************** CALLOUT list related stuff ************* */ 594dfdcada3SDoug Rabson 595dfdcada3SDoug Rabson /* 596dfdcada3SDoug Rabson * Search the callout list for a program number, return the callout 597dfdcada3SDoug Rabson * struct. 
/*
 * Search the callout list for a program number, return the callout
 * struct.  A NULL "netid" (either argument or stored) matches any
 * netid.  Returns NULL when no callout matches.  Caller holds sp_lock.
 */
static struct svc_callout *
svc_find(SVCPOOL *pool, rpcprog_t prog, rpcvers_t vers, char *netid)
{
	struct svc_callout *s;

	mtx_assert(&pool->sp_lock, MA_OWNED);
	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
		if (s->sc_prog == prog && s->sc_vers == vers
		    && (netid == NULL || s->sc_netid == NULL ||
			strcmp(netid, s->sc_netid) == 0))
			break;
	}

	return (s);
}

/* ******************* REPLY GENERATION ROUTINES ************ */

/*
 * Common tail of all reply paths: free the request arguments, record
 * the reply in the replay cache (if configured), wrap the body through
 * the authentication layer and send it, then free the caller address.
 * "body" may be NULL for error replies.  Returns the transport's
 * SVC_REPLY result, or FALSE if authentication wrapping fails.
 */
static bool_t
svc_sendreply_common(struct svc_req *rqstp, struct rpc_msg *rply,
    struct mbuf *body)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	bool_t ok;

	if (rqstp->rq_args) {
		m_freem(rqstp->rq_args);
		rqstp->rq_args = NULL;
	}

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    rply, svc_getrpccaller(rqstp), body);

	if (!SVCAUTH_WRAP(&rqstp->rq_auth, &body))
		return (FALSE);

	ok = SVC_REPLY(xprt, rply, rqstp->rq_addr, body, &rqstp->rq_reply_seq);
	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	return (ok);
}

/*
 * Send a reply to an rpc request: XDR-encode the results into an mbuf
 * and hand it to svc_sendreply_common().  The mbuf is freed here if
 * encoding fails.
 */
bool_t
svc_sendreply(struct svc_req *rqstp, xdrproc_t xdr_results, void * xdr_location)
{
	struct rpc_msg rply;
	struct mbuf *m;
	XDR xdrs;
	bool_t ok;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	m = m_getcl(M_WAITOK, MT_DATA, 0);
	xdrmbuf_create(&xdrs, m, XDR_ENCODE);
	ok = xdr_results(&xdrs, xdr_location);
	XDR_DESTROY(&xdrs);

	if (ok) {
		return (svc_sendreply_common(rqstp, &rply, m));
	} else {
		m_freem(m);
		return (FALSE);
	}
}

/*
 * As svc_sendreply(), but the results are supplied pre-encoded in an
 * mbuf chain; ownership of "m" passes to the reply path.
 */
bool_t
svc_sendreply_mbuf(struct svc_req *rqstp, struct mbuf *m)
{
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	return (svc_sendreply_common(rqstp, &rply, m));
}

/*
 * No procedure error reply (PROC_UNAVAIL); also recorded in the
 * replay cache when one is configured.
 */
void
svcerr_noproc(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROC_UNAVAIL;

	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}

/*
 * Can't decode args error reply
 */
void
svcerr_decode(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat =
GARBAGE_ARGS; 729dfdcada3SDoug Rabson 730a9148abdSDoug Rabson if (xprt->xp_pool->sp_rcache) 731a9148abdSDoug Rabson replay_setreply(xprt->xp_pool->sp_rcache, 732a9148abdSDoug Rabson &rply, (struct sockaddr *) &xprt->xp_rtaddr, NULL); 733a9148abdSDoug Rabson 734a9148abdSDoug Rabson svc_sendreply_common(rqstp, &rply, NULL); 735dfdcada3SDoug Rabson } 736dfdcada3SDoug Rabson 737dfdcada3SDoug Rabson /* 738dfdcada3SDoug Rabson * Some system error 739dfdcada3SDoug Rabson */ 740dfdcada3SDoug Rabson void 741a9148abdSDoug Rabson svcerr_systemerr(struct svc_req *rqstp) 742dfdcada3SDoug Rabson { 743a9148abdSDoug Rabson SVCXPRT *xprt = rqstp->rq_xprt; 744dfdcada3SDoug Rabson struct rpc_msg rply; 745dfdcada3SDoug Rabson 746a9148abdSDoug Rabson rply.rm_xid = rqstp->rq_xid; 747dfdcada3SDoug Rabson rply.rm_direction = REPLY; 748dfdcada3SDoug Rabson rply.rm_reply.rp_stat = MSG_ACCEPTED; 749a9148abdSDoug Rabson rply.acpted_rply.ar_verf = rqstp->rq_verf; 750dfdcada3SDoug Rabson rply.acpted_rply.ar_stat = SYSTEM_ERR; 751dfdcada3SDoug Rabson 752a9148abdSDoug Rabson if (xprt->xp_pool->sp_rcache) 753a9148abdSDoug Rabson replay_setreply(xprt->xp_pool->sp_rcache, 754a9148abdSDoug Rabson &rply, svc_getrpccaller(rqstp), NULL); 755a9148abdSDoug Rabson 756a9148abdSDoug Rabson svc_sendreply_common(rqstp, &rply, NULL); 757dfdcada3SDoug Rabson } 758dfdcada3SDoug Rabson 759dfdcada3SDoug Rabson /* 760dfdcada3SDoug Rabson * Authentication error reply 761dfdcada3SDoug Rabson */ 762dfdcada3SDoug Rabson void 763a9148abdSDoug Rabson svcerr_auth(struct svc_req *rqstp, enum auth_stat why) 764dfdcada3SDoug Rabson { 765a9148abdSDoug Rabson SVCXPRT *xprt = rqstp->rq_xprt; 766dfdcada3SDoug Rabson struct rpc_msg rply; 767dfdcada3SDoug Rabson 768a9148abdSDoug Rabson rply.rm_xid = rqstp->rq_xid; 769dfdcada3SDoug Rabson rply.rm_direction = REPLY; 770dfdcada3SDoug Rabson rply.rm_reply.rp_stat = MSG_DENIED; 771dfdcada3SDoug Rabson rply.rjcted_rply.rj_stat = AUTH_ERROR; 772dfdcada3SDoug Rabson rply.rjcted_rply.rj_why 
= why; 773dfdcada3SDoug Rabson 774a9148abdSDoug Rabson if (xprt->xp_pool->sp_rcache) 775a9148abdSDoug Rabson replay_setreply(xprt->xp_pool->sp_rcache, 776a9148abdSDoug Rabson &rply, svc_getrpccaller(rqstp), NULL); 777a9148abdSDoug Rabson 778a9148abdSDoug Rabson svc_sendreply_common(rqstp, &rply, NULL); 779dfdcada3SDoug Rabson } 780dfdcada3SDoug Rabson 781dfdcada3SDoug Rabson /* 782dfdcada3SDoug Rabson * Auth too weak error reply 783dfdcada3SDoug Rabson */ 784dfdcada3SDoug Rabson void 785a9148abdSDoug Rabson svcerr_weakauth(struct svc_req *rqstp) 786dfdcada3SDoug Rabson { 787dfdcada3SDoug Rabson 788a9148abdSDoug Rabson svcerr_auth(rqstp, AUTH_TOOWEAK); 789dfdcada3SDoug Rabson } 790dfdcada3SDoug Rabson 791dfdcada3SDoug Rabson /* 792dfdcada3SDoug Rabson * Program unavailable error reply 793dfdcada3SDoug Rabson */ 794dfdcada3SDoug Rabson void 795a9148abdSDoug Rabson svcerr_noprog(struct svc_req *rqstp) 796dfdcada3SDoug Rabson { 797a9148abdSDoug Rabson SVCXPRT *xprt = rqstp->rq_xprt; 798dfdcada3SDoug Rabson struct rpc_msg rply; 799dfdcada3SDoug Rabson 800a9148abdSDoug Rabson rply.rm_xid = rqstp->rq_xid; 801dfdcada3SDoug Rabson rply.rm_direction = REPLY; 802dfdcada3SDoug Rabson rply.rm_reply.rp_stat = MSG_ACCEPTED; 803a9148abdSDoug Rabson rply.acpted_rply.ar_verf = rqstp->rq_verf; 804dfdcada3SDoug Rabson rply.acpted_rply.ar_stat = PROG_UNAVAIL; 805dfdcada3SDoug Rabson 806a9148abdSDoug Rabson if (xprt->xp_pool->sp_rcache) 807a9148abdSDoug Rabson replay_setreply(xprt->xp_pool->sp_rcache, 808a9148abdSDoug Rabson &rply, svc_getrpccaller(rqstp), NULL); 809a9148abdSDoug Rabson 810a9148abdSDoug Rabson svc_sendreply_common(rqstp, &rply, NULL); 811dfdcada3SDoug Rabson } 812dfdcada3SDoug Rabson 813dfdcada3SDoug Rabson /* 814dfdcada3SDoug Rabson * Program version mismatch error reply 815dfdcada3SDoug Rabson */ 816dfdcada3SDoug Rabson void 817a9148abdSDoug Rabson svcerr_progvers(struct svc_req *rqstp, rpcvers_t low_vers, rpcvers_t high_vers) 818dfdcada3SDoug Rabson { 
819a9148abdSDoug Rabson SVCXPRT *xprt = rqstp->rq_xprt; 820dfdcada3SDoug Rabson struct rpc_msg rply; 821dfdcada3SDoug Rabson 822a9148abdSDoug Rabson rply.rm_xid = rqstp->rq_xid; 823dfdcada3SDoug Rabson rply.rm_direction = REPLY; 824dfdcada3SDoug Rabson rply.rm_reply.rp_stat = MSG_ACCEPTED; 825a9148abdSDoug Rabson rply.acpted_rply.ar_verf = rqstp->rq_verf; 826dfdcada3SDoug Rabson rply.acpted_rply.ar_stat = PROG_MISMATCH; 827dfdcada3SDoug Rabson rply.acpted_rply.ar_vers.low = (uint32_t)low_vers; 828dfdcada3SDoug Rabson rply.acpted_rply.ar_vers.high = (uint32_t)high_vers; 829dfdcada3SDoug Rabson 830a9148abdSDoug Rabson if (xprt->xp_pool->sp_rcache) 831a9148abdSDoug Rabson replay_setreply(xprt->xp_pool->sp_rcache, 832a9148abdSDoug Rabson &rply, svc_getrpccaller(rqstp), NULL); 833a9148abdSDoug Rabson 834a9148abdSDoug Rabson svc_sendreply_common(rqstp, &rply, NULL); 835a9148abdSDoug Rabson } 836a9148abdSDoug Rabson 837a9148abdSDoug Rabson /* 838a9148abdSDoug Rabson * Allocate a new server transport structure. All fields are 839a9148abdSDoug Rabson * initialized to zero and xp_p3 is initialized to point at an 840a9148abdSDoug Rabson * extension structure to hold various flags and authentication 841a9148abdSDoug Rabson * parameters. 
842a9148abdSDoug Rabson */ 843a9148abdSDoug Rabson SVCXPRT * 844a9148abdSDoug Rabson svc_xprt_alloc() 845a9148abdSDoug Rabson { 846a9148abdSDoug Rabson SVCXPRT *xprt; 847a9148abdSDoug Rabson SVCXPRT_EXT *ext; 848a9148abdSDoug Rabson 849a9148abdSDoug Rabson xprt = mem_alloc(sizeof(SVCXPRT)); 850a9148abdSDoug Rabson memset(xprt, 0, sizeof(SVCXPRT)); 851a9148abdSDoug Rabson ext = mem_alloc(sizeof(SVCXPRT_EXT)); 852a9148abdSDoug Rabson memset(ext, 0, sizeof(SVCXPRT_EXT)); 853a9148abdSDoug Rabson xprt->xp_p3 = ext; 854a9148abdSDoug Rabson refcount_init(&xprt->xp_refs, 1); 855a9148abdSDoug Rabson 856a9148abdSDoug Rabson return (xprt); 857a9148abdSDoug Rabson } 858a9148abdSDoug Rabson 859a9148abdSDoug Rabson /* 860a9148abdSDoug Rabson * Free a server transport structure. 861a9148abdSDoug Rabson */ 862a9148abdSDoug Rabson void 863a9148abdSDoug Rabson svc_xprt_free(xprt) 864a9148abdSDoug Rabson SVCXPRT *xprt; 865a9148abdSDoug Rabson { 866a9148abdSDoug Rabson 867a9148abdSDoug Rabson mem_free(xprt->xp_p3, sizeof(SVCXPRT_EXT)); 868a9148abdSDoug Rabson mem_free(xprt, sizeof(SVCXPRT)); 869dfdcada3SDoug Rabson } 870dfdcada3SDoug Rabson 871dfdcada3SDoug Rabson /* ******************* SERVER INPUT STUFF ******************* */ 872dfdcada3SDoug Rabson 873dfdcada3SDoug Rabson /* 874a9148abdSDoug Rabson * Read RPC requests from a transport and queue them to be 875a9148abdSDoug Rabson * executed. We handle authentication and replay cache replies here. 876a9148abdSDoug Rabson * Actually dispatching the RPC is deferred till svc_executereq. 
877dfdcada3SDoug Rabson */ 878a9148abdSDoug Rabson static enum xprt_stat 879a9148abdSDoug Rabson svc_getreq(SVCXPRT *xprt, struct svc_req **rqstp_ret) 880dfdcada3SDoug Rabson { 881dfdcada3SDoug Rabson SVCPOOL *pool = xprt->xp_pool; 882a9148abdSDoug Rabson struct svc_req *r; 883dfdcada3SDoug Rabson struct rpc_msg msg; 884a9148abdSDoug Rabson struct mbuf *args; 885d473bac7SAlexander Motin struct svc_loss_callout *s; 886a9148abdSDoug Rabson enum xprt_stat stat; 887a9148abdSDoug Rabson 888a9148abdSDoug Rabson /* now receive msgs from xprtprt (support batch calls) */ 889a9148abdSDoug Rabson r = malloc(sizeof(*r), M_RPC, M_WAITOK|M_ZERO); 890a9148abdSDoug Rabson 891a9148abdSDoug Rabson msg.rm_call.cb_cred.oa_base = r->rq_credarea; 892a9148abdSDoug Rabson msg.rm_call.cb_verf.oa_base = &r->rq_credarea[MAX_AUTH_BYTES]; 893a9148abdSDoug Rabson r->rq_clntcred = &r->rq_credarea[2*MAX_AUTH_BYTES]; 894a9148abdSDoug Rabson if (SVC_RECV(xprt, &msg, &r->rq_addr, &args)) { 895a9148abdSDoug Rabson enum auth_stat why; 896a9148abdSDoug Rabson 897a9148abdSDoug Rabson /* 898a9148abdSDoug Rabson * Handle replays and authenticate before queuing the 899a9148abdSDoug Rabson * request to be executed. 
900a9148abdSDoug Rabson */ 901a9148abdSDoug Rabson SVC_ACQUIRE(xprt); 902a9148abdSDoug Rabson r->rq_xprt = xprt; 903a9148abdSDoug Rabson if (pool->sp_rcache) { 904a9148abdSDoug Rabson struct rpc_msg repmsg; 905a9148abdSDoug Rabson struct mbuf *repbody; 906a9148abdSDoug Rabson enum replay_state rs; 907a9148abdSDoug Rabson rs = replay_find(pool->sp_rcache, &msg, 908a9148abdSDoug Rabson svc_getrpccaller(r), &repmsg, &repbody); 909a9148abdSDoug Rabson switch (rs) { 910a9148abdSDoug Rabson case RS_NEW: 911a9148abdSDoug Rabson break; 912a9148abdSDoug Rabson case RS_DONE: 913a9148abdSDoug Rabson SVC_REPLY(xprt, &repmsg, r->rq_addr, 914d473bac7SAlexander Motin repbody, &r->rq_reply_seq); 915a9148abdSDoug Rabson if (r->rq_addr) { 916a9148abdSDoug Rabson free(r->rq_addr, M_SONAME); 917a9148abdSDoug Rabson r->rq_addr = NULL; 918a9148abdSDoug Rabson } 919578e600cSRick Macklem m_freem(args); 920a9148abdSDoug Rabson goto call_done; 921a9148abdSDoug Rabson 922a9148abdSDoug Rabson default: 923578e600cSRick Macklem m_freem(args); 924a9148abdSDoug Rabson goto call_done; 925a9148abdSDoug Rabson } 926a9148abdSDoug Rabson } 927a9148abdSDoug Rabson 928a9148abdSDoug Rabson r->rq_xid = msg.rm_xid; 929a9148abdSDoug Rabson r->rq_prog = msg.rm_call.cb_prog; 930a9148abdSDoug Rabson r->rq_vers = msg.rm_call.cb_vers; 931a9148abdSDoug Rabson r->rq_proc = msg.rm_call.cb_proc; 932a9148abdSDoug Rabson r->rq_size = sizeof(*r) + m_length(args, NULL); 933a9148abdSDoug Rabson r->rq_args = args; 934a9148abdSDoug Rabson if ((why = _authenticate(r, &msg)) != AUTH_OK) { 935a9148abdSDoug Rabson /* 936a9148abdSDoug Rabson * RPCSEC_GSS uses this return code 937a9148abdSDoug Rabson * for requests that form part of its 938a9148abdSDoug Rabson * context establishment protocol and 939a9148abdSDoug Rabson * should not be dispatched to the 940a9148abdSDoug Rabson * application. 
941a9148abdSDoug Rabson */ 942a9148abdSDoug Rabson if (why != RPCSEC_GSS_NODISPATCH) 943a9148abdSDoug Rabson svcerr_auth(r, why); 944a9148abdSDoug Rabson goto call_done; 945a9148abdSDoug Rabson } 946a9148abdSDoug Rabson 947a9148abdSDoug Rabson if (!SVCAUTH_UNWRAP(&r->rq_auth, &r->rq_args)) { 948a9148abdSDoug Rabson svcerr_decode(r); 949a9148abdSDoug Rabson goto call_done; 950a9148abdSDoug Rabson } 951a9148abdSDoug Rabson 952a9148abdSDoug Rabson /* 953a9148abdSDoug Rabson * Everything checks out, return request to caller. 954a9148abdSDoug Rabson */ 955a9148abdSDoug Rabson *rqstp_ret = r; 956a9148abdSDoug Rabson r = NULL; 957a9148abdSDoug Rabson } 958a9148abdSDoug Rabson call_done: 959a9148abdSDoug Rabson if (r) { 960a9148abdSDoug Rabson svc_freereq(r); 961a9148abdSDoug Rabson r = NULL; 962a9148abdSDoug Rabson } 963a9148abdSDoug Rabson if ((stat = SVC_STAT(xprt)) == XPRT_DIED) { 964d473bac7SAlexander Motin TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) 965d473bac7SAlexander Motin (*s->slc_dispatch)(xprt); 966a9148abdSDoug Rabson xprt_unregister(xprt); 967a9148abdSDoug Rabson } 968a9148abdSDoug Rabson 969a9148abdSDoug Rabson return (stat); 970a9148abdSDoug Rabson } 971a9148abdSDoug Rabson 972a9148abdSDoug Rabson static void 973a9148abdSDoug Rabson svc_executereq(struct svc_req *rqstp) 974a9148abdSDoug Rabson { 975a9148abdSDoug Rabson SVCXPRT *xprt = rqstp->rq_xprt; 976a9148abdSDoug Rabson SVCPOOL *pool = xprt->xp_pool; 977dfdcada3SDoug Rabson int prog_found; 978dfdcada3SDoug Rabson rpcvers_t low_vers; 979dfdcada3SDoug Rabson rpcvers_t high_vers; 980dfdcada3SDoug Rabson struct svc_callout *s; 981dfdcada3SDoug Rabson 982dfdcada3SDoug Rabson /* now match message with a registered service*/ 983dfdcada3SDoug Rabson prog_found = FALSE; 984dfdcada3SDoug Rabson low_vers = (rpcvers_t) -1L; 985dfdcada3SDoug Rabson high_vers = (rpcvers_t) 0L; 986dfdcada3SDoug Rabson TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) { 987a9148abdSDoug Rabson if (s->sc_prog == rqstp->rq_prog) { 
988a9148abdSDoug Rabson if (s->sc_vers == rqstp->rq_vers) { 989a9148abdSDoug Rabson /* 990a9148abdSDoug Rabson * We hand ownership of r to the 991a9148abdSDoug Rabson * dispatch method - they must call 992a9148abdSDoug Rabson * svc_freereq. 993a9148abdSDoug Rabson */ 994a9148abdSDoug Rabson (*s->sc_dispatch)(rqstp, xprt); 995a9148abdSDoug Rabson return; 996dfdcada3SDoug Rabson } /* found correct version */ 997dfdcada3SDoug Rabson prog_found = TRUE; 998dfdcada3SDoug Rabson if (s->sc_vers < low_vers) 999dfdcada3SDoug Rabson low_vers = s->sc_vers; 1000dfdcada3SDoug Rabson if (s->sc_vers > high_vers) 1001dfdcada3SDoug Rabson high_vers = s->sc_vers; 1002dfdcada3SDoug Rabson } /* found correct program */ 1003dfdcada3SDoug Rabson } 1004a9148abdSDoug Rabson 1005dfdcada3SDoug Rabson /* 1006dfdcada3SDoug Rabson * if we got here, the program or version 1007dfdcada3SDoug Rabson * is not served ... 1008dfdcada3SDoug Rabson */ 1009dfdcada3SDoug Rabson if (prog_found) 1010a9148abdSDoug Rabson svcerr_progvers(rqstp, low_vers, high_vers); 1011dfdcada3SDoug Rabson else 1012a9148abdSDoug Rabson svcerr_noprog(rqstp); 1013a9148abdSDoug Rabson 1014a9148abdSDoug Rabson svc_freereq(rqstp); 1015dfdcada3SDoug Rabson } 1016a9148abdSDoug Rabson 1017a9148abdSDoug Rabson static void 1018b563304cSAlexander Motin svc_checkidle(SVCGROUP *grp) 1019a9148abdSDoug Rabson { 1020a9148abdSDoug Rabson SVCXPRT *xprt, *nxprt; 1021a9148abdSDoug Rabson time_t timo; 1022a9148abdSDoug Rabson struct svcxprt_list cleanup; 1023a9148abdSDoug Rabson 1024a9148abdSDoug Rabson TAILQ_INIT(&cleanup); 1025b563304cSAlexander Motin TAILQ_FOREACH_SAFE(xprt, &grp->sg_xlist, xp_link, nxprt) { 1026dfdcada3SDoug Rabson /* 1027a9148abdSDoug Rabson * Only some transports have idle timers. Don't time 1028a9148abdSDoug Rabson * something out which is just waking up. 
1029dfdcada3SDoug Rabson */ 1030a9148abdSDoug Rabson if (!xprt->xp_idletimeout || xprt->xp_thread) 1031a9148abdSDoug Rabson continue; 1032a9148abdSDoug Rabson 1033a9148abdSDoug Rabson timo = xprt->xp_lastactive + xprt->xp_idletimeout; 1034a9148abdSDoug Rabson if (time_uptime > timo) { 1035a9148abdSDoug Rabson xprt_unregister_locked(xprt); 1036a9148abdSDoug Rabson TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link); 1037a9148abdSDoug Rabson } 1038a9148abdSDoug Rabson } 1039a9148abdSDoug Rabson 1040b563304cSAlexander Motin mtx_unlock(&grp->sg_lock); 1041a9148abdSDoug Rabson TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) { 1042a9148abdSDoug Rabson SVC_RELEASE(xprt); 1043a9148abdSDoug Rabson } 1044b563304cSAlexander Motin mtx_lock(&grp->sg_lock); 1045a9148abdSDoug Rabson } 1046a9148abdSDoug Rabson 1047a9148abdSDoug Rabson static void 1048a9148abdSDoug Rabson svc_assign_waiting_sockets(SVCPOOL *pool) 1049a9148abdSDoug Rabson { 1050b563304cSAlexander Motin SVCGROUP *grp; 1051a9148abdSDoug Rabson SVCXPRT *xprt; 1052b563304cSAlexander Motin int g; 1053a9148abdSDoug Rabson 1054b563304cSAlexander Motin for (g = 0; g < pool->sp_groupcount; g++) { 1055b563304cSAlexander Motin grp = &pool->sp_groups[g]; 1056b563304cSAlexander Motin mtx_lock(&grp->sg_lock); 1057b563304cSAlexander Motin while ((xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) { 1058ba981145SAlexander Motin if (xprt_assignthread(xprt)) 1059b563304cSAlexander Motin TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink); 1060ba981145SAlexander Motin else 1061ba981145SAlexander Motin break; 1062a9148abdSDoug Rabson } 1063b563304cSAlexander Motin mtx_unlock(&grp->sg_lock); 1064b563304cSAlexander Motin } 1065f8fb069dSAlexander Motin } 1066f8fb069dSAlexander Motin 1067f8fb069dSAlexander Motin static void 10683c42b5bfSGarrett Wollman svc_change_space_used(SVCPOOL *pool, long delta) 1069f8fb069dSAlexander Motin { 10703c42b5bfSGarrett Wollman unsigned long value; 1071f8fb069dSAlexander Motin 10723c42b5bfSGarrett Wollman value = 
atomic_fetchadd_long(&pool->sp_space_used, delta) + delta; 1073f8fb069dSAlexander Motin if (delta > 0) { 1074f8fb069dSAlexander Motin if (value >= pool->sp_space_high && !pool->sp_space_throttled) { 1075f8fb069dSAlexander Motin pool->sp_space_throttled = TRUE; 1076f8fb069dSAlexander Motin pool->sp_space_throttle_count++; 1077f8fb069dSAlexander Motin } 1078f8fb069dSAlexander Motin if (value > pool->sp_space_used_highest) 1079f8fb069dSAlexander Motin pool->sp_space_used_highest = value; 1080f8fb069dSAlexander Motin } else { 1081f8fb069dSAlexander Motin if (value < pool->sp_space_low && pool->sp_space_throttled) { 1082f8fb069dSAlexander Motin pool->sp_space_throttled = FALSE; 1083f8fb069dSAlexander Motin svc_assign_waiting_sockets(pool); 1084f8fb069dSAlexander Motin } 1085f8fb069dSAlexander Motin } 1086a9148abdSDoug Rabson } 1087a9148abdSDoug Rabson 1088a9148abdSDoug Rabson static bool_t 1089a9148abdSDoug Rabson svc_request_space_available(SVCPOOL *pool) 1090a9148abdSDoug Rabson { 1091a9148abdSDoug Rabson 1092f8fb069dSAlexander Motin if (pool->sp_space_throttled) 1093f8fb069dSAlexander Motin return (FALSE); 1094f8fb069dSAlexander Motin return (TRUE); 1095a9148abdSDoug Rabson } 1096a9148abdSDoug Rabson 1097a9148abdSDoug Rabson static void 1098b563304cSAlexander Motin svc_run_internal(SVCGROUP *grp, bool_t ismaster) 1099a9148abdSDoug Rabson { 1100b563304cSAlexander Motin SVCPOOL *pool = grp->sg_pool; 1101a9148abdSDoug Rabson SVCTHREAD *st, *stpref; 1102a9148abdSDoug Rabson SVCXPRT *xprt; 1103a9148abdSDoug Rabson enum xprt_stat stat; 1104a9148abdSDoug Rabson struct svc_req *rqstp; 1105f87c8878SKonstantin Belousov struct proc *p; 11063c42b5bfSGarrett Wollman long sz; 1107a9148abdSDoug Rabson int error; 1108a9148abdSDoug Rabson 1109a9148abdSDoug Rabson st = mem_alloc(sizeof(*st)); 1110b776fb2dSAlexander Motin mtx_init(&st->st_lock, "st_lock", NULL, MTX_DEF); 1111f8fb069dSAlexander Motin st->st_pool = pool; 1112a9148abdSDoug Rabson st->st_xprt = NULL; 1113a9148abdSDoug 
Rabson STAILQ_INIT(&st->st_reqs); 1114a9148abdSDoug Rabson cv_init(&st->st_cond, "rpcsvc"); 1115a9148abdSDoug Rabson 1116b563304cSAlexander Motin mtx_lock(&grp->sg_lock); 1117a9148abdSDoug Rabson 1118a9148abdSDoug Rabson /* 1119a9148abdSDoug Rabson * If we are a new thread which was spawned to cope with 1120a9148abdSDoug Rabson * increased load, set the state back to SVCPOOL_ACTIVE. 1121a9148abdSDoug Rabson */ 1122b563304cSAlexander Motin if (grp->sg_state == SVCPOOL_THREADSTARTING) 1123b563304cSAlexander Motin grp->sg_state = SVCPOOL_ACTIVE; 1124a9148abdSDoug Rabson 1125b563304cSAlexander Motin while (grp->sg_state != SVCPOOL_CLOSING) { 1126a9148abdSDoug Rabson /* 1127db7cdfeeSAlexander Motin * Create new thread if requested. 1128db7cdfeeSAlexander Motin */ 1129b563304cSAlexander Motin if (grp->sg_state == SVCPOOL_THREADWANTED) { 1130b563304cSAlexander Motin grp->sg_state = SVCPOOL_THREADSTARTING; 1131b563304cSAlexander Motin grp->sg_lastcreatetime = time_uptime; 1132b563304cSAlexander Motin mtx_unlock(&grp->sg_lock); 1133b563304cSAlexander Motin svc_new_thread(grp); 1134b563304cSAlexander Motin mtx_lock(&grp->sg_lock); 1135db7cdfeeSAlexander Motin continue; 1136db7cdfeeSAlexander Motin } 1137db7cdfeeSAlexander Motin 1138db7cdfeeSAlexander Motin /* 1139a9148abdSDoug Rabson * Check for idle transports once per second. 1140a9148abdSDoug Rabson */ 1141b563304cSAlexander Motin if (time_uptime > grp->sg_lastidlecheck) { 1142b563304cSAlexander Motin grp->sg_lastidlecheck = time_uptime; 1143b563304cSAlexander Motin svc_checkidle(grp); 1144a9148abdSDoug Rabson } 1145a9148abdSDoug Rabson 1146a9148abdSDoug Rabson xprt = st->st_xprt; 1147b776fb2dSAlexander Motin if (!xprt) { 1148a9148abdSDoug Rabson /* 1149a9148abdSDoug Rabson * Enforce maxthreads count. 
1150a9148abdSDoug Rabson */ 1151b563304cSAlexander Motin if (grp->sg_threadcount > grp->sg_maxthreads) 1152a9148abdSDoug Rabson break; 1153a9148abdSDoug Rabson 1154a9148abdSDoug Rabson /* 1155a9148abdSDoug Rabson * Before sleeping, see if we can find an 1156a9148abdSDoug Rabson * active transport which isn't being serviced 1157a9148abdSDoug Rabson * by a thread. 1158a9148abdSDoug Rabson */ 1159ba981145SAlexander Motin if (svc_request_space_available(pool) && 1160b563304cSAlexander Motin (xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) { 1161b563304cSAlexander Motin TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink); 1162a9148abdSDoug Rabson SVC_ACQUIRE(xprt); 1163a9148abdSDoug Rabson xprt->xp_thread = st; 1164a9148abdSDoug Rabson st->st_xprt = xprt; 1165a9148abdSDoug Rabson continue; 1166ba981145SAlexander Motin } 1167a9148abdSDoug Rabson 1168b563304cSAlexander Motin LIST_INSERT_HEAD(&grp->sg_idlethreads, st, st_ilink); 1169db7cdfeeSAlexander Motin if (ismaster || (!ismaster && 1170b563304cSAlexander Motin grp->sg_threadcount > grp->sg_minthreads)) 1171db7cdfeeSAlexander Motin error = cv_timedwait_sig(&st->st_cond, 1172b563304cSAlexander Motin &grp->sg_lock, 5 * hz); 1173db7cdfeeSAlexander Motin else 1174db7cdfeeSAlexander Motin error = cv_wait_sig(&st->st_cond, 1175b563304cSAlexander Motin &grp->sg_lock); 1176b5d7fb73SAlexander Motin if (st->st_xprt == NULL) 1177a9148abdSDoug Rabson LIST_REMOVE(st, st_ilink); 1178a9148abdSDoug Rabson 1179a9148abdSDoug Rabson /* 1180a9148abdSDoug Rabson * Reduce worker thread count when idle. 
1181a9148abdSDoug Rabson */ 1182a9148abdSDoug Rabson if (error == EWOULDBLOCK) { 1183a9148abdSDoug Rabson if (!ismaster 1184b563304cSAlexander Motin && (grp->sg_threadcount 1185b563304cSAlexander Motin > grp->sg_minthreads) 1186b776fb2dSAlexander Motin && !st->st_xprt) 1187dfdcada3SDoug Rabson break; 1188f87c8878SKonstantin Belousov } else if (error != 0) { 1189f87c8878SKonstantin Belousov KASSERT(error == EINTR || error == ERESTART, 1190f87c8878SKonstantin Belousov ("non-signal error %d", error)); 1191b563304cSAlexander Motin mtx_unlock(&grp->sg_lock); 1192f87c8878SKonstantin Belousov p = curproc; 1193f87c8878SKonstantin Belousov PROC_LOCK(p); 11946ddcc233SKonstantin Belousov if (P_SHOULDSTOP(p) || 11956ddcc233SKonstantin Belousov (p->p_flag & P_TOTAL_STOP) != 0) { 1196f87c8878SKonstantin Belousov thread_suspend_check(0); 1197f87c8878SKonstantin Belousov PROC_UNLOCK(p); 1198f87c8878SKonstantin Belousov mtx_lock(&grp->sg_lock); 1199f87c8878SKonstantin Belousov } else { 1200f87c8878SKonstantin Belousov PROC_UNLOCK(p); 1201a9148abdSDoug Rabson svc_exit(pool); 1202b563304cSAlexander Motin mtx_lock(&grp->sg_lock); 1203a9148abdSDoug Rabson break; 1204a9148abdSDoug Rabson } 1205f87c8878SKonstantin Belousov } 1206a9148abdSDoug Rabson continue; 1207a9148abdSDoug Rabson } 1208b563304cSAlexander Motin mtx_unlock(&grp->sg_lock); 1209a9148abdSDoug Rabson 1210a9148abdSDoug Rabson /* 1211b776fb2dSAlexander Motin * Drain the transport socket and queue up any RPCs. 
1212a9148abdSDoug Rabson */ 1213a9148abdSDoug Rabson xprt->xp_lastactive = time_uptime; 1214a9148abdSDoug Rabson do { 1215a9148abdSDoug Rabson if (!svc_request_space_available(pool)) 1216a9148abdSDoug Rabson break; 1217a9148abdSDoug Rabson rqstp = NULL; 1218a9148abdSDoug Rabson stat = svc_getreq(xprt, &rqstp); 1219a9148abdSDoug Rabson if (rqstp) { 1220f8fb069dSAlexander Motin svc_change_space_used(pool, rqstp->rq_size); 1221a9148abdSDoug Rabson /* 1222b776fb2dSAlexander Motin * See if the application has a preference 1223b776fb2dSAlexander Motin * for some other thread. 1224a9148abdSDoug Rabson */ 1225b776fb2dSAlexander Motin if (pool->sp_assign) { 1226b776fb2dSAlexander Motin stpref = pool->sp_assign(st, rqstp); 122782dcc80dSAlexander Motin rqstp->rq_thread = stpref; 1228a9148abdSDoug Rabson STAILQ_INSERT_TAIL(&stpref->st_reqs, 1229a9148abdSDoug Rabson rqstp, rq_link); 1230b776fb2dSAlexander Motin mtx_unlock(&stpref->st_lock); 1231b776fb2dSAlexander Motin if (stpref != st) 1232b776fb2dSAlexander Motin rqstp = NULL; 123382dcc80dSAlexander Motin } else { 123482dcc80dSAlexander Motin rqstp->rq_thread = st; 1235b776fb2dSAlexander Motin STAILQ_INSERT_TAIL(&st->st_reqs, 1236b776fb2dSAlexander Motin rqstp, rq_link); 1237b776fb2dSAlexander Motin } 123882dcc80dSAlexander Motin } 1239b776fb2dSAlexander Motin } while (rqstp == NULL && stat == XPRT_MOREREQS 1240b563304cSAlexander Motin && grp->sg_state != SVCPOOL_CLOSING); 1241a9148abdSDoug Rabson 1242a9148abdSDoug Rabson /* 1243b776fb2dSAlexander Motin * Move this transport to the end of the active list to 1244b776fb2dSAlexander Motin * ensure fairness when multiple transports are active. 1245b776fb2dSAlexander Motin * If this was the last queued request, svc_getreq will end 1246b776fb2dSAlexander Motin * up calling xprt_inactive to remove from the active list. 
	 */
	/*
	 * NOTE(review): this is the tail of svc_run_internal(); the
	 * function's entry and request-dispatch loop precede this chunk.
	 *
	 * Drop this thread's claim on the transport.  If the transport
	 * still has work pending (xp_active) and either there is no
	 * request space available in the pool or no other thread can be
	 * assigned to it, put it back on the group's active list so it
	 * is picked up later.
	 */
	mtx_lock(&grp->sg_lock);
	xprt->xp_thread = NULL;
	st->st_xprt = NULL;
	if (xprt->xp_active) {
		if (!svc_request_space_available(pool) ||
		    !xprt_assignthread(xprt))
			TAILQ_INSERT_TAIL(&grp->sg_active,
			    xprt, xp_alink);
	}
	mtx_unlock(&grp->sg_lock);
	SVC_RELEASE(xprt);

	/*
	 * Execute what we have queued.  st_lock is dropped around each
	 * svc_executereq() call so that new requests can be appended to
	 * st_reqs concurrently while this one runs.
	 */
	mtx_lock(&st->st_lock);
	while ((rqstp = STAILQ_FIRST(&st->st_reqs)) != NULL) {
		STAILQ_REMOVE_HEAD(&st->st_reqs, rq_link);
		mtx_unlock(&st->st_lock);
		/*
		 * Remember the request's size before executing it --
		 * svc_executereq() frees the request -- then give the
		 * space back to the pool's accounting.
		 */
		sz = (long)rqstp->rq_size;
		svc_executereq(rqstp);
		svc_change_space_used(pool, -sz);
		mtx_lock(&st->st_lock);
	}
	mtx_unlock(&st->st_lock);
	/* Re-acquire the group lock before re-testing the outer loop. */
	mtx_lock(&grp->sg_lock);
	}

	/*
	 * Thread is exiting: release any transport still parked on this
	 * thread, then tear down the thread state.
	 */
	if (st->st_xprt) {
		xprt = st->st_xprt;
		st->st_xprt = NULL;
		SVC_RELEASE(xprt);
	}
	KASSERT(STAILQ_EMPTY(&st->st_reqs), ("stray reqs on exit"));
	mtx_destroy(&st->st_lock);
	cv_destroy(&st->st_cond);
	mem_free(st, sizeof(*st));

	grp->sg_threadcount--;
	/*
	 * Wake svc_run(), which msleep()s on grp until the group's
	 * thread count drops to zero.  The master thread is svc_run()
	 * itself, so it has no need to wake anyone.
	 */
	if (!ismaster)
		wakeup(grp);
	mtx_unlock(&grp->sg_lock);
}

/*
 * Kernel-thread entry point for worker threads created by
 * svc_new_thread().  Runs the service loop for the given thread group
 * (arg is the SVCGROUP) and terminates the kthread when the loop
 * returns.
 */
static void
svc_thread_start(void *arg)
{

	svc_run_internal((SVCGROUP *) arg, FALSE);
	kthread_exit();
}

/*
 * Create one new worker thread for a thread group.  sg_threadcount is
 * bumped before kthread_add() so that shutdown accounting in
 * svc_run() already sees the thread even if it has not started yet.
 *
 * NOTE(review): kthread_add()'s return value is ignored; if it could
 * fail here, sg_threadcount would stay elevated and svc_run() would
 * wait forever on exit -- confirm failure is not possible in this
 * context.
 */
static void
svc_new_thread(SVCGROUP *grp)
{
	SVCPOOL *pool = grp->sg_pool;
	struct thread *td;

	mtx_lock(&grp->sg_lock);
	grp->sg_threadcount++;
	mtx_unlock(&grp->sg_lock);
	kthread_add(svc_thread_start, grp, pool->sp_proc, &td, 0, 0,
	    "%s: service", pool->sp_name);
}

/*
 * Run the RPC service pool in the context of the calling thread, which
 * becomes the pool's "master" thread.  Sizes the pool's thread groups,
 * spawns the minimum number of worker threads for each group, services
 * group 0 itself via svc_run_internal(), and finally waits for all
 * worker threads to exit.  Returns only after svc_exit() has shut the
 * pool down and every worker thread is gone.
 */
void
svc_run(SVCPOOL *pool)
{
	int g, i;
	struct proc *p;
	struct thread *td;
	SVCGROUP *grp;

	p = curproc;
	td = curthread;
	snprintf(td->td_name, sizeof(td->td_name),
	    "%s: master", pool->sp_name);
	pool->sp_state = SVCPOOL_ACTIVE;
	pool->sp_proc = p;

	/*
	 * Choose group count based on number of threads and CPUs:
	 * roughly one group per six CPUs, additionally capped by half
	 * the maximum thread count and by SVC_MAXGROUPS, with a floor
	 * of one group.  Per-group thread limits are the pool limits
	 * divided evenly among the groups.
	 */
	pool->sp_groupcount = max(1, min(SVC_MAXGROUPS,
	    min(pool->sp_maxthreads / 2, mp_ncpus) / 6));
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		grp->sg_minthreads = max(1,
		    pool->sp_minthreads / pool->sp_groupcount);
		grp->sg_maxthreads = max(1,
		    pool->sp_maxthreads / pool->sp_groupcount);
		grp->sg_lastcreatetime = time_uptime;
	}

	/*
	 * Starting threads.  The calling thread counts as group 0's
	 * first thread, hence the manual sg_threadcount bump and the
	 * loop starting at i = 1 for g == 0.
	 */
	pool->sp_groups[0].sg_threadcount++;
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		for (i = ((g == 0) ? 1 : 0); i < grp->sg_minthreads; i++)
			svc_new_thread(grp);
	}
	/* Service group 0 ourselves as the master thread. */
	svc_run_internal(&pool->sp_groups[0], TRUE);

	/*
	 * Waiting for threads to stop.  Each exiting worker does
	 * wakeup(grp) after decrementing sg_threadcount under sg_lock.
	 */
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		while (grp->sg_threadcount > 0)
			msleep(grp, &grp->sg_lock, 0, "svcexit", 0);
		mtx_unlock(&grp->sg_lock);
	}
}

/*
 * Request an orderly shutdown of the pool: mark the pool and every
 * group SVCPOOL_CLOSING and wake all idle worker threads so they
 * notice the state change and exit.
 */
void
svc_exit(SVCPOOL *pool)
{
	SVCGROUP *grp;
	SVCTHREAD *st;
	int g;

	pool->sp_state = SVCPOOL_CLOSING;
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		if (grp->sg_state != SVCPOOL_CLOSING) {
			grp->sg_state = SVCPOOL_CLOSING;
			LIST_FOREACH(st, &grp->sg_idlethreads, st_ilink)
				cv_signal(&st->st_cond);
		}
		mtx_unlock(&grp->sg_lock);
	}
}

/*
 * Decode a request's arguments with the supplied XDR routine.
 * Consumes rqstp->rq_args: the mbuf chain is handed to an xdrmbuf
 * stream and rq_args is cleared so svc_freereq() will not free it
 * again.  Returns TRUE on successful decode.
 *
 * NOTE(review): the mbuf chain appears to be owned and released by
 * the xdrmbuf stream via XDR_DESTROY -- confirm against
 * xdrmbuf_create()'s contract.
 */
bool_t
svc_getargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
{
	struct mbuf *m;
	XDR xdrs;
	bool_t stat;

	m = rqstp->rq_args;
	rqstp->rq_args = NULL;

	xdrmbuf_create(&xdrs, m, XDR_DECODE);
	stat = xargs(&xdrs, args);
	XDR_DESTROY(&xdrs);

	return (stat);
}

/*
 * Release resources associated with decoded arguments: free the
 * request's cached caller address (if any), then run the XDR routine
 * in XDR_FREE mode to release anything the decode allocated.
 */
bool_t
svc_freeargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
{
	XDR xdrs;

	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	xdrs.x_op = XDR_FREE;
	return (xargs(&xdrs, args));
}

/*
 * Release a request structure and everything it still references:
 * notify the pool's sp_done callback (if set), drop the auth handle
 * and the transport reference, and free the caller address, any
 * undecoded argument mbufs, and the request itself.
 */
void
svc_freereq(struct svc_req *rqstp)
{
	SVCTHREAD *st;
	SVCPOOL *pool;

	st = rqstp->rq_thread;
	if (st) {
		pool = st->st_pool;
		if (pool->sp_done)
			pool->sp_done(st, rqstp);
	}

	if (rqstp->rq_auth.svc_ah_ops)
		SVCAUTH_RELEASE(&rqstp->rq_auth);

	if (rqstp->rq_xprt) {
		SVC_RELEASE(rqstp->rq_xprt);
	}

	if (rqstp->rq_addr)
		free(rqstp->rq_addr, M_SONAME);

	if (rqstp->rq_args)
		m_freem(rqstp->rq_args);

	free(rqstp, M_RPC);
}