xref: /freebsd/sys/rpc/svc.c (revision 1b09d9df3d08f21c3815e388489c58c2f870812d)
1dfdcada3SDoug Rabson /*	$NetBSD: svc.c,v 1.21 2000/07/06 03:10:35 christos Exp $	*/
2dfdcada3SDoug Rabson 
32e322d37SHiroki Sato /*-
451369649SPedro F. Giffuni  * SPDX-License-Identifier: BSD-3-Clause
551369649SPedro F. Giffuni  *
62e322d37SHiroki Sato  * Copyright (c) 2009, Sun Microsystems, Inc.
72e322d37SHiroki Sato  * All rights reserved.
8dfdcada3SDoug Rabson  *
92e322d37SHiroki Sato  * Redistribution and use in source and binary forms, with or without
102e322d37SHiroki Sato  * modification, are permitted provided that the following conditions are met:
112e322d37SHiroki Sato  * - Redistributions of source code must retain the above copyright notice,
122e322d37SHiroki Sato  *   this list of conditions and the following disclaimer.
132e322d37SHiroki Sato  * - Redistributions in binary form must reproduce the above copyright notice,
142e322d37SHiroki Sato  *   this list of conditions and the following disclaimer in the documentation
152e322d37SHiroki Sato  *   and/or other materials provided with the distribution.
162e322d37SHiroki Sato  * - Neither the name of Sun Microsystems, Inc. nor the names of its
172e322d37SHiroki Sato  *   contributors may be used to endorse or promote products derived
182e322d37SHiroki Sato  *   from this software without specific prior written permission.
19dfdcada3SDoug Rabson  *
202e322d37SHiroki Sato  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
212e322d37SHiroki Sato  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
222e322d37SHiroki Sato  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
232e322d37SHiroki Sato  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
242e322d37SHiroki Sato  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
252e322d37SHiroki Sato  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
262e322d37SHiroki Sato  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
272e322d37SHiroki Sato  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
282e322d37SHiroki Sato  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
292e322d37SHiroki Sato  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
302e322d37SHiroki Sato  * POSSIBILITY OF SUCH DAMAGE.
31dfdcada3SDoug Rabson  */
32dfdcada3SDoug Rabson 
33dfdcada3SDoug Rabson #if defined(LIBC_SCCS) && !defined(lint)
34dfdcada3SDoug Rabson static char *sccsid2 = "@(#)svc.c 1.44 88/02/08 Copyr 1984 Sun Micro";
35dfdcada3SDoug Rabson static char *sccsid = "@(#)svc.c	2.4 88/08/11 4.0 RPCSRC";
36dfdcada3SDoug Rabson #endif
37dfdcada3SDoug Rabson #include <sys/cdefs.h>
38dfdcada3SDoug Rabson __FBSDID("$FreeBSD$");
39dfdcada3SDoug Rabson 
40dfdcada3SDoug Rabson /*
41dfdcada3SDoug Rabson  * svc.c, Server-side remote procedure call interface.
42dfdcada3SDoug Rabson  *
43dfdcada3SDoug Rabson  * There are two sets of procedures here.  The xprt routines are
44dfdcada3SDoug Rabson  * for handling transport handles.  The svc routines handle the
45dfdcada3SDoug Rabson  * list of service routines.
46dfdcada3SDoug Rabson  *
47dfdcada3SDoug Rabson  * Copyright (C) 1984, Sun Microsystems, Inc.
48dfdcada3SDoug Rabson  */
49dfdcada3SDoug Rabson 
50dfdcada3SDoug Rabson #include <sys/param.h>
51dfdcada3SDoug Rabson #include <sys/lock.h>
52dfdcada3SDoug Rabson #include <sys/kernel.h>
53a9148abdSDoug Rabson #include <sys/kthread.h>
54dfdcada3SDoug Rabson #include <sys/malloc.h>
55a9148abdSDoug Rabson #include <sys/mbuf.h>
56dfdcada3SDoug Rabson #include <sys/mutex.h>
57a9148abdSDoug Rabson #include <sys/proc.h>
58dfdcada3SDoug Rabson #include <sys/queue.h>
59a9148abdSDoug Rabson #include <sys/socketvar.h>
60dfdcada3SDoug Rabson #include <sys/systm.h>
61b563304cSAlexander Motin #include <sys/smp.h>
62d473bac7SAlexander Motin #include <sys/sx.h>
63dfdcada3SDoug Rabson #include <sys/ucred.h>
64dfdcada3SDoug Rabson 
65dfdcada3SDoug Rabson #include <rpc/rpc.h>
66dfdcada3SDoug Rabson #include <rpc/rpcb_clnt.h>
67a9148abdSDoug Rabson #include <rpc/replay.h>
68dfdcada3SDoug Rabson 
69ee31b83aSDoug Rabson #include <rpc/rpc_com.h>
70dfdcada3SDoug Rabson 
71dfdcada3SDoug Rabson #define SVC_VERSQUIET 0x0001		/* keep quiet about vers mismatch */
72a9148abdSDoug Rabson #define version_keepquiet(xp) (SVC_EXT(xp)->xp_flags & SVC_VERSQUIET)
73dfdcada3SDoug Rabson 
74dfdcada3SDoug Rabson static struct svc_callout *svc_find(SVCPOOL *pool, rpcprog_t, rpcvers_t,
75dfdcada3SDoug Rabson     char *);
76b563304cSAlexander Motin static void svc_new_thread(SVCGROUP *grp);
77a9148abdSDoug Rabson static void xprt_unregister_locked(SVCXPRT *xprt);
783c42b5bfSGarrett Wollman static void svc_change_space_used(SVCPOOL *pool, long delta);
79f8fb069dSAlexander Motin static bool_t svc_request_space_available(SVCPOOL *pool);
8090f90687SAndriy Gapon static void svcpool_cleanup(SVCPOOL *pool);
81dfdcada3SDoug Rabson 
82dfdcada3SDoug Rabson /* ***************  SVCXPRT related stuff **************** */
83dfdcada3SDoug Rabson 
84a9148abdSDoug Rabson static int svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS);
85a9148abdSDoug Rabson static int svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS);
86b563304cSAlexander Motin static int svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS);
87a9148abdSDoug Rabson 
88dfdcada3SDoug Rabson SVCPOOL*
89a9148abdSDoug Rabson svcpool_create(const char *name, struct sysctl_oid_list *sysctl_base)
90dfdcada3SDoug Rabson {
91dfdcada3SDoug Rabson 	SVCPOOL *pool;
92b563304cSAlexander Motin 	SVCGROUP *grp;
93b563304cSAlexander Motin 	int g;
94dfdcada3SDoug Rabson 
95dfdcada3SDoug Rabson 	pool = malloc(sizeof(SVCPOOL), M_RPC, M_WAITOK|M_ZERO);
96dfdcada3SDoug Rabson 
97dfdcada3SDoug Rabson 	mtx_init(&pool->sp_lock, "sp_lock", NULL, MTX_DEF);
98a9148abdSDoug Rabson 	pool->sp_name = name;
99a9148abdSDoug Rabson 	pool->sp_state = SVCPOOL_INIT;
100a9148abdSDoug Rabson 	pool->sp_proc = NULL;
101dfdcada3SDoug Rabson 	TAILQ_INIT(&pool->sp_callouts);
102d473bac7SAlexander Motin 	TAILQ_INIT(&pool->sp_lcallouts);
103a9148abdSDoug Rabson 	pool->sp_minthreads = 1;
104a9148abdSDoug Rabson 	pool->sp_maxthreads = 1;
105b563304cSAlexander Motin 	pool->sp_groupcount = 1;
106b563304cSAlexander Motin 	for (g = 0; g < SVC_MAXGROUPS; g++) {
107b563304cSAlexander Motin 		grp = &pool->sp_groups[g];
108b563304cSAlexander Motin 		mtx_init(&grp->sg_lock, "sg_lock", NULL, MTX_DEF);
109b563304cSAlexander Motin 		grp->sg_pool = pool;
110b563304cSAlexander Motin 		grp->sg_state = SVCPOOL_ACTIVE;
111b563304cSAlexander Motin 		TAILQ_INIT(&grp->sg_xlist);
112b563304cSAlexander Motin 		TAILQ_INIT(&grp->sg_active);
113b563304cSAlexander Motin 		LIST_INIT(&grp->sg_idlethreads);
114b563304cSAlexander Motin 		grp->sg_minthreads = 1;
115b563304cSAlexander Motin 		grp->sg_maxthreads = 1;
116b563304cSAlexander Motin 	}
117a9148abdSDoug Rabson 
118a9148abdSDoug Rabson 	/*
1193c42b5bfSGarrett Wollman 	 * Don't use more than a quarter of mbuf clusters.  Nota bene:
1203c42b5bfSGarrett Wollman 	 * nmbclusters is an int, but nmbclusters*MCLBYTES may overflow
1213c42b5bfSGarrett Wollman 	 * on LP64 architectures, so cast to u_long to avoid undefined
1223c42b5bfSGarrett Wollman 	 * behavior.  (ILP32 architectures cannot have nmbclusters
1233c42b5bfSGarrett Wollman 	 * large enough to overflow for other reasons.)
124a9148abdSDoug Rabson 	 */
1253c42b5bfSGarrett Wollman 	pool->sp_space_high = (u_long)nmbclusters * MCLBYTES / 4;
1263c42b5bfSGarrett Wollman 	pool->sp_space_low = (pool->sp_space_high / 3) * 2;
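	/*
	 * Illustrative arithmetic, not part of the original source: with a
	 * hypothetical nmbclusters of 1048576 and MCLBYTES of 2048, the
	 * computation above gives sp_space_high = 512 MB and sp_space_low
	 * of roughly 341 MB (two thirds of the high watermark).
	 */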
127a9148abdSDoug Rabson 
128a9148abdSDoug Rabson 	sysctl_ctx_init(&pool->sp_sysctl);
129a9148abdSDoug Rabson 	if (sysctl_base) {
130a9148abdSDoug Rabson 		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
131a9148abdSDoug Rabson 		    "minthreads", CTLTYPE_INT | CTLFLAG_RW,
132b563304cSAlexander Motin 		    pool, 0, svcpool_minthread_sysctl, "I",
133b563304cSAlexander Motin 		    "Minimal number of threads");
134a9148abdSDoug Rabson 		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
135a9148abdSDoug Rabson 		    "maxthreads", CTLTYPE_INT | CTLFLAG_RW,
136b563304cSAlexander Motin 		    pool, 0, svcpool_maxthread_sysctl, "I",
137b563304cSAlexander Motin 		    "Maximal number of threads");
138b563304cSAlexander Motin 		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
139b563304cSAlexander Motin 		    "threads", CTLTYPE_INT | CTLFLAG_RD,
140b563304cSAlexander Motin 		    pool, 0, svcpool_threads_sysctl, "I",
141b563304cSAlexander Motin 		    "Current number of threads");
142a9148abdSDoug Rabson 		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
143b563304cSAlexander Motin 		    "groups", CTLFLAG_RD, &pool->sp_groupcount, 0,
144b563304cSAlexander Motin 		    "Number of thread groups");
145a9148abdSDoug Rabson 
1463c42b5bfSGarrett Wollman 		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
147a9148abdSDoug Rabson 		    "request_space_used", CTLFLAG_RD,
1483c42b5bfSGarrett Wollman 		    &pool->sp_space_used,
149a9148abdSDoug Rabson 		    "Space in parsed but not handled requests.");
150a9148abdSDoug Rabson 
1513c42b5bfSGarrett Wollman 		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
152a9148abdSDoug Rabson 		    "request_space_used_highest", CTLFLAG_RD,
1533c42b5bfSGarrett Wollman 		    &pool->sp_space_used_highest,
154a9148abdSDoug Rabson 		    "Highest space used since reboot.");
155a9148abdSDoug Rabson 
1563c42b5bfSGarrett Wollman 		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
157a9148abdSDoug Rabson 		    "request_space_high", CTLFLAG_RW,
1583c42b5bfSGarrett Wollman 		    &pool->sp_space_high,
159a9148abdSDoug Rabson 		    "Maximum space in parsed but not handled requests.");
160a9148abdSDoug Rabson 
1613c42b5bfSGarrett Wollman 		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
162a9148abdSDoug Rabson 		    "request_space_low", CTLFLAG_RW,
1633c42b5bfSGarrett Wollman 		    &pool->sp_space_low,
164a9148abdSDoug Rabson 		    "Low water mark for request space.");
165a9148abdSDoug Rabson 
166fbbb13f9SMatthew D Fleming 		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
167a9148abdSDoug Rabson 		    "request_space_throttled", CTLFLAG_RD,
168a9148abdSDoug Rabson 		    &pool->sp_space_throttled, 0,
169a9148abdSDoug Rabson 		    "Whether nfs requests are currently throttled");
170a9148abdSDoug Rabson 
171fbbb13f9SMatthew D Fleming 		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
172a9148abdSDoug Rabson 		    "request_space_throttle_count", CTLFLAG_RD,
173a9148abdSDoug Rabson 		    &pool->sp_space_throttle_count, 0,
174a9148abdSDoug Rabson 		    "Count of times throttling based on request space has occurred");
175a9148abdSDoug Rabson 	}
176dfdcada3SDoug Rabson 
177dfdcada3SDoug Rabson 	return pool;
178dfdcada3SDoug Rabson }
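/*
 * A minimal lifecycle sketch for consumers of this pool API (illustrative
 * only; the pool name "myrpc" is hypothetical and the transport handle is
 * assumed to come from a transport backend):
 *
 *	pool = svcpool_create("myrpc", sysctl_oid_list);
 *	svc_reg(xprt, prog, vers, dispatch, nconf);
 *	... service threads handle requests ...
 *	svc_unreg(pool, prog, vers);
 *	svcpool_destroy(pool);	(or svcpool_close() to reuse the pool)
 */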
179dfdcada3SDoug Rabson 
18090f90687SAndriy Gapon /*
18190f90687SAndriy Gapon  * Code common to svcpool_destroy() and svcpool_close(), which cleans up
18290f90687SAndriy Gapon  * the pool data structures.
18390f90687SAndriy Gapon  */
18490f90687SAndriy Gapon static void
18590f90687SAndriy Gapon svcpool_cleanup(SVCPOOL *pool)
186dfdcada3SDoug Rabson {
187b563304cSAlexander Motin 	SVCGROUP *grp;
188a9148abdSDoug Rabson 	SVCXPRT *xprt, *nxprt;
189dfdcada3SDoug Rabson 	struct svc_callout *s;
190d473bac7SAlexander Motin 	struct svc_loss_callout *sl;
191a9148abdSDoug Rabson 	struct svcxprt_list cleanup;
192b563304cSAlexander Motin 	int g;
193dfdcada3SDoug Rabson 
194a9148abdSDoug Rabson 	TAILQ_INIT(&cleanup);
195dfdcada3SDoug Rabson 
196b563304cSAlexander Motin 	for (g = 0; g < SVC_MAXGROUPS; g++) {
197b563304cSAlexander Motin 		grp = &pool->sp_groups[g];
198b563304cSAlexander Motin 		mtx_lock(&grp->sg_lock);
199b563304cSAlexander Motin 		while ((xprt = TAILQ_FIRST(&grp->sg_xlist)) != NULL) {
200a9148abdSDoug Rabson 			xprt_unregister_locked(xprt);
201a9148abdSDoug Rabson 			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
202dfdcada3SDoug Rabson 		}
203b563304cSAlexander Motin 		mtx_unlock(&grp->sg_lock);
204b563304cSAlexander Motin 	}
205b563304cSAlexander Motin 	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
206b563304cSAlexander Motin 		SVC_RELEASE(xprt);
207b563304cSAlexander Motin 	}
208dfdcada3SDoug Rabson 
209b563304cSAlexander Motin 	mtx_lock(&pool->sp_lock);
210d473bac7SAlexander Motin 	while ((s = TAILQ_FIRST(&pool->sp_callouts)) != NULL) {
211dfdcada3SDoug Rabson 		mtx_unlock(&pool->sp_lock);
212dfdcada3SDoug Rabson 		svc_unreg(pool, s->sc_prog, s->sc_vers);
213dfdcada3SDoug Rabson 		mtx_lock(&pool->sp_lock);
214dfdcada3SDoug Rabson 	}
215d473bac7SAlexander Motin 	while ((sl = TAILQ_FIRST(&pool->sp_lcallouts)) != NULL) {
216d473bac7SAlexander Motin 		mtx_unlock(&pool->sp_lock);
217d473bac7SAlexander Motin 		svc_loss_unreg(pool, sl->slc_dispatch);
218d473bac7SAlexander Motin 		mtx_lock(&pool->sp_lock);
219d473bac7SAlexander Motin 	}
22075f2ae1aSRick Macklem 	mtx_unlock(&pool->sp_lock);
22190f90687SAndriy Gapon }
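/*
 * Note on the cleanup above (descriptive only): transports are unregistered
 * under each group's sg_lock but released only after that lock is dropped,
 * and sp_lock is dropped around svc_unreg()/svc_loss_unreg() because those
 * routines acquire it themselves.
 */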
22290f90687SAndriy Gapon 
22390f90687SAndriy Gapon void
22490f90687SAndriy Gapon svcpool_destroy(SVCPOOL *pool)
22590f90687SAndriy Gapon {
22690f90687SAndriy Gapon 	SVCGROUP *grp;
22790f90687SAndriy Gapon 	int g;
22890f90687SAndriy Gapon 
22990f90687SAndriy Gapon 	svcpool_cleanup(pool);
230dfdcada3SDoug Rabson 
231b563304cSAlexander Motin 	for (g = 0; g < SVC_MAXGROUPS; g++) {
232b563304cSAlexander Motin 		grp = &pool->sp_groups[g];
233b563304cSAlexander Motin 		mtx_destroy(&grp->sg_lock);
234a9148abdSDoug Rabson 	}
235a4fa5e6dSRick Macklem 	mtx_destroy(&pool->sp_lock);
236a4fa5e6dSRick Macklem 
237a9148abdSDoug Rabson 	if (pool->sp_rcache)
238a9148abdSDoug Rabson 		replay_freecache(pool->sp_rcache);
239a9148abdSDoug Rabson 
240a9148abdSDoug Rabson 	sysctl_ctx_free(&pool->sp_sysctl);
241dfdcada3SDoug Rabson 	free(pool, M_RPC);
242dfdcada3SDoug Rabson }
243dfdcada3SDoug Rabson 
244b563304cSAlexander Motin /*
24590f90687SAndriy Gapon  * Similar to svcpool_destroy(), except that it does not destroy the actual
24690f90687SAndriy Gapon  * data structures.  As such, "pool" may be used again.
24790f90687SAndriy Gapon  */
24890f90687SAndriy Gapon void
24990f90687SAndriy Gapon svcpool_close(SVCPOOL *pool)
25090f90687SAndriy Gapon {
25190f90687SAndriy Gapon 	SVCGROUP *grp;
25290f90687SAndriy Gapon 	int g;
25390f90687SAndriy Gapon 
25490f90687SAndriy Gapon 	svcpool_cleanup(pool);
25590f90687SAndriy Gapon 
25690f90687SAndriy Gapon 	/* Now, initialize the pool's state for a fresh svc_run() call. */
25790f90687SAndriy Gapon 	mtx_lock(&pool->sp_lock);
25890f90687SAndriy Gapon 	pool->sp_state = SVCPOOL_INIT;
25990f90687SAndriy Gapon 	mtx_unlock(&pool->sp_lock);
26090f90687SAndriy Gapon 	for (g = 0; g < SVC_MAXGROUPS; g++) {
26190f90687SAndriy Gapon 		grp = &pool->sp_groups[g];
26290f90687SAndriy Gapon 		mtx_lock(&grp->sg_lock);
26390f90687SAndriy Gapon 		grp->sg_state = SVCPOOL_ACTIVE;
26490f90687SAndriy Gapon 		mtx_unlock(&grp->sg_lock);
26590f90687SAndriy Gapon 	}
26690f90687SAndriy Gapon }
26790f90687SAndriy Gapon 
26890f90687SAndriy Gapon /*
269b563304cSAlexander Motin  * Sysctl handler to get the current thread count on a pool
270b563304cSAlexander Motin  */
271b563304cSAlexander Motin static int
272b563304cSAlexander Motin svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS)
273a9148abdSDoug Rabson {
274b563304cSAlexander Motin 	SVCPOOL *pool;
275b563304cSAlexander Motin 	int threads, error, g;
276a9148abdSDoug Rabson 
277b563304cSAlexander Motin 	pool = oidp->oid_arg1;
278b563304cSAlexander Motin 	threads = 0;
279b563304cSAlexander Motin 	mtx_lock(&pool->sp_lock);
280b563304cSAlexander Motin 	for (g = 0; g < pool->sp_groupcount; g++)
281b563304cSAlexander Motin 		threads += pool->sp_groups[g].sg_threadcount;
282b563304cSAlexander Motin 	mtx_unlock(&pool->sp_lock);
283b563304cSAlexander Motin 	error = sysctl_handle_int(oidp, &threads, 0, req);
284b563304cSAlexander Motin 	return (error);
285a9148abdSDoug Rabson }
286a9148abdSDoug Rabson 
287a9148abdSDoug Rabson /*
288a9148abdSDoug Rabson  * Sysctl handler to set the minimum thread count on a pool
289a9148abdSDoug Rabson  */
290a9148abdSDoug Rabson static int
291a9148abdSDoug Rabson svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS)
292a9148abdSDoug Rabson {
293a9148abdSDoug Rabson 	SVCPOOL *pool;
294b563304cSAlexander Motin 	int newminthreads, error, g;
295a9148abdSDoug Rabson 
296a9148abdSDoug Rabson 	pool = oidp->oid_arg1;
297a9148abdSDoug Rabson 	newminthreads = pool->sp_minthreads;
298a9148abdSDoug Rabson 	error = sysctl_handle_int(oidp, &newminthreads, 0, req);
299a9148abdSDoug Rabson 	if (error == 0 && newminthreads != pool->sp_minthreads) {
300a9148abdSDoug Rabson 		if (newminthreads > pool->sp_maxthreads)
301a9148abdSDoug Rabson 			return (EINVAL);
302a9148abdSDoug Rabson 		mtx_lock(&pool->sp_lock);
303a9148abdSDoug Rabson 		pool->sp_minthreads = newminthreads;
304b563304cSAlexander Motin 		for (g = 0; g < pool->sp_groupcount; g++) {
305b563304cSAlexander Motin 			pool->sp_groups[g].sg_minthreads = max(1,
306b563304cSAlexander Motin 			    pool->sp_minthreads / pool->sp_groupcount);
307b563304cSAlexander Motin 		}
308a9148abdSDoug Rabson 		mtx_unlock(&pool->sp_lock);
309a9148abdSDoug Rabson 	}
310a9148abdSDoug Rabson 	return (error);
311a9148abdSDoug Rabson }
312a9148abdSDoug Rabson 
313a9148abdSDoug Rabson /*
314a9148abdSDoug Rabson  * Sysctl handler to set the maximum thread count on a pool
315a9148abdSDoug Rabson  */
316a9148abdSDoug Rabson static int
317a9148abdSDoug Rabson svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS)
318a9148abdSDoug Rabson {
319a9148abdSDoug Rabson 	SVCPOOL *pool;
320b563304cSAlexander Motin 	int newmaxthreads, error, g;
321a9148abdSDoug Rabson 
322a9148abdSDoug Rabson 	pool = oidp->oid_arg1;
323a9148abdSDoug Rabson 	newmaxthreads = pool->sp_maxthreads;
324a9148abdSDoug Rabson 	error = sysctl_handle_int(oidp, &newmaxthreads, 0, req);
325a9148abdSDoug Rabson 	if (error == 0 && newmaxthreads != pool->sp_maxthreads) {
326a9148abdSDoug Rabson 		if (newmaxthreads < pool->sp_minthreads)
327a9148abdSDoug Rabson 			return (EINVAL);
328a9148abdSDoug Rabson 		mtx_lock(&pool->sp_lock);
329a9148abdSDoug Rabson 		pool->sp_maxthreads = newmaxthreads;
330b563304cSAlexander Motin 		for (g = 0; g < pool->sp_groupcount; g++) {
331b563304cSAlexander Motin 			pool->sp_groups[g].sg_maxthreads = max(1,
332b563304cSAlexander Motin 			    pool->sp_maxthreads / pool->sp_groupcount);
333b563304cSAlexander Motin 		}
334a9148abdSDoug Rabson 		mtx_unlock(&pool->sp_lock);
335a9148abdSDoug Rabson 	}
336a9148abdSDoug Rabson 	return (error);
337a9148abdSDoug Rabson }
338a9148abdSDoug Rabson 
339dfdcada3SDoug Rabson /*
340dfdcada3SDoug Rabson  * Activate a transport handle.
341dfdcada3SDoug Rabson  */
342dfdcada3SDoug Rabson void
343dfdcada3SDoug Rabson xprt_register(SVCXPRT *xprt)
344dfdcada3SDoug Rabson {
345dfdcada3SDoug Rabson 	SVCPOOL *pool = xprt->xp_pool;
346b563304cSAlexander Motin 	SVCGROUP *grp;
347b563304cSAlexander Motin 	int g;
348dfdcada3SDoug Rabson 
3496b97c9f0SRick Macklem 	SVC_ACQUIRE(xprt);
350b563304cSAlexander Motin 	g = atomic_fetchadd_int(&pool->sp_nextgroup, 1) % pool->sp_groupcount;
351b563304cSAlexander Motin 	xprt->xp_group = grp = &pool->sp_groups[g];
352b563304cSAlexander Motin 	mtx_lock(&grp->sg_lock);
353dfdcada3SDoug Rabson 	xprt->xp_registered = TRUE;
354dfdcada3SDoug Rabson 	xprt->xp_active = FALSE;
355b563304cSAlexander Motin 	TAILQ_INSERT_TAIL(&grp->sg_xlist, xprt, xp_link);
356b563304cSAlexander Motin 	mtx_unlock(&grp->sg_lock);
357dfdcada3SDoug Rabson }
358dfdcada3SDoug Rabson 
359dfdcada3SDoug Rabson /*
360a9148abdSDoug Rabson  * De-activate a transport handle. Note: the locked version doesn't
361a9148abdSDoug Rabson  * release the transport - the caller must do that after dropping the
362a9148abdSDoug Rabson  * group lock.
363dfdcada3SDoug Rabson  */
364dfdcada3SDoug Rabson static void
365a9148abdSDoug Rabson xprt_unregister_locked(SVCXPRT *xprt)
366dfdcada3SDoug Rabson {
367b563304cSAlexander Motin 	SVCGROUP *grp = xprt->xp_group;
368dfdcada3SDoug Rabson 
369b563304cSAlexander Motin 	mtx_assert(&grp->sg_lock, MA_OWNED);
370bca2ec16SRick Macklem 	KASSERT(xprt->xp_registered == TRUE,
371bca2ec16SRick Macklem 	    ("xprt_unregister_locked: not registered"));
372ba981145SAlexander Motin 	xprt_inactive_locked(xprt);
373b563304cSAlexander Motin 	TAILQ_REMOVE(&grp->sg_xlist, xprt, xp_link);
374dfdcada3SDoug Rabson 	xprt->xp_registered = FALSE;
375a9148abdSDoug Rabson }
376dfdcada3SDoug Rabson 
377a9148abdSDoug Rabson void
378a9148abdSDoug Rabson xprt_unregister(SVCXPRT *xprt)
379a9148abdSDoug Rabson {
380b563304cSAlexander Motin 	SVCGROUP *grp = xprt->xp_group;
381a9148abdSDoug Rabson 
382b563304cSAlexander Motin 	mtx_lock(&grp->sg_lock);
383bca2ec16SRick Macklem 	if (xprt->xp_registered == FALSE) {
384bca2ec16SRick Macklem 		/* Already unregistered by another thread */
385b563304cSAlexander Motin 		mtx_unlock(&grp->sg_lock);
386bca2ec16SRick Macklem 		return;
387bca2ec16SRick Macklem 	}
388a9148abdSDoug Rabson 	xprt_unregister_locked(xprt);
389b563304cSAlexander Motin 	mtx_unlock(&grp->sg_lock);
390a9148abdSDoug Rabson 
391a9148abdSDoug Rabson 	SVC_RELEASE(xprt);
392a9148abdSDoug Rabson }
393a9148abdSDoug Rabson 
394ba981145SAlexander Motin /*
395ba981145SAlexander Motin  * Attempt to assign a service thread to this transport.
396ba981145SAlexander Motin  */
397ba981145SAlexander Motin static int
398a9148abdSDoug Rabson xprt_assignthread(SVCXPRT *xprt)
399a9148abdSDoug Rabson {
400b563304cSAlexander Motin 	SVCGROUP *grp = xprt->xp_group;
401a9148abdSDoug Rabson 	SVCTHREAD *st;
402a9148abdSDoug Rabson 
403b563304cSAlexander Motin 	mtx_assert(&grp->sg_lock, MA_OWNED);
404b563304cSAlexander Motin 	st = LIST_FIRST(&grp->sg_idlethreads);
405a9148abdSDoug Rabson 	if (st) {
406ba981145SAlexander Motin 		LIST_REMOVE(st, st_ilink);
407a9148abdSDoug Rabson 		SVC_ACQUIRE(xprt);
408a9148abdSDoug Rabson 		xprt->xp_thread = st;
409a9148abdSDoug Rabson 		st->st_xprt = xprt;
410a9148abdSDoug Rabson 		cv_signal(&st->st_cond);
411ba981145SAlexander Motin 		return (TRUE);
412a9148abdSDoug Rabson 	} else {
413a9148abdSDoug Rabson 		/*
414a9148abdSDoug Rabson 		 * See if we can create a new thread. The
415a9148abdSDoug Rabson 		 * actual thread creation happens in
416a9148abdSDoug Rabson 		 * svc_run_internal because our locking state
417a9148abdSDoug Rabson 		 * is poorly defined (we are typically called
418a9148abdSDoug Rabson 		 * from a socket upcall). Don't create more
419a9148abdSDoug Rabson 		 * than one thread per second.
420a9148abdSDoug Rabson 		 */
421b563304cSAlexander Motin 		if (grp->sg_state == SVCPOOL_ACTIVE
422b563304cSAlexander Motin 		    && grp->sg_lastcreatetime < time_uptime
423b563304cSAlexander Motin 		    && grp->sg_threadcount < grp->sg_maxthreads) {
424b563304cSAlexander Motin 			grp->sg_state = SVCPOOL_THREADWANTED;
425a9148abdSDoug Rabson 		}
426a9148abdSDoug Rabson 	}
427ba981145SAlexander Motin 	return (FALSE);
428dfdcada3SDoug Rabson }
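/*
 * Sketch of the thread-creation handshake implied above, based only on the
 * code in this file: a socket upcall cannot create a kernel thread itself,
 * so xprt_assignthread() merely marks the group SVCPOOL_THREADWANTED.  A
 * running service thread notices that state in svc_run_internal(), switches
 * it to SVCPOOL_THREADSTARTING and calls svc_new_thread(), and the new
 * thread resets the state to SVCPOOL_ACTIVE once it starts.
 */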
429dfdcada3SDoug Rabson 
430dfdcada3SDoug Rabson void
431dfdcada3SDoug Rabson xprt_active(SVCXPRT *xprt)
432dfdcada3SDoug Rabson {
433b563304cSAlexander Motin 	SVCGROUP *grp = xprt->xp_group;
434dfdcada3SDoug Rabson 
435b563304cSAlexander Motin 	mtx_lock(&grp->sg_lock);
436a4fa5e6dSRick Macklem 
437a9148abdSDoug Rabson 	if (!xprt->xp_registered) {
438a9148abdSDoug Rabson 		/*
439a9148abdSDoug Rabson 		 * Race with xprt_unregister - we lose.
440a9148abdSDoug Rabson 		 */
441b563304cSAlexander Motin 		mtx_unlock(&grp->sg_lock);
442a9148abdSDoug Rabson 		return;
443a9148abdSDoug Rabson 	}
444a9148abdSDoug Rabson 
445dfdcada3SDoug Rabson 	if (!xprt->xp_active) {
446dfdcada3SDoug Rabson 		xprt->xp_active = TRUE;
447ba981145SAlexander Motin 		if (xprt->xp_thread == NULL) {
448b563304cSAlexander Motin 			if (!svc_request_space_available(xprt->xp_pool) ||
449f8fb069dSAlexander Motin 			    !xprt_assignthread(xprt))
450b563304cSAlexander Motin 				TAILQ_INSERT_TAIL(&grp->sg_active, xprt,
451ba981145SAlexander Motin 				    xp_alink);
452ba981145SAlexander Motin 		}
453dfdcada3SDoug Rabson 	}
454dfdcada3SDoug Rabson 
455b563304cSAlexander Motin 	mtx_unlock(&grp->sg_lock);
456dfdcada3SDoug Rabson }
457dfdcada3SDoug Rabson 
458dfdcada3SDoug Rabson void
459a9148abdSDoug Rabson xprt_inactive_locked(SVCXPRT *xprt)
460a9148abdSDoug Rabson {
461b563304cSAlexander Motin 	SVCGROUP *grp = xprt->xp_group;
462a9148abdSDoug Rabson 
463b563304cSAlexander Motin 	mtx_assert(&grp->sg_lock, MA_OWNED);
464a9148abdSDoug Rabson 	if (xprt->xp_active) {
465ba981145SAlexander Motin 		if (xprt->xp_thread == NULL)
466b563304cSAlexander Motin 			TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
467a9148abdSDoug Rabson 		xprt->xp_active = FALSE;
468a9148abdSDoug Rabson 	}
469a9148abdSDoug Rabson }
470a9148abdSDoug Rabson 
471a9148abdSDoug Rabson void
472dfdcada3SDoug Rabson xprt_inactive(SVCXPRT *xprt)
473dfdcada3SDoug Rabson {
474b563304cSAlexander Motin 	SVCGROUP *grp = xprt->xp_group;
475dfdcada3SDoug Rabson 
476b563304cSAlexander Motin 	mtx_lock(&grp->sg_lock);
477a9148abdSDoug Rabson 	xprt_inactive_locked(xprt);
478b563304cSAlexander Motin 	mtx_unlock(&grp->sg_lock);
479dfdcada3SDoug Rabson }
480dfdcada3SDoug Rabson 
481dfdcada3SDoug Rabson /*
4825c42b9dcSAlexander Motin  * Variant of xprt_inactive() for use only when it is certain that the
4836244c6e7SPedro F. Giffuni  * port is assigned to a thread, for example within receive handlers.
4845c42b9dcSAlexander Motin  */
4855c42b9dcSAlexander Motin void
4865c42b9dcSAlexander Motin xprt_inactive_self(SVCXPRT *xprt)
4875c42b9dcSAlexander Motin {
4885c42b9dcSAlexander Motin 
4895c42b9dcSAlexander Motin 	KASSERT(xprt->xp_thread != NULL,
4905c42b9dcSAlexander Motin 	    ("xprt_inactive_self(%p) with NULL xp_thread", xprt));
4915c42b9dcSAlexander Motin 	xprt->xp_active = FALSE;
4925c42b9dcSAlexander Motin }
4935c42b9dcSAlexander Motin 
4945c42b9dcSAlexander Motin /*
495dfdcada3SDoug Rabson  * Add a service program to the callout list.
496dfdcada3SDoug Rabson  * The dispatch routine will be called when a rpc request for this
497dfdcada3SDoug Rabson  * program number comes in.
498dfdcada3SDoug Rabson  */
499dfdcada3SDoug Rabson bool_t
500dfdcada3SDoug Rabson svc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
501dfdcada3SDoug Rabson     void (*dispatch)(struct svc_req *, SVCXPRT *),
502dfdcada3SDoug Rabson     const struct netconfig *nconf)
503dfdcada3SDoug Rabson {
504dfdcada3SDoug Rabson 	SVCPOOL *pool = xprt->xp_pool;
505dfdcada3SDoug Rabson 	struct svc_callout *s;
506dfdcada3SDoug Rabson 	char *netid = NULL;
507dfdcada3SDoug Rabson 	int flag = 0;
508dfdcada3SDoug Rabson 
509dfdcada3SDoug Rabson /* VARIABLES PROTECTED BY pool->sp_lock: s, pool->sp_callouts */
510dfdcada3SDoug Rabson 
511dfdcada3SDoug Rabson 	if (xprt->xp_netid) {
512dfdcada3SDoug Rabson 		netid = strdup(xprt->xp_netid, M_RPC);
513dfdcada3SDoug Rabson 		flag = 1;
514dfdcada3SDoug Rabson 	} else if (nconf && nconf->nc_netid) {
515dfdcada3SDoug Rabson 		netid = strdup(nconf->nc_netid, M_RPC);
516dfdcada3SDoug Rabson 		flag = 1;
517dfdcada3SDoug Rabson 	} /* must have been created with svc_raw_create */
518dfdcada3SDoug Rabson 	if ((netid == NULL) && (flag == 1)) {
519dfdcada3SDoug Rabson 		return (FALSE);
520dfdcada3SDoug Rabson 	}
521dfdcada3SDoug Rabson 
522dfdcada3SDoug Rabson 	mtx_lock(&pool->sp_lock);
523dfdcada3SDoug Rabson 	if ((s = svc_find(pool, prog, vers, netid)) != NULL) {
524dfdcada3SDoug Rabson 		if (netid)
525dfdcada3SDoug Rabson 			free(netid, M_RPC);
526dfdcada3SDoug Rabson 		if (s->sc_dispatch == dispatch)
527dfdcada3SDoug Rabson 			goto rpcb_it; /* he is registering another xprt */
528dfdcada3SDoug Rabson 		mtx_unlock(&pool->sp_lock);
529dfdcada3SDoug Rabson 		return (FALSE);
530dfdcada3SDoug Rabson 	}
531dfdcada3SDoug Rabson 	s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT);
532dfdcada3SDoug Rabson 	if (s == NULL) {
533dfdcada3SDoug Rabson 		if (netid)
534dfdcada3SDoug Rabson 			free(netid, M_RPC);
535dfdcada3SDoug Rabson 		mtx_unlock(&pool->sp_lock);
536dfdcada3SDoug Rabson 		return (FALSE);
537dfdcada3SDoug Rabson 	}
538dfdcada3SDoug Rabson 
539dfdcada3SDoug Rabson 	s->sc_prog = prog;
540dfdcada3SDoug Rabson 	s->sc_vers = vers;
541dfdcada3SDoug Rabson 	s->sc_dispatch = dispatch;
542dfdcada3SDoug Rabson 	s->sc_netid = netid;
543dfdcada3SDoug Rabson 	TAILQ_INSERT_TAIL(&pool->sp_callouts, s, sc_link);
544dfdcada3SDoug Rabson 
545dfdcada3SDoug Rabson 	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
546dfdcada3SDoug Rabson 		((SVCXPRT *) xprt)->xp_netid = strdup(netid, M_RPC);
547dfdcada3SDoug Rabson 
548dfdcada3SDoug Rabson rpcb_it:
549dfdcada3SDoug Rabson 	mtx_unlock(&pool->sp_lock);
550dfdcada3SDoug Rabson 	/* now register the information with the local binder service */
551dfdcada3SDoug Rabson 	if (nconf) {
552dfdcada3SDoug Rabson 		bool_t dummy;
553dfdcada3SDoug Rabson 		struct netconfig tnc;
554a9148abdSDoug Rabson 		struct netbuf nb;
555dfdcada3SDoug Rabson 		tnc = *nconf;
556a9148abdSDoug Rabson 		nb.buf = &xprt->xp_ltaddr;
557a9148abdSDoug Rabson 		nb.len = xprt->xp_ltaddr.ss_len;
558a9148abdSDoug Rabson 		dummy = rpcb_set(prog, vers, &tnc, &nb);
559dfdcada3SDoug Rabson 		return (dummy);
560dfdcada3SDoug Rabson 	}
561dfdcada3SDoug Rabson 	return (TRUE);
562dfdcada3SDoug Rabson }
563dfdcada3SDoug Rabson 
564dfdcada3SDoug Rabson /*
565dfdcada3SDoug Rabson  * Remove a service program from the callout list.
566dfdcada3SDoug Rabson  */
567dfdcada3SDoug Rabson void
568dfdcada3SDoug Rabson svc_unreg(SVCPOOL *pool, const rpcprog_t prog, const rpcvers_t vers)
569dfdcada3SDoug Rabson {
570dfdcada3SDoug Rabson 	struct svc_callout *s;
571dfdcada3SDoug Rabson 
572dfdcada3SDoug Rabson 	/* unregister the information anyway */
573dfdcada3SDoug Rabson 	(void) rpcb_unset(prog, vers, NULL);
574dfdcada3SDoug Rabson 	mtx_lock(&pool->sp_lock);
575dfdcada3SDoug Rabson 	while ((s = svc_find(pool, prog, vers, NULL)) != NULL) {
576dfdcada3SDoug Rabson 		TAILQ_REMOVE(&pool->sp_callouts, s, sc_link);
577dfdcada3SDoug Rabson 		if (s->sc_netid)
578dfdcada3SDoug Rabson 			mem_free(s->sc_netid, sizeof (s->sc_netid) + 1);
579dfdcada3SDoug Rabson 		mem_free(s, sizeof (struct svc_callout));
580dfdcada3SDoug Rabson 	}
581dfdcada3SDoug Rabson 	mtx_unlock(&pool->sp_lock);
582dfdcada3SDoug Rabson }
583dfdcada3SDoug Rabson 
584d473bac7SAlexander Motin /*
585d473bac7SAlexander Motin  * Add a service connection loss program to the callout list.
586d473bac7SAlexander Motin  * The dispatch routine will be called when some port in this pool dies.
587d473bac7SAlexander Motin  */
588d473bac7SAlexander Motin bool_t
589d473bac7SAlexander Motin svc_loss_reg(SVCXPRT *xprt, void (*dispatch)(SVCXPRT *))
590d473bac7SAlexander Motin {
591d473bac7SAlexander Motin 	SVCPOOL *pool = xprt->xp_pool;
592d473bac7SAlexander Motin 	struct svc_loss_callout *s;
593d473bac7SAlexander Motin 
594d473bac7SAlexander Motin 	mtx_lock(&pool->sp_lock);
595d473bac7SAlexander Motin 	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
596d473bac7SAlexander Motin 		if (s->slc_dispatch == dispatch)
597d473bac7SAlexander Motin 			break;
598d473bac7SAlexander Motin 	}
599d473bac7SAlexander Motin 	if (s != NULL) {
600d473bac7SAlexander Motin 		mtx_unlock(&pool->sp_lock);
601d473bac7SAlexander Motin 		return (TRUE);
602d473bac7SAlexander Motin 	}
6038576dc00SAlexander Motin 	s = malloc(sizeof(struct svc_loss_callout), M_RPC, M_NOWAIT);
604d473bac7SAlexander Motin 	if (s == NULL) {
605d473bac7SAlexander Motin 		mtx_unlock(&pool->sp_lock);
606d473bac7SAlexander Motin 		return (FALSE);
607d473bac7SAlexander Motin 	}
608d473bac7SAlexander Motin 	s->slc_dispatch = dispatch;
609d473bac7SAlexander Motin 	TAILQ_INSERT_TAIL(&pool->sp_lcallouts, s, slc_link);
610d473bac7SAlexander Motin 	mtx_unlock(&pool->sp_lock);
611d473bac7SAlexander Motin 	return (TRUE);
612d473bac7SAlexander Motin }
613d473bac7SAlexander Motin 
614d473bac7SAlexander Motin /*
615d473bac7SAlexander Motin  * Remove a service connection loss program from the callout list.
616d473bac7SAlexander Motin  */
617d473bac7SAlexander Motin void
618d473bac7SAlexander Motin svc_loss_unreg(SVCPOOL *pool, void (*dispatch)(SVCXPRT *))
619d473bac7SAlexander Motin {
620d473bac7SAlexander Motin 	struct svc_loss_callout *s;
621d473bac7SAlexander Motin 
622d473bac7SAlexander Motin 	mtx_lock(&pool->sp_lock);
623d473bac7SAlexander Motin 	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
624d473bac7SAlexander Motin 		if (s->slc_dispatch == dispatch) {
625d473bac7SAlexander Motin 			TAILQ_REMOVE(&pool->sp_lcallouts, s, slc_link);
626d473bac7SAlexander Motin 			free(s, M_RPC);
627d473bac7SAlexander Motin 			break;
628d473bac7SAlexander Motin 		}
629d473bac7SAlexander Motin 	}
630d473bac7SAlexander Motin 	mtx_unlock(&pool->sp_lock);
631d473bac7SAlexander Motin }
632d473bac7SAlexander Motin 
633dfdcada3SDoug Rabson /* ********************** CALLOUT list related stuff ************* */
634dfdcada3SDoug Rabson 
635dfdcada3SDoug Rabson /*
636dfdcada3SDoug Rabson  * Search the callout list for a program number, return the callout
637dfdcada3SDoug Rabson  * struct.
638dfdcada3SDoug Rabson  */
639dfdcada3SDoug Rabson static struct svc_callout *
640dfdcada3SDoug Rabson svc_find(SVCPOOL *pool, rpcprog_t prog, rpcvers_t vers, char *netid)
641dfdcada3SDoug Rabson {
642dfdcada3SDoug Rabson 	struct svc_callout *s;
643dfdcada3SDoug Rabson 
644dfdcada3SDoug Rabson 	mtx_assert(&pool->sp_lock, MA_OWNED);
645dfdcada3SDoug Rabson 	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
646dfdcada3SDoug Rabson 		if (s->sc_prog == prog && s->sc_vers == vers
647dfdcada3SDoug Rabson 		    && (netid == NULL || s->sc_netid == NULL ||
648dfdcada3SDoug Rabson 			strcmp(netid, s->sc_netid) == 0))
649dfdcada3SDoug Rabson 			break;
650dfdcada3SDoug Rabson 	}
651dfdcada3SDoug Rabson 
652dfdcada3SDoug Rabson 	return (s);
653dfdcada3SDoug Rabson }
654dfdcada3SDoug Rabson 
655dfdcada3SDoug Rabson /* ******************* REPLY GENERATION ROUTINES  ************ */
656dfdcada3SDoug Rabson 
657a9148abdSDoug Rabson static bool_t
658a9148abdSDoug Rabson svc_sendreply_common(struct svc_req *rqstp, struct rpc_msg *rply,
659a9148abdSDoug Rabson     struct mbuf *body)
660a9148abdSDoug Rabson {
661a9148abdSDoug Rabson 	SVCXPRT *xprt = rqstp->rq_xprt;
662a9148abdSDoug Rabson 	bool_t ok;
663a9148abdSDoug Rabson 
664a9148abdSDoug Rabson 	if (rqstp->rq_args) {
665a9148abdSDoug Rabson 		m_freem(rqstp->rq_args);
666a9148abdSDoug Rabson 		rqstp->rq_args = NULL;
667a9148abdSDoug Rabson 	}
668a9148abdSDoug Rabson 
669a9148abdSDoug Rabson 	if (xprt->xp_pool->sp_rcache)
670a9148abdSDoug Rabson 		replay_setreply(xprt->xp_pool->sp_rcache,
671a9148abdSDoug Rabson 		    rply, svc_getrpccaller(rqstp), body);
672a9148abdSDoug Rabson 
673a9148abdSDoug Rabson 	if (!SVCAUTH_WRAP(&rqstp->rq_auth, &body))
674a9148abdSDoug Rabson 		return (FALSE);
675a9148abdSDoug Rabson 
676d473bac7SAlexander Motin 	ok = SVC_REPLY(xprt, rply, rqstp->rq_addr, body, &rqstp->rq_reply_seq);
677a9148abdSDoug Rabson 	if (rqstp->rq_addr) {
678a9148abdSDoug Rabson 		free(rqstp->rq_addr, M_SONAME);
679a9148abdSDoug Rabson 		rqstp->rq_addr = NULL;
680a9148abdSDoug Rabson 	}
681a9148abdSDoug Rabson 
682a9148abdSDoug Rabson 	return (ok);
683a9148abdSDoug Rabson }
684a9148abdSDoug Rabson 
685dfdcada3SDoug Rabson /*
686dfdcada3SDoug Rabson  * Send a reply to an rpc request
687dfdcada3SDoug Rabson  */
688dfdcada3SDoug Rabson bool_t
689a9148abdSDoug Rabson svc_sendreply(struct svc_req *rqstp, xdrproc_t xdr_results, void * xdr_location)
690a9148abdSDoug Rabson {
691a9148abdSDoug Rabson 	struct rpc_msg rply;
692a9148abdSDoug Rabson 	struct mbuf *m;
693a9148abdSDoug Rabson 	XDR xdrs;
694a9148abdSDoug Rabson 	bool_t ok;
695a9148abdSDoug Rabson 
696a9148abdSDoug Rabson 	rply.rm_xid = rqstp->rq_xid;
697a9148abdSDoug Rabson 	rply.rm_direction = REPLY;
698a9148abdSDoug Rabson 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
699a9148abdSDoug Rabson 	rply.acpted_rply.ar_verf = rqstp->rq_verf;
700a9148abdSDoug Rabson 	rply.acpted_rply.ar_stat = SUCCESS;
701a9148abdSDoug Rabson 	rply.acpted_rply.ar_results.where = NULL;
702a9148abdSDoug Rabson 	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;
703a9148abdSDoug Rabson 
704bd54830bSGleb Smirnoff 	m = m_getcl(M_WAITOK, MT_DATA, 0);
705a9148abdSDoug Rabson 	xdrmbuf_create(&xdrs, m, XDR_ENCODE);
706a9148abdSDoug Rabson 	ok = xdr_results(&xdrs, xdr_location);
707a9148abdSDoug Rabson 	XDR_DESTROY(&xdrs);
708a9148abdSDoug Rabson 
709a9148abdSDoug Rabson 	if (ok) {
710a9148abdSDoug Rabson 		return (svc_sendreply_common(rqstp, &rply, m));
711a9148abdSDoug Rabson 	} else {
712a9148abdSDoug Rabson 		m_freem(m);
713a9148abdSDoug Rabson 		return (FALSE);
714a9148abdSDoug Rabson 	}
715a9148abdSDoug Rabson }
716a9148abdSDoug Rabson 
717a9148abdSDoug Rabson bool_t
718a9148abdSDoug Rabson svc_sendreply_mbuf(struct svc_req *rqstp, struct mbuf *m)
719dfdcada3SDoug Rabson {
720dfdcada3SDoug Rabson 	struct rpc_msg rply;
721dfdcada3SDoug Rabson 
722a9148abdSDoug Rabson 	rply.rm_xid = rqstp->rq_xid;
723dfdcada3SDoug Rabson 	rply.rm_direction = REPLY;
724dfdcada3SDoug Rabson 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
725a9148abdSDoug Rabson 	rply.acpted_rply.ar_verf = rqstp->rq_verf;
726dfdcada3SDoug Rabson 	rply.acpted_rply.ar_stat = SUCCESS;
727a9148abdSDoug Rabson 	rply.acpted_rply.ar_results.where = NULL;
728a9148abdSDoug Rabson 	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;
729dfdcada3SDoug Rabson 
730a9148abdSDoug Rabson 	return (svc_sendreply_common(rqstp, &rply, m));
731dfdcada3SDoug Rabson }
732dfdcada3SDoug Rabson 
733dfdcada3SDoug Rabson /*
734dfdcada3SDoug Rabson  * No procedure error reply
735dfdcada3SDoug Rabson  */
736dfdcada3SDoug Rabson void
737a9148abdSDoug Rabson svcerr_noproc(struct svc_req *rqstp)
738dfdcada3SDoug Rabson {
739a9148abdSDoug Rabson 	SVCXPRT *xprt = rqstp->rq_xprt;
740dfdcada3SDoug Rabson 	struct rpc_msg rply;
741dfdcada3SDoug Rabson 
742a9148abdSDoug Rabson 	rply.rm_xid = rqstp->rq_xid;
743dfdcada3SDoug Rabson 	rply.rm_direction = REPLY;
744dfdcada3SDoug Rabson 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
745a9148abdSDoug Rabson 	rply.acpted_rply.ar_verf = rqstp->rq_verf;
746dfdcada3SDoug Rabson 	rply.acpted_rply.ar_stat = PROC_UNAVAIL;
747dfdcada3SDoug Rabson 
748a9148abdSDoug Rabson 	if (xprt->xp_pool->sp_rcache)
749a9148abdSDoug Rabson 		replay_setreply(xprt->xp_pool->sp_rcache,
750a9148abdSDoug Rabson 		    &rply, svc_getrpccaller(rqstp), NULL);
751a9148abdSDoug Rabson 
752a9148abdSDoug Rabson 	svc_sendreply_common(rqstp, &rply, NULL);
753dfdcada3SDoug Rabson }
754dfdcada3SDoug Rabson 
755dfdcada3SDoug Rabson /*
756dfdcada3SDoug Rabson  * Can't decode args error reply
757dfdcada3SDoug Rabson  */
758dfdcada3SDoug Rabson void
759a9148abdSDoug Rabson svcerr_decode(struct svc_req *rqstp)
760dfdcada3SDoug Rabson {
761a9148abdSDoug Rabson 	SVCXPRT *xprt = rqstp->rq_xprt;
762dfdcada3SDoug Rabson 	struct rpc_msg rply;
763dfdcada3SDoug Rabson 
764a9148abdSDoug Rabson 	rply.rm_xid = rqstp->rq_xid;
765dfdcada3SDoug Rabson 	rply.rm_direction = REPLY;
766dfdcada3SDoug Rabson 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
767a9148abdSDoug Rabson 	rply.acpted_rply.ar_verf = rqstp->rq_verf;
768dfdcada3SDoug Rabson 	rply.acpted_rply.ar_stat = GARBAGE_ARGS;
769dfdcada3SDoug Rabson 
770a9148abdSDoug Rabson 	if (xprt->xp_pool->sp_rcache)
771a9148abdSDoug Rabson 		replay_setreply(xprt->xp_pool->sp_rcache,
772a9148abdSDoug Rabson 		    &rply, (struct sockaddr *) &xprt->xp_rtaddr, NULL);
773a9148abdSDoug Rabson 
774a9148abdSDoug Rabson 	svc_sendreply_common(rqstp, &rply, NULL);
775dfdcada3SDoug Rabson }
776dfdcada3SDoug Rabson 
777dfdcada3SDoug Rabson /*
778dfdcada3SDoug Rabson  * Some system error
779dfdcada3SDoug Rabson  */
780dfdcada3SDoug Rabson void
781a9148abdSDoug Rabson svcerr_systemerr(struct svc_req *rqstp)
782dfdcada3SDoug Rabson {
783a9148abdSDoug Rabson 	SVCXPRT *xprt = rqstp->rq_xprt;
784dfdcada3SDoug Rabson 	struct rpc_msg rply;
785dfdcada3SDoug Rabson 
786a9148abdSDoug Rabson 	rply.rm_xid = rqstp->rq_xid;
787dfdcada3SDoug Rabson 	rply.rm_direction = REPLY;
788dfdcada3SDoug Rabson 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
789a9148abdSDoug Rabson 	rply.acpted_rply.ar_verf = rqstp->rq_verf;
790dfdcada3SDoug Rabson 	rply.acpted_rply.ar_stat = SYSTEM_ERR;
791dfdcada3SDoug Rabson 
792a9148abdSDoug Rabson 	if (xprt->xp_pool->sp_rcache)
793a9148abdSDoug Rabson 		replay_setreply(xprt->xp_pool->sp_rcache,
794a9148abdSDoug Rabson 		    &rply, svc_getrpccaller(rqstp), NULL);
795a9148abdSDoug Rabson 
796a9148abdSDoug Rabson 	svc_sendreply_common(rqstp, &rply, NULL);
797dfdcada3SDoug Rabson }
798dfdcada3SDoug Rabson 
799dfdcada3SDoug Rabson /*
800dfdcada3SDoug Rabson  * Authentication error reply
801dfdcada3SDoug Rabson  */
802dfdcada3SDoug Rabson void
803a9148abdSDoug Rabson svcerr_auth(struct svc_req *rqstp, enum auth_stat why)
804dfdcada3SDoug Rabson {
805a9148abdSDoug Rabson 	SVCXPRT *xprt = rqstp->rq_xprt;
806dfdcada3SDoug Rabson 	struct rpc_msg rply;
807dfdcada3SDoug Rabson 
808a9148abdSDoug Rabson 	rply.rm_xid = rqstp->rq_xid;
809dfdcada3SDoug Rabson 	rply.rm_direction = REPLY;
810dfdcada3SDoug Rabson 	rply.rm_reply.rp_stat = MSG_DENIED;
811dfdcada3SDoug Rabson 	rply.rjcted_rply.rj_stat = AUTH_ERROR;
812dfdcada3SDoug Rabson 	rply.rjcted_rply.rj_why = why;
813dfdcada3SDoug Rabson 
814a9148abdSDoug Rabson 	if (xprt->xp_pool->sp_rcache)
815a9148abdSDoug Rabson 		replay_setreply(xprt->xp_pool->sp_rcache,
816a9148abdSDoug Rabson 		    &rply, svc_getrpccaller(rqstp), NULL);
817a9148abdSDoug Rabson 
818a9148abdSDoug Rabson 	svc_sendreply_common(rqstp, &rply, NULL);
819dfdcada3SDoug Rabson }
820dfdcada3SDoug Rabson 
821dfdcada3SDoug Rabson /*
822dfdcada3SDoug Rabson  * Auth too weak error reply
823dfdcada3SDoug Rabson  */
824dfdcada3SDoug Rabson void
825a9148abdSDoug Rabson svcerr_weakauth(struct svc_req *rqstp)
826dfdcada3SDoug Rabson {
827dfdcada3SDoug Rabson 
828a9148abdSDoug Rabson 	svcerr_auth(rqstp, AUTH_TOOWEAK);
829dfdcada3SDoug Rabson }
830dfdcada3SDoug Rabson 
831dfdcada3SDoug Rabson /*
832dfdcada3SDoug Rabson  * Program unavailable error reply
833dfdcada3SDoug Rabson  */
834dfdcada3SDoug Rabson void
835a9148abdSDoug Rabson svcerr_noprog(struct svc_req *rqstp)
836dfdcada3SDoug Rabson {
837a9148abdSDoug Rabson 	SVCXPRT *xprt = rqstp->rq_xprt;
838dfdcada3SDoug Rabson 	struct rpc_msg rply;
839dfdcada3SDoug Rabson 
840a9148abdSDoug Rabson 	rply.rm_xid = rqstp->rq_xid;
841dfdcada3SDoug Rabson 	rply.rm_direction = REPLY;
842dfdcada3SDoug Rabson 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
843a9148abdSDoug Rabson 	rply.acpted_rply.ar_verf = rqstp->rq_verf;
844dfdcada3SDoug Rabson 	rply.acpted_rply.ar_stat = PROG_UNAVAIL;
845dfdcada3SDoug Rabson 
846a9148abdSDoug Rabson 	if (xprt->xp_pool->sp_rcache)
847a9148abdSDoug Rabson 		replay_setreply(xprt->xp_pool->sp_rcache,
848a9148abdSDoug Rabson 		    &rply, svc_getrpccaller(rqstp), NULL);
849a9148abdSDoug Rabson 
850a9148abdSDoug Rabson 	svc_sendreply_common(rqstp, &rply, NULL);
851dfdcada3SDoug Rabson }
852dfdcada3SDoug Rabson 
853dfdcada3SDoug Rabson /*
854dfdcada3SDoug Rabson  * Program version mismatch error reply
855dfdcada3SDoug Rabson  */
856dfdcada3SDoug Rabson void
857a9148abdSDoug Rabson svcerr_progvers(struct svc_req *rqstp, rpcvers_t low_vers, rpcvers_t high_vers)
858dfdcada3SDoug Rabson {
859a9148abdSDoug Rabson 	SVCXPRT *xprt = rqstp->rq_xprt;
860dfdcada3SDoug Rabson 	struct rpc_msg rply;
861dfdcada3SDoug Rabson 
862a9148abdSDoug Rabson 	rply.rm_xid = rqstp->rq_xid;
863dfdcada3SDoug Rabson 	rply.rm_direction = REPLY;
864dfdcada3SDoug Rabson 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
865a9148abdSDoug Rabson 	rply.acpted_rply.ar_verf = rqstp->rq_verf;
866dfdcada3SDoug Rabson 	rply.acpted_rply.ar_stat = PROG_MISMATCH;
867dfdcada3SDoug Rabson 	rply.acpted_rply.ar_vers.low = (uint32_t)low_vers;
868dfdcada3SDoug Rabson 	rply.acpted_rply.ar_vers.high = (uint32_t)high_vers;
869dfdcada3SDoug Rabson 
870a9148abdSDoug Rabson 	if (xprt->xp_pool->sp_rcache)
871a9148abdSDoug Rabson 		replay_setreply(xprt->xp_pool->sp_rcache,
872a9148abdSDoug Rabson 		    &rply, svc_getrpccaller(rqstp), NULL);
873a9148abdSDoug Rabson 
874a9148abdSDoug Rabson 	svc_sendreply_common(rqstp, &rply, NULL);
875a9148abdSDoug Rabson }
876a9148abdSDoug Rabson 
877a9148abdSDoug Rabson /*
878a9148abdSDoug Rabson  * Allocate a new server transport structure. All fields are
879a9148abdSDoug Rabson  * initialized to zero and xp_p3 is initialized to point at an
880a9148abdSDoug Rabson  * extension structure to hold various flags and authentication
881a9148abdSDoug Rabson  * parameters.
882a9148abdSDoug Rabson  */
883a9148abdSDoug Rabson SVCXPRT *
884462984cbSEnji Cooper svc_xprt_alloc(void)
885a9148abdSDoug Rabson {
886a9148abdSDoug Rabson 	SVCXPRT *xprt;
887a9148abdSDoug Rabson 	SVCXPRT_EXT *ext;
888a9148abdSDoug Rabson 
889a9148abdSDoug Rabson 	xprt = mem_alloc(sizeof(SVCXPRT));
890a9148abdSDoug Rabson 	ext = mem_alloc(sizeof(SVCXPRT_EXT));
891a9148abdSDoug Rabson 	xprt->xp_p3 = ext;
892a9148abdSDoug Rabson 	refcount_init(&xprt->xp_refs, 1);
893a9148abdSDoug Rabson 
894a9148abdSDoug Rabson 	return (xprt);
895a9148abdSDoug Rabson }
896a9148abdSDoug Rabson 
897a9148abdSDoug Rabson /*
898a9148abdSDoug Rabson  * Free a server transport structure.
899a9148abdSDoug Rabson  */
900a9148abdSDoug Rabson void
901462984cbSEnji Cooper svc_xprt_free(SVCXPRT *xprt)
902a9148abdSDoug Rabson {
903a9148abdSDoug Rabson 
904a9148abdSDoug Rabson 	mem_free(xprt->xp_p3, sizeof(SVCXPRT_EXT));
905a9148abdSDoug Rabson 	mem_free(xprt, sizeof(SVCXPRT));
906dfdcada3SDoug Rabson }
907dfdcada3SDoug Rabson 
908dfdcada3SDoug Rabson /* ******************* SERVER INPUT STUFF ******************* */
909dfdcada3SDoug Rabson 
910dfdcada3SDoug Rabson /*
911a9148abdSDoug Rabson  * Read RPC requests from a transport and queue them to be
912a9148abdSDoug Rabson  * executed. We handle authentication and replay cache replies here.
913a9148abdSDoug Rabson  * Actually dispatching the RPC is deferred until svc_executereq().
914dfdcada3SDoug Rabson  */
915a9148abdSDoug Rabson static enum xprt_stat
916a9148abdSDoug Rabson svc_getreq(SVCXPRT *xprt, struct svc_req **rqstp_ret)
917dfdcada3SDoug Rabson {
918dfdcada3SDoug Rabson 	SVCPOOL *pool = xprt->xp_pool;
919a9148abdSDoug Rabson 	struct svc_req *r;
920dfdcada3SDoug Rabson 	struct rpc_msg msg;
921a9148abdSDoug Rabson 	struct mbuf *args;
922d473bac7SAlexander Motin 	struct svc_loss_callout *s;
923a9148abdSDoug Rabson 	enum xprt_stat stat;
924a9148abdSDoug Rabson 
925a9148abdSDoug Rabson 	/* now receive msgs from xprt (support batch calls) */
926a9148abdSDoug Rabson 	r = malloc(sizeof(*r), M_RPC, M_WAITOK|M_ZERO);
927a9148abdSDoug Rabson 
928a9148abdSDoug Rabson 	msg.rm_call.cb_cred.oa_base = r->rq_credarea;
929a9148abdSDoug Rabson 	msg.rm_call.cb_verf.oa_base = &r->rq_credarea[MAX_AUTH_BYTES];
930a9148abdSDoug Rabson 	r->rq_clntcred = &r->rq_credarea[2*MAX_AUTH_BYTES];
931a9148abdSDoug Rabson 	if (SVC_RECV(xprt, &msg, &r->rq_addr, &args)) {
932a9148abdSDoug Rabson 		enum auth_stat why;
933a9148abdSDoug Rabson 
934a9148abdSDoug Rabson 		/*
935a9148abdSDoug Rabson 		 * Handle replays and authenticate before queuing the
936a9148abdSDoug Rabson 		 * request to be executed.
937a9148abdSDoug Rabson 		 */
938a9148abdSDoug Rabson 		SVC_ACQUIRE(xprt);
939a9148abdSDoug Rabson 		r->rq_xprt = xprt;
940a9148abdSDoug Rabson 		if (pool->sp_rcache) {
941a9148abdSDoug Rabson 			struct rpc_msg repmsg;
942a9148abdSDoug Rabson 			struct mbuf *repbody;
943a9148abdSDoug Rabson 			enum replay_state rs;
944a9148abdSDoug Rabson 			rs = replay_find(pool->sp_rcache, &msg,
945a9148abdSDoug Rabson 			    svc_getrpccaller(r), &repmsg, &repbody);
946a9148abdSDoug Rabson 			switch (rs) {
947a9148abdSDoug Rabson 			case RS_NEW:
948a9148abdSDoug Rabson 				break;
949a9148abdSDoug Rabson 			case RS_DONE:
950a9148abdSDoug Rabson 				SVC_REPLY(xprt, &repmsg, r->rq_addr,
951d473bac7SAlexander Motin 				    repbody, &r->rq_reply_seq);
952a9148abdSDoug Rabson 				if (r->rq_addr) {
953a9148abdSDoug Rabson 					free(r->rq_addr, M_SONAME);
954a9148abdSDoug Rabson 					r->rq_addr = NULL;
955a9148abdSDoug Rabson 				}
956578e600cSRick Macklem 				m_freem(args);
957a9148abdSDoug Rabson 				goto call_done;
958a9148abdSDoug Rabson 
959a9148abdSDoug Rabson 			default:
960578e600cSRick Macklem 				m_freem(args);
961a9148abdSDoug Rabson 				goto call_done;
962a9148abdSDoug Rabson 			}
963a9148abdSDoug Rabson 		}
964a9148abdSDoug Rabson 
965a9148abdSDoug Rabson 		r->rq_xid = msg.rm_xid;
966a9148abdSDoug Rabson 		r->rq_prog = msg.rm_call.cb_prog;
967a9148abdSDoug Rabson 		r->rq_vers = msg.rm_call.cb_vers;
968a9148abdSDoug Rabson 		r->rq_proc = msg.rm_call.cb_proc;
969a9148abdSDoug Rabson 		r->rq_size = sizeof(*r) + m_length(args, NULL);
970a9148abdSDoug Rabson 		r->rq_args = args;
971a9148abdSDoug Rabson 		if ((why = _authenticate(r, &msg)) != AUTH_OK) {
972a9148abdSDoug Rabson 			/*
973a9148abdSDoug Rabson 			 * RPCSEC_GSS uses this return code
974a9148abdSDoug Rabson 			 * for requests that form part of its
975a9148abdSDoug Rabson 			 * context establishment protocol and
976a9148abdSDoug Rabson 			 * should not be dispatched to the
977a9148abdSDoug Rabson 			 * application.
978a9148abdSDoug Rabson 			 */
979a9148abdSDoug Rabson 			if (why != RPCSEC_GSS_NODISPATCH)
980a9148abdSDoug Rabson 				svcerr_auth(r, why);
981a9148abdSDoug Rabson 			goto call_done;
982a9148abdSDoug Rabson 		}
983a9148abdSDoug Rabson 
984a9148abdSDoug Rabson 		if (!SVCAUTH_UNWRAP(&r->rq_auth, &r->rq_args)) {
985a9148abdSDoug Rabson 			svcerr_decode(r);
986a9148abdSDoug Rabson 			goto call_done;
987a9148abdSDoug Rabson 		}
988a9148abdSDoug Rabson 
989a9148abdSDoug Rabson 		/*
990a9148abdSDoug Rabson 		 * Everything checks out, return request to caller.
991a9148abdSDoug Rabson 		 */
992a9148abdSDoug Rabson 		*rqstp_ret = r;
993a9148abdSDoug Rabson 		r = NULL;
994a9148abdSDoug Rabson 	}
995a9148abdSDoug Rabson call_done:
996a9148abdSDoug Rabson 	if (r) {
997a9148abdSDoug Rabson 		svc_freereq(r);
998a9148abdSDoug Rabson 		r = NULL;
999a9148abdSDoug Rabson 	}
1000a9148abdSDoug Rabson 	if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
1001d473bac7SAlexander Motin 		TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link)
1002d473bac7SAlexander Motin 			(*s->slc_dispatch)(xprt);
1003a9148abdSDoug Rabson 		xprt_unregister(xprt);
1004a9148abdSDoug Rabson 	}
1005a9148abdSDoug Rabson 
1006a9148abdSDoug Rabson 	return (stat);
1007a9148abdSDoug Rabson }
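/*
 * Summary of the receive path above (descriptive only): SVC_RECV() parses a
 * message, the replay cache may answer it immediately, _authenticate() and
 * SVCAUTH_UNWRAP() vet it, and a fully formed svc_req is returned to the
 * caller for later dispatch by svc_executereq().  When SVC_STAT() reports
 * XPRT_DIED, every dispatcher registered with svc_loss_reg() is notified
 * before the transport is unregistered.
 */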
1008a9148abdSDoug Rabson 
1009a9148abdSDoug Rabson static void
1010a9148abdSDoug Rabson svc_executereq(struct svc_req *rqstp)
1011a9148abdSDoug Rabson {
1012a9148abdSDoug Rabson 	SVCXPRT *xprt = rqstp->rq_xprt;
1013a9148abdSDoug Rabson 	SVCPOOL *pool = xprt->xp_pool;
1014dfdcada3SDoug Rabson 	int prog_found;
1015dfdcada3SDoug Rabson 	rpcvers_t low_vers;
1016dfdcada3SDoug Rabson 	rpcvers_t high_vers;
1017dfdcada3SDoug Rabson 	struct svc_callout *s;
1018dfdcada3SDoug Rabson 
1019dfdcada3SDoug Rabson 	/* now match message with a registered service*/
1020dfdcada3SDoug Rabson 	prog_found = FALSE;
1021dfdcada3SDoug Rabson 	low_vers = (rpcvers_t) -1L;
1022dfdcada3SDoug Rabson 	high_vers = (rpcvers_t) 0L;
1023dfdcada3SDoug Rabson 	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
1024a9148abdSDoug Rabson 		if (s->sc_prog == rqstp->rq_prog) {
1025a9148abdSDoug Rabson 			if (s->sc_vers == rqstp->rq_vers) {
1026a9148abdSDoug Rabson 				/*
1027a9148abdSDoug Rabson 				 * We hand ownership of rqstp to the
1028a9148abdSDoug Rabson 				 * dispatch method - it must call
1029a9148abdSDoug Rabson 				 * svc_freereq.
1030a9148abdSDoug Rabson 				 */
1031a9148abdSDoug Rabson 				(*s->sc_dispatch)(rqstp, xprt);
1032a9148abdSDoug Rabson 				return;
1033dfdcada3SDoug Rabson 			}  /* found correct version */
1034dfdcada3SDoug Rabson 			prog_found = TRUE;
1035dfdcada3SDoug Rabson 			if (s->sc_vers < low_vers)
1036dfdcada3SDoug Rabson 				low_vers = s->sc_vers;
1037dfdcada3SDoug Rabson 			if (s->sc_vers > high_vers)
1038dfdcada3SDoug Rabson 				high_vers = s->sc_vers;
1039dfdcada3SDoug Rabson 		}   /* found correct program */
1040dfdcada3SDoug Rabson 	}
1041a9148abdSDoug Rabson 
1042dfdcada3SDoug Rabson 	/*
1043dfdcada3SDoug Rabson 	 * if we got here, the program or version
1044dfdcada3SDoug Rabson 	 * is not served ...
1045dfdcada3SDoug Rabson 	 */
1046dfdcada3SDoug Rabson 	if (prog_found)
1047a9148abdSDoug Rabson 		svcerr_progvers(rqstp, low_vers, high_vers);
1048dfdcada3SDoug Rabson 	else
1049a9148abdSDoug Rabson 		svcerr_noprog(rqstp);
1050a9148abdSDoug Rabson 
1051a9148abdSDoug Rabson 	svc_freereq(rqstp);
1052dfdcada3SDoug Rabson }
1053a9148abdSDoug Rabson 
1054a9148abdSDoug Rabson static void
1055b563304cSAlexander Motin svc_checkidle(SVCGROUP *grp)
1056a9148abdSDoug Rabson {
1057a9148abdSDoug Rabson 	SVCXPRT *xprt, *nxprt;
1058a9148abdSDoug Rabson 	time_t timo;
1059a9148abdSDoug Rabson 	struct svcxprt_list cleanup;
1060a9148abdSDoug Rabson 
1061a9148abdSDoug Rabson 	TAILQ_INIT(&cleanup);
1062b563304cSAlexander Motin 	TAILQ_FOREACH_SAFE(xprt, &grp->sg_xlist, xp_link, nxprt) {
1063dfdcada3SDoug Rabson 		/*
1064a9148abdSDoug Rabson 		 * Only some transports have idle timers. Don't time
1065a9148abdSDoug Rabson 		 * something out which is just waking up.
1066dfdcada3SDoug Rabson 		 */
1067a9148abdSDoug Rabson 		if (!xprt->xp_idletimeout || xprt->xp_thread)
1068a9148abdSDoug Rabson 			continue;
1069a9148abdSDoug Rabson 
1070a9148abdSDoug Rabson 		timo = xprt->xp_lastactive + xprt->xp_idletimeout;
1071a9148abdSDoug Rabson 		if (time_uptime > timo) {
1072a9148abdSDoug Rabson 			xprt_unregister_locked(xprt);
1073a9148abdSDoug Rabson 			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
1074a9148abdSDoug Rabson 		}
1075a9148abdSDoug Rabson 	}
1076a9148abdSDoug Rabson 
1077b563304cSAlexander Motin 	mtx_unlock(&grp->sg_lock);
1078a9148abdSDoug Rabson 	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
1079a9148abdSDoug Rabson 		SVC_RELEASE(xprt);
1080a9148abdSDoug Rabson 	}
1081b563304cSAlexander Motin 	mtx_lock(&grp->sg_lock);
1082a9148abdSDoug Rabson }
1083a9148abdSDoug Rabson 
1084a9148abdSDoug Rabson static void
1085a9148abdSDoug Rabson svc_assign_waiting_sockets(SVCPOOL *pool)
1086a9148abdSDoug Rabson {
1087b563304cSAlexander Motin 	SVCGROUP *grp;
1088a9148abdSDoug Rabson 	SVCXPRT *xprt;
1089b563304cSAlexander Motin 	int g;
1090a9148abdSDoug Rabson 
1091b563304cSAlexander Motin 	for (g = 0; g < pool->sp_groupcount; g++) {
1092b563304cSAlexander Motin 		grp = &pool->sp_groups[g];
1093b563304cSAlexander Motin 		mtx_lock(&grp->sg_lock);
1094b563304cSAlexander Motin 		while ((xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) {
1095ba981145SAlexander Motin 			if (xprt_assignthread(xprt))
1096b563304cSAlexander Motin 				TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
1097ba981145SAlexander Motin 			else
1098ba981145SAlexander Motin 				break;
1099a9148abdSDoug Rabson 		}
1100b563304cSAlexander Motin 		mtx_unlock(&grp->sg_lock);
1101b563304cSAlexander Motin 	}
1102f8fb069dSAlexander Motin }
1103f8fb069dSAlexander Motin 
1104f8fb069dSAlexander Motin static void
11053c42b5bfSGarrett Wollman svc_change_space_used(SVCPOOL *pool, long delta)
1106f8fb069dSAlexander Motin {
11073c42b5bfSGarrett Wollman 	unsigned long value;
1108f8fb069dSAlexander Motin 
11093c42b5bfSGarrett Wollman 	value = atomic_fetchadd_long(&pool->sp_space_used, delta) + delta;
1110f8fb069dSAlexander Motin 	if (delta > 0) {
1111f8fb069dSAlexander Motin 		if (value >= pool->sp_space_high && !pool->sp_space_throttled) {
1112f8fb069dSAlexander Motin 			pool->sp_space_throttled = TRUE;
1113f8fb069dSAlexander Motin 			pool->sp_space_throttle_count++;
1114f8fb069dSAlexander Motin 		}
1115f8fb069dSAlexander Motin 		if (value > pool->sp_space_used_highest)
1116f8fb069dSAlexander Motin 			pool->sp_space_used_highest = value;
1117f8fb069dSAlexander Motin 	} else {
1118f8fb069dSAlexander Motin 		if (value < pool->sp_space_low && pool->sp_space_throttled) {
1119f8fb069dSAlexander Motin 			pool->sp_space_throttled = FALSE;
1120f8fb069dSAlexander Motin 			svc_assign_waiting_sockets(pool);
1121f8fb069dSAlexander Motin 		}
1122f8fb069dSAlexander Motin 	}
1123a9148abdSDoug Rabson }
1124a9148abdSDoug Rabson 
1125a9148abdSDoug Rabson static bool_t
1126a9148abdSDoug Rabson svc_request_space_available(SVCPOOL *pool)
1127a9148abdSDoug Rabson {
1128a9148abdSDoug Rabson 
1129f8fb069dSAlexander Motin 	if (pool->sp_space_throttled)
1130f8fb069dSAlexander Motin 		return (FALSE);
1131f8fb069dSAlexander Motin 	return (TRUE);
1132a9148abdSDoug Rabson }
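/*
 * The two watermarks checked above give simple hysteresis (they are set up
 * in svcpool_create()): crossing sp_space_high turns throttling on, and only
 * dropping below sp_space_low turns it off again, at which point
 * svc_assign_waiting_sockets() picks up transports that were parked on the
 * active queues while throttled.
 */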
1133a9148abdSDoug Rabson 
1134a9148abdSDoug Rabson static void
1135b563304cSAlexander Motin svc_run_internal(SVCGROUP *grp, bool_t ismaster)
1136a9148abdSDoug Rabson {
1137b563304cSAlexander Motin 	SVCPOOL *pool = grp->sg_pool;
1138a9148abdSDoug Rabson 	SVCTHREAD *st, *stpref;
1139a9148abdSDoug Rabson 	SVCXPRT *xprt;
1140a9148abdSDoug Rabson 	enum xprt_stat stat;
1141a9148abdSDoug Rabson 	struct svc_req *rqstp;
1142f87c8878SKonstantin Belousov 	struct proc *p;
11433c42b5bfSGarrett Wollman 	long sz;
1144a9148abdSDoug Rabson 	int error;
1145a9148abdSDoug Rabson 
1146a9148abdSDoug Rabson 	st = mem_alloc(sizeof(*st));
1147b776fb2dSAlexander Motin 	mtx_init(&st->st_lock, "st_lock", NULL, MTX_DEF);
1148f8fb069dSAlexander Motin 	st->st_pool = pool;
1149a9148abdSDoug Rabson 	st->st_xprt = NULL;
1150a9148abdSDoug Rabson 	STAILQ_INIT(&st->st_reqs);
1151a9148abdSDoug Rabson 	cv_init(&st->st_cond, "rpcsvc");
1152a9148abdSDoug Rabson 
1153b563304cSAlexander Motin 	mtx_lock(&grp->sg_lock);
1154a9148abdSDoug Rabson 
1155a9148abdSDoug Rabson 	/*
1156a9148abdSDoug Rabson 	 * If we are a new thread which was spawned to cope with
1157a9148abdSDoug Rabson 	 * increased load, set the state back to SVCPOOL_ACTIVE.
1158a9148abdSDoug Rabson 	 */
1159b563304cSAlexander Motin 	if (grp->sg_state == SVCPOOL_THREADSTARTING)
1160b563304cSAlexander Motin 		grp->sg_state = SVCPOOL_ACTIVE;
1161a9148abdSDoug Rabson 
1162b563304cSAlexander Motin 	while (grp->sg_state != SVCPOOL_CLOSING) {
1163a9148abdSDoug Rabson 		/*
1164db7cdfeeSAlexander Motin 		 * Create new thread if requested.
1165db7cdfeeSAlexander Motin 		 */
1166b563304cSAlexander Motin 		if (grp->sg_state == SVCPOOL_THREADWANTED) {
1167b563304cSAlexander Motin 			grp->sg_state = SVCPOOL_THREADSTARTING;
1168b563304cSAlexander Motin 			grp->sg_lastcreatetime = time_uptime;
1169b563304cSAlexander Motin 			mtx_unlock(&grp->sg_lock);
1170b563304cSAlexander Motin 			svc_new_thread(grp);
1171b563304cSAlexander Motin 			mtx_lock(&grp->sg_lock);
1172db7cdfeeSAlexander Motin 			continue;
1173db7cdfeeSAlexander Motin 		}
1174db7cdfeeSAlexander Motin 
1175db7cdfeeSAlexander Motin 		/*
1176a9148abdSDoug Rabson 		 * Check for idle transports once per second.
1177a9148abdSDoug Rabson 		 */
1178b563304cSAlexander Motin 		if (time_uptime > grp->sg_lastidlecheck) {
1179b563304cSAlexander Motin 			grp->sg_lastidlecheck = time_uptime;
1180b563304cSAlexander Motin 			svc_checkidle(grp);
1181a9148abdSDoug Rabson 		}
1182a9148abdSDoug Rabson 
1183a9148abdSDoug Rabson 		xprt = st->st_xprt;
1184b776fb2dSAlexander Motin 		if (!xprt) {
1185a9148abdSDoug Rabson 			/*
1186a9148abdSDoug Rabson 			 * Enforce maxthreads count.
1187a9148abdSDoug Rabson 			 */
1188*1b09d9dfSRick Macklem 			if (!ismaster && grp->sg_threadcount >
1189*1b09d9dfSRick Macklem 			    grp->sg_maxthreads)
1190a9148abdSDoug Rabson 				break;
1191a9148abdSDoug Rabson 
1192a9148abdSDoug Rabson 			/*
1193a9148abdSDoug Rabson 			 * Before sleeping, see if we can find an
1194a9148abdSDoug Rabson 			 * active transport which isn't being serviced
1195a9148abdSDoug Rabson 			 * by a thread.
1196a9148abdSDoug Rabson 			 */
1197ba981145SAlexander Motin 			if (svc_request_space_available(pool) &&
1198b563304cSAlexander Motin 			    (xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) {
1199b563304cSAlexander Motin 				TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
1200a9148abdSDoug Rabson 				SVC_ACQUIRE(xprt);
1201a9148abdSDoug Rabson 				xprt->xp_thread = st;
1202a9148abdSDoug Rabson 				st->st_xprt = xprt;
1203a9148abdSDoug Rabson 				continue;
1204ba981145SAlexander Motin 			}
1205a9148abdSDoug Rabson 
1206b563304cSAlexander Motin 			LIST_INSERT_HEAD(&grp->sg_idlethreads, st, st_ilink);
1207db7cdfeeSAlexander Motin 			if (ismaster || (!ismaster &&
1208b563304cSAlexander Motin 			    grp->sg_threadcount > grp->sg_minthreads))
1209db7cdfeeSAlexander Motin 				error = cv_timedwait_sig(&st->st_cond,
1210b563304cSAlexander Motin 				    &grp->sg_lock, 5 * hz);
1211db7cdfeeSAlexander Motin 			else
1212db7cdfeeSAlexander Motin 				error = cv_wait_sig(&st->st_cond,
1213b563304cSAlexander Motin 				    &grp->sg_lock);
1214b5d7fb73SAlexander Motin 			if (st->st_xprt == NULL)
1215a9148abdSDoug Rabson 				LIST_REMOVE(st, st_ilink);
1216a9148abdSDoug Rabson 
1217a9148abdSDoug Rabson 			/*
1218a9148abdSDoug Rabson 			 * Reduce worker thread count when idle.
1219a9148abdSDoug Rabson 			 */
1220a9148abdSDoug Rabson 			if (error == EWOULDBLOCK) {
1221a9148abdSDoug Rabson 				if (!ismaster
1222b563304cSAlexander Motin 				    && (grp->sg_threadcount
1223b563304cSAlexander Motin 					> grp->sg_minthreads)
1224b776fb2dSAlexander Motin 					&& !st->st_xprt)
1225dfdcada3SDoug Rabson 					break;
1226f87c8878SKonstantin Belousov 			} else if (error != 0) {
1227f87c8878SKonstantin Belousov 				KASSERT(error == EINTR || error == ERESTART,
1228f87c8878SKonstantin Belousov 				    ("non-signal error %d", error));
1229b563304cSAlexander Motin 				mtx_unlock(&grp->sg_lock);
1230f87c8878SKonstantin Belousov 				p = curproc;
1231f87c8878SKonstantin Belousov 				PROC_LOCK(p);
12326ddcc233SKonstantin Belousov 				if (P_SHOULDSTOP(p) ||
12336ddcc233SKonstantin Belousov 				    (p->p_flag & P_TOTAL_STOP) != 0) {
1234f87c8878SKonstantin Belousov 					thread_suspend_check(0);
1235f87c8878SKonstantin Belousov 					PROC_UNLOCK(p);
1236f87c8878SKonstantin Belousov 					mtx_lock(&grp->sg_lock);
1237f87c8878SKonstantin Belousov 				} else {
1238f87c8878SKonstantin Belousov 					PROC_UNLOCK(p);
1239a9148abdSDoug Rabson 					svc_exit(pool);
1240b563304cSAlexander Motin 					mtx_lock(&grp->sg_lock);
1241a9148abdSDoug Rabson 					break;
1242a9148abdSDoug Rabson 				}
1243f87c8878SKonstantin Belousov 			}
1244a9148abdSDoug Rabson 			continue;
1245a9148abdSDoug Rabson 		}
1246b563304cSAlexander Motin 		mtx_unlock(&grp->sg_lock);
1247a9148abdSDoug Rabson 
1248a9148abdSDoug Rabson 		/*
1249b776fb2dSAlexander Motin 		 * Drain the transport socket and queue up any RPCs.
1250a9148abdSDoug Rabson 		 */
1251a9148abdSDoug Rabson 		xprt->xp_lastactive = time_uptime;
1252a9148abdSDoug Rabson 		do {
1253a9148abdSDoug Rabson 			if (!svc_request_space_available(pool))
1254a9148abdSDoug Rabson 				break;
1255a9148abdSDoug Rabson 			rqstp = NULL;
1256a9148abdSDoug Rabson 			stat = svc_getreq(xprt, &rqstp);
1257a9148abdSDoug Rabson 			if (rqstp) {
1258f8fb069dSAlexander Motin 				svc_change_space_used(pool, rqstp->rq_size);
1259a9148abdSDoug Rabson 				/*
1260b776fb2dSAlexander Motin 				 * See if the application has a preference
1261b776fb2dSAlexander Motin 				 * for some other thread.
1262a9148abdSDoug Rabson 				 */
1263b776fb2dSAlexander Motin 				if (pool->sp_assign) {
1264b776fb2dSAlexander Motin 					stpref = pool->sp_assign(st, rqstp);
126582dcc80dSAlexander Motin 					rqstp->rq_thread = stpref;
1266a9148abdSDoug Rabson 					STAILQ_INSERT_TAIL(&stpref->st_reqs,
1267a9148abdSDoug Rabson 					    rqstp, rq_link);
1268b776fb2dSAlexander Motin 					mtx_unlock(&stpref->st_lock);
1269b776fb2dSAlexander Motin 					if (stpref != st)
1270b776fb2dSAlexander Motin 						rqstp = NULL;
127182dcc80dSAlexander Motin 				} else {
127282dcc80dSAlexander Motin 					rqstp->rq_thread = st;
1273b776fb2dSAlexander Motin 					STAILQ_INSERT_TAIL(&st->st_reqs,
1274b776fb2dSAlexander Motin 					    rqstp, rq_link);
1275b776fb2dSAlexander Motin 				}
127682dcc80dSAlexander Motin 			}
1277b776fb2dSAlexander Motin 		} while (rqstp == NULL && stat == XPRT_MOREREQS
1278b563304cSAlexander Motin 		    && grp->sg_state != SVCPOOL_CLOSING);
1279a9148abdSDoug Rabson 
1280a9148abdSDoug Rabson 		/*
1281b776fb2dSAlexander Motin 		 * Move this transport to the end of the active list to
1282b776fb2dSAlexander Motin 		 * ensure fairness when multiple transports are active.
1283b776fb2dSAlexander Motin 		 * If this was the last queued request, svc_getreq will end
1284b776fb2dSAlexander Motin 		 * up calling xprt_inactive to remove it from the active list.
1285a9148abdSDoug Rabson 		 */
1286b563304cSAlexander Motin 		mtx_lock(&grp->sg_lock);
1287a9148abdSDoug Rabson 		xprt->xp_thread = NULL;
1288a9148abdSDoug Rabson 		st->st_xprt = NULL;
1289a9148abdSDoug Rabson 		if (xprt->xp_active) {
1290f8fb069dSAlexander Motin 			if (!svc_request_space_available(pool) ||
1291f8fb069dSAlexander Motin 			    !xprt_assignthread(xprt))
1292b563304cSAlexander Motin 				TAILQ_INSERT_TAIL(&grp->sg_active,
1293ba981145SAlexander Motin 				    xprt, xp_alink);
1294a9148abdSDoug Rabson 		}
1295b563304cSAlexander Motin 		mtx_unlock(&grp->sg_lock);
1296a9148abdSDoug Rabson 		SVC_RELEASE(xprt);
1297a9148abdSDoug Rabson 
1298a9148abdSDoug Rabson 		/*
1299a9148abdSDoug Rabson 		 * Execute what we have queued.
1300a9148abdSDoug Rabson 		 */
1301b776fb2dSAlexander Motin 		mtx_lock(&st->st_lock);
1302b776fb2dSAlexander Motin 		while ((rqstp = STAILQ_FIRST(&st->st_reqs)) != NULL) {
1303b776fb2dSAlexander Motin 			STAILQ_REMOVE_HEAD(&st->st_reqs, rq_link);
1304b776fb2dSAlexander Motin 			mtx_unlock(&st->st_lock);
13053c42b5bfSGarrett Wollman 			sz = (long)rqstp->rq_size;
1306a9148abdSDoug Rabson 			svc_executereq(rqstp);
13073c42b5bfSGarrett Wollman 			svc_change_space_used(pool, -sz);
1308b776fb2dSAlexander Motin 			mtx_lock(&st->st_lock);
1309a9148abdSDoug Rabson 		}
1310b776fb2dSAlexander Motin 		mtx_unlock(&st->st_lock);
1311b563304cSAlexander Motin 		mtx_lock(&grp->sg_lock);
1312a9148abdSDoug Rabson 	}
1313a9148abdSDoug Rabson 
1314a9148abdSDoug Rabson 	if (st->st_xprt) {
1315a9148abdSDoug Rabson 		xprt = st->st_xprt;
1316a9148abdSDoug Rabson 		st->st_xprt = NULL;
1317a9148abdSDoug Rabson 		SVC_RELEASE(xprt);
1318a9148abdSDoug Rabson 	}
1319a9148abdSDoug Rabson 	KASSERT(STAILQ_EMPTY(&st->st_reqs), ("stray reqs on exit"));
1320b776fb2dSAlexander Motin 	mtx_destroy(&st->st_lock);
1321a9148abdSDoug Rabson 	cv_destroy(&st->st_cond);
1322a9148abdSDoug Rabson 	mem_free(st, sizeof(*st));
1323a9148abdSDoug Rabson 
1324b563304cSAlexander Motin 	grp->sg_threadcount--;
1325a9148abdSDoug Rabson 	if (!ismaster)
1326b563304cSAlexander Motin 		wakeup(grp);
1327b563304cSAlexander Motin 	mtx_unlock(&grp->sg_lock);
1328a9148abdSDoug Rabson }
1329a9148abdSDoug Rabson 
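/*
 * Kernel thread entry point for the additional service threads created
 * by svc_new_thread().
 */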
1330a9148abdSDoug Rabson static void
1331a9148abdSDoug Rabson svc_thread_start(void *arg)
1332a9148abdSDoug Rabson {
1333a9148abdSDoug Rabson 
1334b563304cSAlexander Motin 	svc_run_internal((SVCGROUP *) arg, FALSE);
1335a9148abdSDoug Rabson 	kthread_exit();
1336a9148abdSDoug Rabson }
1337a9148abdSDoug Rabson 
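/*
 * Account for and start one more service thread for the given thread
 * group.
 */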
1338a9148abdSDoug Rabson static void
1339b563304cSAlexander Motin svc_new_thread(SVCGROUP *grp)
1340a9148abdSDoug Rabson {
1341b563304cSAlexander Motin 	SVCPOOL *pool = grp->sg_pool;
1342a9148abdSDoug Rabson 	struct thread *td;
1343a9148abdSDoug Rabson 
1344ece9d8b7SAlexander Motin 	mtx_lock(&grp->sg_lock);
1345b563304cSAlexander Motin 	grp->sg_threadcount++;
1346ece9d8b7SAlexander Motin 	mtx_unlock(&grp->sg_lock);
1347b563304cSAlexander Motin 	kthread_add(svc_thread_start, grp, pool->sp_proc, &td, 0, 0,
1348a9148abdSDoug Rabson 	    "%s: service", pool->sp_name);
1349dfdcada3SDoug Rabson }
1350dfdcada3SDoug Rabson 
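/*
 * Run the service pool.  The calling thread becomes the master service
 * thread: it spreads the configured thread counts across the thread
 * groups, starts the minimum number of threads and does not return
 * until the pool is shut down by svc_exit().
 */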
1351dfdcada3SDoug Rabson void
1352dfdcada3SDoug Rabson svc_run(SVCPOOL *pool)
1353dfdcada3SDoug Rabson {
1354b563304cSAlexander Motin 	int g, i;
1355a9148abdSDoug Rabson 	struct proc *p;
1356a9148abdSDoug Rabson 	struct thread *td;
1357b563304cSAlexander Motin 	SVCGROUP *grp;
1358dfdcada3SDoug Rabson 
1359a9148abdSDoug Rabson 	p = curproc;
1360a9148abdSDoug Rabson 	td = curthread;
1361a9148abdSDoug Rabson 	snprintf(td->td_name, sizeof(td->td_name),
1362a9148abdSDoug Rabson 	    "%s: master", pool->sp_name);
1363a9148abdSDoug Rabson 	pool->sp_state = SVCPOOL_ACTIVE;
1364a9148abdSDoug Rabson 	pool->sp_proc = p;
1365dfdcada3SDoug Rabson 
1366b563304cSAlexander Motin 	/* Choose group count based on number of threads and CPUs. */
1367b563304cSAlexander Motin 	pool->sp_groupcount = max(1, min(SVC_MAXGROUPS,
1368b563304cSAlexander Motin 	    min(pool->sp_maxthreads / 2, mp_ncpus) / 6));
1369b563304cSAlexander Motin 	for (g = 0; g < pool->sp_groupcount; g++) {
1370b563304cSAlexander Motin 		grp = &pool->sp_groups[g];
1371b563304cSAlexander Motin 		grp->sg_minthreads = max(1,
1372b563304cSAlexander Motin 		    pool->sp_minthreads / pool->sp_groupcount);
1373b563304cSAlexander Motin 		grp->sg_maxthreads = max(1,
1374b563304cSAlexander Motin 		    pool->sp_maxthreads / pool->sp_groupcount);
1375b563304cSAlexander Motin 		grp->sg_lastcreatetime = time_uptime;
1376dfdcada3SDoug Rabson 	}
1377dfdcada3SDoug Rabson 
1378b563304cSAlexander Motin 	/* Start threads; the master thread counts for group 0. */
1379ece9d8b7SAlexander Motin 	pool->sp_groups[0].sg_threadcount++;
1380b563304cSAlexander Motin 	for (g = 0; g < pool->sp_groupcount; g++) {
1381b563304cSAlexander Motin 		grp = &pool->sp_groups[g];
1382b563304cSAlexander Motin 		for (i = ((g == 0) ? 1 : 0); i < grp->sg_minthreads; i++)
1383b563304cSAlexander Motin 			svc_new_thread(grp);
1384b563304cSAlexander Motin 	}
1385b563304cSAlexander Motin 	svc_run_internal(&pool->sp_groups[0], TRUE);
1386dfdcada3SDoug Rabson 
1387b563304cSAlexander Motin 	/* Wait for all service threads to stop. */
1388b563304cSAlexander Motin 	for (g = 0; g < pool->sp_groupcount; g++) {
1389b563304cSAlexander Motin 		grp = &pool->sp_groups[g];
1390b563304cSAlexander Motin 		mtx_lock(&grp->sg_lock);
1391b563304cSAlexander Motin 		while (grp->sg_threadcount > 0)
1392b563304cSAlexander Motin 			msleep(grp, &grp->sg_lock, 0, "svcexit", 0);
1393b563304cSAlexander Motin 		mtx_unlock(&grp->sg_lock);
1394b563304cSAlexander Motin 	}
1395dfdcada3SDoug Rabson }
1396dfdcada3SDoug Rabson 
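/*
 * Request an orderly shutdown of the pool: mark every thread group as
 * closing and wake any idle service threads.
 */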
1397dfdcada3SDoug Rabson void
1398dfdcada3SDoug Rabson svc_exit(SVCPOOL *pool)
1399dfdcada3SDoug Rabson {
1400b563304cSAlexander Motin 	SVCGROUP *grp;
1401a9148abdSDoug Rabson 	SVCTHREAD *st;
1402b563304cSAlexander Motin 	int g;
1403a9148abdSDoug Rabson 
1404a9148abdSDoug Rabson 	pool->sp_state = SVCPOOL_CLOSING;
1405b563304cSAlexander Motin 	for (g = 0; g < pool->sp_groupcount; g++) {
1406b563304cSAlexander Motin 		grp = &pool->sp_groups[g];
1407b563304cSAlexander Motin 		mtx_lock(&grp->sg_lock);
1408b563304cSAlexander Motin 		if (grp->sg_state != SVCPOOL_CLOSING) {
1409b563304cSAlexander Motin 			grp->sg_state = SVCPOOL_CLOSING;
1410b563304cSAlexander Motin 			LIST_FOREACH(st, &grp->sg_idlethreads, st_ilink)
1411a9148abdSDoug Rabson 				cv_signal(&st->st_cond);
1412db7cdfeeSAlexander Motin 		}
1413b563304cSAlexander Motin 		mtx_unlock(&grp->sg_lock);
1414b563304cSAlexander Motin 	}
1415dfdcada3SDoug Rabson }
1416a9148abdSDoug Rabson 
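/*
 * Decode the arguments of a request from its queued mbuf chain using
 * the supplied XDR routine.
 */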
1417a9148abdSDoug Rabson bool_t
1418a9148abdSDoug Rabson svc_getargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
1419a9148abdSDoug Rabson {
1420a9148abdSDoug Rabson 	struct mbuf *m;
1421a9148abdSDoug Rabson 	XDR xdrs;
1422a9148abdSDoug Rabson 	bool_t stat;
1423a9148abdSDoug Rabson 
1424a9148abdSDoug Rabson 	m = rqstp->rq_args;
1425a9148abdSDoug Rabson 	rqstp->rq_args = NULL;
1426a9148abdSDoug Rabson 
1427a9148abdSDoug Rabson 	xdrmbuf_create(&xdrs, m, XDR_DECODE);
1428a9148abdSDoug Rabson 	stat = xargs(&xdrs, args);
1429a9148abdSDoug Rabson 	XDR_DESTROY(&xdrs);
1430a9148abdSDoug Rabson 
1431a9148abdSDoug Rabson 	return (stat);
1432a9148abdSDoug Rabson }
1433a9148abdSDoug Rabson 
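/*
 * Free any storage allocated while decoding the arguments of a request,
 * along with the cached caller address, if any.
 */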
1434a9148abdSDoug Rabson bool_t
1435a9148abdSDoug Rabson svc_freeargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
1436a9148abdSDoug Rabson {
1437a9148abdSDoug Rabson 	XDR xdrs;
1438a9148abdSDoug Rabson 
1439a9148abdSDoug Rabson 	if (rqstp->rq_addr) {
1440a9148abdSDoug Rabson 		free(rqstp->rq_addr, M_SONAME);
1441a9148abdSDoug Rabson 		rqstp->rq_addr = NULL;
1442a9148abdSDoug Rabson 	}
1443a9148abdSDoug Rabson 
1444a9148abdSDoug Rabson 	xdrs.x_op = XDR_FREE;
1445a9148abdSDoug Rabson 	return (xargs(&xdrs, args));
1446a9148abdSDoug Rabson }
1447a9148abdSDoug Rabson 
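/*
 * Release all resources held by a request: notify the pool's sp_done
 * callback, drop the authenticator and transport references and free
 * the caller address, argument mbufs and the request itself.
 */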
1448a9148abdSDoug Rabson void
1449a9148abdSDoug Rabson svc_freereq(struct svc_req *rqstp)
1450a9148abdSDoug Rabson {
1451a9148abdSDoug Rabson 	SVCTHREAD *st;
1452a9148abdSDoug Rabson 	SVCPOOL *pool;
1453a9148abdSDoug Rabson 
1454a9148abdSDoug Rabson 	st = rqstp->rq_thread;
1455a9148abdSDoug Rabson 	if (st) {
1456f8fb069dSAlexander Motin 		pool = st->st_pool;
1457a9148abdSDoug Rabson 		if (pool->sp_done)
1458a9148abdSDoug Rabson 			pool->sp_done(st, rqstp);
1459a9148abdSDoug Rabson 	}
1460a9148abdSDoug Rabson 
1461a9148abdSDoug Rabson 	if (rqstp->rq_auth.svc_ah_ops)
1462a9148abdSDoug Rabson 		SVCAUTH_RELEASE(&rqstp->rq_auth);
1463a9148abdSDoug Rabson 
1464a9148abdSDoug Rabson 	if (rqstp->rq_xprt) {
1465a9148abdSDoug Rabson 		SVC_RELEASE(rqstp->rq_xprt);
1466a9148abdSDoug Rabson 	}
1467a9148abdSDoug Rabson 
1468a9148abdSDoug Rabson 	if (rqstp->rq_addr)
1469a9148abdSDoug Rabson 		free(rqstp->rq_addr, M_SONAME);
1470a9148abdSDoug Rabson 
1471a9148abdSDoug Rabson 	if (rqstp->rq_args)
1472a9148abdSDoug Rabson 		m_freem(rqstp->rq_args);
1473a9148abdSDoug Rabson 
1474a9148abdSDoug Rabson 	free(rqstp, M_RPC);
1475a9148abdSDoug Rabson }
1476