xref: /freebsd/sys/rpc/svc.c (revision b563304c502c6e905c4d425cbbaca7e1f59e0e44)
1dfdcada3SDoug Rabson /*	$NetBSD: svc.c,v 1.21 2000/07/06 03:10:35 christos Exp $	*/
2dfdcada3SDoug Rabson 
32e322d37SHiroki Sato /*-
42e322d37SHiroki Sato  * Copyright (c) 2009, Sun Microsystems, Inc.
52e322d37SHiroki Sato  * All rights reserved.
6dfdcada3SDoug Rabson  *
72e322d37SHiroki Sato  * Redistribution and use in source and binary forms, with or without
82e322d37SHiroki Sato  * modification, are permitted provided that the following conditions are met:
92e322d37SHiroki Sato  * - Redistributions of source code must retain the above copyright notice,
102e322d37SHiroki Sato  *   this list of conditions and the following disclaimer.
112e322d37SHiroki Sato  * - Redistributions in binary form must reproduce the above copyright notice,
122e322d37SHiroki Sato  *   this list of conditions and the following disclaimer in the documentation
132e322d37SHiroki Sato  *   and/or other materials provided with the distribution.
142e322d37SHiroki Sato  * - Neither the name of Sun Microsystems, Inc. nor the names of its
152e322d37SHiroki Sato  *   contributors may be used to endorse or promote products derived
162e322d37SHiroki Sato  *   from this software without specific prior written permission.
17dfdcada3SDoug Rabson  *
182e322d37SHiroki Sato  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
192e322d37SHiroki Sato  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
202e322d37SHiroki Sato  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
212e322d37SHiroki Sato  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
222e322d37SHiroki Sato  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
232e322d37SHiroki Sato  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
242e322d37SHiroki Sato  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
252e322d37SHiroki Sato  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
262e322d37SHiroki Sato  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
272e322d37SHiroki Sato  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
282e322d37SHiroki Sato  * POSSIBILITY OF SUCH DAMAGE.
29dfdcada3SDoug Rabson  */
30dfdcada3SDoug Rabson 
31dfdcada3SDoug Rabson #if defined(LIBC_SCCS) && !defined(lint)
32dfdcada3SDoug Rabson static char *sccsid2 = "@(#)svc.c 1.44 88/02/08 Copyr 1984 Sun Micro";
33dfdcada3SDoug Rabson static char *sccsid = "@(#)svc.c	2.4 88/08/11 4.0 RPCSRC";
34dfdcada3SDoug Rabson #endif
35dfdcada3SDoug Rabson #include <sys/cdefs.h>
36dfdcada3SDoug Rabson __FBSDID("$FreeBSD$");
37dfdcada3SDoug Rabson 
38dfdcada3SDoug Rabson /*
39dfdcada3SDoug Rabson  * svc.c, Server-side remote procedure call interface.
40dfdcada3SDoug Rabson  *
41dfdcada3SDoug Rabson  * There are two sets of procedures here.  The xprt routines are
42dfdcada3SDoug Rabson  * for handling transport handles.  The svc routines handle the
43dfdcada3SDoug Rabson  * list of service routines.
44dfdcada3SDoug Rabson  *
45dfdcada3SDoug Rabson  * Copyright (C) 1984, Sun Microsystems, Inc.
46dfdcada3SDoug Rabson  */
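/*
 * A minimal usage sketch for kernel consumers of this interface.  The
 * names MYPROG, MYVERS and myprog_dispatch are purely illustrative and
 * are not defined in this file; real consumers (e.g. the NFS server)
 * differ in detail:
 *
 *	SVCPOOL *pool;
 *
 *	pool = svcpool_create("myrpc", NULL);
 *	... create transports, e.g. with svc_vc_create() ...
 *	svc_reg(xprt, MYPROG, MYVERS, myprog_dispatch, nconf);
 *	svc_run(pool);		(returns after svc_exit(pool))
 *	svcpool_destroy(pool);
 */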
47dfdcada3SDoug Rabson 
48dfdcada3SDoug Rabson #include <sys/param.h>
49dfdcada3SDoug Rabson #include <sys/lock.h>
50dfdcada3SDoug Rabson #include <sys/kernel.h>
51a9148abdSDoug Rabson #include <sys/kthread.h>
52dfdcada3SDoug Rabson #include <sys/malloc.h>
53a9148abdSDoug Rabson #include <sys/mbuf.h>
54dfdcada3SDoug Rabson #include <sys/mutex.h>
55a9148abdSDoug Rabson #include <sys/proc.h>
56dfdcada3SDoug Rabson #include <sys/queue.h>
57a9148abdSDoug Rabson #include <sys/socketvar.h>
58dfdcada3SDoug Rabson #include <sys/systm.h>
59*b563304cSAlexander Motin #include <sys/smp.h>
60d473bac7SAlexander Motin #include <sys/sx.h>
61dfdcada3SDoug Rabson #include <sys/ucred.h>
62dfdcada3SDoug Rabson 
63dfdcada3SDoug Rabson #include <rpc/rpc.h>
64dfdcada3SDoug Rabson #include <rpc/rpcb_clnt.h>
65a9148abdSDoug Rabson #include <rpc/replay.h>
66dfdcada3SDoug Rabson 
67ee31b83aSDoug Rabson #include <rpc/rpc_com.h>
68dfdcada3SDoug Rabson 
69dfdcada3SDoug Rabson #define SVC_VERSQUIET 0x0001		/* keep quiet about vers mismatch */
70a9148abdSDoug Rabson #define version_keepquiet(xp) (SVC_EXT(xp)->xp_flags & SVC_VERSQUIET)
71dfdcada3SDoug Rabson 
72dfdcada3SDoug Rabson static struct svc_callout *svc_find(SVCPOOL *pool, rpcprog_t, rpcvers_t,
73dfdcada3SDoug Rabson     char *);
74*b563304cSAlexander Motin static void svc_new_thread(SVCGROUP *grp);
75a9148abdSDoug Rabson static void xprt_unregister_locked(SVCXPRT *xprt);
76f8fb069dSAlexander Motin static void svc_change_space_used(SVCPOOL *pool, int delta);
77f8fb069dSAlexander Motin static bool_t svc_request_space_available(SVCPOOL *pool);
78dfdcada3SDoug Rabson 
79dfdcada3SDoug Rabson /* ***************  SVCXPRT related stuff **************** */
80dfdcada3SDoug Rabson 
81a9148abdSDoug Rabson static int svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS);
82a9148abdSDoug Rabson static int svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS);
83*b563304cSAlexander Motin static int svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS);
84a9148abdSDoug Rabson 
85dfdcada3SDoug Rabson SVCPOOL*
86a9148abdSDoug Rabson svcpool_create(const char *name, struct sysctl_oid_list *sysctl_base)
87dfdcada3SDoug Rabson {
88dfdcada3SDoug Rabson 	SVCPOOL *pool;
89*b563304cSAlexander Motin 	SVCGROUP *grp;
90*b563304cSAlexander Motin 	int g;
91dfdcada3SDoug Rabson 
92dfdcada3SDoug Rabson 	pool = malloc(sizeof(SVCPOOL), M_RPC, M_WAITOK|M_ZERO);
93dfdcada3SDoug Rabson 
94dfdcada3SDoug Rabson 	mtx_init(&pool->sp_lock, "sp_lock", NULL, MTX_DEF);
95a9148abdSDoug Rabson 	pool->sp_name = name;
96a9148abdSDoug Rabson 	pool->sp_state = SVCPOOL_INIT;
97a9148abdSDoug Rabson 	pool->sp_proc = NULL;
98dfdcada3SDoug Rabson 	TAILQ_INIT(&pool->sp_callouts);
99d473bac7SAlexander Motin 	TAILQ_INIT(&pool->sp_lcallouts);
100a9148abdSDoug Rabson 	pool->sp_minthreads = 1;
101a9148abdSDoug Rabson 	pool->sp_maxthreads = 1;
102*b563304cSAlexander Motin 	pool->sp_groupcount = 1;
103*b563304cSAlexander Motin 	for (g = 0; g < SVC_MAXGROUPS; g++) {
104*b563304cSAlexander Motin 		grp = &pool->sp_groups[g];
105*b563304cSAlexander Motin 		mtx_init(&grp->sg_lock, "sg_lock", NULL, MTX_DEF);
106*b563304cSAlexander Motin 		grp->sg_pool = pool;
107*b563304cSAlexander Motin 		grp->sg_state = SVCPOOL_ACTIVE;
108*b563304cSAlexander Motin 		TAILQ_INIT(&grp->sg_xlist);
109*b563304cSAlexander Motin 		TAILQ_INIT(&grp->sg_active);
110*b563304cSAlexander Motin 		LIST_INIT(&grp->sg_idlethreads);
111*b563304cSAlexander Motin 		grp->sg_minthreads = 1;
112*b563304cSAlexander Motin 		grp->sg_maxthreads = 1;
113*b563304cSAlexander Motin 	}
114a9148abdSDoug Rabson 
115a9148abdSDoug Rabson 	/*
116a9148abdSDoug Rabson 	 * Don't use more than a quarter of mbuf clusters, or more than
117a9148abdSDoug Rabson 	 * 45MB, for buffering requests.
118a9148abdSDoug Rabson 	 */
119a9148abdSDoug Rabson 	pool->sp_space_high = nmbclusters * MCLBYTES / 4;
120a9148abdSDoug Rabson 	if (pool->sp_space_high > 45 << 20)
121a9148abdSDoug Rabson 		pool->sp_space_high = 45 << 20;
122a9148abdSDoug Rabson 	pool->sp_space_low = 2 * pool->sp_space_high / 3;
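	/*
	 * Illustrative numbers: with nmbclusters = 65536 and MCLBYTES = 2048,
	 * a quarter of the clusters is 32MB, below the 45MB cap, so
	 * sp_space_high = 32MB and sp_space_low is two thirds of that,
	 * roughly 21MB.
	 */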
123a9148abdSDoug Rabson 
124a9148abdSDoug Rabson 	sysctl_ctx_init(&pool->sp_sysctl);
125a9148abdSDoug Rabson 	if (sysctl_base) {
126a9148abdSDoug Rabson 		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
127a9148abdSDoug Rabson 		    "minthreads", CTLTYPE_INT | CTLFLAG_RW,
128*b563304cSAlexander Motin 		    pool, 0, svcpool_minthread_sysctl, "I",
129*b563304cSAlexander Motin 		    "Minimal number of threads");
130a9148abdSDoug Rabson 		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
131a9148abdSDoug Rabson 		    "maxthreads", CTLTYPE_INT | CTLFLAG_RW,
132*b563304cSAlexander Motin 		    pool, 0, svcpool_maxthread_sysctl, "I",
133*b563304cSAlexander Motin 		    "Maximal number of threads");
134*b563304cSAlexander Motin 		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
135*b563304cSAlexander Motin 		    "threads", CTLTYPE_INT | CTLFLAG_RD,
136*b563304cSAlexander Motin 		    pool, 0, svcpool_threads_sysctl, "I",
137*b563304cSAlexander Motin 		    "Current number of threads");
138a9148abdSDoug Rabson 		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
139*b563304cSAlexander Motin 		    "groups", CTLFLAG_RD, &pool->sp_groupcount, 0,
140*b563304cSAlexander Motin 		    "Number of thread groups");
141a9148abdSDoug Rabson 
142a9148abdSDoug Rabson 		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
143a9148abdSDoug Rabson 		    "request_space_used", CTLFLAG_RD,
144a9148abdSDoug Rabson 		    &pool->sp_space_used, 0,
145a9148abdSDoug Rabson 		    "Space in parsed but not handled requests.");
146a9148abdSDoug Rabson 
147a9148abdSDoug Rabson 		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
148a9148abdSDoug Rabson 		    "request_space_used_highest", CTLFLAG_RD,
149a9148abdSDoug Rabson 		    &pool->sp_space_used_highest, 0,
150a9148abdSDoug Rabson 		    "Highest space used since reboot.");
151a9148abdSDoug Rabson 
152a9148abdSDoug Rabson 		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
153a9148abdSDoug Rabson 		    "request_space_high", CTLFLAG_RW,
154a9148abdSDoug Rabson 		    &pool->sp_space_high, 0,
155a9148abdSDoug Rabson 		    "Maximum space in parsed but not handled requests.");
156a9148abdSDoug Rabson 
157a9148abdSDoug Rabson 		SYSCTL_ADD_UINT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
158a9148abdSDoug Rabson 		    "request_space_low", CTLFLAG_RW,
159a9148abdSDoug Rabson 		    &pool->sp_space_low, 0,
160a9148abdSDoug Rabson 		    "Low water mark for request space.");
161a9148abdSDoug Rabson 
162fbbb13f9SMatthew D Fleming 		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
163a9148abdSDoug Rabson 		    "request_space_throttled", CTLFLAG_RD,
164a9148abdSDoug Rabson 		    &pool->sp_space_throttled, 0,
165a9148abdSDoug Rabson 		    "Whether nfs requests are currently throttled");
166a9148abdSDoug Rabson 
167fbbb13f9SMatthew D Fleming 		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
168a9148abdSDoug Rabson 		    "request_space_throttle_count", CTLFLAG_RD,
169a9148abdSDoug Rabson 		    &pool->sp_space_throttle_count, 0,
170a9148abdSDoug Rabson 		    "Count of times throttling based on request space has occurred");
171a9148abdSDoug Rabson 	}
172dfdcada3SDoug Rabson 
173dfdcada3SDoug Rabson 	return pool;
174dfdcada3SDoug Rabson }
175dfdcada3SDoug Rabson 
176dfdcada3SDoug Rabson void
177dfdcada3SDoug Rabson svcpool_destroy(SVCPOOL *pool)
178dfdcada3SDoug Rabson {
179*b563304cSAlexander Motin 	SVCGROUP *grp;
180a9148abdSDoug Rabson 	SVCXPRT *xprt, *nxprt;
181dfdcada3SDoug Rabson 	struct svc_callout *s;
182d473bac7SAlexander Motin 	struct svc_loss_callout *sl;
183a9148abdSDoug Rabson 	struct svcxprt_list cleanup;
184*b563304cSAlexander Motin 	int g;
185dfdcada3SDoug Rabson 
186a9148abdSDoug Rabson 	TAILQ_INIT(&cleanup);
187dfdcada3SDoug Rabson 
188*b563304cSAlexander Motin 	for (g = 0; g < SVC_MAXGROUPS; g++) {
189*b563304cSAlexander Motin 		grp = &pool->sp_groups[g];
190*b563304cSAlexander Motin 		mtx_lock(&grp->sg_lock);
191*b563304cSAlexander Motin 		while ((xprt = TAILQ_FIRST(&grp->sg_xlist)) != NULL) {
192a9148abdSDoug Rabson 			xprt_unregister_locked(xprt);
193a9148abdSDoug Rabson 			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
194dfdcada3SDoug Rabson 		}
195*b563304cSAlexander Motin 		mtx_unlock(&grp->sg_lock);
196*b563304cSAlexander Motin 	}
197*b563304cSAlexander Motin 	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
198*b563304cSAlexander Motin 		SVC_RELEASE(xprt);
199*b563304cSAlexander Motin 	}
200dfdcada3SDoug Rabson 
201*b563304cSAlexander Motin 	mtx_lock(&pool->sp_lock);
202d473bac7SAlexander Motin 	while ((s = TAILQ_FIRST(&pool->sp_callouts)) != NULL) {
203dfdcada3SDoug Rabson 		mtx_unlock(&pool->sp_lock);
204dfdcada3SDoug Rabson 		svc_unreg(pool, s->sc_prog, s->sc_vers);
205dfdcada3SDoug Rabson 		mtx_lock(&pool->sp_lock);
206dfdcada3SDoug Rabson 	}
207d473bac7SAlexander Motin 	while ((sl = TAILQ_FIRST(&pool->sp_lcallouts)) != NULL) {
208d473bac7SAlexander Motin 		mtx_unlock(&pool->sp_lock);
209d473bac7SAlexander Motin 		svc_loss_unreg(pool, sl->slc_dispatch);
210d473bac7SAlexander Motin 		mtx_lock(&pool->sp_lock);
211d473bac7SAlexander Motin 	}
21275f2ae1aSRick Macklem 	mtx_unlock(&pool->sp_lock);
213dfdcada3SDoug Rabson 
214*b563304cSAlexander Motin 	for (g = 0; g < SVC_MAXGROUPS; g++) {
215*b563304cSAlexander Motin 		grp = &pool->sp_groups[g];
216*b563304cSAlexander Motin 		mtx_destroy(&grp->sg_lock);
217a9148abdSDoug Rabson 	}
218a4fa5e6dSRick Macklem 	mtx_destroy(&pool->sp_lock);
219a4fa5e6dSRick Macklem 
220a9148abdSDoug Rabson 	if (pool->sp_rcache)
221a9148abdSDoug Rabson 		replay_freecache(pool->sp_rcache);
222a9148abdSDoug Rabson 
223a9148abdSDoug Rabson 	sysctl_ctx_free(&pool->sp_sysctl);
224dfdcada3SDoug Rabson 	free(pool, M_RPC);
225dfdcada3SDoug Rabson }
226dfdcada3SDoug Rabson 
227*b563304cSAlexander Motin /*
228*b563304cSAlexander Motin  * Sysctl handler to get the present thread count on a pool
229*b563304cSAlexander Motin  */
230*b563304cSAlexander Motin static int
231*b563304cSAlexander Motin svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS)
232a9148abdSDoug Rabson {
233*b563304cSAlexander Motin 	SVCPOOL *pool;
234*b563304cSAlexander Motin 	int threads, error, g;
235a9148abdSDoug Rabson 
236*b563304cSAlexander Motin 	pool = oidp->oid_arg1;
237*b563304cSAlexander Motin 	threads = 0;
238*b563304cSAlexander Motin 	mtx_lock(&pool->sp_lock);
239*b563304cSAlexander Motin 	for (g = 0; g < pool->sp_groupcount; g++)
240*b563304cSAlexander Motin 		threads += pool->sp_groups[g].sg_threadcount;
241*b563304cSAlexander Motin 	mtx_unlock(&pool->sp_lock);
242*b563304cSAlexander Motin 	error = sysctl_handle_int(oidp, &threads, 0, req);
243*b563304cSAlexander Motin 	return (error);
244a9148abdSDoug Rabson }
245a9148abdSDoug Rabson 
246a9148abdSDoug Rabson /*
247a9148abdSDoug Rabson  * Sysctl handler to set the minimum thread count on a pool
248a9148abdSDoug Rabson  */
249a9148abdSDoug Rabson static int
250a9148abdSDoug Rabson svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS)
251a9148abdSDoug Rabson {
252a9148abdSDoug Rabson 	SVCPOOL *pool;
253*b563304cSAlexander Motin 	int newminthreads, error, g;
254a9148abdSDoug Rabson 
255a9148abdSDoug Rabson 	pool = oidp->oid_arg1;
256a9148abdSDoug Rabson 	newminthreads = pool->sp_minthreads;
257a9148abdSDoug Rabson 	error = sysctl_handle_int(oidp, &newminthreads, 0, req);
258a9148abdSDoug Rabson 	if (error == 0 && newminthreads != pool->sp_minthreads) {
259a9148abdSDoug Rabson 		if (newminthreads > pool->sp_maxthreads)
260a9148abdSDoug Rabson 			return (EINVAL);
261a9148abdSDoug Rabson 		mtx_lock(&pool->sp_lock);
262a9148abdSDoug Rabson 		pool->sp_minthreads = newminthreads;
263*b563304cSAlexander Motin 		for (g = 0; g < pool->sp_groupcount; g++) {
264*b563304cSAlexander Motin 			pool->sp_groups[g].sg_minthreads = max(1,
265*b563304cSAlexander Motin 			    pool->sp_minthreads / pool->sp_groupcount);
266*b563304cSAlexander Motin 		}
267a9148abdSDoug Rabson 		mtx_unlock(&pool->sp_lock);
268a9148abdSDoug Rabson 	}
269a9148abdSDoug Rabson 	return (error);
270a9148abdSDoug Rabson }
271a9148abdSDoug Rabson 
272a9148abdSDoug Rabson /*
273a9148abdSDoug Rabson  * Sysctl handler to set the maximum thread count on a pool
274a9148abdSDoug Rabson  */
275a9148abdSDoug Rabson static int
276a9148abdSDoug Rabson svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS)
277a9148abdSDoug Rabson {
278a9148abdSDoug Rabson 	SVCPOOL *pool;
279*b563304cSAlexander Motin 	int newmaxthreads, error, g;
280a9148abdSDoug Rabson 
281a9148abdSDoug Rabson 	pool = oidp->oid_arg1;
282a9148abdSDoug Rabson 	newmaxthreads = pool->sp_maxthreads;
283a9148abdSDoug Rabson 	error = sysctl_handle_int(oidp, &newmaxthreads, 0, req);
284a9148abdSDoug Rabson 	if (error == 0 && newmaxthreads != pool->sp_maxthreads) {
285a9148abdSDoug Rabson 		if (newmaxthreads < pool->sp_minthreads)
286a9148abdSDoug Rabson 			return (EINVAL);
287a9148abdSDoug Rabson 		mtx_lock(&pool->sp_lock);
288a9148abdSDoug Rabson 		pool->sp_maxthreads = newmaxthreads;
289*b563304cSAlexander Motin 		for (g = 0; g < pool->sp_groupcount; g++) {
290*b563304cSAlexander Motin 			pool->sp_groups[g].sg_maxthreads = max(1,
291*b563304cSAlexander Motin 			    pool->sp_maxthreads / pool->sp_groupcount);
292*b563304cSAlexander Motin 		}
293a9148abdSDoug Rabson 		mtx_unlock(&pool->sp_lock);
294a9148abdSDoug Rabson 	}
295a9148abdSDoug Rabson 	return (error);
296a9148abdSDoug Rabson }
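/*
 * When a sysctl_base is passed to svcpool_create(), the handlers above
 * appear under that consumer's own sysctl subtree (for the NFS server
 * pool these are normally vfs.nfsd.minthreads and vfs.nfsd.maxthreads).
 * The pool-wide value is divided evenly among the thread groups, e.g.
 * sp_maxthreads = 64 with sp_groupcount = 4 gives sg_maxthreads = 16
 * per group.
 */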
297a9148abdSDoug Rabson 
298dfdcada3SDoug Rabson /*
299dfdcada3SDoug Rabson  * Activate a transport handle.
300dfdcada3SDoug Rabson  */
301dfdcada3SDoug Rabson void
302dfdcada3SDoug Rabson xprt_register(SVCXPRT *xprt)
303dfdcada3SDoug Rabson {
304dfdcada3SDoug Rabson 	SVCPOOL *pool = xprt->xp_pool;
305*b563304cSAlexander Motin 	SVCGROUP *grp;
306*b563304cSAlexander Motin 	int g;
307dfdcada3SDoug Rabson 
3086b97c9f0SRick Macklem 	SVC_ACQUIRE(xprt);
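	/* Distribute transports round-robin across the pool's thread groups. */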
309*b563304cSAlexander Motin 	g = atomic_fetchadd_int(&pool->sp_nextgroup, 1) % pool->sp_groupcount;
310*b563304cSAlexander Motin 	xprt->xp_group = grp = &pool->sp_groups[g];
311*b563304cSAlexander Motin 	mtx_lock(&grp->sg_lock);
312dfdcada3SDoug Rabson 	xprt->xp_registered = TRUE;
313dfdcada3SDoug Rabson 	xprt->xp_active = FALSE;
314*b563304cSAlexander Motin 	TAILQ_INSERT_TAIL(&grp->sg_xlist, xprt, xp_link);
315*b563304cSAlexander Motin 	mtx_unlock(&grp->sg_lock);
316dfdcada3SDoug Rabson }
317dfdcada3SDoug Rabson 
318dfdcada3SDoug Rabson /*
319a9148abdSDoug Rabson  * De-activate a transport handle. Note: the locked version doesn't
320a9148abdSDoug Rabson  * release the transport - caller must do that after dropping the
321a9148abdSDoug Rabson  * group lock.
322dfdcada3SDoug Rabson  */
323dfdcada3SDoug Rabson static void
324a9148abdSDoug Rabson xprt_unregister_locked(SVCXPRT *xprt)
325dfdcada3SDoug Rabson {
326*b563304cSAlexander Motin 	SVCGROUP *grp = xprt->xp_group;
327dfdcada3SDoug Rabson 
328*b563304cSAlexander Motin 	mtx_assert(&grp->sg_lock, MA_OWNED);
329bca2ec16SRick Macklem 	KASSERT(xprt->xp_registered == TRUE,
330bca2ec16SRick Macklem 	    ("xprt_unregister_locked: not registered"));
331ba981145SAlexander Motin 	xprt_inactive_locked(xprt);
332*b563304cSAlexander Motin 	TAILQ_REMOVE(&grp->sg_xlist, xprt, xp_link);
333dfdcada3SDoug Rabson 	xprt->xp_registered = FALSE;
334a9148abdSDoug Rabson }
335dfdcada3SDoug Rabson 
336a9148abdSDoug Rabson void
337a9148abdSDoug Rabson xprt_unregister(SVCXPRT *xprt)
338a9148abdSDoug Rabson {
339*b563304cSAlexander Motin 	SVCGROUP *grp = xprt->xp_group;
340a9148abdSDoug Rabson 
341*b563304cSAlexander Motin 	mtx_lock(&grp->sg_lock);
342bca2ec16SRick Macklem 	if (xprt->xp_registered == FALSE) {
343bca2ec16SRick Macklem 		/* Already unregistered by another thread */
344*b563304cSAlexander Motin 		mtx_unlock(&grp->sg_lock);
345bca2ec16SRick Macklem 		return;
346bca2ec16SRick Macklem 	}
347a9148abdSDoug Rabson 	xprt_unregister_locked(xprt);
348*b563304cSAlexander Motin 	mtx_unlock(&grp->sg_lock);
349a9148abdSDoug Rabson 
350a9148abdSDoug Rabson 	SVC_RELEASE(xprt);
351a9148abdSDoug Rabson }
352a9148abdSDoug Rabson 
353ba981145SAlexander Motin /*
354ba981145SAlexander Motin  * Attempt to assign a service thread to this transport.
355ba981145SAlexander Motin  */
356ba981145SAlexander Motin static int
357a9148abdSDoug Rabson xprt_assignthread(SVCXPRT *xprt)
358a9148abdSDoug Rabson {
359*b563304cSAlexander Motin 	SVCGROUP *grp = xprt->xp_group;
360a9148abdSDoug Rabson 	SVCTHREAD *st;
361a9148abdSDoug Rabson 
362*b563304cSAlexander Motin 	mtx_assert(&grp->sg_lock, MA_OWNED);
363*b563304cSAlexander Motin 	st = LIST_FIRST(&grp->sg_idlethreads);
364a9148abdSDoug Rabson 	if (st) {
365ba981145SAlexander Motin 		LIST_REMOVE(st, st_ilink);
366a9148abdSDoug Rabson 		SVC_ACQUIRE(xprt);
367a9148abdSDoug Rabson 		xprt->xp_thread = st;
368a9148abdSDoug Rabson 		st->st_xprt = xprt;
369a9148abdSDoug Rabson 		cv_signal(&st->st_cond);
370ba981145SAlexander Motin 		return (TRUE);
371a9148abdSDoug Rabson 	} else {
372a9148abdSDoug Rabson 		/*
373a9148abdSDoug Rabson 		 * See if we can create a new thread. The
374a9148abdSDoug Rabson 		 * actual thread creation happens in
375a9148abdSDoug Rabson 		 * svc_run_internal because our locking state
376a9148abdSDoug Rabson 		 * is poorly defined (we are typically called
377a9148abdSDoug Rabson 		 * from a socket upcall). Don't create more
378a9148abdSDoug Rabson 		 * than one thread per second.
379a9148abdSDoug Rabson 		 */
380*b563304cSAlexander Motin 		if (grp->sg_state == SVCPOOL_ACTIVE
381*b563304cSAlexander Motin 		    && grp->sg_lastcreatetime < time_uptime
382*b563304cSAlexander Motin 		    && grp->sg_threadcount < grp->sg_maxthreads) {
383*b563304cSAlexander Motin 			grp->sg_state = SVCPOOL_THREADWANTED;
384a9148abdSDoug Rabson 		}
385a9148abdSDoug Rabson 	}
386ba981145SAlexander Motin 	return (FALSE);
387dfdcada3SDoug Rabson }
388dfdcada3SDoug Rabson 
389dfdcada3SDoug Rabson void
390dfdcada3SDoug Rabson xprt_active(SVCXPRT *xprt)
391dfdcada3SDoug Rabson {
392*b563304cSAlexander Motin 	SVCGROUP *grp = xprt->xp_group;
393dfdcada3SDoug Rabson 
394*b563304cSAlexander Motin 	mtx_lock(&grp->sg_lock);
395a4fa5e6dSRick Macklem 
396a9148abdSDoug Rabson 	if (!xprt->xp_registered) {
397a9148abdSDoug Rabson 		/*
398a9148abdSDoug Rabson 		 * Race with xprt_unregister - we lose.
399a9148abdSDoug Rabson 		 */
400*b563304cSAlexander Motin 		mtx_unlock(&grp->sg_lock);
401a9148abdSDoug Rabson 		return;
402a9148abdSDoug Rabson 	}
403a9148abdSDoug Rabson 
404dfdcada3SDoug Rabson 	if (!xprt->xp_active) {
405dfdcada3SDoug Rabson 		xprt->xp_active = TRUE;
406ba981145SAlexander Motin 		if (xprt->xp_thread == NULL) {
407*b563304cSAlexander Motin 			if (!svc_request_space_available(xprt->xp_pool) ||
408f8fb069dSAlexander Motin 			    !xprt_assignthread(xprt))
409*b563304cSAlexander Motin 				TAILQ_INSERT_TAIL(&grp->sg_active, xprt,
410ba981145SAlexander Motin 				    xp_alink);
411ba981145SAlexander Motin 		}
412dfdcada3SDoug Rabson 	}
413dfdcada3SDoug Rabson 
414*b563304cSAlexander Motin 	mtx_unlock(&grp->sg_lock);
415dfdcada3SDoug Rabson }
416dfdcada3SDoug Rabson 
417dfdcada3SDoug Rabson void
418a9148abdSDoug Rabson xprt_inactive_locked(SVCXPRT *xprt)
419a9148abdSDoug Rabson {
420*b563304cSAlexander Motin 	SVCGROUP *grp = xprt->xp_group;
421a9148abdSDoug Rabson 
422*b563304cSAlexander Motin 	mtx_assert(&grp->sg_lock, MA_OWNED);
423a9148abdSDoug Rabson 	if (xprt->xp_active) {
424ba981145SAlexander Motin 		if (xprt->xp_thread == NULL)
425*b563304cSAlexander Motin 			TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
426a9148abdSDoug Rabson 		xprt->xp_active = FALSE;
427a9148abdSDoug Rabson 	}
428a9148abdSDoug Rabson }
429a9148abdSDoug Rabson 
430a9148abdSDoug Rabson void
431dfdcada3SDoug Rabson xprt_inactive(SVCXPRT *xprt)
432dfdcada3SDoug Rabson {
433*b563304cSAlexander Motin 	SVCGROUP *grp = xprt->xp_group;
434dfdcada3SDoug Rabson 
435*b563304cSAlexander Motin 	mtx_lock(&grp->sg_lock);
436a9148abdSDoug Rabson 	xprt_inactive_locked(xprt);
437*b563304cSAlexander Motin 	mtx_unlock(&grp->sg_lock);
438dfdcada3SDoug Rabson }
439dfdcada3SDoug Rabson 
440dfdcada3SDoug Rabson /*
4415c42b9dcSAlexander Motin  * Variant of xprt_inactive() for use only when it is known that the
4425c42b9dcSAlexander Motin  * port is assigned to a thread, for example within receive handlers.
4435c42b9dcSAlexander Motin  */
4445c42b9dcSAlexander Motin void
4455c42b9dcSAlexander Motin xprt_inactive_self(SVCXPRT *xprt)
4465c42b9dcSAlexander Motin {
4475c42b9dcSAlexander Motin 
4485c42b9dcSAlexander Motin 	KASSERT(xprt->xp_thread != NULL,
4495c42b9dcSAlexander Motin 	    ("xprt_inactive_self(%p) with NULL xp_thread", xprt));
4505c42b9dcSAlexander Motin 	xprt->xp_active = FALSE;
4515c42b9dcSAlexander Motin }
4525c42b9dcSAlexander Motin 
4535c42b9dcSAlexander Motin /*
454dfdcada3SDoug Rabson  * Add a service program to the callout list.
455dfdcada3SDoug Rabson  * The dispatch routine will be called when an RPC request for this
456dfdcada3SDoug Rabson  * program number comes in.
457dfdcada3SDoug Rabson  */
458dfdcada3SDoug Rabson bool_t
459dfdcada3SDoug Rabson svc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
460dfdcada3SDoug Rabson     void (*dispatch)(struct svc_req *, SVCXPRT *),
461dfdcada3SDoug Rabson     const struct netconfig *nconf)
462dfdcada3SDoug Rabson {
463dfdcada3SDoug Rabson 	SVCPOOL *pool = xprt->xp_pool;
464dfdcada3SDoug Rabson 	struct svc_callout *s;
465dfdcada3SDoug Rabson 	char *netid = NULL;
466dfdcada3SDoug Rabson 	int flag = 0;
467dfdcada3SDoug Rabson 
468dfdcada3SDoug Rabson /* VARIABLES PROTECTED BY svc_lock: s, svc_head */
469dfdcada3SDoug Rabson 
470dfdcada3SDoug Rabson 	if (xprt->xp_netid) {
471dfdcada3SDoug Rabson 		netid = strdup(xprt->xp_netid, M_RPC);
472dfdcada3SDoug Rabson 		flag = 1;
473dfdcada3SDoug Rabson 	} else if (nconf && nconf->nc_netid) {
474dfdcada3SDoug Rabson 		netid = strdup(nconf->nc_netid, M_RPC);
475dfdcada3SDoug Rabson 		flag = 1;
476dfdcada3SDoug Rabson 	} /* must have been created with svc_raw_create */
477dfdcada3SDoug Rabson 	if ((netid == NULL) && (flag == 1)) {
478dfdcada3SDoug Rabson 		return (FALSE);
479dfdcada3SDoug Rabson 	}
480dfdcada3SDoug Rabson 
481dfdcada3SDoug Rabson 	mtx_lock(&pool->sp_lock);
482dfdcada3SDoug Rabson 	if ((s = svc_find(pool, prog, vers, netid)) != NULL) {
483dfdcada3SDoug Rabson 		if (netid)
484dfdcada3SDoug Rabson 			free(netid, M_RPC);
485dfdcada3SDoug Rabson 		if (s->sc_dispatch == dispatch)
486dfdcada3SDoug Rabson 			goto rpcb_it; /* the caller is registering another xprt */
487dfdcada3SDoug Rabson 		mtx_unlock(&pool->sp_lock);
488dfdcada3SDoug Rabson 		return (FALSE);
489dfdcada3SDoug Rabson 	}
490dfdcada3SDoug Rabson 	s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT);
491dfdcada3SDoug Rabson 	if (s == NULL) {
492dfdcada3SDoug Rabson 		if (netid)
493dfdcada3SDoug Rabson 			free(netid, M_RPC);
494dfdcada3SDoug Rabson 		mtx_unlock(&pool->sp_lock);
495dfdcada3SDoug Rabson 		return (FALSE);
496dfdcada3SDoug Rabson 	}
497dfdcada3SDoug Rabson 
498dfdcada3SDoug Rabson 	s->sc_prog = prog;
499dfdcada3SDoug Rabson 	s->sc_vers = vers;
500dfdcada3SDoug Rabson 	s->sc_dispatch = dispatch;
501dfdcada3SDoug Rabson 	s->sc_netid = netid;
502dfdcada3SDoug Rabson 	TAILQ_INSERT_TAIL(&pool->sp_callouts, s, sc_link);
503dfdcada3SDoug Rabson 
504dfdcada3SDoug Rabson 	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
505dfdcada3SDoug Rabson 		((SVCXPRT *) xprt)->xp_netid = strdup(netid, M_RPC);
506dfdcada3SDoug Rabson 
507dfdcada3SDoug Rabson rpcb_it:
508dfdcada3SDoug Rabson 	mtx_unlock(&pool->sp_lock);
509dfdcada3SDoug Rabson 	/* now register the information with the local binder service */
510dfdcada3SDoug Rabson 	if (nconf) {
511dfdcada3SDoug Rabson 		bool_t dummy;
512dfdcada3SDoug Rabson 		struct netconfig tnc;
513a9148abdSDoug Rabson 		struct netbuf nb;
514dfdcada3SDoug Rabson 		tnc = *nconf;
515a9148abdSDoug Rabson 		nb.buf = &xprt->xp_ltaddr;
516a9148abdSDoug Rabson 		nb.len = xprt->xp_ltaddr.ss_len;
517a9148abdSDoug Rabson 		dummy = rpcb_set(prog, vers, &tnc, &nb);
518dfdcada3SDoug Rabson 		return (dummy);
519dfdcada3SDoug Rabson 	}
520dfdcada3SDoug Rabson 	return (TRUE);
521dfdcada3SDoug Rabson }
522dfdcada3SDoug Rabson 
523dfdcada3SDoug Rabson /*
524dfdcada3SDoug Rabson  * Remove a service program from the callout list.
525dfdcada3SDoug Rabson  */
526dfdcada3SDoug Rabson void
527dfdcada3SDoug Rabson svc_unreg(SVCPOOL *pool, const rpcprog_t prog, const rpcvers_t vers)
528dfdcada3SDoug Rabson {
529dfdcada3SDoug Rabson 	struct svc_callout *s;
530dfdcada3SDoug Rabson 
531dfdcada3SDoug Rabson 	/* unregister the information anyway */
532dfdcada3SDoug Rabson 	(void) rpcb_unset(prog, vers, NULL);
533dfdcada3SDoug Rabson 	mtx_lock(&pool->sp_lock);
534dfdcada3SDoug Rabson 	while ((s = svc_find(pool, prog, vers, NULL)) != NULL) {
535dfdcada3SDoug Rabson 		TAILQ_REMOVE(&pool->sp_callouts, s, sc_link);
536dfdcada3SDoug Rabson 		if (s->sc_netid)
537dfdcada3SDoug Rabson 			mem_free(s->sc_netid, strlen(s->sc_netid) + 1);
538dfdcada3SDoug Rabson 		mem_free(s, sizeof (struct svc_callout));
539dfdcada3SDoug Rabson 	}
540dfdcada3SDoug Rabson 	mtx_unlock(&pool->sp_lock);
541dfdcada3SDoug Rabson }
542dfdcada3SDoug Rabson 
543d473bac7SAlexander Motin /*
544d473bac7SAlexander Motin  * Add a service connection loss program to the callout list.
545d473bac7SAlexander Motin  * The dispatch routine will be called when some port in this pool dies.
546d473bac7SAlexander Motin  */
547d473bac7SAlexander Motin bool_t
548d473bac7SAlexander Motin svc_loss_reg(SVCXPRT *xprt, void (*dispatch)(SVCXPRT *))
549d473bac7SAlexander Motin {
550d473bac7SAlexander Motin 	SVCPOOL *pool = xprt->xp_pool;
551d473bac7SAlexander Motin 	struct svc_loss_callout *s;
552d473bac7SAlexander Motin 
553d473bac7SAlexander Motin 	mtx_lock(&pool->sp_lock);
554d473bac7SAlexander Motin 	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
555d473bac7SAlexander Motin 		if (s->slc_dispatch == dispatch)
556d473bac7SAlexander Motin 			break;
557d473bac7SAlexander Motin 	}
558d473bac7SAlexander Motin 	if (s != NULL) {
559d473bac7SAlexander Motin 		mtx_unlock(&pool->sp_lock);
560d473bac7SAlexander Motin 		return (TRUE);
561d473bac7SAlexander Motin 	}
562d473bac7SAlexander Motin 	s = malloc(sizeof (struct svc_loss_callout), M_RPC, M_NOWAIT);
563d473bac7SAlexander Motin 	if (s == NULL) {
564d473bac7SAlexander Motin 		mtx_unlock(&pool->sp_lock);
565d473bac7SAlexander Motin 		return (FALSE);
566d473bac7SAlexander Motin 	}
567d473bac7SAlexander Motin 	s->slc_dispatch = dispatch;
568d473bac7SAlexander Motin 	TAILQ_INSERT_TAIL(&pool->sp_lcallouts, s, slc_link);
569d473bac7SAlexander Motin 	mtx_unlock(&pool->sp_lock);
570d473bac7SAlexander Motin 	return (TRUE);
571d473bac7SAlexander Motin }
572d473bac7SAlexander Motin 
573d473bac7SAlexander Motin /*
574d473bac7SAlexander Motin  * Remove a service connection loss program from the callout list.
575d473bac7SAlexander Motin  */
576d473bac7SAlexander Motin void
577d473bac7SAlexander Motin svc_loss_unreg(SVCPOOL *pool, void (*dispatch)(SVCXPRT *))
578d473bac7SAlexander Motin {
579d473bac7SAlexander Motin 	struct svc_loss_callout *s;
580d473bac7SAlexander Motin 
581d473bac7SAlexander Motin 	mtx_lock(&pool->sp_lock);
582d473bac7SAlexander Motin 	TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
583d473bac7SAlexander Motin 		if (s->slc_dispatch == dispatch) {
584d473bac7SAlexander Motin 			TAILQ_REMOVE(&pool->sp_lcallouts, s, slc_link);
585d473bac7SAlexander Motin 			free(s, M_RPC);
586d473bac7SAlexander Motin 			break;
587d473bac7SAlexander Motin 		}
588d473bac7SAlexander Motin 	}
589d473bac7SAlexander Motin 	mtx_unlock(&pool->sp_lock);
590d473bac7SAlexander Motin }
591d473bac7SAlexander Motin 
592dfdcada3SDoug Rabson /* ********************** CALLOUT list related stuff ************* */
593dfdcada3SDoug Rabson 
594dfdcada3SDoug Rabson /*
595dfdcada3SDoug Rabson  * Search the callout list for a program number, return the callout
596dfdcada3SDoug Rabson  * struct.
597dfdcada3SDoug Rabson  */
598dfdcada3SDoug Rabson static struct svc_callout *
599dfdcada3SDoug Rabson svc_find(SVCPOOL *pool, rpcprog_t prog, rpcvers_t vers, char *netid)
600dfdcada3SDoug Rabson {
601dfdcada3SDoug Rabson 	struct svc_callout *s;
602dfdcada3SDoug Rabson 
603dfdcada3SDoug Rabson 	mtx_assert(&pool->sp_lock, MA_OWNED);
604dfdcada3SDoug Rabson 	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
605dfdcada3SDoug Rabson 		if (s->sc_prog == prog && s->sc_vers == vers
606dfdcada3SDoug Rabson 		    && (netid == NULL || s->sc_netid == NULL ||
607dfdcada3SDoug Rabson 			strcmp(netid, s->sc_netid) == 0))
608dfdcada3SDoug Rabson 			break;
609dfdcada3SDoug Rabson 	}
610dfdcada3SDoug Rabson 
611dfdcada3SDoug Rabson 	return (s);
612dfdcada3SDoug Rabson }
613dfdcada3SDoug Rabson 
614dfdcada3SDoug Rabson /* ******************* REPLY GENERATION ROUTINES  ************ */
615dfdcada3SDoug Rabson 
616a9148abdSDoug Rabson static bool_t
617a9148abdSDoug Rabson svc_sendreply_common(struct svc_req *rqstp, struct rpc_msg *rply,
618a9148abdSDoug Rabson     struct mbuf *body)
619a9148abdSDoug Rabson {
620a9148abdSDoug Rabson 	SVCXPRT *xprt = rqstp->rq_xprt;
621a9148abdSDoug Rabson 	bool_t ok;
622a9148abdSDoug Rabson 
623a9148abdSDoug Rabson 	if (rqstp->rq_args) {
624a9148abdSDoug Rabson 		m_freem(rqstp->rq_args);
625a9148abdSDoug Rabson 		rqstp->rq_args = NULL;
626a9148abdSDoug Rabson 	}
627a9148abdSDoug Rabson 
628a9148abdSDoug Rabson 	if (xprt->xp_pool->sp_rcache)
629a9148abdSDoug Rabson 		replay_setreply(xprt->xp_pool->sp_rcache,
630a9148abdSDoug Rabson 		    rply, svc_getrpccaller(rqstp), body);
631a9148abdSDoug Rabson 
632a9148abdSDoug Rabson 	if (!SVCAUTH_WRAP(&rqstp->rq_auth, &body))
633a9148abdSDoug Rabson 		return (FALSE);
634a9148abdSDoug Rabson 
635d473bac7SAlexander Motin 	ok = SVC_REPLY(xprt, rply, rqstp->rq_addr, body, &rqstp->rq_reply_seq);
636a9148abdSDoug Rabson 	if (rqstp->rq_addr) {
637a9148abdSDoug Rabson 		free(rqstp->rq_addr, M_SONAME);
638a9148abdSDoug Rabson 		rqstp->rq_addr = NULL;
639a9148abdSDoug Rabson 	}
640a9148abdSDoug Rabson 
641a9148abdSDoug Rabson 	return (ok);
642a9148abdSDoug Rabson }
643a9148abdSDoug Rabson 
644dfdcada3SDoug Rabson /*
645dfdcada3SDoug Rabson  * Send a reply to an rpc request
646dfdcada3SDoug Rabson  */
647dfdcada3SDoug Rabson bool_t
648a9148abdSDoug Rabson svc_sendreply(struct svc_req *rqstp, xdrproc_t xdr_results, void * xdr_location)
649a9148abdSDoug Rabson {
650a9148abdSDoug Rabson 	struct rpc_msg rply;
651a9148abdSDoug Rabson 	struct mbuf *m;
652a9148abdSDoug Rabson 	XDR xdrs;
653a9148abdSDoug Rabson 	bool_t ok;
654a9148abdSDoug Rabson 
655a9148abdSDoug Rabson 	rply.rm_xid = rqstp->rq_xid;
656a9148abdSDoug Rabson 	rply.rm_direction = REPLY;
657a9148abdSDoug Rabson 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
658a9148abdSDoug Rabson 	rply.acpted_rply.ar_verf = rqstp->rq_verf;
659a9148abdSDoug Rabson 	rply.acpted_rply.ar_stat = SUCCESS;
660a9148abdSDoug Rabson 	rply.acpted_rply.ar_results.where = NULL;
661a9148abdSDoug Rabson 	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;
662a9148abdSDoug Rabson 
663bd54830bSGleb Smirnoff 	m = m_getcl(M_WAITOK, MT_DATA, 0);
664a9148abdSDoug Rabson 	xdrmbuf_create(&xdrs, m, XDR_ENCODE);
665a9148abdSDoug Rabson 	ok = xdr_results(&xdrs, xdr_location);
666a9148abdSDoug Rabson 	XDR_DESTROY(&xdrs);
667a9148abdSDoug Rabson 
668a9148abdSDoug Rabson 	if (ok) {
669a9148abdSDoug Rabson 		return (svc_sendreply_common(rqstp, &rply, m));
670a9148abdSDoug Rabson 	} else {
671a9148abdSDoug Rabson 		m_freem(m);
672a9148abdSDoug Rabson 		return (FALSE);
673a9148abdSDoug Rabson 	}
674a9148abdSDoug Rabson }
675a9148abdSDoug Rabson 
676a9148abdSDoug Rabson bool_t
677a9148abdSDoug Rabson svc_sendreply_mbuf(struct svc_req *rqstp, struct mbuf *m)
678dfdcada3SDoug Rabson {
679dfdcada3SDoug Rabson 	struct rpc_msg rply;
680dfdcada3SDoug Rabson 
681a9148abdSDoug Rabson 	rply.rm_xid = rqstp->rq_xid;
682dfdcada3SDoug Rabson 	rply.rm_direction = REPLY;
683dfdcada3SDoug Rabson 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
684a9148abdSDoug Rabson 	rply.acpted_rply.ar_verf = rqstp->rq_verf;
685dfdcada3SDoug Rabson 	rply.acpted_rply.ar_stat = SUCCESS;
686a9148abdSDoug Rabson 	rply.acpted_rply.ar_results.where = NULL;
687a9148abdSDoug Rabson 	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;
688dfdcada3SDoug Rabson 
689a9148abdSDoug Rabson 	return (svc_sendreply_common(rqstp, &rply, m));
690dfdcada3SDoug Rabson }
691dfdcada3SDoug Rabson 
692dfdcada3SDoug Rabson /*
693dfdcada3SDoug Rabson  * No procedure error reply
694dfdcada3SDoug Rabson  */
695dfdcada3SDoug Rabson void
696a9148abdSDoug Rabson svcerr_noproc(struct svc_req *rqstp)
697dfdcada3SDoug Rabson {
698a9148abdSDoug Rabson 	SVCXPRT *xprt = rqstp->rq_xprt;
699dfdcada3SDoug Rabson 	struct rpc_msg rply;
700dfdcada3SDoug Rabson 
701a9148abdSDoug Rabson 	rply.rm_xid = rqstp->rq_xid;
702dfdcada3SDoug Rabson 	rply.rm_direction = REPLY;
703dfdcada3SDoug Rabson 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
704a9148abdSDoug Rabson 	rply.acpted_rply.ar_verf = rqstp->rq_verf;
705dfdcada3SDoug Rabson 	rply.acpted_rply.ar_stat = PROC_UNAVAIL;
706dfdcada3SDoug Rabson 
707a9148abdSDoug Rabson 	if (xprt->xp_pool->sp_rcache)
708a9148abdSDoug Rabson 		replay_setreply(xprt->xp_pool->sp_rcache,
709a9148abdSDoug Rabson 		    &rply, svc_getrpccaller(rqstp), NULL);
710a9148abdSDoug Rabson 
711a9148abdSDoug Rabson 	svc_sendreply_common(rqstp, &rply, NULL);
712dfdcada3SDoug Rabson }
713dfdcada3SDoug Rabson 
714dfdcada3SDoug Rabson /*
715dfdcada3SDoug Rabson  * Can't decode args error reply
716dfdcada3SDoug Rabson  */
717dfdcada3SDoug Rabson void
718a9148abdSDoug Rabson svcerr_decode(struct svc_req *rqstp)
719dfdcada3SDoug Rabson {
720a9148abdSDoug Rabson 	SVCXPRT *xprt = rqstp->rq_xprt;
721dfdcada3SDoug Rabson 	struct rpc_msg rply;
722dfdcada3SDoug Rabson 
723a9148abdSDoug Rabson 	rply.rm_xid = rqstp->rq_xid;
724dfdcada3SDoug Rabson 	rply.rm_direction = REPLY;
725dfdcada3SDoug Rabson 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
726a9148abdSDoug Rabson 	rply.acpted_rply.ar_verf = rqstp->rq_verf;
727dfdcada3SDoug Rabson 	rply.acpted_rply.ar_stat = GARBAGE_ARGS;
728dfdcada3SDoug Rabson 
729a9148abdSDoug Rabson 	if (xprt->xp_pool->sp_rcache)
730a9148abdSDoug Rabson 		replay_setreply(xprt->xp_pool->sp_rcache,
731a9148abdSDoug Rabson 		    &rply, (struct sockaddr *) &xprt->xp_rtaddr, NULL);
732a9148abdSDoug Rabson 
733a9148abdSDoug Rabson 	svc_sendreply_common(rqstp, &rply, NULL);
734dfdcada3SDoug Rabson }
735dfdcada3SDoug Rabson 
736dfdcada3SDoug Rabson /*
737dfdcada3SDoug Rabson  * Some system error
738dfdcada3SDoug Rabson  */
739dfdcada3SDoug Rabson void
740a9148abdSDoug Rabson svcerr_systemerr(struct svc_req *rqstp)
741dfdcada3SDoug Rabson {
742a9148abdSDoug Rabson 	SVCXPRT *xprt = rqstp->rq_xprt;
743dfdcada3SDoug Rabson 	struct rpc_msg rply;
744dfdcada3SDoug Rabson 
745a9148abdSDoug Rabson 	rply.rm_xid = rqstp->rq_xid;
746dfdcada3SDoug Rabson 	rply.rm_direction = REPLY;
747dfdcada3SDoug Rabson 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
748a9148abdSDoug Rabson 	rply.acpted_rply.ar_verf = rqstp->rq_verf;
749dfdcada3SDoug Rabson 	rply.acpted_rply.ar_stat = SYSTEM_ERR;
750dfdcada3SDoug Rabson 
751a9148abdSDoug Rabson 	if (xprt->xp_pool->sp_rcache)
752a9148abdSDoug Rabson 		replay_setreply(xprt->xp_pool->sp_rcache,
753a9148abdSDoug Rabson 		    &rply, svc_getrpccaller(rqstp), NULL);
754a9148abdSDoug Rabson 
755a9148abdSDoug Rabson 	svc_sendreply_common(rqstp, &rply, NULL);
756dfdcada3SDoug Rabson }
757dfdcada3SDoug Rabson 
758dfdcada3SDoug Rabson /*
759dfdcada3SDoug Rabson  * Authentication error reply
760dfdcada3SDoug Rabson  */
761dfdcada3SDoug Rabson void
762a9148abdSDoug Rabson svcerr_auth(struct svc_req *rqstp, enum auth_stat why)
763dfdcada3SDoug Rabson {
764a9148abdSDoug Rabson 	SVCXPRT *xprt = rqstp->rq_xprt;
765dfdcada3SDoug Rabson 	struct rpc_msg rply;
766dfdcada3SDoug Rabson 
767a9148abdSDoug Rabson 	rply.rm_xid = rqstp->rq_xid;
768dfdcada3SDoug Rabson 	rply.rm_direction = REPLY;
769dfdcada3SDoug Rabson 	rply.rm_reply.rp_stat = MSG_DENIED;
770dfdcada3SDoug Rabson 	rply.rjcted_rply.rj_stat = AUTH_ERROR;
771dfdcada3SDoug Rabson 	rply.rjcted_rply.rj_why = why;
772dfdcada3SDoug Rabson 
773a9148abdSDoug Rabson 	if (xprt->xp_pool->sp_rcache)
774a9148abdSDoug Rabson 		replay_setreply(xprt->xp_pool->sp_rcache,
775a9148abdSDoug Rabson 		    &rply, svc_getrpccaller(rqstp), NULL);
776a9148abdSDoug Rabson 
777a9148abdSDoug Rabson 	svc_sendreply_common(rqstp, &rply, NULL);
778dfdcada3SDoug Rabson }
779dfdcada3SDoug Rabson 
780dfdcada3SDoug Rabson /*
781dfdcada3SDoug Rabson  * Auth too weak error reply
782dfdcada3SDoug Rabson  */
783dfdcada3SDoug Rabson void
784a9148abdSDoug Rabson svcerr_weakauth(struct svc_req *rqstp)
785dfdcada3SDoug Rabson {
786dfdcada3SDoug Rabson 
787a9148abdSDoug Rabson 	svcerr_auth(rqstp, AUTH_TOOWEAK);
788dfdcada3SDoug Rabson }
789dfdcada3SDoug Rabson 
790dfdcada3SDoug Rabson /*
791dfdcada3SDoug Rabson  * Program unavailable error reply
792dfdcada3SDoug Rabson  */
793dfdcada3SDoug Rabson void
794a9148abdSDoug Rabson svcerr_noprog(struct svc_req *rqstp)
795dfdcada3SDoug Rabson {
796a9148abdSDoug Rabson 	SVCXPRT *xprt = rqstp->rq_xprt;
797dfdcada3SDoug Rabson 	struct rpc_msg rply;
798dfdcada3SDoug Rabson 
799a9148abdSDoug Rabson 	rply.rm_xid = rqstp->rq_xid;
800dfdcada3SDoug Rabson 	rply.rm_direction = REPLY;
801dfdcada3SDoug Rabson 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
802a9148abdSDoug Rabson 	rply.acpted_rply.ar_verf = rqstp->rq_verf;
803dfdcada3SDoug Rabson 	rply.acpted_rply.ar_stat = PROG_UNAVAIL;
804dfdcada3SDoug Rabson 
805a9148abdSDoug Rabson 	if (xprt->xp_pool->sp_rcache)
806a9148abdSDoug Rabson 		replay_setreply(xprt->xp_pool->sp_rcache,
807a9148abdSDoug Rabson 		    &rply, svc_getrpccaller(rqstp), NULL);
808a9148abdSDoug Rabson 
809a9148abdSDoug Rabson 	svc_sendreply_common(rqstp, &rply, NULL);
810dfdcada3SDoug Rabson }
811dfdcada3SDoug Rabson 
812dfdcada3SDoug Rabson /*
813dfdcada3SDoug Rabson  * Program version mismatch error reply
814dfdcada3SDoug Rabson  */
815dfdcada3SDoug Rabson void
816a9148abdSDoug Rabson svcerr_progvers(struct svc_req *rqstp, rpcvers_t low_vers, rpcvers_t high_vers)
817dfdcada3SDoug Rabson {
818a9148abdSDoug Rabson 	SVCXPRT *xprt = rqstp->rq_xprt;
819dfdcada3SDoug Rabson 	struct rpc_msg rply;
820dfdcada3SDoug Rabson 
821a9148abdSDoug Rabson 	rply.rm_xid = rqstp->rq_xid;
822dfdcada3SDoug Rabson 	rply.rm_direction = REPLY;
823dfdcada3SDoug Rabson 	rply.rm_reply.rp_stat = MSG_ACCEPTED;
824a9148abdSDoug Rabson 	rply.acpted_rply.ar_verf = rqstp->rq_verf;
825dfdcada3SDoug Rabson 	rply.acpted_rply.ar_stat = PROG_MISMATCH;
826dfdcada3SDoug Rabson 	rply.acpted_rply.ar_vers.low = (uint32_t)low_vers;
827dfdcada3SDoug Rabson 	rply.acpted_rply.ar_vers.high = (uint32_t)high_vers;
828dfdcada3SDoug Rabson 
829a9148abdSDoug Rabson 	if (xprt->xp_pool->sp_rcache)
830a9148abdSDoug Rabson 		replay_setreply(xprt->xp_pool->sp_rcache,
831a9148abdSDoug Rabson 		    &rply, svc_getrpccaller(rqstp), NULL);
832a9148abdSDoug Rabson 
833a9148abdSDoug Rabson 	svc_sendreply_common(rqstp, &rply, NULL);
834a9148abdSDoug Rabson }
835a9148abdSDoug Rabson 
836a9148abdSDoug Rabson /*
837a9148abdSDoug Rabson  * Allocate a new server transport structure. All fields are
838a9148abdSDoug Rabson  * initialized to zero and xp_p3 is initialized to point at an
839a9148abdSDoug Rabson  * extension structure to hold various flags and authentication
840a9148abdSDoug Rabson  * parameters.
841a9148abdSDoug Rabson  */
842a9148abdSDoug Rabson SVCXPRT *
843a9148abdSDoug Rabson svc_xprt_alloc(void)
844a9148abdSDoug Rabson {
845a9148abdSDoug Rabson 	SVCXPRT *xprt;
846a9148abdSDoug Rabson 	SVCXPRT_EXT *ext;
847a9148abdSDoug Rabson 
848a9148abdSDoug Rabson 	xprt = mem_alloc(sizeof(SVCXPRT));
849a9148abdSDoug Rabson 	memset(xprt, 0, sizeof(SVCXPRT));
850a9148abdSDoug Rabson 	ext = mem_alloc(sizeof(SVCXPRT_EXT));
851a9148abdSDoug Rabson 	memset(ext, 0, sizeof(SVCXPRT_EXT));
852a9148abdSDoug Rabson 	xprt->xp_p3 = ext;
853a9148abdSDoug Rabson 	refcount_init(&xprt->xp_refs, 1);
854a9148abdSDoug Rabson 
855a9148abdSDoug Rabson 	return (xprt);
856a9148abdSDoug Rabson }
857a9148abdSDoug Rabson 
858a9148abdSDoug Rabson /*
859a9148abdSDoug Rabson  * Free a server transport structure.
860a9148abdSDoug Rabson  */
861a9148abdSDoug Rabson void
862a9148abdSDoug Rabson svc_xprt_free(SVCXPRT *xprt)
864a9148abdSDoug Rabson {
865a9148abdSDoug Rabson 
866a9148abdSDoug Rabson 	mem_free(xprt->xp_p3, sizeof(SVCXPRT_EXT));
867a9148abdSDoug Rabson 	mem_free(xprt, sizeof(SVCXPRT));
868dfdcada3SDoug Rabson }
869dfdcada3SDoug Rabson 
870dfdcada3SDoug Rabson /* ******************* SERVER INPUT STUFF ******************* */
871dfdcada3SDoug Rabson 
872dfdcada3SDoug Rabson /*
873a9148abdSDoug Rabson  * Read RPC requests from a transport and queue them to be
874a9148abdSDoug Rabson  * executed. We handle authentication and replay cache replies here.
875a9148abdSDoug Rabson  * Actually dispatching the RPC is deferred till svc_executereq.
876dfdcada3SDoug Rabson  */
877a9148abdSDoug Rabson static enum xprt_stat
878a9148abdSDoug Rabson svc_getreq(SVCXPRT *xprt, struct svc_req **rqstp_ret)
879dfdcada3SDoug Rabson {
880dfdcada3SDoug Rabson 	SVCPOOL *pool = xprt->xp_pool;
881a9148abdSDoug Rabson 	struct svc_req *r;
882dfdcada3SDoug Rabson 	struct rpc_msg msg;
883a9148abdSDoug Rabson 	struct mbuf *args;
884d473bac7SAlexander Motin 	struct svc_loss_callout *s;
885a9148abdSDoug Rabson 	enum xprt_stat stat;
886a9148abdSDoug Rabson 
887a9148abdSDoug Rabson 	/* now receive msgs from xprt (support batch calls) */
888a9148abdSDoug Rabson 	r = malloc(sizeof(*r), M_RPC, M_WAITOK|M_ZERO);
889a9148abdSDoug Rabson 
890a9148abdSDoug Rabson 	msg.rm_call.cb_cred.oa_base = r->rq_credarea;
891a9148abdSDoug Rabson 	msg.rm_call.cb_verf.oa_base = &r->rq_credarea[MAX_AUTH_BYTES];
892a9148abdSDoug Rabson 	r->rq_clntcred = &r->rq_credarea[2*MAX_AUTH_BYTES];
893a9148abdSDoug Rabson 	if (SVC_RECV(xprt, &msg, &r->rq_addr, &args)) {
894a9148abdSDoug Rabson 		enum auth_stat why;
895a9148abdSDoug Rabson 
896a9148abdSDoug Rabson 		/*
897a9148abdSDoug Rabson 		 * Handle replays and authenticate before queuing the
898a9148abdSDoug Rabson 		 * request to be executed.
899a9148abdSDoug Rabson 		 */
900a9148abdSDoug Rabson 		SVC_ACQUIRE(xprt);
901a9148abdSDoug Rabson 		r->rq_xprt = xprt;
902a9148abdSDoug Rabson 		if (pool->sp_rcache) {
903a9148abdSDoug Rabson 			struct rpc_msg repmsg;
904a9148abdSDoug Rabson 			struct mbuf *repbody;
905a9148abdSDoug Rabson 			enum replay_state rs;
906a9148abdSDoug Rabson 			rs = replay_find(pool->sp_rcache, &msg,
907a9148abdSDoug Rabson 			    svc_getrpccaller(r), &repmsg, &repbody);
908a9148abdSDoug Rabson 			switch (rs) {
909a9148abdSDoug Rabson 			case RS_NEW:
910a9148abdSDoug Rabson 				break;
911a9148abdSDoug Rabson 			case RS_DONE:
912a9148abdSDoug Rabson 				SVC_REPLY(xprt, &repmsg, r->rq_addr,
913d473bac7SAlexander Motin 				    repbody, &r->rq_reply_seq);
914a9148abdSDoug Rabson 				if (r->rq_addr) {
915a9148abdSDoug Rabson 					free(r->rq_addr, M_SONAME);
916a9148abdSDoug Rabson 					r->rq_addr = NULL;
917a9148abdSDoug Rabson 				}
918578e600cSRick Macklem 				m_freem(args);
919a9148abdSDoug Rabson 				goto call_done;
920a9148abdSDoug Rabson 
921a9148abdSDoug Rabson 			default:
922578e600cSRick Macklem 				m_freem(args);
923a9148abdSDoug Rabson 				goto call_done;
924a9148abdSDoug Rabson 			}
925a9148abdSDoug Rabson 		}
926a9148abdSDoug Rabson 
927a9148abdSDoug Rabson 		r->rq_xid = msg.rm_xid;
928a9148abdSDoug Rabson 		r->rq_prog = msg.rm_call.cb_prog;
929a9148abdSDoug Rabson 		r->rq_vers = msg.rm_call.cb_vers;
930a9148abdSDoug Rabson 		r->rq_proc = msg.rm_call.cb_proc;
931a9148abdSDoug Rabson 		r->rq_size = sizeof(*r) + m_length(args, NULL);
932a9148abdSDoug Rabson 		r->rq_args = args;
933a9148abdSDoug Rabson 		if ((why = _authenticate(r, &msg)) != AUTH_OK) {
934a9148abdSDoug Rabson 			/*
935a9148abdSDoug Rabson 			 * RPCSEC_GSS uses this return code
936a9148abdSDoug Rabson 			 * for requests that form part of its
937a9148abdSDoug Rabson 			 * context establishment protocol and
938a9148abdSDoug Rabson 			 * should not be dispatched to the
939a9148abdSDoug Rabson 			 * application.
940a9148abdSDoug Rabson 			 */
941a9148abdSDoug Rabson 			if (why != RPCSEC_GSS_NODISPATCH)
942a9148abdSDoug Rabson 				svcerr_auth(r, why);
943a9148abdSDoug Rabson 			goto call_done;
944a9148abdSDoug Rabson 		}
945a9148abdSDoug Rabson 
946a9148abdSDoug Rabson 		if (!SVCAUTH_UNWRAP(&r->rq_auth, &r->rq_args)) {
947a9148abdSDoug Rabson 			svcerr_decode(r);
948a9148abdSDoug Rabson 			goto call_done;
949a9148abdSDoug Rabson 		}
950a9148abdSDoug Rabson 
951a9148abdSDoug Rabson 		/*
952a9148abdSDoug Rabson 		 * Everything checks out, return request to caller.
953a9148abdSDoug Rabson 		 */
954a9148abdSDoug Rabson 		*rqstp_ret = r;
955a9148abdSDoug Rabson 		r = NULL;
956a9148abdSDoug Rabson 	}
957a9148abdSDoug Rabson call_done:
958a9148abdSDoug Rabson 	if (r) {
959a9148abdSDoug Rabson 		svc_freereq(r);
960a9148abdSDoug Rabson 		r = NULL;
961a9148abdSDoug Rabson 	}
962a9148abdSDoug Rabson 	if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
963d473bac7SAlexander Motin 		TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link)
964d473bac7SAlexander Motin 			(*s->slc_dispatch)(xprt);
965a9148abdSDoug Rabson 		xprt_unregister(xprt);
966a9148abdSDoug Rabson 	}
967a9148abdSDoug Rabson 
968a9148abdSDoug Rabson 	return (stat);
969a9148abdSDoug Rabson }
970a9148abdSDoug Rabson 
971a9148abdSDoug Rabson static void
972a9148abdSDoug Rabson svc_executereq(struct svc_req *rqstp)
973a9148abdSDoug Rabson {
974a9148abdSDoug Rabson 	SVCXPRT *xprt = rqstp->rq_xprt;
975a9148abdSDoug Rabson 	SVCPOOL *pool = xprt->xp_pool;
976dfdcada3SDoug Rabson 	int prog_found;
977dfdcada3SDoug Rabson 	rpcvers_t low_vers;
978dfdcada3SDoug Rabson 	rpcvers_t high_vers;
979dfdcada3SDoug Rabson 	struct svc_callout *s;
980dfdcada3SDoug Rabson 
981dfdcada3SDoug Rabson 	/* now match message with a registered service*/
982dfdcada3SDoug Rabson 	prog_found = FALSE;
983dfdcada3SDoug Rabson 	low_vers = (rpcvers_t) -1L;
984dfdcada3SDoug Rabson 	high_vers = (rpcvers_t) 0L;
985dfdcada3SDoug Rabson 	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
986a9148abdSDoug Rabson 		if (s->sc_prog == rqstp->rq_prog) {
987a9148abdSDoug Rabson 			if (s->sc_vers == rqstp->rq_vers) {
988a9148abdSDoug Rabson 				/*
989a9148abdSDoug Rabson 				 * We hand ownership of rqstp to the
990a9148abdSDoug Rabson 				 * dispatch method - it must call
991a9148abdSDoug Rabson 				 * svc_freereq().
992a9148abdSDoug Rabson 				 */
993a9148abdSDoug Rabson 				(*s->sc_dispatch)(rqstp, xprt);
994a9148abdSDoug Rabson 				return;
995dfdcada3SDoug Rabson 			}  /* found correct version */
996dfdcada3SDoug Rabson 			prog_found = TRUE;
997dfdcada3SDoug Rabson 			if (s->sc_vers < low_vers)
998dfdcada3SDoug Rabson 				low_vers = s->sc_vers;
999dfdcada3SDoug Rabson 			if (s->sc_vers > high_vers)
1000dfdcada3SDoug Rabson 				high_vers = s->sc_vers;
1001dfdcada3SDoug Rabson 		}   /* found correct program */
1002dfdcada3SDoug Rabson 	}
1003a9148abdSDoug Rabson 
1004dfdcada3SDoug Rabson 	/*
1005dfdcada3SDoug Rabson 	 * if we got here, the program or version
1006dfdcada3SDoug Rabson 	 * is not served ...
1007dfdcada3SDoug Rabson 	 */
1008dfdcada3SDoug Rabson 	if (prog_found)
1009a9148abdSDoug Rabson 		svcerr_progvers(rqstp, low_vers, high_vers);
1010dfdcada3SDoug Rabson 	else
1011a9148abdSDoug Rabson 		svcerr_noprog(rqstp);
1012a9148abdSDoug Rabson 
1013a9148abdSDoug Rabson 	svc_freereq(rqstp);
1014dfdcada3SDoug Rabson }
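/*
 * A minimal sketch of a dispatch routine as registered with svc_reg().
 * It is purely illustrative ("myprog_dispatch" is not part of this file);
 * real dispatchers decode rqstp->rq_args and encode real results:
 *
 *	static void
 *	myprog_dispatch(struct svc_req *rqstp, SVCXPRT *xprt)
 *	{
 *
 *		... decode rqstp->rq_args, perform the call ...
 *		if (!svc_sendreply(rqstp, (xdrproc_t) xdr_void, NULL))
 *			svcerr_systemerr(rqstp);
 *		svc_freereq(rqstp);
 *	}
 */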
1015a9148abdSDoug Rabson 
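/*
 * Time out transports which have an idle timeout and have seen no
 * activity for longer than it.  Called with the group lock held; the
 * lock is dropped while the victims are released.
 */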
1016a9148abdSDoug Rabson static void
1017*b563304cSAlexander Motin svc_checkidle(SVCGROUP *grp)
1018a9148abdSDoug Rabson {
1019a9148abdSDoug Rabson 	SVCXPRT *xprt, *nxprt;
1020a9148abdSDoug Rabson 	time_t timo;
1021a9148abdSDoug Rabson 	struct svcxprt_list cleanup;
1022a9148abdSDoug Rabson 
1023a9148abdSDoug Rabson 	TAILQ_INIT(&cleanup);
1024*b563304cSAlexander Motin 	TAILQ_FOREACH_SAFE(xprt, &grp->sg_xlist, xp_link, nxprt) {
1025dfdcada3SDoug Rabson 		/*
1026a9148abdSDoug Rabson 		 * Only some transports have idle timers. Don't time
1027a9148abdSDoug Rabson 		 * something out which is just waking up.
1028dfdcada3SDoug Rabson 		 */
1029a9148abdSDoug Rabson 		if (!xprt->xp_idletimeout || xprt->xp_thread)
1030a9148abdSDoug Rabson 			continue;
1031a9148abdSDoug Rabson 
1032a9148abdSDoug Rabson 		timo = xprt->xp_lastactive + xprt->xp_idletimeout;
1033a9148abdSDoug Rabson 		if (time_uptime > timo) {
1034a9148abdSDoug Rabson 			xprt_unregister_locked(xprt);
1035a9148abdSDoug Rabson 			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
1036a9148abdSDoug Rabson 		}
1037a9148abdSDoug Rabson 	}
1038a9148abdSDoug Rabson 
1039*b563304cSAlexander Motin 	mtx_unlock(&grp->sg_lock);
1040a9148abdSDoug Rabson 	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
1041a9148abdSDoug Rabson 		SVC_RELEASE(xprt);
1042a9148abdSDoug Rabson 	}
1043*b563304cSAlexander Motin 	mtx_lock(&grp->sg_lock);
1044a9148abdSDoug Rabson }
1045a9148abdSDoug Rabson 
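/*
 * Hand transports that were queued while no thread (or no request space)
 * was available back to idle service threads, one group at a time.
 */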
1046a9148abdSDoug Rabson static void
1047a9148abdSDoug Rabson svc_assign_waiting_sockets(SVCPOOL *pool)
1048a9148abdSDoug Rabson {
1049*b563304cSAlexander Motin 	SVCGROUP *grp;
1050a9148abdSDoug Rabson 	SVCXPRT *xprt;
1051*b563304cSAlexander Motin 	int g;
1052a9148abdSDoug Rabson 
1053*b563304cSAlexander Motin 	for (g = 0; g < pool->sp_groupcount; g++) {
1054*b563304cSAlexander Motin 		grp = &pool->sp_groups[g];
1055*b563304cSAlexander Motin 		mtx_lock(&grp->sg_lock);
1056*b563304cSAlexander Motin 		while ((xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) {
1057ba981145SAlexander Motin 			if (xprt_assignthread(xprt))
1058*b563304cSAlexander Motin 				TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
1059ba981145SAlexander Motin 			else
1060ba981145SAlexander Motin 				break;
1061a9148abdSDoug Rabson 		}
1062*b563304cSAlexander Motin 		mtx_unlock(&grp->sg_lock);
1063*b563304cSAlexander Motin 	}
1064f8fb069dSAlexander Motin }
1065f8fb069dSAlexander Motin 
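/*
 * Account for request space entering or leaving the pool.  Crossing
 * sp_space_high turns throttling on; dropping below sp_space_low turns
 * it off again and kicks any transports left waiting for space.
 */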
1066f8fb069dSAlexander Motin static void
1067f8fb069dSAlexander Motin svc_change_space_used(SVCPOOL *pool, int delta)
1068f8fb069dSAlexander Motin {
1069f8fb069dSAlexander Motin 	unsigned int value;
1070f8fb069dSAlexander Motin 
1071f8fb069dSAlexander Motin 	value = atomic_fetchadd_int(&pool->sp_space_used, delta) + delta;
1072f8fb069dSAlexander Motin 	if (delta > 0) {
1073f8fb069dSAlexander Motin 		if (value >= pool->sp_space_high && !pool->sp_space_throttled) {
1074f8fb069dSAlexander Motin 			pool->sp_space_throttled = TRUE;
1075f8fb069dSAlexander Motin 			pool->sp_space_throttle_count++;
1076f8fb069dSAlexander Motin 		}
1077f8fb069dSAlexander Motin 		if (value > pool->sp_space_used_highest)
1078f8fb069dSAlexander Motin 			pool->sp_space_used_highest = value;
1079f8fb069dSAlexander Motin 	} else {
1080f8fb069dSAlexander Motin 		if (value < pool->sp_space_low && pool->sp_space_throttled) {
1081f8fb069dSAlexander Motin 			pool->sp_space_throttled = FALSE;
1082f8fb069dSAlexander Motin 			svc_assign_waiting_sockets(pool);
1083f8fb069dSAlexander Motin 		}
1084f8fb069dSAlexander Motin 	}
1085a9148abdSDoug Rabson }
1086a9148abdSDoug Rabson 
1087a9148abdSDoug Rabson static bool_t
1088a9148abdSDoug Rabson svc_request_space_available(SVCPOOL *pool)
1089a9148abdSDoug Rabson {
1090a9148abdSDoug Rabson 
1091f8fb069dSAlexander Motin 	if (pool->sp_space_throttled)
1092f8fb069dSAlexander Motin 		return (FALSE);
1093f8fb069dSAlexander Motin 	return (TRUE);
1094a9148abdSDoug Rabson }
1095a9148abdSDoug Rabson 
1096a9148abdSDoug Rabson static void
1097*b563304cSAlexander Motin svc_run_internal(SVCGROUP *grp, bool_t ismaster)
1098a9148abdSDoug Rabson {
1099*b563304cSAlexander Motin 	SVCPOOL *pool = grp->sg_pool;
1100a9148abdSDoug Rabson 	SVCTHREAD *st, *stpref;
1101a9148abdSDoug Rabson 	SVCXPRT *xprt;
1102a9148abdSDoug Rabson 	enum xprt_stat stat;
1103a9148abdSDoug Rabson 	struct svc_req *rqstp;
1104f8fb069dSAlexander Motin 	size_t sz;
1105a9148abdSDoug Rabson 	int error;
1106a9148abdSDoug Rabson 
1107a9148abdSDoug Rabson 	st = mem_alloc(sizeof(*st));
1108b776fb2dSAlexander Motin 	mtx_init(&st->st_lock, "st_lock", NULL, MTX_DEF);
1109f8fb069dSAlexander Motin 	st->st_pool = pool;
1110a9148abdSDoug Rabson 	st->st_xprt = NULL;
1111a9148abdSDoug Rabson 	STAILQ_INIT(&st->st_reqs);
1112a9148abdSDoug Rabson 	cv_init(&st->st_cond, "rpcsvc");
1113a9148abdSDoug Rabson 
1114*b563304cSAlexander Motin 	mtx_lock(&grp->sg_lock);
1115a9148abdSDoug Rabson 
1116a9148abdSDoug Rabson 	/*
1117a9148abdSDoug Rabson 	 * If we are a new thread which was spawned to cope with
1118a9148abdSDoug Rabson 	 * increased load, set the state back to SVCPOOL_ACTIVE.
1119a9148abdSDoug Rabson 	 */
1120*b563304cSAlexander Motin 	if (grp->sg_state == SVCPOOL_THREADSTARTING)
1121*b563304cSAlexander Motin 		grp->sg_state = SVCPOOL_ACTIVE;
1122a9148abdSDoug Rabson 
1123*b563304cSAlexander Motin 	while (grp->sg_state != SVCPOOL_CLOSING) {
1124a9148abdSDoug Rabson 		/*
1125db7cdfeeSAlexander Motin 		 * Create new thread if requested.
1126db7cdfeeSAlexander Motin 		 */
1127*b563304cSAlexander Motin 		if (grp->sg_state == SVCPOOL_THREADWANTED) {
1128*b563304cSAlexander Motin 			grp->sg_state = SVCPOOL_THREADSTARTING;
1129*b563304cSAlexander Motin 			grp->sg_lastcreatetime = time_uptime;
1130*b563304cSAlexander Motin 			mtx_unlock(&grp->sg_lock);
1131*b563304cSAlexander Motin 			svc_new_thread(grp);
1132*b563304cSAlexander Motin 			mtx_lock(&grp->sg_lock);
1133db7cdfeeSAlexander Motin 			continue;
1134db7cdfeeSAlexander Motin 		}
1135db7cdfeeSAlexander Motin 
1136db7cdfeeSAlexander Motin 		/*
1137a9148abdSDoug Rabson 		 * Check for idle transports once per second.
1138a9148abdSDoug Rabson 		 */
1139*b563304cSAlexander Motin 		if (time_uptime > grp->sg_lastidlecheck) {
1140*b563304cSAlexander Motin 			grp->sg_lastidlecheck = time_uptime;
1141*b563304cSAlexander Motin 			svc_checkidle(grp);
1142a9148abdSDoug Rabson 		}
1143a9148abdSDoug Rabson 
1144a9148abdSDoug Rabson 		xprt = st->st_xprt;
1145b776fb2dSAlexander Motin 		if (!xprt) {
1146a9148abdSDoug Rabson 			/*
1147a9148abdSDoug Rabson 			 * Enforce maxthreads count.
1148a9148abdSDoug Rabson 			 */
1149*b563304cSAlexander Motin 			if (grp->sg_threadcount > grp->sg_maxthreads)
1150a9148abdSDoug Rabson 				break;
1151a9148abdSDoug Rabson 
1152a9148abdSDoug Rabson 			/*
1153a9148abdSDoug Rabson 			 * Before sleeping, see if we can find an
1154a9148abdSDoug Rabson 			 * active transport which isn't being serviced
1155a9148abdSDoug Rabson 			 * by a thread.
1156a9148abdSDoug Rabson 			 */
1157ba981145SAlexander Motin 			if (svc_request_space_available(pool) &&
1158*b563304cSAlexander Motin 			    (xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) {
1159*b563304cSAlexander Motin 				TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
1160a9148abdSDoug Rabson 				SVC_ACQUIRE(xprt);
1161a9148abdSDoug Rabson 				xprt->xp_thread = st;
1162a9148abdSDoug Rabson 				st->st_xprt = xprt;
1163a9148abdSDoug Rabson 				continue;
1164ba981145SAlexander Motin 			}
1165a9148abdSDoug Rabson 
1166*b563304cSAlexander Motin 			LIST_INSERT_HEAD(&grp->sg_idlethreads, st, st_ilink);
1167db7cdfeeSAlexander Motin 			if (ismaster || (!ismaster &&
1168*b563304cSAlexander Motin 			    grp->sg_threadcount > grp->sg_minthreads))
1169db7cdfeeSAlexander Motin 				error = cv_timedwait_sig(&st->st_cond,
1170*b563304cSAlexander Motin 				    &grp->sg_lock, 5 * hz);
1171db7cdfeeSAlexander Motin 			else
1172db7cdfeeSAlexander Motin 				error = cv_wait_sig(&st->st_cond,
1173*b563304cSAlexander Motin 				    &grp->sg_lock);
1174b5d7fb73SAlexander Motin 			if (st->st_xprt == NULL)
1175a9148abdSDoug Rabson 				LIST_REMOVE(st, st_ilink);
1176a9148abdSDoug Rabson 
1177a9148abdSDoug Rabson 			/*
1178a9148abdSDoug Rabson 			 * Reduce worker thread count when idle.
1179a9148abdSDoug Rabson 			 */
1180a9148abdSDoug Rabson 			if (error == EWOULDBLOCK) {
1181a9148abdSDoug Rabson 				if (!ismaster
1182*b563304cSAlexander Motin 				    && (grp->sg_threadcount
1183*b563304cSAlexander Motin 					> grp->sg_minthreads)
1184b776fb2dSAlexander Motin 					&& !st->st_xprt)
1185dfdcada3SDoug Rabson 					break;
1186db7cdfeeSAlexander Motin 			} else if (error) {
1187*b563304cSAlexander Motin 				mtx_unlock(&grp->sg_lock);
1188a9148abdSDoug Rabson 				svc_exit(pool);
1189*b563304cSAlexander Motin 				mtx_lock(&grp->sg_lock);
1190a9148abdSDoug Rabson 				break;
1191a9148abdSDoug Rabson 			}
1192a9148abdSDoug Rabson 			continue;
1193a9148abdSDoug Rabson 		}
1194*b563304cSAlexander Motin 		mtx_unlock(&grp->sg_lock);
1195a9148abdSDoug Rabson 
1196a9148abdSDoug Rabson 		/*
1197b776fb2dSAlexander Motin 		 * Drain the transport socket and queue up any RPCs.
1198a9148abdSDoug Rabson 		 */
1199a9148abdSDoug Rabson 		xprt->xp_lastactive = time_uptime;
1200a9148abdSDoug Rabson 		do {
1201a9148abdSDoug Rabson 			if (!svc_request_space_available(pool))
1202a9148abdSDoug Rabson 				break;
1203a9148abdSDoug Rabson 			rqstp = NULL;
1204a9148abdSDoug Rabson 			stat = svc_getreq(xprt, &rqstp);
1205a9148abdSDoug Rabson 			if (rqstp) {
1206f8fb069dSAlexander Motin 				svc_change_space_used(pool, rqstp->rq_size);
1207a9148abdSDoug Rabson 				/*
1208b776fb2dSAlexander Motin 				 * See if the application has a preference
1209b776fb2dSAlexander Motin 				 * for some other thread.
1210a9148abdSDoug Rabson 				 */
1211b776fb2dSAlexander Motin 				if (pool->sp_assign) {
1212b776fb2dSAlexander Motin 					stpref = pool->sp_assign(st, rqstp);
1213a9148abdSDoug Rabson 					STAILQ_INSERT_TAIL(&stpref->st_reqs,
1214a9148abdSDoug Rabson 					    rqstp, rq_link);
1215b776fb2dSAlexander Motin 					mtx_unlock(&stpref->st_lock);
1216b776fb2dSAlexander Motin 					rqstp->rq_thread = stpref;
1217b776fb2dSAlexander Motin 					if (stpref != st)
1218b776fb2dSAlexander Motin 						rqstp = NULL;
1219f8fb069dSAlexander Motin 				} else
1220b776fb2dSAlexander Motin 					STAILQ_INSERT_TAIL(&st->st_reqs,
1221b776fb2dSAlexander Motin 					    rqstp, rq_link);
1222b776fb2dSAlexander Motin 			}
1223b776fb2dSAlexander Motin 		} while (rqstp == NULL && stat == XPRT_MOREREQS
1224*b563304cSAlexander Motin 		    && grp->sg_state != SVCPOOL_CLOSING);
1225a9148abdSDoug Rabson 
1226a9148abdSDoug Rabson 		/*
1227b776fb2dSAlexander Motin 		 * Move this transport to the end of the active list to
1228b776fb2dSAlexander Motin 		 * ensure fairness when multiple transports are active.
1229b776fb2dSAlexander Motin 		 * If this was the last queued request, svc_getreq will end
1230b776fb2dSAlexander Motin 		 * up calling xprt_inactive to remove it from the active list.
1231a9148abdSDoug Rabson 		 */
1232*b563304cSAlexander Motin 		mtx_lock(&grp->sg_lock);
1233a9148abdSDoug Rabson 		xprt->xp_thread = NULL;
1234a9148abdSDoug Rabson 		st->st_xprt = NULL;
1235a9148abdSDoug Rabson 		if (xprt->xp_active) {
1236f8fb069dSAlexander Motin 			if (!svc_request_space_available(pool) ||
1237f8fb069dSAlexander Motin 			    !xprt_assignthread(xprt))
1238*b563304cSAlexander Motin 				TAILQ_INSERT_TAIL(&grp->sg_active,
1239ba981145SAlexander Motin 				    xprt, xp_alink);
1240a9148abdSDoug Rabson 		}
1241*b563304cSAlexander Motin 		mtx_unlock(&grp->sg_lock);
1242a9148abdSDoug Rabson 		SVC_RELEASE(xprt);
1243a9148abdSDoug Rabson 
1244a9148abdSDoug Rabson 		/*
1245a9148abdSDoug Rabson 		 * Execute what we have queued.
1246a9148abdSDoug Rabson 		 */
1247f8fb069dSAlexander Motin 		sz = 0;
1248b776fb2dSAlexander Motin 		mtx_lock(&st->st_lock);
1249b776fb2dSAlexander Motin 		while ((rqstp = STAILQ_FIRST(&st->st_reqs)) != NULL) {
1250b776fb2dSAlexander Motin 			STAILQ_REMOVE_HEAD(&st->st_reqs, rq_link);
1251b776fb2dSAlexander Motin 			mtx_unlock(&st->st_lock);
1252f8fb069dSAlexander Motin 			sz += rqstp->rq_size;
1253a9148abdSDoug Rabson 			svc_executereq(rqstp);
1254b776fb2dSAlexander Motin 			mtx_lock(&st->st_lock);
1255a9148abdSDoug Rabson 		}
1256b776fb2dSAlexander Motin 		mtx_unlock(&st->st_lock);
1257f8fb069dSAlexander Motin 		svc_change_space_used(pool, -sz);
1258*b563304cSAlexander Motin 		mtx_lock(&grp->sg_lock);
1259a9148abdSDoug Rabson 	}
1260a9148abdSDoug Rabson 
1261a9148abdSDoug Rabson 	if (st->st_xprt) {
1262a9148abdSDoug Rabson 		xprt = st->st_xprt;
1263a9148abdSDoug Rabson 		st->st_xprt = NULL;
1264a9148abdSDoug Rabson 		SVC_RELEASE(xprt);
1265a9148abdSDoug Rabson 	}
1266a9148abdSDoug Rabson 	KASSERT(STAILQ_EMPTY(&st->st_reqs), ("stray reqs on exit"));
1267b776fb2dSAlexander Motin 	mtx_destroy(&st->st_lock);
1268a9148abdSDoug Rabson 	cv_destroy(&st->st_cond);
1269a9148abdSDoug Rabson 	mem_free(st, sizeof(*st));
1270a9148abdSDoug Rabson 
1271*b563304cSAlexander Motin 	grp->sg_threadcount--;
1272a9148abdSDoug Rabson 	if (!ismaster)
1273*b563304cSAlexander Motin 		wakeup(grp);
1274*b563304cSAlexander Motin 	mtx_unlock(&grp->sg_lock);
1275a9148abdSDoug Rabson }
1276a9148abdSDoug Rabson 
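/*
 * Kernel thread entry point for threads created by svc_new_thread().
 */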
1277a9148abdSDoug Rabson static void
1278a9148abdSDoug Rabson svc_thread_start(void *arg)
1279a9148abdSDoug Rabson {
1280a9148abdSDoug Rabson 
1281*b563304cSAlexander Motin 	svc_run_internal((SVCGROUP *) arg, FALSE);
1282a9148abdSDoug Rabson 	kthread_exit();
1283a9148abdSDoug Rabson }
1284a9148abdSDoug Rabson 
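/*
 * Create an additional kernel thread to service the given thread group.
 */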
1285a9148abdSDoug Rabson static void
1286*b563304cSAlexander Motin svc_new_thread(SVCGROUP *grp)
1287a9148abdSDoug Rabson {
1288*b563304cSAlexander Motin 	SVCPOOL *pool = grp->sg_pool;
1289a9148abdSDoug Rabson 	struct thread *td;
1290a9148abdSDoug Rabson 
1291*b563304cSAlexander Motin 	grp->sg_threadcount++;
1292*b563304cSAlexander Motin 	kthread_add(svc_thread_start, grp, pool->sp_proc, &td, 0, 0,
1293a9148abdSDoug Rabson 	    "%s: service", pool->sp_name);
1294dfdcada3SDoug Rabson }
1295dfdcada3SDoug Rabson 
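/*
 * Run the RPC service.  The calling thread becomes the group 0 master
 * thread: the pool is split into thread groups, the minimum number of
 * threads is started for each group, and the caller then services
 * requests itself until the pool is shut down, finally waiting for all
 * remaining threads to stop.
 */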
1296dfdcada3SDoug Rabson void
1297dfdcada3SDoug Rabson svc_run(SVCPOOL *pool)
1298dfdcada3SDoug Rabson {
1299*b563304cSAlexander Motin 	int g, i;
1300a9148abdSDoug Rabson 	struct proc *p;
1301a9148abdSDoug Rabson 	struct thread *td;
1302*b563304cSAlexander Motin 	SVCGROUP *grp;
1303dfdcada3SDoug Rabson 
1304a9148abdSDoug Rabson 	p = curproc;
1305a9148abdSDoug Rabson 	td = curthread;
1306a9148abdSDoug Rabson 	snprintf(td->td_name, sizeof(td->td_name),
1307a9148abdSDoug Rabson 	    "%s: master", pool->sp_name);
1308a9148abdSDoug Rabson 	pool->sp_state = SVCPOOL_ACTIVE;
1309a9148abdSDoug Rabson 	pool->sp_proc = p;
1310dfdcada3SDoug Rabson 
1311*b563304cSAlexander Motin 	/* Choose group count based on number of threads and CPUs. */
1312*b563304cSAlexander Motin 	pool->sp_groupcount = max(1, min(SVC_MAXGROUPS,
1313*b563304cSAlexander Motin 	    min(pool->sp_maxthreads / 2, mp_ncpus) / 6));
1314*b563304cSAlexander Motin 	for (g = 0; g < pool->sp_groupcount; g++) {
1315*b563304cSAlexander Motin 		grp = &pool->sp_groups[g];
1316*b563304cSAlexander Motin 		grp->sg_minthreads = max(1,
1317*b563304cSAlexander Motin 		    pool->sp_minthreads / pool->sp_groupcount);
1318*b563304cSAlexander Motin 		grp->sg_maxthreads = max(1,
1319*b563304cSAlexander Motin 		    pool->sp_maxthreads / pool->sp_groupcount);
1320*b563304cSAlexander Motin 		grp->sg_lastcreatetime = time_uptime;
1321dfdcada3SDoug Rabson 	}
1322dfdcada3SDoug Rabson 
1323*b563304cSAlexander Motin 	/* Start the minimum number of threads in each group. */
1324*b563304cSAlexander Motin 	for (g = 0; g < pool->sp_groupcount; g++) {
1325*b563304cSAlexander Motin 		grp = &pool->sp_groups[g];
1326*b563304cSAlexander Motin 		for (i = ((g == 0) ? 1 : 0); i < grp->sg_minthreads; i++)
1327*b563304cSAlexander Motin 			svc_new_thread(grp);
1328*b563304cSAlexander Motin 	}
1329*b563304cSAlexander Motin 	pool->sp_groups[0].sg_threadcount++;
1330*b563304cSAlexander Motin 	svc_run_internal(&pool->sp_groups[0], TRUE);
1331dfdcada3SDoug Rabson 
1332*b563304cSAlexander Motin 	/* Wait for all threads to stop. */
1333*b563304cSAlexander Motin 	for (g = 0; g < pool->sp_groupcount; g++) {
1334*b563304cSAlexander Motin 		grp = &pool->sp_groups[g];
1335*b563304cSAlexander Motin 		mtx_lock(&grp->sg_lock);
1336*b563304cSAlexander Motin 		while (grp->sg_threadcount > 0)
1337*b563304cSAlexander Motin 			msleep(grp, &grp->sg_lock, 0, "svcexit", 0);
1338*b563304cSAlexander Motin 		mtx_unlock(&grp->sg_lock);
1339*b563304cSAlexander Motin 	}
1340dfdcada3SDoug Rabson }
1341dfdcada3SDoug Rabson 
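/*
 * Request an orderly shutdown of the pool: mark the pool and each of
 * its thread groups as closing and wake any idle threads so they can
 * notice the state change and exit.
 */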
1342dfdcada3SDoug Rabson void
1343dfdcada3SDoug Rabson svc_exit(SVCPOOL *pool)
1344dfdcada3SDoug Rabson {
1345*b563304cSAlexander Motin 	SVCGROUP *grp;
1346a9148abdSDoug Rabson 	SVCTHREAD *st;
1347*b563304cSAlexander Motin 	int g;
1348a9148abdSDoug Rabson 
1349a9148abdSDoug Rabson 	pool->sp_state = SVCPOOL_CLOSING;
1350*b563304cSAlexander Motin 	for (g = 0; g < pool->sp_groupcount; g++) {
1351*b563304cSAlexander Motin 		grp = &pool->sp_groups[g];
1352*b563304cSAlexander Motin 		mtx_lock(&grp->sg_lock);
1353*b563304cSAlexander Motin 		if (grp->sg_state != SVCPOOL_CLOSING) {
1354*b563304cSAlexander Motin 			grp->sg_state = SVCPOOL_CLOSING;
1355*b563304cSAlexander Motin 			LIST_FOREACH(st, &grp->sg_idlethreads, st_ilink)
1356a9148abdSDoug Rabson 				cv_signal(&st->st_cond);
1357db7cdfeeSAlexander Motin 		}
1358*b563304cSAlexander Motin 		mtx_unlock(&grp->sg_lock);
1359*b563304cSAlexander Motin 	}
1360dfdcada3SDoug Rabson }
1361a9148abdSDoug Rabson 
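/*
 * Decode the arguments of a request using the supplied XDR routine,
 * consuming the mbuf chain that was queued with the request.
 */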
1362a9148abdSDoug Rabson bool_t
1363a9148abdSDoug Rabson svc_getargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
1364a9148abdSDoug Rabson {
1365a9148abdSDoug Rabson 	struct mbuf *m;
1366a9148abdSDoug Rabson 	XDR xdrs;
1367a9148abdSDoug Rabson 	bool_t stat;
1368a9148abdSDoug Rabson 
1369a9148abdSDoug Rabson 	m = rqstp->rq_args;
1370a9148abdSDoug Rabson 	rqstp->rq_args = NULL;
1371a9148abdSDoug Rabson 
1372a9148abdSDoug Rabson 	xdrmbuf_create(&xdrs, m, XDR_DECODE);
1373a9148abdSDoug Rabson 	stat = xargs(&xdrs, args);
1374a9148abdSDoug Rabson 	XDR_DESTROY(&xdrs);
1375a9148abdSDoug Rabson 
1376a9148abdSDoug Rabson 	return (stat);
1377a9148abdSDoug Rabson }
1378a9148abdSDoug Rabson 
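/*
 * Free any memory that was allocated while decoding the arguments and
 * release the caller's address, if one is still attached.
 */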
1379a9148abdSDoug Rabson bool_t
1380a9148abdSDoug Rabson svc_freeargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
1381a9148abdSDoug Rabson {
1382a9148abdSDoug Rabson 	XDR xdrs;
1383a9148abdSDoug Rabson 
1384a9148abdSDoug Rabson 	if (rqstp->rq_addr) {
1385a9148abdSDoug Rabson 		free(rqstp->rq_addr, M_SONAME);
1386a9148abdSDoug Rabson 		rqstp->rq_addr = NULL;
1387a9148abdSDoug Rabson 	}
1388a9148abdSDoug Rabson 
1389a9148abdSDoug Rabson 	xdrs.x_op = XDR_FREE;
1390a9148abdSDoug Rabson 	return (xargs(&xdrs, args));
1391a9148abdSDoug Rabson }
1392a9148abdSDoug Rabson 
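/*
 * Release all resources held by a request: notify the pool's sp_done
 * callback (if any), release the auth handle and transport reference,
 * and free the caller's address, any remaining argument mbufs and the
 * request structure itself.
 */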
1393a9148abdSDoug Rabson void
1394a9148abdSDoug Rabson svc_freereq(struct svc_req *rqstp)
1395a9148abdSDoug Rabson {
1396a9148abdSDoug Rabson 	SVCTHREAD *st;
1397a9148abdSDoug Rabson 	SVCPOOL *pool;
1398a9148abdSDoug Rabson 
1399a9148abdSDoug Rabson 	st = rqstp->rq_thread;
1400a9148abdSDoug Rabson 	if (st) {
1401f8fb069dSAlexander Motin 		pool = st->st_pool;
1402a9148abdSDoug Rabson 		if (pool->sp_done)
1403a9148abdSDoug Rabson 			pool->sp_done(st, rqstp);
1404a9148abdSDoug Rabson 	}
1405a9148abdSDoug Rabson 
1406a9148abdSDoug Rabson 	if (rqstp->rq_auth.svc_ah_ops)
1407a9148abdSDoug Rabson 		SVCAUTH_RELEASE(&rqstp->rq_auth);
1408a9148abdSDoug Rabson 
1409a9148abdSDoug Rabson 	if (rqstp->rq_xprt) {
1410a9148abdSDoug Rabson 		SVC_RELEASE(rqstp->rq_xprt);
1411a9148abdSDoug Rabson 	}
1412a9148abdSDoug Rabson 
1413a9148abdSDoug Rabson 	if (rqstp->rq_addr)
1414a9148abdSDoug Rabson 		free(rqstp->rq_addr, M_SONAME);
1415a9148abdSDoug Rabson 
1416a9148abdSDoug Rabson 	if (rqstp->rq_args)
1417a9148abdSDoug Rabson 		m_freem(rqstp->rq_args);
1418a9148abdSDoug Rabson 
1419a9148abdSDoug Rabson 	free(rqstp, M_RPC);
1420a9148abdSDoug Rabson }