1dfdcada3SDoug Rabson /* $NetBSD: svc.c,v 1.21 2000/07/06 03:10:35 christos Exp $ */
2dfdcada3SDoug Rabson
32e322d37SHiroki Sato /*-
451369649SPedro F. Giffuni * SPDX-License-Identifier: BSD-3-Clause
551369649SPedro F. Giffuni *
62e322d37SHiroki Sato * Copyright (c) 2009, Sun Microsystems, Inc.
72e322d37SHiroki Sato * All rights reserved.
8dfdcada3SDoug Rabson *
92e322d37SHiroki Sato * Redistribution and use in source and binary forms, with or without
102e322d37SHiroki Sato * modification, are permitted provided that the following conditions are met:
112e322d37SHiroki Sato * - Redistributions of source code must retain the above copyright notice,
122e322d37SHiroki Sato * this list of conditions and the following disclaimer.
132e322d37SHiroki Sato * - Redistributions in binary form must reproduce the above copyright notice,
142e322d37SHiroki Sato * this list of conditions and the following disclaimer in the documentation
152e322d37SHiroki Sato * and/or other materials provided with the distribution.
162e322d37SHiroki Sato * - Neither the name of Sun Microsystems, Inc. nor the names of its
172e322d37SHiroki Sato * contributors may be used to endorse or promote products derived
182e322d37SHiroki Sato * from this software without specific prior written permission.
19dfdcada3SDoug Rabson *
202e322d37SHiroki Sato * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
212e322d37SHiroki Sato * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
222e322d37SHiroki Sato * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
232e322d37SHiroki Sato * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
242e322d37SHiroki Sato * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
252e322d37SHiroki Sato * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
262e322d37SHiroki Sato * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
272e322d37SHiroki Sato * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
282e322d37SHiroki Sato * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
292e322d37SHiroki Sato * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
302e322d37SHiroki Sato * POSSIBILITY OF SUCH DAMAGE.
31dfdcada3SDoug Rabson */
32dfdcada3SDoug Rabson
33dfdcada3SDoug Rabson #include <sys/cdefs.h>
34dfdcada3SDoug Rabson /*
35dfdcada3SDoug Rabson * svc.c, Server-side remote procedure call interface.
36dfdcada3SDoug Rabson *
37dfdcada3SDoug Rabson * There are two sets of procedures here. The xprt routines are
38dfdcada3SDoug Rabson * for handling transport handles. The svc routines handle the
39dfdcada3SDoug Rabson * list of service routines.
40dfdcada3SDoug Rabson *
41dfdcada3SDoug Rabson * Copyright (C) 1984, Sun Microsystems, Inc.
42dfdcada3SDoug Rabson */
43dfdcada3SDoug Rabson
44dfdcada3SDoug Rabson #include <sys/param.h>
456a76d35cSRick Macklem #include <sys/jail.h>
46dfdcada3SDoug Rabson #include <sys/lock.h>
47dfdcada3SDoug Rabson #include <sys/kernel.h>
48a9148abdSDoug Rabson #include <sys/kthread.h>
49dfdcada3SDoug Rabson #include <sys/malloc.h>
50a9148abdSDoug Rabson #include <sys/mbuf.h>
51dfdcada3SDoug Rabson #include <sys/mutex.h>
52a9148abdSDoug Rabson #include <sys/proc.h>
53a16ff32fSJohn Baldwin #include <sys/protosw.h>
54dfdcada3SDoug Rabson #include <sys/queue.h>
55a9148abdSDoug Rabson #include <sys/socketvar.h>
56dfdcada3SDoug Rabson #include <sys/systm.h>
57b563304cSAlexander Motin #include <sys/smp.h>
58d473bac7SAlexander Motin #include <sys/sx.h>
59dfdcada3SDoug Rabson #include <sys/ucred.h>
60dfdcada3SDoug Rabson
61a16ff32fSJohn Baldwin #include <netinet/tcp.h>
62a16ff32fSJohn Baldwin
63dfdcada3SDoug Rabson #include <rpc/rpc.h>
64dfdcada3SDoug Rabson #include <rpc/rpcb_clnt.h>
65a9148abdSDoug Rabson #include <rpc/replay.h>
66dfdcada3SDoug Rabson
67ee31b83aSDoug Rabson #include <rpc/rpc_com.h>
68dfdcada3SDoug Rabson
69dfdcada3SDoug Rabson #define SVC_VERSQUIET 0x0001 /* keep quiet about vers mismatch */
70a9148abdSDoug Rabson #define version_keepquiet(xp) (SVC_EXT(xp)->xp_flags & SVC_VERSQUIET)
71dfdcada3SDoug Rabson
72dfdcada3SDoug Rabson static struct svc_callout *svc_find(SVCPOOL *pool, rpcprog_t, rpcvers_t,
73dfdcada3SDoug Rabson char *);
74b563304cSAlexander Motin static void svc_new_thread(SVCGROUP *grp);
75a9148abdSDoug Rabson static void xprt_unregister_locked(SVCXPRT *xprt);
763c42b5bfSGarrett Wollman static void svc_change_space_used(SVCPOOL *pool, long delta);
77f8fb069dSAlexander Motin static bool_t svc_request_space_available(SVCPOOL *pool);
7890f90687SAndriy Gapon static void svcpool_cleanup(SVCPOOL *pool);
79dfdcada3SDoug Rabson
80dfdcada3SDoug Rabson /* *************** SVCXPRT related stuff **************** */
81dfdcada3SDoug Rabson
82a9148abdSDoug Rabson static int svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS);
83a9148abdSDoug Rabson static int svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS);
84b563304cSAlexander Motin static int svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS);
85a9148abdSDoug Rabson
/*
 * Create and initialize an RPC service pool.
 *
 * The pool starts in SVCPOOL_INIT state with a single thread group;
 * every group slot is initialized up front so the group count can be
 * raised later.  When "sysctl_base" is non-NULL (and we are in the
 * default VNET), tuning/monitoring sysctl nodes are attached under it;
 * their lifetime is tied to pool->sp_sysctl, freed in svcpool_destroy().
 *
 * Returns the new pool (M_WAITOK allocation, so never NULL).
 */
SVCPOOL*
svcpool_create(const char *name, struct sysctl_oid_list *sysctl_base)
{
	SVCPOOL *pool;
	SVCGROUP *grp;
	int g;

	pool = malloc(sizeof(SVCPOOL), M_RPC, M_WAITOK|M_ZERO);

	mtx_init(&pool->sp_lock, "sp_lock", NULL, MTX_DEF);
	pool->sp_name = name;
	pool->sp_state = SVCPOOL_INIT;
	pool->sp_proc = NULL;
	TAILQ_INIT(&pool->sp_callouts);
	TAILQ_INIT(&pool->sp_lcallouts);
	pool->sp_minthreads = 1;
	pool->sp_maxthreads = 1;
	pool->sp_groupcount = 1;
	/* Initialize all group slots, not just the sp_groupcount in use. */
	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_init(&grp->sg_lock, "sg_lock", NULL, MTX_DEF);
		grp->sg_pool = pool;
		grp->sg_state = SVCPOOL_ACTIVE;
		TAILQ_INIT(&grp->sg_xlist);
		TAILQ_INIT(&grp->sg_active);
		LIST_INIT(&grp->sg_idlethreads);
		grp->sg_minthreads = 1;
		grp->sg_maxthreads = 1;
	}

	/*
	 * Don't use more than a quarter of mbuf clusters.  Nota bene:
	 * nmbclusters is an int, but nmbclusters*MCLBYTES may overflow
	 * on LP64 architectures, so cast to u_long to avoid undefined
	 * behavior.  (ILP32 architectures cannot have nmbclusters
	 * large enough to overflow for other reasons.)
	 */
	pool->sp_space_high = (u_long)nmbclusters * MCLBYTES / 4;
	/* Stop throttling once usage drops to two thirds of the limit. */
	pool->sp_space_low = (pool->sp_space_high / 3) * 2;

	sysctl_ctx_init(&pool->sp_sysctl);
	if (IS_DEFAULT_VNET(curvnet) && sysctl_base) {
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "minthreads", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
		    pool, 0, svcpool_minthread_sysctl, "I",
		    "Minimal number of threads");
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "maxthreads", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
		    pool, 0, svcpool_maxthread_sysctl, "I",
		    "Maximal number of threads");
		SYSCTL_ADD_PROC(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "threads", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
		    pool, 0, svcpool_threads_sysctl, "I",
		    "Current number of threads");
		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "groups", CTLFLAG_RD, &pool->sp_groupcount, 0,
		    "Number of thread groups");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used", CTLFLAG_RD,
		    &pool->sp_space_used,
		    "Space in parsed but not handled requests.");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_used_highest", CTLFLAG_RD,
		    &pool->sp_space_used_highest,
		    "Highest space used since reboot.");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_high", CTLFLAG_RW,
		    &pool->sp_space_high,
		    "Maximum space in parsed but not handled requests.");

		SYSCTL_ADD_ULONG(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_low", CTLFLAG_RW,
		    &pool->sp_space_low,
		    "Low water mark for request space.");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttled", CTLFLAG_RD,
		    &pool->sp_space_throttled, 0,
		    "Whether nfs requests are currently throttled");

		SYSCTL_ADD_INT(&pool->sp_sysctl, sysctl_base, OID_AUTO,
		    "request_space_throttle_count", CTLFLAG_RD,
		    &pool->sp_space_throttle_count, 0,
		    "Count of times throttling based on request space has occurred");
	}

	return pool;
}
177dfdcada3SDoug Rabson
/*
 * Code common to svcpool_destroy() and svcpool_close(), which cleans up
 * the pool data structures.
 *
 * All registered transports are unregistered and released, and all
 * service and connection-loss callouts are unregistered.  Transports are
 * first moved to a local list under each group lock, then shut down and
 * released with no locks held.
 */
static void
svcpool_cleanup(SVCPOOL *pool)
{
	SVCGROUP *grp;
	SVCXPRT *xprt, *nxprt;
	struct svc_callout *s;
	struct svc_loss_callout *sl;
	struct svcxprt_list cleanup;
	int g;

	TAILQ_INIT(&cleanup);

	/* Detach every transport from its group while holding sg_lock. */
	for (g = 0; g < SVC_MAXGROUPS; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		while ((xprt = TAILQ_FIRST(&grp->sg_xlist)) != NULL) {
			xprt_unregister_locked(xprt);
			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
		}
		mtx_unlock(&grp->sg_lock);
	}
	/* Shut down and release the transports with no group lock held. */
	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		if (xprt->xp_socket != NULL)
			soshutdown(xprt->xp_socket, SHUT_WR);
		SVC_RELEASE(xprt);
	}

	mtx_lock(&pool->sp_lock);
	/*
	 * svc_unreg() and svc_loss_unreg() take sp_lock themselves, so
	 * drop it around each call and re-check the list head afterwards.
	 */
	while ((s = TAILQ_FIRST(&pool->sp_callouts)) != NULL) {
		mtx_unlock(&pool->sp_lock);
		svc_unreg(pool, s->sc_prog, s->sc_vers);
		mtx_lock(&pool->sp_lock);
	}
	while ((sl = TAILQ_FIRST(&pool->sp_lcallouts)) != NULL) {
		mtx_unlock(&pool->sp_lock);
		svc_loss_unreg(pool, sl->slc_dispatch);
		mtx_lock(&pool->sp_lock);
	}
	mtx_unlock(&pool->sp_lock);
}
22290f90687SAndriy Gapon
22390f90687SAndriy Gapon void
svcpool_destroy(SVCPOOL * pool)22490f90687SAndriy Gapon svcpool_destroy(SVCPOOL *pool)
22590f90687SAndriy Gapon {
22690f90687SAndriy Gapon SVCGROUP *grp;
22790f90687SAndriy Gapon int g;
22890f90687SAndriy Gapon
22990f90687SAndriy Gapon svcpool_cleanup(pool);
230dfdcada3SDoug Rabson
231b563304cSAlexander Motin for (g = 0; g < SVC_MAXGROUPS; g++) {
232b563304cSAlexander Motin grp = &pool->sp_groups[g];
233b563304cSAlexander Motin mtx_destroy(&grp->sg_lock);
234a9148abdSDoug Rabson }
235a4fa5e6dSRick Macklem mtx_destroy(&pool->sp_lock);
236a4fa5e6dSRick Macklem
237a9148abdSDoug Rabson if (pool->sp_rcache)
238a9148abdSDoug Rabson replay_freecache(pool->sp_rcache);
239a9148abdSDoug Rabson
240a9148abdSDoug Rabson sysctl_ctx_free(&pool->sp_sysctl);
241dfdcada3SDoug Rabson free(pool, M_RPC);
242dfdcada3SDoug Rabson }
243dfdcada3SDoug Rabson
244b563304cSAlexander Motin /*
24590f90687SAndriy Gapon * Similar to svcpool_destroy(), except that it does not destroy the actual
24690f90687SAndriy Gapon * data structures. As such, "pool" may be used again.
24790f90687SAndriy Gapon */
24890f90687SAndriy Gapon void
svcpool_close(SVCPOOL * pool)24990f90687SAndriy Gapon svcpool_close(SVCPOOL *pool)
25090f90687SAndriy Gapon {
25190f90687SAndriy Gapon SVCGROUP *grp;
25290f90687SAndriy Gapon int g;
25390f90687SAndriy Gapon
25490f90687SAndriy Gapon svcpool_cleanup(pool);
25590f90687SAndriy Gapon
25690f90687SAndriy Gapon /* Now, initialize the pool's state for a fresh svc_run() call. */
25790f90687SAndriy Gapon mtx_lock(&pool->sp_lock);
25890f90687SAndriy Gapon pool->sp_state = SVCPOOL_INIT;
25990f90687SAndriy Gapon mtx_unlock(&pool->sp_lock);
26090f90687SAndriy Gapon for (g = 0; g < SVC_MAXGROUPS; g++) {
26190f90687SAndriy Gapon grp = &pool->sp_groups[g];
26290f90687SAndriy Gapon mtx_lock(&grp->sg_lock);
26390f90687SAndriy Gapon grp->sg_state = SVCPOOL_ACTIVE;
26490f90687SAndriy Gapon mtx_unlock(&grp->sg_lock);
26590f90687SAndriy Gapon }
26690f90687SAndriy Gapon }
26790f90687SAndriy Gapon
26890f90687SAndriy Gapon /*
269b563304cSAlexander Motin * Sysctl handler to get the present thread count on a pool
270b563304cSAlexander Motin */
271b563304cSAlexander Motin static int
svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS)272b563304cSAlexander Motin svcpool_threads_sysctl(SYSCTL_HANDLER_ARGS)
273a9148abdSDoug Rabson {
274b563304cSAlexander Motin SVCPOOL *pool;
275b563304cSAlexander Motin int threads, error, g;
276a9148abdSDoug Rabson
277b563304cSAlexander Motin pool = oidp->oid_arg1;
278b563304cSAlexander Motin threads = 0;
279b563304cSAlexander Motin mtx_lock(&pool->sp_lock);
280b563304cSAlexander Motin for (g = 0; g < pool->sp_groupcount; g++)
281b563304cSAlexander Motin threads += pool->sp_groups[g].sg_threadcount;
282b563304cSAlexander Motin mtx_unlock(&pool->sp_lock);
283b563304cSAlexander Motin error = sysctl_handle_int(oidp, &threads, 0, req);
284b563304cSAlexander Motin return (error);
285a9148abdSDoug Rabson }
286a9148abdSDoug Rabson
287a9148abdSDoug Rabson /*
288a9148abdSDoug Rabson * Sysctl handler to set the minimum thread count on a pool
289a9148abdSDoug Rabson */
290a9148abdSDoug Rabson static int
svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS)291a9148abdSDoug Rabson svcpool_minthread_sysctl(SYSCTL_HANDLER_ARGS)
292a9148abdSDoug Rabson {
293a9148abdSDoug Rabson SVCPOOL *pool;
294b563304cSAlexander Motin int newminthreads, error, g;
295a9148abdSDoug Rabson
296a9148abdSDoug Rabson pool = oidp->oid_arg1;
297a9148abdSDoug Rabson newminthreads = pool->sp_minthreads;
298a9148abdSDoug Rabson error = sysctl_handle_int(oidp, &newminthreads, 0, req);
299a9148abdSDoug Rabson if (error == 0 && newminthreads != pool->sp_minthreads) {
300a9148abdSDoug Rabson if (newminthreads > pool->sp_maxthreads)
301a9148abdSDoug Rabson return (EINVAL);
302a9148abdSDoug Rabson mtx_lock(&pool->sp_lock);
303a9148abdSDoug Rabson pool->sp_minthreads = newminthreads;
304b563304cSAlexander Motin for (g = 0; g < pool->sp_groupcount; g++) {
305b563304cSAlexander Motin pool->sp_groups[g].sg_minthreads = max(1,
306b563304cSAlexander Motin pool->sp_minthreads / pool->sp_groupcount);
307b563304cSAlexander Motin }
308a9148abdSDoug Rabson mtx_unlock(&pool->sp_lock);
309a9148abdSDoug Rabson }
310a9148abdSDoug Rabson return (error);
311a9148abdSDoug Rabson }
312a9148abdSDoug Rabson
313a9148abdSDoug Rabson /*
314a9148abdSDoug Rabson * Sysctl handler to set the maximum thread count on a pool
315a9148abdSDoug Rabson */
316a9148abdSDoug Rabson static int
svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS)317a9148abdSDoug Rabson svcpool_maxthread_sysctl(SYSCTL_HANDLER_ARGS)
318a9148abdSDoug Rabson {
319a9148abdSDoug Rabson SVCPOOL *pool;
320b563304cSAlexander Motin int newmaxthreads, error, g;
321a9148abdSDoug Rabson
322a9148abdSDoug Rabson pool = oidp->oid_arg1;
323a9148abdSDoug Rabson newmaxthreads = pool->sp_maxthreads;
324a9148abdSDoug Rabson error = sysctl_handle_int(oidp, &newmaxthreads, 0, req);
325a9148abdSDoug Rabson if (error == 0 && newmaxthreads != pool->sp_maxthreads) {
326a9148abdSDoug Rabson if (newmaxthreads < pool->sp_minthreads)
327a9148abdSDoug Rabson return (EINVAL);
328a9148abdSDoug Rabson mtx_lock(&pool->sp_lock);
329a9148abdSDoug Rabson pool->sp_maxthreads = newmaxthreads;
330b563304cSAlexander Motin for (g = 0; g < pool->sp_groupcount; g++) {
331b563304cSAlexander Motin pool->sp_groups[g].sg_maxthreads = max(1,
332b563304cSAlexander Motin pool->sp_maxthreads / pool->sp_groupcount);
333b563304cSAlexander Motin }
334a9148abdSDoug Rabson mtx_unlock(&pool->sp_lock);
335a9148abdSDoug Rabson }
336a9148abdSDoug Rabson return (error);
337a9148abdSDoug Rabson }
338a9148abdSDoug Rabson
/*
 * Activate a transport handle: assign it round-robin to one of the
 * pool's thread groups and append it to that group's transport list.
 * Takes a reference on the transport which is dropped when it is
 * unregistered.
 */
void
xprt_register(SVCXPRT *xprt)
{
	SVCPOOL *pool = xprt->xp_pool;
	SVCGROUP *grp;
	int g;

	SVC_ACQUIRE(xprt);
	/* Lockless round-robin group selection via an atomic counter. */
	g = atomic_fetchadd_int(&pool->sp_nextgroup, 1) % pool->sp_groupcount;
	xprt->xp_group = grp = &pool->sp_groups[g];
	mtx_lock(&grp->sg_lock);
	xprt->xp_registered = TRUE;
	xprt->xp_active = FALSE;
	TAILQ_INSERT_TAIL(&grp->sg_xlist, xprt, xp_link);
	mtx_unlock(&grp->sg_lock);
}
358dfdcada3SDoug Rabson
/*
 * De-activate a transport handle. Note: the locked version doesn't
 * release the transport - caller must do that after dropping the pool
 * lock.
 *
 * Caller must hold the transport's group lock, and the transport must
 * currently be registered.
 */
static void
xprt_unregister_locked(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_assert(&grp->sg_lock, MA_OWNED);
	KASSERT(xprt->xp_registered == TRUE,
	    ("xprt_unregister_locked: not registered"));
	/* Pull it off sg_active (if queued there) before delisting. */
	xprt_inactive_locked(xprt);
	TAILQ_REMOVE(&grp->sg_xlist, xprt, xp_link);
	xprt->xp_registered = FALSE;
}
376dfdcada3SDoug Rabson
377a9148abdSDoug Rabson void
xprt_unregister(SVCXPRT * xprt)378a9148abdSDoug Rabson xprt_unregister(SVCXPRT *xprt)
379a9148abdSDoug Rabson {
380b563304cSAlexander Motin SVCGROUP *grp = xprt->xp_group;
381a9148abdSDoug Rabson
382b563304cSAlexander Motin mtx_lock(&grp->sg_lock);
383bca2ec16SRick Macklem if (xprt->xp_registered == FALSE) {
384bca2ec16SRick Macklem /* Already unregistered by another thread */
385b563304cSAlexander Motin mtx_unlock(&grp->sg_lock);
386bca2ec16SRick Macklem return;
387bca2ec16SRick Macklem }
388a9148abdSDoug Rabson xprt_unregister_locked(xprt);
389b563304cSAlexander Motin mtx_unlock(&grp->sg_lock);
390a9148abdSDoug Rabson
391db8c27f4SRick Macklem if (xprt->xp_socket != NULL)
392db8c27f4SRick Macklem soshutdown(xprt->xp_socket, SHUT_WR);
393a9148abdSDoug Rabson SVC_RELEASE(xprt);
394a9148abdSDoug Rabson }
395a9148abdSDoug Rabson
/*
 * Attempt to assign a service thread to this transport.
 *
 * Returns TRUE if an idle thread was handed the transport (it is woken
 * via its condition variable), FALSE otherwise.  On FALSE, the group
 * may be flagged SVCPOOL_THREADWANTED to request creation of a new
 * thread; the creation itself happens later in svc_run_internal().
 * Caller must hold the group lock.
 */
static int
xprt_assignthread(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;
	SVCTHREAD *st;

	mtx_assert(&grp->sg_lock, MA_OWNED);
	st = LIST_FIRST(&grp->sg_idlethreads);
	if (st) {
		LIST_REMOVE(st, st_ilink);
		/* Reference is owned by the thread until it finishes. */
		SVC_ACQUIRE(xprt);
		xprt->xp_thread = st;
		st->st_xprt = xprt;
		cv_signal(&st->st_cond);
		return (TRUE);
	} else {
		/*
		 * See if we can create a new thread. The
		 * actual thread creation happens in
		 * svc_run_internal because our locking state
		 * is poorly defined (we are typically called
		 * from a socket upcall). Don't create more
		 * than one thread per second.
		 */
		if (grp->sg_state == SVCPOOL_ACTIVE
		    && grp->sg_lastcreatetime < time_uptime
		    && grp->sg_threadcount < grp->sg_maxthreads) {
			grp->sg_state = SVCPOOL_THREADWANTED;
		}
	}
	return (FALSE);
}
431dfdcada3SDoug Rabson
/*
 * Mark a transport as active (it has work pending).  If no thread is
 * currently handling it, try to hand it to an idle thread; failing
 * that (or when request space is exhausted), queue it on the group's
 * active list for a thread to pick up later.
 */
void
xprt_active(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_lock(&grp->sg_lock);

	if (!xprt->xp_registered) {
		/*
		 * Race with xprt_unregister - we lose.
		 */
		mtx_unlock(&grp->sg_lock);
		return;
	}

	if (!xprt->xp_active) {
		xprt->xp_active = TRUE;
		if (xprt->xp_thread == NULL) {
			/* Queue it if throttled or no thread can take it. */
			if (!svc_request_space_available(xprt->xp_pool) ||
			    !xprt_assignthread(xprt))
				TAILQ_INSERT_TAIL(&grp->sg_active, xprt,
				    xp_alink);
		}
	}

	mtx_unlock(&grp->sg_lock);
}
459dfdcada3SDoug Rabson
460dfdcada3SDoug Rabson void
xprt_inactive_locked(SVCXPRT * xprt)461a9148abdSDoug Rabson xprt_inactive_locked(SVCXPRT *xprt)
462a9148abdSDoug Rabson {
463b563304cSAlexander Motin SVCGROUP *grp = xprt->xp_group;
464a9148abdSDoug Rabson
465b563304cSAlexander Motin mtx_assert(&grp->sg_lock, MA_OWNED);
466a9148abdSDoug Rabson if (xprt->xp_active) {
467ba981145SAlexander Motin if (xprt->xp_thread == NULL)
468b563304cSAlexander Motin TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
469a9148abdSDoug Rabson xprt->xp_active = FALSE;
470a9148abdSDoug Rabson }
471a9148abdSDoug Rabson }
472a9148abdSDoug Rabson
/*
 * Locking wrapper around xprt_inactive_locked(): de-activate the
 * transport under its group lock.
 */
void
xprt_inactive(SVCXPRT *xprt)
{
	SVCGROUP *grp = xprt->xp_group;

	mtx_lock(&grp->sg_lock);
	xprt_inactive_locked(xprt);
	mtx_unlock(&grp->sg_lock);
}
482dfdcada3SDoug Rabson
/*
 * Variant of xprt_inactive() for use only when sure that the port is
 * assigned to a thread. For example, within receive handlers.
 *
 * A transport with a thread assigned is never on the group's active
 * queue (see xprt_active()/xprt_inactive_locked()), so the flag can be
 * cleared without taking the group lock.
 */
void
xprt_inactive_self(SVCXPRT *xprt)
{

	KASSERT(xprt->xp_thread != NULL,
	    ("xprt_inactive_self(%p) with NULL xp_thread", xprt));
	xprt->xp_active = FALSE;
}
4955c42b9dcSAlexander Motin
/*
 * Add a service program to the callout list.
 * The dispatch routine will be called when a rpc request for this
 * program number comes in.
 *
 * The netid is taken from the transport if it has one, otherwise from
 * "nconf" (a transport with neither must have come from
 * svc_raw_create).  On success the (prog, vers) pair is also
 * registered with the local rpcbind service when "nconf" is supplied,
 * and rpcb_set()'s result becomes the return value.  Returns FALSE if
 * a conflicting callout with a different dispatch routine exists, or
 * on allocation failure.
 */
bool_t
svc_reg(SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
    void (*dispatch)(struct svc_req *, SVCXPRT *),
    const struct netconfig *nconf)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_callout *s;
	char *netid = NULL;
	int flag = 0;

	/* VARIABLES PROTECTED BY svc_lock: s, svc_head */

	if (xprt->xp_netid) {
		netid = strdup(xprt->xp_netid, M_RPC);
		flag = 1;
	} else if (nconf && nconf->nc_netid) {
		netid = strdup(nconf->nc_netid, M_RPC);
		flag = 1;
	} /* must have been created with svc_raw_create */
	if ((netid == NULL) && (flag == 1)) {
		/* strdup failed */
		return (FALSE);
	}

	mtx_lock(&pool->sp_lock);
	if ((s = svc_find(pool, prog, vers, netid)) != NULL) {
		/* Existing callout keeps its own netid copy. */
		if (netid)
			free(netid, M_RPC);
		if (s->sc_dispatch == dispatch)
			goto rpcb_it; /* he is registering another xptr */
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}
	/* M_NOWAIT because sp_lock is held. */
	s = malloc(sizeof (struct svc_callout), M_RPC, M_NOWAIT);
	if (s == NULL) {
		if (netid)
			free(netid, M_RPC);
		mtx_unlock(&pool->sp_lock);
		return (FALSE);
	}

	s->sc_prog = prog;
	s->sc_vers = vers;
	s->sc_dispatch = dispatch;
	s->sc_netid = netid;	/* callout now owns the netid string */
	TAILQ_INSERT_TAIL(&pool->sp_callouts, s, sc_link);

	if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
		((SVCXPRT *) xprt)->xp_netid = strdup(netid, M_RPC);

rpcb_it:
	mtx_unlock(&pool->sp_lock);
	/* now register the information with the local binder service */
	if (nconf) {
		bool_t dummy;
		struct netconfig tnc;
		struct netbuf nb;
		tnc = *nconf;
		nb.buf = &xprt->xp_ltaddr;
		nb.len = xprt->xp_ltaddr.ss_len;
		dummy = rpcb_set(prog, vers, &tnc, &nb);
		return (dummy);
	}
	return (TRUE);
}
565dfdcada3SDoug Rabson
566dfdcada3SDoug Rabson /*
567dfdcada3SDoug Rabson * Remove a service program from the callout list.
568dfdcada3SDoug Rabson */
569dfdcada3SDoug Rabson void
svc_unreg(SVCPOOL * pool,const rpcprog_t prog,const rpcvers_t vers)570dfdcada3SDoug Rabson svc_unreg(SVCPOOL *pool, const rpcprog_t prog, const rpcvers_t vers)
571dfdcada3SDoug Rabson {
572dfdcada3SDoug Rabson struct svc_callout *s;
573dfdcada3SDoug Rabson
574dfdcada3SDoug Rabson /* unregister the information anyway */
575dfdcada3SDoug Rabson (void) rpcb_unset(prog, vers, NULL);
576dfdcada3SDoug Rabson mtx_lock(&pool->sp_lock);
577dfdcada3SDoug Rabson while ((s = svc_find(pool, prog, vers, NULL)) != NULL) {
578dfdcada3SDoug Rabson TAILQ_REMOVE(&pool->sp_callouts, s, sc_link);
579dfdcada3SDoug Rabson if (s->sc_netid)
580dfdcada3SDoug Rabson mem_free(s->sc_netid, sizeof (s->sc_netid) + 1);
581dfdcada3SDoug Rabson mem_free(s, sizeof (struct svc_callout));
582dfdcada3SDoug Rabson }
583dfdcada3SDoug Rabson mtx_unlock(&pool->sp_lock);
584dfdcada3SDoug Rabson }
585dfdcada3SDoug Rabson
586d473bac7SAlexander Motin /*
587d473bac7SAlexander Motin * Add a service connection loss program to the callout list.
588d473bac7SAlexander Motin * The dispatch routine will be called when some port in ths pool die.
589d473bac7SAlexander Motin */
590d473bac7SAlexander Motin bool_t
svc_loss_reg(SVCXPRT * xprt,void (* dispatch)(SVCXPRT *))591d473bac7SAlexander Motin svc_loss_reg(SVCXPRT *xprt, void (*dispatch)(SVCXPRT *))
592d473bac7SAlexander Motin {
593d473bac7SAlexander Motin SVCPOOL *pool = xprt->xp_pool;
594d473bac7SAlexander Motin struct svc_loss_callout *s;
595d473bac7SAlexander Motin
596d473bac7SAlexander Motin mtx_lock(&pool->sp_lock);
597d473bac7SAlexander Motin TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
598d473bac7SAlexander Motin if (s->slc_dispatch == dispatch)
599d473bac7SAlexander Motin break;
600d473bac7SAlexander Motin }
601d473bac7SAlexander Motin if (s != NULL) {
602d473bac7SAlexander Motin mtx_unlock(&pool->sp_lock);
603d473bac7SAlexander Motin return (TRUE);
604d473bac7SAlexander Motin }
6058576dc00SAlexander Motin s = malloc(sizeof(struct svc_loss_callout), M_RPC, M_NOWAIT);
606d473bac7SAlexander Motin if (s == NULL) {
607d473bac7SAlexander Motin mtx_unlock(&pool->sp_lock);
608d473bac7SAlexander Motin return (FALSE);
609d473bac7SAlexander Motin }
610d473bac7SAlexander Motin s->slc_dispatch = dispatch;
611d473bac7SAlexander Motin TAILQ_INSERT_TAIL(&pool->sp_lcallouts, s, slc_link);
612d473bac7SAlexander Motin mtx_unlock(&pool->sp_lock);
613d473bac7SAlexander Motin return (TRUE);
614d473bac7SAlexander Motin }
615d473bac7SAlexander Motin
616d473bac7SAlexander Motin /*
617d473bac7SAlexander Motin * Remove a service connection loss program from the callout list.
618d473bac7SAlexander Motin */
619d473bac7SAlexander Motin void
svc_loss_unreg(SVCPOOL * pool,void (* dispatch)(SVCXPRT *))620d473bac7SAlexander Motin svc_loss_unreg(SVCPOOL *pool, void (*dispatch)(SVCXPRT *))
621d473bac7SAlexander Motin {
622d473bac7SAlexander Motin struct svc_loss_callout *s;
623d473bac7SAlexander Motin
624d473bac7SAlexander Motin mtx_lock(&pool->sp_lock);
625d473bac7SAlexander Motin TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link) {
626d473bac7SAlexander Motin if (s->slc_dispatch == dispatch) {
627d473bac7SAlexander Motin TAILQ_REMOVE(&pool->sp_lcallouts, s, slc_link);
628d473bac7SAlexander Motin free(s, M_RPC);
629d473bac7SAlexander Motin break;
630d473bac7SAlexander Motin }
631d473bac7SAlexander Motin }
632d473bac7SAlexander Motin mtx_unlock(&pool->sp_lock);
633d473bac7SAlexander Motin }
634d473bac7SAlexander Motin
635dfdcada3SDoug Rabson /* ********************** CALLOUT list related stuff ************* */
636dfdcada3SDoug Rabson
637dfdcada3SDoug Rabson /*
638dfdcada3SDoug Rabson * Search the callout list for a program number, return the callout
639dfdcada3SDoug Rabson * struct.
640dfdcada3SDoug Rabson */
641dfdcada3SDoug Rabson static struct svc_callout *
svc_find(SVCPOOL * pool,rpcprog_t prog,rpcvers_t vers,char * netid)642dfdcada3SDoug Rabson svc_find(SVCPOOL *pool, rpcprog_t prog, rpcvers_t vers, char *netid)
643dfdcada3SDoug Rabson {
644dfdcada3SDoug Rabson struct svc_callout *s;
645dfdcada3SDoug Rabson
646dfdcada3SDoug Rabson mtx_assert(&pool->sp_lock, MA_OWNED);
647dfdcada3SDoug Rabson TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
648dfdcada3SDoug Rabson if (s->sc_prog == prog && s->sc_vers == vers
649dfdcada3SDoug Rabson && (netid == NULL || s->sc_netid == NULL ||
650dfdcada3SDoug Rabson strcmp(netid, s->sc_netid) == 0))
651dfdcada3SDoug Rabson break;
652dfdcada3SDoug Rabson }
653dfdcada3SDoug Rabson
654dfdcada3SDoug Rabson return (s);
655dfdcada3SDoug Rabson }
656dfdcada3SDoug Rabson
657dfdcada3SDoug Rabson /* ******************* REPLY GENERATION ROUTINES ************ */
658dfdcada3SDoug Rabson
/*
 * Common tail for all reply paths: free the request arguments, record
 * the reply in the replay cache (if enabled), wrap the body through the
 * auth layer and hand it to the transport.  Consumes 'body' and the
 * request's cached caller address.  Returns TRUE if the transport
 * accepted the reply.
 */
static bool_t
svc_sendreply_common(struct svc_req *rqstp, struct rpc_msg *rply,
    struct mbuf *body)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	bool_t ok;

	/* The decoded arguments are no longer needed once we reply. */
	if (rqstp->rq_args) {
		m_freem(rqstp->rq_args);
		rqstp->rq_args = NULL;
	}

	/* Remember the reply so duplicate requests get the same answer. */
	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    rply, svc_getrpccaller(rqstp), body);

	if (!SVCAUTH_WRAP(&rqstp->rq_auth, &body))
		return (FALSE);

	ok = SVC_REPLY(xprt, rply, rqstp->rq_addr, body, &rqstp->rq_reply_seq);
	if (rqstp->rq_addr) {
		free(rqstp->rq_addr, M_SONAME);
		rqstp->rq_addr = NULL;
	}

	return (ok);
}
686a9148abdSDoug Rabson
687dfdcada3SDoug Rabson /*
688dfdcada3SDoug Rabson * Send a reply to an rpc request
689dfdcada3SDoug Rabson */
690dfdcada3SDoug Rabson bool_t
svc_sendreply(struct svc_req * rqstp,xdrproc_t xdr_results,void * xdr_location)691a9148abdSDoug Rabson svc_sendreply(struct svc_req *rqstp, xdrproc_t xdr_results, void * xdr_location)
692a9148abdSDoug Rabson {
693a9148abdSDoug Rabson struct rpc_msg rply;
694a9148abdSDoug Rabson struct mbuf *m;
695a9148abdSDoug Rabson XDR xdrs;
696a9148abdSDoug Rabson bool_t ok;
697a9148abdSDoug Rabson
698a9148abdSDoug Rabson rply.rm_xid = rqstp->rq_xid;
699a9148abdSDoug Rabson rply.rm_direction = REPLY;
700a9148abdSDoug Rabson rply.rm_reply.rp_stat = MSG_ACCEPTED;
701a9148abdSDoug Rabson rply.acpted_rply.ar_verf = rqstp->rq_verf;
702a9148abdSDoug Rabson rply.acpted_rply.ar_stat = SUCCESS;
703a9148abdSDoug Rabson rply.acpted_rply.ar_results.where = NULL;
704a9148abdSDoug Rabson rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;
705a9148abdSDoug Rabson
706bd54830bSGleb Smirnoff m = m_getcl(M_WAITOK, MT_DATA, 0);
707a9148abdSDoug Rabson xdrmbuf_create(&xdrs, m, XDR_ENCODE);
708a9148abdSDoug Rabson ok = xdr_results(&xdrs, xdr_location);
709a9148abdSDoug Rabson XDR_DESTROY(&xdrs);
710a9148abdSDoug Rabson
711a9148abdSDoug Rabson if (ok) {
712a9148abdSDoug Rabson return (svc_sendreply_common(rqstp, &rply, m));
713a9148abdSDoug Rabson } else {
714a9148abdSDoug Rabson m_freem(m);
715a9148abdSDoug Rabson return (FALSE);
716a9148abdSDoug Rabson }
717a9148abdSDoug Rabson }
718a9148abdSDoug Rabson
/*
 * Send a successful reply whose results are already XDR-encoded in the
 * mbuf chain 'm'; the chain is handed off to svc_sendreply_common().
 */
bool_t
svc_sendreply_mbuf(struct svc_req *rqstp, struct mbuf *m)
{
	struct rpc_msg rply;

	/* Accepted/SUCCESS reply header; the result bytes come from 'm'. */
	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SUCCESS;
	rply.acpted_rply.ar_results.where = NULL;
	rply.acpted_rply.ar_results.proc = (xdrproc_t) xdr_void;

	return (svc_sendreply_common(rqstp, &rply, m));
}
734dfdcada3SDoug Rabson
/*
 * No procedure error reply
 */
void
svcerr_noproc(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	/* Accepted call, but the requested procedure is not served. */
	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROC_UNAVAIL;

	/* Cache the error so a retransmitted call gets the same answer. */
	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
756dfdcada3SDoug Rabson
/*
 * Can't decode args error reply
 */
void
svcerr_decode(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	/* Accepted call whose arguments failed to XDR-decode. */
	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = GARBAGE_ARGS;

	if (xprt->xp_pool->sp_rcache)
		/*
		 * NOTE(review): unlike the sibling error paths this uses
		 * xp_rtaddr directly instead of svc_getrpccaller() —
		 * confirm whether the difference is intentional.
		 */
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, (struct sockaddr *) &xprt->xp_rtaddr, NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
778dfdcada3SDoug Rabson
/*
 * Some system error
 */
void
svcerr_systemerr(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	/* Accepted call that failed for a server-internal reason. */
	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = SYSTEM_ERR;

	/* Cache the error so a retransmitted call gets the same answer. */
	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
800dfdcada3SDoug Rabson
/*
 * Authentication error reply
 */
void
svcerr_auth(struct svc_req *rqstp, enum auth_stat why)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	/* Denied call: AUTH_ERROR with the specific reason in rj_why. */
	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_DENIED;
	rply.rjcted_rply.rj_stat = AUTH_ERROR;
	rply.rjcted_rply.rj_why = why;

	/* Cache the error so a retransmitted call gets the same answer. */
	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
822dfdcada3SDoug Rabson
/*
 * Auth too weak error reply
 */
void
svcerr_weakauth(struct svc_req *rqstp)
{

	/* Convenience wrapper: deny the call with AUTH_TOOWEAK. */
	svcerr_auth(rqstp, AUTH_TOOWEAK);
}
832dfdcada3SDoug Rabson
/*
 * Program unavailable error reply
 */
void
svcerr_noprog(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	/* Accepted call, but no such program is registered. */
	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_UNAVAIL;

	/* Cache the error so a retransmitted call gets the same answer. */
	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
854dfdcada3SDoug Rabson
/*
 * Program version mismatch error reply.  'low_vers'/'high_vers' report
 * the range of versions the server does serve for this program.
 */
void
svcerr_progvers(struct svc_req *rqstp, rpcvers_t low_vers, rpcvers_t high_vers)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	struct rpc_msg rply;

	/* Accepted call; program exists but not at the requested version. */
	rply.rm_xid = rqstp->rq_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = rqstp->rq_verf;
	rply.acpted_rply.ar_stat = PROG_MISMATCH;
	rply.acpted_rply.ar_vers.low = (uint32_t)low_vers;
	rply.acpted_rply.ar_vers.high = (uint32_t)high_vers;

	/* Cache the error so a retransmitted call gets the same answer. */
	if (xprt->xp_pool->sp_rcache)
		replay_setreply(xprt->xp_pool->sp_rcache,
		    &rply, svc_getrpccaller(rqstp), NULL);

	svc_sendreply_common(rqstp, &rply, NULL);
}
878a9148abdSDoug Rabson
879a9148abdSDoug Rabson /*
880a9148abdSDoug Rabson * Allocate a new server transport structure. All fields are
881a9148abdSDoug Rabson * initialized to zero and xp_p3 is initialized to point at an
882a9148abdSDoug Rabson * extension structure to hold various flags and authentication
883a9148abdSDoug Rabson * parameters.
884a9148abdSDoug Rabson */
885a9148abdSDoug Rabson SVCXPRT *
svc_xprt_alloc(void)886462984cbSEnji Cooper svc_xprt_alloc(void)
887a9148abdSDoug Rabson {
888a9148abdSDoug Rabson SVCXPRT *xprt;
889a9148abdSDoug Rabson SVCXPRT_EXT *ext;
890a9148abdSDoug Rabson
891a9148abdSDoug Rabson xprt = mem_alloc(sizeof(SVCXPRT));
892a9148abdSDoug Rabson ext = mem_alloc(sizeof(SVCXPRT_EXT));
893a9148abdSDoug Rabson xprt->xp_p3 = ext;
894a9148abdSDoug Rabson refcount_init(&xprt->xp_refs, 1);
895a9148abdSDoug Rabson
896a9148abdSDoug Rabson return (xprt);
897a9148abdSDoug Rabson }
898a9148abdSDoug Rabson
/*
 * Free a server transport structure and its extension data.
 */
void
svc_xprt_free(SVCXPRT *xprt)
{

	mem_free(xprt->xp_p3, sizeof(SVCXPRT_EXT));
	/* The size argument is ignored, so 0 is ok. */
	/* NOTE(review): presumably xp_gidp may be NULL here — mem_free
	 * is assumed to tolerate that; confirm. */
	mem_free(xprt->xp_gidp, 0);
	mem_free(xprt, sizeof(SVCXPRT));
}
911dfdcada3SDoug Rabson
912dfdcada3SDoug Rabson /* ******************* SERVER INPUT STUFF ******************* */
913dfdcada3SDoug Rabson
/*
 * Read RPC requests from a transport and queue them to be
 * executed. We handle authentication and replay cache replies here.
 * Actually dispatching the RPC is deferred till svc_executereq.
 *
 * On success *rqstp_ret points at a fully authenticated request owned
 * by the caller; on any error path the request is freed here.  The
 * transport's status is returned; if it has died, loss callouts are
 * notified and the transport is unregistered.
 */
static enum xprt_stat
svc_getreq(SVCXPRT *xprt, struct svc_req **rqstp_ret)
{
	SVCPOOL *pool = xprt->xp_pool;
	struct svc_req *r;
	struct rpc_msg msg;
	struct mbuf *args;
	struct svc_loss_callout *s;
	enum xprt_stat stat;

	/* now receive msgs from xprtprt (support batch calls) */
	r = malloc(sizeof(*r), M_RPC, M_WAITOK|M_ZERO);

	/*
	 * Carve rq_credarea into three slices: call credentials, call
	 * verifier, and space for the decoded client credentials.
	 */
	msg.rm_call.cb_cred.oa_base = r->rq_credarea;
	msg.rm_call.cb_verf.oa_base = &r->rq_credarea[MAX_AUTH_BYTES];
	r->rq_clntcred = &r->rq_credarea[2*MAX_AUTH_BYTES];
	if (SVC_RECV(xprt, &msg, &r->rq_addr, &args)) {
		enum auth_stat why;

		/*
		 * Handle replays and authenticate before queuing the
		 * request to be executed.
		 */
		SVC_ACQUIRE(xprt);
		r->rq_xprt = xprt;
		if (pool->sp_rcache) {
			struct rpc_msg repmsg;
			struct mbuf *repbody;
			enum replay_state rs;
			rs = replay_find(pool->sp_rcache, &msg,
			    svc_getrpccaller(r), &repmsg, &repbody);
			switch (rs) {
			case RS_NEW:
				break;
			case RS_DONE:
				/*
				 * Duplicate of a completed call:
				 * resend the cached reply and drop
				 * this request.
				 */
				SVC_REPLY(xprt, &repmsg, r->rq_addr,
				    repbody, &r->rq_reply_seq);
				if (r->rq_addr) {
					free(r->rq_addr, M_SONAME);
					r->rq_addr = NULL;
				}
				m_freem(args);
				goto call_done;

			default:
				/* Any other replay state: drop silently. */
				m_freem(args);
				goto call_done;
			}
		}

		/* Record call identity and account for the request size. */
		r->rq_xid = msg.rm_xid;
		r->rq_prog = msg.rm_call.cb_prog;
		r->rq_vers = msg.rm_call.cb_vers;
		r->rq_proc = msg.rm_call.cb_proc;
		r->rq_size = sizeof(*r) + m_length(args, NULL);
		r->rq_args = args;
		if ((why = _authenticate(r, &msg)) != AUTH_OK) {
			/*
			 * RPCSEC_GSS uses this return code
			 * for requests that form part of its
			 * context establishment protocol and
			 * should not be dispatched to the
			 * application.
			 */
			if (why != RPCSEC_GSS_NODISPATCH)
				svcerr_auth(r, why);
			goto call_done;
		}

		if (!SVCAUTH_UNWRAP(&r->rq_auth, &r->rq_args)) {
			svcerr_decode(r);
			goto call_done;
		}

		/*
		 * Defer enabling DDP until the first non-NULLPROC RPC
		 * is received to allow STARTTLS authentication to
		 * enable TLS offload first.
		 */
		if (xprt->xp_doneddp == 0 && r->rq_proc != NULLPROC &&
		    xprt->xp_socket != NULL &&
		    atomic_cmpset_int(&xprt->xp_doneddp, 0, 1)) {
			if (xprt->xp_socket->so_proto->pr_protocol ==
			    IPPROTO_TCP) {
				int optval = 1;

				(void)so_setsockopt(xprt->xp_socket,
				    IPPROTO_TCP, TCP_USE_DDP, &optval,
				    sizeof(optval));
			}
		}

		/*
		 * Everything checks out, return request to caller.
		 */
		*rqstp_ret = r;
		r = NULL;
	}
call_done:
	/* 'r' is still non-NULL on receive failure or any error path. */
	if (r) {
		svc_freereq(r);
		r = NULL;
	}
	if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
		/* Notify connection-loss callouts, then drop the transport. */
		TAILQ_FOREACH(s, &pool->sp_lcallouts, slc_link)
			(*s->slc_dispatch)(xprt);
		xprt_unregister(xprt);
	}

	return (stat);
}
1030a9148abdSDoug Rabson
/*
 * Dispatch a queued request to the registered service matching its
 * program and version, or send the appropriate error reply if no
 * match exists.  On dispatch, ownership of the request passes to the
 * service's dispatch routine.
 */
static void
svc_executereq(struct svc_req *rqstp)
{
	SVCXPRT *xprt = rqstp->rq_xprt;
	SVCPOOL *pool = xprt->xp_pool;
	int prog_found;
	rpcvers_t low_vers;
	rpcvers_t high_vers;
	struct svc_callout *s;

	/* now match message with a registered service*/
	prog_found = FALSE;
	low_vers = (rpcvers_t) -1L;
	high_vers = (rpcvers_t) 0L;
	TAILQ_FOREACH(s, &pool->sp_callouts, sc_link) {
		if (s->sc_prog == rqstp->rq_prog) {
			if (s->sc_vers == rqstp->rq_vers) {
				/*
				 * We hand ownership of r to the
				 * dispatch method - they must call
				 * svc_freereq.
				 */
				(*s->sc_dispatch)(rqstp, xprt);
				return;
			} /* found correct version */
			prog_found = TRUE;
			/* Track served version range for PROG_MISMATCH. */
			if (s->sc_vers < low_vers)
				low_vers = s->sc_vers;
			if (s->sc_vers > high_vers)
				high_vers = s->sc_vers;
		} /* found correct program */
	}

	/*
	 * if we got here, the program or version
	 * is not served ...
	 */
	if (prog_found)
		svcerr_progvers(rqstp, low_vers, high_vers);
	else
		svcerr_noprog(rqstp);

	/* Error replies do not consume the request; free it here. */
	svc_freereq(rqstp);
}
1075a9148abdSDoug Rabson
/*
 * Time out idle transports in this group.  Called with sg_lock held;
 * the lock is dropped while the timed-out transports are shut down,
 * and re-acquired before returning.
 */
static void
svc_checkidle(SVCGROUP *grp)
{
	SVCXPRT *xprt, *nxprt;
	time_t timo;
	struct svcxprt_list cleanup;

	TAILQ_INIT(&cleanup);
	TAILQ_FOREACH_SAFE(xprt, &grp->sg_xlist, xp_link, nxprt) {
		/*
		 * Only some transports have idle timers. Don't time
		 * something out which is just waking up.
		 */
		if (!xprt->xp_idletimeout || xprt->xp_thread)
			continue;

		/* Collect expired transports for cleanup outside the lock. */
		timo = xprt->xp_lastactive + xprt->xp_idletimeout;
		if (time_uptime > timo) {
			xprt_unregister_locked(xprt);
			TAILQ_INSERT_TAIL(&cleanup, xprt, xp_link);
		}
	}

	mtx_unlock(&grp->sg_lock);
	TAILQ_FOREACH_SAFE(xprt, &cleanup, xp_link, nxprt) {
		/* Half-close so the peer sees EOF, then drop our reference. */
		soshutdown(xprt->xp_socket, SHUT_WR);
		SVC_RELEASE(xprt);
	}
	mtx_lock(&grp->sg_lock);
}
1106a9148abdSDoug Rabson
1107a9148abdSDoug Rabson static void
svc_assign_waiting_sockets(SVCPOOL * pool)1108a9148abdSDoug Rabson svc_assign_waiting_sockets(SVCPOOL *pool)
1109a9148abdSDoug Rabson {
1110b563304cSAlexander Motin SVCGROUP *grp;
1111a9148abdSDoug Rabson SVCXPRT *xprt;
1112b563304cSAlexander Motin int g;
1113a9148abdSDoug Rabson
1114b563304cSAlexander Motin for (g = 0; g < pool->sp_groupcount; g++) {
1115b563304cSAlexander Motin grp = &pool->sp_groups[g];
1116b563304cSAlexander Motin mtx_lock(&grp->sg_lock);
1117b563304cSAlexander Motin while ((xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) {
1118ba981145SAlexander Motin if (xprt_assignthread(xprt))
1119b563304cSAlexander Motin TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
1120ba981145SAlexander Motin else
1121ba981145SAlexander Motin break;
1122a9148abdSDoug Rabson }
1123b563304cSAlexander Motin mtx_unlock(&grp->sg_lock);
1124b563304cSAlexander Motin }
1125f8fb069dSAlexander Motin }
1126f8fb069dSAlexander Motin
/*
 * Adjust the pool's outstanding-request space accounting by 'delta'
 * bytes (positive when a request is queued, negative when released)
 * and update the throttling state across the high/low watermarks.
 *
 * NOTE(review): only sp_space_used is updated atomically; the
 * watermark flags and statistics are updated without a lock and
 * appear to tolerate races — confirm.
 */
static void
svc_change_space_used(SVCPOOL *pool, long delta)
{
	unsigned long value;

	value = atomic_fetchadd_long(&pool->sp_space_used, delta) + delta;
	if (delta > 0) {
		/* Start throttling once usage crosses the high watermark. */
		if (value >= pool->sp_space_high && !pool->sp_space_throttled) {
			pool->sp_space_throttled = TRUE;
			pool->sp_space_throttle_count++;
		}
		if (value > pool->sp_space_used_highest)
			pool->sp_space_used_highest = value;
	} else {
		/*
		 * Stop throttling below the low watermark and kick any
		 * transports that queued up while throttled.
		 */
		if (value < pool->sp_space_low && pool->sp_space_throttled) {
			pool->sp_space_throttled = FALSE;
			svc_assign_waiting_sockets(pool);
		}
	}
}
1147a9148abdSDoug Rabson
1148a9148abdSDoug Rabson static bool_t
svc_request_space_available(SVCPOOL * pool)1149a9148abdSDoug Rabson svc_request_space_available(SVCPOOL *pool)
1150a9148abdSDoug Rabson {
1151a9148abdSDoug Rabson
1152f8fb069dSAlexander Motin if (pool->sp_space_throttled)
1153f8fb069dSAlexander Motin return (FALSE);
1154f8fb069dSAlexander Motin return (TRUE);
1155a9148abdSDoug Rabson }
1156a9148abdSDoug Rabson
1157a9148abdSDoug Rabson static void
svc_run_internal(SVCGROUP * grp,bool_t ismaster)1158b563304cSAlexander Motin svc_run_internal(SVCGROUP *grp, bool_t ismaster)
1159a9148abdSDoug Rabson {
1160b563304cSAlexander Motin SVCPOOL *pool = grp->sg_pool;
1161a9148abdSDoug Rabson SVCTHREAD *st, *stpref;
1162a9148abdSDoug Rabson SVCXPRT *xprt;
1163a9148abdSDoug Rabson enum xprt_stat stat;
1164a9148abdSDoug Rabson struct svc_req *rqstp;
1165f87c8878SKonstantin Belousov struct proc *p;
11663c42b5bfSGarrett Wollman long sz;
1167a9148abdSDoug Rabson int error;
1168a9148abdSDoug Rabson
1169a9148abdSDoug Rabson st = mem_alloc(sizeof(*st));
1170b776fb2dSAlexander Motin mtx_init(&st->st_lock, "st_lock", NULL, MTX_DEF);
1171f8fb069dSAlexander Motin st->st_pool = pool;
1172a9148abdSDoug Rabson st->st_xprt = NULL;
1173a9148abdSDoug Rabson STAILQ_INIT(&st->st_reqs);
1174a9148abdSDoug Rabson cv_init(&st->st_cond, "rpcsvc");
1175a9148abdSDoug Rabson
1176b563304cSAlexander Motin mtx_lock(&grp->sg_lock);
1177a9148abdSDoug Rabson
1178a9148abdSDoug Rabson /*
1179a9148abdSDoug Rabson * If we are a new thread which was spawned to cope with
1180a9148abdSDoug Rabson * increased load, set the state back to SVCPOOL_ACTIVE.
1181a9148abdSDoug Rabson */
1182b563304cSAlexander Motin if (grp->sg_state == SVCPOOL_THREADSTARTING)
1183b563304cSAlexander Motin grp->sg_state = SVCPOOL_ACTIVE;
1184a9148abdSDoug Rabson
1185b563304cSAlexander Motin while (grp->sg_state != SVCPOOL_CLOSING) {
1186a9148abdSDoug Rabson /*
1187db7cdfeeSAlexander Motin * Create new thread if requested.
1188db7cdfeeSAlexander Motin */
1189b563304cSAlexander Motin if (grp->sg_state == SVCPOOL_THREADWANTED) {
1190b563304cSAlexander Motin grp->sg_state = SVCPOOL_THREADSTARTING;
1191b563304cSAlexander Motin grp->sg_lastcreatetime = time_uptime;
1192b563304cSAlexander Motin mtx_unlock(&grp->sg_lock);
1193b563304cSAlexander Motin svc_new_thread(grp);
1194b563304cSAlexander Motin mtx_lock(&grp->sg_lock);
1195db7cdfeeSAlexander Motin continue;
1196db7cdfeeSAlexander Motin }
1197db7cdfeeSAlexander Motin
1198db7cdfeeSAlexander Motin /*
1199a9148abdSDoug Rabson * Check for idle transports once per second.
1200a9148abdSDoug Rabson */
1201b563304cSAlexander Motin if (time_uptime > grp->sg_lastidlecheck) {
1202b563304cSAlexander Motin grp->sg_lastidlecheck = time_uptime;
1203b563304cSAlexander Motin svc_checkidle(grp);
1204a9148abdSDoug Rabson }
1205a9148abdSDoug Rabson
1206a9148abdSDoug Rabson xprt = st->st_xprt;
1207b776fb2dSAlexander Motin if (!xprt) {
1208a9148abdSDoug Rabson /*
1209a9148abdSDoug Rabson * Enforce maxthreads count.
1210a9148abdSDoug Rabson */
12111b09d9dfSRick Macklem if (!ismaster && grp->sg_threadcount >
12121b09d9dfSRick Macklem grp->sg_maxthreads)
1213a9148abdSDoug Rabson break;
1214a9148abdSDoug Rabson
1215a9148abdSDoug Rabson /*
1216a9148abdSDoug Rabson * Before sleeping, see if we can find an
1217a9148abdSDoug Rabson * active transport which isn't being serviced
1218a9148abdSDoug Rabson * by a thread.
1219a9148abdSDoug Rabson */
1220ba981145SAlexander Motin if (svc_request_space_available(pool) &&
1221b563304cSAlexander Motin (xprt = TAILQ_FIRST(&grp->sg_active)) != NULL) {
1222b563304cSAlexander Motin TAILQ_REMOVE(&grp->sg_active, xprt, xp_alink);
1223a9148abdSDoug Rabson SVC_ACQUIRE(xprt);
1224a9148abdSDoug Rabson xprt->xp_thread = st;
1225a9148abdSDoug Rabson st->st_xprt = xprt;
1226a9148abdSDoug Rabson continue;
1227ba981145SAlexander Motin }
1228a9148abdSDoug Rabson
1229b563304cSAlexander Motin LIST_INSERT_HEAD(&grp->sg_idlethreads, st, st_ilink);
1230db7cdfeeSAlexander Motin if (ismaster || (!ismaster &&
1231b563304cSAlexander Motin grp->sg_threadcount > grp->sg_minthreads))
1232db7cdfeeSAlexander Motin error = cv_timedwait_sig(&st->st_cond,
1233b563304cSAlexander Motin &grp->sg_lock, 5 * hz);
1234db7cdfeeSAlexander Motin else
1235db7cdfeeSAlexander Motin error = cv_wait_sig(&st->st_cond,
1236b563304cSAlexander Motin &grp->sg_lock);
1237b5d7fb73SAlexander Motin if (st->st_xprt == NULL)
1238a9148abdSDoug Rabson LIST_REMOVE(st, st_ilink);
1239a9148abdSDoug Rabson
1240a9148abdSDoug Rabson /*
1241a9148abdSDoug Rabson * Reduce worker thread count when idle.
1242a9148abdSDoug Rabson */
1243a9148abdSDoug Rabson if (error == EWOULDBLOCK) {
1244a9148abdSDoug Rabson if (!ismaster
1245b563304cSAlexander Motin && (grp->sg_threadcount
1246b563304cSAlexander Motin > grp->sg_minthreads)
1247b776fb2dSAlexander Motin && !st->st_xprt)
1248dfdcada3SDoug Rabson break;
1249f87c8878SKonstantin Belousov } else if (error != 0) {
1250f87c8878SKonstantin Belousov KASSERT(error == EINTR || error == ERESTART,
1251f87c8878SKonstantin Belousov ("non-signal error %d", error));
1252b563304cSAlexander Motin mtx_unlock(&grp->sg_lock);
1253f87c8878SKonstantin Belousov p = curproc;
1254f87c8878SKonstantin Belousov PROC_LOCK(p);
12556ddcc233SKonstantin Belousov if (P_SHOULDSTOP(p) ||
12566ddcc233SKonstantin Belousov (p->p_flag & P_TOTAL_STOP) != 0) {
1257f87c8878SKonstantin Belousov thread_suspend_check(0);
1258f87c8878SKonstantin Belousov PROC_UNLOCK(p);
1259f87c8878SKonstantin Belousov mtx_lock(&grp->sg_lock);
1260f87c8878SKonstantin Belousov } else {
1261f87c8878SKonstantin Belousov PROC_UNLOCK(p);
1262a9148abdSDoug Rabson svc_exit(pool);
1263b563304cSAlexander Motin mtx_lock(&grp->sg_lock);
1264a9148abdSDoug Rabson break;
1265a9148abdSDoug Rabson }
1266f87c8878SKonstantin Belousov }
1267a9148abdSDoug Rabson continue;
1268a9148abdSDoug Rabson }
1269b563304cSAlexander Motin mtx_unlock(&grp->sg_lock);
1270a9148abdSDoug Rabson
1271a9148abdSDoug Rabson /*
1272b776fb2dSAlexander Motin * Drain the transport socket and queue up any RPCs.
1273a9148abdSDoug Rabson */
1274a9148abdSDoug Rabson xprt->xp_lastactive = time_uptime;
1275a9148abdSDoug Rabson do {
1276a9148abdSDoug Rabson if (!svc_request_space_available(pool))
1277a9148abdSDoug Rabson break;
1278a9148abdSDoug Rabson rqstp = NULL;
1279a9148abdSDoug Rabson stat = svc_getreq(xprt, &rqstp);
1280a9148abdSDoug Rabson if (rqstp) {
1281f8fb069dSAlexander Motin svc_change_space_used(pool, rqstp->rq_size);
1282a9148abdSDoug Rabson /*
1283b776fb2dSAlexander Motin * See if the application has a preference
1284b776fb2dSAlexander Motin * for some other thread.
1285a9148abdSDoug Rabson */
1286b776fb2dSAlexander Motin if (pool->sp_assign) {
1287b776fb2dSAlexander Motin stpref = pool->sp_assign(st, rqstp);
128882dcc80dSAlexander Motin rqstp->rq_thread = stpref;
1289a9148abdSDoug Rabson STAILQ_INSERT_TAIL(&stpref->st_reqs,
1290a9148abdSDoug Rabson rqstp, rq_link);
1291b776fb2dSAlexander Motin mtx_unlock(&stpref->st_lock);
1292b776fb2dSAlexander Motin if (stpref != st)
1293b776fb2dSAlexander Motin rqstp = NULL;
129482dcc80dSAlexander Motin } else {
129582dcc80dSAlexander Motin rqstp->rq_thread = st;
1296b776fb2dSAlexander Motin STAILQ_INSERT_TAIL(&st->st_reqs,
1297b776fb2dSAlexander Motin rqstp, rq_link);
1298b776fb2dSAlexander Motin }
129982dcc80dSAlexander Motin }
1300b776fb2dSAlexander Motin } while (rqstp == NULL && stat == XPRT_MOREREQS
1301b563304cSAlexander Motin && grp->sg_state != SVCPOOL_CLOSING);
1302a9148abdSDoug Rabson
1303a9148abdSDoug Rabson /*
1304b776fb2dSAlexander Motin * Move this transport to the end of the active list to
1305b776fb2dSAlexander Motin * ensure fairness when multiple transports are active.
1306b776fb2dSAlexander Motin * If this was the last queued request, svc_getreq will end
1307b776fb2dSAlexander Motin * up calling xprt_inactive to remove from the active list.
1308a9148abdSDoug Rabson */
1309b563304cSAlexander Motin mtx_lock(&grp->sg_lock);
1310a9148abdSDoug Rabson xprt->xp_thread = NULL;
1311a9148abdSDoug Rabson st->st_xprt = NULL;
1312a9148abdSDoug Rabson if (xprt->xp_active) {
1313f8fb069dSAlexander Motin if (!svc_request_space_available(pool) ||
1314f8fb069dSAlexander Motin !xprt_assignthread(xprt))
1315b563304cSAlexander Motin TAILQ_INSERT_TAIL(&grp->sg_active,
1316ba981145SAlexander Motin xprt, xp_alink);
1317a9148abdSDoug Rabson }
1318b563304cSAlexander Motin mtx_unlock(&grp->sg_lock);
1319a9148abdSDoug Rabson SVC_RELEASE(xprt);
1320a9148abdSDoug Rabson
1321a9148abdSDoug Rabson /*
1322a9148abdSDoug Rabson * Execute what we have queued.
1323a9148abdSDoug Rabson */
1324b776fb2dSAlexander Motin mtx_lock(&st->st_lock);
1325b776fb2dSAlexander Motin while ((rqstp = STAILQ_FIRST(&st->st_reqs)) != NULL) {
1326b776fb2dSAlexander Motin STAILQ_REMOVE_HEAD(&st->st_reqs, rq_link);
1327b776fb2dSAlexander Motin mtx_unlock(&st->st_lock);
13283c42b5bfSGarrett Wollman sz = (long)rqstp->rq_size;
1329a9148abdSDoug Rabson svc_executereq(rqstp);
13303c42b5bfSGarrett Wollman svc_change_space_used(pool, -sz);
1331b776fb2dSAlexander Motin mtx_lock(&st->st_lock);
1332a9148abdSDoug Rabson }
1333b776fb2dSAlexander Motin mtx_unlock(&st->st_lock);
1334b563304cSAlexander Motin mtx_lock(&grp->sg_lock);
1335a9148abdSDoug Rabson }
1336a9148abdSDoug Rabson
1337a9148abdSDoug Rabson if (st->st_xprt) {
1338a9148abdSDoug Rabson xprt = st->st_xprt;
1339a9148abdSDoug Rabson st->st_xprt = NULL;
1340a9148abdSDoug Rabson SVC_RELEASE(xprt);
1341a9148abdSDoug Rabson }
1342a9148abdSDoug Rabson KASSERT(STAILQ_EMPTY(&st->st_reqs), ("stray reqs on exit"));
1343b776fb2dSAlexander Motin mtx_destroy(&st->st_lock);
1344a9148abdSDoug Rabson cv_destroy(&st->st_cond);
1345a9148abdSDoug Rabson mem_free(st, sizeof(*st));
1346a9148abdSDoug Rabson
1347b563304cSAlexander Motin grp->sg_threadcount--;
1348a9148abdSDoug Rabson if (!ismaster)
1349b563304cSAlexander Motin wakeup(grp);
1350b563304cSAlexander Motin mtx_unlock(&grp->sg_lock);
1351a9148abdSDoug Rabson }
1352a9148abdSDoug Rabson
1353a9148abdSDoug Rabson static void
svc_thread_start(void * arg)1354a9148abdSDoug Rabson svc_thread_start(void *arg)
1355a9148abdSDoug Rabson {
1356a9148abdSDoug Rabson
1357b563304cSAlexander Motin svc_run_internal((SVCGROUP *) arg, FALSE);
1358a9148abdSDoug Rabson kthread_exit();
1359a9148abdSDoug Rabson }
1360a9148abdSDoug Rabson
1361a9148abdSDoug Rabson static void
svc_new_thread(SVCGROUP * grp)1362b563304cSAlexander Motin svc_new_thread(SVCGROUP *grp)
1363a9148abdSDoug Rabson {
1364b563304cSAlexander Motin SVCPOOL *pool = grp->sg_pool;
1365a9148abdSDoug Rabson struct thread *td;
1366a9148abdSDoug Rabson
1367ece9d8b7SAlexander Motin mtx_lock(&grp->sg_lock);
1368b563304cSAlexander Motin grp->sg_threadcount++;
1369ece9d8b7SAlexander Motin mtx_unlock(&grp->sg_lock);
1370b563304cSAlexander Motin kthread_add(svc_thread_start, grp, pool->sp_proc, &td, 0, 0,
1371a9148abdSDoug Rabson "%s: service", pool->sp_name);
1372dfdcada3SDoug Rabson }
1373dfdcada3SDoug Rabson
/*
 * Run the RPC service pool using the calling thread as the master
 * worker.  Sizes the thread groups, spawns the minimum number of
 * worker threads, services requests in-line via svc_run_internal(),
 * and does not return until every worker thread has exited (normally
 * triggered by svc_exit()).
 */
void
svc_run(SVCPOOL *pool)
{
	int g, i;
	struct proc *p;
	struct thread *td;
	SVCGROUP *grp;

	p = curproc;
	td = curthread;
	snprintf(td->td_name, sizeof(td->td_name),
	    "%s: master", pool->sp_name);
	pool->sp_state = SVCPOOL_ACTIVE;
	pool->sp_proc = p;

	/* Choose group count based on number of threads and CPUs. */
	pool->sp_groupcount = max(1, min(SVC_MAXGROUPS,
	    min(pool->sp_maxthreads / 2, mp_ncpus) / 6));
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		/* Split the pool-wide thread limits evenly across groups. */
		grp->sg_minthreads = max(1,
		    pool->sp_minthreads / pool->sp_groupcount);
		grp->sg_maxthreads = max(1,
		    pool->sp_maxthreads / pool->sp_groupcount);
		grp->sg_lastcreatetime = time_uptime;
	}

	/* Starting threads */
	/*
	 * The calling thread itself serves group 0, so count it now and
	 * start one fewer worker for that group (i starts at 1 below).
	 */
	pool->sp_groups[0].sg_threadcount++;
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		for (i = ((g == 0) ? 1 : 0); i < grp->sg_minthreads; i++)
			svc_new_thread(grp);
	}
	/* Serve requests in-line as the master thread of group 0. */
	svc_run_internal(&pool->sp_groups[0], TRUE);

	/* Waiting for threads to stop. */
	/* Workers wakeup(grp) as they exit; sleep until each group drains. */
	for (g = 0; g < pool->sp_groupcount; g++) {
		grp = &pool->sp_groups[g];
		mtx_lock(&grp->sg_lock);
		while (grp->sg_threadcount > 0)
			msleep(grp, &grp->sg_lock, 0, "svcexit", 0);
		mtx_unlock(&grp->sg_lock);
	}
}
1419dfdcada3SDoug Rabson
1420dfdcada3SDoug Rabson void
svc_exit(SVCPOOL * pool)1421dfdcada3SDoug Rabson svc_exit(SVCPOOL *pool)
1422dfdcada3SDoug Rabson {
1423b563304cSAlexander Motin SVCGROUP *grp;
1424a9148abdSDoug Rabson SVCTHREAD *st;
1425b563304cSAlexander Motin int g;
1426a9148abdSDoug Rabson
1427a9148abdSDoug Rabson pool->sp_state = SVCPOOL_CLOSING;
1428b563304cSAlexander Motin for (g = 0; g < pool->sp_groupcount; g++) {
1429b563304cSAlexander Motin grp = &pool->sp_groups[g];
1430b563304cSAlexander Motin mtx_lock(&grp->sg_lock);
1431b563304cSAlexander Motin if (grp->sg_state != SVCPOOL_CLOSING) {
1432b563304cSAlexander Motin grp->sg_state = SVCPOOL_CLOSING;
1433b563304cSAlexander Motin LIST_FOREACH(st, &grp->sg_idlethreads, st_ilink)
1434a9148abdSDoug Rabson cv_signal(&st->st_cond);
1435db7cdfeeSAlexander Motin }
1436b563304cSAlexander Motin mtx_unlock(&grp->sg_lock);
1437b563304cSAlexander Motin }
1438dfdcada3SDoug Rabson }
1439a9148abdSDoug Rabson
1440a9148abdSDoug Rabson bool_t
svc_getargs(struct svc_req * rqstp,xdrproc_t xargs,void * args)1441a9148abdSDoug Rabson svc_getargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
1442a9148abdSDoug Rabson {
1443a9148abdSDoug Rabson struct mbuf *m;
1444a9148abdSDoug Rabson XDR xdrs;
1445a9148abdSDoug Rabson bool_t stat;
1446a9148abdSDoug Rabson
1447a9148abdSDoug Rabson m = rqstp->rq_args;
1448a9148abdSDoug Rabson rqstp->rq_args = NULL;
1449a9148abdSDoug Rabson
1450a9148abdSDoug Rabson xdrmbuf_create(&xdrs, m, XDR_DECODE);
1451a9148abdSDoug Rabson stat = xargs(&xdrs, args);
1452a9148abdSDoug Rabson XDR_DESTROY(&xdrs);
1453a9148abdSDoug Rabson
1454a9148abdSDoug Rabson return (stat);
1455a9148abdSDoug Rabson }
1456a9148abdSDoug Rabson
1457a9148abdSDoug Rabson bool_t
svc_freeargs(struct svc_req * rqstp,xdrproc_t xargs,void * args)1458a9148abdSDoug Rabson svc_freeargs(struct svc_req *rqstp, xdrproc_t xargs, void *args)
1459a9148abdSDoug Rabson {
1460a9148abdSDoug Rabson XDR xdrs;
1461a9148abdSDoug Rabson
1462a9148abdSDoug Rabson if (rqstp->rq_addr) {
1463a9148abdSDoug Rabson free(rqstp->rq_addr, M_SONAME);
1464a9148abdSDoug Rabson rqstp->rq_addr = NULL;
1465a9148abdSDoug Rabson }
1466a9148abdSDoug Rabson
1467a9148abdSDoug Rabson xdrs.x_op = XDR_FREE;
1468a9148abdSDoug Rabson return (xargs(&xdrs, args));
1469a9148abdSDoug Rabson }
1470a9148abdSDoug Rabson
1471a9148abdSDoug Rabson void
svc_freereq(struct svc_req * rqstp)1472a9148abdSDoug Rabson svc_freereq(struct svc_req *rqstp)
1473a9148abdSDoug Rabson {
1474a9148abdSDoug Rabson SVCTHREAD *st;
1475a9148abdSDoug Rabson SVCPOOL *pool;
1476a9148abdSDoug Rabson
1477a9148abdSDoug Rabson st = rqstp->rq_thread;
1478a9148abdSDoug Rabson if (st) {
1479f8fb069dSAlexander Motin pool = st->st_pool;
1480a9148abdSDoug Rabson if (pool->sp_done)
1481a9148abdSDoug Rabson pool->sp_done(st, rqstp);
1482a9148abdSDoug Rabson }
1483a9148abdSDoug Rabson
1484a9148abdSDoug Rabson if (rqstp->rq_auth.svc_ah_ops)
1485a9148abdSDoug Rabson SVCAUTH_RELEASE(&rqstp->rq_auth);
1486a9148abdSDoug Rabson
1487a9148abdSDoug Rabson if (rqstp->rq_xprt) {
1488a9148abdSDoug Rabson SVC_RELEASE(rqstp->rq_xprt);
1489a9148abdSDoug Rabson }
1490a9148abdSDoug Rabson
1491a9148abdSDoug Rabson if (rqstp->rq_addr)
1492a9148abdSDoug Rabson free(rqstp->rq_addr, M_SONAME);
1493a9148abdSDoug Rabson
1494a9148abdSDoug Rabson if (rqstp->rq_args)
1495a9148abdSDoug Rabson m_freem(rqstp->rq_args);
1496a9148abdSDoug Rabson
1497a9148abdSDoug Rabson free(rqstp, M_RPC);
1498a9148abdSDoug Rabson }
1499