xref: /freebsd/sys/fs/nfsserver/nfs_fha_new.c (revision 734e82fe33aa764367791a7d603b383996c6b40b)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
 * Copyright (c) 2013 Spectra Logic Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/sbuf.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsserver/nfs_fha_new.h>

#include <rpc/rpc.h>

static MALLOC_DEFINE(M_NFS_FHA, "NFS FHA", "NFS FHA");

static void		fhanew_init(void *foo);
static void		fhanew_uninit(void *foo);
static rpcproc_t	fhanew_get_procnum(rpcproc_t procnum);
static int		fhanew_get_fh(uint64_t *fh, int v3, struct mbuf **md,
			    caddr_t *dpos);
static int		fhanew_is_read(rpcproc_t procnum);
static int		fhanew_is_write(rpcproc_t procnum);
static int		fhanew_get_offset(struct mbuf **md, caddr_t *dpos,
			    int v3, struct fha_info *info);
static int		fhanew_no_offset(rpcproc_t procnum);
static void		fhanew_set_locktype(rpcproc_t procnum,
			    struct fha_info *info);
static int		fhenew_stats_sysctl(SYSCTL_HANDLER_ARGS);
static void		fha_extract_info(struct svc_req *req,
			    struct fha_info *i);

NFSD_VNET_DEFINE_STATIC(struct fha_params *, fhanew_softc);
NFSD_VNET_DEFINE_STATIC(struct fha_ctls, nfsfha_ctls);

SYSCTL_DECL(_vfs_nfsd);
SYSCTL_NODE(_vfs_nfsd, OID_AUTO, fha, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "NFS File Handle Affinity (FHA)");

SYSCTL_UINT(_vfs_nfsd_fha,
    OID_AUTO, enable, CTLFLAG_NFSD_VNET | CTLFLAG_RWTUN,
    &NFSD_VNET_NAME(nfsfha_ctls).enable, 0,
    "Enable NFS File Handle Affinity (FHA)");

SYSCTL_UINT(_vfs_nfsd_fha,
    OID_AUTO, read, CTLFLAG_NFSD_VNET | CTLFLAG_RWTUN,
    &NFSD_VNET_NAME(nfsfha_ctls).read, 0,
    "Enable NFS FHA read locality");

SYSCTL_UINT(_vfs_nfsd_fha,
    OID_AUTO, write, CTLFLAG_NFSD_VNET | CTLFLAG_RWTUN,
    &NFSD_VNET_NAME(nfsfha_ctls).write, 0,
    "Enable NFS FHA write locality");

SYSCTL_UINT(_vfs_nfsd_fha,
    OID_AUTO, bin_shift, CTLFLAG_NFSD_VNET | CTLFLAG_RWTUN,
    &NFSD_VNET_NAME(nfsfha_ctls).bin_shift, 0,
    "Maximum locality distance 2^(bin_shift) bytes");

SYSCTL_UINT(_vfs_nfsd_fha,
    OID_AUTO, max_nfsds_per_fh, CTLFLAG_NFSD_VNET | CTLFLAG_RWTUN,
    &NFSD_VNET_NAME(nfsfha_ctls).max_nfsds_per_fh, 0,
    "Maximum nfsd threads that "
    "should be working on requests for the same file handle");

SYSCTL_UINT(_vfs_nfsd_fha,
    OID_AUTO, max_reqs_per_nfsd, CTLFLAG_NFSD_VNET | CTLFLAG_RWTUN,
    &NFSD_VNET_NAME(nfsfha_ctls).max_reqs_per_nfsd, 0, "Maximum requests that "
    "a single nfsd thread should be working on at any time");

SYSCTL_PROC(_vfs_nfsd_fha, OID_AUTO, fhe_stats,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
    fhenew_stats_sysctl, "A", "");
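
/*
 * The knobs above appear under the vfs.nfsd.fha sysctl tree, e.g.
 * "sysctl vfs.nfsd.fha.enable=1" or "sysctl vfs.nfsd.fha.fhe_stats"
 * (illustrative invocations only).  Everything except fhe_stats is
 * CTLFLAG_RWTUN and can therefore also be preset as a loader tunable;
 * the FHA_DEF_* defaults are installed by fhanew_init() below.
 */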

extern int newnfs_nfsv3_procid[];

VNET_SYSINIT(nfs_fhanew, SI_SUB_VNET_DONE, SI_ORDER_ANY, fhanew_init, NULL);
VNET_SYSUNINIT(nfs_fhanew, SI_SUB_VNET_DONE, SI_ORDER_ANY, fhanew_uninit, NULL);

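/*
 * Per-VNET initialization: allocate the FHA softc, set its server name,
 * initialize one mutex per hash chain, and load the compile-time
 * FHA_DEF_* defaults into the sysctl-visible tunables.  Runs from
 * VNET_SYSINIT at SI_SUB_VNET_DONE.
 */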
static void
fhanew_init(void *foo)
{
	struct fha_params *softc;
	int i;

	NFSD_VNET(fhanew_softc) = malloc(sizeof(struct fha_params), M_TEMP,
	    M_WAITOK | M_ZERO);
	softc = NFSD_VNET(fhanew_softc);

	snprintf(softc->server_name, sizeof(softc->server_name),
	    FHANEW_SERVER_NAME);

	for (i = 0; i < FHA_HASH_SIZE; i++)
		mtx_init(&softc->fha_hash[i].mtx, "fhalock", NULL, MTX_DEF);

	/*
	 * Set the default tuning parameters.
	 */
	NFSD_VNET(nfsfha_ctls).enable = FHA_DEF_ENABLE;
	NFSD_VNET(nfsfha_ctls).read = FHA_DEF_READ;
	NFSD_VNET(nfsfha_ctls).write = FHA_DEF_WRITE;
	NFSD_VNET(nfsfha_ctls).bin_shift = FHA_DEF_BIN_SHIFT;
	NFSD_VNET(nfsfha_ctls).max_nfsds_per_fh = FHA_DEF_MAX_NFSDS_PER_FH;
	NFSD_VNET(nfsfha_ctls).max_reqs_per_nfsd = FHA_DEF_MAX_REQS_PER_NFSD;

}

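/*
 * Per-VNET teardown: destroy the per-chain mutexes and free the softc.
 */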
static void
fhanew_uninit(void *foo)
{
	struct fha_params *softc;
	int i;

	softc = NFSD_VNET(fhanew_softc);

	for (i = 0; i < FHA_HASH_SIZE; i++)
		mtx_destroy(&softc->fha_hash[i].mtx);
	free(softc, M_TEMP);
}

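/*
 * Map an NFSv2 procedure number to its NFSv3 equivalent through
 * newnfs_nfsv3_procid[] so the rest of this file only deals with v3
 * procedure numbers; out-of-range v2 procnums are rejected with -1.
 */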
static rpcproc_t
fhanew_get_procnum(rpcproc_t procnum)
{
	if (procnum > NFSV2PROC_STATFS)
		return (-1);

	return (newnfs_nfsv3_procid[procnum]);
}

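/*
 * Pull the file handle out of the request and fold it into a 64-bit
 * affinity key: each handle byte is XORed into the accumulator at the
 * byte position (i & 7), so e.g. handle bytes 0 and 8 both land in bits
 * 0-7.  The key only needs to be reasonably well distributed, not
 * unique.  For NFSv3 the handle length comes off the wire first; NFSv2
 * handles are always NFSX_V2FH bytes.
 */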
static int
fhanew_get_fh(uint64_t *fh, int v3, struct mbuf **md, caddr_t *dpos)
{
	struct nfsrv_descript lnd, *nd;
	uint32_t *tl;
	uint8_t *buf;
	uint64_t t;
	int error, len, i;

	error = 0;
	len = 0;
	nd = &lnd;

	nd->nd_md = *md;
	nd->nd_dpos = *dpos;

	if (v3) {
		NFSM_DISSECT_NONBLOCK(tl, uint32_t *, NFSX_UNSIGNED);
		if ((len = fxdr_unsigned(int, *tl)) <= 0 || len > NFSX_FHMAX) {
			error = EBADRPC;
			goto nfsmout;
		}
	} else {
		len = NFSX_V2FH;
	}

	t = 0;
	if (len != 0) {
		NFSM_DISSECT_NONBLOCK(buf, uint8_t *, len);
		for (i = 0; i < len; i++)
			t ^= ((uint64_t)buf[i] << (i & 7) * 8);
	}
	*fh = t;

nfsmout:
	*md = nd->nd_md;
	*dpos = nd->nd_dpos;

	return (error);
}

static int
fhanew_is_read(rpcproc_t procnum)
{
	if (procnum == NFSPROC_READ)
		return (1);
	else
		return (0);
}

static int
fhanew_is_write(rpcproc_t procnum)
{
	if (procnum == NFSPROC_WRITE)
		return (1);
	else
		return (0);
}

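/*
 * Pull the file offset out of a READ/WRITE request so that nearby
 * offsets can be steered to the same nfsd.  The file handle has already
 * been consumed by fhanew_get_fh(), so the offset is the next argument:
 * a 64-bit hyper for NFSv3, a 32-bit word for NFSv2.
 */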
static int
fhanew_get_offset(struct mbuf **md, caddr_t *dpos, int v3,
    struct fha_info *info)
{
	struct nfsrv_descript lnd, *nd;
	uint32_t *tl;
	int error;

	error = 0;

	nd = &lnd;
	nd->nd_md = *md;
	nd->nd_dpos = *dpos;

	if (v3) {
		NFSM_DISSECT_NONBLOCK(tl, uint32_t *, 2 * NFSX_UNSIGNED);
		info->offset = fxdr_hyper(tl);
	} else {
		NFSM_DISSECT_NONBLOCK(tl, uint32_t *, NFSX_UNSIGNED);
		info->offset = fxdr_unsigned(uint32_t, *tl);
	}

nfsmout:
	*md = nd->nd_md;
	*dpos = nd->nd_dpos;

	return (error);
}

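/*
 * Procedures for which no affinity information (file handle or offset)
 * is extracted; fha_extract_info() bails out early for these.
 */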
static int
fhanew_no_offset(rpcproc_t procnum)
{
	if (procnum == NFSPROC_FSSTAT ||
	    procnum == NFSPROC_FSINFO ||
	    procnum == NFSPROC_PATHCONF ||
	    procnum == NFSPROC_NOOP ||
	    procnum == NFSPROC_NULL)
		return (1);
	else
		return (0);
}

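/*
 * Classify the operation for accounting: procedures that can reasonably
 * run concurrently against the same file handle (reads, lookups, plain
 * writes) are recorded as LK_SHARED, everything that changes the
 * namespace or attributes as LK_EXCLUSIVE.  The locktype only feeds the
 * num_rw/num_exclusive counters on the hash entry; no lock is taken.
 */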
static void
fhanew_set_locktype(rpcproc_t procnum, struct fha_info *info)
{
	switch (procnum) {
	case NFSPROC_NULL:
	case NFSPROC_GETATTR:
	case NFSPROC_LOOKUP:
	case NFSPROC_ACCESS:
	case NFSPROC_READLINK:
	case NFSPROC_READ:
	case NFSPROC_READDIR:
	case NFSPROC_READDIRPLUS:
	case NFSPROC_WRITE:
		info->locktype = LK_SHARED;
		break;
	case NFSPROC_SETATTR:
	case NFSPROC_CREATE:
	case NFSPROC_MKDIR:
	case NFSPROC_SYMLINK:
	case NFSPROC_MKNOD:
	case NFSPROC_REMOVE:
	case NFSPROC_RMDIR:
	case NFSPROC_RENAME:
	case NFSPROC_LINK:
	case NFSPROC_FSSTAT:
	case NFSPROC_FSINFO:
	case NFSPROC_PATHCONF:
	case NFSPROC_COMMIT:
	case NFSPROC_NOOP:
		info->locktype = LK_EXCLUSIVE;
		break;
	}
}

/*
 * Extract the scheduling information from a request.  Offsets obey
 * affinity when they lie within 2^bin_shift bytes of each other,
 * subject to the read/write locality sysctls above.
 */
static void
fha_extract_info(struct svc_req *req, struct fha_info *i)
{
	struct mbuf *md;
	caddr_t dpos;
	static u_int64_t random_fh = 0;
	int error;
	int v3 = (req->rq_vers == 3);
	rpcproc_t procnum;

	/*
	 * We start off with a random fh.  If we get a reasonable
	 * procnum, we set the fh.  If there's a concept of offset
	 * that we're interested in, we set that.
	 */
	i->fh = ++random_fh;
	i->offset = 0;
	i->locktype = LK_EXCLUSIVE;
	i->read = i->write = 0;

	/*
	 * Extract the procnum and convert to v3 form if necessary,
	 * taking care to deal with out-of-range procnums.  Caller will
	 * ensure that rq_vers is either 2 or 3.
	 */
	procnum = req->rq_proc;
	if (!v3) {
		rpcproc_t tmp_procnum;

		tmp_procnum = fhanew_get_procnum(procnum);
		if (tmp_procnum == -1)
			goto out;
		procnum = tmp_procnum;
	}

	/*
	 * We do affinity for most operations.  However, we subdivide the
	 * affinity domain by file offset so that concurrent random access
	 * can still spread across threads.  Offsets are extracted for both
	 * reads and writes; whether they influence placement is controlled
	 * by the read/write locality sysctls.
	 */
	if (fhanew_no_offset(procnum))
		goto out;

	i->read = fhanew_is_read(procnum);
	i->write = fhanew_is_write(procnum);

	error = newnfs_realign(&req->rq_args, M_NOWAIT);
	if (error)
		goto out;
	md = req->rq_args;
	dpos = mtod(md, caddr_t);

	/* Grab the filehandle. */
	error = fhanew_get_fh(&i->fh, v3, &md, &dpos);
	if (error)
		goto out;

	/* Content ourselves with zero offset for all but reads and writes. */
	if (i->read || i->write)
		fhanew_get_offset(&md, &dpos, v3, i);

out:
	fhanew_set_locktype(procnum, i);
}

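/*
 * Allocate a hash entry for a file handle that has no entry yet.  The
 * entry carries the operation counters and the list of nfsd threads
 * currently working on this file handle.
 */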
static struct fha_hash_entry *
fha_hash_entry_new(u_int64_t fh)
{
	struct fha_hash_entry *e;

	e = malloc(sizeof(*e), M_NFS_FHA, M_WAITOK);
	e->fh = fh;
	e->num_rw = 0;
	e->num_exclusive = 0;
	e->num_threads = 0;
	LIST_INIT(&e->threads);

	return (e);
}

static void
fha_hash_entry_destroy(struct fha_hash_entry *e)
{

	mtx_assert(e->mtx, MA_OWNED);
	KASSERT(e->num_rw == 0,
	    ("%d reqs on destroyed fhe %p", e->num_rw, e));
	KASSERT(e->num_exclusive == 0,
	    ("%d exclusive reqs on destroyed fhe %p", e->num_exclusive, e));
	KASSERT(e->num_threads == 0,
	    ("%d threads on destroyed fhe %p", e->num_threads, e));
	free(e, M_NFS_FHA);
}

static void
fha_hash_entry_remove(struct fha_hash_entry *e)
{

	mtx_assert(e->mtx, MA_OWNED);
	LIST_REMOVE(e, link);
	fha_hash_entry_destroy(e);
}

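/*
 * Find (or create) the hash entry for a file handle.  A fresh entry is
 * allocated with M_WAITOK before the hash chain mutex is taken, since
 * we must not sleep while holding it; if the handle turns out to be
 * present already, the pre-allocated entry is discarded.  The chain
 * mutex (fhe->mtx) is returned held.
 */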
static struct fha_hash_entry *
fha_hash_entry_lookup(struct fha_params *softc, u_int64_t fh)
{
	struct fha_hash_slot *fhs;
	struct fha_hash_entry *fhe, *new_fhe;

	fhs = &softc->fha_hash[fh % FHA_HASH_SIZE];
	new_fhe = fha_hash_entry_new(fh);
	new_fhe->mtx = &fhs->mtx;
	mtx_lock(&fhs->mtx);
	LIST_FOREACH(fhe, &fhs->list, link)
		if (fhe->fh == fh)
			break;
	if (!fhe) {
		fhe = new_fhe;
		LIST_INSERT_HEAD(&fhs->list, fhe, link);
	} else
		fha_hash_entry_destroy(new_fhe);
	return (fhe);
}

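/*
 * Track which nfsd threads are working on this file handle.  The
 * generic SVCTHREAD fields are reused for FHA bookkeeping: st_p2 is
 * the number of requests currently assigned to the thread and st_p3 is
 * the offset of the most recently assigned request.
 */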
static void
fha_hash_entry_add_thread(struct fha_hash_entry *fhe, SVCTHREAD *thread)
{

	mtx_assert(fhe->mtx, MA_OWNED);
	thread->st_p2 = 0;
	LIST_INSERT_HEAD(&fhe->threads, thread, st_alink);
	fhe->num_threads++;
}

static void
fha_hash_entry_remove_thread(struct fha_hash_entry *fhe, SVCTHREAD *thread)
{

	mtx_assert(fhe->mtx, MA_OWNED);
	KASSERT(thread->st_p2 == 0,
	    ("%d reqs on removed thread %p", thread->st_p2, thread));
	LIST_REMOVE(thread, st_alink);
	fhe->num_threads--;
}

/*
 * Account for an ongoing operation associated with this file.
 */
static void
fha_hash_entry_add_op(struct fha_hash_entry *fhe, int locktype, int count)
{

	mtx_assert(fhe->mtx, MA_OWNED);
	if (LK_EXCLUSIVE == locktype)
		fhe->num_exclusive += count;
	else
		fhe->num_rw += count;
}

/*
 * Get the service thread currently associated with the fhe that is
 * appropriate to handle this operation.
 */
static SVCTHREAD *
fha_hash_entry_choose_thread(struct fha_params *softc,
    struct fha_hash_entry *fhe, struct fha_info *i, SVCTHREAD *this_thread)
{
	SVCTHREAD *thread, *min_thread = NULL;
	int req_count, min_count = 0;
	off_t offset1, offset2;

	LIST_FOREACH(thread, &fhe->threads, st_alink) {
		req_count = thread->st_p2;

		/* If there are any writes in progress, use the first thread. */
		if (fhe->num_exclusive) {
#if 0
			ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
			    "fha: %p(%d)w", thread, req_count);
#endif
			return (thread);
		}

		/* Check whether we should consider locality. */
		if ((i->read && !NFSD_VNET(nfsfha_ctls).read) ||
		    (i->write && !NFSD_VNET(nfsfha_ctls).write))
			goto noloc;

		/*
		 * Check for locality, making sure that we won't
		 * exceed our per-thread load limit in the process.
		 */
		offset1 = i->offset;
		offset2 = thread->st_p3;

		if (((offset1 >= offset2)
		  && ((offset1 - offset2) < (1 << NFSD_VNET(nfsfha_ctls).bin_shift)))
		 || ((offset2 > offset1)
		  && ((offset2 - offset1) < (1 << NFSD_VNET(nfsfha_ctls).bin_shift)))) {
			if ((NFSD_VNET(nfsfha_ctls).max_reqs_per_nfsd == 0) ||
			    (req_count < NFSD_VNET(nfsfha_ctls).max_reqs_per_nfsd)) {
#if 0
				ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
				    "fha: %p(%d)r", thread, req_count);
#endif
				return (thread);
			}
		}

noloc:
		/*
		 * We don't have a locality match, so skip this thread,
		 * but keep track of the most attractive thread in case
		 * we need to come back to it later.
		 */
#if 0
		ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
		    "fha: %p(%d)s off1 %llu off2 %llu", thread,
		    req_count, offset1, offset2);
#endif
		if ((min_thread == NULL) || (req_count < min_count)) {
			min_count = req_count;
			min_thread = thread;
		}
	}

	/*
	 * We didn't find a good match yet.  See if we can add
	 * a new thread to this file handle entry's thread list.
	 */
	if ((NFSD_VNET(nfsfha_ctls).max_nfsds_per_fh == 0) ||
	    (fhe->num_threads < NFSD_VNET(nfsfha_ctls).max_nfsds_per_fh)) {
		thread = this_thread;
#if 0
		ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
		    "fha: %p(%d)t", thread, thread->st_p2);
#endif
		fha_hash_entry_add_thread(fhe, thread);
	} else {
		/*
		 * We don't want to use any more threads for this file, so
		 * go back to the most attractive nfsd we're already using.
		 */
		thread = min_thread;
	}

	return (thread);
}

/*
 * After getting a request, try to assign it to some thread.  Usually we
 * handle it ourselves.
 */
SVCTHREAD *
fhanew_assign(SVCTHREAD *this_thread, struct svc_req *req)
{
	struct fha_params *softc;
	SVCTHREAD *thread;
	struct fha_info i;
	struct fha_hash_entry *fhe;

	NFSD_CURVNET_SET(NFSD_TD_TO_VNET(curthread));
	softc = NFSD_VNET(fhanew_softc);
	/* Check to see whether we're enabled. */
	if (NFSD_VNET(nfsfha_ctls).enable == 0)
		goto thist;

	/*
	 * Only do placement if this is an NFS request.
	 */
	if (req->rq_prog != NFS_PROG)
		goto thist;

	if (req->rq_vers != 2 && req->rq_vers != 3)
		goto thist;

	fha_extract_info(req, &i);

	/*
	 * We save the offset associated with this request for later
	 * nfsd matching.
	 */
	fhe = fha_hash_entry_lookup(softc, i.fh);
	req->rq_p1 = fhe;
	req->rq_p2 = i.locktype;
	req->rq_p3 = i.offset;

	/*
	 * Choose a thread, taking into consideration locality, thread load,
	 * and the number of threads already working on this file.
	 */
	thread = fha_hash_entry_choose_thread(softc, fhe, &i, this_thread);
	KASSERT(thread, ("fha_assign: NULL thread!"));
	fha_hash_entry_add_op(fhe, i.locktype, 1);
	thread->st_p2++;
	thread->st_p3 = i.offset;

	/*
	 * Lock the chosen thread before dropping the fhe lock so that it
	 * cannot go away before the new request is inserted into its
	 * queue.
	 */
	mtx_lock(&thread->st_lock);
	mtx_unlock(fhe->mtx);

	NFSD_CURVNET_RESTORE();
	return (thread);
thist:
	req->rq_p1 = NULL;
	NFSD_CURVNET_RESTORE();
	mtx_lock(&this_thread->st_lock);
	return (this_thread);
}

/*
 * Called when we're done with an operation.  The request has already
 * been de-queued.
 */
void
fhanew_nd_complete(SVCTHREAD *thread, struct svc_req *req)
{
	struct fha_hash_entry *fhe = req->rq_p1;
	struct mtx *mtx;

	NFSD_CURVNET_SET(NFSD_TD_TO_VNET(curthread));
	/*
	 * This may be called for reqs that didn't go through
	 * fhanew_assign() (e.g. extra NULL ops used for RPCSEC_GSS).
	 */
	if (!fhe) {
		NFSD_CURVNET_RESTORE();
		return;
	}

	mtx = fhe->mtx;
	mtx_lock(mtx);
	fha_hash_entry_add_op(fhe, req->rq_p2, -1);
	thread->st_p2--;
	KASSERT(thread->st_p2 >= 0, ("Negative request count %d on %p",
	    thread->st_p2, thread));
	if (thread->st_p2 == 0) {
		fha_hash_entry_remove_thread(fhe, thread);
		if (0 == fhe->num_rw + fhe->num_exclusive)
			fha_hash_entry_remove(fhe);
	}
	mtx_unlock(mtx);
	NFSD_CURVNET_RESTORE();
}

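/*
 * Handler for vfs.nfsd.fha.fhe_stats: walk the hash table and report
 * each live file handle entry (64-bit key, rw/exclusive operation
 * counts, number of threads) plus the per-thread offset and request
 * count, e.g. when read via "sysctl vfs.nfsd.fha.fhe_stats"
 * (illustrative invocation).
 */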
static int
fhenew_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct fha_params *softc;
	int error, i;
	struct sbuf sb;
	struct fha_hash_entry *fhe;
	bool_t first, hfirst;
	SVCTHREAD *thread;

	sbuf_new(&sb, NULL, 65536, SBUF_FIXEDLEN);

	NFSD_CURVNET_SET(NFSD_TD_TO_VNET(curthread));
	softc = NFSD_VNET(fhanew_softc);
	for (i = 0; i < FHA_HASH_SIZE; i++)
		if (!LIST_EMPTY(&softc->fha_hash[i].list))
			break;

	if (i == FHA_HASH_SIZE) {
		sbuf_printf(&sb, "No file handle entries.\n");
		goto out;
	}

	hfirst = TRUE;
	for (; i < FHA_HASH_SIZE; i++) {
		mtx_lock(&softc->fha_hash[i].mtx);
		if (LIST_EMPTY(&softc->fha_hash[i].list)) {
			mtx_unlock(&softc->fha_hash[i].mtx);
			continue;
		}
		sbuf_printf(&sb, "%shash %d: {\n", hfirst ? "" : ", ", i);
		first = TRUE;
		LIST_FOREACH(fhe, &softc->fha_hash[i].list, link) {
			sbuf_printf(&sb, "%sfhe %p: {\n", first ? "  " : ", ",
			    fhe);
			sbuf_printf(&sb, "    fh: %ju\n", (uintmax_t) fhe->fh);
			sbuf_printf(&sb, "    num_rw/exclusive: %d/%d\n",
			    fhe->num_rw, fhe->num_exclusive);
			sbuf_printf(&sb, "    num_threads: %d\n",
			    fhe->num_threads);

			LIST_FOREACH(thread, &fhe->threads, st_alink) {
				sbuf_printf(&sb, "      thread %p offset %ju "
				    "reqs %d\n", thread,
				    thread->st_p3, thread->st_p2);
			}

			sbuf_printf(&sb, "  }");
			first = FALSE;
		}
		sbuf_printf(&sb, "\n}");
		mtx_unlock(&softc->fha_hash[i].mtx);
		hfirst = FALSE;
	}

 out:
	NFSD_CURVNET_RESTORE();
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
	sbuf_delete(&sb);
	return (error);
}