/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
 * Copyright (c) 2013 Spectra Logic Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/sbuf.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsserver/nfs_fha_new.h>

#include <rpc/rpc.h>

static MALLOC_DEFINE(M_NFS_FHA, "NFS FHA", "NFS FHA");

static void		fhanew_init(void *foo);
static void		fhanew_uninit(void *foo);
static rpcproc_t	fhanew_get_procnum(rpcproc_t procnum);
static int		fhanew_get_fh(uint64_t *fh, int v3, struct mbuf **md,
			    caddr_t *dpos);
static int		fhanew_is_read(rpcproc_t procnum);
static int		fhanew_is_write(rpcproc_t procnum);
static int		fhanew_get_offset(struct mbuf **md, caddr_t *dpos,
			    int v3, struct fha_info *info);
static int		fhanew_no_offset(rpcproc_t procnum);
static void		fhanew_set_locktype(rpcproc_t procnum,
			    struct fha_info *info);
static int		fhenew_stats_sysctl(SYSCTL_HANDLER_ARGS);
static void		fha_extract_info(struct svc_req *req,
			    struct fha_info *i);

NFSD_VNET_DEFINE_STATIC(struct fha_params *, fhanew_softc);
NFSD_VNET_DEFINE_STATIC(struct fha_ctls, nfsfha_ctls);

SYSCTL_DECL(_vfs_nfsd);
SYSCTL_NODE(_vfs_nfsd, OID_AUTO, fha, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "NFS File Handle Affinity (FHA)");

SYSCTL_UINT(_vfs_nfsd_fha,
    OID_AUTO, enable, CTLFLAG_NFSD_VNET | CTLFLAG_RWTUN,
    &NFSD_VNET_NAME(nfsfha_ctls).enable, 0,
    "Enable NFS File Handle Affinity (FHA)");

SYSCTL_UINT(_vfs_nfsd_fha,
    OID_AUTO, read, CTLFLAG_NFSD_VNET | CTLFLAG_RWTUN,
    &NFSD_VNET_NAME(nfsfha_ctls).read, 0,
    "Enable NFS FHA read locality");

SYSCTL_UINT(_vfs_nfsd_fha,
    OID_AUTO, write, CTLFLAG_NFSD_VNET | CTLFLAG_RWTUN,
    &NFSD_VNET_NAME(nfsfha_ctls).write, 0,
    "Enable NFS FHA write locality");

SYSCTL_UINT(_vfs_nfsd_fha,
    OID_AUTO, bin_shift, CTLFLAG_NFSD_VNET | CTLFLAG_RWTUN,
    &NFSD_VNET_NAME(nfsfha_ctls).bin_shift, 0,
    "Maximum locality distance 2^(bin_shift) bytes");

SYSCTL_UINT(_vfs_nfsd_fha,
    OID_AUTO, max_nfsds_per_fh, CTLFLAG_NFSD_VNET | CTLFLAG_RWTUN,
    &NFSD_VNET_NAME(nfsfha_ctls).max_nfsds_per_fh, 0,
    "Maximum nfsd threads that "
    "should be working on requests for the same file handle");

SYSCTL_UINT(_vfs_nfsd_fha,
    OID_AUTO, max_reqs_per_nfsd, CTLFLAG_NFSD_VNET | CTLFLAG_RWTUN,
    &NFSD_VNET_NAME(nfsfha_ctls).max_reqs_per_nfsd, 0, "Maximum requests that "
    "a single nfsd thread should be working on at any time");

SYSCTL_PROC(_vfs_nfsd_fha, OID_AUTO, fhe_stats,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
    fhenew_stats_sysctl, "A", "NFS File Handle Affinity (FHA) statistics");

extern int newnfs_nfsv3_procid[];

VNET_SYSINIT(nfs_fhanew, SI_SUB_VNET_DONE, SI_ORDER_ANY, fhanew_init, NULL);
VNET_SYSUNINIT(nfs_fhanew, SI_SUB_VNET_DONE, SI_ORDER_ANY, fhanew_uninit, NULL);

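/*
 * Per-VNET FHA state: the softc and its hash table are allocated at
 * SI_SUB_VNET_DONE time and torn down again when the vnet goes away.
 * All tunables start out at their FHA_DEF_* defaults.
 */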
static void
fhanew_init(void *foo)
{
	struct fha_params *softc;
	int i;

	NFSD_VNET(fhanew_softc) = malloc(sizeof(struct fha_params), M_TEMP,
	    M_WAITOK | M_ZERO);
	softc = NFSD_VNET(fhanew_softc);

	snprintf(softc->server_name, sizeof(softc->server_name),
	    FHANEW_SERVER_NAME);

	for (i = 0; i < FHA_HASH_SIZE; i++)
		mtx_init(&softc->fha_hash[i].mtx, "fhalock", NULL, MTX_DEF);

	/*
	 * Set the default tuning parameters.
	 */
	NFSD_VNET(nfsfha_ctls).enable = FHA_DEF_ENABLE;
	NFSD_VNET(nfsfha_ctls).read = FHA_DEF_READ;
	NFSD_VNET(nfsfha_ctls).write = FHA_DEF_WRITE;
	NFSD_VNET(nfsfha_ctls).bin_shift = FHA_DEF_BIN_SHIFT;
	NFSD_VNET(nfsfha_ctls).max_nfsds_per_fh = FHA_DEF_MAX_NFSDS_PER_FH;
	NFSD_VNET(nfsfha_ctls).max_reqs_per_nfsd = FHA_DEF_MAX_REQS_PER_NFSD;
}

static void
fhanew_uninit(void *foo)
{
	struct fha_params *softc;
	int i;

	softc = NFSD_VNET(fhanew_softc);

	for (i = 0; i < FHA_HASH_SIZE; i++)
		mtx_destroy(&softc->fha_hash[i].mtx);
	free(softc, M_TEMP);
}

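/*
 * Map an NFSv2 procedure number to its NFSv3 equivalent via
 * newnfs_nfsv3_procid[], returning -1 for out-of-range procedure numbers.
 */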
static rpcproc_t
fhanew_get_procnum(rpcproc_t procnum)
{
	if (procnum > NFSV2PROC_STATFS)
		return (-1);

	return (newnfs_nfsv3_procid[procnum]);
}

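/*
 * Fold the file handle in the RPC arguments into a 64-bit affinity key by
 * XORing each byte into a rotating byte position.  For NFSv3 the handle
 * length comes from the XDR stream; for NFSv2 it is fixed at NFSX_V2FH.
 */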
static int
fhanew_get_fh(uint64_t *fh, int v3, struct mbuf **md, caddr_t *dpos)
{
	struct nfsrv_descript lnd, *nd;
	uint32_t *tl;
	uint8_t *buf;
	uint64_t t;
	int error, len, i;

	error = 0;
	len = 0;
	nd = &lnd;

	nd->nd_md = *md;
	nd->nd_dpos = *dpos;

	if (v3) {
		NFSM_DISSECT_NONBLOCK(tl, uint32_t *, NFSX_UNSIGNED);
		if ((len = fxdr_unsigned(int, *tl)) <= 0 || len > NFSX_FHMAX) {
			error = EBADRPC;
			goto nfsmout;
		}
	} else {
		len = NFSX_V2FH;
	}

	t = 0;
	if (len != 0) {
		NFSM_DISSECT_NONBLOCK(buf, uint8_t *, len);
		for (i = 0; i < len; i++)
			t ^= ((uint64_t)buf[i] << (i & 7) * 8);
	}
	*fh = t;

nfsmout:
	*md = nd->nd_md;
	*dpos = nd->nd_dpos;

	return (error);
}

static int
fhanew_is_read(rpcproc_t procnum)
{
	if (procnum == NFSPROC_READ)
		return (1);
	else
		return (0);
}

static int
fhanew_is_write(rpcproc_t procnum)
{
	if (procnum == NFSPROC_WRITE)
		return (1);
	else
		return (0);
}

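/*
 * Extract the offset argument from the RPC arguments: a 64-bit hyper for
 * NFSv3 and a 32-bit value for NFSv2.  md and dpos are advanced past the
 * fields that were parsed.
 */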
static int
fhanew_get_offset(struct mbuf **md, caddr_t *dpos, int v3,
    struct fha_info *info)
{
	struct nfsrv_descript lnd, *nd;
	uint32_t *tl;
	int error;

	error = 0;

	nd = &lnd;
	nd->nd_md = *md;
	nd->nd_dpos = *dpos;

	if (v3) {
		NFSM_DISSECT_NONBLOCK(tl, uint32_t *, 2 * NFSX_UNSIGNED);
		info->offset = fxdr_hyper(tl);
	} else {
		NFSM_DISSECT_NONBLOCK(tl, uint32_t *, NFSX_UNSIGNED);
		info->offset = fxdr_unsigned(uint32_t, *tl);
	}

nfsmout:
	*md = nd->nd_md;
	*dpos = nd->nd_dpos;

	return (error);
}

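/*
 * Return non-zero for procedures that carry no offset argument;
 * fha_extract_info() does no argument parsing for these and leaves them
 * on a made-up file handle.
 */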
static int
fhanew_no_offset(rpcproc_t procnum)
{
	if (procnum == NFSPROC_FSSTAT ||
	    procnum == NFSPROC_FSINFO ||
	    procnum == NFSPROC_PATHCONF ||
	    procnum == NFSPROC_NOOP ||
	    procnum == NFSPROC_NULL)
		return (1);
	else
		return (0);
}

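/*
 * Classify the procedure for FHA accounting.  Procedures marked LK_SHARED
 * are counted in num_rw and may be spread across nfsd threads; everything
 * else is counted in num_exclusive, which causes all requests for the file
 * handle to be funneled to a single thread while such an operation is in
 * flight (see fha_hash_entry_choose_thread()).
 */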
static void
fhanew_set_locktype(rpcproc_t procnum, struct fha_info *info)
{
	switch (procnum) {
	case NFSPROC_NULL:
	case NFSPROC_GETATTR:
	case NFSPROC_LOOKUP:
	case NFSPROC_ACCESS:
	case NFSPROC_READLINK:
	case NFSPROC_READ:
	case NFSPROC_READDIR:
	case NFSPROC_READDIRPLUS:
	case NFSPROC_WRITE:
		info->locktype = LK_SHARED;
		break;
	case NFSPROC_SETATTR:
	case NFSPROC_CREATE:
	case NFSPROC_MKDIR:
	case NFSPROC_SYMLINK:
	case NFSPROC_MKNOD:
	case NFSPROC_REMOVE:
	case NFSPROC_RMDIR:
	case NFSPROC_RENAME:
	case NFSPROC_LINK:
	case NFSPROC_FSSTAT:
	case NFSPROC_FSINFO:
	case NFSPROC_PATHCONF:
	case NFSPROC_COMMIT:
	case NFSPROC_NOOP:
		info->locktype = LK_EXCLUSIVE;
		break;
	}
}

/*
 * Offsets obey affinity when they fall within the same 2^bin_shift byte
 * chunk of the file; this applies to both reads and writes, subject to
 * the vfs.nfsd.fha.read and vfs.nfsd.fha.write tunables.
 */
static void
fha_extract_info(struct svc_req *req, struct fha_info *i)
{
	struct mbuf *md;
	caddr_t dpos;
	static u_int64_t random_fh = 0;
	int error;
	int v3 = (req->rq_vers == 3);
	rpcproc_t procnum;

	/*
	 * We start off with a random fh.  If we get a reasonable
	 * procnum, we set the fh.  If there's a concept of offset
	 * that we're interested in, we set that.
	 */
	i->fh = ++random_fh;
	i->offset = 0;
	i->locktype = LK_EXCLUSIVE;
	i->read = i->write = 0;

	/*
	 * Extract the procnum and convert to v3 form if necessary,
	 * taking care to deal with out-of-range procnums.  Caller will
	 * ensure that rq_vers is either 2 or 3.
	 */
	procnum = req->rq_proc;
	if (!v3) {
		rpcproc_t tmp_procnum;

		tmp_procnum = fhanew_get_procnum(procnum);
		if (tmp_procnum == -1)
			goto out;
		procnum = tmp_procnum;
	}

	/*
	 * We do affinity for most procedures.  However, we divide a realm
	 * of affinity by file offset so as to allow for concurrent random
	 * access.  This is done for both reads and writes, subject to the
	 * read and write locality tunables.
	 */
	if (fhanew_no_offset(procnum))
		goto out;

	i->read = fhanew_is_read(procnum);
	i->write = fhanew_is_write(procnum);

	error = newnfs_realign(&req->rq_args, M_NOWAIT);
	if (error)
		goto out;
	md = req->rq_args;
	dpos = mtod(md, caddr_t);

	/* Grab the filehandle. */
	error = fhanew_get_fh(&i->fh, v3, &md, &dpos);
	if (error)
		goto out;

	/* Content ourselves with a zero offset for all but reads and writes. */
	if (i->read || i->write)
		fhanew_get_offset(&md, &dpos, v3, i);

out:
	fhanew_set_locktype(procnum, i);
}

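/*
 * File handle hash entries are created on demand by fha_hash_entry_lookup()
 * and freed from fhanew_nd_complete() once the last outstanding request for
 * the file handle completes and no threads remain on the entry's list.
 */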
static struct fha_hash_entry *
fha_hash_entry_new(u_int64_t fh)
{
	struct fha_hash_entry *e;

	e = malloc(sizeof(*e), M_NFS_FHA, M_WAITOK);
	e->fh = fh;
	e->num_rw = 0;
	e->num_exclusive = 0;
	e->num_threads = 0;
	LIST_INIT(&e->threads);

	return (e);
}

static void
fha_hash_entry_destroy(struct fha_hash_entry *e)
{

	mtx_assert(e->mtx, MA_OWNED);
	KASSERT(e->num_rw == 0,
	    ("%d reqs on destroyed fhe %p", e->num_rw, e));
	KASSERT(e->num_exclusive == 0,
	    ("%d exclusive reqs on destroyed fhe %p", e->num_exclusive, e));
	KASSERT(e->num_threads == 0,
	    ("%d threads on destroyed fhe %p", e->num_threads, e));
	free(e, M_NFS_FHA);
}

static void
fha_hash_entry_remove(struct fha_hash_entry *e)
{

	mtx_assert(e->mtx, MA_OWNED);
	LIST_REMOVE(e, link);
	fha_hash_entry_destroy(e);
}

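/*
 * Find (or create) the hash entry for a file handle.  A fresh entry is
 * allocated before the hash slot mutex is taken, since the M_WAITOK
 * allocation may sleep; if the handle is already present the spare entry
 * is destroyed again.  The entry is returned with the slot mutex held.
 */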
static struct fha_hash_entry *
fha_hash_entry_lookup(struct fha_params *softc, u_int64_t fh)
{
	struct fha_hash_slot *fhs;
	struct fha_hash_entry *fhe, *new_fhe;

	fhs = &softc->fha_hash[fh % FHA_HASH_SIZE];
	new_fhe = fha_hash_entry_new(fh);
	new_fhe->mtx = &fhs->mtx;
	mtx_lock(&fhs->mtx);
	LIST_FOREACH(fhe, &fhs->list, link)
		if (fhe->fh == fh)
			break;
	if (!fhe) {
		fhe = new_fhe;
		LIST_INSERT_HEAD(&fhs->list, fhe, link);
	} else
		fha_hash_entry_destroy(new_fhe);
	return (fhe);
}

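/*
 * The generic SVCTHREAD fields st_p2 and st_p3 are borrowed by FHA:
 * st_p2 holds the number of FHA-assigned requests outstanding on the
 * thread and st_p3 the offset of the most recently assigned request.
 */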
static void
fha_hash_entry_add_thread(struct fha_hash_entry *fhe, SVCTHREAD *thread)
{

	mtx_assert(fhe->mtx, MA_OWNED);
	thread->st_p2 = 0;
	LIST_INSERT_HEAD(&fhe->threads, thread, st_alink);
	fhe->num_threads++;
}

static void
fha_hash_entry_remove_thread(struct fha_hash_entry *fhe, SVCTHREAD *thread)
{

	mtx_assert(fhe->mtx, MA_OWNED);
	KASSERT(thread->st_p2 == 0,
	    ("%d reqs on removed thread %p", thread->st_p2, thread));
	LIST_REMOVE(thread, st_alink);
	fhe->num_threads--;
}

/*
 * Account for an ongoing operation associated with this file.
 */
static void
fha_hash_entry_add_op(struct fha_hash_entry *fhe, int locktype, int count)
{

	mtx_assert(fhe->mtx, MA_OWNED);
	if (LK_EXCLUSIVE == locktype)
		fhe->num_exclusive += count;
	else
		fhe->num_rw += count;
}

/*
 * Get the service thread currently associated with the fhe that is
 * appropriate to handle this operation.
 */
static SVCTHREAD *
fha_hash_entry_choose_thread(struct fha_params *softc,
    struct fha_hash_entry *fhe, struct fha_info *i, SVCTHREAD *this_thread)
{
	SVCTHREAD *thread, *min_thread = NULL;
	int req_count, min_count = 0;
	off_t offset1, offset2;

	LIST_FOREACH(thread, &fhe->threads, st_alink) {
		req_count = thread->st_p2;

		/* If any exclusive ops are in progress, use the first thread. */
		if (fhe->num_exclusive) {
#if 0
			ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
			    "fha: %p(%d)w", thread, req_count);
#endif
			return (thread);
		}

		/* Check whether we should consider locality. */
		if ((i->read && !NFSD_VNET(nfsfha_ctls).read) ||
		    (i->write && !NFSD_VNET(nfsfha_ctls).write))
			goto noloc;

		/*
		 * Check for locality, making sure that we won't
		 * exceed our per-thread load limit in the process.
		 */
		offset1 = i->offset;
		offset2 = thread->st_p3;

		if (((offset1 >= offset2)
		  && ((offset1 - offset2) < (1 << NFSD_VNET(nfsfha_ctls).bin_shift)))
		 || ((offset2 > offset1)
		  && ((offset2 - offset1) < (1 << NFSD_VNET(nfsfha_ctls).bin_shift)))) {
			if ((NFSD_VNET(nfsfha_ctls).max_reqs_per_nfsd == 0) ||
			    (req_count < NFSD_VNET(nfsfha_ctls).max_reqs_per_nfsd)) {
#if 0
				ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
				    "fha: %p(%d)r", thread, req_count);
#endif
				return (thread);
			}
		}

noloc:
		/*
		 * We don't have a locality match, so skip this thread,
		 * but keep track of the most attractive thread in case
		 * we need to come back to it later.
		 */
#if 0
		ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
		    "fha: %p(%d)s off1 %llu off2 %llu", thread,
		    req_count, offset1, offset2);
#endif
		if ((min_thread == NULL) || (req_count < min_count)) {
			min_count = req_count;
			min_thread = thread;
		}
	}

	/*
	 * We didn't find a good match yet.  See if we can add
	 * a new thread to this file handle entry's thread list.
	 */
	if ((NFSD_VNET(nfsfha_ctls).max_nfsds_per_fh == 0) ||
	    (fhe->num_threads < NFSD_VNET(nfsfha_ctls).max_nfsds_per_fh)) {
		thread = this_thread;
#if 0
		ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
		    "fha: %p(%d)t", thread, thread->st_p2);
#endif
		fha_hash_entry_add_thread(fhe, thread);
	} else {
		/*
		 * We don't want to use any more threads for this file, so
		 * go back to the most attractive nfsd we're already using.
		 */
		thread = min_thread;
	}

	return (thread);
}

/*
 * After getting a request, try to assign it to some thread.  Usually we
 * handle it ourselves.
 */
SVCTHREAD *
fhanew_assign(SVCTHREAD *this_thread, struct svc_req *req)
{
	struct fha_params *softc;
	SVCTHREAD *thread;
	struct fha_info i;
	struct fha_hash_entry *fhe;

	NFSD_CURVNET_SET(NFSD_TD_TO_VNET(curthread));
	softc = NFSD_VNET(fhanew_softc);
	/* Check to see whether we're enabled. */
	if (NFSD_VNET(nfsfha_ctls).enable == 0)
		goto thist;

	/*
	 * Only do placement if this is an NFS request.
	 */
	if (req->rq_prog != NFS_PROG)
		goto thist;

	if (req->rq_vers != 2 && req->rq_vers != 3)
		goto thist;

	fha_extract_info(req, &i);

	/*
	 * We save the offset associated with this request for later
	 * nfsd matching.
	 */
	fhe = fha_hash_entry_lookup(softc, i.fh);
	req->rq_p1 = fhe;
	req->rq_p2 = i.locktype;
	req->rq_p3 = i.offset;

	/*
	 * Choose a thread, taking into consideration locality, thread load,
	 * and the number of threads already working on this file.
	 */
	thread = fha_hash_entry_choose_thread(softc, fhe, &i, this_thread);
	KASSERT(thread, ("fhanew_assign: NULL thread!"));
	fha_hash_entry_add_op(fhe, i.locktype, 1);
	thread->st_p2++;
	thread->st_p3 = i.offset;

	/*
	 * Take the chosen thread's lock here so that it cannot go away
	 * before the new request is inserted into its queue, even though
	 * we drop the fhe lock.
	 */
	mtx_lock(&thread->st_lock);
	mtx_unlock(fhe->mtx);

	NFSD_CURVNET_RESTORE();
	return (thread);
thist:
	req->rq_p1 = NULL;
	NFSD_CURVNET_RESTORE();
	mtx_lock(&this_thread->st_lock);
	return (this_thread);
}

/*
 * Called when we're done with an operation.  The request has already
 * been de-queued.
 */
void
fhanew_nd_complete(SVCTHREAD *thread, struct svc_req *req)
{
	struct fha_hash_entry *fhe = req->rq_p1;
	struct mtx *mtx;

	NFSD_CURVNET_SET(NFSD_TD_TO_VNET(curthread));
	/*
	 * This may be called for reqs that didn't go through
	 * fhanew_assign() (e.g., extra NULL ops used for RPCSEC_GSS).
	 */
	if (!fhe) {
		NFSD_CURVNET_RESTORE();
		return;
	}

	mtx = fhe->mtx;
	mtx_lock(mtx);
	fha_hash_entry_add_op(fhe, req->rq_p2, -1);
	thread->st_p2--;
	KASSERT(thread->st_p2 >= 0, ("Negative request count %d on %p",
	    thread->st_p2, thread));
	if (thread->st_p2 == 0) {
		fha_hash_entry_remove_thread(fhe, thread);
		if (0 == fhe->num_rw + fhe->num_exclusive)
			fha_hash_entry_remove(fhe);
	}
	mtx_unlock(mtx);
	NFSD_CURVNET_RESTORE();
}

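/*
 * sysctl handler for vfs.nfsd.fha.fhe_stats: dump the per-hash-slot file
 * handle entries, their request counts, and the threads currently bound
 * to them into a fixed-size sbuf.
 */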
static int
fhenew_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct fha_params *softc;
	int error, i;
	struct sbuf sb;
	struct fha_hash_entry *fhe;
	bool_t first, hfirst;
	SVCTHREAD *thread;

	sbuf_new(&sb, NULL, 65536, SBUF_FIXEDLEN);

	NFSD_CURVNET_SET(NFSD_TD_TO_VNET(curthread));
	softc = NFSD_VNET(fhanew_softc);
	for (i = 0; i < FHA_HASH_SIZE; i++)
		if (!LIST_EMPTY(&softc->fha_hash[i].list))
			break;

	if (i == FHA_HASH_SIZE) {
		sbuf_printf(&sb, "No file handle entries.\n");
		goto out;
	}

	hfirst = TRUE;
	for (; i < FHA_HASH_SIZE; i++) {
		mtx_lock(&softc->fha_hash[i].mtx);
		if (LIST_EMPTY(&softc->fha_hash[i].list)) {
			mtx_unlock(&softc->fha_hash[i].mtx);
			continue;
		}
		sbuf_printf(&sb, "%shash %d: {\n", hfirst ? "" : ", ", i);
		first = TRUE;
		LIST_FOREACH(fhe, &softc->fha_hash[i].list, link) {
			sbuf_printf(&sb, "%sfhe %p: {\n", first ? "  " : ", ",
			    fhe);
			sbuf_printf(&sb, "    fh: %ju\n", (uintmax_t) fhe->fh);
			sbuf_printf(&sb, "    num_rw/exclusive: %d/%d\n",
			    fhe->num_rw, fhe->num_exclusive);
			sbuf_printf(&sb, "    num_threads: %d\n",
			    fhe->num_threads);

			LIST_FOREACH(thread, &fhe->threads, st_alink) {
				sbuf_printf(&sb, "      thread %p offset %ju "
				    "reqs %d\n", thread,
				    thread->st_p3, thread->st_p2);
			}

			sbuf_printf(&sb, "  }");
			first = FALSE;
		}
		sbuf_printf(&sb, "\n}");
		mtx_unlock(&softc->fha_hash[i].mtx);
		hfirst = FALSE;
	}

 out:
	NFSD_CURVNET_RESTORE();
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
	sbuf_delete(&sb);
	return (error);
}