/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
 * Copyright (c) 2013 Spectra Logic Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/sbuf.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsserver/nfs_fha_new.h>

#include <rpc/rpc.h>

static MALLOC_DEFINE(M_NFS_FHA, "NFS FHA", "NFS FHA");

static void fhanew_init(void *foo);
static void fhanew_uninit(void *foo);
static rpcproc_t fhanew_get_procnum(rpcproc_t procnum);
static int fhanew_get_fh(uint64_t *fh, int v3, struct mbuf **md,
    caddr_t *dpos);
static int fhanew_is_read(rpcproc_t procnum);
static int fhanew_is_write(rpcproc_t procnum);
static int fhanew_get_offset(struct mbuf **md, caddr_t *dpos,
    int v3, struct fha_info *info);
static int fhanew_no_offset(rpcproc_t procnum);
static void fhanew_set_locktype(rpcproc_t procnum,
    struct fha_info *info);
static int fhenew_stats_sysctl(SYSCTL_HANDLER_ARGS);
static void fha_extract_info(struct svc_req *req,
    struct fha_info *i);

static struct fha_params fhanew_softc;

SYSCTL_DECL(_vfs_nfsd);

extern int newnfs_nfsv3_procid[];

SYSINIT(nfs_fhanew, SI_SUB_ROOT_CONF, SI_ORDER_ANY, fhanew_init, NULL);
SYSUNINIT(nfs_fhanew, SI_SUB_ROOT_CONF, SI_ORDER_ANY, fhanew_uninit, NULL);

static void
fhanew_init(void *foo)
{
	struct fha_params *softc;
	int i;

	softc = &fhanew_softc;

	bzero(softc, sizeof(*softc));

	snprintf(softc->server_name, sizeof(softc->server_name),
	    FHANEW_SERVER_NAME);

	/*
	 * Initialize the sysctl context list for the fha module.
	 */
	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_vfs_nfsd), OID_AUTO, "fha",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "NFS File Handle Affinity (FHA)");
	if (softc->sysctl_tree == NULL) {
		printf("%s: unable to allocate sysctl tree\n", __func__);
		return;
	}

	for (i = 0; i < FHA_HASH_SIZE; i++)
		mtx_init(&softc->fha_hash[i].mtx, "fhalock", NULL, MTX_DEF);

	/*
	 * Set the default tuning parameters.
	 */
	softc->ctls.enable = FHA_DEF_ENABLE;
	softc->ctls.read = FHA_DEF_READ;
	softc->ctls.write = FHA_DEF_WRITE;
	softc->ctls.bin_shift = FHA_DEF_BIN_SHIFT;
	softc->ctls.max_nfsds_per_fh = FHA_DEF_MAX_NFSDS_PER_FH;
	softc->ctls.max_reqs_per_nfsd = FHA_DEF_MAX_REQS_PER_NFSD;

	/*
	 * Add sysctls so the user can change the tuning parameters.
	 */
	SYSCTL_ADD_UINT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "enable", CTLFLAG_RWTUN,
	    &softc->ctls.enable, 0, "Enable NFS File Handle Affinity (FHA)");

	SYSCTL_ADD_UINT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "read", CTLFLAG_RWTUN,
	    &softc->ctls.read, 0, "Enable NFS FHA read locality");

	SYSCTL_ADD_UINT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "write", CTLFLAG_RWTUN,
	    &softc->ctls.write, 0, "Enable NFS FHA write locality");

	SYSCTL_ADD_UINT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "bin_shift", CTLFLAG_RWTUN,
	    &softc->ctls.bin_shift, 0,
	    "Maximum locality distance 2^(bin_shift) bytes");

	SYSCTL_ADD_UINT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "max_nfsds_per_fh", CTLFLAG_RWTUN,
	    &softc->ctls.max_nfsds_per_fh, 0, "Maximum nfsd threads that "
	    "should be working on requests for the same file handle");

	SYSCTL_ADD_UINT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "max_reqs_per_nfsd", CTLFLAG_RWTUN,
	    &softc->ctls.max_reqs_per_nfsd, 0, "Maximum requests that "
	    "single nfsd thread should be working on at any time");

	SYSCTL_ADD_OID(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "fhe_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    0, 0, fhenew_stats_sysctl, "A", "");
}

static void
fhanew_uninit(void *foo)
{
	struct fha_params *softc;
	int i;

	softc = &fhanew_softc;

	sysctl_ctx_free(&softc->sysctl_ctx);
	for (i = 0; i < FHA_HASH_SIZE; i++)
		mtx_destroy(&softc->fha_hash[i].mtx);
}

/*
 * Map an NFSv2 procedure number to its NFSv3 equivalent; returns -1 for
 * out-of-range procedure numbers.
 */
static rpcproc_t
fhanew_get_procnum(rpcproc_t procnum)
{
	if (procnum > NFSV2PROC_STATFS)
		return (-1);

	return (newnfs_nfsv3_procid[procnum]);
}

/*
 * Pull the file handle out of the RPC arguments and fold it into the
 * 64-bit value used as the affinity hash key.
 */
static int
fhanew_get_fh(uint64_t *fh, int v3, struct mbuf **md, caddr_t *dpos)
{
	struct nfsrv_descript lnd, *nd;
	uint32_t *tl;
	uint8_t *buf;
	uint64_t t;
	int error, len, i;

	error = 0;
	len = 0;
	nd = &lnd;

	nd->nd_md = *md;
	nd->nd_dpos = *dpos;

	if (v3) {
		NFSM_DISSECT_NONBLOCK(tl, uint32_t *, NFSX_UNSIGNED);
		if ((len = fxdr_unsigned(int, *tl)) <= 0 || len > NFSX_FHMAX) {
			error = EBADRPC;
			goto nfsmout;
		}
	} else {
		len = NFSX_V2FH;
	}

	t = 0;
	if (len != 0) {
		NFSM_DISSECT_NONBLOCK(buf, uint8_t *, len);
		for (i = 0; i < len; i++)
			t ^= ((uint64_t)buf[i] << (i & 7) * 8);
	}
	*fh = t;

nfsmout:
	*md = nd->nd_md;
	*dpos = nd->nd_dpos;

	return (error);
}

static int
fhanew_is_read(rpcproc_t procnum)
{
	if (procnum == NFSPROC_READ)
		return (1);
	else
		return (0);
}

static int
fhanew_is_write(rpcproc_t procnum)
{
	if (procnum == NFSPROC_WRITE)
		return (1);
	else
		return (0);
}

static int
fhanew_get_offset(struct mbuf **md, caddr_t *dpos, int v3,
    struct fha_info *info)
{
	struct nfsrv_descript lnd, *nd;
	uint32_t *tl;
	int error;

	error = 0;

	nd = &lnd;
	nd->nd_md = *md;
	nd->nd_dpos = *dpos;

	if (v3) {
		NFSM_DISSECT_NONBLOCK(tl, uint32_t *, 2 * NFSX_UNSIGNED);
		info->offset = fxdr_hyper(tl);
	} else {
		NFSM_DISSECT_NONBLOCK(tl, uint32_t *, NFSX_UNSIGNED);
		info->offset = fxdr_unsigned(uint32_t, *tl);
	}

nfsmout:
	*md = nd->nd_md;
	*dpos = nd->nd_dpos;

	return (error);
}

static int
fhanew_no_offset(rpcproc_t procnum)
{
	if (procnum == NFSPROC_FSSTAT ||
	    procnum == NFSPROC_FSINFO ||
	    procnum == NFSPROC_PATHCONF ||
	    procnum == NFSPROC_NOOP ||
	    procnum == NFSPROC_NULL)
		return (1);
	else
		return (0);
}

static void
fhanew_set_locktype(rpcproc_t procnum, struct fha_info *info)
{
	switch (procnum) {
	case NFSPROC_NULL:
	case NFSPROC_GETATTR:
	case NFSPROC_LOOKUP:
	case NFSPROC_ACCESS:
	case NFSPROC_READLINK:
	case NFSPROC_READ:
	case NFSPROC_READDIR:
	case NFSPROC_READDIRPLUS:
	case NFSPROC_WRITE:
		info->locktype = LK_SHARED;
		break;
	case NFSPROC_SETATTR:
	case NFSPROC_CREATE:
	case NFSPROC_MKDIR:
	case NFSPROC_SYMLINK:
	case NFSPROC_MKNOD:
	case NFSPROC_REMOVE:
	case NFSPROC_RMDIR:
	case NFSPROC_RENAME:
	case NFSPROC_LINK:
	case NFSPROC_FSSTAT:
	case NFSPROC_FSINFO:
	case NFSPROC_PATHCONF:
	case NFSPROC_COMMIT:
	case NFSPROC_NOOP:
		info->locktype = LK_EXCLUSIVE;
		break;
	}
}

/*
 * This just specifies that offsets should obey affinity when within
 * the same 1Mbyte (1<<20) chunk for the file (reads only for now).
 */
static void
fha_extract_info(struct svc_req *req, struct fha_info *i)
{
	struct mbuf *md;
	caddr_t dpos;
	static u_int64_t random_fh = 0;
	int error;
	int v3 = (req->rq_vers == 3);
	rpcproc_t procnum;

	/*
	 * We start off with a random fh.  If we get a reasonable
	 * procnum, we set the fh.  If there's a concept of offset
	 * that we're interested in, we set that.
	 */
	i->fh = ++random_fh;
	i->offset = 0;
	i->locktype = LK_EXCLUSIVE;
	i->read = i->write = 0;

	/*
	 * Extract the procnum and convert to v3 form if necessary,
	 * taking care to deal with out-of-range procnums.  Caller will
	 * ensure that rq_vers is either 2 or 3.
	 */
	procnum = req->rq_proc;
	if (!v3) {
		rpcproc_t tmp_procnum;

		tmp_procnum = fhanew_get_procnum(procnum);
		if (tmp_procnum == -1)
			goto out;
		procnum = tmp_procnum;
	}

	/*
	 * We do affinity for most.  However, we divide a realm of affinity
	 * by file offset so as to allow for concurrent random access.  We
	 * only do this for reads today, but this may change when IFS supports
	 * efficient concurrent writes.
	 */
	if (fhanew_no_offset(procnum))
		goto out;

	i->read = fhanew_is_read(procnum);
	i->write = fhanew_is_write(procnum);

	error = newnfs_realign(&req->rq_args, M_NOWAIT);
	if (error)
		goto out;
	md = req->rq_args;
	dpos = mtod(md, caddr_t);

	/* Grab the filehandle. */
	error = fhanew_get_fh(&i->fh, v3, &md, &dpos);
	if (error)
		goto out;

	/* Content ourselves with zero offset for all but reads. */
	if (i->read || i->write)
		fhanew_get_offset(&md, &dpos, v3, i);

out:
	fhanew_set_locktype(procnum, i);
}

static struct fha_hash_entry *
fha_hash_entry_new(u_int64_t fh)
{
	struct fha_hash_entry *e;

	e = malloc(sizeof(*e), M_NFS_FHA, M_WAITOK);
	e->fh = fh;
	e->num_rw = 0;
	e->num_exclusive = 0;
	e->num_threads = 0;
	LIST_INIT(&e->threads);

	return (e);
}

static void
fha_hash_entry_destroy(struct fha_hash_entry *e)
{

	mtx_assert(e->mtx, MA_OWNED);
	KASSERT(e->num_rw == 0,
	    ("%d reqs on destroyed fhe %p", e->num_rw, e));
	KASSERT(e->num_exclusive == 0,
	    ("%d exclusive reqs on destroyed fhe %p", e->num_exclusive, e));
	KASSERT(e->num_threads == 0,
	    ("%d threads on destroyed fhe %p", e->num_threads, e));
	free(e, M_NFS_FHA);
}

static void
fha_hash_entry_remove(struct fha_hash_entry *e)
{

	mtx_assert(e->mtx, MA_OWNED);
	LIST_REMOVE(e, link);
	fha_hash_entry_destroy(e);
}

/*
 * Find or create the hash entry for the given file handle.  Returns with
 * the hash slot mutex held.
 */
static struct fha_hash_entry *
fha_hash_entry_lookup(struct fha_params *softc, u_int64_t fh)
{
	struct fha_hash_slot *fhs;
	struct fha_hash_entry *fhe, *new_fhe;

	fhs = &softc->fha_hash[fh % FHA_HASH_SIZE];
	new_fhe = fha_hash_entry_new(fh);
	new_fhe->mtx = &fhs->mtx;
	mtx_lock(&fhs->mtx);
	LIST_FOREACH(fhe, &fhs->list, link)
		if (fhe->fh == fh)
			break;
	if (!fhe) {
		fhe = new_fhe;
		LIST_INSERT_HEAD(&fhs->list, fhe, link);
	} else
		fha_hash_entry_destroy(new_fhe);
	return (fhe);
}

static void
fha_hash_entry_add_thread(struct fha_hash_entry *fhe, SVCTHREAD *thread)
{

	mtx_assert(fhe->mtx, MA_OWNED);
	thread->st_p2 = 0;
	LIST_INSERT_HEAD(&fhe->threads, thread, st_alink);
	fhe->num_threads++;
}

static void
fha_hash_entry_remove_thread(struct fha_hash_entry *fhe, SVCTHREAD *thread)
{

	mtx_assert(fhe->mtx, MA_OWNED);
	KASSERT(thread->st_p2 == 0,
	    ("%d reqs on removed thread %p", thread->st_p2, thread));
	LIST_REMOVE(thread, st_alink);
	fhe->num_threads--;
}

/*
 * Account for an ongoing operation associated with this file.
 */
static void
fha_hash_entry_add_op(struct fha_hash_entry *fhe, int locktype, int count)
{

	mtx_assert(fhe->mtx, MA_OWNED);
	if (LK_EXCLUSIVE == locktype)
		fhe->num_exclusive += count;
	else
		fhe->num_rw += count;
}

/*
 * Get the service thread currently associated with the fhe that is
 * appropriate to handle this operation.
 */
static SVCTHREAD *
fha_hash_entry_choose_thread(struct fha_params *softc,
    struct fha_hash_entry *fhe, struct fha_info *i, SVCTHREAD *this_thread)
{
	SVCTHREAD *thread, *min_thread = NULL;
	int req_count, min_count = 0;
	off_t offset1, offset2;

	LIST_FOREACH(thread, &fhe->threads, st_alink) {
		req_count = thread->st_p2;

		/* If there are any writes in progress, use the first thread. */
		if (fhe->num_exclusive) {
#if 0
			ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
			    "fha: %p(%d)w", thread, req_count);
#endif
			return (thread);
		}

		/* Check whether we should consider locality. */
		if ((i->read && !softc->ctls.read) ||
		    (i->write && !softc->ctls.write))
			goto noloc;

		/*
		 * Check for locality, making sure that we won't
		 * exceed our per-thread load limit in the process.
		 */
		offset1 = i->offset;
		offset2 = thread->st_p3;

		if (((offset1 >= offset2)
		    && ((offset1 - offset2) < (1 << softc->ctls.bin_shift)))
		    || ((offset2 > offset1)
		    && ((offset2 - offset1) < (1 << softc->ctls.bin_shift)))) {
			if ((softc->ctls.max_reqs_per_nfsd == 0) ||
			    (req_count < softc->ctls.max_reqs_per_nfsd)) {
#if 0
				ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
				    "fha: %p(%d)r", thread, req_count);
#endif
				return (thread);
			}
		}

noloc:
		/*
		 * We don't have a locality match, so skip this thread,
		 * but keep track of the most attractive thread in case
		 * we need to come back to it later.
		 */
#if 0
		ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
		    "fha: %p(%d)s off1 %llu off2 %llu", thread,
		    req_count, offset1, offset2);
#endif
		if ((min_thread == NULL) || (req_count < min_count)) {
			min_count = req_count;
			min_thread = thread;
		}
	}

	/*
	 * We didn't find a good match yet.  See if we can add
	 * a new thread to this file handle entry's thread list.
	 */
	if ((softc->ctls.max_nfsds_per_fh == 0) ||
	    (fhe->num_threads < softc->ctls.max_nfsds_per_fh)) {
		thread = this_thread;
#if 0
		ITRACE_CURPROC(ITRACE_NFS, ITRACE_INFO,
		    "fha: %p(%d)t", thread, thread->st_p2);
#endif
		fha_hash_entry_add_thread(fhe, thread);
	} else {
		/*
		 * We don't want to use any more threads for this file, so
		 * go back to the most attractive nfsd we're already using.
		 */
		thread = min_thread;
	}

	return (thread);
}

/*
 * After getting a request, try to assign it to some thread.  Usually we
 * handle it ourselves.
 */
SVCTHREAD *
fhanew_assign(SVCTHREAD *this_thread, struct svc_req *req)
{
	struct fha_params *softc = &fhanew_softc;
	SVCTHREAD *thread;
	struct fha_info i;
	struct fha_hash_entry *fhe;

	/* Check to see whether we're enabled. */
	if (softc->ctls.enable == 0)
		goto thist;

	/*
	 * Only do placement if this is an NFS request.
	 */
	if (req->rq_prog != NFS_PROG)
		goto thist;

	if (req->rq_vers != 2 && req->rq_vers != 3)
		goto thist;

	fha_extract_info(req, &i);

	/*
	 * We save the offset associated with this request for later
	 * nfsd matching.
	 */
	fhe = fha_hash_entry_lookup(softc, i.fh);
	req->rq_p1 = fhe;
	req->rq_p2 = i.locktype;
	req->rq_p3 = i.offset;

	/*
	 * Choose a thread, taking into consideration locality, thread load,
	 * and the number of threads already working on this file.
	 */
	thread = fha_hash_entry_choose_thread(softc, fhe, &i, this_thread);
	KASSERT(thread, ("fha_assign: NULL thread!"));
	fha_hash_entry_add_op(fhe, i.locktype, 1);
	thread->st_p2++;
	thread->st_p3 = i.offset;

	/*
	 * Grab the pool lock here so that the chosen thread cannot go away
	 * before the new request is inserted into its queue while we drop
	 * the fhe lock.
	 */
	mtx_lock(&thread->st_lock);
	mtx_unlock(fhe->mtx);

	return (thread);
thist:
	req->rq_p1 = NULL;
	mtx_lock(&this_thread->st_lock);
	return (this_thread);
}

/*
 * Called when we're done with an operation.  The request has already
 * been de-queued.
 */
void
fhanew_nd_complete(SVCTHREAD *thread, struct svc_req *req)
{
	struct fha_hash_entry *fhe = req->rq_p1;
	struct mtx *mtx;

	/*
	 * This may be called for reqs that didn't go through
	 * fha_assign (e.g. extra NULL ops used for RPCSEC_GSS).
	 */
	if (!fhe)
		return;

	mtx = fhe->mtx;
	mtx_lock(mtx);
	fha_hash_entry_add_op(fhe, req->rq_p2, -1);
	thread->st_p2--;
	KASSERT(thread->st_p2 >= 0, ("Negative request count %d on %p",
	    thread->st_p2, thread));
	if (thread->st_p2 == 0) {
		fha_hash_entry_remove_thread(fhe, thread);
		if (0 == fhe->num_rw + fhe->num_exclusive)
			fha_hash_entry_remove(fhe);
	}
	mtx_unlock(mtx);
}

static int
fhenew_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct fha_params *softc = &fhanew_softc;
	int error, i;
	struct sbuf sb;
	struct fha_hash_entry *fhe;
	bool_t first, hfirst;
	SVCTHREAD *thread;

	sbuf_new(&sb, NULL, 65536, SBUF_FIXEDLEN);

	for (i = 0; i < FHA_HASH_SIZE; i++)
		if (!LIST_EMPTY(&softc->fha_hash[i].list))
			break;

	if (i == FHA_HASH_SIZE) {
		sbuf_printf(&sb, "No file handle entries.\n");
		goto out;
	}

	hfirst = TRUE;
	for (; i < FHA_HASH_SIZE; i++) {
		mtx_lock(&softc->fha_hash[i].mtx);
		if (LIST_EMPTY(&softc->fha_hash[i].list)) {
			mtx_unlock(&softc->fha_hash[i].mtx);
			continue;
		}
		sbuf_printf(&sb, "%shash %d: {\n", hfirst ? "" : ", ", i);
		first = TRUE;
		LIST_FOREACH(fhe, &softc->fha_hash[i].list, link) {
			sbuf_printf(&sb, "%sfhe %p: {\n", first ? " " : ", ",
			    fhe);
			sbuf_printf(&sb, " fh: %ju\n", (uintmax_t) fhe->fh);
			sbuf_printf(&sb, " num_rw/exclusive: %d/%d\n",
			    fhe->num_rw, fhe->num_exclusive);
			sbuf_printf(&sb, " num_threads: %d\n",
			    fhe->num_threads);

			LIST_FOREACH(thread, &fhe->threads, st_alink) {
				sbuf_printf(&sb, " thread %p offset %ju "
				    "reqs %d\n", thread,
				    thread->st_p3, thread->st_p2);
			}

			sbuf_printf(&sb, " }");
			first = FALSE;
		}
		sbuf_printf(&sb, "\n}");
		mtx_unlock(&softc->fha_hash[i].mtx);
		hfirst = FALSE;
	}

out:
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
	sbuf_delete(&sb);
	return (error);
}