1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 * 25 * Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T. 26 * All rights reserved. 27 */ 28 29 /* 30 * Node hash implementation borrowed from NFS. 31 * See: uts/common/fs/nfs/nfs_subr.c 32 */ 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/time.h> 37 #include <sys/vnode.h> 38 #include <sys/bitmap.h> 39 #include <sys/dnlc.h> 40 #include <sys/kmem.h> 41 #include <sys/sunddi.h> 42 43 #ifdef APPLE 44 #include <sys/smb_apple.h> 45 #include <sys/utfconv.h> 46 #include <sys/smb_iconv.h> 47 #else 48 #include <netsmb/smb_osdep.h> 49 #endif 50 51 #include <netsmb/smb.h> 52 #include <netsmb/smb_conn.h> 53 #include <netsmb/smb_subr.h> 54 #include <netsmb/smb_rq.h> 55 56 #include <smbfs/smbfs.h> 57 #include <smbfs/smbfs_node.h> 58 #include <smbfs/smbfs_subr.h> 59 60 /* 61 * The hash queues for the access to active and cached smbnodes 62 * are organized as doubly linked lists. A reader/writer lock 63 * for each hash bucket is used to control access and to synchronize 64 * lookups, additions, and deletions from the hash queue. 65 * 66 * The smbnode freelist is organized as a doubly linked list with 67 * a head pointer. Additions and deletions are synchronized via 68 * a single mutex. 69 * 70 * In order to add an smbnode to the free list, it must be hashed into 71 * a hash queue and the exclusive lock to the hash queue be held. 72 * If an smbnode is not hashed into a hash queue, then it is destroyed 73 * because it represents no valuable information that can be reused 74 * about the file. The exclusive lock to the hash queue must be 75 * held in order to prevent a lookup in the hash queue from finding 76 * the smbnode and using it and assuming that the smbnode is not on the 77 * freelist. The lookup in the hash queue will have the hash queue 78 * locked, either exclusive or shared. 79 * 80 * The vnode reference count for each smbnode is not allowed to drop 81 * below 1. This prevents external entities, such as the VM 82 * subsystem, from acquiring references to vnodes already on the 83 * freelist and then trying to place them back on the freelist 84 * when their reference is released. This means that the when an 85 * smbnode is looked up in the hash queues, then either the smbnode 86 * is removed from the freelist and that reference is tranfered to 87 * the new reference or the vnode reference count must be incremented 88 * accordingly. The mutex for the freelist must be held in order to 89 * accurately test to see if the smbnode is on the freelist or not. 90 * The hash queue lock might be held shared and it is possible that 91 * two different threads may race to remove the smbnode from the 92 * freelist. This race can be resolved by holding the mutex for the 93 * freelist. Please note that the mutex for the freelist does not 94 * need to held if the smbnode is not on the freelist. It can not be 95 * placed on the freelist due to the requirement that the thread 96 * putting the smbnode on the freelist must hold the exclusive lock 97 * to the hash queue and the thread doing the lookup in the hash 98 * queue is holding either a shared or exclusive lock to the hash 99 * queue. 100 * 101 * The lock ordering is: 102 * 103 * hash bucket lock -> vnode lock 104 * hash bucket lock -> freelist lock 105 */ 106 static rhashq_t *smbtable; 107 108 static kmutex_t smbfreelist_lock; 109 static smbnode_t *smbfreelist = NULL; 110 static ulong_t smbnodenew = 0; 111 long nsmbnode = 0; 112 113 static int smbtablesize; 114 static int smbtablemask; 115 static int smbhashlen = 4; 116 117 static struct kmem_cache *smbnode_cache; 118 119 /* 120 * Mutex to protect the following variables: 121 * smbfs_major 122 * smbfs_minor 123 */ 124 kmutex_t smbfs_minor_lock; 125 int smbfs_major; 126 int smbfs_minor; 127 128 /* 129 * Local functions. 130 * Not static, to aid debugging. 131 */ 132 void smb_rmfree(smbnode_t *); 133 void smbinactive(smbnode_t *); 134 void smb_rmhash_locked(smbnode_t *); 135 void smb_destroy_node(smbnode_t *); 136 void smbfs_kmem_reclaim(void *cdrarg); 137 138 smbnode_t *smbhashfind(struct vfs *, const char *, int, rhashq_t *); 139 static vnode_t *make_smbnode(vfs_t *, char *, int, rhashq_t *, int *); 140 141 142 /* 143 * Free the resources associated with an smbnode. 144 * Note: This is different from smbfs_inactive 145 * 146 * NFS: nfs_subr.c:rinactive 147 */ 148 void 149 smbinactive(smbnode_t *np) 150 { 151 152 if (np->n_rpath) { 153 kmem_free(np->n_rpath, np->n_rplen + 1); 154 np->n_rpath = NULL; 155 } 156 } 157 158 /* 159 * Return a vnode for the given CIFS directory and filename. 160 * If no smbnode exists for this fhandle, create one and put it 161 * into the hash queues. If the smbnode for this fhandle 162 * already exists, return it. 163 * 164 * Note: make_smbnode() may upgrade the hash bucket lock to exclusive. 165 * 166 * NFS: nfs_subr.c:makenfsnode 167 */ 168 vnode_t * 169 smbfs_make_node( 170 vfs_t *vfsp, 171 const char *dir, 172 int dirlen, 173 const char *name, 174 int nmlen, 175 char sep, 176 struct smbfattr *fap) 177 { 178 char *rpath; 179 int rplen, idx; 180 uint32_t hash; 181 rhashq_t *rhtp; 182 smbnode_t *np; 183 vnode_t *vp; 184 #ifdef NOT_YET 185 vattr_t va; 186 #endif 187 int newnode; 188 189 /* 190 * Build the full path name in allocated memory 191 * so we have it for lookup, etc. Note the 192 * special case at the root (dir=="\\", dirlen==1) 193 * where this does not add a slash separator. 194 * To do that would make a double slash, which 195 * has special meaning in CIFS. 196 * 197 * ToDo: Would prefer to allocate a remote path 198 * only when we will create a new node. 199 */ 200 if (dirlen <= 1 && sep == '\\') 201 sep = '\0'; /* no slash */ 202 203 /* Compute the length of rpath and allocate. */ 204 rplen = dirlen; 205 if (sep) 206 rplen++; 207 if (name) 208 rplen += nmlen; 209 210 rpath = kmem_alloc(rplen + 1, KM_SLEEP); 211 212 /* Fill in rpath */ 213 bcopy(dir, rpath, dirlen); 214 if (sep) 215 rpath[dirlen++] = sep; 216 if (name) 217 bcopy(name, &rpath[dirlen], nmlen); 218 rpath[rplen] = 0; 219 220 hash = smbfs_hash(rpath, rplen); 221 idx = hash & smbtablemask; 222 rhtp = &smbtable[idx]; 223 rw_enter(&rhtp->r_lock, RW_READER); 224 225 vp = make_smbnode(vfsp, rpath, rplen, rhtp, &newnode); 226 np = VTOSMB(vp); 227 np->n_ino = hash; /* Equivalent to: smbfs_getino() */ 228 229 /* 230 * Note: make_smbnode keeps a reference to rpath in 231 * new nodes it creates, so only free when we found 232 * an existing node. 233 */ 234 if (!newnode) { 235 kmem_free(rpath, rplen + 1); 236 rpath = NULL; 237 } 238 239 if (fap == NULL) { 240 #ifdef NOT_YET 241 if (newnode) { 242 PURGE_ATTRCACHE(vp); 243 } 244 #endif 245 rw_exit(&rhtp->r_lock); 246 return (vp); 247 } 248 249 /* Have SMB attributes. */ 250 vp->v_type = (fap->fa_attr & SMB_FA_DIR) ? VDIR : VREG; 251 /* XXX: np->n_ino = fap->fa_ino; see above */ 252 np->r_size = fap->fa_size; 253 /* XXX: np->r_attr = *fap here instead? */ 254 np->r_atime = fap->fa_atime; 255 np->r_ctime = fap->fa_ctime; 256 np->r_mtime = fap->fa_mtime; 257 258 #ifdef NOT_YET 259 if (!newnode) { 260 rw_exit(&rhtp->r_lock); 261 (void) nfs_cache_fattr(vp, attr, &va, t, cr); 262 } else { 263 if (attr->na_type < NFNON || attr->na_type > NFSOC) 264 vp->v_type = VBAD; 265 else 266 vp->v_type = n2v_type(attr); 267 vp->v_rdev = makedevice(attr->rdev.specdata1, 268 attr->rdev.specdata2); 269 nfs_attrcache(vp, attr, t); 270 rw_exit(&rhtp->r_lock); 271 } 272 #else 273 rw_exit(&rhtp->r_lock); 274 #endif 275 276 return (vp); 277 } 278 279 /* 280 * NFS: nfs_subr.c:rtablehash 281 * We use smbfs_hash(). 282 */ 283 284 /* 285 * Find or create an smbnode. 286 * NFS: nfs_subr.c:make_rnode 287 */ 288 static vnode_t * 289 make_smbnode( 290 vfs_t *vfsp, 291 char *rpath, 292 int rplen, 293 rhashq_t *rhtp, 294 int *newnode) 295 { 296 smbnode_t *np; 297 smbnode_t *tnp; 298 vnode_t *vp; 299 smbmntinfo_t *mi; 300 301 ASSERT(RW_READ_HELD(&rhtp->r_lock)); 302 303 mi = VFTOSMI(vfsp); 304 305 start: 306 np = smbhashfind(vfsp, rpath, rplen, rhtp); 307 if (np != NULL) { 308 vp = SMBTOV(np); 309 *newnode = 0; 310 return (vp); 311 } 312 313 /* Note: will retake this lock below. */ 314 rw_exit(&rhtp->r_lock); 315 316 /* 317 * see if we can find something on the freelist 318 */ 319 mutex_enter(&smbfreelist_lock); 320 if (smbfreelist != NULL && smbnodenew >= nsmbnode) { 321 np = smbfreelist; 322 smb_rmfree(np); 323 mutex_exit(&smbfreelist_lock); 324 325 vp = SMBTOV(np); 326 327 if (np->r_flags & RHASHED) { 328 rw_enter(&np->r_hashq->r_lock, RW_WRITER); 329 mutex_enter(&vp->v_lock); 330 if (vp->v_count > 1) { 331 vp->v_count--; 332 mutex_exit(&vp->v_lock); 333 rw_exit(&np->r_hashq->r_lock); 334 rw_enter(&rhtp->r_lock, RW_READER); 335 goto start; 336 } 337 mutex_exit(&vp->v_lock); 338 smb_rmhash_locked(np); 339 rw_exit(&np->r_hashq->r_lock); 340 } 341 342 smbinactive(np); 343 344 mutex_enter(&vp->v_lock); 345 if (vp->v_count > 1) { 346 vp->v_count--; 347 mutex_exit(&vp->v_lock); 348 rw_enter(&rhtp->r_lock, RW_READER); 349 goto start; 350 } 351 mutex_exit(&vp->v_lock); 352 vn_invalid(vp); 353 /* 354 * destroy old locks before bzero'ing and 355 * recreating the locks below. 356 */ 357 smbfs_rw_destroy(&np->r_rwlock); 358 smbfs_rw_destroy(&np->r_lkserlock); 359 mutex_destroy(&np->r_statelock); 360 cv_destroy(&np->r_cv); 361 /* 362 * Make sure that if smbnode is recycled then 363 * VFS count is decremented properly before 364 * reuse. 365 */ 366 VFS_RELE(vp->v_vfsp); 367 vn_reinit(vp); 368 } else { 369 /* 370 * allocate and initialize a new smbnode 371 */ 372 vnode_t *new_vp; 373 374 mutex_exit(&smbfreelist_lock); 375 376 np = kmem_cache_alloc(smbnode_cache, KM_SLEEP); 377 new_vp = vn_alloc(KM_SLEEP); 378 379 atomic_add_long((ulong_t *)&smbnodenew, 1); 380 vp = new_vp; 381 } 382 383 /* Initialize smbnode_t */ 384 bzero(np, sizeof (*np)); 385 386 smbfs_rw_init(&np->r_rwlock, NULL, RW_DEFAULT, NULL); 387 smbfs_rw_init(&np->r_lkserlock, NULL, RW_DEFAULT, NULL); 388 mutex_init(&np->r_statelock, NULL, MUTEX_DEFAULT, NULL); 389 cv_init(&np->r_cv, NULL, CV_DEFAULT, NULL); 390 /* cv_init(&np->r_commit.c_cv, NULL, CV_DEFAULT, NULL); */ 391 392 np->r_vnode = vp; 393 np->n_mount = mi; 394 np->r_hashq = rhtp; 395 np->n_fid = SMB_FID_UNUSED; 396 np->n_uid = UID_NOBODY; 397 np->n_gid = GID_NOBODY; 398 /* XXX: make attributes stale? */ 399 400 #if 0 /* XXX dircache */ 401 /* 402 * We don't know if it's a directory yet. 403 * Let the caller do this? XXX 404 */ 405 avl_create(&np->r_dir, compar, sizeof (rddir_cache), 406 offsetof(rddir_cache, tree)); 407 #endif 408 409 /* Now fill in the vnode. */ 410 vn_setops(vp, smbfs_vnodeops); 411 vp->v_data = (caddr_t)np; 412 VFS_HOLD(vfsp); 413 vp->v_vfsp = vfsp; 414 vp->v_type = VNON; 415 416 /* 417 * There is a race condition if someone else 418 * alloc's the smbnode while no locks are held, so we 419 * check again and recover if found. 420 */ 421 rw_enter(&rhtp->r_lock, RW_WRITER); 422 tnp = smbhashfind(vfsp, rpath, rplen, rhtp); 423 if (tnp != NULL) { 424 vp = SMBTOV(tnp); 425 *newnode = 0; 426 rw_exit(&rhtp->r_lock); 427 /* The node we were building goes on the free list. */ 428 smb_addfree(np); 429 rw_enter(&rhtp->r_lock, RW_READER); 430 return (vp); 431 } 432 433 /* 434 * Hash search identifies nodes by the full pathname, 435 * so store that before linking in the hash list. 436 * Note: caller allocates the rpath, and knows 437 * about this reference when *newnode is set. 438 */ 439 np->n_rpath = rpath; 440 np->n_rplen = rplen; 441 442 smb_addhash(np); 443 *newnode = 1; 444 return (vp); 445 } 446 447 /* 448 * smb_addfree 449 * Put a smbnode on the free list. 450 * 451 * Normally called by smbfs_inactive, but also 452 * called in here during cleanup operations. 453 * 454 * Smbnodes which were allocated above and beyond the normal limit 455 * are immediately freed. 456 * 457 * NFS: nfs_subr.c:rp_addfree 458 */ 459 void 460 smb_addfree(smbnode_t *np) 461 { 462 vnode_t *vp; 463 struct vfs *vfsp; 464 465 vp = SMBTOV(np); 466 ASSERT(vp->v_count >= 1); 467 ASSERT(np->r_freef == NULL && np->r_freeb == NULL); 468 469 /* 470 * If we have too many smbnodes allocated and there are no 471 * references to this smbnode, or if the smbnode is no longer 472 * accessible by it does not reside in the hash queues, 473 * or if an i/o error occurred while writing to the file, 474 * then just free it instead of putting it on the smbnode 475 * freelist. 476 */ 477 vfsp = vp->v_vfsp; 478 if (((smbnodenew > nsmbnode || !(np->r_flags & RHASHED) || 479 np->r_error || (vfsp->vfs_flag & VFS_UNMOUNTED)) && 480 np->r_count == 0)) { 481 if (np->r_flags & RHASHED) { 482 rw_enter(&np->r_hashq->r_lock, RW_WRITER); 483 mutex_enter(&vp->v_lock); 484 if (vp->v_count > 1) { 485 vp->v_count--; 486 mutex_exit(&vp->v_lock); 487 rw_exit(&np->r_hashq->r_lock); 488 return; 489 /* 490 * Will get another call later, 491 * via smbfs_inactive. 492 */ 493 } 494 mutex_exit(&vp->v_lock); 495 smb_rmhash_locked(np); 496 rw_exit(&np->r_hashq->r_lock); 497 } 498 499 smbinactive(np); 500 501 /* 502 * Recheck the vnode reference count. We need to 503 * make sure that another reference has not been 504 * acquired while we were not holding v_lock. The 505 * smbnode is not in the smbnode hash queues, so the 506 * only way for a reference to have been acquired 507 * is for a VOP_PUTPAGE because the smbnode was marked 508 * with RDIRTY or for a modified page. This 509 * reference may have been acquired before our call 510 * to smbinactive. The i/o may have been completed, 511 * thus allowing smbinactive to complete, but the 512 * reference to the vnode may not have been released 513 * yet. In any case, the smbnode can not be destroyed 514 * until the other references to this vnode have been 515 * released. The other references will take care of 516 * either destroying the smbnode or placing it on the 517 * smbnode freelist. If there are no other references, 518 * then the smbnode may be safely destroyed. 519 */ 520 mutex_enter(&vp->v_lock); 521 if (vp->v_count > 1) { 522 vp->v_count--; 523 mutex_exit(&vp->v_lock); 524 return; 525 } 526 mutex_exit(&vp->v_lock); 527 528 smb_destroy_node(np); 529 return; 530 } 531 /* 532 * Lock the hash queue and then recheck the reference count 533 * to ensure that no other threads have acquired a reference 534 * to indicate that the smbnode should not be placed on the 535 * freelist. If another reference has been acquired, then 536 * just release this one and let the other thread complete 537 * the processing of adding this smbnode to the freelist. 538 */ 539 rw_enter(&np->r_hashq->r_lock, RW_WRITER); 540 541 mutex_enter(&vp->v_lock); 542 if (vp->v_count > 1) { 543 vp->v_count--; 544 mutex_exit(&vp->v_lock); 545 rw_exit(&np->r_hashq->r_lock); 546 return; 547 } 548 mutex_exit(&vp->v_lock); 549 550 /* 551 * If there is no cached data or metadata for this file, then 552 * put the smbnode on the front of the freelist so that it will 553 * be reused before other smbnodes which may have cached data or 554 * metadata associated with them. 555 */ 556 mutex_enter(&smbfreelist_lock); 557 if (smbfreelist == NULL) { 558 np->r_freef = np; 559 np->r_freeb = np; 560 smbfreelist = np; 561 } else { 562 np->r_freef = smbfreelist; 563 np->r_freeb = smbfreelist->r_freeb; 564 smbfreelist->r_freeb->r_freef = np; 565 smbfreelist->r_freeb = np; 566 } 567 mutex_exit(&smbfreelist_lock); 568 569 rw_exit(&np->r_hashq->r_lock); 570 } 571 572 /* 573 * Remove an smbnode from the free list. 574 * 575 * The caller must be holding smbfreelist_lock and the smbnode 576 * must be on the freelist. 577 * 578 * NFS: nfs_subr.c:rp_rmfree 579 */ 580 void 581 smb_rmfree(smbnode_t *np) 582 { 583 584 ASSERT(MUTEX_HELD(&smbfreelist_lock)); 585 ASSERT(np->r_freef != NULL && np->r_freeb != NULL); 586 587 if (np == smbfreelist) { 588 smbfreelist = np->r_freef; 589 if (np == smbfreelist) 590 smbfreelist = NULL; 591 } 592 593 np->r_freeb->r_freef = np->r_freef; 594 np->r_freef->r_freeb = np->r_freeb; 595 596 np->r_freef = np->r_freeb = NULL; 597 } 598 599 /* 600 * Put a smbnode in the hash table. 601 * 602 * The caller must be holding the exclusive hash queue lock. 603 * 604 * NFS: nfs_subr.c:rp_addhash 605 */ 606 void 607 smb_addhash(smbnode_t *np) 608 { 609 610 ASSERT(RW_WRITE_HELD(&np->r_hashq->r_lock)); 611 ASSERT(!(np->r_flags & RHASHED)); 612 613 np->r_hashf = np->r_hashq->r_hashf; 614 np->r_hashq->r_hashf = np; 615 np->r_hashb = (smbnode_t *)np->r_hashq; 616 np->r_hashf->r_hashb = np; 617 618 mutex_enter(&np->r_statelock); 619 np->r_flags |= RHASHED; 620 mutex_exit(&np->r_statelock); 621 } 622 623 /* 624 * Remove a smbnode from the hash table. 625 * 626 * The caller must be holding the hash queue lock. 627 * 628 * NFS: nfs_subr.c:rp_rmhash_locked 629 */ 630 void 631 smb_rmhash_locked(smbnode_t *np) 632 { 633 634 ASSERT(RW_WRITE_HELD(&np->r_hashq->r_lock)); 635 ASSERT(np->r_flags & RHASHED); 636 637 np->r_hashb->r_hashf = np->r_hashf; 638 np->r_hashf->r_hashb = np->r_hashb; 639 640 mutex_enter(&np->r_statelock); 641 np->r_flags &= ~RHASHED; 642 mutex_exit(&np->r_statelock); 643 } 644 645 /* 646 * Remove a smbnode from the hash table. 647 * 648 * The caller must not be holding the hash queue lock. 649 */ 650 void 651 smb_rmhash(smbnode_t *np) 652 { 653 654 rw_enter(&np->r_hashq->r_lock, RW_WRITER); 655 smb_rmhash_locked(np); 656 rw_exit(&np->r_hashq->r_lock); 657 } 658 659 /* 660 * Lookup a smbnode by fhandle. 661 * 662 * The caller must be holding the hash queue lock, either shared or exclusive. 663 * XXX: make static? 664 * 665 * NFS: nfs_subr.c:rfind 666 */ 667 smbnode_t * 668 smbhashfind( 669 struct vfs *vfsp, 670 const char *rpath, 671 int rplen, 672 rhashq_t *rhtp) 673 { 674 smbnode_t *np; 675 vnode_t *vp; 676 677 ASSERT(RW_LOCK_HELD(&rhtp->r_lock)); 678 679 for (np = rhtp->r_hashf; np != (smbnode_t *)rhtp; np = np->r_hashf) { 680 vp = SMBTOV(np); 681 if (vp->v_vfsp == vfsp && 682 np->n_rplen == rplen && 683 bcmp(np->n_rpath, rpath, rplen) == 0) { 684 /* 685 * remove smbnode from free list, if necessary. 686 */ 687 if (np->r_freef != NULL) { 688 mutex_enter(&smbfreelist_lock); 689 /* 690 * If the smbnode is on the freelist, 691 * then remove it and use that reference 692 * as the new reference. Otherwise, 693 * need to increment the reference count. 694 */ 695 if (np->r_freef != NULL) { 696 smb_rmfree(np); 697 mutex_exit(&smbfreelist_lock); 698 } else { 699 mutex_exit(&smbfreelist_lock); 700 VN_HOLD(vp); 701 } 702 } else 703 VN_HOLD(vp); 704 return (np); 705 } 706 } 707 return (NULL); 708 } 709 710 #ifdef SMB_VNODE_DEBUG 711 int smb_check_table_debug = 1; 712 #else /* SMB_VNODE_DEBUG */ 713 int smb_check_table_debug = 0; 714 #endif /* SMB_VNODE_DEBUG */ 715 716 717 /* 718 * Return 1 if there is a active vnode belonging to this vfs in the 719 * smbtable cache. 720 * 721 * Several of these checks are done without holding the usual 722 * locks. This is safe because destroy_smbtable(), smb_addfree(), 723 * etc. will redo the necessary checks before actually destroying 724 * any smbnodes. 725 * 726 * NFS: nfs_subr.c:check_rtable 727 * 728 * Debugging changes here relative to NFS. 729 * Relatively harmless, so left 'em in. 730 */ 731 int 732 smb_check_table(struct vfs *vfsp, smbnode_t *rtnp) 733 { 734 smbnode_t *np; 735 vnode_t *vp; 736 int index; 737 int busycnt = 0; 738 739 for (index = 0; index < smbtablesize; index++) { 740 rw_enter(&smbtable[index].r_lock, RW_READER); 741 for (np = smbtable[index].r_hashf; 742 np != (smbnode_t *)(&smbtable[index]); 743 np = np->r_hashf) { 744 if (np == rtnp) 745 continue; /* skip the root */ 746 vp = SMBTOV(np); 747 if (vp->v_vfsp != vfsp) 748 continue; /* skip other mount */ 749 750 /* Now the 'busy' checks: */ 751 /* Not on the free list? */ 752 if (np->r_freef == NULL) { 753 SMBVDEBUG("!r_freef: node=0x%p, v_path=%s\n", 754 (void *)np, vp->v_path); 755 busycnt++; 756 } 757 758 /* Has dirty pages? */ 759 if (vn_has_cached_data(vp) && 760 (np->r_flags & RDIRTY)) { 761 SMBVDEBUG("is dirty: node=0x%p, v_path=%s\n", 762 (void *)np, vp->v_path); 763 busycnt++; 764 } 765 766 /* Other refs? (not reflected in v_count) */ 767 if (np->r_count > 0) { 768 SMBVDEBUG("+r_count: node=0x%p, v_path=%s\n", 769 (void *)np, vp->v_path); 770 busycnt++; 771 } 772 773 if (busycnt && !smb_check_table_debug) 774 break; 775 776 } 777 rw_exit(&smbtable[index].r_lock); 778 } 779 return (busycnt); 780 } 781 782 /* 783 * Destroy inactive vnodes from the hash queues which belong to this 784 * vfs. It is essential that we destroy all inactive vnodes during a 785 * forced unmount as well as during a normal unmount. 786 * 787 * NFS: nfs_subr.c:destroy_rtable 788 */ 789 void 790 smbfs_destroy_table(struct vfs *vfsp) 791 { 792 int index; 793 smbnode_t *np; 794 smbnode_t *rlist; 795 smbnode_t *r_hashf; 796 vnode_t *vp; 797 798 rlist = NULL; 799 800 for (index = 0; index < smbtablesize; index++) { 801 rw_enter(&smbtable[index].r_lock, RW_WRITER); 802 for (np = smbtable[index].r_hashf; 803 np != (smbnode_t *)(&smbtable[index]); 804 np = r_hashf) { 805 /* save the hash pointer before destroying */ 806 r_hashf = np->r_hashf; 807 vp = SMBTOV(np); 808 if (vp->v_vfsp == vfsp) { 809 mutex_enter(&smbfreelist_lock); 810 if (np->r_freef != NULL) { 811 smb_rmfree(np); 812 mutex_exit(&smbfreelist_lock); 813 smb_rmhash_locked(np); 814 np->r_hashf = rlist; 815 rlist = np; 816 } else 817 mutex_exit(&smbfreelist_lock); 818 } 819 } 820 rw_exit(&smbtable[index].r_lock); 821 } 822 823 for (np = rlist; np != NULL; np = rlist) { 824 rlist = np->r_hashf; 825 /* 826 * This call to smb_addfree will end up destroying the 827 * smbnode, but in a safe way with the appropriate set 828 * of checks done. 829 */ 830 smb_addfree(np); 831 } 832 833 } 834 835 /* 836 * This routine destroys all the resources associated with the smbnode 837 * and then the smbnode itself. 838 * 839 * NFS: nfs_subr.c:destroy_rnode 840 */ 841 void 842 smb_destroy_node(smbnode_t *np) 843 { 844 vnode_t *vp; 845 vfs_t *vfsp; 846 847 vp = SMBTOV(np); 848 vfsp = vp->v_vfsp; 849 850 ASSERT(vp->v_count == 1); 851 ASSERT(np->r_count == 0); 852 ASSERT(np->r_mapcnt == 0); 853 ASSERT(!(np->r_flags & RHASHED)); 854 ASSERT(np->r_freef == NULL && np->r_freeb == NULL); 855 atomic_add_long((ulong_t *)&smbnodenew, -1); 856 vn_invalid(vp); 857 vn_free(vp); 858 kmem_cache_free(smbnode_cache, np); 859 VFS_RELE(vfsp); 860 } 861 862 /* rflush? */ 863 /* access cache */ 864 /* client handles */ 865 866 /* 867 * initialize resources that are used by smbfs_subr.c 868 * this is called from the _init() routine (by the way of smbfs_clntinit()) 869 * 870 * allocate and initialze smbfs hash table 871 * NFS: nfs_subr.c:nfs_subrinit 872 */ 873 int 874 smbfs_subrinit(void) 875 { 876 int i; 877 ulong_t nsmbnode_max; 878 879 /* 880 * Allocate and initialize the smbnode hash queues 881 */ 882 if (nsmbnode <= 0) 883 nsmbnode = ncsize; /* dnlc.h */ 884 nsmbnode_max = (ulong_t)((kmem_maxavail() >> 2) / 885 sizeof (struct smbnode)); 886 if (nsmbnode > nsmbnode_max || (nsmbnode == 0 && ncsize == 0)) { 887 zcmn_err(GLOBAL_ZONEID, CE_NOTE, 888 "setting nsmbnode to max value of %ld", nsmbnode_max); 889 nsmbnode = nsmbnode_max; 890 } 891 892 smbtablesize = 1 << highbit(nsmbnode / smbhashlen); 893 smbtablemask = smbtablesize - 1; 894 smbtable = kmem_alloc(smbtablesize * sizeof (*smbtable), KM_SLEEP); 895 for (i = 0; i < smbtablesize; i++) { 896 smbtable[i].r_hashf = (smbnode_t *)(&smbtable[i]); 897 smbtable[i].r_hashb = (smbnode_t *)(&smbtable[i]); 898 rw_init(&smbtable[i].r_lock, NULL, RW_DEFAULT, NULL); 899 } 900 smbnode_cache = kmem_cache_create("smbnode_cache", sizeof (smbnode_t), 901 0, NULL, NULL, smbfs_kmem_reclaim, NULL, NULL, 0); 902 903 /* 904 * Initialize the various mutexes and reader/writer locks 905 */ 906 mutex_init(&smbfreelist_lock, NULL, MUTEX_DEFAULT, NULL); 907 mutex_init(&smbfs_minor_lock, NULL, MUTEX_DEFAULT, NULL); 908 909 /* 910 * Assign unique major number for all smbfs mounts 911 */ 912 if ((smbfs_major = getudev()) == -1) { 913 zcmn_err(GLOBAL_ZONEID, CE_WARN, 914 "smbfs: init: can't get unique device number"); 915 smbfs_major = 0; 916 } 917 smbfs_minor = 0; 918 919 return (0); 920 } 921 922 /* 923 * free smbfs hash table, etc. 924 * NFS: nfs_subr.c:nfs_subrfini 925 */ 926 void 927 smbfs_subrfini(void) 928 { 929 int i; 930 931 /* 932 * Deallocate the smbnode hash queues 933 */ 934 kmem_cache_destroy(smbnode_cache); 935 936 for (i = 0; i < smbtablesize; i++) 937 rw_destroy(&smbtable[i].r_lock); 938 kmem_free(smbtable, smbtablesize * sizeof (*smbtable)); 939 940 /* 941 * Destroy the various mutexes and reader/writer locks 942 */ 943 mutex_destroy(&smbfreelist_lock); 944 mutex_destroy(&smbfs_minor_lock); 945 } 946 947 /* rddir_cache ? */ 948 949 /* 950 * Support functions for smbfs_kmem_reclaim 951 */ 952 953 static int 954 smbfs_node_reclaim(void) 955 { 956 int freed; 957 smbnode_t *np; 958 vnode_t *vp; 959 960 freed = 0; 961 mutex_enter(&smbfreelist_lock); 962 while ((np = smbfreelist) != NULL) { 963 smb_rmfree(np); 964 mutex_exit(&smbfreelist_lock); 965 if (np->r_flags & RHASHED) { 966 vp = SMBTOV(np); 967 rw_enter(&np->r_hashq->r_lock, RW_WRITER); 968 mutex_enter(&vp->v_lock); 969 if (vp->v_count > 1) { 970 vp->v_count--; 971 mutex_exit(&vp->v_lock); 972 rw_exit(&np->r_hashq->r_lock); 973 mutex_enter(&smbfreelist_lock); 974 continue; 975 } 976 mutex_exit(&vp->v_lock); 977 smb_rmhash_locked(np); 978 rw_exit(&np->r_hashq->r_lock); 979 } 980 /* 981 * This call to smb_addfree will end up destroying the 982 * smbnode, but in a safe way with the appropriate set 983 * of checks done. 984 */ 985 smb_addfree(np); 986 mutex_enter(&smbfreelist_lock); 987 } 988 mutex_exit(&smbfreelist_lock); 989 return (freed); 990 } 991 992 /* 993 * Called by kmem_cache_alloc ask us if we could 994 * "Please give back some memory!" 995 * 996 * Todo: dump nodes from the free list? 997 */ 998 /*ARGSUSED*/ 999 void 1000 smbfs_kmem_reclaim(void *cdrarg) 1001 { 1002 (void) smbfs_node_reclaim(); 1003 } 1004 1005 /* nfs failover stuff */ 1006 /* nfs_rw_xxx - see smbfs_rwlock.c */ 1007