1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 * 25 * Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T. 26 * All rights reserved. 27 */ 28 29 #pragma ident "%Z%%M% %I% %E% SMI" 30 31 /* 32 * Node hash implementation borrowed from NFS. 33 * See: uts/common/fs/nfs/nfs_subr.c 34 */ 35 36 #include <sys/param.h> 37 #include <sys/systm.h> 38 #include <sys/time.h> 39 #include <sys/vnode.h> 40 #include <sys/bitmap.h> 41 #include <sys/dnlc.h> 42 #include <sys/kmem.h> 43 #include <sys/sunddi.h> 44 45 #ifdef APPLE 46 #include <sys/smb_apple.h> 47 #include <sys/utfconv.h> 48 #include <sys/smb_iconv.h> 49 #else 50 #include <netsmb/smb_osdep.h> 51 #endif 52 53 #include <netsmb/smb.h> 54 #include <netsmb/smb_conn.h> 55 #include <netsmb/smb_subr.h> 56 #include <netsmb/smb_rq.h> 57 58 #include <smbfs/smbfs.h> 59 #include <smbfs/smbfs_node.h> 60 #include <smbfs/smbfs_subr.h> 61 62 /* 63 * The hash queues for the access to active and cached smbnodes 64 * are organized as doubly linked lists. A reader/writer lock 65 * for each hash bucket is used to control access and to synchronize 66 * lookups, additions, and deletions from the hash queue. 67 * 68 * The smbnode freelist is organized as a doubly linked list with 69 * a head pointer. Additions and deletions are synchronized via 70 * a single mutex. 71 * 72 * In order to add an smbnode to the free list, it must be hashed into 73 * a hash queue and the exclusive lock to the hash queue be held. 74 * If an smbnode is not hashed into a hash queue, then it is destroyed 75 * because it represents no valuable information that can be reused 76 * about the file. The exclusive lock to the hash queue must be 77 * held in order to prevent a lookup in the hash queue from finding 78 * the smbnode and using it and assuming that the smbnode is not on the 79 * freelist. The lookup in the hash queue will have the hash queue 80 * locked, either exclusive or shared. 81 * 82 * The vnode reference count for each smbnode is not allowed to drop 83 * below 1. This prevents external entities, such as the VM 84 * subsystem, from acquiring references to vnodes already on the 85 * freelist and then trying to place them back on the freelist 86 * when their reference is released. This means that the when an 87 * smbnode is looked up in the hash queues, then either the smbnode 88 * is removed from the freelist and that reference is tranfered to 89 * the new reference or the vnode reference count must be incremented 90 * accordingly. The mutex for the freelist must be held in order to 91 * accurately test to see if the smbnode is on the freelist or not. 92 * The hash queue lock might be held shared and it is possible that 93 * two different threads may race to remove the smbnode from the 94 * freelist. This race can be resolved by holding the mutex for the 95 * freelist. Please note that the mutex for the freelist does not 96 * need to held if the smbnode is not on the freelist. It can not be 97 * placed on the freelist due to the requirement that the thread 98 * putting the smbnode on the freelist must hold the exclusive lock 99 * to the hash queue and the thread doing the lookup in the hash 100 * queue is holding either a shared or exclusive lock to the hash 101 * queue. 102 * 103 * The lock ordering is: 104 * 105 * hash bucket lock -> vnode lock 106 * hash bucket lock -> freelist lock 107 */ 108 static rhashq_t *smbtable; 109 110 static kmutex_t smbfreelist_lock; 111 static smbnode_t *smbfreelist = NULL; 112 static ulong_t smbnodenew = 0; 113 long nsmbnode = 0; 114 115 static int smbtablesize; 116 static int smbtablemask; 117 static int smbhashlen = 4; 118 119 static struct kmem_cache *smbnode_cache; 120 121 /* 122 * Mutex to protect the following variables: 123 * smbfs_major 124 * smbfs_minor 125 */ 126 kmutex_t smbfs_minor_lock; 127 int smbfs_major; 128 int smbfs_minor; 129 130 /* 131 * Local functions. 132 * Not static, to aid debugging. 133 */ 134 void smb_rmfree(smbnode_t *); 135 void smbinactive(smbnode_t *); 136 void smb_rmhash_locked(smbnode_t *); 137 void smb_destroy_node(smbnode_t *); 138 void smbfs_kmem_reclaim(void *cdrarg); 139 140 smbnode_t *smbhashfind(struct vfs *, const char *, int, rhashq_t *); 141 static vnode_t *make_smbnode(vfs_t *, char *, int, rhashq_t *, int *); 142 143 144 /* 145 * Free the resources associated with an smbnode. 146 * Note: This is different from smbfs_inactive 147 * 148 * NFS: nfs_subr.c:rinactive 149 */ 150 void 151 smbinactive(smbnode_t *np) 152 { 153 154 if (np->n_rpath) { 155 kmem_free(np->n_rpath, np->n_rplen + 1); 156 np->n_rpath = NULL; 157 } 158 } 159 160 /* 161 * Return a vnode for the given CIFS directory and filename. 162 * If no smbnode exists for this fhandle, create one and put it 163 * into the hash queues. If the smbnode for this fhandle 164 * already exists, return it. 165 * 166 * Note: make_smbnode() may upgrade the hash bucket lock to exclusive. 167 * 168 * NFS: nfs_subr.c:makenfsnode 169 */ 170 vnode_t * 171 smbfs_make_node( 172 vfs_t *vfsp, 173 const char *dir, 174 int dirlen, 175 const char *name, 176 int nmlen, 177 char sep, 178 struct smbfattr *fap) 179 { 180 char *rpath; 181 int rplen, idx; 182 uint32_t hash; 183 rhashq_t *rhtp; 184 smbnode_t *np; 185 vnode_t *vp; 186 #ifdef NOT_YET 187 vattr_t va; 188 #endif 189 int newnode; 190 191 /* 192 * Build the full path name in allocated memory 193 * so we have it for lookup, etc. Note the 194 * special case at the root (dir=="\\", dirlen==1) 195 * where this does not add a slash separator. 196 * To do that would make a double slash, which 197 * has special meaning in CIFS. 198 * 199 * ToDo: Would prefer to allocate a remote path 200 * only when we will create a new node. 201 */ 202 if (dirlen <= 1 && sep == '\\') 203 sep = '\0'; /* no slash */ 204 205 /* Compute the length of rpath and allocate. */ 206 rplen = dirlen; 207 if (sep) 208 rplen++; 209 if (name) 210 rplen += nmlen; 211 212 rpath = kmem_alloc(rplen + 1, KM_SLEEP); 213 214 /* Fill in rpath */ 215 bcopy(dir, rpath, dirlen); 216 if (sep) 217 rpath[dirlen++] = sep; 218 if (name) 219 bcopy(name, &rpath[dirlen], nmlen); 220 rpath[rplen] = 0; 221 222 hash = smbfs_hash(rpath, rplen); 223 idx = hash & smbtablemask; 224 rhtp = &smbtable[idx]; 225 rw_enter(&rhtp->r_lock, RW_READER); 226 227 vp = make_smbnode(vfsp, rpath, rplen, rhtp, &newnode); 228 np = VTOSMB(vp); 229 np->n_ino = hash; /* Equivalent to: smbfs_getino() */ 230 231 /* 232 * Note: make_smbnode keeps a reference to rpath in 233 * new nodes it creates, so only free when we found 234 * an existing node. 235 */ 236 if (!newnode) { 237 kmem_free(rpath, rplen + 1); 238 rpath = NULL; 239 } 240 241 if (fap == NULL) { 242 #ifdef NOT_YET 243 if (newnode) { 244 PURGE_ATTRCACHE(vp); 245 } 246 #endif 247 rw_exit(&rhtp->r_lock); 248 return (vp); 249 } 250 251 /* Have SMB attributes. */ 252 vp->v_type = (fap->fa_attr & SMB_FA_DIR) ? VDIR : VREG; 253 /* XXX: np->n_ino = fap->fa_ino; see above */ 254 np->r_size = fap->fa_size; 255 /* XXX: np->r_attr = *fap here instead? */ 256 np->r_atime = fap->fa_atime; 257 np->r_ctime = fap->fa_mtime; 258 np->r_mtime = fap->fa_ctime; 259 260 #ifdef NOT_YET 261 if (!newnode) { 262 rw_exit(&rhtp->r_lock); 263 (void) nfs_cache_fattr(vp, attr, &va, t, cr); 264 } else { 265 if (attr->na_type < NFNON || attr->na_type > NFSOC) 266 vp->v_type = VBAD; 267 else 268 vp->v_type = n2v_type(attr); 269 vp->v_rdev = makedevice(attr->rdev.specdata1, 270 attr->rdev.specdata2); 271 nfs_attrcache(vp, attr, t); 272 rw_exit(&rhtp->r_lock); 273 } 274 #else 275 rw_exit(&rhtp->r_lock); 276 #endif 277 278 return (vp); 279 } 280 281 /* 282 * NFS: nfs_subr.c:rtablehash 283 * We use smbfs_hash(). 284 */ 285 286 /* 287 * Find or create an smbnode. 288 * NFS: nfs_subr.c:make_rnode 289 */ 290 static vnode_t * 291 make_smbnode( 292 vfs_t *vfsp, 293 char *rpath, 294 int rplen, 295 rhashq_t *rhtp, 296 int *newnode) 297 { 298 smbnode_t *np; 299 smbnode_t *tnp; 300 vnode_t *vp; 301 smbmntinfo_t *mi; 302 303 ASSERT(RW_READ_HELD(&rhtp->r_lock)); 304 305 mi = VFTOSMI(vfsp); 306 307 start: 308 np = smbhashfind(vfsp, rpath, rplen, rhtp); 309 if (np != NULL) { 310 vp = SMBTOV(np); 311 *newnode = 0; 312 return (vp); 313 } 314 315 /* Note: will retake this lock below. */ 316 rw_exit(&rhtp->r_lock); 317 318 /* 319 * see if we can find something on the freelist 320 */ 321 mutex_enter(&smbfreelist_lock); 322 if (smbfreelist != NULL && smbnodenew >= nsmbnode) { 323 np = smbfreelist; 324 smb_rmfree(np); 325 mutex_exit(&smbfreelist_lock); 326 327 vp = SMBTOV(np); 328 329 if (np->r_flags & RHASHED) { 330 rw_enter(&np->r_hashq->r_lock, RW_WRITER); 331 mutex_enter(&vp->v_lock); 332 if (vp->v_count > 1) { 333 vp->v_count--; 334 mutex_exit(&vp->v_lock); 335 rw_exit(&np->r_hashq->r_lock); 336 rw_enter(&rhtp->r_lock, RW_READER); 337 goto start; 338 } 339 mutex_exit(&vp->v_lock); 340 smb_rmhash_locked(np); 341 rw_exit(&np->r_hashq->r_lock); 342 } 343 344 smbinactive(np); 345 346 mutex_enter(&vp->v_lock); 347 if (vp->v_count > 1) { 348 vp->v_count--; 349 mutex_exit(&vp->v_lock); 350 rw_enter(&rhtp->r_lock, RW_READER); 351 goto start; 352 } 353 mutex_exit(&vp->v_lock); 354 vn_invalid(vp); 355 /* 356 * destroy old locks before bzero'ing and 357 * recreating the locks below. 358 */ 359 smbfs_rw_destroy(&np->r_rwlock); 360 smbfs_rw_destroy(&np->r_lkserlock); 361 mutex_destroy(&np->r_statelock); 362 cv_destroy(&np->r_cv); 363 /* 364 * Make sure that if smbnode is recycled then 365 * VFS count is decremented properly before 366 * reuse. 367 */ 368 VFS_RELE(vp->v_vfsp); 369 vn_reinit(vp); 370 } else { 371 /* 372 * allocate and initialize a new smbnode 373 */ 374 vnode_t *new_vp; 375 376 mutex_exit(&smbfreelist_lock); 377 378 np = kmem_cache_alloc(smbnode_cache, KM_SLEEP); 379 new_vp = vn_alloc(KM_SLEEP); 380 381 atomic_add_long((ulong_t *)&smbnodenew, 1); 382 vp = new_vp; 383 } 384 385 /* Initialize smbnode_t */ 386 bzero(np, sizeof (*np)); 387 388 smbfs_rw_init(&np->r_rwlock, NULL, RW_DEFAULT, NULL); 389 smbfs_rw_init(&np->r_lkserlock, NULL, RW_DEFAULT, NULL); 390 mutex_init(&np->r_statelock, NULL, MUTEX_DEFAULT, NULL); 391 cv_init(&np->r_cv, NULL, CV_DEFAULT, NULL); 392 /* cv_init(&np->r_commit.c_cv, NULL, CV_DEFAULT, NULL); */ 393 394 np->r_vnode = vp; 395 np->n_mount = mi; 396 np->r_hashq = rhtp; 397 np->n_direof = -1; 398 np->n_fid = SMB_FID_UNUSED; 399 np->n_uid = UID_NOBODY; 400 np->n_gid = GID_NOBODY; 401 /* XXX: make attributes stale? */ 402 403 #if 0 /* XXX dircache */ 404 /* 405 * We don't know if it's a directory yet. 406 * Let the caller do this? XXX 407 */ 408 avl_create(&np->r_dir, compar, sizeof (rddir_cache), 409 offsetof(rddir_cache, tree)); 410 #endif 411 412 /* Now fill in the vnode. */ 413 vn_setops(vp, smbfs_vnodeops); 414 vp->v_data = (caddr_t)np; 415 VFS_HOLD(vfsp); 416 vp->v_vfsp = vfsp; 417 vp->v_type = VNON; 418 419 /* 420 * There is a race condition if someone else 421 * alloc's the smbnode while no locks are held, so we 422 * check again and recover if found. 423 */ 424 rw_enter(&rhtp->r_lock, RW_WRITER); 425 tnp = smbhashfind(vfsp, rpath, rplen, rhtp); 426 if (tnp != NULL) { 427 vp = SMBTOV(tnp); 428 *newnode = 0; 429 rw_exit(&rhtp->r_lock); 430 /* The node we were building goes on the free list. */ 431 smb_addfree(np); 432 rw_enter(&rhtp->r_lock, RW_READER); 433 return (vp); 434 } 435 436 /* 437 * Hash search identifies nodes by the full pathname, 438 * so store that before linking in the hash list. 439 * Note: caller allocates the rpath, and knows 440 * about this reference when *newnode is set. 441 */ 442 np->n_rpath = rpath; 443 np->n_rplen = rplen; 444 445 smb_addhash(np); 446 *newnode = 1; 447 return (vp); 448 } 449 450 /* 451 * smb_addfree 452 * Put a smbnode on the free list. 453 * 454 * Normally called by smbfs_inactive, but also 455 * called in here during cleanup operations. 456 * 457 * Smbnodes which were allocated above and beyond the normal limit 458 * are immediately freed. 459 * 460 * NFS: nfs_subr.c:rp_addfree 461 */ 462 void 463 smb_addfree(smbnode_t *np) 464 { 465 vnode_t *vp; 466 struct vfs *vfsp; 467 468 vp = SMBTOV(np); 469 ASSERT(vp->v_count >= 1); 470 ASSERT(np->r_freef == NULL && np->r_freeb == NULL); 471 472 /* 473 * If we have too many smbnodes allocated and there are no 474 * references to this smbnode, or if the smbnode is no longer 475 * accessible by it does not reside in the hash queues, 476 * or if an i/o error occurred while writing to the file, 477 * then just free it instead of putting it on the smbnode 478 * freelist. 479 */ 480 vfsp = vp->v_vfsp; 481 if (((smbnodenew > nsmbnode || !(np->r_flags & RHASHED) || 482 np->r_error || (vfsp->vfs_flag & VFS_UNMOUNTED)) && 483 np->r_count == 0)) { 484 if (np->r_flags & RHASHED) { 485 rw_enter(&np->r_hashq->r_lock, RW_WRITER); 486 mutex_enter(&vp->v_lock); 487 if (vp->v_count > 1) { 488 vp->v_count--; 489 mutex_exit(&vp->v_lock); 490 rw_exit(&np->r_hashq->r_lock); 491 return; 492 /* 493 * Will get another call later, 494 * via smbfs_inactive. 495 */ 496 } 497 mutex_exit(&vp->v_lock); 498 smb_rmhash_locked(np); 499 rw_exit(&np->r_hashq->r_lock); 500 } 501 502 smbinactive(np); 503 504 /* 505 * Recheck the vnode reference count. We need to 506 * make sure that another reference has not been 507 * acquired while we were not holding v_lock. The 508 * smbnode is not in the smbnode hash queues, so the 509 * only way for a reference to have been acquired 510 * is for a VOP_PUTPAGE because the smbnode was marked 511 * with RDIRTY or for a modified page. This 512 * reference may have been acquired before our call 513 * to smbinactive. The i/o may have been completed, 514 * thus allowing smbinactive to complete, but the 515 * reference to the vnode may not have been released 516 * yet. In any case, the smbnode can not be destroyed 517 * until the other references to this vnode have been 518 * released. The other references will take care of 519 * either destroying the smbnode or placing it on the 520 * smbnode freelist. If there are no other references, 521 * then the smbnode may be safely destroyed. 522 */ 523 mutex_enter(&vp->v_lock); 524 if (vp->v_count > 1) { 525 vp->v_count--; 526 mutex_exit(&vp->v_lock); 527 return; 528 } 529 mutex_exit(&vp->v_lock); 530 531 smb_destroy_node(np); 532 return; 533 } 534 /* 535 * Lock the hash queue and then recheck the reference count 536 * to ensure that no other threads have acquired a reference 537 * to indicate that the smbnode should not be placed on the 538 * freelist. If another reference has been acquired, then 539 * just release this one and let the other thread complete 540 * the processing of adding this smbnode to the freelist. 541 */ 542 rw_enter(&np->r_hashq->r_lock, RW_WRITER); 543 544 mutex_enter(&vp->v_lock); 545 if (vp->v_count > 1) { 546 vp->v_count--; 547 mutex_exit(&vp->v_lock); 548 rw_exit(&np->r_hashq->r_lock); 549 return; 550 } 551 mutex_exit(&vp->v_lock); 552 553 /* 554 * If there is no cached data or metadata for this file, then 555 * put the smbnode on the front of the freelist so that it will 556 * be reused before other smbnodes which may have cached data or 557 * metadata associated with them. 558 */ 559 mutex_enter(&smbfreelist_lock); 560 if (smbfreelist == NULL) { 561 np->r_freef = np; 562 np->r_freeb = np; 563 smbfreelist = np; 564 } else { 565 np->r_freef = smbfreelist; 566 np->r_freeb = smbfreelist->r_freeb; 567 smbfreelist->r_freeb->r_freef = np; 568 smbfreelist->r_freeb = np; 569 } 570 mutex_exit(&smbfreelist_lock); 571 572 rw_exit(&np->r_hashq->r_lock); 573 } 574 575 /* 576 * Remove an smbnode from the free list. 577 * 578 * The caller must be holding smbfreelist_lock and the smbnode 579 * must be on the freelist. 580 * 581 * NFS: nfs_subr.c:rp_rmfree 582 */ 583 void 584 smb_rmfree(smbnode_t *np) 585 { 586 587 ASSERT(MUTEX_HELD(&smbfreelist_lock)); 588 ASSERT(np->r_freef != NULL && np->r_freeb != NULL); 589 590 if (np == smbfreelist) { 591 smbfreelist = np->r_freef; 592 if (np == smbfreelist) 593 smbfreelist = NULL; 594 } 595 596 np->r_freeb->r_freef = np->r_freef; 597 np->r_freef->r_freeb = np->r_freeb; 598 599 np->r_freef = np->r_freeb = NULL; 600 } 601 602 /* 603 * Put a smbnode in the hash table. 604 * 605 * The caller must be holding the exclusive hash queue lock. 606 * 607 * NFS: nfs_subr.c:rp_addhash 608 */ 609 void 610 smb_addhash(smbnode_t *np) 611 { 612 613 ASSERT(RW_WRITE_HELD(&np->r_hashq->r_lock)); 614 ASSERT(!(np->r_flags & RHASHED)); 615 616 np->r_hashf = np->r_hashq->r_hashf; 617 np->r_hashq->r_hashf = np; 618 np->r_hashb = (smbnode_t *)np->r_hashq; 619 np->r_hashf->r_hashb = np; 620 621 mutex_enter(&np->r_statelock); 622 np->r_flags |= RHASHED; 623 mutex_exit(&np->r_statelock); 624 } 625 626 /* 627 * Remove a smbnode from the hash table. 628 * 629 * The caller must be holding the hash queue lock. 630 * 631 * NFS: nfs_subr.c:rp_rmhash_locked 632 */ 633 void 634 smb_rmhash_locked(smbnode_t *np) 635 { 636 637 ASSERT(RW_WRITE_HELD(&np->r_hashq->r_lock)); 638 ASSERT(np->r_flags & RHASHED); 639 640 np->r_hashb->r_hashf = np->r_hashf; 641 np->r_hashf->r_hashb = np->r_hashb; 642 643 mutex_enter(&np->r_statelock); 644 np->r_flags &= ~RHASHED; 645 mutex_exit(&np->r_statelock); 646 } 647 648 /* 649 * Remove a smbnode from the hash table. 650 * 651 * The caller must not be holding the hash queue lock. 652 */ 653 void 654 smb_rmhash(smbnode_t *np) 655 { 656 657 rw_enter(&np->r_hashq->r_lock, RW_WRITER); 658 smb_rmhash_locked(np); 659 rw_exit(&np->r_hashq->r_lock); 660 } 661 662 /* 663 * Lookup a smbnode by fhandle. 664 * 665 * The caller must be holding the hash queue lock, either shared or exclusive. 666 * XXX: make static? 667 * 668 * NFS: nfs_subr.c:rfind 669 */ 670 smbnode_t * 671 smbhashfind( 672 struct vfs *vfsp, 673 const char *rpath, 674 int rplen, 675 rhashq_t *rhtp) 676 { 677 smbnode_t *np; 678 vnode_t *vp; 679 680 ASSERT(RW_LOCK_HELD(&rhtp->r_lock)); 681 682 for (np = rhtp->r_hashf; np != (smbnode_t *)rhtp; np = np->r_hashf) { 683 vp = SMBTOV(np); 684 if (vp->v_vfsp == vfsp && 685 np->n_rplen == rplen && 686 bcmp(np->n_rpath, rpath, rplen) == 0) { 687 /* 688 * remove smbnode from free list, if necessary. 689 */ 690 if (np->r_freef != NULL) { 691 mutex_enter(&smbfreelist_lock); 692 /* 693 * If the smbnode is on the freelist, 694 * then remove it and use that reference 695 * as the new reference. Otherwise, 696 * need to increment the reference count. 697 */ 698 if (np->r_freef != NULL) { 699 smb_rmfree(np); 700 mutex_exit(&smbfreelist_lock); 701 } else { 702 mutex_exit(&smbfreelist_lock); 703 VN_HOLD(vp); 704 } 705 } else 706 VN_HOLD(vp); 707 return (np); 708 } 709 } 710 return (NULL); 711 } 712 713 #ifdef SMB_VNODE_DEBUG 714 int smb_check_table_debug = 1; 715 #else /* SMB_VNODE_DEBUG */ 716 int smb_check_table_debug = 0; 717 #endif /* SMB_VNODE_DEBUG */ 718 719 720 /* 721 * Return 1 if there is a active vnode belonging to this vfs in the 722 * smbtable cache. 723 * 724 * Several of these checks are done without holding the usual 725 * locks. This is safe because destroy_smbtable(), smb_addfree(), 726 * etc. will redo the necessary checks before actually destroying 727 * any smbnodes. 728 * 729 * NFS: nfs_subr.c:check_rtable 730 * 731 * Debugging changes here relative to NFS. 732 * Relatively harmless, so left 'em in. 733 */ 734 int 735 smb_check_table(struct vfs *vfsp, smbnode_t *rtnp) 736 { 737 smbnode_t *np; 738 vnode_t *vp; 739 int index; 740 int busycnt = 0; 741 742 for (index = 0; index < smbtablesize; index++) { 743 rw_enter(&smbtable[index].r_lock, RW_READER); 744 for (np = smbtable[index].r_hashf; 745 np != (smbnode_t *)(&smbtable[index]); 746 np = np->r_hashf) { 747 if (np == rtnp) 748 continue; /* skip the root */ 749 vp = SMBTOV(np); 750 if (vp->v_vfsp != vfsp) 751 continue; /* skip other mount */ 752 753 /* Now the 'busy' checks: */ 754 /* Not on the free list? */ 755 if (np->r_freef == NULL) { 756 SMBVDEBUG("!r_freef: node=0x%p, v_path=%s\n", 757 (void *)np, vp->v_path); 758 busycnt++; 759 } 760 761 /* Has dirty pages? */ 762 if (vn_has_cached_data(vp) && 763 (np->r_flags & RDIRTY)) { 764 SMBVDEBUG("is dirty: node=0x%p, v_path=%s\n", 765 (void *)np, vp->v_path); 766 busycnt++; 767 } 768 769 /* Other refs? (not reflected in v_count) */ 770 if (np->r_count > 0) { 771 SMBVDEBUG("+r_count: node=0x%p, v_path=%s\n", 772 (void *)np, vp->v_path); 773 busycnt++; 774 } 775 776 if (busycnt && !smb_check_table_debug) 777 break; 778 779 } 780 rw_exit(&smbtable[index].r_lock); 781 } 782 return (busycnt); 783 } 784 785 /* 786 * Destroy inactive vnodes from the hash queues which belong to this 787 * vfs. It is essential that we destroy all inactive vnodes during a 788 * forced unmount as well as during a normal unmount. 789 * 790 * NFS: nfs_subr.c:destroy_rtable 791 */ 792 void 793 smbfs_destroy_table(struct vfs *vfsp) 794 { 795 int index; 796 smbnode_t *np; 797 smbnode_t *rlist; 798 smbnode_t *r_hashf; 799 vnode_t *vp; 800 801 rlist = NULL; 802 803 for (index = 0; index < smbtablesize; index++) { 804 rw_enter(&smbtable[index].r_lock, RW_WRITER); 805 for (np = smbtable[index].r_hashf; 806 np != (smbnode_t *)(&smbtable[index]); 807 np = r_hashf) { 808 /* save the hash pointer before destroying */ 809 r_hashf = np->r_hashf; 810 vp = SMBTOV(np); 811 if (vp->v_vfsp == vfsp) { 812 mutex_enter(&smbfreelist_lock); 813 if (np->r_freef != NULL) { 814 smb_rmfree(np); 815 mutex_exit(&smbfreelist_lock); 816 smb_rmhash_locked(np); 817 np->r_hashf = rlist; 818 rlist = np; 819 } else 820 mutex_exit(&smbfreelist_lock); 821 } 822 } 823 rw_exit(&smbtable[index].r_lock); 824 } 825 826 for (np = rlist; np != NULL; np = rlist) { 827 rlist = np->r_hashf; 828 /* 829 * This call to smb_addfree will end up destroying the 830 * smbnode, but in a safe way with the appropriate set 831 * of checks done. 832 */ 833 smb_addfree(np); 834 } 835 836 } 837 838 /* 839 * This routine destroys all the resources associated with the smbnode 840 * and then the smbnode itself. 841 * 842 * NFS: nfs_subr.c:destroy_rnode 843 */ 844 void 845 smb_destroy_node(smbnode_t *np) 846 { 847 vnode_t *vp; 848 vfs_t *vfsp; 849 850 vp = SMBTOV(np); 851 vfsp = vp->v_vfsp; 852 853 ASSERT(vp->v_count == 1); 854 ASSERT(np->r_count == 0); 855 ASSERT(np->r_mapcnt == 0); 856 ASSERT(!(np->r_flags & RHASHED)); 857 ASSERT(np->r_freef == NULL && np->r_freeb == NULL); 858 atomic_add_long((ulong_t *)&smbnodenew, -1); 859 vn_invalid(vp); 860 vn_free(vp); 861 kmem_cache_free(smbnode_cache, np); 862 VFS_RELE(vfsp); 863 } 864 865 /* rflush? */ 866 /* access cache */ 867 /* client handles */ 868 869 /* 870 * initialize resources that are used by smbfs_subr.c 871 * this is called from the _init() routine (by the way of smbfs_clntinit()) 872 * 873 * allocate and initialze smbfs hash table 874 * NFS: nfs_subr.c:nfs_subrinit 875 */ 876 int 877 smbfs_subrinit(void) 878 { 879 int i; 880 ulong_t nsmbnode_max; 881 882 /* 883 * Allocate and initialize the smbnode hash queues 884 */ 885 if (nsmbnode <= 0) 886 nsmbnode = ncsize; /* dnlc.h */ 887 nsmbnode_max = (ulong_t)((kmem_maxavail() >> 2) / 888 sizeof (struct smbnode)); 889 if (nsmbnode > nsmbnode_max || (nsmbnode == 0 && ncsize == 0)) { 890 zcmn_err(GLOBAL_ZONEID, CE_NOTE, 891 "setting nsmbnode to max value of %ld", nsmbnode_max); 892 nsmbnode = nsmbnode_max; 893 } 894 895 smbtablesize = 1 << highbit(nsmbnode / smbhashlen); 896 smbtablemask = smbtablesize - 1; 897 smbtable = kmem_alloc(smbtablesize * sizeof (*smbtable), KM_SLEEP); 898 for (i = 0; i < smbtablesize; i++) { 899 smbtable[i].r_hashf = (smbnode_t *)(&smbtable[i]); 900 smbtable[i].r_hashb = (smbnode_t *)(&smbtable[i]); 901 rw_init(&smbtable[i].r_lock, NULL, RW_DEFAULT, NULL); 902 } 903 smbnode_cache = kmem_cache_create("smbnode_cache", sizeof (smbnode_t), 904 0, NULL, NULL, smbfs_kmem_reclaim, NULL, NULL, 0); 905 906 /* 907 * Initialize the various mutexes and reader/writer locks 908 */ 909 mutex_init(&smbfreelist_lock, NULL, MUTEX_DEFAULT, NULL); 910 mutex_init(&smbfs_minor_lock, NULL, MUTEX_DEFAULT, NULL); 911 912 /* 913 * Assign unique major number for all smbfs mounts 914 */ 915 if ((smbfs_major = getudev()) == -1) { 916 zcmn_err(GLOBAL_ZONEID, CE_WARN, 917 "smbfs: init: can't get unique device number"); 918 smbfs_major = 0; 919 } 920 smbfs_minor = 0; 921 922 return (0); 923 } 924 925 /* 926 * free smbfs hash table, etc. 927 * NFS: nfs_subr.c:nfs_subrfini 928 */ 929 void 930 smbfs_subrfini(void) 931 { 932 int i; 933 934 /* 935 * Deallocate the smbnode hash queues 936 */ 937 kmem_cache_destroy(smbnode_cache); 938 939 for (i = 0; i < smbtablesize; i++) 940 rw_destroy(&smbtable[i].r_lock); 941 kmem_free(smbtable, smbtablesize * sizeof (*smbtable)); 942 943 /* 944 * Destroy the various mutexes and reader/writer locks 945 */ 946 mutex_destroy(&smbfreelist_lock); 947 mutex_destroy(&smbfs_minor_lock); 948 } 949 950 /* rddir_cache ? */ 951 952 /* 953 * Support functions for smbfs_kmem_reclaim 954 */ 955 956 static int 957 smbfs_node_reclaim(void) 958 { 959 int freed; 960 smbnode_t *np; 961 vnode_t *vp; 962 963 freed = 0; 964 mutex_enter(&smbfreelist_lock); 965 while ((np = smbfreelist) != NULL) { 966 smb_rmfree(np); 967 mutex_exit(&smbfreelist_lock); 968 if (np->r_flags & RHASHED) { 969 vp = SMBTOV(np); 970 rw_enter(&np->r_hashq->r_lock, RW_WRITER); 971 mutex_enter(&vp->v_lock); 972 if (vp->v_count > 1) { 973 vp->v_count--; 974 mutex_exit(&vp->v_lock); 975 rw_exit(&np->r_hashq->r_lock); 976 mutex_enter(&smbfreelist_lock); 977 continue; 978 } 979 mutex_exit(&vp->v_lock); 980 smb_rmhash_locked(np); 981 rw_exit(&np->r_hashq->r_lock); 982 } 983 /* 984 * This call to smb_addfree will end up destroying the 985 * smbnode, but in a safe way with the appropriate set 986 * of checks done. 987 */ 988 smb_addfree(np); 989 mutex_enter(&smbfreelist_lock); 990 } 991 mutex_exit(&smbfreelist_lock); 992 return (freed); 993 } 994 995 /* 996 * Called by kmem_cache_alloc ask us if we could 997 * "Please give back some memory!" 998 * 999 * Todo: dump nodes from the free list? 1000 */ 1001 /*ARGSUSED*/ 1002 void 1003 smbfs_kmem_reclaim(void *cdrarg) 1004 { 1005 (void) smbfs_node_reclaim(); 1006 } 1007 1008 /* nfs failover stuff */ 1009 /* nfs_rw_xxx - see smbfs_rwlock.c */ 1010