xref: /illumos-gate/usr/src/uts/common/fs/smbclnt/smbfs/smbfs_subr2.c (revision a4f7cf1f88d277df34c22eaec6648bf33f148a7d)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 *	Copyright (c) 1983,1984,1985,1986,1987,1988,1989  AT&T.
 *	All rights reserved.
 */

/*
 * Node hash implementation borrowed from NFS.
 * See: uts/common/fs/nfs/nfs_subr.c
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/bitmap.h>
#include <sys/dnlc.h>
#include <sys/kmem.h>
#include <sys/sunddi.h>

#ifdef APPLE
#include <sys/smb_apple.h>
#include <sys/utfconv.h>
#include <sys/smb_iconv.h>
#else
#include <netsmb/smb_osdep.h>
#endif

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_rq.h>

#include <smbfs/smbfs.h>
#include <smbfs/smbfs_node.h>
#include <smbfs/smbfs_subr.h>

/*
 * The hash queues for access to active and cached smbnodes
 * are organized as doubly linked lists.  A reader/writer lock
 * for each hash bucket is used to control access and to synchronize
 * lookups, additions, and deletions from the hash queue.
 *
 * The smbnode freelist is organized as a doubly linked list with
 * a head pointer.  Additions and deletions are synchronized via
 * a single mutex.
 *
 * In order to add an smbnode to the free list, it must be hashed into
 * a hash queue and the exclusive lock for that hash queue must be
 * held.  If an smbnode is not hashed into a hash queue, then it is
 * destroyed because it holds no information about the file that is
 * worth reusing.  The exclusive lock to the hash queue must be
 * held in order to prevent a lookup in the hash queue from finding
 * the smbnode, using it, and assuming that the smbnode is not on the
 * freelist.  The lookup in the hash queue will have the hash queue
 * locked, either exclusive or shared.
 *
 * The vnode reference count for each smbnode is not allowed to drop
 * below 1.  This prevents external entities, such as the VM
 * subsystem, from acquiring references to vnodes already on the
 * freelist and then trying to place them back on the freelist
 * when their reference is released.  This means that when an
 * smbnode is looked up in the hash queues, then either the smbnode
 * is removed from the freelist and that reference is transferred to
 * the new reference or the vnode reference count must be incremented
 * accordingly.  The mutex for the freelist must be held in order to
 * accurately test to see if the smbnode is on the freelist or not.
 * The hash queue lock might be held shared and it is possible that
 * two different threads may race to remove the smbnode from the
 * freelist.  This race can be resolved by holding the mutex for the
 * freelist.  Please note that the mutex for the freelist does not
 * need to be held if the smbnode is not on the freelist.  It cannot
 * be placed on the freelist due to the requirement that the thread
 * putting the smbnode on the freelist must hold the exclusive lock
 * to the hash queue and the thread doing the lookup in the hash
 * queue is holding either a shared or exclusive lock to the hash
 * queue.
 *
 * The lock ordering is:
 *
 *	hash bucket lock -> vnode lock
 *	hash bucket lock -> freelist lock
 */
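/*
 * To make the ordering concrete, here is a minimal sketch of a thread
 * taking a node off the freelist while honoring the ordering above.
 * Illustrative only (hypothetical code, not a routine in this file);
 * see smbhashfind() below for the real pattern.
 */
#if 0 /* illustrative only */
	rw_enter(&rhtp->r_lock, RW_READER);	/* hash bucket lock first */
	mutex_enter(&smbfreelist_lock);		/* then the freelist lock */
	if (np->r_freef != NULL)
		smb_rmfree(np);		/* steal the freelist's reference */
	mutex_exit(&smbfreelist_lock);
	rw_exit(&rhtp->r_lock);
#endif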
static rhashq_t *smbtable;

static kmutex_t smbfreelist_lock;
static smbnode_t *smbfreelist = NULL;
static ulong_t	smbnodenew = 0;
long	nsmbnode = 0;

static int smbtablesize;
static int smbtablemask;
static int smbhashlen = 4;

static struct kmem_cache *smbnode_cache;

/*
 * Mutex to protect the following variables:
 *	smbfs_major
 *	smbfs_minor
 */
kmutex_t smbfs_minor_lock;
int smbfs_major;
int smbfs_minor;

/*
 * Local functions.
 * Not static, to aid debugging.
 */
void smb_rmfree(smbnode_t *);
void smbinactive(smbnode_t *);
void smb_rmhash_locked(smbnode_t *);
void smb_destroy_node(smbnode_t *);
void smbfs_kmem_reclaim(void *cdrarg);

smbnode_t *smbhashfind(struct vfs *, const char *, int, rhashq_t *);
static vnode_t *make_smbnode(vfs_t *, char *, int, rhashq_t *, int *);


/*
 * Free the resources associated with an smbnode.
 * Note: This is different from smbfs_inactive
 *
 * NFS: nfs_subr.c:rinactive
 */
void
smbinactive(smbnode_t *np)
{

	if (np->n_rpath) {
		kmem_free(np->n_rpath, np->n_rplen + 1);
		np->n_rpath = NULL;
	}
}

/*
 * Return a vnode for the given CIFS directory and filename.
 * If no smbnode exists for this pathname, create one and put it
 * into the hash queues.  If the smbnode for this pathname
 * already exists, return it.
 *
 * Note: make_smbnode() may upgrade the hash bucket lock to exclusive.
 *
 * NFS: nfs_subr.c:makenfsnode
 */
vnode_t *
smbfs_make_node(
	vfs_t *vfsp,
	const char *dir,
	int dirlen,
	const char *name,
	int nmlen,
	char sep,
	struct smbfattr *fap)
{
	char *rpath;
	int rplen, idx;
	uint32_t hash;
	rhashq_t *rhtp;
	smbnode_t *np;
	vnode_t *vp;
#ifdef NOT_YET
	vattr_t va;
#endif
	int newnode;

	/*
	 * Build the full path name in allocated memory
	 * so we have it for lookup, etc.  Note the
	 * special case at the root (dir=="\\", dirlen==1)
	 * where this does not add a slash separator.
	 * To do that would make a double slash, which
	 * has special meaning in CIFS.
	 *
	 * ToDo:  Would prefer to allocate a remote path
	 * only when we will create a new node.
	 */
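	/*
	 * For example (hypothetical values): dir=="\\" (the root, so
	 * sep is cleared just below) with name=="a" builds rpath "\\a",
	 * while dir=="\\a", sep=='\\', name=="b" builds rpath "\\a\\b".
	 */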
	if (dirlen <= 1 && sep == '\\')
		sep = '\0';	/* no slash */

	/* Compute the length of rpath and allocate. */
	rplen = dirlen;
	if (sep)
		rplen++;
	if (name)
		rplen += nmlen;

	rpath = kmem_alloc(rplen + 1, KM_SLEEP);

	/* Fill in rpath */
	bcopy(dir, rpath, dirlen);
	if (sep)
		rpath[dirlen++] = sep;
	if (name)
		bcopy(name, &rpath[dirlen], nmlen);
	rpath[rplen] = 0;

	hash = smbfs_hash(rpath, rplen);
	idx = hash & smbtablemask;
	rhtp = &smbtable[idx];
	rw_enter(&rhtp->r_lock, RW_READER);

	vp = make_smbnode(vfsp, rpath, rplen, rhtp, &newnode);
	np = VTOSMB(vp);
	np->n_ino = hash;	/* Equivalent to: smbfs_getino() */

	/*
	 * Note: make_smbnode keeps a reference to rpath in
	 * new nodes it creates, so only free when we found
	 * an existing node.
	 */
	if (!newnode) {
		kmem_free(rpath, rplen + 1);
		rpath = NULL;
	}

	if (fap == NULL) {
#ifdef NOT_YET
		if (newnode) {
			PURGE_ATTRCACHE(vp);
		}
#endif
		rw_exit(&rhtp->r_lock);
		return (vp);
	}

	/* Have SMB attributes. */
	vp->v_type = (fap->fa_attr & SMB_FA_DIR) ? VDIR : VREG;
	/* XXX: np->n_ino = fap->fa_ino; see above */
	np->r_size = fap->fa_size;
	/* XXX: np->r_attr = *fap here instead? */
	np->r_atime = fap->fa_atime;
	np->r_ctime = fap->fa_ctime;
	np->r_mtime = fap->fa_mtime;

#ifdef NOT_YET
	if (!newnode) {
		rw_exit(&rhtp->r_lock);
		(void) nfs_cache_fattr(vp, attr, &va, t, cr);
	} else {
		if (attr->na_type < NFNON || attr->na_type > NFSOC)
			vp->v_type = VBAD;
		else
			vp->v_type = n2v_type(attr);
		vp->v_rdev = makedevice(attr->rdev.specdata1,
		    attr->rdev.specdata2);
		nfs_attrcache(vp, attr, t);
		rw_exit(&rhtp->r_lock);
	}
#else
	rw_exit(&rhtp->r_lock);
#endif

	return (vp);
}
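
/*
 * Usage sketch (hypothetical; names assumed, not taken from this file):
 * a directory lookup holding a parent node dnp and a fresh smbfattr
 * from the server would resolve it to a vnode roughly like so:
 *
 *	vp = smbfs_make_node(vfsp, dnp->n_rpath, dnp->n_rplen,
 *	    name, nmlen, '\\', &fa);
 */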

/*
 * NFS: nfs_subr.c:rtablehash
 * We use smbfs_hash().
 */

/*
 * Find or create an smbnode.
 * NFS: nfs_subr.c:make_rnode
 */
static vnode_t *
make_smbnode(
	vfs_t *vfsp,
	char *rpath,
	int rplen,
	rhashq_t *rhtp,
	int *newnode)
{
	smbnode_t *np;
	smbnode_t *tnp;
	vnode_t *vp;
	smbmntinfo_t *mi;

	ASSERT(RW_READ_HELD(&rhtp->r_lock));

	mi = VFTOSMI(vfsp);

start:
	np = smbhashfind(vfsp, rpath, rplen, rhtp);
	if (np != NULL) {
		vp = SMBTOV(np);
		*newnode = 0;
		return (vp);
	}

	/* Note: will retake this lock below. */
	rw_exit(&rhtp->r_lock);

	/*
	 * see if we can find something on the freelist
	 */
	mutex_enter(&smbfreelist_lock);
	if (smbfreelist != NULL && smbnodenew >= nsmbnode) {
		np = smbfreelist;
		smb_rmfree(np);
		mutex_exit(&smbfreelist_lock);

		vp = SMBTOV(np);

		if (np->r_flags & RHASHED) {
			rw_enter(&np->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&np->r_hashq->r_lock);
				rw_enter(&rhtp->r_lock, RW_READER);
				goto start;
			}
			mutex_exit(&vp->v_lock);
			smb_rmhash_locked(np);
			rw_exit(&np->r_hashq->r_lock);
		}

		smbinactive(np);

		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			rw_enter(&rhtp->r_lock, RW_READER);
			goto start;
		}
		mutex_exit(&vp->v_lock);
		vn_invalid(vp);
		/*
		 * destroy old locks before bzero'ing and
		 * recreating the locks below.
		 */
		smbfs_rw_destroy(&np->r_rwlock);
		smbfs_rw_destroy(&np->r_lkserlock);
		mutex_destroy(&np->r_statelock);
		cv_destroy(&np->r_cv);
		/*
		 * Make sure that if smbnode is recycled then
		 * VFS count is decremented properly before
		 * reuse.
		 */
		VFS_RELE(vp->v_vfsp);
		vn_reinit(vp);
	} else {
		/*
		 * allocate and initialize a new smbnode
		 */
		vnode_t *new_vp;

		mutex_exit(&smbfreelist_lock);

		np = kmem_cache_alloc(smbnode_cache, KM_SLEEP);
		new_vp = vn_alloc(KM_SLEEP);

		atomic_add_long((ulong_t *)&smbnodenew, 1);
		vp = new_vp;
	}

	/* Initialize smbnode_t */
	bzero(np, sizeof (*np));

	smbfs_rw_init(&np->r_rwlock, NULL, RW_DEFAULT, NULL);
	smbfs_rw_init(&np->r_lkserlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&np->r_statelock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&np->r_cv, NULL, CV_DEFAULT, NULL);
	/* cv_init(&np->r_commit.c_cv, NULL, CV_DEFAULT, NULL); */

	np->r_vnode = vp;
	np->n_mount = mi;
	np->r_hashq = rhtp;
	np->n_direof = -1;
	np->n_fid = SMB_FID_UNUSED;
	np->n_uid = UID_NOBODY;
	np->n_gid = GID_NOBODY;
	/* XXX: make attributes stale? */

#if 0 /* XXX dircache */
	/*
	 * We don't know if it's a directory yet.
	 * Let the caller do this?  XXX
	 */
	avl_create(&np->r_dir, compar, sizeof (rddir_cache),
	    offsetof(rddir_cache, tree));
#endif

	/* Now fill in the vnode. */
	vn_setops(vp, smbfs_vnodeops);
	vp->v_data = (caddr_t)np;
	VFS_HOLD(vfsp);
	vp->v_vfsp = vfsp;
	vp->v_type = VNON;

	/*
	 * There is a race window: someone else may have created
	 * this same smbnode while no locks were held, so search
	 * the hash queue again and use that node if found.
	 */
	rw_enter(&rhtp->r_lock, RW_WRITER);
	tnp = smbhashfind(vfsp, rpath, rplen, rhtp);
	if (tnp != NULL) {
		vp = SMBTOV(tnp);
		*newnode = 0;
		rw_exit(&rhtp->r_lock);
		/* The node we were building goes on the free list. */
		smb_addfree(np);
		rw_enter(&rhtp->r_lock, RW_READER);
		return (vp);
	}

	/*
	 * Hash search identifies nodes by the full pathname,
	 * so store that before linking in the hash list.
	 * Note: caller allocates the rpath, and knows
	 * about this reference when *newnode is set.
	 */
	np->n_rpath = rpath;
	np->n_rplen = rplen;

	smb_addhash(np);
	*newnode = 1;
	return (vp);
}

/*
 * smb_addfree
 * Put an smbnode on the free list.
 *
 * Normally called by smbfs_inactive, but also
 * called in here during cleanup operations.
 *
 * Smbnodes which were allocated above and beyond the normal limit
 * are immediately freed.
 *
 * NFS: nfs_subr.c:rp_addfree
 */
void
smb_addfree(smbnode_t *np)
{
	vnode_t *vp;
	struct vfs *vfsp;

	vp = SMBTOV(np);
	ASSERT(vp->v_count >= 1);
	ASSERT(np->r_freef == NULL && np->r_freeb == NULL);

	/*
	 * If we have too many smbnodes allocated and there are no
	 * references to this smbnode, or if the smbnode is no longer
	 * accessible because it does not reside in the hash queues,
	 * or if an i/o error occurred while writing to the file,
	 * then just free it instead of putting it on the smbnode
	 * freelist.
	 */
	vfsp = vp->v_vfsp;
	if (((smbnodenew > nsmbnode || !(np->r_flags & RHASHED) ||
	    np->r_error || (vfsp->vfs_flag & VFS_UNMOUNTED)) &&
	    np->r_count == 0)) {
		if (np->r_flags & RHASHED) {
			rw_enter(&np->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&np->r_hashq->r_lock);
				/*
				 * Will get another call later,
				 * via smbfs_inactive.
				 */
				return;
			}
			mutex_exit(&vp->v_lock);
			smb_rmhash_locked(np);
			rw_exit(&np->r_hashq->r_lock);
		}

		smbinactive(np);

		/*
		 * Recheck the vnode reference count.  We need to
		 * make sure that another reference has not been
		 * acquired while we were not holding v_lock.  The
		 * smbnode is not in the smbnode hash queues, so the
		 * only way for a reference to have been acquired
		 * is for a VOP_PUTPAGE because the smbnode was marked
		 * with RDIRTY or for a modified page.  This
		 * reference may have been acquired before our call
		 * to smbinactive.  The i/o may have been completed,
		 * thus allowing smbinactive to complete, but the
		 * reference to the vnode may not have been released
		 * yet.  In any case, the smbnode cannot be destroyed
		 * until the other references to this vnode have been
		 * released.  The other references will take care of
		 * either destroying the smbnode or placing it on the
		 * smbnode freelist.  If there are no other references,
		 * then the smbnode may be safely destroyed.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			return;
		}
		mutex_exit(&vp->v_lock);

		smb_destroy_node(np);
		return;
	}
	/*
	 * Lock the hash queue and then recheck the reference count
	 * to ensure that no other threads have acquired a reference
	 * in the meantime; a new reference would indicate that the
	 * smbnode should not be placed on the freelist.  If another
	 * reference has been acquired, then just release this one
	 * and let the other thread complete the processing of adding
	 * this smbnode to the freelist.
	 */
	rw_enter(&np->r_hashq->r_lock, RW_WRITER);

	mutex_enter(&vp->v_lock);
	if (vp->v_count > 1) {
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		rw_exit(&np->r_hashq->r_lock);
		return;
	}
	mutex_exit(&vp->v_lock);

	/*
	 * Put the smbnode at the end of the freelist.  Nodes are
	 * reclaimed from the front, so the nodes that have been
	 * free the longest are reused first.
	 */
	mutex_enter(&smbfreelist_lock);
	if (smbfreelist == NULL) {
		np->r_freef = np;
		np->r_freeb = np;
		smbfreelist = np;
	} else {
		np->r_freef = smbfreelist;
		np->r_freeb = smbfreelist->r_freeb;
		smbfreelist->r_freeb->r_freef = np;
		smbfreelist->r_freeb = np;
	}
	mutex_exit(&smbfreelist_lock);

	rw_exit(&np->r_hashq->r_lock);
}
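
/*
 * Illustration of the append above (hypothetical state): given a
 * freelist A <-> B with smbfreelist == A, inserting C produces the
 * circular list A <-> B <-> C, with C->r_freef == A and smbfreelist
 * still == A.  New entries thus land at the tail, while reclaim
 * (make_smbnode, smbfs_node_reclaim) takes from the head.
 */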

/*
 * Remove an smbnode from the free list.
 *
 * The caller must be holding smbfreelist_lock and the smbnode
 * must be on the freelist.
 *
 * NFS: nfs_subr.c:rp_rmfree
 */
void
smb_rmfree(smbnode_t *np)
{

	ASSERT(MUTEX_HELD(&smbfreelist_lock));
	ASSERT(np->r_freef != NULL && np->r_freeb != NULL);

	if (np == smbfreelist) {
		smbfreelist = np->r_freef;
		if (np == smbfreelist)
			smbfreelist = NULL;
	}

	np->r_freeb->r_freef = np->r_freef;
	np->r_freef->r_freeb = np->r_freeb;

	np->r_freef = np->r_freeb = NULL;
}
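
/*
 * Minimal usage sketch (hypothetical caller; this mirrors the
 * pattern in smbhashfind() below):
 *
 *	mutex_enter(&smbfreelist_lock);
 *	if (np->r_freef != NULL)
 *		smb_rmfree(np);
 *	mutex_exit(&smbfreelist_lock);
 */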

/*
 * Put an smbnode in the hash table.
 *
 * The caller must be holding the exclusive hash queue lock.
 *
 * NFS: nfs_subr.c:rp_addhash
 */
void
smb_addhash(smbnode_t *np)
{

	ASSERT(RW_WRITE_HELD(&np->r_hashq->r_lock));
	ASSERT(!(np->r_flags & RHASHED));

	np->r_hashf = np->r_hashq->r_hashf;
	np->r_hashq->r_hashf = np;
	np->r_hashb = (smbnode_t *)np->r_hashq;
	np->r_hashf->r_hashb = np;

	mutex_enter(&np->r_statelock);
	np->r_flags |= RHASHED;
	mutex_exit(&np->r_statelock);
}
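
/*
 * Note on the (smbnode_t *) cast above: the bucket header serves as
 * the list sentinel.  This assumes rhashq_t lays out its r_hashf and
 * r_hashb fields to match the start of smbnode_t (see smbfs_node.h),
 * the same trick the NFS rtable code uses.
 */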

/*
 * Remove an smbnode from the hash table.
 *
 * The caller must be holding the exclusive hash queue lock.
 *
 * NFS: nfs_subr.c:rp_rmhash_locked
 */
void
smb_rmhash_locked(smbnode_t *np)
{

	ASSERT(RW_WRITE_HELD(&np->r_hashq->r_lock));
	ASSERT(np->r_flags & RHASHED);

	np->r_hashb->r_hashf = np->r_hashf;
	np->r_hashf->r_hashb = np->r_hashb;

	mutex_enter(&np->r_statelock);
	np->r_flags &= ~RHASHED;
	mutex_exit(&np->r_statelock);
}

/*
 * Remove an smbnode from the hash table.
 *
 * The caller must not be holding the hash queue lock.
 */
void
smb_rmhash(smbnode_t *np)
{

	rw_enter(&np->r_hashq->r_lock, RW_WRITER);
	smb_rmhash_locked(np);
	rw_exit(&np->r_hashq->r_lock);
}

/*
 * Lookup an smbnode by remote pathname.
 *
 * The caller must be holding the hash queue lock, either shared or exclusive.
 * XXX: make static?
 *
 * NFS: nfs_subr.c:rfind
 */
smbnode_t *
smbhashfind(
	struct vfs *vfsp,
	const char *rpath,
	int rplen,
	rhashq_t *rhtp)
{
	smbnode_t *np;
	vnode_t *vp;

	ASSERT(RW_LOCK_HELD(&rhtp->r_lock));

	for (np = rhtp->r_hashf; np != (smbnode_t *)rhtp; np = np->r_hashf) {
		vp = SMBTOV(np);
		if (vp->v_vfsp == vfsp &&
		    np->n_rplen == rplen &&
		    bcmp(np->n_rpath, rpath, rplen) == 0) {
			/*
			 * remove smbnode from free list, if necessary.
			 */
			if (np->r_freef != NULL) {
				mutex_enter(&smbfreelist_lock);
				/*
				 * If the smbnode is on the freelist,
				 * then remove it and use that reference
				 * as the new reference.  Otherwise,
				 * need to increment the reference count.
				 */
				if (np->r_freef != NULL) {
					smb_rmfree(np);
					mutex_exit(&smbfreelist_lock);
				} else {
					mutex_exit(&smbfreelist_lock);
					VN_HOLD(vp);
				}
			} else
				VN_HOLD(vp);
			return (np);
		}
	}
	return (NULL);
}
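
/*
 * Usage sketch (hypothetical): callers hold the bucket lock across
 * the search, as make_smbnode() above does:
 *
 *	rw_enter(&rhtp->r_lock, RW_READER);
 *	np = smbhashfind(vfsp, rpath, rplen, rhtp);
 *	rw_exit(&rhtp->r_lock);
 */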

#ifdef SMB_VNODE_DEBUG
int smb_check_table_debug = 1;
#else /* SMB_VNODE_DEBUG */
int smb_check_table_debug = 0;
#endif /* SMB_VNODE_DEBUG */


/*
 * Return nonzero (a count of busy nodes) if there is an active vnode
 * belonging to this vfs in the smbtable cache.
 *
 * Several of these checks are done without holding the usual
 * locks.  This is safe because destroy_smbtable(), smb_addfree(),
 * etc. will redo the necessary checks before actually destroying
 * any smbnodes.
 *
 * NFS: nfs_subr.c:check_rtable
 *
 * Debugging changes here relative to NFS.
 * Relatively harmless, so left 'em in.
 */
int
smb_check_table(struct vfs *vfsp, smbnode_t *rtnp)
{
	smbnode_t *np;
	vnode_t *vp;
	int index;
	int busycnt = 0;

	for (index = 0; index < smbtablesize; index++) {
		rw_enter(&smbtable[index].r_lock, RW_READER);
		for (np = smbtable[index].r_hashf;
		    np != (smbnode_t *)(&smbtable[index]);
		    np = np->r_hashf) {
			if (np == rtnp)
				continue; /* skip the root */
			vp = SMBTOV(np);
			if (vp->v_vfsp != vfsp)
				continue; /* skip other mount */

			/* Now the 'busy' checks: */
			/* Not on the free list? */
			if (np->r_freef == NULL) {
				SMBVDEBUG("!r_freef: node=0x%p, v_path=%s\n",
				    (void *)np, vp->v_path);
				busycnt++;
			}

			/* Has dirty pages? */
			if (vn_has_cached_data(vp) &&
			    (np->r_flags & RDIRTY)) {
				SMBVDEBUG("is dirty: node=0x%p, v_path=%s\n",
				    (void *)np, vp->v_path);
				busycnt++;
			}

			/* Other refs? (not reflected in v_count) */
			if (np->r_count > 0) {
				SMBVDEBUG("+r_count: node=0x%p, v_path=%s\n",
				    (void *)np, vp->v_path);
				busycnt++;
			}

			if (busycnt && !smb_check_table_debug)
				break;

		}
		rw_exit(&smbtable[index].r_lock);
	}
	return (busycnt);
}

/*
 * Destroy inactive vnodes from the hash queues which belong to this
 * vfs.  It is essential that we destroy all inactive vnodes during a
 * forced unmount as well as during a normal unmount.
 *
 * NFS: nfs_subr.c:destroy_rtable
 */
void
smbfs_destroy_table(struct vfs *vfsp)
{
	int index;
	smbnode_t *np;
	smbnode_t *rlist;
	smbnode_t *r_hashf;
	vnode_t *vp;

	rlist = NULL;

	for (index = 0; index < smbtablesize; index++) {
		rw_enter(&smbtable[index].r_lock, RW_WRITER);
		for (np = smbtable[index].r_hashf;
		    np != (smbnode_t *)(&smbtable[index]);
		    np = r_hashf) {
			/* save the hash pointer before destroying */
			r_hashf = np->r_hashf;
			vp = SMBTOV(np);
			if (vp->v_vfsp == vfsp) {
				mutex_enter(&smbfreelist_lock);
				if (np->r_freef != NULL) {
					smb_rmfree(np);
					mutex_exit(&smbfreelist_lock);
					smb_rmhash_locked(np);
					np->r_hashf = rlist;
					rlist = np;
				} else
					mutex_exit(&smbfreelist_lock);
			}
		}
		rw_exit(&smbtable[index].r_lock);
	}

	for (np = rlist; np != NULL; np = rlist) {
		rlist = np->r_hashf;
		/*
		 * This call to smb_addfree will end up destroying the
		 * smbnode, but in a safe way with the appropriate set
		 * of checks done.
		 */
		smb_addfree(np);
	}

}

/*
 * This routine destroys all the resources associated with the smbnode
 * and then the smbnode itself.
 *
 * NFS: nfs_subr.c:destroy_rnode
 */
void
smb_destroy_node(smbnode_t *np)
{
	vnode_t *vp;
	vfs_t *vfsp;

	vp = SMBTOV(np);
	vfsp = vp->v_vfsp;

	ASSERT(vp->v_count == 1);
	ASSERT(np->r_count == 0);
	ASSERT(np->r_mapcnt == 0);
	ASSERT(!(np->r_flags & RHASHED));
	ASSERT(np->r_freef == NULL && np->r_freeb == NULL);
	atomic_add_long((ulong_t *)&smbnodenew, -1);
	vn_invalid(vp);
	vn_free(vp);
	kmem_cache_free(smbnode_cache, np);
	VFS_RELE(vfsp);
}

/* rflush? */
/* access cache */
/* client handles */

/*
 * Initialize resources that are used by smbfs_subr.c.
 * This is called from the _init() routine (by way of smbfs_clntinit()).
 *
 * Allocate and initialize the smbfs hash table.
 * NFS: nfs_subr.c:nfs_subrinit
 */
int
smbfs_subrinit(void)
{
	int i;
	ulong_t nsmbnode_max;

	/*
	 * Allocate and initialize the smbnode hash queues
	 */
	if (nsmbnode <= 0)
		nsmbnode = ncsize; /* dnlc.h */
	nsmbnode_max = (ulong_t)((kmem_maxavail() >> 2) /
	    sizeof (struct smbnode));
	if (nsmbnode > nsmbnode_max || (nsmbnode == 0 && ncsize == 0)) {
		zcmn_err(GLOBAL_ZONEID, CE_NOTE,
		    "setting nsmbnode to max value of %ld", nsmbnode_max);
		nsmbnode = nsmbnode_max;
	}

	smbtablesize = 1 << highbit(nsmbnode / smbhashlen);
	smbtablemask = smbtablesize - 1;
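	/*
	 * Sizing example (hypothetical numbers): with nsmbnode == 16384
	 * and smbhashlen == 4, nsmbnode / smbhashlen == 4096 and
	 * 1 << highbit(4096) == 8192, i.e. the bucket count is rounded
	 * up to a power of two so the mask above works.
	 */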
	smbtable = kmem_alloc(smbtablesize * sizeof (*smbtable), KM_SLEEP);
	for (i = 0; i < smbtablesize; i++) {
		smbtable[i].r_hashf = (smbnode_t *)(&smbtable[i]);
		smbtable[i].r_hashb = (smbnode_t *)(&smbtable[i]);
		rw_init(&smbtable[i].r_lock, NULL, RW_DEFAULT, NULL);
	}
	smbnode_cache = kmem_cache_create("smbnode_cache", sizeof (smbnode_t),
	    0, NULL, NULL, smbfs_kmem_reclaim, NULL, NULL, 0);

	/*
	 * Initialize the various mutexes and reader/writer locks
	 */
	mutex_init(&smbfreelist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&smbfs_minor_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Assign unique major number for all smbfs mounts
	 */
	if ((smbfs_major = getudev()) == -1) {
		zcmn_err(GLOBAL_ZONEID, CE_WARN,
		    "smbfs: init: can't get unique device number");
		smbfs_major = 0;
	}
	smbfs_minor = 0;

	return (0);
}

/*
 * free smbfs hash table, etc.
 * NFS: nfs_subr.c:nfs_subrfini
 */
void
smbfs_subrfini(void)
{
	int i;

	/*
	 * Deallocate the smbnode hash queues
	 */
	kmem_cache_destroy(smbnode_cache);

	for (i = 0; i < smbtablesize; i++)
		rw_destroy(&smbtable[i].r_lock);
	kmem_free(smbtable, smbtablesize * sizeof (*smbtable));

	/*
	 * Destroy the various mutexes and reader/writer locks
	 */
	mutex_destroy(&smbfreelist_lock);
	mutex_destroy(&smbfs_minor_lock);
}

/* rddir_cache ? */

/*
 * Support functions for smbfs_kmem_reclaim
 */

static int
smbfs_node_reclaim(void)
{
	int freed;
	smbnode_t *np;
	vnode_t *vp;

	freed = 0;
	mutex_enter(&smbfreelist_lock);
	while ((np = smbfreelist) != NULL) {
		smb_rmfree(np);
		mutex_exit(&smbfreelist_lock);
		if (np->r_flags & RHASHED) {
			vp = SMBTOV(np);
			rw_enter(&np->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&np->r_hashq->r_lock);
				mutex_enter(&smbfreelist_lock);
				continue;
			}
			mutex_exit(&vp->v_lock);
			smb_rmhash_locked(np);
			rw_exit(&np->r_hashq->r_lock);
		}
		/*
		 * This call to smb_addfree will end up destroying the
		 * smbnode, but in a safe way with the appropriate set
		 * of checks done.
		 */
		smb_addfree(np);
		mutex_enter(&smbfreelist_lock);
	}
	mutex_exit(&smbfreelist_lock);
	return (freed);
}

/*
 * Called via the kmem_cache reclaim callback when the system wants
 * us to "Please give back some memory!"
 *
 * Todo: dump nodes from the free list?
 */
/*ARGSUSED*/
void
smbfs_kmem_reclaim(void *cdrarg)
{
	(void) smbfs_node_reclaim();
}

/* nfs failover stuff */
/* nfs_rw_xxx - see smbfs_rwlock.c */
1008