/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T.
 * All rights reserved.
 *
 * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/thread.h>
#include <sys/t_lock.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/stat.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/flock.h>
#include <sys/share.h>
#include <sys/cmn_err.h>
#include <sys/tiuser.h>
#include <sys/sysmacros.h>
#include <sys/callb.h>
#include <sys/acl.h>
#include <sys/kstat.h>
#include <sys/signal.h>
#include <sys/list.h>
#include <sys/zone.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <smbfs/smbfs.h>
#include <smbfs/smbfs_node.h>
#include <smbfs/smbfs_subr.h>

#ifdef _KERNEL
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_vn.h>
#endif // _KERNEL

#define ATTRCACHE_VALID(vp)     (gethrtime() < VTOSMB(vp)->r_attrtime)

static int smbfs_getattr_cache(vnode_t *, smbfattr_t *);
static void smbfattr_to_vattr(vnode_t *, smbfattr_t *, vattr_t *);
static void smbfattr_to_xvattr(smbfattr_t *, vattr_t *);
static int smbfs_getattr_otw(vnode_t *, struct smbfattr *, cred_t *);


/*
 * The following code provides zone support in order to perform an action
 * for each smbfs mount in a zone.  This is also where we would add
 * per-zone globals and kernel threads for the smbfs module (since
 * they must be terminated by the shutdown callback).
 */

struct smi_globals {
        kmutex_t        smg_lock;  /* lock protecting smg_list */
        list_t          smg_list;  /* list of SMBFS mounts in zone */
        boolean_t       smg_destructor_called;
};
typedef struct smi_globals smi_globals_t;

static zone_key_t smi_list_key;

/*
 * Attributes caching:
 *
 * Attributes are cached in the smbnode in struct smbfattr form
 * (r_attr).  There is a time associated with the cached attributes
 * (r_attrtime) which tells whether the attributes are valid.  The
 * cache lifetime is set to the time elapsed since the last change
 * was detected when new attributes are cached.  This allows the
 * attributes of files that have changed recently to time out sooner
 * than those of files that have not changed for a long time.  There
 * are minimum and maximum timeout values that can be set per mount
 * point.
 */
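
/*
 * Illustrative sketch (not part of the build): how the cache lifetime
 * is derived from the time since the last detected change, clamped to
 * the per-mount bounds.  The real logic is in smbfs_attrcache_fa()
 * below; acmin/acmax stand in for smi_acregmin/smi_acregmax (or the
 * smi_acdir* values when the node is a directory).
 */
#if 0   /* example only */
static hrtime_t
attr_cache_lifetime(hrtime_t now, hrtime_t last_change,
    hrtime_t acmin, hrtime_t acmax)
{
        hrtime_t delta = now - last_change;

        if (delta < acmin)
                delta = acmin;
        else if (delta > acmax)
                delta = acmax;

        /* Attributes cached at "now" remain valid until now + delta. */
        return (delta);
}
#endif  /* example only */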

/*
 * Helper for _validate_caches
 */
int
smbfs_waitfor_purge_complete(vnode_t *vp)
{
        smbnode_t *np;
        k_sigset_t smask;

        np = VTOSMB(vp);
        if (np->r_serial != NULL && np->r_serial != curthread) {
                mutex_enter(&np->r_statelock);
                sigintr(&smask, VTOSMI(vp)->smi_flags & SMI_INT);
                while (np->r_serial != NULL) {
                        if (!cv_wait_sig(&np->r_cv, &np->r_statelock)) {
                                sigunintr(&smask);
                                mutex_exit(&np->r_statelock);
                                return (EINTR);
                        }
                }
                sigunintr(&smask);
                mutex_exit(&np->r_statelock);
        }
        return (0);
}

/*
 * Validate caches by checking cached attributes. If the cached
 * attributes have timed out, then get new attributes from the server.
 * As a side effect, this will do cache invalidation if the attributes
 * have changed.
 *
 * If the attributes have not timed out and if there is a cache
 * invalidation being done by some other thread, then wait until that
 * thread has completed the cache invalidation.
 */
int
smbfs_validate_caches(
        struct vnode *vp,
        cred_t *cr)
{
        struct smbfattr fa;
        int error;

        if (ATTRCACHE_VALID(vp)) {
                error = smbfs_waitfor_purge_complete(vp);
                if (error)
                        return (error);
                return (0);
        }

        return (smbfs_getattr_otw(vp, &fa, cr));
}

/*
 * Purge all of the various data caches.
 *
 * Here NFS also had a flags arg to control what gets flushed.
 * We only have the page cache, so no flags arg.
 */
/* ARGSUSED */
void
smbfs_purge_caches(struct vnode *vp, cred_t *cr)
{

        /*
         * Here NFS has: Purge the DNLC for this vp,
         * Clear any readdir state bits,
         * the readlink response cache, ...
         */

        /*
         * Flush the page cache.
         */
        if (vn_has_cached_data(vp)) {
                (void) VOP_PUTPAGE(vp, (u_offset_t)0, 0, B_INVAL, cr, NULL);
        }

        /*
         * Here NFS has: Flush the readdir response cache.
         * No readdir cache in smbfs.
         */
}

/*
 * Here NFS has:
 * nfs_purge_rddir_cache()
 * nfs3_cache_post_op_attr()
 * nfs3_cache_post_op_vattr()
 * nfs3_cache_wcc_data()
 */

/*
 * Check the attribute cache to see if the new attributes match
 * those cached.  If they do, the various `data' caches are
 * considered to be good.  Otherwise, purge the cached data.
 */
static void
smbfs_cache_check(
        struct vnode *vp,
        struct smbfattr *fap,
        cred_t *cr)
{
        smbnode_t *np;
        int purge_data = 0;
        int purge_acl = 0;

        np = VTOSMB(vp);
        mutex_enter(&np->r_statelock);

        /*
         * Compare with NFS macro: CACHE_VALID
         * If the mtime or size has changed,
         * purge cached data.
         */
        if (np->r_attr.fa_mtime.tv_sec != fap->fa_mtime.tv_sec ||
            np->r_attr.fa_mtime.tv_nsec != fap->fa_mtime.tv_nsec)
                purge_data = 1;
        if (np->r_attr.fa_size != fap->fa_size)
                purge_data = 1;

        if (np->r_attr.fa_ctime.tv_sec != fap->fa_ctime.tv_sec ||
            np->r_attr.fa_ctime.tv_nsec != fap->fa_ctime.tv_nsec)
                purge_acl = 1;

        if (purge_acl) {
                np->r_sectime = gethrtime();
        }

        mutex_exit(&np->r_statelock);

        if (purge_data)
                smbfs_purge_caches(vp, cr);
}

/*
 * Set attributes cache for given vnode using SMB fattr
 * and update the attribute cache timeout.
 *
 * Based on NFS: nfs_attrcache, nfs_attrcache_va
 */
void
smbfs_attrcache_fa(vnode_t *vp, struct smbfattr *fap)
{
        smbnode_t *np;
        smbmntinfo_t *smi;
        hrtime_t delta, now;
        u_offset_t newsize;
        vtype_t vtype, oldvt;
        mode_t mode;

        np = VTOSMB(vp);
        smi = VTOSMI(vp);

        /*
         * We allow v_type to change, so set that here
         * (and the mode, which depends on the type).
         */
        if (fap->fa_attr & SMB_FA_DIR) {
                vtype = VDIR;
                mode = smi->smi_dmode;
        } else {
                vtype = VREG;
                mode = smi->smi_fmode;
        }

        mutex_enter(&np->r_statelock);
        now = gethrtime();

        /*
         * Delta is the number of nanoseconds that we will
         * cache the attributes of the file.  It is based on
         * the number of nanoseconds since the last time that
         * we detected a change.  The assumption is that files
         * that changed recently are likely to change again.
         * Minimum and maximum bounds are enforced, with
         * separate values for regular files and directories.
         *
         * Using the time since last change was detected
         * eliminates direct comparison or calculation
         * using mixed client and server times.  SMBFS
         * does not make any assumptions regarding the
         * client and server clocks being synchronized.
         */
        if (fap->fa_mtime.tv_sec != np->r_attr.fa_mtime.tv_sec ||
            fap->fa_mtime.tv_nsec != np->r_attr.fa_mtime.tv_nsec ||
            fap->fa_size != np->r_attr.fa_size)
                np->r_mtime = now;

        if ((smi->smi_flags & SMI_NOAC) || (vp->v_flag & VNOCACHE))
                delta = 0;
        else {
                delta = now - np->r_mtime;
                if (vtype == VDIR) {
                        if (delta < smi->smi_acdirmin)
                                delta = smi->smi_acdirmin;
                        else if (delta > smi->smi_acdirmax)
                                delta = smi->smi_acdirmax;
                } else {
                        if (delta < smi->smi_acregmin)
                                delta = smi->smi_acregmin;
                        else if (delta > smi->smi_acregmax)
                                delta = smi->smi_acregmax;
                }
        }

        np->r_attrtime = now + delta;
        np->r_attr = *fap;
        np->n_mode = mode;
        oldvt = vp->v_type;
        vp->v_type = vtype;

        /*
         * Shall we update r_size? (local notion of size)
         *
         * The real criteria for updating r_size should be:
         * if the file has grown on the server, or if
         * the client has not modified the file.
         *
         * Also deal with the fact that SMB presents
         * directories as having size=0.  Doing that
         * here and leaving fa_size as returned OtW
         * avoids fixing the size lots of places.
         */
        newsize = fap->fa_size;
        if (vtype == VDIR && newsize < DEV_BSIZE)
                newsize = DEV_BSIZE;

        if (np->r_size != newsize &&
            (!vn_has_cached_data(vp) ||
            (!(np->r_flags & RDIRTY) && np->r_count == 0))) {
                /* OK to set the size. */
                np->r_size = newsize;
        }

        /*
         * Here NFS has:
         * nfs_setswaplike(vp, va);
         * np->r_flags &= ~RWRITEATTR;
         * (not needed here)
         */

        np->n_flag &= ~NATTRCHANGED;
        mutex_exit(&np->r_statelock);

        if (oldvt != vtype) {
                SMBVDEBUG("vtype change %d to %d\n", oldvt, vtype);
        }
}

/*
 * Fill in attribute from the cache.
 *
 * If valid, copy to *fap and return zero,
 * otherwise return an error.
 *
 * From NFS: nfs_getattr_cache()
 */
int
smbfs_getattr_cache(vnode_t *vp, struct smbfattr *fap)
{
        smbnode_t *np;
        int error;

        np = VTOSMB(vp);

        mutex_enter(&np->r_statelock);
        if (gethrtime() >= np->r_attrtime) {
                /* cache expired */
                error = ENOENT;
        } else {
                /* cache is valid */
                *fap = np->r_attr;
                error = 0;
        }
        mutex_exit(&np->r_statelock);

        return (error);
}

/*
 * Get attributes over-the-wire and update attributes cache
 * if no error occurred in the over-the-wire operation.
 * Return 0 if successful, otherwise error.
 * From NFS: nfs_getattr_otw
 */
static int
smbfs_getattr_otw(vnode_t *vp, struct smbfattr *fap, cred_t *cr)
{
        struct smb_cred scred;
        smbnode_t *np = VTOSMB(vp);
        smb_share_t *ssp = np->n_mount->smi_share;
        smb_fh_t *fhp = NULL;
        int error;

        bzero(fap, sizeof (*fap));

        /*
         * Special case the XATTR directory here (all fake).
         * OK to leave a,c,m times zero (expected).
         */
        if (vp->v_flag & V_XATTRDIR) {
                fap->fa_attr = SMB_FA_DIR;
                fap->fa_size = DEV_BSIZE;
                return (0);
        }

        /*
         * Here NFS uses the ACL RPC (if smi_flags & SMI_ACL)
         * With SMB, getting the ACL is a significantly more
         * expensive operation, so we do that only when asked
         * for the uid/gid.  See smbfsgetattr().
         */

        /* Shared lock for (possible) n_fid use. */
        if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
                return (EINTR);
        smb_credinit(&scred, cr);

        // Does the attr. open code path work for streams?
        // Trying that, and if it doesn't work enable this.
#if 0 // XXX
        /*
         * Extended attribute files
         */
        if (np->n_flag & N_XATTR) {
                error = smbfs_xa_getfattr(np, fap, scrp);
                goto out;
        }
#endif // XXX

        if (np->n_fidrefs > 0 &&
            (fhp = np->n_fid) != NULL &&
            (fhp->fh_vcgenid == ssp->ss_vcgenid)) {
                /* Use the FID we have. */
                error = smbfs_smb_getfattr(np, fhp, fap, &scred);

        } else {
                /* This will do an attr open */
                error = smbfs_smb_getpattr(np, fap, &scred);
        }

        smb_credrele(&scred);
        smbfs_rw_exit(&np->r_lkserlock);

        if (error) {
                /* Here NFS has: PURGE_STALE_FH(error, vp, cr) */
                smbfs_attrcache_remove(np);
                if (error == ENOENT || error == ENOTDIR) {
                        /*
                         * Getattr failed because the object was
                         * removed or renamed by another client.
                         * Remove any cached attributes under it.
                         */
                        smbfs_attrcache_prune(np);
                }
                return (error);
        }

        /*
         * Here NFS has: nfs_cache_fattr(vap, fa, vap, t, cr);
         * which did: fattr_to_vattr, nfs_attr_cache.
         * We cache the fattr form, so just do the
         * cache check and store the attributes.
         */
        smbfs_cache_check(vp, fap, cr);
        smbfs_attrcache_fa(vp, fap);

        return (0);
}

/*
 * Return either cached or remote attributes.  If we get remote attrs,
 * use them to check and invalidate caches, then cache the new attributes.
 *
 * From NFS: nfsgetattr()
 */
int
smbfsgetattr(vnode_t *vp, struct vattr *vap, cred_t *cr)
{
        struct smbfattr fa;
        smbmntinfo_t *smi;
        uint_t mask;
        int error;

        smi = VTOSMI(vp);

        ASSERT(curproc->p_zone == smi->smi_zone_ref.zref_zone);

        /*
         * If asked for UID or GID, update n_uid, n_gid.
         */
        mask = AT_ALL;
        if (vap->va_mask & (AT_UID | AT_GID)) {
                if (smi->smi_flags & SMI_ACL)
                        (void) smbfs_acl_getids(vp, cr);
                /* else leave as set in make_smbnode */
        } else {
                mask &= ~(AT_UID | AT_GID);
        }

        /*
         * If we've got cached attributes, just use them;
         * otherwise go to the server to get attributes,
         * which will update the cache in the process.
         */
        error = smbfs_getattr_cache(vp, &fa);
        if (error)
                error = smbfs_getattr_otw(vp, &fa, cr);
        if (error)
                return (error);
        vap->va_mask |= mask;

        /*
         * Re. client's view of the file size, see:
         * smbfs_attrcache_fa, smbfs_getattr_otw
         */
        smbfattr_to_vattr(vp, &fa, vap);
        if (vap->va_mask & AT_XVATTR)
                smbfattr_to_xvattr(&fa, vap);

        return (0);
}
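
/*
 * Caller-side sketch (not part of the build): roughly how a getattr
 * vnode-op entry point would use smbfsgetattr(), after its zone and
 * forced-unmount checks.  Names here are for illustration only.
 */
#if 0   /* example only */
static int
example_getattr(vnode_t *vp, struct vattr *vap, cred_t *cr)
{
        smbmntinfo_t *smi = VTOSMI(vp);

        if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
                return (EIO);
        if ((smi->smi_flags & SMI_DEAD) ||
            (vp->v_vfsp->vfs_flag & VFS_UNMOUNTED))
                return (EIO);

        /* Cached attributes if still fresh, otherwise over-the-wire. */
        return (smbfsgetattr(vp, vap, cr));
}
#endif  /* example only */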


/*
 * Convert SMB over-the-wire attributes to vnode form.
 * From NFS: nattr_to_vattr()
 */
void
smbfattr_to_vattr(vnode_t *vp, struct smbfattr *fa, struct vattr *vap)
{
        struct smbnode *np = VTOSMB(vp);

        /*
         * Take type, mode, uid, gid from the smbfs node,
         * which has been updated by _getattr_otw.
         */
        vap->va_type = vp->v_type;
        vap->va_mode = np->n_mode;

        vap->va_uid = np->n_uid;
        vap->va_gid = np->n_gid;

        vap->va_fsid = vp->v_vfsp->vfs_dev;
        vap->va_nodeid = np->n_ino;
        vap->va_nlink = 1;

        /*
         * Difference from NFS here:  We cache attributes as
         * reported by the server, so r_attr.fa_size is the
         * server's idea of the file size.  This is called
         * for getattr, so we want to return the client's
         * idea of the file size.  NFS deals with that in
         * nfsgetattr(), the equivalent of our caller.
         */
        vap->va_size = np->r_size;

        /*
         * Times.  Note, already converted from NT to
         * Unix form (in the unmarshalling code).
         */
        vap->va_atime = fa->fa_atime;
        vap->va_mtime = fa->fa_mtime;
        vap->va_ctime = fa->fa_ctime;

        /*
         * rdev, blksize, seq are made up.
         * va_nblocks is 512 byte blocks.
         */
        vap->va_rdev = vp->v_rdev;
        vap->va_blksize = MAXBSIZE;
        vap->va_nblocks = (fsblkcnt64_t)btod(np->r_attr.fa_allocsz);
        vap->va_seq = 0;
}

/*
 * smbfattr_to_xvattr: like smbfattr_to_vattr but for
 * Extensible system attributes (PSARC 2007/315)
 */
static void
smbfattr_to_xvattr(struct smbfattr *fa, struct vattr *vap)
{
        xvattr_t *xvap = (xvattr_t *)vap;  /* *vap may be xvattr_t */
        xoptattr_t *xoap = NULL;

        if ((xoap = xva_getxoptattr(xvap)) == NULL)
                return;

        if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
                xoap->xoa_createtime = fa->fa_createtime;
                XVA_SET_RTN(xvap, XAT_CREATETIME);
        }

        if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
                xoap->xoa_archive =
                    ((fa->fa_attr & SMB_FA_ARCHIVE) != 0);
                XVA_SET_RTN(xvap, XAT_ARCHIVE);
        }

        if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
                xoap->xoa_system =
                    ((fa->fa_attr & SMB_FA_SYSTEM) != 0);
                XVA_SET_RTN(xvap, XAT_SYSTEM);
        }

        if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
                xoap->xoa_readonly =
                    ((fa->fa_attr & SMB_FA_RDONLY) != 0);
                XVA_SET_RTN(xvap, XAT_READONLY);
        }

        if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
                xoap->xoa_hidden =
                    ((fa->fa_attr & SMB_FA_HIDDEN) != 0);
                XVA_SET_RTN(xvap, XAT_HIDDEN);
        }
}
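
/*
 * Consumer sketch (not part of the build): requesting the DOS attribute
 * bits through the extensible attribute (xvattr) interface, which is
 * what causes smbfattr_to_xvattr() above to be called to fill them in.
 * Assumes the usual xva_init()/VOP_GETATTR() calling convention.
 */
#if 0   /* example only */
static int
example_get_hidden(vnode_t *vp, cred_t *cr, boolean_t *hiddenp)
{
        xvattr_t xva;
        xoptattr_t *xoap;
        int error;

        xva_init(&xva);                 /* sets AT_XVATTR in va_mask */
        XVA_SET_REQ(&xva, XAT_HIDDEN);

        error = VOP_GETATTR(vp, &xva.xva_vattr, 0, cr, NULL);
        if (error != 0)
                return (error);

        xoap = xva_getxoptattr(&xva);
        if (xoap != NULL && XVA_ISSET_RTN(&xva, XAT_HIDDEN))
                *hiddenp = (xoap->xoa_hidden != 0);
        else
                *hiddenp = B_FALSE;
        return (0);
}
#endif  /* example only */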

/*
 * Here NFS has:
 * nfs_async_... stuff
 * which we're not using (no async I/O), and:
 * writerp(),
 * nfs_putpages()
 * nfs_invalidate_pages()
 * which we have in smbfs_vnops.c, and
 * nfs_printfhandle()
 * nfs_write_error()
 * not needed here.
 */

/*
 * Helper function for smbfs_sync
 *
 * Walk the per-zone list of smbfs mounts, calling smbfs_rflush
 * on each one.  This is a little tricky because we need to exit
 * the list mutex before each _rflush call and then try to resume
 * where we were in the list after re-entering the mutex.
 */
void
smbfs_flushall(cred_t *cr)
{
        smi_globals_t *smg;
        smbmntinfo_t *tmp_smi, *cur_smi, *next_smi;

        smg = zone_getspecific(smi_list_key, crgetzone(cr));
        ASSERT(smg != NULL);

        mutex_enter(&smg->smg_lock);
        cur_smi = list_head(&smg->smg_list);
        if (cur_smi == NULL) {
                mutex_exit(&smg->smg_lock);
                return;
        }
        VFS_HOLD(cur_smi->smi_vfsp);
        mutex_exit(&smg->smg_lock);

flush:
        smbfs_rflush(cur_smi->smi_vfsp, cr);

        mutex_enter(&smg->smg_lock);
        /*
         * Resume after cur_smi if that's still on the list,
         * otherwise restart at the head.
         */
        for (tmp_smi = list_head(&smg->smg_list);
            tmp_smi != NULL;
            tmp_smi = list_next(&smg->smg_list, tmp_smi))
                if (tmp_smi == cur_smi)
                        break;
        if (tmp_smi != NULL)
                next_smi = list_next(&smg->smg_list, tmp_smi);
        else
                next_smi = list_head(&smg->smg_list);

        if (next_smi != NULL)
                VFS_HOLD(next_smi->smi_vfsp);
        VFS_RELE(cur_smi->smi_vfsp);

        mutex_exit(&smg->smg_lock);

        if (next_smi != NULL) {
                cur_smi = next_smi;
                goto flush;
        }
}

/*
 * SMB Client initialization and cleanup.
 * Much of it is per-zone now.
 */


/* ARGSUSED */
static void *
smbfs_zone_init(zoneid_t zoneid)
{
        smi_globals_t *smg;

        smg = kmem_alloc(sizeof (*smg), KM_SLEEP);
        mutex_init(&smg->smg_lock, NULL, MUTEX_DEFAULT, NULL);
        list_create(&smg->smg_list, sizeof (smbmntinfo_t),
            offsetof(smbmntinfo_t, smi_zone_node));
        smg->smg_destructor_called = B_FALSE;
        return (smg);
}

/*
 * Callback routine to tell all SMBFS mounts in the zone to stop creating new
 * threads.  Existing threads should exit.
 */
/* ARGSUSED */
static void
smbfs_zone_shutdown(zoneid_t zoneid, void *data)
{
        smi_globals_t *smg = data;
        smbmntinfo_t *smi;

        ASSERT(smg != NULL);
again:
        mutex_enter(&smg->smg_lock);
        for (smi = list_head(&smg->smg_list); smi != NULL;
            smi = list_next(&smg->smg_list, smi)) {

                /*
                 * If we've done the shutdown work for this FS, skip.
                 * Once we go off the end of the list, we're done.
                 */
                if (smi->smi_flags & SMI_DEAD)
                        continue;

                /*
                 * We will do work, so not done.  Get a hold on the FS.
                 */
                VFS_HOLD(smi->smi_vfsp);

                mutex_enter(&smi->smi_lock);
                smi->smi_flags |= SMI_DEAD;
                mutex_exit(&smi->smi_lock);

                /*
                 * Drop lock and release FS, which may change list, then repeat.
                 * We're done when every mi has been done or the list is empty.
                 */
                mutex_exit(&smg->smg_lock);
                VFS_RELE(smi->smi_vfsp);
                goto again;
        }
        mutex_exit(&smg->smg_lock);
}

static void
smbfs_zone_free_globals(smi_globals_t *smg)
{
        list_destroy(&smg->smg_list);  /* makes sure the list is empty */
        mutex_destroy(&smg->smg_lock);
        kmem_free(smg, sizeof (*smg));
}

/* ARGSUSED */
static void
smbfs_zone_destroy(zoneid_t zoneid, void *data)
{
        smi_globals_t *smg = data;

        ASSERT(smg != NULL);
        mutex_enter(&smg->smg_lock);
        if (list_head(&smg->smg_list) != NULL) {
                /* Still waiting for VFS_FREEVFS() */
                smg->smg_destructor_called = B_TRUE;
                mutex_exit(&smg->smg_lock);
                return;
        }
        smbfs_zone_free_globals(smg);
}

/*
 * Add an SMBFS mount to the per-zone list of SMBFS mounts.
 */
void
smbfs_zonelist_add(smbmntinfo_t *smi)
{
        smi_globals_t *smg;

        smg = zone_getspecific(smi_list_key, smi->smi_zone_ref.zref_zone);
        mutex_enter(&smg->smg_lock);
        list_insert_head(&smg->smg_list, smi);
        mutex_exit(&smg->smg_lock);
}

/*
 * Remove an SMBFS mount from the per-zone list of SMBFS mounts.
 */
void
smbfs_zonelist_remove(smbmntinfo_t *smi)
{
        smi_globals_t *smg;

        smg = zone_getspecific(smi_list_key, smi->smi_zone_ref.zref_zone);
        mutex_enter(&smg->smg_lock);
        list_remove(&smg->smg_list, smi);
        /*
         * We can be called asynchronously by VFS_FREEVFS() after the zone
         * shutdown/destroy callbacks have executed; if so, clean up the zone's
         * smi_globals.
         */
        if (list_head(&smg->smg_list) == NULL &&
            smg->smg_destructor_called == B_TRUE) {
                smbfs_zone_free_globals(smg);
                return;
        }
        mutex_exit(&smg->smg_lock);
}

#ifdef lint
#define NEED_SMBFS_CALLBACKS 1
#endif

#ifdef NEED_SMBFS_CALLBACKS
/*
 * Call-back hooks for netsmb, in case we want them.
 * Apple's VFS wants them.  We may not need them.
 */
/*ARGSUSED*/
static void smbfs_dead(smb_share_t *ssp)
{
        /*
         * Walk the mount list, finding all mounts
         * using this share...
         */
}

/*ARGSUSED*/
static void smbfs_cb_nop(smb_share_t *ss)
{
        /* no-op */
}

smb_fscb_t smbfs_cb = {
        .fscb_disconn   = smbfs_dead,
        .fscb_connect   = smbfs_cb_nop
};

#endif /* NEED_SMBFS_CALLBACKS */

/*
 * SMBFS Client initialization routine.  This routine should only be called
 * once.  It performs the following tasks:
 *      - Initialize all global locks
 *      - Call sub-initialization routines (localize access to variables)
 */
int
smbfs_clntinit(void)
{

        zone_key_create(&smi_list_key, smbfs_zone_init, smbfs_zone_shutdown,
            smbfs_zone_destroy);
#ifdef NEED_SMBFS_CALLBACKS
        (void) smb_fscb_set(&smbfs_cb);
#endif /* NEED_SMBFS_CALLBACKS */
        return (0);
}

/*
 * This routine is called when the module is unloaded (modunload).  It
 * cleans up the previously allocated/initialized nodes.
 */
void
smbfs_clntfini(void)
{
#ifdef NEED_SMBFS_CALLBACKS
        (void) smb_fscb_set(NULL);
#endif /* NEED_SMBFS_CALLBACKS */
        (void) zone_key_delete(smi_list_key);
}