1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2016 Nexenta Systems, Inc.
26 * Copyright (c) 2017 by Delphix. All rights reserved.
27 * Copyright 2024 Oxide Computer Company
28 */
29
30 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
31 /* All Rights Reserved */
32
33 /*
34 * University Copyright- Copyright (c) 1982, 1986, 1988
35 * The Regents of the University of California
36 * All Rights Reserved
37 *
38 * University Acknowledgment- Portions of this document are derived from
39 * software developed by the University of California, Berkeley, and its
40 * contributors.
41 */
42
43 #include <sys/types.h>
44 #include <sys/t_lock.h>
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/bitmap.h>
48 #include <sys/sysmacros.h>
49 #include <sys/kmem.h>
50 #include <sys/signal.h>
51 #include <sys/user.h>
52 #include <sys/proc.h>
53 #include <sys/disp.h>
54 #include <sys/buf.h>
55 #include <sys/pathname.h>
56 #include <sys/vfs.h>
57 #include <sys/vfs_opreg.h>
58 #include <sys/vnode.h>
59 #include <sys/file.h>
60 #include <sys/atomic.h>
61 #include <sys/uio.h>
62 #include <sys/dkio.h>
63 #include <sys/cred.h>
64 #include <sys/conf.h>
65 #include <sys/dnlc.h>
66 #include <sys/kstat.h>
67 #include <sys/acl.h>
68 #include <sys/fs/ufs_fsdir.h>
69 #include <sys/fs/ufs_fs.h>
70 #include <sys/fs/ufs_inode.h>
71 #include <sys/fs/ufs_mount.h>
72 #include <sys/fs/ufs_acl.h>
73 #include <sys/fs/ufs_panic.h>
74 #include <sys/fs/ufs_bio.h>
75 #include <sys/fs/ufs_quota.h>
76 #include <sys/fs/ufs_log.h>
77 #undef NFS
78 #include <sys/statvfs.h>
79 #include <sys/mount.h>
80 #include <sys/mntent.h>
81 #include <sys/swap.h>
82 #include <sys/errno.h>
83 #include <sys/debug.h>
84 #include "fs/fs_subr.h"
85 #include <sys/cmn_err.h>
86 #include <sys/dnlc.h>
87 #include <sys/fssnap_if.h>
88 #include <sys/sunddi.h>
89 #include <sys/bootconf.h>
90 #include <sys/policy.h>
91 #include <sys/zone.h>
92
93 /*
94 * This is the loadable module wrapper.
95 */
96 #include <sys/modctl.h>
97
/* Filesystem type index assigned by the VFS framework (recorded in vfs_fstype) */
int ufsfstype;
/* UFS operations vector; presumably filled in by ufsinit() — not visible here */
vfsops_t *ufs_vfsops;
static int ufsinit(int, char *);
/* old-style forward declaration; full prototype appears further below */
static int mountfs();
extern int highbit();
extern struct instats ins;
extern struct vnode *common_specvp(struct vnode *vp);
extern vfs_t EIO_vfs;

/* quota table bounds — assumed to delimit the dquot cache; defined for quota code */
struct dquot *dquot, *dquotNDQUOT;
108
109 /*
110 * Cylinder group summary information handling tunable.
111 * This defines when these deltas get logged.
112 * If the number of cylinders in the file system is over the
113 * tunable then we log csum updates. Otherwise the updates are only
114 * done for performance on unmount. After a panic they can be
115 * quickly constructed during mounting. See ufs_construct_si()
116 * called from ufs_getsummaryinfo().
117 *
118 * This performance feature can of course be disabled by setting
119 * ufs_ncg_log to 0, and fully enabled by setting it to 0xffffffff.
120 */
/* See the block comment above: cylinder-group count threshold for logging csums */
#define	UFS_LOG_NCG_DEFAULT	10000
uint32_t ufs_ncg_log = UFS_LOG_NCG_DEFAULT;

/*
 * ufs_clean_root indicates whether the root fs went down cleanly
 * (set at mount time when the superblock state is FSOKAY/clean).
 */
static int ufs_clean_root = 0;
128
129 /*
130 * UFS Mount options table
131 */
/*
 * Cancellation lists for the mount options table below: each list names
 * the option(s) that the corresponding option cancels when both are given.
 */
static char *intr_cancel[] = { MNTOPT_NOINTR, NULL };
static char *nointr_cancel[] = { MNTOPT_INTR, NULL };
static char *forcedirectio_cancel[] = { MNTOPT_NOFORCEDIRECTIO, NULL };
static char *noforcedirectio_cancel[] = { MNTOPT_FORCEDIRECTIO, NULL };
static char *largefiles_cancel[] = { MNTOPT_NOLARGEFILES, NULL };
static char *nolargefiles_cancel[] = { MNTOPT_LARGEFILES, NULL };
static char *logging_cancel[] = { MNTOPT_NOLOGGING, NULL };
static char *nologging_cancel[] = { MNTOPT_LOGGING, NULL };
static char *xattr_cancel[] = { MNTOPT_NOXATTR, NULL };
static char *noxattr_cancel[] = { MNTOPT_XATTR, NULL };
static char *quota_cancel[] = { MNTOPT_NOQUOTA, NULL };
static char *noquota_cancel[] = { MNTOPT_QUOTA, NULL };
static char *dfratime_cancel[] = { MNTOPT_NODFRATIME, NULL };
static char *nodfratime_cancel[] = { MNTOPT_DFRATIME, NULL };
146
static mntopt_t mntopts[] = {
/*
 *	option name		cancel option	default arg	flags
 *		ufs arg flag (UFSMNT_* bit, or 0 when the option is the default)
 */
	{ MNTOPT_INTR,		intr_cancel,	NULL,		MO_DEFAULT,
		(void *)0 },
	{ MNTOPT_NOINTR,	nointr_cancel,	NULL,		0,
		(void *)UFSMNT_NOINTR },
	{ MNTOPT_SYNCDIR,	NULL,		NULL,		0,
		(void *)UFSMNT_SYNCDIR },
	{ MNTOPT_FORCEDIRECTIO, forcedirectio_cancel, NULL,	0,
		(void *)UFSMNT_FORCEDIRECTIO },
	{ MNTOPT_NOFORCEDIRECTIO, noforcedirectio_cancel, NULL, 0,
		(void *)UFSMNT_NOFORCEDIRECTIO },
	{ MNTOPT_NOSETSEC,	NULL,		NULL,		0,
		(void *)UFSMNT_NOSETSEC },
	{ MNTOPT_LARGEFILES,	largefiles_cancel, NULL,	MO_DEFAULT,
		(void *)UFSMNT_LARGEFILES },
	{ MNTOPT_NOLARGEFILES,	nolargefiles_cancel, NULL,	0,
		(void *)0 },
	{ MNTOPT_LOGGING,	logging_cancel, NULL,		MO_TAG,
		(void *)UFSMNT_LOGGING },
	{ MNTOPT_NOLOGGING,	nologging_cancel, NULL,
		MO_NODISPLAY|MO_DEFAULT|MO_TAG, (void *)0 },
	{ MNTOPT_QUOTA,		quota_cancel, NULL,		MO_IGNORE,
		(void *)0 },
	{ MNTOPT_NOQUOTA,	noquota_cancel,	NULL,
		MO_NODISPLAY|MO_DEFAULT, (void *)0 },
	{ MNTOPT_GLOBAL,	NULL,		NULL,		0,
		(void *)0 },
	{ MNTOPT_XATTR,		xattr_cancel,	NULL,		MO_DEFAULT,
		(void *)0 },
	{ MNTOPT_NOXATTR,	noxattr_cancel,	NULL,		0,
		(void *)0 },
	{ MNTOPT_NOATIME,	NULL,		NULL,		0,
		(void *)UFSMNT_NOATIME },
	{ MNTOPT_DFRATIME,	dfratime_cancel, NULL,		0,
		(void *)0 },
	{ MNTOPT_NODFRATIME,	nodfratime_cancel, NULL,
		MO_NODISPLAY|MO_DEFAULT, (void *)UFSMNT_NODFRATIME },
	{ MNTOPT_ONERROR,	NULL,		UFSMNT_ONERROR_PANIC_STR,
		MO_DEFAULT|MO_HASVALUE, (void *)0 },
};

/* Option table descriptor handed to the VFS framework via vfw below */
static mntopts_t ufs_mntopts = {
	sizeof (mntopts) / sizeof (mntopt_t),
	mntopts
};
196
/*
 * Filesystem switch entry: version, name, init routine, capability flags
 * (supports remount, kstats, lofi-backed mounts, requires a mount device),
 * and the mount options table above.
 */
static vfsdef_t vfw = {
	VFSDEF_VERSION,
	"ufs",
	ufsinit,
	VSW_HASPROTO|VSW_CANREMOUNT|VSW_STATS|VSW_CANLOFI|VSW_MOUNTDEV,
	&ufs_mntopts
};

/*
 * Module linkage information for the kernel.
 */
extern struct mod_ops mod_fsops;

static struct modlfs modlfs = {
	&mod_fsops, "filesystem for ufs", &vfw
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlfs, NULL
};
217
218 /*
219 * An attempt has been made to make this module unloadable. In order to
220 * test it, we need a system in which the root fs is NOT ufs. THIS HAS NOT
221 * BEEN DONE
222 */
223
224 extern kstat_t *ufs_inode_kstat;
225 extern uint_t ufs_lockfs_key;
226 extern void ufs_lockfs_tsd_destructor(void *);
227 extern uint_t bypass_snapshot_throttle_key;
228
int
_init(void)
{
	/*
	 * Create an index into the per thread array so that any thread doing
	 * VOP will have a lockfs mark on it.
	 */
	tsd_create(&ufs_lockfs_key, ufs_lockfs_tsd_destructor);
	tsd_create(&bypass_snapshot_throttle_key, NULL);
	/* Register the filesystem module with the kernel module framework. */
	return (mod_install(&modlinkage));
}
240
int
_fini(void)
{
	/*
	 * Refuse unload unconditionally (see the comment above about the
	 * untested unload path): mod_remove() of this module always fails.
	 */
	return (EBUSY);
}
246
int
_info(struct modinfo *modinfop)
{
	/* Standard module-info entry point: report linkage to modinfo(8). */
	return (mod_info(&modlinkage, modinfop));
}
252
253 extern struct vnode *makespecvp(dev_t dev, vtype_t type);
254
255 extern kmutex_t ufs_scan_lock;
256
257 static int mountfs(struct vfs *, enum whymountroot, struct vnode *, char *,
258 struct cred *, int, void *, int);
259
260
/*
 * ufs_mount() -- VFS_MOUNT entry point for UFS.
 *
 * Validates the caller's privilege and the mount point, copies in the
 * optional ufs_args, resolves the special device (handling lofi-backed
 * mounts), performs busy/permission checks, and hands off the real work
 * to mountfs().  On success of a lofi mount the reference on the backing
 * file vnode (svp) is dropped; on any failure both lofi and device vnode
 * references acquired here are released.
 *
 * Returns 0 on success or an errno value.
 */
static int
ufs_mount(struct vfs *vfsp, struct vnode *mvp, struct mounta *uap,
	struct cred *cr)
{
	char *data = uap->dataptr;
	int datalen = uap->datalen;
	dev_t dev;
	struct vnode *lvp = NULL;	/* lofi device vnode, if any */
	struct vnode *svp = NULL;	/* special (block) device vnode */
	struct pathname dpn;
	int error;
	enum whymountroot why = ROOT_INIT;
	struct ufs_args args;
	int oflag, aflag;
	int fromspace = (uap->flags & MS_SYSSPACE) ?
	    UIO_SYSSPACE : UIO_USERSPACE;

	/* Caller must have the privilege to mount on this vnode. */
	if ((error = secpolicy_fs_mount(cr, mvp, vfsp)) != 0)
		return (error);

	if (mvp->v_type != VDIR)
		return (ENOTDIR);

	/*
	 * Unless this is a remount or an explicit overlay mount, refuse
	 * to cover a directory that is in use or is the root of a mount.
	 */
	mutex_enter(&mvp->v_lock);
	if ((uap->flags & MS_REMOUNT) == 0 &&
	    (uap->flags & MS_OVERLAY) == 0 &&
	    (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
		mutex_exit(&mvp->v_lock);
		return (EBUSY);
	}
	mutex_exit(&mvp->v_lock);

	/*
	 * Get arguments
	 */
	bzero(&args, sizeof (args));
	if ((uap->flags & MS_DATA) && data != NULL && datalen != 0) {
		int copy_result = 0;

		if (datalen > sizeof (args))
			return (EINVAL);
		/* MS_SYSSPACE means the args are already in kernel space. */
		if (uap->flags & MS_SYSSPACE)
			bcopy(data, &args, datalen);
		else
			copy_result = copyin(data, &args, datalen);
		if (copy_result)
			return (EFAULT);
		datalen = sizeof (struct ufs_args);
	} else {
		datalen = 0;
	}

	/* Pick device open mode and access mode to match the mount mode. */
	if ((vfsp->vfs_flag & VFS_RDONLY) != 0 ||
	    (uap->flags & MS_RDONLY) != 0) {
		oflag = FREAD;
		aflag = VREAD;
	} else {
		oflag = FREAD | FWRITE;
		aflag = VREAD | VWRITE;
	}

	/*
	 * Read in the mount point pathname
	 * (so we can record the directory the file system was last mounted on).
	 */
	if (error = pn_get(uap->dir, fromspace, &dpn))
		return (error);

	/*
	 * Resolve path name of special file being mounted.
	 */
	if (error = lookupname(uap->spec, fromspace, FOLLOW, NULL, &svp)) {
		pn_free(&dpn);
		return (error);
	}

	/*
	 * vfs_get_lofi(): 0 means a lofi-backed mount (lvp holds a new
	 * reference), > 0 is a hard error, and other returns mean "not
	 * lofi" — assumed from usage here; confirm against vfs_get_lofi().
	 */
	error = vfs_get_lofi(vfsp, &lvp);

	if (error > 0) {
		VN_RELE(svp);
		pn_free(&dpn);
		return (error);
	} else if (error == 0) {
		dev = lvp->v_rdev;

		if (getmajor(dev) >= devcnt) {
			error = ENXIO;
			goto out;
		}
	} else {
		dev = svp->v_rdev;

		if (svp->v_type != VBLK) {
			VN_RELE(svp);
			pn_free(&dpn);
			return (ENOTBLK);
		}

		if (getmajor(dev) >= devcnt) {
			error = ENXIO;
			goto out;
		}

		/*
		 * In SunCluster, requests to a global device are
		 * satisfied by a local device. We substitute the global
		 * pxfs node with a local spec node here.
		 */
		if (IS_PXFSVP(svp)) {
			ASSERT(lvp == NULL);
			VN_RELE(svp);
			svp = makespecvp(dev, VBLK);
		}

		if ((error = secpolicy_spec_open(cr, svp, oflag)) != 0) {
			VN_RELE(svp);
			pn_free(&dpn);
			return (error);
		}
	}

	if (uap->flags & MS_REMOUNT)
		why = ROOT_REMOUNT;

	/*
	 * Open device/file mounted on. We need this to check whether
	 * the caller has sufficient rights to access the resource in
	 * question. When bio is fixed for vnodes this can all be vnode
	 * operations.
	 */
	if ((error = VOP_ACCESS(svp, aflag, 0, cr, NULL)) != 0)
		goto out;

	/*
	 * Ensure that this device isn't already mounted or in progress on a
	 * mount unless this is a REMOUNT request or we are told to suppress
	 * mount checks. Global mounts require special handling.
	 */
	if ((uap->flags & MS_NOCHECK) == 0) {
		if ((uap->flags & MS_GLOBAL) == 0 &&
		    vfs_devmounting(dev, vfsp)) {
			error = EBUSY;
			goto out;
		}
		if (vfs_devismounted(dev)) {
			if ((uap->flags & MS_REMOUNT) == 0) {
				error = EBUSY;
				goto out;
			}
		}
	}

	/*
	 * If the device is a tape, mount it read only
	 */
	if (devopsp[getmajor(dev)]->devo_cb_ops->cb_flag & D_TAPE) {
		vfsp->vfs_flag |= VFS_RDONLY;
		vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
	}
	if (uap->flags & MS_RDONLY)
		vfsp->vfs_flag |= VFS_RDONLY;

	/*
	 * Mount the filesystem, free the device vnode on error.
	 * For a lofi mount the lofi device vnode, not the backing file
	 * vnode, is what mountfs() operates on.
	 */
	error = mountfs(vfsp, why, lvp != NULL ? lvp : svp,
	    dpn.pn_path, cr, 0, &args, datalen);

	if (error == 0) {
		vfs_set_feature(vfsp, VFSFT_SYSATTR_VIEWS);

		/*
		 * If lofi, drop our reference to the original file.
		 */
		if (lvp != NULL)
			VN_RELE(svp);
	}

out:
	pn_free(&dpn);

	if (error) {
		/* Release whichever vnode references we still hold. */
		if (lvp != NULL)
			VN_RELE(lvp);
		if (svp != NULL)
			VN_RELE(svp);
	}
	return (error);
}
450
451 /*
452 * Mount root file system.
453 * "why" is ROOT_INIT on initial call ROOT_REMOUNT if called to
454 * remount the root file system, and ROOT_UNMOUNT if called to
455 * unmount the root (e.g., as part of a system shutdown).
456 *
457 * XXX - this may be partially machine-dependent; it, along with the VFS_SWAPVP
458 * operation, goes along with auto-configuration. A mechanism should be
459 * provided by which machine-INdependent code in the kernel can say "get me the
460 * right root file system" and "get me the right initial swap area", and have
461 * that done in what may well be a machine-dependent fashion.
462 * Unfortunately, it is also file-system-type dependent (NFS gets it via
463 * bootparams calls, UFS gets it from various and sundry machine-dependent
464 * mechanisms, as SPECFS does for swap).
465 */
static int
ufs_mountroot(struct vfs *vfsp, enum whymountroot why)
{
	struct fs *fsp;
	int error;
	static int ufsrootdone = 0;	/* guards against a second ROOT_INIT */
	dev_t rootdev;
	struct vnode *vp;
	struct vnode *devvp = 0;
	int ovflags;
	int doclkset;
	ufsvfs_t *ufsvfsp;

	if (why == ROOT_INIT) {
		/* Only one initial root mount is ever allowed. */
		if (ufsrootdone++)
			return (EBUSY);
		rootdev = getrootdev();
		if (rootdev == (dev_t)NODEV)
			return (ENODEV);
		vfsp->vfs_dev = rootdev;
		/* Root always starts read-only; fixed up by a later remount. */
		vfsp->vfs_flag |= VFS_RDONLY;
	} else if (why == ROOT_REMOUNT) {
		vp = ((struct ufsvfs *)vfsp->vfs_data)->vfs_devvp;
		/* Drop cached name translations and dirty device pages. */
		(void) dnlc_purge_vfsp(vfsp, 0);
		vp = common_specvp(vp);
		(void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0, B_INVAL,
		    CRED(), NULL);
		(void) bfinval(vfsp->vfs_dev, 0);
		fsp = getfs(vfsp);

		/* Save flags so they can be restored if mountfs() fails. */
		ovflags = vfsp->vfs_flag;
		vfsp->vfs_flag &= ~VFS_RDONLY;
		vfsp->vfs_flag |= VFS_REMOUNT;
		rootdev = vfsp->vfs_dev;
	} else if (why == ROOT_UNMOUNT) {
		if (vfs_lock(vfsp) == 0) {
			(void) ufs_flush(vfsp);
			/*
			 * Mark the log as fully rolled
			 */
			ufsvfsp = (ufsvfs_t *)vfsp->vfs_data;
			fsp = ufsvfsp->vfs_fs;
			if (TRANS_ISTRANS(ufsvfsp) &&
			    !TRANS_ISERROR(ufsvfsp) &&
			    (fsp->fs_rolled == FS_NEED_ROLL)) {
				ml_unit_t *ul = ufsvfsp->vfs_log;

				error = ufs_putsummaryinfo(ul->un_dev,
				    ufsvfsp, fsp);
				if (error == 0) {
					fsp->fs_rolled = FS_ALL_ROLLED;
					UFS_BWRITE2(NULL, ufsvfsp->vfs_bufp);
				}
			}
			vfs_unlock(vfsp);
		} else {
			/* Couldn't lock; do a best-effort global sync. */
			ufs_update(0);
		}

		vp = ((struct ufsvfs *)vfsp->vfs_data)->vfs_devvp;
		(void) VOP_CLOSE(vp, FREAD|FWRITE, 1,
		    (offset_t)0, CRED(), NULL);
		return (0);
	}
	error = vfs_lock(vfsp);
	if (error)
		return (error);

	devvp = makespecvp(rootdev, VBLK);

	/* If RO media, don't call clkset() (see below) */
	doclkset = 1;
	if (why == ROOT_INIT) {
		/*
		 * Probe for writability by briefly opening the device
		 * read/write; failure just disables the clkset() call.
		 */
		error = VOP_OPEN(&devvp, FREAD|FWRITE, CRED(), NULL);
		if (error == 0) {
			(void) VOP_CLOSE(devvp, FREAD|FWRITE, 1,
			    (offset_t)0, CRED(), NULL);
		} else {
			doclkset = 0;
		}
	}

	error = mountfs(vfsp, why, devvp, "/", CRED(), 1, NULL, 0);
	/*
	 * XXX - assumes root device is not indirect, because we don't set
	 * rootvp. Is rootvp used for anything? If so, make another arg
	 * to mountfs.
	 */
	if (error) {
		vfs_unlock(vfsp);
		if (why == ROOT_REMOUNT)
			vfsp->vfs_flag = ovflags;
		if (rootvp) {
			VN_RELE(rootvp);
			rootvp = (struct vnode *)0;
		}
		VN_RELE(devvp);
		return (error);
	}
	if (why == ROOT_INIT)
		vfs_add((struct vnode *)0, vfsp,
		    (vfsp->vfs_flag & VFS_RDONLY) ? MS_RDONLY : 0);
	vfs_unlock(vfsp);
	fsp = getfs(vfsp);
	/* Seed the system clock from the superblock timestamp if writable. */
	clkset(doclkset ? fsp->fs_time : -1);
	ufsvfsp = (ufsvfs_t *)vfsp->vfs_data;
	if (ufsvfsp->vfs_log) {
		vfs_setmntopt(vfsp, MNTOPT_LOGGING, NULL, 0);
	}
	return (0);
}
577
/*
 * remountfs() -- handle a remount of an already-mounted UFS.
 *
 * Only the read-only -> read/write transition (plus option changes) is
 * supported; remounting to read-only returns ENOTSUP.  The filesystem is
 * quiesced under the lockfs lock, the on-disk superblock is re-read and
 * validated, and if the filesystem was left clean the in-core superblock
 * is replaced and marked read/write.
 *
 * Returns 0 on success or an errno value.
 */
static int
remountfs(struct vfs *vfsp, dev_t dev, void *raw_argsp, int args_len)
{
	struct ufsvfs *ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
	struct ulockfs *ulp = &ufsvfsp->vfs_ulockfs;
	struct buf *bp = ufsvfsp->vfs_bufp;	/* in-core superblock buf */
	struct fs *fsp = (struct fs *)bp->b_un.b_addr;
	struct fs *fspt;			/* freshly-read superblock */
	struct buf *tpt = 0;
	int error = 0;
	int flags = 0;

	if (args_len == sizeof (struct ufs_args) && raw_argsp)
		flags = ((struct ufs_args *)raw_argsp)->flags;

	/* cannot remount to RDONLY */
	if (vfsp->vfs_flag & VFS_RDONLY)
		return (ENOTSUP);

	/* whoops, wrong dev */
	if (vfsp->vfs_dev != dev)
		return (EINVAL);

	/*
	 * synchronize w/ufs ioctls
	 */
	mutex_enter(&ulp->ul_lock);
	atomic_inc_ulong(&ufs_quiesce_pend);

	/*
	 * reset options
	 */
	ufsvfsp->vfs_nointr = flags & UFSMNT_NOINTR;
	ufsvfsp->vfs_syncdir = flags & UFSMNT_SYNCDIR;
	ufsvfsp->vfs_nosetsec = flags & UFSMNT_NOSETSEC;
	ufsvfsp->vfs_noatime = flags & UFSMNT_NOATIME;
	if ((flags & UFSMNT_NODFRATIME) || ufsvfsp->vfs_noatime)
		ufsvfsp->vfs_dfritime &= ~UFS_DFRATIME;
	else /* dfratime, default behavior */
		ufsvfsp->vfs_dfritime |= UFS_DFRATIME;
	if (flags & UFSMNT_FORCEDIRECTIO)
		ufsvfsp->vfs_forcedirectio = 1;
	else /* default is no direct I/O */
		ufsvfsp->vfs_forcedirectio = 0;
	ufsvfsp->vfs_iotstamp = ddi_get_lbolt();

	/*
	 * set largefiles flag in ufsvfs equal to the
	 * value passed in by the mount command. If
	 * it is "nolargefiles", and the flag is set
	 * in the superblock, the mount fails.
	 */
	if (!(flags & UFSMNT_LARGEFILES)) {	/* "nolargefiles" */
		if (fsp->fs_flags & FSLARGEFILES) {
			error = EFBIG;
			goto remounterr;
		}
		ufsvfsp->vfs_lfflags &= ~UFS_LARGEFILES;
	} else	/* "largefiles" */
		ufsvfsp->vfs_lfflags |= UFS_LARGEFILES;
	/*
	 * read/write to read/write; all done
	 */
	if (fsp->fs_ronly == 0)
		goto remounterr;

	/*
	 * fix-on-panic assumes RO->RW remount implies system-critical fs
	 * if it is shortly after boot; so, don't attempt to lock and fix
	 * (unless the user explicitly asked for another action on error)
	 * XXX UFSMNT_ONERROR_RDONLY rather than UFSMNT_ONERROR_PANIC
	 */
#define	BOOT_TIME_LIMIT	(180*hz)
	if (!(flags & UFSMNT_ONERROR_FLGMASK) &&
	    ddi_get_lbolt() < BOOT_TIME_LIMIT) {
		cmn_err(CE_WARN, "%s is required to be mounted onerror=%s",
		    ufsvfsp->vfs_fs->fs_fsmnt, UFSMNT_ONERROR_PANIC_STR);
		flags |= UFSMNT_ONERROR_PANIC;
	}

	if ((error = ufsfx_mount(ufsvfsp, flags)) != 0)
		goto remounterr;

	/*
	 * quiesce the file system
	 */
	error = ufs_quiesce(ulp);
	if (error)
		goto remounterr;

	/* Re-read the superblock from disk for validation below. */
	tpt = UFS_BREAD(ufsvfsp, ufsvfsp->vfs_dev, SBLOCK, SBSIZE);
	if (tpt->b_flags & B_ERROR) {
		error = EIO;
		goto remounterr;
	}
	fspt = (struct fs *)tpt->b_un.b_addr;
	/*
	 * Sanity-check magic, on-disk format version, and basic geometry
	 * before trusting anything else in the superblock.
	 */
	if (((fspt->fs_magic != FS_MAGIC) &&
	    (fspt->fs_magic != MTB_UFS_MAGIC)) ||
	    (fspt->fs_magic == FS_MAGIC &&
	    (fspt->fs_version != UFS_EFISTYLE4NONEFI_VERSION_2 &&
	    fspt->fs_version != UFS_VERSION_MIN)) ||
	    (fspt->fs_magic == MTB_UFS_MAGIC &&
	    (fspt->fs_version > MTB_UFS_VERSION_1 ||
	    fspt->fs_version < MTB_UFS_VERSION_MIN)) ||
	    fspt->fs_bsize > MAXBSIZE || fspt->fs_frag > MAXFRAG ||
	    fspt->fs_bsize < sizeof (struct fs) || fspt->fs_bsize < PAGESIZE) {
		tpt->b_flags |= B_STALE | B_AGE;
		error = EINVAL;
		goto remounterr;
	}

	/* Going writable: let the log roll thread run again if it was held. */
	if (ufsvfsp->vfs_log && (ufsvfsp->vfs_log->un_flags & LDL_NOROLL)) {
		ufsvfsp->vfs_log->un_flags &= ~LDL_NOROLL;
		logmap_start_roll(ufsvfsp->vfs_log);
	}

	if (TRANS_ISERROR(ufsvfsp))
		goto remounterr;
	TRANS_DOMATAMAP(ufsvfsp);

	/*
	 * A clean-logged superblock without an active log is inconsistent;
	 * refuse the RW remount (fsck must run).
	 */
	if ((fspt->fs_state + fspt->fs_time == FSOKAY) &&
	    fspt->fs_clean == FSLOG && !TRANS_ISTRANS(ufsvfsp)) {
		ufsvfsp->vfs_log = NULL;
		ufsvfsp->vfs_domatamap = 0;
		error = ENOSPC;
		goto remounterr;
	}

	if (fspt->fs_state + fspt->fs_time == FSOKAY &&
	    (fspt->fs_clean == FSCLEAN ||
	    fspt->fs_clean == FSSTABLE ||
	    fspt->fs_clean == FSLOG)) {

		/*
		 * Ensure that ufs_getsummaryinfo doesn't reconstruct
		 * the summary info.
		 */
		error = ufs_getsummaryinfo(vfsp->vfs_dev, ufsvfsp, fspt);
		if (error)
			goto remounterr;

		/* preserve mount name */
		(void) strncpy(fspt->fs_fsmnt, fsp->fs_fsmnt, MAXMNTLEN);
		/* free the old cg space */
		kmem_free(fsp->fs_u.fs_csp, fsp->fs_cssize);
		/* switch in the new superblock */
		fspt->fs_rolled = FS_NEED_ROLL;
		bcopy(tpt->b_un.b_addr, bp->b_un.b_addr, fspt->fs_sbsize);

		fsp->fs_clean = FSSTABLE;
	} /* superblock updated in memory */
	tpt->b_flags |= B_STALE | B_AGE;
	brelse(tpt);
	tpt = 0;

	/* If the on-disk state wasn't clean, the RW remount fails. */
	if (fsp->fs_clean != FSSTABLE) {
		error = ENOSPC;
		goto remounterr;
	}


	if (TRANS_ISTRANS(ufsvfsp)) {
		fsp->fs_clean = FSLOG;
		ufsvfsp->vfs_dio = 0;
	} else
		if (ufsvfsp->vfs_dio)
			fsp->fs_clean = FSSUSPEND;

	TRANS_MATA_MOUNT(ufsvfsp);

	fsp->fs_fmod = 0;
	fsp->fs_ronly = 0;

	/* Release the quiesce and waiters before starting helper threads. */
	atomic_dec_ulong(&ufs_quiesce_pend);
	cv_broadcast(&ulp->ul_cv);
	mutex_exit(&ulp->ul_lock);

	if (TRANS_ISTRANS(ufsvfsp)) {

		/*
		 * start the delete thread
		 */
		ufs_thread_start(&ufsvfsp->vfs_delete, ufs_thread_delete, vfsp);

		/*
		 * start the reclaim thread
		 */
		if (fsp->fs_reclaim & (FS_RECLAIM|FS_RECLAIMING)) {
			fsp->fs_reclaim &= ~FS_RECLAIM;
			fsp->fs_reclaim |= FS_RECLAIMING;
			ufs_thread_start(&ufsvfsp->vfs_reclaim,
			    ufs_thread_reclaim, vfsp);
		}
	}

	TRANS_SBWRITE(ufsvfsp, TOP_MOUNT);

	return (0);

remounterr:
	if (tpt)
		brelse(tpt);
	atomic_dec_ulong(&ufs_quiesce_pend);
	cv_broadcast(&ulp->ul_cv);
	mutex_exit(&ulp->ul_lock);
	return (error);
}
785
/*
 * If the device maxtransfer size is not available, we use ufs_maxmaxphys
 * along with the system value for maxphys to determine the value for
 * maxtransfer.
 */
int ufs_maxmaxphys = (1024 * 1024);

#include <sys/ddi.h>		/* for delay(9f) */

/* Tunables for the mount retry loop (assumed; used below in mountfs) */
int ufs_mount_error_delay = 20;	/* default to 20ms */
int ufs_mount_timeout = 60000;	/* default to 1 minute */
797
798 static int
mountfs(struct vfs * vfsp,enum whymountroot why,struct vnode * devvp,char * path,cred_t * cr,int isroot,void * raw_argsp,int args_len)799 mountfs(struct vfs *vfsp, enum whymountroot why, struct vnode *devvp,
800 char *path, cred_t *cr, int isroot, void *raw_argsp, int args_len)
801 {
802 dev_t dev = devvp->v_rdev;
803 struct fs *fsp;
804 struct ufsvfs *ufsvfsp = 0;
805 struct buf *bp = 0;
806 struct buf *tp = 0;
807 struct dk_cinfo ci;
808 int error = 0;
809 size_t len;
810 int needclose = 0;
811 int needtrans = 0;
812 struct inode *rip;
813 struct vnode *rvp = NULL;
814 int flags = 0;
815 kmutex_t *ihm;
816 int elapsed;
817 int status;
818 extern int maxphys;
819
820 if (args_len == sizeof (struct ufs_args) && raw_argsp)
821 flags = ((struct ufs_args *)raw_argsp)->flags;
822
823 ASSERT(vfs_lock_held(vfsp));
824
825 if (why == ROOT_INIT) {
826 /*
827 * Open block device mounted on.
828 * When bio is fixed for vnodes this can all be vnode
829 * operations.
830 */
831 error = VOP_OPEN(&devvp,
832 (vfsp->vfs_flag & VFS_RDONLY) ? FREAD : FREAD|FWRITE,
833 cr, NULL);
834 if (error)
835 goto out;
836 needclose = 1;
837
838 /*
839 * Refuse to go any further if this
840 * device is being used for swapping.
841 */
842 if (IS_SWAPVP(devvp)) {
843 error = EBUSY;
844 goto out;
845 }
846 }
847
848 /*
849 * check for dev already mounted on
850 */
851 if (vfsp->vfs_flag & VFS_REMOUNT) {
852 error = remountfs(vfsp, dev, raw_argsp, args_len);
853 if (error == 0)
854 VN_RELE(devvp);
855 return (error);
856 }
857
858 ASSERT(devvp != 0);
859
860 /*
861 * Flush back any dirty pages on the block device to
862 * try and keep the buffer cache in sync with the page
863 * cache if someone is trying to use block devices when
864 * they really should be using the raw device.
865 */
866 (void) VOP_PUTPAGE(common_specvp(devvp), (offset_t)0,
867 (size_t)0, B_INVAL, cr, NULL);
868
869 /*
870 * read in superblock
871 */
872 ufsvfsp = kmem_zalloc(sizeof (struct ufsvfs), KM_SLEEP);
873 tp = UFS_BREAD(ufsvfsp, dev, SBLOCK, SBSIZE);
874 if (tp->b_flags & B_ERROR)
875 goto out;
876 fsp = (struct fs *)tp->b_un.b_addr;
877
878 if ((fsp->fs_magic != FS_MAGIC) && (fsp->fs_magic != MTB_UFS_MAGIC)) {
879 cmn_err(CE_NOTE,
880 "mount: not a UFS magic number (0x%x)", fsp->fs_magic);
881 error = EINVAL;
882 goto out;
883 }
884
885 if ((fsp->fs_magic == FS_MAGIC) &&
886 (fsp->fs_version != UFS_EFISTYLE4NONEFI_VERSION_2 &&
887 fsp->fs_version != UFS_VERSION_MIN)) {
888 cmn_err(CE_NOTE,
889 "mount: unrecognized version of UFS on-disk format: %d",
890 fsp->fs_version);
891 error = EINVAL;
892 goto out;
893 }
894
895 if ((fsp->fs_magic == MTB_UFS_MAGIC) &&
896 (fsp->fs_version > MTB_UFS_VERSION_1 ||
897 fsp->fs_version < MTB_UFS_VERSION_MIN)) {
898 cmn_err(CE_NOTE,
899 "mount: unrecognized version of UFS on-disk format: %d",
900 fsp->fs_version);
901 error = EINVAL;
902 goto out;
903 }
904
905 #ifndef _LP64
906 if (fsp->fs_magic == MTB_UFS_MAGIC) {
907 /*
908 * Find the size of the device in sectors. If the
909 * the size in sectors is greater than INT_MAX, it's
910 * a multi-terabyte file system, which can't be
911 * mounted by a 32-bit kernel. We can't use the
912 * fsbtodb() macro in the next line because the macro
913 * casts the intermediate values to daddr_t, which is
914 * a 32-bit quantity in a 32-bit kernel. Here we
915 * really do need the intermediate values to be held
916 * in 64-bit quantities because we're checking for
917 * overflow of a 32-bit field.
918 */
919 if ((((diskaddr_t)(fsp->fs_size)) << fsp->fs_fsbtodb)
920 > INT_MAX) {
921 cmn_err(CE_NOTE,
922 "mount: multi-terabyte UFS cannot be"
923 " mounted by a 32-bit kernel");
924 error = EINVAL;
925 goto out;
926 }
927
928 }
929 #endif
930
931 if (fsp->fs_bsize > MAXBSIZE || fsp->fs_frag > MAXFRAG ||
932 fsp->fs_bsize < sizeof (struct fs) || fsp->fs_bsize < PAGESIZE) {
933 error = EINVAL; /* also needs translation */
934 goto out;
935 }
936
937 /*
938 * Allocate VFS private data.
939 */
940 vfsp->vfs_bcount = 0;
941 vfsp->vfs_data = (caddr_t)ufsvfsp;
942 vfsp->vfs_fstype = ufsfstype;
943 vfsp->vfs_dev = dev;
944 vfsp->vfs_flag |= VFS_NOTRUNC;
945 vfs_make_fsid(&vfsp->vfs_fsid, dev, ufsfstype);
946 ufsvfsp->vfs_devvp = devvp;
947
948 /*
949 * Cross-link with vfs and add to instance list.
950 */
951 ufsvfsp->vfs_vfs = vfsp;
952 ufs_vfs_add(ufsvfsp);
953
954 ufsvfsp->vfs_dev = dev;
955 ufsvfsp->vfs_bufp = tp;
956
957 ufsvfsp->vfs_dirsize = INODESIZE + (4 * ALLOCSIZE) + fsp->fs_fsize;
958 ufsvfsp->vfs_minfrags =
959 (int)((int64_t)fsp->fs_dsize * fsp->fs_minfree / 100);
960 /*
961 * if mount allows largefiles, indicate so in ufsvfs
962 */
963 if (flags & UFSMNT_LARGEFILES)
964 ufsvfsp->vfs_lfflags |= UFS_LARGEFILES;
965 /*
966 * Initialize threads
967 */
968 ufs_delete_init(ufsvfsp, 1);
969 ufs_thread_init(&ufsvfsp->vfs_reclaim, 0);
970
971 /*
972 * Chicken and egg problem. The superblock may have deltas
973 * in the log. So after the log is scanned we reread the
974 * superblock. We guarantee that the fields needed to
975 * scan the log will not be in the log.
976 */
977 if (fsp->fs_logbno && fsp->fs_clean == FSLOG &&
978 (fsp->fs_state + fsp->fs_time == FSOKAY)) {
979 error = lufs_snarf(ufsvfsp, fsp, (vfsp->vfs_flag & VFS_RDONLY));
980 if (error) {
981 /*
982 * Allow a ro mount to continue even if the
983 * log cannot be processed - yet.
984 */
985 if (!(vfsp->vfs_flag & VFS_RDONLY)) {
986 cmn_err(CE_WARN, "Error accessing ufs "
987 "log for %s; Please run fsck(8)", path);
988 goto out;
989 }
990 }
991 tp->b_flags |= (B_AGE | B_STALE);
992 brelse(tp);
993 tp = UFS_BREAD(ufsvfsp, dev, SBLOCK, SBSIZE);
994 fsp = (struct fs *)tp->b_un.b_addr;
995 ufsvfsp->vfs_bufp = tp;
996 if (tp->b_flags & B_ERROR)
997 goto out;
998 }
999
1000 /*
1001 * Set logging mounted flag used by lockfs
1002 */
1003 ufsvfsp->vfs_validfs = UT_MOUNTED;
1004
1005 /*
1006 * Copy the super block into a buffer in its native size.
1007 * Use ngeteblk to allocate the buffer
1008 */
1009 bp = ngeteblk(fsp->fs_bsize);
1010 ufsvfsp->vfs_bufp = bp;
1011 bp->b_edev = dev;
1012 bp->b_dev = cmpdev(dev);
1013 bp->b_blkno = SBLOCK;
1014 bp->b_bcount = fsp->fs_sbsize;
1015 bcopy(tp->b_un.b_addr, bp->b_un.b_addr, fsp->fs_sbsize);
1016 tp->b_flags |= B_STALE | B_AGE;
1017 brelse(tp);
1018 tp = 0;
1019
1020 fsp = (struct fs *)bp->b_un.b_addr;
1021 /*
1022 * Mount fails if superblock flag indicates presence of large
1023 * files and filesystem is attempted to be mounted 'nolargefiles'.
1024 * The exception is for a read only mount of root, which we
1025 * always want to succeed, so fsck can fix potential problems.
1026 * The assumption is that we will remount root at some point,
1027 * and the remount will enforce the mount option.
1028 */
1029 if (!(isroot & (vfsp->vfs_flag & VFS_RDONLY)) &&
1030 (fsp->fs_flags & FSLARGEFILES) &&
1031 !(flags & UFSMNT_LARGEFILES)) {
1032 error = EFBIG;
1033 goto out;
1034 }
1035
1036 if (vfsp->vfs_flag & VFS_RDONLY) {
1037 fsp->fs_ronly = 1;
1038 fsp->fs_fmod = 0;
1039 if (((fsp->fs_state + fsp->fs_time) == FSOKAY) &&
1040 ((fsp->fs_clean == FSCLEAN) ||
1041 (fsp->fs_clean == FSSTABLE) ||
1042 (fsp->fs_clean == FSLOG))) {
1043 if (isroot) {
1044 if (fsp->fs_clean == FSLOG) {
1045 if (fsp->fs_rolled == FS_ALL_ROLLED) {
1046 ufs_clean_root = 1;
1047 }
1048 } else {
1049 ufs_clean_root = 1;
1050 }
1051 }
1052 fsp->fs_clean = FSSTABLE;
1053 } else {
1054 fsp->fs_clean = FSBAD;
1055 }
1056 } else {
1057
1058 fsp->fs_fmod = 0;
1059 fsp->fs_ronly = 0;
1060
1061 TRANS_DOMATAMAP(ufsvfsp);
1062
1063 if ((TRANS_ISERROR(ufsvfsp)) ||
1064 (((fsp->fs_state + fsp->fs_time) == FSOKAY) &&
1065 fsp->fs_clean == FSLOG && !TRANS_ISTRANS(ufsvfsp))) {
1066 ufsvfsp->vfs_log = NULL;
1067 ufsvfsp->vfs_domatamap = 0;
1068 error = ENOSPC;
1069 goto out;
1070 }
1071
1072 if (((fsp->fs_state + fsp->fs_time) == FSOKAY) &&
1073 (fsp->fs_clean == FSCLEAN ||
1074 fsp->fs_clean == FSSTABLE ||
1075 fsp->fs_clean == FSLOG))
1076 fsp->fs_clean = FSSTABLE;
1077 else {
1078 if (isroot) {
1079 /*
1080 * allow root partition to be mounted even
1081 * when fs_state is not ok
1082 * will be fixed later by a remount root
1083 */
1084 fsp->fs_clean = FSBAD;
1085 ufsvfsp->vfs_log = NULL;
1086 ufsvfsp->vfs_domatamap = 0;
1087 } else {
1088 error = ENOSPC;
1089 goto out;
1090 }
1091 }
1092
1093 if (fsp->fs_clean == FSSTABLE && TRANS_ISTRANS(ufsvfsp))
1094 fsp->fs_clean = FSLOG;
1095 }
1096 TRANS_MATA_MOUNT(ufsvfsp);
1097 needtrans = 1;
1098
1099 vfsp->vfs_bsize = fsp->fs_bsize;
1100
1101 /*
1102 * Read in summary info
1103 */
1104 if (error = ufs_getsummaryinfo(dev, ufsvfsp, fsp))
1105 goto out;
1106
1107 /*
1108 * lastwhinetime is set to zero rather than lbolt, so that after
1109 * mounting if the filesystem is found to be full, then immediately the
1110 * "file system message" will be logged.
1111 */
1112 ufsvfsp->vfs_lastwhinetime = 0L;
1113
1114
1115 mutex_init(&ufsvfsp->vfs_lock, NULL, MUTEX_DEFAULT, NULL);
1116 (void) copystr(path, fsp->fs_fsmnt, sizeof (fsp->fs_fsmnt) - 1, &len);
1117 bzero(fsp->fs_fsmnt + len, sizeof (fsp->fs_fsmnt) - len);
1118
1119 /*
1120 * Sanity checks for old file systems
1121 */
1122 if (fsp->fs_postblformat == FS_42POSTBLFMT)
1123 ufsvfsp->vfs_nrpos = 8;
1124 else
1125 ufsvfsp->vfs_nrpos = fsp->fs_nrpos;
1126
1127 /*
1128 * Initialize lockfs structure to support file system locking
1129 */
1130 bzero(&ufsvfsp->vfs_ulockfs.ul_lockfs,
1131 sizeof (struct lockfs));
1132 ufsvfsp->vfs_ulockfs.ul_fs_lock = ULOCKFS_ULOCK;
1133 mutex_init(&ufsvfsp->vfs_ulockfs.ul_lock, NULL,
1134 MUTEX_DEFAULT, NULL);
1135 cv_init(&ufsvfsp->vfs_ulockfs.ul_cv, NULL, CV_DEFAULT, NULL);
1136
1137 /*
1138 * We don't need to grab vfs_dqrwlock for this ufs_iget() call.
1139 * We are in the process of mounting the file system so there
1140 * is no need to grab the quota lock. If a quota applies to the
1141 * root inode, then it will be updated when quotas are enabled.
1142 *
1143 * However, we have an ASSERT(RW_LOCK_HELD(&ufsvfsp->vfs_dqrwlock))
1144 * in getinoquota() that we want to keep so grab it anyway.
1145 */
1146 rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
1147
1148 error = ufs_iget_alloced(vfsp, UFSROOTINO, &rip, cr);
1149
1150 rw_exit(&ufsvfsp->vfs_dqrwlock);
1151
1152 if (error)
1153 goto out;
1154
1155 /*
1156 * make sure root inode is a directory. Returning ENOTDIR might
1157 * be confused with the mount point not being a directory, so
1158 * we use EIO instead.
1159 */
1160 if ((rip->i_mode & IFMT) != IFDIR) {
1161 /*
1162 * Mark this inode as subject for cleanup
1163 * to avoid stray inodes in the cache.
1164 */
1165 rvp = ITOV(rip);
1166 error = EIO;
1167 goto out;
1168 }
1169
1170 rvp = ITOV(rip);
1171 mutex_enter(&rvp->v_lock);
1172 rvp->v_flag |= VROOT;
1173 mutex_exit(&rvp->v_lock);
1174 ufsvfsp->vfs_root = rvp;
1175 /* The buffer for the root inode does not contain a valid b_vp */
1176 (void) bfinval(dev, 0);
1177
1178 /* options */
1179 ufsvfsp->vfs_nosetsec = flags & UFSMNT_NOSETSEC;
1180 ufsvfsp->vfs_nointr = flags & UFSMNT_NOINTR;
1181 ufsvfsp->vfs_syncdir = flags & UFSMNT_SYNCDIR;
1182 ufsvfsp->vfs_noatime = flags & UFSMNT_NOATIME;
1183 if ((flags & UFSMNT_NODFRATIME) || ufsvfsp->vfs_noatime)
1184 ufsvfsp->vfs_dfritime &= ~UFS_DFRATIME;
1185 else /* dfratime, default behavior */
1186 ufsvfsp->vfs_dfritime |= UFS_DFRATIME;
1187 if (flags & UFSMNT_FORCEDIRECTIO)
1188 ufsvfsp->vfs_forcedirectio = 1;
1189 else if (flags & UFSMNT_NOFORCEDIRECTIO)
1190 ufsvfsp->vfs_forcedirectio = 0;
1191 ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
1192
1193 ufsvfsp->vfs_nindiroffset = fsp->fs_nindir - 1;
1194 ufsvfsp->vfs_nindirshift = highbit(ufsvfsp->vfs_nindiroffset);
1195 ufsvfsp->vfs_ioclustsz = fsp->fs_bsize * fsp->fs_maxcontig;
1196
1197 if (cdev_ioctl(dev, DKIOCINFO, (intptr_t)&ci,
1198 FKIOCTL|FNATIVE|FREAD, CRED(), &status) == 0) {
1199 ufsvfsp->vfs_iotransz = ci.dki_maxtransfer * DEV_BSIZE;
1200 } else {
1201 ufsvfsp->vfs_iotransz = MIN(maxphys, ufs_maxmaxphys);
1202 }
1203
1204 if (ufsvfsp->vfs_iotransz <= 0) {
1205 ufsvfsp->vfs_iotransz = MIN(maxphys, ufs_maxmaxphys);
1206 }
1207
1208 /*
1209 * When logging, used to reserve log space for writes and truncs
1210 */
1211 ufsvfsp->vfs_avgbfree = fsp->fs_cstotal.cs_nbfree / fsp->fs_ncg;
1212
1213 /*
1214 * Determine whether to log cylinder group summary info.
1215 */
1216 ufsvfsp->vfs_nolog_si = (fsp->fs_ncg < ufs_ncg_log);
1217
1218 if (TRANS_ISTRANS(ufsvfsp)) {
1219 /*
1220 * start the delete thread
1221 */
1222 ufs_thread_start(&ufsvfsp->vfs_delete, ufs_thread_delete, vfsp);
1223
1224 /*
1225 * start reclaim thread if the filesystem was not mounted
1226 * read only.
1227 */
1228 if (!fsp->fs_ronly && (fsp->fs_reclaim &
1229 (FS_RECLAIM|FS_RECLAIMING))) {
1230 fsp->fs_reclaim &= ~FS_RECLAIM;
1231 fsp->fs_reclaim |= FS_RECLAIMING;
1232 ufs_thread_start(&ufsvfsp->vfs_reclaim,
1233 ufs_thread_reclaim, vfsp);
1234 }
1235
1236 /* Mark the fs as unrolled */
1237 fsp->fs_rolled = FS_NEED_ROLL;
1238 } else if (!fsp->fs_ronly && (fsp->fs_reclaim &
1239 (FS_RECLAIM|FS_RECLAIMING))) {
1240 /*
1241 * If a file system that is mounted nologging, after
1242 * having previously been mounted logging, becomes
1243 * unmounted whilst the reclaim thread is in the throes
1244 * of reclaiming open/deleted inodes, a subsequent mount
1245 * of such a file system with logging disabled could lead
1246 * to inodes becoming lost. So, start reclaim now, even
1247 * though logging was disabled for the previous mount, to
1248 * tidy things up.
1249 */
1250 fsp->fs_reclaim &= ~FS_RECLAIM;
1251 fsp->fs_reclaim |= FS_RECLAIMING;
1252 ufs_thread_start(&ufsvfsp->vfs_reclaim,
1253 ufs_thread_reclaim, vfsp);
1254 }
1255
1256 if (!fsp->fs_ronly) {
1257 TRANS_SBWRITE(ufsvfsp, TOP_MOUNT);
1258 if (error = geterror(ufsvfsp->vfs_bufp))
1259 goto out;
1260 }
1261
1262 /* fix-on-panic initialization */
1263 if (isroot && !(flags & UFSMNT_ONERROR_FLGMASK))
1264 flags |= UFSMNT_ONERROR_PANIC; /* XXX ..._RDONLY */
1265
1266 if ((error = ufsfx_mount(ufsvfsp, flags)) != 0)
1267 goto out;
1268
1269 if (why == ROOT_INIT && isroot)
1270 rootvp = devvp;
1271
1272 return (0);
1273 out:
1274 if (error == 0)
1275 error = EIO;
1276 if (rvp) {
1277 /* the following sequence is similar to ufs_unmount() */
1278
1279 /*
1280 * There's a problem that ufs_iget() puts inodes into
1281 * the inode cache before it returns them. If someone
1282 * traverses that cache and gets a reference to our
1283 * inode, there's a chance they'll still be using it
1284 * after we've destroyed it. This is a hard race to
1285 * hit, but it's happened (putting in a medium delay
1286 * here, and a large delay in ufs_scan_inodes() for
1287 * inodes on the device we're bailing out on, makes
1288 * the race easy to demonstrate). The symptom is some
1289 * other part of UFS faulting on bad inode contents,
1290 * or when grabbing one of the locks inside the inode,
1291 * etc. The usual victim is ufs_scan_inodes() or
1292 * someone called by it.
1293 */
1294
1295 /*
1296 * First, isolate it so that no new references can be
1297 * gotten via the inode cache.
1298 */
1299 ihm = &ih_lock[INOHASH(UFSROOTINO)];
1300 mutex_enter(ihm);
1301 remque(rip);
1302 mutex_exit(ihm);
1303
1304 /*
1305 * Now wait for all outstanding references except our
1306 * own to drain. This could, in theory, take forever,
1307 * so don't wait *too* long. If we time out, mark
1308 * it stale and leak it, so we don't hit the problem
1309 * described above.
1310 *
1311 * Note that v_count is an int, which means we can read
1312 * it in one operation. Thus, there's no need to lock
1313 * around our tests.
1314 */
1315 elapsed = 0;
1316 while ((rvp->v_count > 1) && (elapsed < ufs_mount_timeout)) {
1317 delay(ufs_mount_error_delay * drv_usectohz(1000));
1318 elapsed += ufs_mount_error_delay;
1319 }
1320
1321 if (rvp->v_count > 1) {
1322 mutex_enter(&rip->i_tlock);
1323 rip->i_flag |= ISTALE;
1324 mutex_exit(&rip->i_tlock);
1325 cmn_err(CE_WARN,
1326 "Timed out while cleaning up after "
1327 "failed mount of %s", path);
1328 } else {
1329
1330 /*
1331 * Now we're the only one with a handle left, so tear
1332 * it down the rest of the way.
1333 */
1334 if (ufs_rmidle(rip))
1335 VN_RELE(rvp);
1336 ufs_si_del(rip);
1337 rip->i_ufsvfs = NULL;
1338 rvp->v_vfsp = NULL;
1339 rvp->v_type = VBAD;
1340 VN_RELE(rvp);
1341 }
1342 }
1343 if (needtrans) {
1344 TRANS_MATA_UMOUNT(ufsvfsp);
1345 }
1346 if (ufsvfsp) {
1347 ufs_vfs_remove(ufsvfsp);
1348 ufs_thread_exit(&ufsvfsp->vfs_delete);
1349 ufs_thread_exit(&ufsvfsp->vfs_reclaim);
1350 mutex_destroy(&ufsvfsp->vfs_lock);
1351 if (ufsvfsp->vfs_log) {
1352 lufs_unsnarf(ufsvfsp);
1353 }
1354 kmem_free(ufsvfsp, sizeof (struct ufsvfs));
1355 }
1356 if (bp) {
1357 bp->b_flags |= (B_STALE|B_AGE);
1358 brelse(bp);
1359 }
1360 if (tp) {
1361 tp->b_flags |= (B_STALE|B_AGE);
1362 brelse(tp);
1363 }
1364 if (needclose) {
1365 (void) VOP_CLOSE(devvp, (vfsp->vfs_flag & VFS_RDONLY) ?
1366 FREAD : FREAD|FWRITE, 1, (offset_t)0, cr, NULL);
1367 bflush(dev);
1368 (void) bfinval(dev, 1);
1369 }
1370 return (error);
1371 }
1372
1373 /*
1374 * vfs operations
1375 */
/*
 * VFS_UNMOUNT entry point for UFS.
 *
 * Tears down the file system identified by vfsp.  A normal unmount fails
 * with EBUSY while any vnode other than the root (with a single hold) or
 * the quota inode is still cached.  A forced unmount (MS_FORCE) instead
 * hard-locks the file system through the lockfs protocol, flushes it, and
 * cannot fail once the hash-chain scan has been passed; surviving vnodes
 * are redirected to EIO_vfs so lofs/NFS consumers get errors instead of
 * touching freed data.
 *
 * Returns 0 on success, or EPERM/EAGAIN/EBUSY (or a ufs_flush() errno)
 * on failure.  Caller holds the vfs lock (asserted below).
 */
static int
ufs_unmount(struct vfs *vfsp, int fflag, struct cred *cr)
{
	dev_t dev = vfsp->vfs_dev;
	struct ufsvfs *ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
	struct fs *fs = ufsvfsp->vfs_fs;
	struct ulockfs *ulp = &ufsvfsp->vfs_ulockfs;
	struct vnode *bvp, *vp;
	struct buf *bp;
	struct inode *ip, *inext, *rip;
	union ihead *ih;
	int error, flag, i;
	struct lockfs lockfs;
	int poll_events = POLLPRI;
	extern struct pollhead ufs_pollhd;
	refstr_t *mountpoint;

	ASSERT(vfs_lock_held(vfsp));

	if (secpolicy_fs_unmount(cr, vfsp) != 0)
		return (EPERM);
	/*
	 * Forced unmount is now supported through the
	 * lockfs protocol.
	 */
	if (fflag & MS_FORCE) {
		/*
		 * Mark the filesystem as being unmounted now in
		 * case of a forcible umount before we take any
		 * locks inside UFS to prevent racing with a VFS_VGET()
		 * request. Throw these VFS_VGET() requests away for
		 * the duration of the forcible umount so they won't
		 * use stale or even freed data later on when we're done.
		 * It may happen that the VFS has had a additional hold
		 * placed on it by someone other than UFS and thus will
		 * not get freed immediately once we're done with the
		 * umount by dounmount() - use VFS_UNMOUNTED to inform
		 * users of this still-alive VFS that its corresponding
		 * filesystem being gone so they can detect that and error
		 * out.
		 */
		vfsp->vfs_flag |= VFS_UNMOUNTED;

		ufs_thread_suspend(&ufsvfsp->vfs_delete);
		mutex_enter(&ulp->ul_lock);
		/*
		 * If file system is already hard locked,
		 * unmount the file system, otherwise
		 * hard lock it before unmounting.
		 */
		if (!ULOCKFS_IS_HLOCK(ulp)) {
			atomic_inc_ulong(&ufs_quiesce_pend);
			lockfs.lf_lock = LOCKFS_HLOCK;
			lockfs.lf_flags = 0;
			lockfs.lf_key = ulp->ul_lockfs.lf_key + 1;
			lockfs.lf_comlen = 0;
			lockfs.lf_comment = NULL;
			ufs_freeze(ulp, &lockfs);
			ULOCKFS_SET_BUSY(ulp);
			LOCKFS_SET_BUSY(&ulp->ul_lockfs);
			(void) ufs_quiesce(ulp);
			(void) ufs_flush(vfsp);
			(void) ufs_thaw(vfsp, ufsvfsp, ulp);
			atomic_dec_ulong(&ufs_quiesce_pend);
			ULOCKFS_CLR_BUSY(ulp);
			LOCKFS_CLR_BUSY(&ulp->ul_lockfs);
			poll_events |= POLLERR;
			pollwakeup(&ufs_pollhd, poll_events);
		}
		ufs_thread_continue(&ufsvfsp->vfs_delete);
		mutex_exit(&ulp->ul_lock);
	}

	/* let all types of writes go through */
	ufsvfsp->vfs_iotstamp = ddi_get_lbolt();

	/* coordinate with global hlock thread */
	if (TRANS_ISTRANS(ufsvfsp) && (ufsvfsp->vfs_validfs == UT_HLOCKING)) {
		/*
		 * last possibility for a forced umount to fail hence clear
		 * VFS_UNMOUNTED if appropriate.
		 */
		if (fflag & MS_FORCE)
			vfsp->vfs_flag &= ~VFS_UNMOUNTED;
		return (EAGAIN);
	}

	ufsvfsp->vfs_validfs = UT_UNMOUNTED;

	/* kill the reclaim thread */
	ufs_thread_exit(&ufsvfsp->vfs_reclaim);

	/* suspend the delete thread */
	ufs_thread_suspend(&ufsvfsp->vfs_delete);

	/*
	 * drain the delete and idle queues
	 */
	ufs_delete_drain(vfsp, -1, 1);
	ufs_idle_drain(vfsp);

	/*
	 * use the lockfs protocol to prevent new ops from starting
	 * a forcible umount can not fail beyond this point as
	 * we hard-locked the filesystem and drained all current consumers
	 * before.
	 */
	mutex_enter(&ulp->ul_lock);

	/*
	 * if the file system is busy; return EBUSY
	 */
	if (ulp->ul_vnops_cnt || ulp->ul_falloc_cnt || ULOCKFS_IS_SLOCK(ulp)) {
		error = EBUSY;
		goto out;
	}

	/*
	 * if this is not a forced unmount (!hard/error locked), then
	 * get rid of every inode except the root and quota inodes
	 * also, commit any outstanding transactions
	 */
	if (!ULOCKFS_IS_HLOCK(ulp) && !ULOCKFS_IS_ELOCK(ulp))
		if (error = ufs_flush(vfsp))
			goto out;

	/*
	 * ignore inodes in the cache if fs is hard locked or error locked
	 */
	rip = VTOI(ufsvfsp->vfs_root);
	if (!ULOCKFS_IS_HLOCK(ulp) && !ULOCKFS_IS_ELOCK(ulp)) {
		/*
		 * Otherwise, only the quota and root inodes are in the cache.
		 *
		 * Avoid racing with ufs_update() and ufs_sync().
		 */
		mutex_enter(&ufs_scan_lock);

		for (i = 0, ih = ihead; i < inohsz; i++, ih++) {
			mutex_enter(&ih_lock[i]);
			for (ip = ih->ih_chain[0];
			    ip != (struct inode *)ih;
			    ip = ip->i_forw) {
				/* any other cached inode means "busy" */
				if (ip->i_ufsvfs != ufsvfsp)
					continue;
				if (ip == ufsvfsp->vfs_qinod)
					continue;
				if (ip == rip && ITOV(ip)->v_count == 1)
					continue;
				mutex_exit(&ih_lock[i]);
				mutex_exit(&ufs_scan_lock);
				error = EBUSY;
				goto out;
			}
			mutex_exit(&ih_lock[i]);
		}
		mutex_exit(&ufs_scan_lock);
	}

	/*
	 * if a snapshot exists and this is a forced unmount, then delete
	 * the snapshot.  Otherwise return EBUSY.  This will insure the
	 * snapshot always belongs to a valid file system.
	 */
	if (ufsvfsp->vfs_snapshot) {
		if (ULOCKFS_IS_HLOCK(ulp) || ULOCKFS_IS_ELOCK(ulp)) {
			(void) fssnap_delete(&ufsvfsp->vfs_snapshot);
		} else {
			error = EBUSY;
			goto out;
		}
	}

	/*
	 * Close the quota file and invalidate anything left in the quota
	 * cache for this file system.  Pass kcred to allow all quota
	 * manipulations.
	 */
	(void) closedq(ufsvfsp, kcred);
	invalidatedq(ufsvfsp);
	/*
	 * drain the delete and idle queues
	 */
	ufs_delete_drain(vfsp, -1, 0);
	ufs_idle_drain(vfsp);

	/*
	 * discard the inodes for this fs (including root, shadow, and quota)
	 */
	for (i = 0, ih = ihead; i < inohsz; i++, ih++) {
		mutex_enter(&ih_lock[i]);
		for (inext = 0, ip = ih->ih_chain[0];
		    ip != (struct inode *)ih;
		    ip = inext) {
			inext = ip->i_forw;
			if (ip->i_ufsvfs != ufsvfsp)
				continue;

			/*
			 * We've found the inode in the cache and as we
			 * hold the hash mutex the inode can not
			 * disappear from underneath us.
			 * We also know it must have at least a vnode
			 * reference count of 1.
			 * We perform an additional VN_HOLD so the VN_RELE
			 * in case we take the inode off the idle queue
			 * can not be the last one.
			 * It is safe to grab the writer contents lock here
			 * to prevent a race with ufs_iinactive() putting
			 * inodes into the idle queue while we operate on
			 * this inode.
			 */
			rw_enter(&ip->i_contents, RW_WRITER);

			vp = ITOV(ip);
			/* VN_HOLD is a statement macro; no ';' needed */
			VN_HOLD(vp)
			remque(ip);
			if (ufs_rmidle(ip))
				VN_RELE(vp);
			ufs_si_del(ip);
			/*
			 * rip->i_ufsvfs is needed by bflush()
			 */
			if (ip != rip)
				ip->i_ufsvfs = NULL;
			/*
			 * Set vnode's vfsops to dummy ops, which return
			 * EIO. This is needed for forced unmounts to work
			 * with lofs/nfs properly.
			 */
			if (ULOCKFS_IS_HLOCK(ulp) || ULOCKFS_IS_ELOCK(ulp))
				vp->v_vfsp = &EIO_vfs;
			else
				vp->v_vfsp = NULL;
			vp->v_type = VBAD;

			rw_exit(&ip->i_contents);

			VN_RELE(vp);
		}
		mutex_exit(&ih_lock[i]);
	}
	ufs_si_cache_flush(dev);

	/*
	 * kill the delete thread and drain the idle queue
	 */
	ufs_thread_exit(&ufsvfsp->vfs_delete);
	ufs_idle_drain(vfsp);

	bp = ufsvfsp->vfs_bufp;
	bvp = ufsvfsp->vfs_devvp;
	/* nonzero when mounted read/write; also passed to VOP_CLOSE below */
	flag = !fs->fs_ronly;
	if (flag) {
		bflush(dev);
		if (fs->fs_clean != FSBAD) {
			if (fs->fs_clean == FSSTABLE)
				fs->fs_clean = FSCLEAN;
			fs->fs_reclaim &= ~FS_RECLAIM;
		}
		if (TRANS_ISTRANS(ufsvfsp) &&
		    !TRANS_ISERROR(ufsvfsp) &&
		    !ULOCKFS_IS_HLOCK(ulp) &&
		    (fs->fs_rolled == FS_NEED_ROLL)) {
			/*
			 * ufs_flush() above has flushed the last Moby.
			 * This is needed to ensure the following superblock
			 * update really is the last metadata update
			 */
			error = ufs_putsummaryinfo(dev, ufsvfsp, fs);
			if (error == 0) {
				fs->fs_rolled = FS_ALL_ROLLED;
			}
		}
		TRANS_SBUPDATE(ufsvfsp, vfsp, TOP_SBUPDATE_UNMOUNT);
		/*
		 * push this last transaction
		 */
		curthread->t_flag |= T_DONTBLOCK;
		TRANS_BEGIN_SYNC(ufsvfsp, TOP_COMMIT_UNMOUNT, TOP_COMMIT_SIZE,
		    error);
		if (!error)
			TRANS_END_SYNC(ufsvfsp, error, TOP_COMMIT_UNMOUNT,
			    TOP_COMMIT_SIZE);
		curthread->t_flag &= ~T_DONTBLOCK;
	}

	TRANS_MATA_UMOUNT(ufsvfsp);
	lufs_unsnarf(ufsvfsp);		/* Release the in-memory structs */
	ufsfx_unmount(ufsvfsp);		/* fix-on-panic bookkeeping */
	kmem_free(fs->fs_u.fs_csp, fs->fs_cssize);

	bp->b_flags |= B_STALE|B_AGE;
	ufsvfsp->vfs_bufp = NULL;	/* don't point at freed buf */
	brelse(bp);			/* free the superblock buf */

	(void) VOP_PUTPAGE(common_specvp(bvp), (offset_t)0, (size_t)0,
	    B_INVAL, cr, NULL);
	(void) VOP_CLOSE(bvp, flag, 1, (offset_t)0, cr, NULL);
	bflush(dev);
	(void) bfinval(dev, 1);
	VN_RELE(bvp);

	/*
	 * It is now safe to NULL out the ufsvfs pointer and discard
	 * the root inode.
	 */
	rip->i_ufsvfs = NULL;
	VN_RELE(ITOV(rip));

	/* free up lockfs comment structure, if any */
	if (ulp->ul_lockfs.lf_comlen && ulp->ul_lockfs.lf_comment)
		kmem_free(ulp->ul_lockfs.lf_comment, ulp->ul_lockfs.lf_comlen);

	/*
	 * Remove from instance list.
	 */
	ufs_vfs_remove(ufsvfsp);

	/*
	 * For a forcible unmount, threads may be asleep in
	 * ufs_lockfs_begin/ufs_check_lockfs.  These threads will need
	 * the ufsvfs structure so we don't free it, yet.  ufs_update
	 * will free it up after awhile.
	 */
	if (ULOCKFS_IS_HLOCK(ulp) || ULOCKFS_IS_ELOCK(ulp)) {
		extern kmutex_t ufsvfs_mutex;
		extern struct ufsvfs *ufsvfslist;

		mutex_enter(&ufsvfs_mutex);
		ufsvfsp->vfs_dontblock = 1;
		ufsvfsp->vfs_next = ufsvfslist;
		ufsvfslist = ufsvfsp;
		mutex_exit(&ufsvfs_mutex);
		/* wakeup any suspended threads */
		cv_broadcast(&ulp->ul_cv);
		mutex_exit(&ulp->ul_lock);
	} else {
		mutex_destroy(&ufsvfsp->vfs_lock);
		kmem_free(ufsvfsp, sizeof (struct ufsvfs));
	}

	/*
	 * Now mark the filesystem as unmounted since we're done with it.
	 */
	vfsp->vfs_flag |= VFS_UNMOUNTED;

	return (0);
out:
	/* open the fs to new ops */
	cv_broadcast(&ulp->ul_cv);
	mutex_exit(&ulp->ul_lock);

	if (TRANS_ISTRANS(ufsvfsp)) {
		/* allow the delete thread to continue */
		ufs_thread_continue(&ufsvfsp->vfs_delete);
		/* restart the reclaim thread */
		ufs_thread_start(&ufsvfsp->vfs_reclaim, ufs_thread_reclaim,
		    vfsp);
		/* coordinate with global hlock thread */
		ufsvfsp->vfs_validfs = UT_MOUNTED;
		/* check for trans errors during umount */
		ufs_trans_onerror();

		/*
		 * if we have a separate /usr it will never unmount
		 * when halting. In order to not re-read all the
		 * cylinder group summary info on mounting after
		 * reboot the logging of summary info is re-enabled
		 * and the super block written out.
		 */
		mountpoint = vfs_getmntpoint(vfsp);
		if ((fs->fs_si == FS_SI_OK) &&
		    (strcmp("/usr", refstr_value(mountpoint)) == 0)) {
			ufsvfsp->vfs_nolog_si = 0;
			UFS_BWRITE2(NULL, ufsvfsp->vfs_bufp);
		}
		refstr_rele(mountpoint);
	}

	return (error);
}
1758
1759 static int
ufs_root(struct vfs * vfsp,struct vnode ** vpp)1760 ufs_root(struct vfs *vfsp, struct vnode **vpp)
1761 {
1762 struct ufsvfs *ufsvfsp;
1763 struct vnode *vp;
1764
1765 if (!vfsp)
1766 return (EIO);
1767
1768 ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
1769 if (!ufsvfsp || !ufsvfsp->vfs_root)
1770 return (EIO); /* forced unmount */
1771
1772 vp = ufsvfsp->vfs_root;
1773 VN_HOLD(vp);
1774 *vpp = vp;
1775 return (0);
1776 }
1777
1778 /*
1779 * Get file system statistics.
1780 */
1781 static int
ufs_statvfs(struct vfs * vfsp,struct statvfs64 * sp)1782 ufs_statvfs(struct vfs *vfsp, struct statvfs64 *sp)
1783 {
1784 struct fs *fsp;
1785 struct ufsvfs *ufsvfsp;
1786 int blk, i;
1787 long max_avail, used;
1788 dev32_t d32;
1789
1790 if (vfsp->vfs_flag & VFS_UNMOUNTED)
1791 return (EIO);
1792
1793 ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
1794 fsp = ufsvfsp->vfs_fs;
1795 if ((fsp->fs_magic != FS_MAGIC) && (fsp->fs_magic != MTB_UFS_MAGIC))
1796 return (EINVAL);
1797 if (fsp->fs_magic == FS_MAGIC &&
1798 (fsp->fs_version != UFS_EFISTYLE4NONEFI_VERSION_2 &&
1799 fsp->fs_version != UFS_VERSION_MIN))
1800 return (EINVAL);
1801 if (fsp->fs_magic == MTB_UFS_MAGIC &&
1802 (fsp->fs_version > MTB_UFS_VERSION_1 ||
1803 fsp->fs_version < MTB_UFS_VERSION_MIN))
1804 return (EINVAL);
1805
1806 /*
1807 * get the basic numbers
1808 */
1809 (void) bzero(sp, sizeof (*sp));
1810
1811 sp->f_bsize = fsp->fs_bsize;
1812 sp->f_frsize = fsp->fs_fsize;
1813 sp->f_blocks = (fsblkcnt64_t)fsp->fs_dsize;
1814 sp->f_bfree = (fsblkcnt64_t)fsp->fs_cstotal.cs_nbfree * fsp->fs_frag +
1815 fsp->fs_cstotal.cs_nffree;
1816
1817 sp->f_files = (fsfilcnt64_t)fsp->fs_ncg * fsp->fs_ipg;
1818 sp->f_ffree = (fsfilcnt64_t)fsp->fs_cstotal.cs_nifree;
1819
1820 /*
1821 * Adjust the numbers based on things waiting to be deleted.
1822 * modifies f_bfree and f_ffree. Afterwards, everything we
1823 * come up with will be self-consistent. By definition, this
1824 * is a point-in-time snapshot, so the fact that the delete
1825 * thread's probably already invalidated the results is not a
1826 * problem. Note that if the delete thread is ever extended to
1827 * non-logging ufs, this adjustment must always be made.
1828 */
1829 if (TRANS_ISTRANS(ufsvfsp))
1830 ufs_delete_adjust_stats(ufsvfsp, sp);
1831
1832 /*
1833 * avail = MAX(max_avail - used, 0)
1834 */
1835 max_avail = fsp->fs_dsize - ufsvfsp->vfs_minfrags;
1836
1837 used = (fsp->fs_dsize - sp->f_bfree);
1838
1839 if (max_avail > used)
1840 sp->f_bavail = (fsblkcnt64_t)max_avail - used;
1841 else
1842 sp->f_bavail = (fsblkcnt64_t)0;
1843
1844 sp->f_favail = sp->f_ffree;
1845 (void) cmpldev(&d32, vfsp->vfs_dev);
1846 sp->f_fsid = d32;
1847 (void) strcpy(sp->f_basetype, vfssw[vfsp->vfs_fstype].vsw_name);
1848 sp->f_flag = vf_to_stf(vfsp->vfs_flag);
1849
1850 /* keep coordinated with ufs_l_pathconf() */
1851 sp->f_namemax = MAXNAMLEN;
1852
1853 if (fsp->fs_cpc == 0) {
1854 bzero(sp->f_fstr, 14);
1855 return (0);
1856 }
1857 blk = fsp->fs_spc * fsp->fs_cpc / NSPF(fsp);
1858 for (i = 0; i < blk; i += fsp->fs_frag) /* CSTYLED */
1859 /* void */;
1860 i -= fsp->fs_frag;
1861 blk = i / fsp->fs_frag;
1862 bcopy(&(fs_rotbl(fsp)[blk]), sp->f_fstr, 14);
1863 return (0);
1864 }
1865
1866 /*
1867 * Flush any pending I/O to file system vfsp.
1868 * The ufs_update() routine will only flush *all* ufs files.
1869 * If vfsp is non-NULL, only sync this ufs (in preparation
1870 * for a umount).
1871 */
1872 /*ARGSUSED*/
1873 static int
ufs_sync(struct vfs * vfsp,short flag,struct cred * cr)1874 ufs_sync(struct vfs *vfsp, short flag, struct cred *cr)
1875 {
1876 struct ufsvfs *ufsvfsp;
1877 struct fs *fs;
1878 int cheap = flag & SYNC_ATTR;
1879 int error;
1880
1881 /*
1882 * SYNC_CLOSE means we're rebooting. Toss everything
1883 * on the idle queue so we don't have to slog through
1884 * a bunch of uninteresting inodes over and over again.
1885 */
1886 if (flag & SYNC_CLOSE)
1887 ufs_idle_drain(NULL);
1888
1889 if (vfsp == NULL) {
1890 ufs_update(flag);
1891 return (0);
1892 }
1893
1894 /* Flush a single ufs */
1895 if (!vfs_matchops(vfsp, ufs_vfsops) || vfs_lock(vfsp) != 0)
1896 return (0);
1897
1898 ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
1899 if (!ufsvfsp)
1900 return (EIO);
1901 fs = ufsvfsp->vfs_fs;
1902 mutex_enter(&ufsvfsp->vfs_lock);
1903
1904 if (ufsvfsp->vfs_dio &&
1905 fs->fs_ronly == 0 &&
1906 fs->fs_clean != FSBAD &&
1907 fs->fs_clean != FSLOG) {
1908 /* turn off fast-io on unmount, so no fsck needed (4029401) */
1909 ufsvfsp->vfs_dio = 0;
1910 fs->fs_clean = FSACTIVE;
1911 fs->fs_fmod = 1;
1912 }
1913
1914 /* Write back modified superblock */
1915 if (fs->fs_fmod == 0) {
1916 mutex_exit(&ufsvfsp->vfs_lock);
1917 } else {
1918 if (fs->fs_ronly != 0) {
1919 mutex_exit(&ufsvfsp->vfs_lock);
1920 vfs_unlock(vfsp);
1921 return (ufs_fault(ufsvfsp->vfs_root,
1922 "fs = %s update: ro fs mod\n", fs->fs_fsmnt));
1923 }
1924 fs->fs_fmod = 0;
1925 mutex_exit(&ufsvfsp->vfs_lock);
1926
1927 TRANS_SBUPDATE(ufsvfsp, vfsp, TOP_SBUPDATE_UPDATE);
1928 }
1929 vfs_unlock(vfsp);
1930
1931 /*
1932 * Avoid racing with ufs_update() and ufs_unmount().
1933 *
1934 */
1935 mutex_enter(&ufs_scan_lock);
1936
1937 (void) ufs_scan_inodes(1, ufs_sync_inode,
1938 (void *)(uintptr_t)cheap, ufsvfsp);
1939
1940 mutex_exit(&ufs_scan_lock);
1941
1942 bflush((dev_t)vfsp->vfs_dev);
1943
1944 /*
1945 * commit any outstanding async transactions
1946 */
1947 curthread->t_flag |= T_DONTBLOCK;
1948 TRANS_BEGIN_SYNC(ufsvfsp, TOP_COMMIT_UPDATE, TOP_COMMIT_SIZE, error);
1949 if (!error) {
1950 TRANS_END_SYNC(ufsvfsp, error, TOP_COMMIT_UPDATE,
1951 TOP_COMMIT_SIZE);
1952 }
1953 curthread->t_flag &= ~T_DONTBLOCK;
1954
1955 return (0);
1956 }
1957
1958
1959 void
sbupdate(struct vfs * vfsp)1960 sbupdate(struct vfs *vfsp)
1961 {
1962 struct ufsvfs *ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
1963 struct fs *fs = ufsvfsp->vfs_fs;
1964 struct buf *bp;
1965 int blks;
1966 caddr_t space;
1967 int i;
1968 size_t size;
1969
1970 /*
1971 * for ulockfs processing, limit the superblock writes
1972 */
1973 if ((ufsvfsp->vfs_ulockfs.ul_sbowner) &&
1974 (curthread != ufsvfsp->vfs_ulockfs.ul_sbowner)) {
1975 /* process later */
1976 fs->fs_fmod = 1;
1977 return;
1978 }
1979 ULOCKFS_SET_MOD((&ufsvfsp->vfs_ulockfs));
1980
1981 if (TRANS_ISTRANS(ufsvfsp)) {
1982 mutex_enter(&ufsvfsp->vfs_lock);
1983 ufs_sbwrite(ufsvfsp);
1984 mutex_exit(&ufsvfsp->vfs_lock);
1985 return;
1986 }
1987
1988 blks = howmany(fs->fs_cssize, fs->fs_fsize);
1989 space = (caddr_t)fs->fs_u.fs_csp;
1990 for (i = 0; i < blks; i += fs->fs_frag) {
1991 size = fs->fs_bsize;
1992 if (i + fs->fs_frag > blks)
1993 size = (blks - i) * fs->fs_fsize;
1994 bp = UFS_GETBLK(ufsvfsp, ufsvfsp->vfs_dev,
1995 (daddr_t)(fsbtodb(fs, fs->fs_csaddr + i)),
1996 fs->fs_bsize);
1997 bcopy(space, bp->b_un.b_addr, size);
1998 space += size;
1999 bp->b_bcount = size;
2000 UFS_BRWRITE(ufsvfsp, bp);
2001 }
2002 mutex_enter(&ufsvfsp->vfs_lock);
2003 ufs_sbwrite(ufsvfsp);
2004 mutex_exit(&ufsvfsp->vfs_lock);
2005 }
2006
2007 int ufs_vget_idle_count = 2; /* Number of inodes to idle each time */
2008 static int
ufs_vget(struct vfs * vfsp,struct vnode ** vpp,struct fid * fidp)2009 ufs_vget(struct vfs *vfsp, struct vnode **vpp, struct fid *fidp)
2010 {
2011 int error = 0;
2012 struct ufid *ufid;
2013 struct inode *ip;
2014 struct ufsvfs *ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
2015 struct ulockfs *ulp;
2016
2017 /*
2018 * Check for unmounted filesystem.
2019 */
2020 if (vfsp->vfs_flag & VFS_UNMOUNTED) {
2021 error = EIO;
2022 goto errout;
2023 }
2024
2025 /*
2026 * Keep the idle queue from getting too long by
2027 * idling an inode before attempting to allocate another.
2028 * This operation must be performed before entering
2029 * lockfs or a transaction.
2030 */
2031 if (ufs_idle_q.uq_ne > ufs_idle_q.uq_hiwat)
2032 if ((curthread->t_flag & T_DONTBLOCK) == 0) {
2033 ins.in_vidles.value.ul += ufs_vget_idle_count;
2034 ufs_idle_some(ufs_vget_idle_count);
2035 }
2036
2037 ufid = (struct ufid *)fidp;
2038
2039 if (error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_VGET_MASK))
2040 goto errout;
2041
2042 rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
2043
2044 error = ufs_iget(vfsp, ufid->ufid_ino, &ip, CRED());
2045
2046 rw_exit(&ufsvfsp->vfs_dqrwlock);
2047
2048 ufs_lockfs_end(ulp);
2049
2050 if (error)
2051 goto errout;
2052
2053 /*
2054 * Check if the inode has been deleted or freed or is in transient state
2055 * since the last VFS_VGET() request for it, release it and don't return
2056 * it to the caller, presumably NFS, as it's no longer valid.
2057 */
2058 if (ip->i_gen != ufid->ufid_gen || ip->i_mode == 0 ||
2059 (ip->i_nlink <= 0)) {
2060 VN_RELE(ITOV(ip));
2061 error = EINVAL;
2062 goto errout;
2063 }
2064
2065 *vpp = ITOV(ip);
2066 return (0);
2067
2068 errout:
2069 *vpp = NULL;
2070 return (error);
2071 }
2072
2073 static int
ufs_syncfs(vfs_t * vfsp,uint64_t flags,cred_t * cr)2074 ufs_syncfs(vfs_t *vfsp, uint64_t flags, cred_t *cr)
2075 {
2076 if (flags != 0) {
2077 return (ENOTSUP);
2078 }
2079
2080 return (ufs_fioffs(vfsp, cr));
2081 }
2082
2083 static int
ufsinit(int fstype,char * name)2084 ufsinit(int fstype, char *name)
2085 {
2086 static const fs_operation_def_t ufs_vfsops_template[] = {
2087 VFSNAME_MOUNT, { .vfs_mount = ufs_mount },
2088 VFSNAME_UNMOUNT, { .vfs_unmount = ufs_unmount },
2089 VFSNAME_ROOT, { .vfs_root = ufs_root },
2090 VFSNAME_STATVFS, { .vfs_statvfs = ufs_statvfs },
2091 VFSNAME_SYNC, { .vfs_sync = ufs_sync },
2092 VFSNAME_VGET, { .vfs_vget = ufs_vget },
2093 VFSNAME_MOUNTROOT, { .vfs_mountroot = ufs_mountroot },
2094 VFSNAME_SYNCFS, { .vfs_syncfs = ufs_syncfs },
2095 NULL, NULL
2096 };
2097 int error;
2098
2099 ufsfstype = fstype;
2100
2101 error = vfs_setfsops(fstype, ufs_vfsops_template, &ufs_vfsops);
2102 if (error != 0) {
2103 cmn_err(CE_WARN, "ufsinit: bad vfs ops template");
2104 return (error);
2105 }
2106
2107 error = vn_make_ops(name, ufs_vnodeops_template, &ufs_vnodeops);
2108 if (error != 0) {
2109 (void) vfs_freevfsops_by_type(fstype);
2110 cmn_err(CE_WARN, "ufsinit: bad vnode ops template");
2111 return (error);
2112 }
2113
2114 ufs_iinit();
2115 return (0);
2116 }
2117