xref: /illumos-gate/usr/src/uts/common/fs/udfs/udf_vfsops.c (revision 2a6e99a0f1f7d22c0396e8b2ce9b9babbd1056cf)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
25  * Copyright (c) 2017 by Delphix. All rights reserved.
26  */
27 
28 #include <sys/types.h>
29 #include <sys/t_lock.h>
30 #include <sys/param.h>
31 #include <sys/time.h>
32 #include <sys/systm.h>
33 #include <sys/sysmacros.h>
34 #include <sys/resource.h>
35 #include <sys/signal.h>
36 #include <sys/cred.h>
37 #include <sys/user.h>
38 #include <sys/buf.h>
39 #include <sys/vfs.h>
40 #include <sys/vfs_opreg.h>
41 #include <sys/stat.h>
42 #include <sys/vnode.h>
43 #include <sys/mode.h>
44 #include <sys/proc.h>
45 #include <sys/disp.h>
46 #include <sys/file.h>
47 #include <sys/fcntl.h>
48 #include <sys/flock.h>
49 #include <sys/kmem.h>
50 #include <sys/uio.h>
51 #include <sys/dnlc.h>
52 #include <sys/conf.h>
53 #include <sys/errno.h>
54 #include <sys/mman.h>
55 #include <sys/fbuf.h>
56 #include <sys/pathname.h>
57 #include <sys/debug.h>
58 #include <sys/vmsystm.h>
59 #include <sys/cmn_err.h>
60 #include <sys/dirent.h>
62 #include <sys/modctl.h>
63 #include <sys/statvfs.h>
64 #include <sys/mount.h>
65 #include <sys/sunddi.h>
66 #include <sys/bootconf.h>
67 #include <sys/policy.h>
68 
69 #include <vm/hat.h>
70 #include <vm/page.h>
71 #include <vm/pvn.h>
72 #include <vm/as.h>
73 #include <vm/seg.h>
74 #include <vm/seg_map.h>
75 #include <vm/seg_kmem.h>
76 #include <vm/seg_vn.h>
77 #include <vm/rm.h>
79 #include <sys/swap.h>
80 #include <sys/mntent.h>
81 
82 
83 #include <fs/fs_subr.h>
84 
85 
86 #include <sys/fs/udf_volume.h>
87 #include <sys/fs/udf_inode.h>
88 
89 
90 extern struct vnode *common_specvp(struct vnode *vp);
91 
92 extern kmutex_t ud_sync_busy;
93 static int32_t ud_mountfs(struct vfs *,
94     enum whymountroot, dev_t, char *, struct cred *, int32_t);
95 static struct udf_vfs *ud_validate_and_fill_superblock(dev_t,
96     int32_t, uint32_t);
97 void ud_destroy_fsp(struct udf_vfs *);
98 void ud_convert_to_superblock(struct udf_vfs *,
99     struct log_vol_int_desc *);
100 void ud_update_superblock(struct vfs *);
101 int32_t ud_get_last_block(dev_t, daddr_t *);
102 static int32_t ud_val_get_vat(struct udf_vfs *,
103     dev_t, daddr_t, struct ud_map *);
104 int32_t ud_read_sparing_tbls(struct udf_vfs *,
105     dev_t, struct ud_map *, struct pmap_typ2 *);
106 uint32_t ud_get_lbsize(dev_t, uint32_t *);
107 
108 static int32_t udf_mount(struct vfs *,
109     struct vnode *, struct mounta *, struct cred *);
110 static int32_t udf_unmount(struct vfs *, int, struct cred *);
111 static int32_t udf_root(struct vfs *, struct vnode **);
112 static int32_t udf_statvfs(struct vfs *, struct statvfs64 *);
113 static int32_t udf_sync(struct vfs *, int16_t, struct cred *);
114 static int32_t udf_vget(struct vfs *, struct vnode **, struct fid *);
115 static int32_t udf_mountroot(struct vfs *vfsp, enum whymountroot);
116 
117 static int udfinit(int, char *);
118 
119 static mntopts_t udfs_mntopts;
120 
121 static vfsdef_t vfw = {
122 	VFSDEF_VERSION,
123 	"udfs",
124 	udfinit,
125 	VSW_HASPROTO|VSW_CANREMOUNT|VSW_STATS|VSW_CANLOFI|VSW_MOUNTDEV,
126 	&udfs_mntopts
127 };
128 
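/*
 * udfs defines no file-system-specific mount options of its own; only the
 * generic options handled through the VFS layer (for example "ro" and
 * "nosuid", set in udf_mount() below) apply.
 */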
129 static mntopts_t udfs_mntopts = {
130 	0,
131 	NULL
132 };
133 
134 /*
135  * Module linkage information for the kernel.
136  */
137 extern struct mod_ops mod_fsops;
138 
139 static struct modlfs modlfs = {
140 	&mod_fsops, "filesystem for UDFS", &vfw
141 };
142 
143 static struct modlinkage modlinkage = {
144 	MODREV_1, (void *)&modlfs, NULL
145 };
146 
147 int32_t udf_fstype = -1;
148 
149 int
150 _init()
151 {
152 	return (mod_install(&modlinkage));
153 }
154 
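/*
 * udfs does not support unloading once it has been installed, so _fini()
 * unconditionally reports the module as busy.
 */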
155 int
156 _fini()
157 {
158 	return (EBUSY);
159 }
160 
161 int
162 _info(struct modinfo *modinfop)
163 {
164 	return (mod_info(&modlinkage, modinfop));
165 }
166 
167 
168 /* -------------------- vfs routines -------------------- */
169 
170 /*
171  * XXX - this appears only to be used by the VM code to handle the case where
172  * UNIX is running off the mini-root.  That probably wants to be done
173  * differently.
174  */
175 struct vnode *rootvp;
176 #ifndef	__lint
177 _NOTE(SCHEME_PROTECTS_DATA("safe sharing", rootvp))
178 #endif
179 static int32_t
180 udf_mount(struct vfs *vfsp, struct vnode *mvp,
181     struct mounta *uap, struct cred *cr)
182 {
183 	dev_t dev;
184 	struct vnode *lvp = NULL;
185 	struct vnode *svp = NULL;
186 	struct pathname dpn;
187 	int32_t error;
188 	enum whymountroot why;
189 	int oflag, aflag;
190 
191 	ud_printf("udf_mount\n");
192 
193 	if ((error = secpolicy_fs_mount(cr, mvp, vfsp)) != 0) {
194 		return (error);
195 	}
196 
197 	if (mvp->v_type != VDIR) {
198 		return (ENOTDIR);
199 	}
200 
201 	mutex_enter(&mvp->v_lock);
202 	if ((uap->flags & MS_REMOUNT) == 0 &&
203 	    (uap->flags & MS_OVERLAY) == 0 &&
204 	    (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
205 		mutex_exit(&mvp->v_lock);
206 		return (EBUSY);
207 	}
208 	mutex_exit(&mvp->v_lock);
209 
210 	if (error = pn_get(uap->dir, UIO_USERSPACE, &dpn)) {
211 		return (error);
212 	}
213 
214 	/*
215 	 * Resolve path name of the file being mounted.
216 	 */
217 	if (error = lookupname(uap->spec, UIO_USERSPACE, FOLLOW, NULLVPP,
218 	    &svp)) {
219 		pn_free(&dpn);
220 		return (error);
221 	}
222 
223 	error = vfs_get_lofi(vfsp, &lvp);
224 
225 	if (error > 0) {
226 		if (error == ENOENT)
227 			error = ENODEV;
228 		goto out;
229 	} else if (error == 0) {
230 		dev = lvp->v_rdev;
231 	} else {
232 		dev = svp->v_rdev;
233 
234 		if (svp->v_type != VBLK) {
235 			error = ENOTBLK;
236 			goto out;
237 		}
238 	}
239 
240 	/*
241 	 * Ensure that this device isn't already mounted,
242 	 * unless this is a REMOUNT request
243 	 */
244 	if (vfs_devmounting(dev, vfsp)) {
245 		error = EBUSY;
246 		goto out;
247 	}
248 	if (vfs_devismounted(dev)) {
249 		if (uap->flags & MS_REMOUNT) {
250 			why = ROOT_REMOUNT;
251 		} else {
252 			error = EBUSY;
253 			goto out;
254 		}
255 	} else {
256 		why = ROOT_INIT;
257 	}
258 	if (getmajor(dev) >= devcnt) {
259 		error = ENXIO;
260 		goto out;
261 	}
262 
263 	/*
264 	 * If the device is a tape, mount it read only
265 	 */
266 	if (devopsp[getmajor(dev)]->devo_cb_ops->cb_flag & D_TAPE) {
267 		vfsp->vfs_flag |= VFS_RDONLY;
268 	}
269 
270 	if (uap->flags & MS_RDONLY) {
271 		vfsp->vfs_flag |= VFS_RDONLY;
272 	}
273 
274 	/*
275 	 * Set mount options.
276 	 */
277 	if (uap->flags & MS_RDONLY) {
278 		vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
279 	}
280 	if (uap->flags & MS_NOSUID) {
281 		vfs_setmntopt(vfsp, MNTOPT_NOSUID, NULL, 0);
282 	}
283 
284 	/*
285 	 * Verify that the caller can open the device special file as
286 	 * required.  It is not until this moment that we know whether
287 	 * we're mounting "ro" or not.
288 	 */
289 	if ((vfsp->vfs_flag & VFS_RDONLY) != 0) {
290 		oflag = FREAD;
291 		aflag = VREAD;
292 	} else {
293 		oflag = FREAD | FWRITE;
294 		aflag = VREAD | VWRITE;
295 	}
296 
297 	if (lvp == NULL &&
298 	    (error = secpolicy_spec_open(cr, svp, oflag)) != 0)
299 		goto out;
300 
301 	if ((error = VOP_ACCESS(svp, aflag, 0, cr, NULL)) != 0)
302 		goto out;
303 
304 	/*
305 	 * Mount the filesystem.
306 	 */
307 	error = ud_mountfs(vfsp, why, dev, dpn.pn_path, cr, 0);
308 out:
309 	VN_RELE(svp);
310 	if (lvp != NULL)
311 		VN_RELE(lvp);
312 	pn_free(&dpn);
313 	return (error);
314 }
315 
316 
317 
318 /*
319  * Unmount the file system pointed to
320  * by vfsp.
321  */
322 /* ARGSUSED */
323 static int32_t
324 udf_unmount(struct vfs *vfsp, int fflag, struct cred *cr)
325 {
326 	struct udf_vfs *udf_vfsp;
327 	struct vnode *bvp, *rvp;
328 	struct ud_inode *rip;
329 	int32_t flag;
330 
331 	ud_printf("udf_unmount\n");
332 
333 	if (secpolicy_fs_unmount(cr, vfsp) != 0) {
334 		return (EPERM);
335 	}
336 
337 	/*
338 	 * Forced unmount is not supported by this file system,
339 	 * so ENOTSUP is returned.
340 	 */
341 	if (fflag & MS_FORCE)
342 		return (ENOTSUP);
343 
344 	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
345 	flag = !(udf_vfsp->udf_flags & UDF_FL_RDONLY);
346 	bvp = udf_vfsp->udf_devvp;
347 
348 	rvp = udf_vfsp->udf_root;
349 	ASSERT(rvp != NULL);
350 	rip = VTOI(rvp);
351 
352 	(void) ud_release_cache(udf_vfsp);
353 
354 
355 	/* Flush all inodes except root */
356 	if (ud_iflush(vfsp) < 0) {
357 		return (EBUSY);
358 	}
359 
360 	rw_enter(&rip->i_contents, RW_WRITER);
361 	(void) ud_syncip(rip, B_INVAL, I_SYNC);
362 	rw_exit(&rip->i_contents);
363 
364 	mutex_enter(&ud_sync_busy);
365 	if ((udf_vfsp->udf_flags & UDF_FL_RDONLY) == 0) {
366 		bflush(vfsp->vfs_dev);
367 		mutex_enter(&udf_vfsp->udf_lock);
368 		udf_vfsp->udf_clean = UDF_CLEAN;
369 		mutex_exit(&udf_vfsp->udf_lock);
370 		ud_update_superblock(vfsp);
371 	}
372 	mutex_exit(&ud_sync_busy);
373 
374 	mutex_destroy(&udf_vfsp->udf_lock);
375 	mutex_destroy(&udf_vfsp->udf_rename_lck);
376 
377 	ud_delcache(rip);
378 	ITIMES(rip);
379 	VN_RELE(rvp);
380 
381 	ud_destroy_fsp(udf_vfsp);
382 
383 	(void) VOP_PUTPAGE(bvp, (offset_t)0, (uint32_t)0, B_INVAL, cr, NULL);
384 	(void) VOP_CLOSE(bvp, flag, 1, (offset_t)0, cr, NULL);
385 
386 	(void) bfinval(vfsp->vfs_dev, 1);
387 	VN_RELE(bvp);
388 
389 
390 	return (0);
391 }
392 
393 
394 /*
395  * Get the root vp for the
396  * file system
397  */
398 static int32_t
399 udf_root(struct vfs *vfsp, struct vnode **vpp)
400 {
401 	struct udf_vfs *udf_vfsp;
402 	struct vnode *vp;
403 
404 	ud_printf("udf_root\n");
405 
406 	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
407 
408 	ASSERT(udf_vfsp != NULL);
409 	ASSERT(udf_vfsp->udf_root != NULL);
410 
411 	vp = udf_vfsp->udf_root;
412 	VN_HOLD(vp);
413 	*vpp = vp;
414 	return (0);
415 }
416 
417 
418 /*
419  * Get file system statistics.
420  */
421 static int32_t
422 udf_statvfs(struct vfs *vfsp, struct statvfs64 *sp)
423 {
424 	struct udf_vfs *udf_vfsp;
425 	struct ud_part *parts;
426 	dev32_t d32;
427 	int32_t index;
428 
429 	ud_printf("udf_statvfs\n");
430 
431 	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
432 	(void) bzero(sp, sizeof (struct statvfs64));
433 
434 	mutex_enter(&udf_vfsp->udf_lock);
435 	sp->f_bsize = udf_vfsp->udf_lbsize;
436 	sp->f_frsize = udf_vfsp->udf_lbsize;
437 	sp->f_blocks = 0;
438 	sp->f_bfree = 0;
439 	parts = udf_vfsp->udf_parts;
440 	for (index = 0; index < udf_vfsp->udf_npart; index++) {
441 		sp->f_blocks += parts->udp_nblocks;
442 		sp->f_bfree += parts->udp_nfree;
443 		parts++;
444 	}
445 	sp->f_bavail = sp->f_bfree;
446 
447 	/*
448 	 * Since no real inodes are allocated,
449 	 * we approximate that each new file will occupy:
450 	 * 38 (overhead per dirent) + MAXNAMLEN / 2 + inode_size (== block size)
452 	 */
453 	sp->f_ffree = sp->f_favail =
454 	    (sp->f_bavail * sp->f_bsize) / (146 + sp->f_bsize);
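	/*
	 * For example, with a 2K logical block size this evaluates to
	 * f_bavail * 2048 / (146 + 2048), i.e. slightly less than one
	 * projected file per free block.
	 */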
455 
456 	/*
457 	 * The total number of inodes is
458 	 * the sum of files + directories + free inodes
459 	 */
460 	sp->f_files = sp->f_ffree + udf_vfsp->udf_nfiles + udf_vfsp->udf_ndirs;
461 	(void) cmpldev(&d32, vfsp->vfs_dev);
462 	sp->f_fsid = d32;
463 	(void) strcpy(sp->f_basetype, vfssw[vfsp->vfs_fstype].vsw_name);
464 	sp->f_flag = vf_to_stf(vfsp->vfs_flag);
465 	sp->f_namemax = MAXNAMLEN;
466 	(void) strcpy(sp->f_fstr, udf_vfsp->udf_volid);
467 
468 	mutex_exit(&udf_vfsp->udf_lock);
469 
470 	return (0);
471 }
472 
473 
474 /*
475  * Flush any pending I/O to file system vfsp.
476  * Note that ud_update() flushes *all* udf file systems, not just vfsp.
477  */
479 /* ARGSUSED */
480 static int32_t
481 udf_sync(struct vfs *vfsp, int16_t flag, struct cred *cr)
482 {
483 	ud_printf("udf_sync\n");
484 
485 	ud_update(flag);
486 	return (0);
487 }
488 
489 
490 
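/*
 * Translate an NFS-style file handle (struct udf_fid) back into a vnode.
 * The inode identified by the (prn, icb block) pair is re-read and the low
 * 32 bits of its unique id are compared against the handle, so a stale
 * handle referring to a reused inode is rejected with EINVAL.
 */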
491 /* ARGSUSED */
492 static int32_t
493 udf_vget(struct vfs *vfsp, struct vnode **vpp, struct fid *fidp)
494 {
495 	int32_t error = 0;
496 	struct udf_fid *udfid;
497 	struct udf_vfs *udf_vfsp;
498 	struct ud_inode *ip;
499 
500 	ud_printf("udf_vget\n");
501 
502 	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
503 	if (udf_vfsp == NULL) {
504 		*vpp = NULL;
505 		return (0);
506 	}
507 
508 	udfid = (struct udf_fid *)fidp;
509 	if ((error = ud_iget(vfsp, udfid->udfid_prn,
510 	    udfid->udfid_icb_lbn, &ip, NULL, CRED())) != 0) {
511 		*vpp = NULL;
512 		return (error);
513 	}
514 
515 	rw_enter(&ip->i_contents, RW_READER);
516 	if ((udfid->udfid_uinq_lo != (ip->i_uniqid & 0xffffffff)) ||
517 	    (udfid->udfid_prn != ip->i_icb_prn)) {
518 		rw_exit(&ip->i_contents);
519 		VN_RELE(ITOV(ip));
520 		*vpp = NULL;
521 		return (EINVAL);
522 	}
523 	rw_exit(&ip->i_contents);
524 
525 	*vpp = ITOV(ip);
526 	return (0);
527 }
528 
529 
530 /*
531  * Mount root file system.
532  * "why" is ROOT_INIT on initial call, ROOT_REMOUNT if called to
533  * remount the root file system, and ROOT_UNMOUNT if called to
534  * unmount the root (e.g., as part of a system shutdown).
535  *
536  * XXX - this may be partially machine-dependent; it, along with the VFS_SWAPVP
537  * operation, goes along with auto-configuration.  A mechanism should be
538  * provided by which machine-INdependent code in the kernel can say "get me the
539  * right root file system" and "get me the right initial swap area", and have
540  * that done in what may well be a machine-dependent fashion.
541  * Unfortunately, it is also file-system-type dependent (NFS gets it via
542  * bootparams calls, UFS gets it from various and sundry machine-dependent
543  * mechanisms, as SPECFS does for swap).
544  */
545 /* ARGSUSED */
546 static int32_t
547 udf_mountroot(struct vfs *vfsp, enum whymountroot why)
548 {
549 	dev_t rootdev;
550 	static int32_t udf_rootdone = 0;
551 	struct vnode *vp = NULL;
552 	int32_t ovflags, error;
553 	ud_printf("udf_mountroot\n");
554 
555 	if (why == ROOT_INIT) {
556 		if (udf_rootdone++) {
557 			return (EBUSY);
558 		}
559 		rootdev = getrootdev();
560 		if (rootdev == (dev_t)NODEV) {
561 			return (ENODEV);
562 		}
563 		vfsp->vfs_dev = rootdev;
564 		vfsp->vfs_flag |= VFS_RDONLY;
565 	} else if (why == ROOT_REMOUNT) {
566 		vp = ((struct udf_vfs *)vfsp->vfs_data)->udf_devvp;
567 		(void) dnlc_purge_vfsp(vfsp, 0);
568 		vp = common_specvp(vp);
569 		(void) VOP_PUTPAGE(vp, (offset_t)0,
570 		    (uint32_t)0, B_INVAL, CRED(), NULL);
571 		binval(vfsp->vfs_dev);
572 
573 		ovflags = vfsp->vfs_flag;
574 		vfsp->vfs_flag &= ~VFS_RDONLY;
575 		vfsp->vfs_flag |= VFS_REMOUNT;
576 		rootdev = vfsp->vfs_dev;
577 	} else if (why == ROOT_UNMOUNT) {
578 		ud_update(0);
579 		vp = ((struct udf_vfs *)vfsp->vfs_data)->udf_devvp;
580 		(void) VOP_CLOSE(vp, FREAD|FWRITE, 1,
581 		    (offset_t)0, CRED(), NULL);
582 		return (0);
583 	}
584 
585 	if ((error = vfs_lock(vfsp)) != 0) {
586 		return (error);
587 	}
588 
589 	error = ud_mountfs(vfsp, why, rootdev, "/", CRED(), 1);
590 	if (error) {
591 		vfs_unlock(vfsp);
592 		if (why == ROOT_REMOUNT) {
593 			vfsp->vfs_flag = ovflags;
594 		}
595 		if (rootvp) {
596 			VN_RELE(rootvp);
597 			rootvp = (struct vnode *)0;
598 		}
599 		return (error);
600 	}
601 
602 	if (why == ROOT_INIT) {
603 		vfs_add((struct vnode *)0, vfsp,
604 		    (vfsp->vfs_flag & VFS_RDONLY) ? MS_RDONLY : 0);
605 	}
606 	vfs_unlock(vfsp);
607 	return (0);
608 }
609 
610 
611 /* ------------------------- local routines ------------------------- */
612 
613 
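/*
 * Common mount worker shared by udf_mount() and udf_mountroot().  For a
 * first-time mount it opens the device, validates the on-disk volume
 * structures and builds the in-core udf_vfs and root vnode; for a remount
 * it only re-reads the integrity sequence to decide whether a read-only
 * mount may be upgraded to read/write.
 */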
614 static int32_t
615 ud_mountfs(struct vfs *vfsp, enum whymountroot why, dev_t dev, char *name,
616     struct cred *cr, int32_t isroot)
617 {
618 	struct vnode *devvp = NULL;
619 	int32_t error = 0;
620 	int32_t needclose = 0;
621 	struct udf_vfs *udf_vfsp = NULL;
622 	struct log_vol_int_desc *lvid;
623 	struct ud_inode *rip = NULL;
624 	struct vnode *rvp = NULL;
625 	int32_t i, lbsize;
626 	uint32_t avd_loc;
627 	struct ud_map *map;
628 	int32_t	desc_len;
629 
630 	ud_printf("ud_mountfs\n");
631 
632 	if (why == ROOT_INIT) {
633 		/*
634 		 * Open the device.
635 		 */
636 		devvp = makespecvp(dev, VBLK);
637 
638 		/*
639 		 * Open block device mounted on.
640 		 * When bio is fixed for vnodes this can all be vnode
641 		 * operations.
642 		 */
643 		error = VOP_OPEN(&devvp,
644 		    (vfsp->vfs_flag & VFS_RDONLY) ? FREAD : FREAD|FWRITE,
645 		    cr, NULL);
646 		if (error) {
647 			goto out;
648 		}
649 		needclose = 1;
650 
651 		/*
652 		 * Refuse to go any further if this
653 		 * device is being used for swapping.
654 		 */
655 		if (IS_SWAPVP(devvp)) {
656 			error = EBUSY;
657 			goto out;
658 		}
659 	}
660 
661 	/*
662 	 * Handle a remount of a file system already mounted on this dev.
663 	 */
664 	if (vfsp->vfs_flag & VFS_REMOUNT) {
665 		struct tag *ttag;
666 		int32_t index, count;
667 		struct buf *tpt = 0;
668 		caddr_t addr;
669 
670 
671 		/* cannot remount to RDONLY */
672 		if (vfsp->vfs_flag & VFS_RDONLY) {
673 			return (EINVAL);
674 		}
675 
676 		if (vfsp->vfs_dev != dev) {
677 			return (EINVAL);
678 		}
679 
680 		udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
681 		devvp = udf_vfsp->udf_devvp;
682 
683 		/*
684 		 * fsck may have altered the file system; discard
685 		 * as much incore data as possible.  Don't flush
686 		 * if this is a rw to rw remount; it's just resetting
687 		 * the options.
688 		 */
689 		if (udf_vfsp->udf_flags & UDF_FL_RDONLY) {
690 			(void) dnlc_purge_vfsp(vfsp, 0);
691 			(void) VOP_PUTPAGE(devvp, (offset_t)0, (uint_t)0,
692 			    B_INVAL, CRED(), NULL);
693 			(void) ud_iflush(vfsp);
694 			bflush(dev);
695 			binval(dev);
696 		}
697 
698 		/*
699 		 * We can read and write UDF 1.50 only;
700 		 * disallow mounting any higher version.
701 		 */
702 		if ((udf_vfsp->udf_miread > UDF_150) ||
703 		    (udf_vfsp->udf_miwrite > UDF_150)) {
704 			error = EINVAL;
705 			goto remountout;
706 		}
707 
708 		/*
709 		 * read/write to read/write; all done
710 		 */
711 		if (udf_vfsp->udf_flags & UDF_FL_RW) {
712 			goto remountout;
713 		}
714 
715 		/*
716 		 * Does the media type allow a writable mount?
717 		 */
718 		if (udf_vfsp->udf_mtype != UDF_MT_OW) {
719 			error = EINVAL;
720 			goto remountout;
721 		}
722 
723 		/*
724 		 * Read the metadata
725 		 * and check if it is possible to
726 		 * mount in rw mode
727 		 */
728 		tpt = ud_bread(vfsp->vfs_dev,
729 		    udf_vfsp->udf_iseq_loc << udf_vfsp->udf_l2d_shift,
730 		    udf_vfsp->udf_iseq_len);
731 		if (tpt->b_flags & B_ERROR) {
732 			error = EIO;
733 			goto remountout;
734 		}
735 		count = udf_vfsp->udf_iseq_len / DEV_BSIZE;
736 		addr = tpt->b_un.b_addr;
737 		for (index = 0; index < count; index ++) {
738 			ttag = (struct tag *)(addr + index * DEV_BSIZE);
739 			desc_len = udf_vfsp->udf_iseq_len - (index * DEV_BSIZE);
740 			if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_INT,
741 			    udf_vfsp->udf_iseq_loc +
742 			    (index >> udf_vfsp->udf_l2d_shift),
743 			    1, desc_len) == 0) {
744 				struct log_vol_int_desc *lvid;
745 
746 				lvid = (struct log_vol_int_desc *)ttag;
747 
748 				if (SWAP_32(lvid->lvid_int_type) !=
749 				    LOG_VOL_CLOSE_INT) {
750 					error = EINVAL;
751 					goto remountout;
752 				}
753 
754 				/*
755 				 * Copy new data to old data
756 				 */
757 				bcopy(udf_vfsp->udf_iseq->b_un.b_addr,
758 				    tpt->b_un.b_addr, udf_vfsp->udf_iseq_len);
759 				break;
760 			}
761 		}
762 
763 		udf_vfsp->udf_flags = UDF_FL_RW;
764 
765 		mutex_enter(&udf_vfsp->udf_lock);
766 		ud_sbwrite(udf_vfsp);
767 		mutex_exit(&udf_vfsp->udf_lock);
768 remountout:
769 		if (tpt != NULL) {
770 			tpt->b_flags = B_AGE | B_STALE;
771 			brelse(tpt);
772 		}
773 		return (error);
774 	}
775 
776 	ASSERT(devvp != 0);
777 	/*
778 	 * Flush back any dirty pages on the block device to
779 	 * try and keep the buffer cache in sync with the page
780 	 * cache if someone is trying to use block devices when
781 	 * they really should be using the raw device.
782 	 */
783 	(void) VOP_PUTPAGE(common_specvp(devvp), (offset_t)0,
784 	    (uint32_t)0, B_INVAL, cr, NULL);
785 
786 
787 	/*
788 	 * Check if the file system
789 	 * is a valid udfs and fill
790 	 * the required fields in udf_vfs
791 	 */
792 #ifndef	__lint
793 	_NOTE(NO_COMPETING_THREADS_NOW);
794 #endif
795 
796 	if ((lbsize = ud_get_lbsize(dev, &avd_loc)) == 0) {
797 		error = EINVAL;
798 		goto out;
799 	}
800 
801 	udf_vfsp = ud_validate_and_fill_superblock(dev, lbsize, avd_loc);
802 	if (udf_vfsp == NULL) {
803 		error = EINVAL;
804 		goto out;
805 	}
806 
807 	/*
808 	 * Fill in vfs private data
809 	 */
810 	vfsp->vfs_fstype = udf_fstype;
811 	vfs_make_fsid(&vfsp->vfs_fsid, dev, udf_fstype);
812 	vfsp->vfs_data = (caddr_t)udf_vfsp;
813 	vfsp->vfs_dev = dev;
814 	vfsp->vfs_flag |= VFS_NOTRUNC;
815 	udf_vfsp->udf_devvp = devvp;
816 
817 	udf_vfsp->udf_fsmnt = kmem_zalloc(strlen(name) + 1, KM_SLEEP);
818 	(void) strcpy(udf_vfsp->udf_fsmnt, name);
819 
820 	udf_vfsp->udf_vfs = vfsp;
821 	udf_vfsp->udf_rdclustsz = udf_vfsp->udf_wrclustsz = maxphys;
822 
823 	udf_vfsp->udf_mod = 0;
824 
825 
826 	lvid = udf_vfsp->udf_lvid;
827 	if (vfsp->vfs_flag & VFS_RDONLY) {
828 		/*
829 		 * We can only read UDF 1.50;
830 		 * disallow mounting any higher version.
831 		 */
832 		if (udf_vfsp->udf_miread > UDF_150) {
833 			error = EINVAL;
834 			goto out;
835 		}
836 		udf_vfsp->udf_flags = UDF_FL_RDONLY;
837 		if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
838 			udf_vfsp->udf_clean = UDF_CLEAN;
839 		} else {
840 			/* Do we have a VAT at the end of the recorded media */
841 			map = udf_vfsp->udf_maps;
842 			for (i = 0; i < udf_vfsp->udf_nmaps; i++) {
843 				if (map->udm_flags & UDM_MAP_VPM) {
844 					break;
845 				}
846 				map++;
847 			}
848 			if (i == udf_vfsp->udf_nmaps) {
849 				error = ENOSPC;
850 				goto out;
851 			}
852 			udf_vfsp->udf_clean = UDF_CLEAN;
853 		}
854 	} else {
855 		/*
856 		 * We can read and write UDF 1.50 only;
857 		 * disallow mounting any higher version.
858 		 */
859 		if ((udf_vfsp->udf_miread > UDF_150) ||
860 		    (udf_vfsp->udf_miwrite > UDF_150)) {
861 			error = EINVAL;
862 			goto out;
863 		}
864 		/*
865 		 * Check if the media allows
866 		 * us to mount read/write
867 		 */
868 		if (udf_vfsp->udf_mtype != UDF_MT_OW) {
869 			error = EACCES;
870 			goto out;
871 		}
872 
873 		/*
874 		 * Check if we have a VAT on writable media;
875 		 * we cannot use the media in the presence of a VAT,
876 		 * so deny the RW mount.
877 		 */
878 		map = udf_vfsp->udf_maps;
879 		ASSERT(map != NULL);
880 		for (i = 0; i < udf_vfsp->udf_nmaps; i++) {
881 			if (map->udm_flags & UDM_MAP_VPM) {
882 				error = EACCES;
883 				goto out;
884 			}
885 			map++;
886 		}
887 
888 		/*
889 		 * Check if the domain Id allows
890 		 * us to write
891 		 */
892 		if (udf_vfsp->udf_lvd->lvd_dom_id.reg_ids[2] & 0x3) {
893 			error = EACCES;
894 			goto out;
895 		}
896 		udf_vfsp->udf_flags = UDF_FL_RW;
897 
898 		if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
899 			udf_vfsp->udf_clean = UDF_CLEAN;
900 		} else {
901 			if (isroot) {
902 				udf_vfsp->udf_clean = UDF_DIRTY;
903 			} else {
904 				error = ENOSPC;
905 				goto out;
906 			}
907 		}
908 	}
909 
910 	mutex_init(&udf_vfsp->udf_lock, NULL, MUTEX_DEFAULT, NULL);
911 
912 	mutex_init(&udf_vfsp->udf_rename_lck, NULL, MUTEX_DEFAULT, NULL);
913 
914 #ifndef	__lint
915 	_NOTE(COMPETING_THREADS_NOW);
916 #endif
917 	if (error = ud_iget(vfsp, udf_vfsp->udf_ricb_prn,
918 	    udf_vfsp->udf_ricb_loc, &rip, NULL, cr)) {
919 		mutex_destroy(&udf_vfsp->udf_lock);
920 		goto out;
921 	}
922 
923 
924 	/*
925 	 * Get the root inode and
926 	 * initialize the root vnode
927 	 */
928 	rvp = ITOV(rip);
929 	mutex_enter(&rvp->v_lock);
930 	rvp->v_flag |= VROOT;
931 	mutex_exit(&rvp->v_lock);
932 	udf_vfsp->udf_root = rvp;
933 
934 
935 	if (why == ROOT_INIT && isroot)
936 		rootvp = devvp;
937 
938 	ud_vfs_add(udf_vfsp);
939 
940 	if (udf_vfsp->udf_flags == UDF_FL_RW) {
941 		udf_vfsp->udf_clean = UDF_DIRTY;
942 		ud_update_superblock(vfsp);
943 	}
944 
945 	return (0);
946 
947 out:
948 	ud_destroy_fsp(udf_vfsp);
949 	if (needclose) {
950 		(void) VOP_CLOSE(devvp, (vfsp->vfs_flag & VFS_RDONLY) ?
951 		    FREAD : FREAD|FWRITE, 1, (offset_t)0, cr, NULL);
952 		bflush(dev);
953 		binval(dev);
954 	}
955 	VN_RELE(devvp);
956 
957 	return (error);
958 }
959 
960 
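/*
 * Read and validate the on-disk volume structures: the anchor volume
 * descriptor, the main (or reserve) volume descriptor sequence, the
 * partition maps, the logical volume integrity sequence and the file set
 * descriptor.  On success an in-core struct udf_vfs describing the volume
 * is returned; on any validation failure NULL is returned.
 */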
961 static struct udf_vfs *
962 ud_validate_and_fill_superblock(dev_t dev, int32_t bsize, uint32_t avd_loc)
963 {
964 	int32_t error, count, index, shift;
965 	uint32_t dummy, vds_loc;
966 	caddr_t addr;
967 	daddr_t blkno, lblkno;
968 	struct buf *secbp, *bp;
969 	struct tag *ttag;
970 	struct anch_vol_desc_ptr *avdp;
971 	struct file_set_desc *fsd;
972 	struct udf_vfs *udf_vfsp = NULL;
973 	struct pmap_hdr *hdr;
974 	struct pmap_typ1 *typ1;
975 	struct pmap_typ2 *typ2;
976 	struct ud_map *map;
977 	int32_t	desc_len;
978 
979 	ud_printf("ud_validate_and_fill_superblock\n");
980 
981 	if (bsize < DEV_BSIZE) {
982 		return (NULL);
983 	}
984 	shift = 0;
985 	while ((bsize >> shift) > DEV_BSIZE) {
986 		shift++;
987 	}
988 
989 	/*
990 	 * Read Anchor Volume Descriptor
991 	 * Verify it and get the location of
992 	 * Main Volume Descriptor Sequence
993 	 */
994 	secbp = ud_bread(dev, avd_loc << shift, ANCHOR_VOL_DESC_LEN);
995 	if ((error = geterror(secbp)) != 0) {
996 		cmn_err(CE_NOTE, "udfs : Could not read Anchor Volume Desc %x",
997 		    error);
998 		brelse(secbp);
999 		return (NULL);
1000 	}
1001 	avdp = (struct anch_vol_desc_ptr *)secbp->b_un.b_addr;
1002 	if (ud_verify_tag_and_desc(&avdp->avd_tag, UD_ANCH_VOL_DESC,
1003 	    avd_loc, 1, ANCHOR_VOL_DESC_LEN) != 0) {
1004 		brelse(secbp);
1005 		return (NULL);
1006 	}
1007 	udf_vfsp = (struct udf_vfs *)
1008 	    kmem_zalloc(sizeof (struct udf_vfs), KM_SLEEP);
1009 	udf_vfsp->udf_mvds_loc = SWAP_32(avdp->avd_main_vdse.ext_loc);
1010 	udf_vfsp->udf_mvds_len = SWAP_32(avdp->avd_main_vdse.ext_len);
1011 	udf_vfsp->udf_rvds_loc = SWAP_32(avdp->avd_res_vdse.ext_loc);
1012 	udf_vfsp->udf_rvds_len = SWAP_32(avdp->avd_res_vdse.ext_len);
1013 	secbp->b_flags = B_AGE | B_STALE;
1014 	brelse(secbp);
1015 
1016 	/*
1017 	 * Read Main Volume Descriptor Sequence
1018 	 * and process it
1019 	 */
1020 	vds_loc = udf_vfsp->udf_mvds_loc;
1021 	secbp = ud_bread(dev, vds_loc << shift,
1022 	    udf_vfsp->udf_mvds_len);
1023 	if ((error = geterror(secbp)) != 0) {
1024 		brelse(secbp);
1025 		cmn_err(CE_NOTE, "udfs : Could not read Main Volume Desc %x",
1026 		    error);
1027 
1028 		vds_loc = udf_vfsp->udf_rvds_loc;
1029 		secbp = ud_bread(dev, vds_loc << shift,
1030 		    udf_vfsp->udf_rvds_len);
1031 		if ((error = geterror(secbp)) != 0) {
1032 			brelse(secbp);
1033 			cmn_err(CE_NOTE,
1034 			"udfs : Could not read Res Volume Desc %x", error);
1035 			return (NULL);
1036 		}
1037 	}
1038 
1039 	udf_vfsp->udf_vds = ngeteblk(udf_vfsp->udf_mvds_len);
1040 	bp = udf_vfsp->udf_vds;
1041 	bp->b_edev = dev;
1042 	bp->b_dev = cmpdev(dev);
1043 	bp->b_blkno = vds_loc << shift;
1044 	bp->b_bcount = udf_vfsp->udf_mvds_len;
1045 	bcopy(secbp->b_un.b_addr, bp->b_un.b_addr, udf_vfsp->udf_mvds_len);
1046 	secbp->b_flags |= B_STALE | B_AGE;
1047 	brelse(secbp);
1048 
1049 
1050 	count = udf_vfsp->udf_mvds_len / DEV_BSIZE;
1051 	addr = bp->b_un.b_addr;
1052 	for (index = 0; index < count; index ++) {
1053 		ttag = (struct tag *)(addr + index * DEV_BSIZE);
1054 		desc_len = udf_vfsp->udf_mvds_len - (index * DEV_BSIZE);
1055 		if (ud_verify_tag_and_desc(ttag, UD_PRI_VOL_DESC,
1056 		    vds_loc + (index >> shift),
1057 		    1, desc_len) == 0) {
1058 			if (udf_vfsp->udf_pvd == NULL) {
1059 				udf_vfsp->udf_pvd =
1060 				    (struct pri_vol_desc *)ttag;
1061 			} else {
1062 				struct pri_vol_desc *opvd, *npvd;
1063 
1064 				opvd = udf_vfsp->udf_pvd;
1065 				npvd = (struct pri_vol_desc *)ttag;
1066 
1067 				if ((strncmp(opvd->pvd_vsi,
1068 				    npvd->pvd_vsi, 128) == 0) &&
1069 				    (strncmp(opvd->pvd_vol_id,
1070 				    npvd->pvd_vol_id, 32) == 0) &&
1071 				    (strncmp((caddr_t)&opvd->pvd_desc_cs,
1072 				    (caddr_t)&npvd->pvd_desc_cs,
1073 				    sizeof (charspec_t)) == 0)) {
1074 
1075 					if (SWAP_32(opvd->pvd_vdsn) <
1076 					    SWAP_32(npvd->pvd_vdsn)) {
1077 						udf_vfsp->udf_pvd = npvd;
1078 					}
1079 				} else {
1080 					goto out;
1081 				}
1082 			}
1083 		} else if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_DESC,
1084 		    vds_loc + (index >> shift),
1085 		    1, desc_len) == 0) {
1086 			struct log_vol_desc *lvd;
1087 
1088 			lvd = (struct log_vol_desc *)ttag;
1089 			if (strncmp(lvd->lvd_dom_id.reg_id,
1090 			    UDF_DOMAIN_NAME, 23) != 0) {
1091 				printf("Domain ID in lvd is not valid\n");
1092 				goto out;
1093 			}
1094 
1095 			if (udf_vfsp->udf_lvd == NULL) {
1096 				udf_vfsp->udf_lvd = lvd;
1097 			} else {
1098 				struct log_vol_desc *olvd;
1099 
1100 				olvd = udf_vfsp->udf_lvd;
1101 				if ((strncmp((caddr_t)&olvd->lvd_desc_cs,
1102 				    (caddr_t)&lvd->lvd_desc_cs,
1103 				    sizeof (charspec_t)) == 0) &&
1104 				    (strncmp(olvd->lvd_lvid,
1105 				    lvd->lvd_lvid, 128) == 0)) {
1106 					if (SWAP_32(olvd->lvd_vdsn) <
1107 					    SWAP_32(lvd->lvd_vdsn)) {
1108 						udf_vfsp->udf_lvd = lvd;
1109 					}
1110 				} else {
1111 					goto out;
1112 				}
1113 			}
1114 		} else if (ud_verify_tag_and_desc(ttag, UD_PART_DESC,
1115 		    vds_loc + (index >> shift),
1116 		    1, desc_len) == 0) {
1117 			int32_t i;
1118 			struct phdr_desc *hdr;
1119 			struct part_desc *pdesc;
1120 			struct ud_part *pnew, *pold, *part;
1121 
1122 			pdesc = (struct part_desc *)ttag;
1123 			pold = udf_vfsp->udf_parts;
1124 			for (i = 0; i < udf_vfsp->udf_npart; i++) {
1125 				if (pold->udp_number !=
1126 				    SWAP_16(pdesc->pd_pnum)) {
1127 					pold++;
1128 					continue;
1129 				}
1130 
1131 				if (SWAP_32(pdesc->pd_vdsn) >
1132 				    pold->udp_seqno) {
1133 					pold->udp_seqno =
1134 					    SWAP_32(pdesc->pd_vdsn);
1135 					pold->udp_access =
1136 					    SWAP_32(pdesc->pd_acc_type);
1137 					pold->udp_start =
1138 					    SWAP_32(pdesc->pd_part_start);
1139 					pold->udp_length =
1140 					    SWAP_32(pdesc->pd_part_length);
1141 				}
1142 				goto loop_end;
1143 			}
1144 			pold = udf_vfsp->udf_parts;
1145 			udf_vfsp->udf_npart++;
1146 			pnew = kmem_zalloc(udf_vfsp->udf_npart *
1147 			    sizeof (struct ud_part), KM_SLEEP);
1148 			udf_vfsp->udf_parts = pnew;
1149 			if (pold) {
1150 				bcopy(pold, pnew,
1151 				    sizeof (struct ud_part) *
1152 				    (udf_vfsp->udf_npart - 1));
1153 				kmem_free(pold,
1154 				    sizeof (struct ud_part) *
1155 				    (udf_vfsp->udf_npart - 1));
1156 			}
1157 			part = pnew + (udf_vfsp->udf_npart - 1);
1158 			part->udp_number = SWAP_16(pdesc->pd_pnum);
1159 			part->udp_seqno = SWAP_32(pdesc->pd_vdsn);
1160 			part->udp_access = SWAP_32(pdesc->pd_acc_type);
1161 			part->udp_start = SWAP_32(pdesc->pd_part_start);
1162 			part->udp_length = SWAP_32(pdesc->pd_part_length);
1163 			part->udp_last_alloc = 0;
1164 
1165 			/*
1166 			 * Figure out space bitmaps
1167 			 * or space tables
1168 			 */
1169 			hdr = (struct phdr_desc *)pdesc->pd_pc_use;
1170 			if (hdr->phdr_ust.sad_ext_len) {
1171 				part->udp_flags = UDP_SPACETBLS;
1172 				part->udp_unall_loc =
1173 				    SWAP_32(hdr->phdr_ust.sad_ext_loc);
1174 				part->udp_unall_len =
1175 				    SWAP_32(hdr->phdr_ust.sad_ext_len);
1176 				part->udp_freed_loc =
1177 				    SWAP_32(hdr->phdr_fst.sad_ext_loc);
1178 				part->udp_freed_len =
1179 				    SWAP_32(hdr->phdr_fst.sad_ext_len);
1180 			} else {
1181 				part->udp_flags = UDP_BITMAPS;
1182 				part->udp_unall_loc =
1183 				    SWAP_32(hdr->phdr_usb.sad_ext_loc);
1184 				part->udp_unall_len =
1185 				    SWAP_32(hdr->phdr_usb.sad_ext_len);
1186 				part->udp_freed_loc =
1187 				    SWAP_32(hdr->phdr_fsb.sad_ext_loc);
1188 				part->udp_freed_len =
1189 				    SWAP_32(hdr->phdr_fsb.sad_ext_len);
1190 			}
1191 		} else if (ud_verify_tag_and_desc(ttag, UD_TERM_DESC,
1192 		    vds_loc + (index >> shift),
1193 		    1, desc_len) == 0) {
1194 
1195 			break;
1196 		}
1197 loop_end:
1198 		;
1199 	}
1200 	if ((udf_vfsp->udf_pvd == NULL) ||
1201 	    (udf_vfsp->udf_lvd == NULL) ||
1202 	    (udf_vfsp->udf_parts == NULL)) {
1203 		goto out;
1204 	}
1205 
1206 	/*
1207 	 * Process Primary Volume Descriptor
1208 	 */
1209 	(void) strncpy(udf_vfsp->udf_volid, udf_vfsp->udf_pvd->pvd_vol_id, 32);
1210 	udf_vfsp->udf_volid[31] = '\0';
1211 	udf_vfsp->udf_tsno = SWAP_16(udf_vfsp->udf_pvd->pvd_tag.tag_sno);
1212 
1213 	/*
1214 	 * Process Logical Volume Descriptor
1215 	 */
1216 	udf_vfsp->udf_lbsize =
1217 	    SWAP_32(udf_vfsp->udf_lvd->lvd_log_bsize);
1218 	udf_vfsp->udf_lbmask = udf_vfsp->udf_lbsize - 1;
1219 	udf_vfsp->udf_l2d_shift = shift;
1220 	udf_vfsp->udf_l2b_shift = shift + DEV_BSHIFT;
1221 
1222 	/*
1223 	 * Check if the media is in
1224 	 * proper domain.
1225 	 */
1226 	if (strcmp(udf_vfsp->udf_lvd->lvd_dom_id.reg_id,
1227 	    UDF_DOMAIN_NAME) != 0) {
1228 		goto out;
1229 	}
1230 
1231 	/*
1232 	 * Bail out if the block size used to locate the AVDS
1233 	 * does not match the lbsize recorded in the lvd.
1234 	 */
1235 	if (udf_vfsp->udf_lbsize != bsize) {
1236 		goto out;
1237 	}
1238 
1239 	udf_vfsp->udf_iseq_loc =
1240 	    SWAP_32(udf_vfsp->udf_lvd->lvd_int_seq_ext.ext_loc);
1241 	udf_vfsp->udf_iseq_len =
1242 	    SWAP_32(udf_vfsp->udf_lvd->lvd_int_seq_ext.ext_len);
1243 
1244 	udf_vfsp->udf_fsd_prn =
1245 	    SWAP_16(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_prn);
1246 	udf_vfsp->udf_fsd_loc =
1247 	    SWAP_32(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_loc);
1248 	udf_vfsp->udf_fsd_len =
1249 	    SWAP_32(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_len);
1250 
1251 
1252 	/*
1253 	 * Process partitions.
1254 	 */
1255 	udf_vfsp->udf_mtype = udf_vfsp->udf_parts[0].udp_access;
1256 	for (index = 0; index < udf_vfsp->udf_npart; index ++) {
1257 		if (udf_vfsp->udf_parts[index].udp_access <
1258 		    udf_vfsp->udf_mtype) {
1259 			udf_vfsp->udf_mtype =
1260 			    udf_vfsp->udf_parts[index].udp_access;
1261 		}
1262 	}
1263 	if ((udf_vfsp->udf_mtype < UDF_MT_RO) ||
1264 	    (udf_vfsp->udf_mtype > UDF_MT_OW)) {
1265 		udf_vfsp->udf_mtype = UDF_MT_RO;
1266 	}
1267 
1268 	udf_vfsp->udf_nmaps = 0;
1269 	hdr = (struct pmap_hdr *)udf_vfsp->udf_lvd->lvd_pmaps;
1270 	count = SWAP_32(udf_vfsp->udf_lvd->lvd_num_pmaps);
1271 	for (index = 0; index < count; index++) {
1272 
1273 		if ((hdr->maph_type == MAP_TYPE1) &&
1274 		    (hdr->maph_length == MAP_TYPE1_LEN)) {
1275 			typ1 = (struct pmap_typ1 *)hdr;
1276 
1277 			map = udf_vfsp->udf_maps;
1278 			udf_vfsp->udf_maps =
1279 			    kmem_zalloc(sizeof (struct ud_map) *
1280 			    (udf_vfsp->udf_nmaps + 1), KM_SLEEP);
1281 			if (map != NULL) {
1282 				bcopy(map, udf_vfsp->udf_maps,
1283 				    sizeof (struct ud_map) *
1284 				    udf_vfsp->udf_nmaps);
1285 				kmem_free(map, sizeof (struct ud_map) *
1286 				    udf_vfsp->udf_nmaps);
1287 			}
1288 			map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
1289 			map->udm_flags = UDM_MAP_NORM;
1290 			map->udm_vsn = SWAP_16(typ1->map1_vsn);
1291 			map->udm_pn = SWAP_16(typ1->map1_pn);
1292 			udf_vfsp->udf_nmaps ++;
1293 		} else if ((hdr->maph_type == MAP_TYPE2) &&
1294 		    (hdr->maph_length == MAP_TYPE2_LEN)) {
1295 			typ2 = (struct pmap_typ2 *)hdr;
1296 
1297 			if (strncmp(typ2->map2_pti.reg_id,
1298 			    UDF_VIRT_PART, 23) == 0) {
1299 				/*
1300 				 * Add this virtual partition map to the
1301 				 * normal map table so that we do not
1302 				 * have to handle it separately.
1303 				 */
1304 				map = udf_vfsp->udf_maps;
1305 				udf_vfsp->udf_maps =
1306 				    kmem_zalloc(sizeof (struct ud_map) *
1307 				    (udf_vfsp->udf_nmaps + 1), KM_SLEEP);
1308 				if (map != NULL) {
1309 					bcopy(map, udf_vfsp->udf_maps,
1310 					    sizeof (struct ud_map) *
1311 					    udf_vfsp->udf_nmaps);
1312 					kmem_free(map,
1313 					    sizeof (struct ud_map) *
1314 					    udf_vfsp->udf_nmaps);
1315 				}
1316 				map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
1317 				map->udm_flags = UDM_MAP_VPM;
1318 				map->udm_vsn = SWAP_16(typ2->map2_vsn);
1319 				map->udm_pn = SWAP_16(typ2->map2_pn);
1320 				udf_vfsp->udf_nmaps ++;
1321 				if (error = ud_get_last_block(dev, &lblkno)) {
1322 					goto out;
1323 				}
1324 				if (error = ud_val_get_vat(udf_vfsp, dev,
1325 				    lblkno, map)) {
1326 					goto out;
1327 				}
1328 			} else if (strncmp(typ2->map2_pti.reg_id,
1329 			    UDF_SPAR_PART, 23) == 0) {
1330 
1331 				if (SWAP_16(typ2->map2_pl) != 32) {
1332 					printf(
1333 					    "Packet Length is not valid %x\n",
1334 					    SWAP_16(typ2->map2_pl));
1335 					goto out;
1336 				}
1337 				if ((typ2->map2_nst < 1) ||
1338 				    (typ2->map2_nst > 4)) {
1339 					goto out;
1340 				}
1341 				map = udf_vfsp->udf_maps;
1342 				udf_vfsp->udf_maps =
1343 				    kmem_zalloc(sizeof (struct ud_map) *
1344 				    (udf_vfsp->udf_nmaps + 1),
1345 				    KM_SLEEP);
1346 				if (map != NULL) {
1347 					bcopy(map, udf_vfsp->udf_maps,
1348 					    sizeof (struct ud_map) *
1349 					    udf_vfsp->udf_nmaps);
1350 					kmem_free(map,
1351 					    sizeof (struct ud_map) *
1352 					    udf_vfsp->udf_nmaps);
1353 				}
1354 				map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
1355 				map->udm_flags = UDM_MAP_SPM;
1356 				map->udm_vsn = SWAP_16(typ2->map2_vsn);
1357 				map->udm_pn = SWAP_16(typ2->map2_pn);
1358 
1359 				udf_vfsp->udf_nmaps ++;
1360 
1361 				if (error = ud_read_sparing_tbls(udf_vfsp,
1362 				    dev, map, typ2)) {
1363 					goto out;
1364 				}
1365 			} else {
1366 				/*
1367 				 * Unknown type of partition
1368 				 * Bail out
1369 				 */
1370 				goto out;
1371 			}
1372 		} else {
1373 			/*
1374 			 * Unknown type of partition
1375 			 * Bail out
1376 			 */
1377 			goto out;
1378 		}
1379 		hdr = (struct pmap_hdr *)(((uint8_t *)hdr) + hdr->maph_length);
1380 	}
1381 
1382 
1383 	/*
1384 	 * Read Logical Volume Integrity Sequence
1385 	 * and process it
1386 	 */
1387 	secbp = ud_bread(dev, udf_vfsp->udf_iseq_loc << shift,
1388 	    udf_vfsp->udf_iseq_len);
1389 	if ((error = geterror(secbp)) != 0) {
1390 		cmn_err(CE_NOTE,
1391 		"udfs : Could not read Logical Volume Integrity Sequence %x",
1392 		    error);
1393 		brelse(secbp);
1394 		goto out;
1395 	}
1396 	udf_vfsp->udf_iseq = ngeteblk(udf_vfsp->udf_iseq_len);
1397 	bp = udf_vfsp->udf_iseq;
1398 	bp->b_edev = dev;
1399 	bp->b_dev = cmpdev(dev);
1400 	bp->b_blkno = udf_vfsp->udf_iseq_loc << shift;
1401 	bp->b_bcount = udf_vfsp->udf_iseq_len;
1402 	bcopy(secbp->b_un.b_addr, bp->b_un.b_addr, udf_vfsp->udf_iseq_len);
1403 	secbp->b_flags |= B_STALE | B_AGE;
1404 	brelse(secbp);
1405 
1406 	count = udf_vfsp->udf_iseq_len / DEV_BSIZE;
1407 	addr = bp->b_un.b_addr;
1408 	for (index = 0; index < count; index ++) {
1409 		ttag = (struct tag *)(addr + index * DEV_BSIZE);
1410 		desc_len = udf_vfsp->udf_iseq_len - (index * DEV_BSIZE);
1411 		if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_INT,
1412 		    udf_vfsp->udf_iseq_loc + (index >> shift),
1413 		    1, desc_len) == 0) {
1414 
1415 			struct log_vol_int_desc *lvid;
1416 
1417 			lvid = (struct log_vol_int_desc *)ttag;
1418 			udf_vfsp->udf_lvid = lvid;
1419 
1420 			if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
1421 				udf_vfsp->udf_clean = UDF_CLEAN;
1422 			} else {
1423 				udf_vfsp->udf_clean = UDF_DIRTY;
1424 			}
1425 
1426 			/*
1427 			 * update superblock with the metadata
1428 			 */
1429 			ud_convert_to_superblock(udf_vfsp, lvid);
1430 			break;
1431 		}
1432 	}
1433 
1434 	if (udf_vfsp->udf_lvid == NULL) {
1435 		goto out;
1436 	}
1437 
1438 	if ((blkno = ud_xlate_to_daddr(udf_vfsp,
1439 	    udf_vfsp->udf_fsd_prn, udf_vfsp->udf_fsd_loc,
1440 	    1, &dummy)) == 0) {
1441 		goto out;
1442 	}
1443 	secbp = ud_bread(dev, blkno << shift, udf_vfsp->udf_fsd_len);
1444 	if ((error = geterror(secbp)) != 0) {
1445 		cmn_err(CE_NOTE,
1446 		"udfs : Could not read File Set Descriptor %x", error);
1447 		brelse(secbp);
1448 		goto out;
1449 	}
1450 	fsd = (struct file_set_desc *)secbp->b_un.b_addr;
1451 	if (ud_verify_tag_and_desc(&fsd->fsd_tag, UD_FILE_SET_DESC,
1452 	    udf_vfsp->udf_fsd_loc,
1453 	    1, udf_vfsp->udf_fsd_len) != 0) {
1454 		secbp->b_flags = B_AGE | B_STALE;
1455 		brelse(secbp);
1456 		goto out;
1457 	}
1458 	udf_vfsp->udf_ricb_prn = SWAP_16(fsd->fsd_root_icb.lad_ext_prn);
1459 	udf_vfsp->udf_ricb_loc = SWAP_32(fsd->fsd_root_icb.lad_ext_loc);
1460 	udf_vfsp->udf_ricb_len = SWAP_32(fsd->fsd_root_icb.lad_ext_len);
1461 	secbp->b_flags = B_AGE | B_STALE;
1462 	brelse(secbp);
1463 	udf_vfsp->udf_root_blkno = ud_xlate_to_daddr(udf_vfsp,
1464 	    udf_vfsp->udf_ricb_prn, udf_vfsp->udf_ricb_loc,
1465 	    1, &dummy);
1466 
1467 	return (udf_vfsp);
1468 out:
1469 	ud_destroy_fsp(udf_vfsp);
1470 
1471 	return (NULL);
1472 }
1473 
1474 /*
1475  * release/free resources from one ud_map; map data was zalloc'd in
1476  * ud_validate_and_fill_superblock() and fields may later point to
1477  * valid data
1478  */
1479 static void
1480 ud_free_map(struct ud_map *map)
1481 {
1482 	uint32_t n;
1483 
1484 	if (map->udm_flags & UDM_MAP_VPM) {
1485 		if (map->udm_count) {
1486 			kmem_free(map->udm_count,
1487 			    map->udm_nent * sizeof (*map->udm_count));
1488 			map->udm_count = NULL;
1489 		}
1490 		if (map->udm_bp) {
1491 			for (n = 0; n < map->udm_nent; n++) {
1492 				if (map->udm_bp[n])
1493 					brelse(map->udm_bp[n]);
1494 			}
1495 			kmem_free(map->udm_bp,
1496 			    map->udm_nent * sizeof (*map->udm_bp));
1497 			map->udm_bp = NULL;
1498 		}
1499 		if (map->udm_addr) {
1500 			kmem_free(map->udm_addr,
1501 			    map->udm_nent * sizeof (*map->udm_addr));
1502 			map->udm_addr = NULL;
1503 		}
1504 	}
1505 	if (map->udm_flags & UDM_MAP_SPM) {
1506 		for (n = 0; n < MAX_SPM; n++) {
1507 			if (map->udm_sbp[n]) {
1508 				brelse(map->udm_sbp[n]);
1509 				map->udm_sbp[n] = NULL;
1510 				map->udm_spaddr[n] = NULL;
1511 			}
1512 		}
1513 	}
1514 }
1515 
1516 void
1517 ud_destroy_fsp(struct udf_vfs *udf_vfsp)
1518 {
1519 	int32_t i;
1520 
1521 	ud_printf("ud_destroy_fsp\n");
1522 	if (udf_vfsp == NULL)
1523 		return;
1524 
1525 	if (udf_vfsp->udf_maps) {
1526 		for (i = 0; i < udf_vfsp->udf_nmaps; i++)
1527 			ud_free_map(&udf_vfsp->udf_maps[i]);
1528 
1529 		kmem_free(udf_vfsp->udf_maps,
1530 		    udf_vfsp->udf_nmaps * sizeof (*udf_vfsp->udf_maps));
1531 	}
1532 
1533 	if (udf_vfsp->udf_parts) {
1534 		kmem_free(udf_vfsp->udf_parts,
1535 		    udf_vfsp->udf_npart * sizeof (*udf_vfsp->udf_parts));
1536 	}
1537 	if (udf_vfsp->udf_iseq) {
1538 		udf_vfsp->udf_iseq->b_flags |= (B_STALE|B_AGE);
1539 		brelse(udf_vfsp->udf_iseq);
1540 	}
1541 	if (udf_vfsp->udf_vds) {
1542 		udf_vfsp->udf_vds->b_flags |= (B_STALE|B_AGE);
1543 		brelse(udf_vfsp->udf_vds);
1544 	}
1545 	if (udf_vfsp->udf_vfs)
1546 		ud_vfs_remove(udf_vfsp);
1547 	if (udf_vfsp->udf_fsmnt) {
1548 		kmem_free(udf_vfsp->udf_fsmnt,
1549 		    strlen(udf_vfsp->udf_fsmnt) + 1);
1550 	}
1551 	kmem_free(udf_vfsp, sizeof (*udf_vfsp));
1552 }
1553 
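/*
 * Copy the interesting fields of the on-disk Logical Volume Integrity
 * Descriptor into the in-core superblock: per-partition free and total
 * block counts, the file and directory counts, and the minimum-read,
 * minimum-write and maximum-write UDF revisions (recorded as BCD).
 */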
1554 void
1555 ud_convert_to_superblock(struct udf_vfs *udf_vfsp,
1556     struct log_vol_int_desc *lvid)
1557 {
1558 	int32_t i, c;
1559 	uint32_t *temp;
1560 	struct ud_part *ud_part;
1561 	struct lvid_iu *iu;
1562 
1563 	udf_vfsp->udf_maxuniq = SWAP_64(lvid->lvid_uniqid);
1564 	temp = lvid->lvid_fst;
1565 	c = SWAP_32(lvid->lvid_npart);
1566 	ud_part = udf_vfsp->udf_parts;
1567 	for (i = 0; i < c; i++) {
1568 		if (i >= udf_vfsp->udf_npart) {
1569 			continue;
1570 		}
1571 		ud_part->udp_nfree =  SWAP_32(temp[i]);
1572 		ud_part->udp_nblocks =  SWAP_32(temp[c + i]);
1573 		udf_vfsp->udf_freeblks += SWAP_32(temp[i]);
1574 		udf_vfsp->udf_totalblks += SWAP_32(temp[c + i]);
1575 		ud_part++;
1576 	}
1577 
1578 	iu = (struct lvid_iu *)(temp + c * 2);
1579 	udf_vfsp->udf_nfiles = SWAP_32(iu->lvidiu_nfiles);
1580 	udf_vfsp->udf_ndirs = SWAP_32(iu->lvidiu_ndirs);
1581 	udf_vfsp->udf_miread = BCD2HEX_16(SWAP_16(iu->lvidiu_mread));
1582 	udf_vfsp->udf_miwrite = BCD2HEX_16(SWAP_16(iu->lvidiu_mwrite));
1583 	udf_vfsp->udf_mawrite = BCD2HEX_16(SWAP_16(iu->lvidiu_maxwr));
1584 }
1585 
1586 void
1587 ud_update_superblock(struct vfs *vfsp)
1588 {
1589 	struct udf_vfs *udf_vfsp;
1590 
1591 	ud_printf("ud_update_superblock\n");
1592 
1593 	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
1594 
1595 	mutex_enter(&udf_vfsp->udf_lock);
1596 	ud_sbwrite(udf_vfsp);
1597 	mutex_exit(&udf_vfsp->udf_lock);
1598 }
1599 
1600 
1601 #include <sys/dkio.h>
1602 #include <sys/cdio.h>
1603 #include <sys/vtoc.h>
1604 
1605 /*
1606  * This part of the code is known
1607  * to work only on sparc.  It needs
1608  * to be evaluated before using it on x86.
1609  */
1610 int32_t
1611 ud_get_last_block(dev_t dev, daddr_t *blkno)
1612 {
1613 	struct vtoc vtoc;
1614 	struct dk_cinfo dki_info;
1615 	int32_t rval, error;
1616 
1617 	if ((error = cdev_ioctl(dev, DKIOCGVTOC, (intptr_t)&vtoc,
1618 	    FKIOCTL|FREAD|FNATIVE, CRED(), &rval)) != 0) {
1619 		cmn_err(CE_NOTE, "Could not get the vtoc information");
1620 		return (error);
1621 	}
1622 
1623 	if (vtoc.v_sanity != VTOC_SANE) {
1624 		return (EINVAL);
1625 	}
1626 	if ((error = cdev_ioctl(dev, DKIOCINFO, (intptr_t)&dki_info,
1627 	    FKIOCTL|FREAD|FNATIVE, CRED(), &rval)) != 0) {
1628 		cmn_err(CE_NOTE, "Could not get the slice information");
1629 		return (error);
1630 	}
1631 
1632 	if (dki_info.dki_partition > V_NUMPAR) {
1633 		return (EINVAL);
1634 	}
1635 
1636 
1637 	*blkno = vtoc.v_part[dki_info.dki_partition].p_size;
1638 
1639 	return (0);
1640 }
1641 
1642 /* Search sequentially N - 152, N - 150, N - 2, N for the VAT ICB */
1643 /*
1644  * int32_t ud_sub_blks[] = {2, 0, 152, 150};
1645  */
1646 int32_t ud_sub_blks[] = {152, 150, 2, 0};
1647 int32_t ud_sub_count = 4;
1648 
1649 /*
1650  * Validate the VAT ICB
1651  */
1652 static int32_t
1653 ud_val_get_vat(struct udf_vfs *udf_vfsp, dev_t dev,
1654     daddr_t blkno, struct ud_map *udm)
1655 {
1656 	struct buf *secbp;
1657 	struct file_entry *fe;
1658 	int32_t end_loc, i, j, ad_type;
1659 	struct short_ad *sad;
1660 	struct long_ad *lad;
1661 	uint32_t count, blk;
1662 	struct ud_part *ud_part;
1663 	int err = 0;
1664 
1665 	end_loc = (blkno >> udf_vfsp->udf_l2d_shift) - 1;
1666 
1667 	for (i = 0; i < ud_sub_count; i++) {
1668 		udm->udm_vat_icb = end_loc - ud_sub_blks[i];
1669 
1670 		secbp = ud_bread(dev,
1671 		    udm->udm_vat_icb << udf_vfsp->udf_l2d_shift,
1672 		    udf_vfsp->udf_lbsize);
1673 		ASSERT(secbp->b_un.b_addr);
1674 
1675 		fe = (struct file_entry *)secbp->b_un.b_addr;
1676 		if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY, 0,
1677 		    0, 0) == 0) {
1678 			if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
1679 			    SWAP_32(fe->fe_tag.tag_loc),
1680 			    1, udf_vfsp->udf_lbsize) == 0) {
1681 				if (fe->fe_icb_tag.itag_ftype == 0) {
1682 					break;
1683 				}
1684 			}
1685 		}
1686 		secbp->b_flags |= B_AGE | B_STALE;
1687 		brelse(secbp);
1688 	}
1689 	if (i == ud_sub_count) {
1690 		return (EINVAL);
1691 	}
1692 
1693 	ad_type = SWAP_16(fe->fe_icb_tag.itag_flags) & 0x3;
1694 	if (ad_type == ICB_FLAG_ONE_AD) {
1695 		udm->udm_nent = 1;
1696 	} else if (ad_type == ICB_FLAG_SHORT_AD) {
1697 		udm->udm_nent =
1698 		    SWAP_32(fe->fe_len_adesc) / sizeof (struct short_ad);
1699 	} else if (ad_type == ICB_FLAG_LONG_AD) {
1700 		udm->udm_nent =
1701 		    SWAP_32(fe->fe_len_adesc) / sizeof (struct long_ad);
1702 	} else {
1703 		err = EINVAL;
1704 		goto end;
1705 	}
1706 
1707 	udm->udm_count = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_count),
1708 	    KM_SLEEP);
1709 	udm->udm_bp = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_bp),
1710 	    KM_SLEEP);
1711 	udm->udm_addr = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_addr),
1712 	    KM_SLEEP);
1713 
1714 	if (ad_type == ICB_FLAG_ONE_AD) {
1715 		udm->udm_count[0] = (SWAP_64(fe->fe_info_len) - 36) /
1716 		    sizeof (uint32_t);
1717 		udm->udm_bp[0] = secbp;
1718 		udm->udm_addr[0] = (uint32_t *)
1719 		    &fe->fe_spec[SWAP_32(fe->fe_len_ear)];
1720 		return (0);
1721 	}
1722 	for (i = 0; i < udm->udm_nent; i++) {
1723 		if (ad_type == ICB_FLAG_SHORT_AD) {
1724 			sad = (struct short_ad *)
1725 			    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
1726 			sad += i;
1727 			count = SWAP_32(sad->sad_ext_len);
1728 			blk = SWAP_32(sad->sad_ext_loc);
1729 		} else {
1730 			lad = (struct long_ad *)
1731 			    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
1732 			lad += i;
1733 			count = SWAP_32(lad->lad_ext_len);
1734 			blk = SWAP_32(lad->lad_ext_loc);
1735 			ASSERT(SWAP_16(lad->lad_ext_prn) == udm->udm_pn);
1736 		}
1737 		if ((count & 0x3FFFFFFF) == 0) {
1738 			break;
1739 		}
1740 		if (i < udm->udm_nent - 1) {
1741 			udm->udm_count[i] = count / 4;
1742 		} else {
1743 			udm->udm_count[i] = (count - 36) / 4;
1744 		}
1745 		ud_part = udf_vfsp->udf_parts;
1746 		for (j = 0; j < udf_vfsp->udf_npart; j++) {
1747 			if (udm->udm_pn == ud_part->udp_number) {
1748 				blk = ud_part->udp_start + blk;
1749 				break;
1750 			}
1751 		}
1752 		if (j == udf_vfsp->udf_npart) {
1753 			err = EINVAL;
1754 			break;
1755 		}
1756 
1757 		count = (count + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1758 		udm->udm_bp[i] = ud_bread(dev,
1759 		    blk << udf_vfsp->udf_l2d_shift, count);
1760 		if ((udm->udm_bp[i]->b_error != 0) ||
1761 		    (udm->udm_bp[i]->b_resid)) {
1762 			err = EINVAL;
1763 			break;
1764 		}
1765 		udm->udm_addr[i] = (uint32_t *)udm->udm_bp[i]->b_un.b_addr;
1766 	}
1767 
1768 end:
1769 	if (err)
1770 		ud_free_map(udm);
1771 	secbp->b_flags |= B_AGE | B_STALE;
1772 	brelse(secbp);
1773 	return (err);
1774 }
1775 
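/*
 * Read the copies of the sparing table described by a type-2 (sparable)
 * partition map and cache their buffers in the ud_map.  The mount is
 * allowed to proceed as long as at least one copy is valid; otherwise
 * EINVAL is returned.
 */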
1776 int32_t
1777 ud_read_sparing_tbls(struct udf_vfs *udf_vfsp,
1778     dev_t dev, struct ud_map *map, struct pmap_typ2 *typ2)
1779 {
1780 	int32_t index, valid = 0;
1781 	uint32_t sz;
1782 	struct buf *bp;
1783 	struct stbl *stbl;
1784 
1785 	map->udm_plen = SWAP_16(typ2->map2_pl);
1786 	map->udm_nspm = typ2->map2_nst;
1787 	map->udm_spsz = SWAP_32(typ2->map2_sest);
1788 	sz = (map->udm_spsz + udf_vfsp->udf_lbmask) & ~udf_vfsp->udf_lbmask;
1789 	if (sz == 0) {
1790 		return (0);
1791 	}
1792 
1793 	for (index = 0; index < map->udm_nspm; index++) {
1794 		map->udm_loc[index] = SWAP_32(typ2->map2_st[index]);
1795 
1796 		bp = ud_bread(dev,
1797 		    map->udm_loc[index] << udf_vfsp->udf_l2d_shift, sz);
1798 		if ((bp->b_error != 0) || (bp->b_resid)) {
1799 			brelse(bp);
1800 			continue;
1801 		}
1802 		stbl = (struct stbl *)bp->b_un.b_addr;
1803 		if (strncmp(stbl->stbl_si.reg_id, UDF_SPAR_TBL, 23) != 0) {
1804 			printf("Sparing Identifier does not match\n");
1805 			bp->b_flags |= B_AGE | B_STALE;
1806 			brelse(bp);
1807 			continue;
1808 		}
1809 		map->udm_sbp[index] = bp;
1810 		map->udm_spaddr[index] = bp->b_un.b_addr;
1811 #ifdef	UNDEF
1812 {
1813 	struct stbl_entry *te;
1814 	int32_t i, tbl_len;
1815 
1816 	te = (struct stbl_entry *)&stbl->stbl_entry;
1817 	tbl_len = SWAP_16(stbl->stbl_len);
1818 
1819 	printf("%x %x\n", tbl_len, SWAP_32(stbl->stbl_seqno));
1820 	printf("%x %x\n", bp->b_un.b_addr, te);
1821 
1822 	for (i = 0; i < tbl_len; i++) {
1823 		printf("%x %x\n", SWAP_32(te->sent_ol), SWAP_32(te->sent_ml));
1824 		te ++;
1825 	}
1826 }
1827 #endif
1828 		valid ++;
1829 	}
1830 
1831 	if (valid) {
1832 		return (0);
1833 	}
1834 	return (EINVAL);
1835 }
1836 
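/*
 * Discover the logical block size by probing for a valid Anchor Volume
 * Descriptor: try the standard location at sector 256 (adjusted for any
 * multi-session offset) and, when the last block is known, locations
 * relative to the end of the media, for each candidate block size from
 * DEV_BSIZE up to MAXBSIZE.  Returns the block size and AVD location, or
 * 0 if no AVD is found.
 */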
1837 uint32_t
1838 ud_get_lbsize(dev_t dev, uint32_t *loc)
1839 {
1840 	int32_t bsize, shift, index, end_index;
1841 	daddr_t last_block;
1842 	uint32_t avd_loc;
1843 	struct buf *bp;
1844 	struct anch_vol_desc_ptr *avdp;
1845 	uint32_t session_offset = 0;
1846 	int32_t rval;
1847 
1848 	if (ud_get_last_block(dev, &last_block) != 0) {
1849 		end_index = 1;
1850 	} else {
1851 		end_index = 3;
1852 	}
1853 
1854 	if (cdev_ioctl(dev, CDROMREADOFFSET, (intptr_t)&session_offset,
1855 	    FKIOCTL|FREAD|FNATIVE, CRED(), &rval) != 0) {
1856 		session_offset = 0;
1857 	}
1858 
1859 	for (index = 0; index < end_index; index++) {
1860 
1861 		for (bsize = DEV_BSIZE, shift = 0;
1862 		    bsize <= MAXBSIZE; bsize <<= 1, shift++) {
1863 
1864 			if (index == 0) {
1865 				avd_loc = 256;
1866 				if (bsize <= 2048) {
1867 					avd_loc +=
1868 					    session_offset * 2048 / bsize;
1869 				} else {
1870 					avd_loc +=
1871 					    session_offset / (bsize / 2048);
1872 				}
1873 			} else if (index == 1) {
1874 				avd_loc = last_block - (1 << shift);
1875 			} else {
1876 				avd_loc = last_block - (256 << shift);
1877 			}
1878 
1879 			bp = ud_bread(dev, avd_loc << shift,
1880 			    ANCHOR_VOL_DESC_LEN);
1881 			if (geterror(bp) != 0) {
1882 				brelse(bp);
1883 				continue;
1884 			}
1885 
1886 			/*
1887 			 * Verify if we have avdp here
1888 			 */
1889 			avdp = (struct anch_vol_desc_ptr *)bp->b_un.b_addr;
1890 			if (ud_verify_tag_and_desc(&avdp->avd_tag,
1891 			    UD_ANCH_VOL_DESC, avd_loc,
1892 			    1, ANCHOR_VOL_DESC_LEN) != 0) {
1893 				bp->b_flags |= B_AGE | B_STALE;
1894 				brelse(bp);
1895 				continue;
1896 			}
1897 			bp->b_flags |= B_AGE | B_STALE;
1898 			brelse(bp);
1899 			*loc = avd_loc;
1900 			return (bsize);
1901 		}
1902 	}
1903 
1904 	/*
1905 	 * Did not find the AVD at any of the locations.
1906 	 */
1907 	return (0);
1908 }
1909 
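/*
 * File system initialization routine, called via the vfsdef_t when the
 * module is installed: register the vfs and vnode operation templates,
 * remember the fstype index assigned to udfs and initialize the in-core
 * inode cache.
 */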
1910 static int
1911 udfinit(int fstype, char *name)
1912 {
1913 	static const fs_operation_def_t udf_vfsops_template[] = {
1914 		VFSNAME_MOUNT,		{ .vfs_mount = udf_mount },
1915 		VFSNAME_UNMOUNT,	{ .vfs_unmount = udf_unmount },
1916 		VFSNAME_ROOT,		{ .vfs_root = udf_root },
1917 		VFSNAME_STATVFS,	{ .vfs_statvfs = udf_statvfs },
1918 		VFSNAME_SYNC,		{ .vfs_sync = udf_sync },
1919 		VFSNAME_VGET,		{ .vfs_vget = udf_vget },
1920 		VFSNAME_MOUNTROOT,	{ .vfs_mountroot = udf_mountroot },
1921 		NULL,			NULL
1922 	};
1923 	extern struct vnodeops *udf_vnodeops;
1924 	extern const fs_operation_def_t udf_vnodeops_template[];
1925 	int error;
1926 
1927 	ud_printf("udfinit\n");
1928 
1929 	error = vfs_setfsops(fstype, udf_vfsops_template, NULL);
1930 	if (error != 0) {
1931 		cmn_err(CE_WARN, "udfinit: bad vfs ops template");
1932 		return (error);
1933 	}
1934 
1935 	error = vn_make_ops(name, udf_vnodeops_template, &udf_vnodeops);
1936 	if (error != 0) {
1937 		(void) vfs_freevfsops_by_type(fstype);
1938 		cmn_err(CE_WARN, "udfinit: bad vnode ops template");
1939 		return (error);
1940 	}
1941 
1942 	udf_fstype = fstype;
1943 
1944 	ud_init_inodes();
1945 
1946 	return (0);
1947 }
1948