/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mode.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/dnlc.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/fbuf.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/dirent.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/statvfs.h>
#include <sys/mount.h>
#include <sys/sunddi.h>
#include <sys/bootconf.h>
#include <sys/policy.h>

#include <vm/hat.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kmem.h>
#include <vm/seg_vn.h>
#include <vm/rm.h>
#include <vm/page.h>
#include <sys/swap.h>
#include <sys/mntent.h>


#include <fs/fs_subr.h>


#include <sys/fs/udf_volume.h>
#include <sys/fs/udf_inode.h>


extern struct vnode *common_specvp(struct vnode *vp);

extern kmutex_t ud_sync_busy;
static int32_t ud_mountfs(struct vfs *,
	enum whymountroot, dev_t, char *, struct cred *, int32_t);
static struct udf_vfs *ud_validate_and_fill_superblock(dev_t,
	int32_t, uint32_t);
void ud_destroy_fsp(struct udf_vfs *);
void ud_convert_to_superblock(struct udf_vfs *,
	struct log_vol_int_desc *);
void ud_update_superblock(struct vfs *);
int32_t ud_get_last_block(dev_t, daddr_t *);
static int32_t ud_val_get_vat(struct udf_vfs *,
	dev_t, daddr_t, struct ud_map *);
int32_t ud_read_sparing_tbls(struct udf_vfs *,
	dev_t, struct ud_map *, struct pmap_typ2 *);
uint32_t ud_get_lbsize(dev_t, uint32_t *);

static int32_t udf_mount(struct vfs *,
	struct vnode *, struct mounta *, struct cred *);
static int32_t udf_unmount(struct vfs *, int, struct cred *);
static int32_t udf_root(struct vfs *, struct vnode **);
static int32_t udf_statvfs(struct vfs *, struct statvfs64 *);
static int32_t udf_sync(struct vfs *, int16_t,
	struct cred *);
static int32_t udf_vget(struct vfs *, struct vnode **, struct fid *);
static int32_t udf_mountroot(struct vfs *vfsp, enum whymountroot);

static int udfinit(int, char *);

static mntopts_t udfs_mntopts;

static vfsdef_t vfw = {
	VFSDEF_VERSION,
	"udfs",
	udfinit,
	VSW_HASPROTO|VSW_CANREMOUNT|VSW_STATS,
	&udfs_mntopts
};

static mntopts_t udfs_mntopts = {
	0,
	NULL
};

/*
 * Module linkage information for the kernel.
 */
extern struct mod_ops mod_fsops;

static struct modlfs modlfs = {
	&mod_fsops, "filesystem for UDFS", &vfw
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlfs, NULL
};

char _depends_on[] = "fs/specfs";

int32_t udf_fstype = -1;

int
_init()
{
	return (mod_install(&modlinkage));
}
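/*
 * Note that _fini() below always fails with EBUSY, so once this module is
 * installed it is never unloaded; presumably the file system cannot tear
 * down its registered state safely.
 */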
int
_fini()
{
	return (EBUSY);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


/* -------------------- vfs routines -------------------- */

/*
 * XXX - this appears only to be used by the VM code to handle the case where
 * UNIX is running off the mini-root.  That probably wants to be done
 * differently.
 */
struct vnode *rootvp;
#ifndef	__lint
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", rootvp))
#endif
static int32_t
udf_mount(struct vfs *vfsp, struct vnode *mvp,
	struct mounta *uap, struct cred *cr)
{
	dev_t dev;
	struct vnode *bvp;
	struct pathname dpn;
	int32_t error;
	enum whymountroot why;
	int oflag, aflag;

	ud_printf("udf_mount\n");

	if ((error = secpolicy_fs_mount(cr, mvp, vfsp)) != 0) {
		return (error);
	}

	if (mvp->v_type != VDIR) {
		return (ENOTDIR);
	}

	mutex_enter(&mvp->v_lock);
	if ((uap->flags & MS_REMOUNT) == 0 &&
	    (uap->flags & MS_OVERLAY) == 0 &&
	    (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
		mutex_exit(&mvp->v_lock);
		return (EBUSY);
	}
	mutex_exit(&mvp->v_lock);

	if (error = pn_get(uap->dir, UIO_USERSPACE, &dpn)) {
		return (error);
	}

	/*
	 * Resolve path name of special file being mounted.
	 */
	if (error = lookupname(uap->spec, UIO_USERSPACE, FOLLOW, NULLVPP,
	    &bvp)) {
		pn_free(&dpn);
		return (error);
	}
	if (bvp->v_type != VBLK) {
		error = ENOTBLK;
		goto out;
	}
	dev = bvp->v_rdev;

	/*
	 * Ensure that this device isn't already mounted,
	 * unless this is a REMOUNT request
	 */
	if (vfs_devmounting(dev, vfsp)) {
		error = EBUSY;
		goto out;
	}
	if (vfs_devismounted(dev)) {
		if (uap->flags & MS_REMOUNT) {
			why = ROOT_REMOUNT;
		} else {
			error = EBUSY;
			goto out;
		}
	} else {
		why = ROOT_INIT;
	}
	if (getmajor(dev) >= devcnt) {
		error = ENXIO;
		goto out;
	}

	/*
	 * If the device is a tape, mount it read only
	 */
	if (devopsp[getmajor(dev)]->devo_cb_ops->cb_flag & D_TAPE) {
		vfsp->vfs_flag |= VFS_RDONLY;
	}

	if (uap->flags & MS_RDONLY) {
		vfsp->vfs_flag |= VFS_RDONLY;
	}

	/*
	 * Set mount options.
	 */
	if (uap->flags & MS_RDONLY) {
		vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
	}
	if (uap->flags & MS_NOSUID) {
		vfs_setmntopt(vfsp, MNTOPT_NOSUID, NULL, 0);
	}

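	/*
	 * At this point vfsp->vfs_flag carries the effective mode: either an
	 * explicit MS_RDONLY request or the D_TAPE override above results in
	 * VFS_RDONLY, and that flag (not uap->flags) drives the open and
	 * access checks that follow.
	 */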
	/*
	 * Verify that the caller can open the device special file as
	 * required.  It is not until this moment that we know whether
	 * we're mounting "ro" or not.
	 */
	if ((vfsp->vfs_flag & VFS_RDONLY) != 0) {
		oflag = FREAD;
		aflag = VREAD;
	} else {
		oflag = FREAD | FWRITE;
		aflag = VREAD | VWRITE;
	}
	if ((error = VOP_ACCESS(bvp, aflag, 0, cr, NULL)) != 0 ||
	    (error = secpolicy_spec_open(cr, bvp, oflag)) != 0) {
		goto out;
	}

	/*
	 * Mount the filesystem.
	 */
	error = ud_mountfs(vfsp, why, dev, dpn.pn_path, cr, 0);
out:
	VN_RELE(bvp);
	pn_free(&dpn);

	return (error);
}



/*
 * Unmount the file system pointed
 * to by vfsp.
 */
/* ARGSUSED */
static int32_t
udf_unmount(struct vfs *vfsp, int fflag, struct cred *cr)
{
	struct udf_vfs *udf_vfsp;
	struct vnode *bvp, *rvp;
	struct ud_inode *rip;
	int32_t flag;

	ud_printf("udf_unmount\n");

	if (secpolicy_fs_unmount(cr, vfsp) != 0) {
		return (EPERM);
	}

	/*
	 * Forced unmount is not supported by this
	 * file system, so return ENOTSUP.
	 */
	if (fflag & MS_FORCE)
		return (ENOTSUP);

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	flag = !(udf_vfsp->udf_flags & UDF_FL_RDONLY);
	bvp = udf_vfsp->udf_devvp;

	rvp = udf_vfsp->udf_root;
	ASSERT(rvp != NULL);
	rip = VTOI(rvp);

	(void) ud_release_cache(udf_vfsp);


	/* Flush all inodes except root */
	if (ud_iflush(vfsp) < 0) {
		return (EBUSY);
	}

	rw_enter(&rip->i_contents, RW_WRITER);
	(void) ud_syncip(rip, B_INVAL, I_SYNC);
	rw_exit(&rip->i_contents);

	mutex_enter(&ud_sync_busy);
	if ((udf_vfsp->udf_flags & UDF_FL_RDONLY) == 0) {
		bflush(vfsp->vfs_dev);
		mutex_enter(&udf_vfsp->udf_lock);
		udf_vfsp->udf_clean = UDF_CLEAN;
		mutex_exit(&udf_vfsp->udf_lock);
		ud_update_superblock(vfsp);
	}
	mutex_exit(&ud_sync_busy);

	mutex_destroy(&udf_vfsp->udf_lock);
	mutex_destroy(&udf_vfsp->udf_rename_lck);

	ud_delcache(rip);
	ITIMES(rip);
	VN_RELE(rvp);

	ud_destroy_fsp(udf_vfsp);

	(void) VOP_PUTPAGE(bvp, (offset_t)0, (uint32_t)0, B_INVAL, cr, NULL);
	(void) VOP_CLOSE(bvp, flag, 1, (offset_t)0, cr, NULL);

	(void) bfinval(vfsp->vfs_dev, 1);
	VN_RELE(bvp);


	return (0);
}


/*
 * Get the root vp for the
 * file system
 */
static int32_t
udf_root(struct vfs *vfsp, struct vnode **vpp)
{
	struct udf_vfs *udf_vfsp;
	struct vnode *vp;

	ud_printf("udf_root\n");

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;

	ASSERT(udf_vfsp != NULL);
	ASSERT(udf_vfsp->udf_root != NULL);

	vp = udf_vfsp->udf_root;
	VN_HOLD(vp);
	*vpp = vp;
	return (0);
}


/*
 * Get file system statistics.
 */
static int32_t
udf_statvfs(struct vfs *vfsp, struct statvfs64 *sp)
{
	struct udf_vfs *udf_vfsp;
	struct ud_part *parts;
	dev32_t d32;
	int32_t index;

	ud_printf("udf_statvfs\n");

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	(void) bzero(sp, sizeof (struct statvfs64));

	mutex_enter(&udf_vfsp->udf_lock);
	sp->f_bsize = udf_vfsp->udf_lbsize;
	sp->f_frsize = udf_vfsp->udf_lbsize;
	sp->f_blocks = 0;
	sp->f_bfree = 0;
	parts = udf_vfsp->udf_parts;
	for (index = 0; index < udf_vfsp->udf_npart; index++) {
		sp->f_blocks += parts->udp_nblocks;
		sp->f_bfree += parts->udp_nfree;
		parts++;
	}
	sp->f_bavail = sp->f_bfree;

	/*
	 * Since no real inodes are allocated, approximate:
	 * each new file will occupy about
	 * 38 bytes (per-dirent overhead) + MAXNAMLEN / 2 + inode size
	 * (== block size).
	 */
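	/*
	 * Worked example (a hedged reading of the constant below): with a
	 * 2048-byte logical block, each prospective file is charged
	 * 146 + 2048 = 2194 bytes of the available space.  Note that 146
	 * is 38 + 108, which does not quite match 38 + MAXNAMLEN / 2
	 * (165 with MAXNAMLEN of 255); the code appears to assume an
	 * average name of roughly 108 bytes.
	 */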
	sp->f_ffree = sp->f_favail =
	    (sp->f_bavail * sp->f_bsize) / (146 + sp->f_bsize);

	/*
	 * The total number of inodes is
	 * the sum of files + directories + free inodes
	 */
	sp->f_files = sp->f_ffree +
	    udf_vfsp->udf_nfiles +
	    udf_vfsp->udf_ndirs;
	(void) cmpldev(&d32, vfsp->vfs_dev);
	sp->f_fsid = d32;
	(void) strcpy(sp->f_basetype, vfssw[vfsp->vfs_fstype].vsw_name);
	sp->f_flag = vf_to_stf(vfsp->vfs_flag);
	sp->f_namemax = MAXNAMLEN;
	(void) strcpy(sp->f_fstr, udf_vfsp->udf_volid);

	mutex_exit(&udf_vfsp->udf_lock);

	return (0);
}


/*
 * Flush any pending I/O to file system vfsp.
 * The ud_update() routine will only flush *all* udf files.
 */
/* ARGSUSED */
static int32_t
udf_sync(struct vfs *vfsp, int16_t flag, struct cred *cr)
{
	ud_printf("udf_sync\n");

	ud_update(flag);
	return (0);
}


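/*
 * Translate a file identifier (as handed out by VOP_FID, typically used for
 * NFS file handles) back into a vnode.  The inode is re-read by its
 * (partition, ICB block) pair and then cross-checked against the stored
 * unique id and partition number, so a stale handle is rejected with EINVAL
 * instead of silently mapping to a reused inode.
 */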
/* ARGSUSED */
static int32_t
udf_vget(struct vfs *vfsp,
	struct vnode **vpp, struct fid *fidp)
{
	int32_t error = 0;
	struct udf_fid *udfid;
	struct udf_vfs *udf_vfsp;
	struct ud_inode *ip;

	ud_printf("udf_vget\n");

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	if (udf_vfsp == NULL) {
		*vpp = NULL;
		return (0);
	}

	udfid = (struct udf_fid *)fidp;
	if ((error = ud_iget(vfsp, udfid->udfid_prn,
	    udfid->udfid_icb_lbn, &ip, NULL, CRED())) != 0) {
		*vpp = NULL;
		return (error);
	}

	rw_enter(&ip->i_contents, RW_READER);
	if ((udfid->udfid_uinq_lo != (ip->i_uniqid & 0xffffffff)) ||
	    (udfid->udfid_prn != ip->i_icb_prn)) {
		rw_exit(&ip->i_contents);
		VN_RELE(ITOV(ip));
		*vpp = NULL;
		return (EINVAL);
	}
	rw_exit(&ip->i_contents);

	*vpp = ITOV(ip);
	return (0);
}


/*
 * Mount root file system.
 * "why" is ROOT_INIT on initial call, ROOT_REMOUNT if called to
 * remount the root file system, and ROOT_UNMOUNT if called to
 * unmount the root (e.g., as part of a system shutdown).
 *
 * XXX - this may be partially machine-dependent; it, along with the VFS_SWAPVP
 * operation, goes along with auto-configuration.  A mechanism should be
 * provided by which machine-INdependent code in the kernel can say "get me the
 * right root file system" and "get me the right initial swap area", and have
 * that done in what may well be a machine-dependent fashion.
 * Unfortunately, it is also file-system-type dependent (NFS gets it via
 * bootparams calls, UFS gets it from various and sundry machine-dependent
 * mechanisms, as SPECFS does for swap).
 */
/* ARGSUSED */
static int32_t
udf_mountroot(struct vfs *vfsp, enum whymountroot why)
{
	dev_t rootdev;
	static int32_t udf_rootdone = 0;
	struct vnode *vp = NULL;
	int32_t ovflags, error;
	ud_printf("udf_mountroot\n");

	if (why == ROOT_INIT) {
		if (udf_rootdone++) {
			return (EBUSY);
		}
		rootdev = getrootdev();
		if (rootdev == (dev_t)NODEV) {
			return (ENODEV);
		}
		vfsp->vfs_dev = rootdev;
		vfsp->vfs_flag |= VFS_RDONLY;
	} else if (why == ROOT_REMOUNT) {
		vp = ((struct udf_vfs *)vfsp->vfs_data)->udf_devvp;
		(void) dnlc_purge_vfsp(vfsp, 0);
		vp = common_specvp(vp);
		(void) VOP_PUTPAGE(vp, (offset_t)0,
		    (uint32_t)0, B_INVAL, CRED(), NULL);
		binval(vfsp->vfs_dev);

		ovflags = vfsp->vfs_flag;
		vfsp->vfs_flag &= ~VFS_RDONLY;
		vfsp->vfs_flag |= VFS_REMOUNT;
		rootdev = vfsp->vfs_dev;
	} else if (why == ROOT_UNMOUNT) {
		ud_update(0);
		vp = ((struct udf_vfs *)vfsp->vfs_data)->udf_devvp;
		(void) VOP_CLOSE(vp, FREAD|FWRITE, 1,
		    (offset_t)0, CRED(), NULL);
		return (0);
	}

	if ((error = vfs_lock(vfsp)) != 0) {
		return (error);
	}

	error = ud_mountfs(vfsp, why, rootdev, "/", CRED(), 1);
	if (error) {
		vfs_unlock(vfsp);
		if (why == ROOT_REMOUNT) {
			vfsp->vfs_flag = ovflags;
		}
		if (rootvp) {
			VN_RELE(rootvp);
			rootvp = (struct vnode *)0;
		}
		return (error);
	}

	if (why == ROOT_INIT) {
		vfs_add((struct vnode *)0, vfsp,
		    (vfsp->vfs_flag & VFS_RDONLY) ? MS_RDONLY : 0);
	}
	vfs_unlock(vfsp);
	return (0);
}


/* ------------------------- local routines ------------------------- */


static int32_t
ud_mountfs(struct vfs *vfsp,
	enum whymountroot why, dev_t dev, char *name,
	struct cred *cr, int32_t isroot)
{
	struct vnode *devvp = NULL;
	int32_t error = 0;
	int32_t needclose = 0;
	struct udf_vfs *udf_vfsp = NULL;
	struct log_vol_int_desc *lvid;
	struct ud_inode *rip = NULL;
	struct vnode *rvp = NULL;
	int32_t i, lbsize;
	uint32_t avd_loc;
	struct ud_map *map;
	int32_t desc_len;

	ud_printf("ud_mountfs\n");

	if (why == ROOT_INIT) {
		/*
		 * Open the device.
		 */
		devvp = makespecvp(dev, VBLK);

		/*
		 * Open block device mounted on.
		 * When bio is fixed for vnodes this can all be vnode
		 * operations.
		 */
		error = VOP_OPEN(&devvp,
		    (vfsp->vfs_flag & VFS_RDONLY) ? FREAD : FREAD|FWRITE,
		    cr, NULL);
		if (error) {
			goto out;
		}
		needclose = 1;

		/*
		 * Refuse to go any further if this
		 * device is being used for swapping.
		 */
		if (IS_SWAPVP(devvp)) {
			error = EBUSY;
			goto out;
		}
	}

	/*
	 * check for dev already mounted on
	 */
	if (vfsp->vfs_flag & VFS_REMOUNT) {
		struct tag *ttag;
		int32_t index, count;
		struct buf *tpt = 0;
		caddr_t addr;


		/* cannot remount to RDONLY */
		if (vfsp->vfs_flag & VFS_RDONLY) {
			return (EINVAL);
		}

		if (vfsp->vfs_dev != dev) {
			return (EINVAL);
		}

		udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
		devvp = udf_vfsp->udf_devvp;

		/*
		 * fsck may have altered the file system; discard
		 * as much incore data as possible.  Don't flush
		 * if this is a rw to rw remount; it's just resetting
		 * the options.
		 */
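		/*
		 * The read-only to read/write transition below only succeeds
		 * when the on-disc metadata allows it: the recorded minimum
		 * read and write revisions must not exceed UDF 1.50, the
		 * media must be overwritable (UDF_MT_OW), and the logical
		 * volume integrity descriptor must record a clean close.
		 */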
		if (udf_vfsp->udf_flags & UDF_FL_RDONLY) {
			(void) dnlc_purge_vfsp(vfsp, 0);
			(void) VOP_PUTPAGE(devvp, (offset_t)0, (uint_t)0,
			    B_INVAL, CRED(), NULL);
			(void) ud_iflush(vfsp);
			bflush(dev);
			binval(dev);
		}

		/*
		 * We can read and write only UDF 1.50;
		 * disallow remounting any higher version.
		 */
		if ((udf_vfsp->udf_miread > UDF_150) ||
		    (udf_vfsp->udf_miwrite > UDF_150)) {
			error = EINVAL;
			goto remountout;
		}

		/*
		 * read/write to read/write; all done
		 */
		if (udf_vfsp->udf_flags & UDF_FL_RW) {
			goto remountout;
		}

		/*
		 * Does the media type allow a writable mount?
		 */
		if (udf_vfsp->udf_mtype != UDF_MT_OW) {
			error = EINVAL;
			goto remountout;
		}

		/*
		 * Read the metadata
		 * and check if it is possible to
		 * mount in rw mode
		 */
		tpt = ud_bread(vfsp->vfs_dev,
		    udf_vfsp->udf_iseq_loc << udf_vfsp->udf_l2d_shift,
		    udf_vfsp->udf_iseq_len);
		if (tpt->b_flags & B_ERROR) {
			error = EIO;
			goto remountout;
		}
		count = udf_vfsp->udf_iseq_len / DEV_BSIZE;
		addr = tpt->b_un.b_addr;
		for (index = 0; index < count; index ++) {
			ttag = (struct tag *)(addr + index * DEV_BSIZE);
			desc_len = udf_vfsp->udf_iseq_len - (index * DEV_BSIZE);
			if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_INT,
			    udf_vfsp->udf_iseq_loc +
			    (index >> udf_vfsp->udf_l2d_shift),
			    1, desc_len) == 0) {
				struct log_vol_int_desc *lvid;

				lvid = (struct log_vol_int_desc *)ttag;

				if (SWAP_32(lvid->lvid_int_type) !=
				    LOG_VOL_CLOSE_INT) {
					error = EINVAL;
					goto remountout;
				}

				/*
				 * Copy new data to old data
				 */
				bcopy(udf_vfsp->udf_iseq->b_un.b_addr,
				    tpt->b_un.b_addr, udf_vfsp->udf_iseq_len);
				break;
			}
		}

		udf_vfsp->udf_flags = UDF_FL_RW;

		mutex_enter(&udf_vfsp->udf_lock);
		ud_sbwrite(udf_vfsp);
		mutex_exit(&udf_vfsp->udf_lock);
remountout:
		if (tpt != NULL) {
			tpt->b_flags = B_AGE | B_STALE;
			brelse(tpt);
		}
		return (error);
	}

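	/*
	 * From here on this is a fresh (non-remount) mount: flush any cached
	 * pages for the device, locate the anchor volume descriptor to learn
	 * the logical block size, build the in-core udf_vfs from the volume
	 * descriptor sequences, and finally read the root ICB.
	 */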
	ASSERT(devvp != 0);
	/*
	 * Flush back any dirty pages on the block device to
	 * try and keep the buffer cache in sync with the page
	 * cache if someone is trying to use block devices when
	 * they really should be using the raw device.
	 */
	(void) VOP_PUTPAGE(common_specvp(devvp), (offset_t)0,
	    (uint32_t)0, B_INVAL, cr, NULL);


	/*
	 * Check if the file system
	 * is a valid udfs and fill
	 * the required fields in udf_vfs
	 */
#ifndef	__lint
	_NOTE(NO_COMPETING_THREADS_NOW);
#endif

	if ((lbsize = ud_get_lbsize(dev, &avd_loc)) == 0) {
		error = EINVAL;
		goto out;
	}

	udf_vfsp = ud_validate_and_fill_superblock(dev, lbsize, avd_loc);
	if (udf_vfsp == NULL) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Fill in vfs private data
	 */
	vfsp->vfs_fstype = udf_fstype;
	vfs_make_fsid(&vfsp->vfs_fsid, dev, udf_fstype);
	vfsp->vfs_data = (caddr_t)udf_vfsp;
	vfsp->vfs_dev = dev;
	vfsp->vfs_flag |= VFS_NOTRUNC;
	udf_vfsp->udf_devvp = devvp;

	udf_vfsp->udf_fsmnt = kmem_zalloc(strlen(name) + 1, KM_SLEEP);
	(void) strcpy(udf_vfsp->udf_fsmnt, name);

	udf_vfsp->udf_vfs = vfsp;
	udf_vfsp->udf_rdclustsz = udf_vfsp->udf_wrclustsz = maxphys;

	udf_vfsp->udf_mod = 0;


	lvid = udf_vfsp->udf_lvid;
	if (vfsp->vfs_flag & VFS_RDONLY) {
		/*
		 * We can read only up to UDF 1.50;
		 * disallow mounting any higher version.
		 */
		if (udf_vfsp->udf_miread > UDF_150) {
			error = EINVAL;
			goto out;
		}
		udf_vfsp->udf_flags = UDF_FL_RDONLY;
		if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
			udf_vfsp->udf_clean = UDF_CLEAN;
		} else {
			/* Do we have a VAT at the end of the recorded media */
			map = udf_vfsp->udf_maps;
			for (i = 0; i < udf_vfsp->udf_nmaps; i++) {
				if (map->udm_flags & UDM_MAP_VPM) {
					break;
				}
				map++;
			}
			if (i == udf_vfsp->udf_nmaps) {
				error = ENOSPC;
				goto out;
			}
			udf_vfsp->udf_clean = UDF_CLEAN;
		}
	} else {
		/*
		 * We can read and write only UDF 1.50;
		 * disallow mounting any higher version.
		 */
		if ((udf_vfsp->udf_miread > UDF_150) ||
		    (udf_vfsp->udf_miwrite > UDF_150)) {
			error = EINVAL;
			goto out;
		}
		/*
		 * Check if the media allows
		 * us to mount read/write
		 */
		if (udf_vfsp->udf_mtype != UDF_MT_OW) {
			error = EACCES;
			goto out;
		}

		/*
		 * Check if we have a VAT on writable media;
		 * we cannot use the media in the presence of a VAT,
		 * so deny the RW mount.
		 */
		map = udf_vfsp->udf_maps;
		ASSERT(map != NULL);
		for (i = 0; i < udf_vfsp->udf_nmaps; i++) {
			if (map->udm_flags & UDM_MAP_VPM) {
				error = EACCES;
				goto out;
			}
			map++;
		}

		/*
		 * Check if the domain Id allows
		 * us to write
		 */
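		/*
		 * Byte 2 of the domain identifier suffix appears to hold the
		 * UDF write-protect flags (bit 0 hard, bit 1 soft write
		 * protect); refuse write access if either bit is set.
		 */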
		if (udf_vfsp->udf_lvd->lvd_dom_id.reg_ids[2] & 0x3) {
			error = EACCES;
			goto out;
		}
		udf_vfsp->udf_flags = UDF_FL_RW;

		if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
			udf_vfsp->udf_clean = UDF_CLEAN;
		} else {
			if (isroot) {
				udf_vfsp->udf_clean = UDF_DIRTY;
			} else {
				error = ENOSPC;
				goto out;
			}
		}
	}

	mutex_init(&udf_vfsp->udf_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&udf_vfsp->udf_rename_lck, NULL, MUTEX_DEFAULT, NULL);

#ifndef	__lint
	_NOTE(COMPETING_THREADS_NOW);
#endif
	if (error = ud_iget(vfsp, udf_vfsp->udf_ricb_prn,
	    udf_vfsp->udf_ricb_loc, &rip, NULL, cr)) {
		mutex_destroy(&udf_vfsp->udf_lock);
		goto out;
	}


	/*
	 * Get the root inode and
	 * initialize the root vnode
	 */
	rvp = ITOV(rip);
	mutex_enter(&rvp->v_lock);
	rvp->v_flag |= VROOT;
	mutex_exit(&rvp->v_lock);
	udf_vfsp->udf_root = rvp;


	if (why == ROOT_INIT && isroot)
		rootvp = devvp;

	ud_vfs_add(udf_vfsp);

	if (udf_vfsp->udf_flags == UDF_FL_RW) {
		udf_vfsp->udf_clean = UDF_DIRTY;
		ud_update_superblock(vfsp);
	}

	return (0);

out:
	ud_destroy_fsp(udf_vfsp);
	if (needclose) {
		(void) VOP_CLOSE(devvp, (vfsp->vfs_flag & VFS_RDONLY) ?
		    FREAD : FREAD|FWRITE, 1, (offset_t)0, cr, NULL);
		bflush(dev);
		binval(dev);
	}
	VN_RELE(devvp);

	return (error);
}


static struct udf_vfs *
ud_validate_and_fill_superblock(dev_t dev, int32_t bsize, uint32_t avd_loc)
{
	int32_t error, count, index, shift;
	uint32_t dummy, vds_loc;
	caddr_t addr;
	daddr_t blkno, lblkno;
	struct buf *secbp, *bp;
	struct tag *ttag;
	struct anch_vol_desc_ptr *avdp;
	struct file_set_desc *fsd;
	struct udf_vfs *udf_vfsp = NULL;
	struct pmap_hdr *hdr;
	struct pmap_typ1 *typ1;
	struct pmap_typ2 *typ2;
	struct ud_map *map;
	int32_t desc_len;

	ud_printf("ud_validate_and_fill_superblock\n");

	if (bsize < DEV_BSIZE) {
		return (NULL);
	}
	shift = 0;
	while ((bsize >> shift) > DEV_BSIZE) {
		shift++;
	}

	/*
	 * Read Anchor Volume Descriptor
	 * Verify it and get the location of
	 * Main Volume Descriptor Sequence
	 */
	secbp = ud_bread(dev, avd_loc << shift, ANCHOR_VOL_DESC_LEN);
	if ((error = geterror(secbp)) != 0) {
		cmn_err(CE_NOTE,
		    "udfs : Could not read Anchor Volume Desc %x", error);
		brelse(secbp);
		return (NULL);
	}
	avdp = (struct anch_vol_desc_ptr *)secbp->b_un.b_addr;
	if (ud_verify_tag_and_desc(&avdp->avd_tag, UD_ANCH_VOL_DESC,
	    avd_loc, 1, ANCHOR_VOL_DESC_LEN) != 0) {
		brelse(secbp);
		return (NULL);
	}
	udf_vfsp = (struct udf_vfs *)
	    kmem_zalloc(sizeof (struct udf_vfs), KM_SLEEP);
	udf_vfsp->udf_mvds_loc = SWAP_32(avdp->avd_main_vdse.ext_loc);
	udf_vfsp->udf_mvds_len = SWAP_32(avdp->avd_main_vdse.ext_len);
	udf_vfsp->udf_rvds_loc = SWAP_32(avdp->avd_res_vdse.ext_loc);
	udf_vfsp->udf_rvds_len = SWAP_32(avdp->avd_res_vdse.ext_len);
	secbp->b_flags = B_AGE | B_STALE;
	brelse(secbp);

	/*
	 * Read Main Volume Descriptor Sequence
	 * and process it
	 */
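	/*
	 * The loop below walks the descriptor sequence one DEV_BSIZE tag at
	 * a time (falling back to the reserve sequence if the main one is
	 * unreadable), keeps the primary and logical volume descriptors with
	 * the highest sequence numbers, merges partition descriptors by
	 * partition number, and stops at a terminating descriptor.
	 */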
	vds_loc = udf_vfsp->udf_mvds_loc;
	secbp = ud_bread(dev, vds_loc << shift,
	    udf_vfsp->udf_mvds_len);
	if ((error = geterror(secbp)) != 0) {
		brelse(secbp);
		cmn_err(CE_NOTE,
		    "udfs : Could not read Main Volume Desc %x", error);

		vds_loc = udf_vfsp->udf_rvds_loc;
		secbp = ud_bread(dev, vds_loc << shift,
		    udf_vfsp->udf_rvds_len);
		if ((error = geterror(secbp)) != 0) {
			brelse(secbp);
			cmn_err(CE_NOTE,
			    "udfs : Could not read Res Volume Desc %x", error);
			return (NULL);
		}
	}

	udf_vfsp->udf_vds = ngeteblk(udf_vfsp->udf_mvds_len);
	bp = udf_vfsp->udf_vds;
	bp->b_edev = dev;
	bp->b_dev = cmpdev(dev);
	bp->b_blkno = vds_loc << shift;
	bp->b_bcount = udf_vfsp->udf_mvds_len;
	bcopy(secbp->b_un.b_addr, bp->b_un.b_addr, udf_vfsp->udf_mvds_len);
	secbp->b_flags |= B_STALE | B_AGE;
	brelse(secbp);


	count = udf_vfsp->udf_mvds_len / DEV_BSIZE;
	addr = bp->b_un.b_addr;
	for (index = 0; index < count; index ++) {
		ttag = (struct tag *)(addr + index * DEV_BSIZE);
		desc_len = udf_vfsp->udf_mvds_len - (index * DEV_BSIZE);
		if (ud_verify_tag_and_desc(ttag, UD_PRI_VOL_DESC,
		    vds_loc + (index >> shift),
		    1, desc_len) == 0) {
			if (udf_vfsp->udf_pvd == NULL) {
				udf_vfsp->udf_pvd =
				    (struct pri_vol_desc *)ttag;
			} else {
				struct pri_vol_desc *opvd, *npvd;

				opvd = udf_vfsp->udf_pvd;
				npvd = (struct pri_vol_desc *)ttag;

				if ((strncmp(opvd->pvd_vsi,
				    npvd->pvd_vsi, 128) == 0) &&
				    (strncmp(opvd->pvd_vol_id,
				    npvd->pvd_vol_id, 32) == 0) &&
				    (strncmp((caddr_t)&opvd->pvd_desc_cs,
				    (caddr_t)&npvd->pvd_desc_cs,
				    sizeof (charspec_t)) == 0)) {

					if (SWAP_32(opvd->pvd_vdsn) <
					    SWAP_32(npvd->pvd_vdsn)) {
						udf_vfsp->udf_pvd = npvd;
					}
				} else {
					goto out;
				}
			}
		} else if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_DESC,
		    vds_loc + (index >> shift),
		    1, desc_len) == 0) {
			struct log_vol_desc *lvd;

			lvd = (struct log_vol_desc *)ttag;
			if (strncmp(lvd->lvd_dom_id.reg_id,
			    UDF_DOMAIN_NAME, 23) != 0) {
				printf("Domain ID in lvd is not valid\n");
				goto out;
			}

			if (udf_vfsp->udf_lvd == NULL) {
				udf_vfsp->udf_lvd = lvd;
			} else {
				struct log_vol_desc *olvd;

				olvd = udf_vfsp->udf_lvd;
				if ((strncmp((caddr_t)&olvd->lvd_desc_cs,
				    (caddr_t)&lvd->lvd_desc_cs,
				    sizeof (charspec_t)) == 0) &&
				    (strncmp(olvd->lvd_lvid,
				    lvd->lvd_lvid, 128) == 0)) {
					if (SWAP_32(olvd->lvd_vdsn) <
					    SWAP_32(lvd->lvd_vdsn)) {
						udf_vfsp->udf_lvd = lvd;
					}
				} else {
					goto out;
				}
			}
		} else if (ud_verify_tag_and_desc(ttag, UD_PART_DESC,
		    vds_loc + (index >> shift),
		    1, desc_len) == 0) {
			int32_t i;
			struct phdr_desc *hdr;
			struct part_desc *pdesc;
			struct ud_part *pnew, *pold, *part;

			pdesc = (struct part_desc *)ttag;
			pold = udf_vfsp->udf_parts;
			for (i = 0; i < udf_vfsp->udf_npart; i++) {
				if (pold->udp_number ==
				    SWAP_16(pdesc->pd_pnum)) {
					if (SWAP_32(pdesc->pd_vdsn) >
					    pold->udp_seqno) {
						pold->udp_seqno =
						    SWAP_32(pdesc->pd_vdsn);
						pold->udp_access =
						    SWAP_32(pdesc->pd_acc_type);
						pold->udp_start =
						    SWAP_32(pdesc->pd_part_start);
						pold->udp_length =
						    SWAP_32(pdesc->pd_part_length);
					}
					goto loop_end;
				}
				pold ++;
			}
			pold = udf_vfsp->udf_parts;
			udf_vfsp->udf_npart++;
			pnew = kmem_zalloc(udf_vfsp->udf_npart *
			    sizeof (struct ud_part), KM_SLEEP);
			udf_vfsp->udf_parts = pnew;
			if (pold) {
				bcopy(pold, pnew,
				    sizeof (struct ud_part) *
				    (udf_vfsp->udf_npart - 1));
				kmem_free(pold,
				    sizeof (struct ud_part) *
				    (udf_vfsp->udf_npart - 1));
			}
			part = pnew + (udf_vfsp->udf_npart - 1);
			part->udp_number = SWAP_16(pdesc->pd_pnum);
			part->udp_seqno = SWAP_32(pdesc->pd_vdsn);
			part->udp_access = SWAP_32(pdesc->pd_acc_type);
			part->udp_start = SWAP_32(pdesc->pd_part_start);
			part->udp_length = SWAP_32(pdesc->pd_part_length);
			part->udp_last_alloc = 0;

			/*
			 * Figure out space bitmaps
			 * or space tables
			 */
			hdr = (struct phdr_desc *)pdesc->pd_pc_use;
			if (hdr->phdr_ust.sad_ext_len) {
				part->udp_flags = UDP_SPACETBLS;
				part->udp_unall_loc =
				    SWAP_32(hdr->phdr_ust.sad_ext_loc);
				part->udp_unall_len =
				    SWAP_32(hdr->phdr_ust.sad_ext_len);
				part->udp_freed_loc =
				    SWAP_32(hdr->phdr_fst.sad_ext_loc);
				part->udp_freed_len =
				    SWAP_32(hdr->phdr_fst.sad_ext_len);
			} else {
				part->udp_flags = UDP_BITMAPS;
				part->udp_unall_loc =
				    SWAP_32(hdr->phdr_usb.sad_ext_loc);
				part->udp_unall_len =
				    SWAP_32(hdr->phdr_usb.sad_ext_len);
				part->udp_freed_loc =
				    SWAP_32(hdr->phdr_fsb.sad_ext_loc);
				part->udp_freed_len =
				    SWAP_32(hdr->phdr_fsb.sad_ext_len);
			}
		} else if (ud_verify_tag_and_desc(ttag, UD_TERM_DESC,
		    vds_loc + (index >> shift),
		    1, desc_len) == 0) {

			break;
		}
loop_end:
		;
	}
	if ((udf_vfsp->udf_pvd == NULL) ||
	    (udf_vfsp->udf_lvd == NULL) ||
	    (udf_vfsp->udf_parts == NULL)) {
		goto out;
	}

	/*
	 * Process Primary Volume Descriptor
	 */
	(void) strncpy(udf_vfsp->udf_volid, udf_vfsp->udf_pvd->pvd_vol_id, 32);
	udf_vfsp->udf_volid[31] = '\0';
	udf_vfsp->udf_tsno = SWAP_16(udf_vfsp->udf_pvd->pvd_tag.tag_sno);

	/*
	 * Process Logical Volume Descriptor
	 */
	udf_vfsp->udf_lbsize =
	    SWAP_32(udf_vfsp->udf_lvd->lvd_log_bsize);
	udf_vfsp->udf_lbmask = udf_vfsp->udf_lbsize - 1;
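	/*
	 * Shift arithmetic used throughout: udf_l2d_shift converts logical
	 * blocks to DEV_BSIZE (512-byte) device blocks and udf_l2b_shift
	 * converts logical blocks to bytes.  For example, a 2048-byte
	 * logical block gives l2d_shift = 2 and l2b_shift = 11.
	 */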
	udf_vfsp->udf_l2d_shift = shift;
	udf_vfsp->udf_l2b_shift = shift + DEV_BSHIFT;

	/*
	 * Check if the media is in
	 * the proper domain.
	 */
	if (strcmp(udf_vfsp->udf_lvd->lvd_dom_id.reg_id,
	    UDF_DOMAIN_NAME) != 0) {
		goto out;
	}

	/*
	 * Fail if the AVDP offset does not match
	 * the logical block size recorded in the lvd.
	 */
	if (udf_vfsp->udf_lbsize != bsize) {
		goto out;
	}

	udf_vfsp->udf_iseq_loc =
	    SWAP_32(udf_vfsp->udf_lvd->lvd_int_seq_ext.ext_loc);
	udf_vfsp->udf_iseq_len =
	    SWAP_32(udf_vfsp->udf_lvd->lvd_int_seq_ext.ext_len);

	udf_vfsp->udf_fsd_prn =
	    SWAP_16(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_prn);
	udf_vfsp->udf_fsd_loc =
	    SWAP_32(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_loc);
	udf_vfsp->udf_fsd_len =
	    SWAP_32(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_len);


	/*
	 * process partitions
	 */
	udf_vfsp->udf_mtype = udf_vfsp->udf_parts[0].udp_access;
	for (index = 0; index < udf_vfsp->udf_npart; index ++) {
		if (udf_vfsp->udf_parts[index].udp_access <
		    udf_vfsp->udf_mtype) {
			udf_vfsp->udf_mtype =
			    udf_vfsp->udf_parts[index].udp_access;
		}
	}
	if ((udf_vfsp->udf_mtype < UDF_MT_RO) ||
	    (udf_vfsp->udf_mtype > UDF_MT_OW)) {
		udf_vfsp->udf_mtype = UDF_MT_RO;
	}

	udf_vfsp->udf_nmaps = 0;
	hdr = (struct pmap_hdr *)udf_vfsp->udf_lvd->lvd_pmaps;
	count = SWAP_32(udf_vfsp->udf_lvd->lvd_num_pmaps);
	for (index = 0; index < count; index++) {

		if ((hdr->maph_type == MAP_TYPE1) &&
		    (hdr->maph_length == MAP_TYPE1_LEN)) {
			typ1 = (struct pmap_typ1 *)hdr;

			map = udf_vfsp->udf_maps;
			udf_vfsp->udf_maps =
			    kmem_zalloc(sizeof (struct ud_map) *
			    (udf_vfsp->udf_nmaps + 1),
			    KM_SLEEP);
			if (map != NULL) {
				bcopy(map, udf_vfsp->udf_maps,
				    sizeof (struct ud_map) *
				    udf_vfsp->udf_nmaps);
				kmem_free(map,
				    sizeof (struct ud_map) *
				    udf_vfsp->udf_nmaps);
			}
			map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
			map->udm_flags = UDM_MAP_NORM;
			map->udm_vsn = SWAP_16(typ1->map1_vsn);
			map->udm_pn = SWAP_16(typ1->map1_pn);
			udf_vfsp->udf_nmaps ++;
		} else if ((hdr->maph_type == MAP_TYPE2) &&
		    (hdr->maph_length == MAP_TYPE2_LEN)) {
			typ2 = (struct pmap_typ2 *)hdr;

			if (strncmp(typ2->map2_pti.reg_id,
			    UDF_VIRT_PART, 23) == 0) {
				/*
				 * Add this virtual partition to the
				 * normal partition map table as well.
				 */
				map = udf_vfsp->udf_maps;
				udf_vfsp->udf_maps =
				    kmem_zalloc(sizeof (struct ud_map) *
				    (udf_vfsp->udf_nmaps + 1),
				    KM_SLEEP);
				if (map != NULL) {
					bcopy(map, udf_vfsp->udf_maps,
					    sizeof (struct ud_map) *
					    udf_vfsp->udf_nmaps);
					kmem_free(map,
					    sizeof (struct ud_map) *
					    udf_vfsp->udf_nmaps);
				}
				map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
				map->udm_flags = UDM_MAP_VPM;
				map->udm_vsn = SWAP_16(typ2->map2_vsn);
				map->udm_pn = SWAP_16(typ2->map2_pn);
				udf_vfsp->udf_nmaps ++;
				if (error = ud_get_last_block(dev, &lblkno)) {
					goto out;
				}
				if (error = ud_val_get_vat(udf_vfsp, dev,
				    lblkno, map)) {
					goto out;
				}
			} else if (strncmp(typ2->map2_pti.reg_id,
			    UDF_SPAR_PART, 23) == 0) {

				if (SWAP_16(typ2->map2_pl) != 32) {
					printf(
					    "Packet Length is not valid %x\n",
					    SWAP_16(typ2->map2_pl));
					goto out;
				}
				if ((typ2->map2_nst < 1) ||
				    (typ2->map2_nst > 4)) {
					goto out;
				}
				map = udf_vfsp->udf_maps;
				udf_vfsp->udf_maps =
				    kmem_zalloc(sizeof (struct ud_map) *
				    (udf_vfsp->udf_nmaps + 1),
				    KM_SLEEP);
				if (map != NULL) {
					bcopy(map, udf_vfsp->udf_maps,
					    sizeof (struct ud_map) *
					    udf_vfsp->udf_nmaps);
					kmem_free(map,
					    sizeof (struct ud_map) *
					    udf_vfsp->udf_nmaps);
				}
				map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
				map->udm_flags = UDM_MAP_SPM;
				map->udm_vsn = SWAP_16(typ2->map2_vsn);
				map->udm_pn = SWAP_16(typ2->map2_pn);

				udf_vfsp->udf_nmaps ++;

				if (error = ud_read_sparing_tbls(udf_vfsp,
				    dev, map, typ2)) {
					goto out;
				}
			} else {
				/*
				 * Unknown type of partition
				 * Bail out
				 */
				goto out;
			}
		} else {
			/*
			 * Unknown type of partition
			 * Bail out
			 */
			goto out;
		}
		hdr = (struct pmap_hdr *)(((uint8_t *)hdr) + hdr->maph_length);
	}


	/*
	 * Read Logical Volume Integrity Sequence
	 * and process it
	 */
	secbp = ud_bread(dev, udf_vfsp->udf_iseq_loc << shift,
	    udf_vfsp->udf_iseq_len);
	if ((error = geterror(secbp)) != 0) {
		cmn_err(CE_NOTE,
		    "udfs : Could not read Logical Volume Integrity Sequence %x",
		    error);
		brelse(secbp);
		goto out;
	}
	udf_vfsp->udf_iseq = ngeteblk(udf_vfsp->udf_iseq_len);
	bp = udf_vfsp->udf_iseq;
	bp->b_edev = dev;
	bp->b_dev = cmpdev(dev);
	bp->b_blkno = udf_vfsp->udf_iseq_loc << shift;
	bp->b_bcount = udf_vfsp->udf_iseq_len;
	bcopy(secbp->b_un.b_addr, bp->b_un.b_addr, udf_vfsp->udf_iseq_len);
	secbp->b_flags |= B_STALE | B_AGE;
	brelse(secbp);

	count = udf_vfsp->udf_iseq_len / DEV_BSIZE;
	addr = bp->b_un.b_addr;
	for (index = 0; index < count; index ++) {
		ttag = (struct tag *)(addr + index * DEV_BSIZE);
		desc_len = udf_vfsp->udf_iseq_len - (index * DEV_BSIZE);
		if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_INT,
		    udf_vfsp->udf_iseq_loc + (index >> shift),
		    1, desc_len) == 0) {

			struct log_vol_int_desc *lvid;

			lvid = (struct log_vol_int_desc *)ttag;
			udf_vfsp->udf_lvid = lvid;

			if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
				udf_vfsp->udf_clean = UDF_CLEAN;
			} else {
				udf_vfsp->udf_clean = UDF_DIRTY;
			}

			/*
			 * update superblock with the metadata
			 */
			ud_convert_to_superblock(udf_vfsp, lvid);
			break;
		}
	}

	if (udf_vfsp->udf_lvid == NULL) {
		goto out;
	}

	if ((blkno = ud_xlate_to_daddr(udf_vfsp,
	    udf_vfsp->udf_fsd_prn, udf_vfsp->udf_fsd_loc,
	    1, &dummy)) == 0) {
		goto out;
	}
	secbp = ud_bread(dev, blkno << shift, udf_vfsp->udf_fsd_len);
	if ((error = geterror(secbp)) != 0) {
		cmn_err(CE_NOTE,
		    "udfs : Could not read File Set Descriptor %x", error);
		brelse(secbp);
		goto out;
	}
	fsd = (struct file_set_desc *)secbp->b_un.b_addr;
	if (ud_verify_tag_and_desc(&fsd->fsd_tag, UD_FILE_SET_DESC,
	    udf_vfsp->udf_fsd_loc,
	    1, udf_vfsp->udf_fsd_len) != 0) {
		secbp->b_flags = B_AGE | B_STALE;
		brelse(secbp);
		goto out;
	}
	udf_vfsp->udf_ricb_prn = SWAP_16(fsd->fsd_root_icb.lad_ext_prn);
	udf_vfsp->udf_ricb_loc = SWAP_32(fsd->fsd_root_icb.lad_ext_loc);
	udf_vfsp->udf_ricb_len = SWAP_32(fsd->fsd_root_icb.lad_ext_len);
	secbp->b_flags = B_AGE | B_STALE;
	brelse(secbp);
	udf_vfsp->udf_root_blkno = ud_xlate_to_daddr(udf_vfsp,
	    udf_vfsp->udf_ricb_prn, udf_vfsp->udf_ricb_loc,
	    1, &dummy);

	return (udf_vfsp);
out:
	ud_destroy_fsp(udf_vfsp);

	return (NULL);
}

/*
 * Release resources held by one ud_map; the map data was zalloc'd in
 * ud_validate_and_fill_superblock() and its fields may later point to
 * valid data.
 */
static void
ud_free_map(struct ud_map *map)
{
	uint32_t n;

	if (map->udm_flags & UDM_MAP_VPM) {
		if (map->udm_count) {
			kmem_free(map->udm_count,
			    map->udm_nent * sizeof (*map->udm_count));
			map->udm_count = NULL;
		}
		if (map->udm_bp) {
			for (n = 0; n < map->udm_nent; n++) {
				if (map->udm_bp[n])
					brelse(map->udm_bp[n]);
			}
			kmem_free(map->udm_bp,
			    map->udm_nent * sizeof (*map->udm_bp));
			map->udm_bp = NULL;
		}
		if (map->udm_addr) {
			kmem_free(map->udm_addr,
			    map->udm_nent * sizeof (*map->udm_addr));
			map->udm_addr = NULL;
		}
	}
	if (map->udm_flags & UDM_MAP_SPM) {
		for (n = 0; n < MAX_SPM; n++) {
			if (map->udm_sbp[n]) {
				brelse(map->udm_sbp[n]);
				map->udm_sbp[n] = NULL;
				map->udm_spaddr[n] = NULL;
			}
		}
	}
}

void
ud_destroy_fsp(struct udf_vfs *udf_vfsp)
{
	int32_t i;

	ud_printf("ud_destroy_fsp\n");
	if (udf_vfsp == NULL)
		return;

	if (udf_vfsp->udf_maps) {
		for (i = 0; i < udf_vfsp->udf_nmaps; i++)
			ud_free_map(&udf_vfsp->udf_maps[i]);

		kmem_free(udf_vfsp->udf_maps,
		    udf_vfsp->udf_nmaps * sizeof (*udf_vfsp->udf_maps));
	}

	if (udf_vfsp->udf_parts) {
		kmem_free(udf_vfsp->udf_parts,
		    udf_vfsp->udf_npart * sizeof (*udf_vfsp->udf_parts));
	}
	if (udf_vfsp->udf_iseq) {
		udf_vfsp->udf_iseq->b_flags |= (B_STALE|B_AGE);
		brelse(udf_vfsp->udf_iseq);
	}
	if (udf_vfsp->udf_vds) {
		udf_vfsp->udf_vds->b_flags |= (B_STALE|B_AGE);
		brelse(udf_vfsp->udf_vds);
	}
	if (udf_vfsp->udf_vfs)
		ud_vfs_remove(udf_vfsp);
	if (udf_vfsp->udf_fsmnt) {
		kmem_free(udf_vfsp->udf_fsmnt,
		    strlen(udf_vfsp->udf_fsmnt) + 1);
	}
	kmem_free(udf_vfsp, sizeof (*udf_vfsp));
}

void
ud_convert_to_superblock(struct udf_vfs *udf_vfsp,
	struct log_vol_int_desc *lvid)
{
	int32_t i, c;
	uint32_t *temp;
	struct ud_part *ud_part;
	struct lvid_iu *iu;

	udf_vfsp->udf_maxuniq = SWAP_64(lvid->lvid_uniqid);
	temp = lvid->lvid_fst;
	c = SWAP_32(lvid->lvid_npart);
	ud_part = udf_vfsp->udf_parts;
	for (i = 0; i < c; i++) {
		if (i >= udf_vfsp->udf_npart) {
			continue;
		}
		ud_part->udp_nfree = SWAP_32(temp[i]);
		ud_part->udp_nblocks = SWAP_32(temp[c + i]);
		udf_vfsp->udf_freeblks += SWAP_32(temp[i]);
		udf_vfsp->udf_totalblks += SWAP_32(temp[c + i]);
		ud_part++;
	}

	iu = (struct lvid_iu *)(temp + c * 2);
	udf_vfsp->udf_nfiles = SWAP_32(iu->lvidiu_nfiles);
	udf_vfsp->udf_ndirs = SWAP_32(iu->lvidiu_ndirs);
	udf_vfsp->udf_miread = BCD2HEX_16(SWAP_16(iu->lvidiu_mread));
	udf_vfsp->udf_miwrite = BCD2HEX_16(SWAP_16(iu->lvidiu_mwrite));
	udf_vfsp->udf_mawrite = BCD2HEX_16(SWAP_16(iu->lvidiu_maxwr));
}

void
ud_update_superblock(struct vfs *vfsp)
{
	struct udf_vfs *udf_vfsp;

	ud_printf("ud_update_superblock\n");

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;

	mutex_enter(&udf_vfsp->udf_lock);
	ud_sbwrite(udf_vfsp);
	mutex_exit(&udf_vfsp->udf_lock);
}


#include <sys/dkio.h>
#include <sys/cdio.h>
#include <sys/vtoc.h>

/*
 * This part of the code is known
 * to work only on SPARC.  It needs
 * to be evaluated before using it with x86.
 */
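/*
 * Determine the last addressable block of the device by reading the VTOC
 * and controller info: the size of the slice we are mounted on (in sectors,
 * per the VTOC) is used as the end-of-media block number.  ud_get_lbsize()
 * falls back to a shorter anchor-point search if this fails.
 */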
int32_t
ud_get_last_block(dev_t dev, daddr_t *blkno)
{
	struct vtoc vtoc;
	struct dk_cinfo dki_info;
	int32_t rval, error;

	if ((error = cdev_ioctl(dev, DKIOCGVTOC, (intptr_t)&vtoc,
	    FKIOCTL|FREAD|FNATIVE, CRED(), &rval)) != 0) {
		cmn_err(CE_NOTE, "Could not get the vtoc information");
		return (error);
	}

	if (vtoc.v_sanity != VTOC_SANE) {
		return (EINVAL);
	}
	if ((error = cdev_ioctl(dev, DKIOCINFO, (intptr_t)&dki_info,
	    FKIOCTL|FREAD|FNATIVE, CRED(), &rval)) != 0) {
		cmn_err(CE_NOTE, "Could not get the slice information");
		return (error);
	}

	if (dki_info.dki_partition > V_NUMPAR) {
		return (EINVAL);
	}


	*blkno = vtoc.v_part[dki_info.dki_partition].p_size;

	return (0);
}

/* Search sequentially at N - 152, N - 150, N - 2, N for the VAT ICB */
/*
 * int32_t ud_sub_blks[] = {2, 0, 152, 150};
 */
int32_t ud_sub_blks[] = {152, 150, 2, 0};
int32_t ud_sub_count = 4;

/*
 * Validate the VAT ICB
 */
static int32_t
ud_val_get_vat(struct udf_vfs *udf_vfsp, dev_t dev,
	daddr_t blkno, struct ud_map *udm)
{
	struct buf *secbp;
	struct file_entry *fe;
	int32_t end_loc, i, j, ad_type;
	struct short_ad *sad;
	struct long_ad *lad;
	uint32_t count, blk;
	struct ud_part *ud_part;
	int err = 0;

	end_loc = (blkno >> udf_vfsp->udf_l2d_shift) - 1;

	for (i = 0; i < ud_sub_count; i++) {
		udm->udm_vat_icb = end_loc - ud_sub_blks[i];

		secbp = ud_bread(dev,
		    udm->udm_vat_icb << udf_vfsp->udf_l2d_shift,
		    udf_vfsp->udf_lbsize);
		ASSERT(secbp->b_un.b_addr);

		fe = (struct file_entry *)secbp->b_un.b_addr;
		if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY, 0,
		    0, 0) == 0) {
			if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
			    SWAP_32(fe->fe_tag.tag_loc),
			    1, udf_vfsp->udf_lbsize) == 0) {
				if (fe->fe_icb_tag.itag_ftype == 0) {
					break;
				}
			}
		}
		secbp->b_flags |= B_AGE | B_STALE;
		brelse(secbp);
	}
	if (i == ud_sub_count) {
		return (EINVAL);
	}

	ad_type = SWAP_16(fe->fe_icb_tag.itag_flags) & 0x3;
	if (ad_type == ICB_FLAG_ONE_AD) {
		udm->udm_nent = 1;
	} else if (ad_type == ICB_FLAG_SHORT_AD) {
		udm->udm_nent =
		    SWAP_32(fe->fe_len_adesc) / sizeof (struct short_ad);
	} else if (ad_type == ICB_FLAG_LONG_AD) {
		udm->udm_nent =
		    SWAP_32(fe->fe_len_adesc) / sizeof (struct long_ad);
	} else {
		err = EINVAL;
		goto end;
	}

	udm->udm_count = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_count),
	    KM_SLEEP);
	udm->udm_bp = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_bp),
	    KM_SLEEP);
	udm->udm_addr = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_addr),
	    KM_SLEEP);

	if (ad_type == ICB_FLAG_ONE_AD) {
		udm->udm_count[0] = (SWAP_64(fe->fe_info_len) - 36) /
		    sizeof (uint32_t);
		udm->udm_bp[0] = secbp;
		udm->udm_addr[0] = (uint32_t *)
		    &fe->fe_spec[SWAP_32(fe->fe_len_ear)];
		return (0);
	}
	for (i = 0; i < udm->udm_nent; i++) {
		if (ad_type == ICB_FLAG_SHORT_AD) {
			sad = (struct short_ad *)
			    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
			sad += i;
			count = SWAP_32(sad->sad_ext_len);
			blk = SWAP_32(sad->sad_ext_loc);
		} else {
			lad = (struct long_ad *)
			    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
			lad += i;
			count = SWAP_32(lad->lad_ext_len);
			blk = SWAP_32(lad->lad_ext_loc);
			ASSERT(SWAP_16(lad->lad_ext_prn) == udm->udm_pn);
		}
		if ((count & 0x3FFFFFFF) == 0) {
			break;
		}
		if (i < udm->udm_nent - 1) {
			udm->udm_count[i] = count / 4;
		} else {
			udm->udm_count[i] = (count - 36) / 4;
		}
		ud_part = udf_vfsp->udf_parts;
		for (j = 0; j < udf_vfsp->udf_npart; j++) {
			if (udm->udm_pn == ud_part->udp_number) {
				blk = ud_part->udp_start + blk;
				break;
			}
		}
		if (j == udf_vfsp->udf_npart) {
			err = EINVAL;
			break;
		}

		count = (count + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		udm->udm_bp[i] = ud_bread(dev,
		    blk << udf_vfsp->udf_l2d_shift, count);
		if ((udm->udm_bp[i]->b_error != 0) ||
		    (udm->udm_bp[i]->b_resid)) {
			err = EINVAL;
			break;
		}
		udm->udm_addr[i] = (uint32_t *)udm->udm_bp[i]->b_un.b_addr;
	}

end:
	if (err)
		ud_free_map(udm);
	secbp->b_flags |= B_AGE | B_STALE;
	brelse(secbp);
	return (err);
}

int32_t
ud_read_sparing_tbls(struct udf_vfs *udf_vfsp,
	dev_t dev, struct ud_map *map, struct pmap_typ2 *typ2)
{
	int32_t index, valid = 0;
	uint32_t sz;
	struct buf *bp;
	struct stbl *stbl;

	map->udm_plen = SWAP_16(typ2->map2_pl);
	map->udm_nspm = typ2->map2_nst;
	map->udm_spsz = SWAP_32(typ2->map2_sest);
	sz = (map->udm_spsz + udf_vfsp->udf_lbmask) & ~udf_vfsp->udf_lbmask;
	if (sz == 0) {
		return (0);
	}

	for (index = 0; index < map->udm_nspm; index++) {
		map->udm_loc[index] = SWAP_32(typ2->map2_st[index]);

		bp = ud_bread(dev,
		    map->udm_loc[index] << udf_vfsp->udf_l2d_shift, sz);
		if ((bp->b_error != 0) || (bp->b_resid)) {
			brelse(bp);
			continue;
		}
		stbl = (struct stbl *)bp->b_un.b_addr;
		if (strncmp(stbl->stbl_si.reg_id, UDF_SPAR_TBL, 23) != 0) {
			printf("Sparing Identifier does not match\n");
			bp->b_flags |= B_AGE | B_STALE;
			brelse(bp);
			continue;
		}
		map->udm_sbp[index] = bp;
		map->udm_spaddr[index] = bp->b_un.b_addr;
#ifdef	UNDEF
{
		struct stbl_entry *te;
		int32_t i, tbl_len;

		te = (struct stbl_entry *)&stbl->stbl_entry;
		tbl_len = SWAP_16(stbl->stbl_len);

		printf("%x %x\n", tbl_len, SWAP_32(stbl->stbl_seqno));
		printf("%x %x\n", bp->b_un.b_addr, te);

		for (i = 0; i < tbl_len; i++) {
			printf("%x %x\n", SWAP_32(te->sent_ol),
			    SWAP_32(te->sent_ml));
			te ++;
		}
}
#endif
		valid ++;
	}

	if (valid) {
		return (0);
	}
	return (EINVAL);
}

uint32_t
ud_get_lbsize(dev_t dev, uint32_t *loc)
{
	int32_t bsize, shift, index, end_index;
	daddr_t last_block;
	uint32_t avd_loc;
	struct buf *bp;
	struct anch_vol_desc_ptr *avdp;
	uint32_t session_offset = 0;
	int32_t rval;

	if (ud_get_last_block(dev, &last_block) != 0) {
		end_index = 1;
	} else {
		end_index = 3;
	}

	if (cdev_ioctl(dev, CDROMREADOFFSET, (intptr_t)&session_offset,
	    FKIOCTL|FREAD|FNATIVE, CRED(), &rval) != 0) {
		session_offset = 0;
	}

	for (index = 0; index < end_index; index++) {

		for (bsize = DEV_BSIZE, shift = 0;
		    bsize <= MAXBSIZE; bsize <<= 1, shift++) {

			if (index == 0) {
				avd_loc = 256;
				if (bsize <= 2048) {
					avd_loc +=
					    session_offset * 2048 / bsize;
				} else {
					avd_loc +=
					    session_offset / (bsize / 2048);
				}
			} else if (index == 1) {
				avd_loc = last_block - (1 << shift);
			} else {
				avd_loc = last_block - (256 << shift);
			}

			bp = ud_bread(dev, avd_loc << shift,
			    ANCHOR_VOL_DESC_LEN);
			if (geterror(bp) != 0) {
				brelse(bp);
				continue;
			}

			/*
			 * Verify that we have an avdp here
			 */
			avdp = (struct anch_vol_desc_ptr *)bp->b_un.b_addr;
			if (ud_verify_tag_and_desc(&avdp->avd_tag,
			    UD_ANCH_VOL_DESC, avd_loc,
			    1, ANCHOR_VOL_DESC_LEN) != 0) {
				bp->b_flags |= B_AGE | B_STALE;
				brelse(bp);
				continue;
			}
			bp->b_flags |= B_AGE | B_STALE;
			brelse(bp);
			*loc = avd_loc;
			return (bsize);
		}
	}

	/*
	 * Did not find the AVD at any of the locations
	 */
	return (0);
}

static int
udfinit(int fstype, char *name)
{
	static const fs_operation_def_t udf_vfsops_template[] = {
		VFSNAME_MOUNT,		{ .vfs_mount = udf_mount },
		VFSNAME_UNMOUNT,	{ .vfs_unmount = udf_unmount },
		VFSNAME_ROOT,		{ .vfs_root = udf_root },
		VFSNAME_STATVFS,	{ .vfs_statvfs = udf_statvfs },
		VFSNAME_SYNC,		{ .vfs_sync = udf_sync },
		VFSNAME_VGET,		{ .vfs_vget = udf_vget },
		VFSNAME_MOUNTROOT,	{ .vfs_mountroot = udf_mountroot },
		NULL,			NULL
	};
	extern struct vnodeops *udf_vnodeops;
	extern const fs_operation_def_t udf_vnodeops_template[];
	int error;

	ud_printf("udfinit\n");

	error = vfs_setfsops(fstype, udf_vfsops_template, NULL);
	if (error != 0) {
		cmn_err(CE_WARN, "udfinit: bad vfs ops template");
		return (error);
	}

	error = vn_make_ops(name, udf_vnodeops_template, &udf_vnodeops);
	if (error != 0) {
		(void) vfs_freevfsops_by_type(fstype);
		cmn_err(CE_WARN, "udfinit: bad vnode ops template");
		return (error);
	}

	udf_fstype = fstype;

	ud_init_inodes();

	return (0);
}