/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mode.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/dnlc.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/fbuf.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/dirent.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/statvfs.h>
#include <sys/mount.h>
#include <sys/sunddi.h>
#include <sys/bootconf.h>
#include <sys/policy.h>

#include <vm/hat.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kmem.h>
#include <vm/seg_vn.h>
#include <vm/rm.h>
#include <vm/page.h>
#include <sys/swap.h>
#include <sys/mntent.h>


#include <fs/fs_subr.h>


#include <sys/fs/udf_volume.h>
#include <sys/fs/udf_inode.h>


extern struct vnode *common_specvp(struct vnode *vp);

extern kmutex_t ud_sync_busy;
static int32_t ud_mountfs(struct vfs *,
	enum whymountroot, dev_t, char *, struct cred *, int32_t);
static struct udf_vfs *ud_validate_and_fill_superblock(dev_t,
	int32_t, uint32_t);
void ud_destroy_fsp(struct udf_vfs *);
void ud_convert_to_superblock(struct udf_vfs *,
	struct log_vol_int_desc *);
void ud_update_superblock(struct vfs *);
int32_t ud_get_last_block(dev_t, daddr_t *);
static int32_t ud_val_get_vat(struct udf_vfs *,
	dev_t, daddr_t, struct ud_map *);
int32_t ud_read_sparing_tbls(struct udf_vfs *,
	dev_t, struct ud_map *, struct pmap_typ2 *);
uint32_t ud_get_lbsize(dev_t, uint32_t *);

static int32_t udf_mount(struct vfs *,
	struct vnode *, struct mounta *, struct cred *);
static int32_t udf_unmount(struct vfs *, int, struct cred *);
static int32_t udf_root(struct vfs *, struct vnode **);
static int32_t udf_statvfs(struct vfs *, struct statvfs64 *);
static int32_t udf_sync(struct vfs *, int16_t,
	struct cred *);
static int32_t udf_vget(struct vfs *, struct vnode **, struct fid *);
static int32_t udf_mountroot(struct vfs *vfsp, enum whymountroot);

static int udfinit(int, char *);

static mntopts_t udfs_mntopts;

static vfsdef_t vfw = {
	VFSDEF_VERSION,
	"udfs",
	udfinit,
	VSW_HASPROTO|VSW_CANREMOUNT|VSW_STATS|VSW_CANLOFI,
	&udfs_mntopts
};

static mntopts_t udfs_mntopts = {
	0,
	NULL
};

/*
 * Module linkage information for the kernel.
 */
extern struct mod_ops mod_fsops;

static struct modlfs modlfs = {
	&mod_fsops, "filesystem for UDFS", &vfw
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlfs, NULL
};

char _depends_on[] = "fs/specfs";

int32_t udf_fstype = -1;

int
_init()
{
	return (mod_install(&modlinkage));
}

int
_fini()
{
	return (EBUSY);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


/* -------------------- vfs routines -------------------- */

/*
 * XXX - this appears only to be used by the VM code to handle the case where
 * UNIX is running off the mini-root.  That probably wants to be done
 * differently.
 */
struct vnode *rootvp;
#ifndef	__lint
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", rootvp))
#endif

static int32_t
udf_mount(struct vfs *vfsp, struct vnode *mvp,
    struct mounta *uap, struct cred *cr)
{
	dev_t dev;
	struct vnode *lvp = NULL;
	struct vnode *svp = NULL;
	struct pathname dpn;
	int32_t error;
	enum whymountroot why;
	int oflag, aflag;

	ud_printf("udf_mount\n");

	if ((error = secpolicy_fs_mount(cr, mvp, vfsp)) != 0) {
		return (error);
	}

	if (mvp->v_type != VDIR) {
		return (ENOTDIR);
	}

	mutex_enter(&mvp->v_lock);
	if ((uap->flags & MS_REMOUNT) == 0 &&
	    (uap->flags & MS_OVERLAY) == 0 &&
	    (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
		mutex_exit(&mvp->v_lock);
		return (EBUSY);
	}
	mutex_exit(&mvp->v_lock);

	if (error = pn_get(uap->dir, UIO_USERSPACE, &dpn)) {
		return (error);
	}

	/*
	 * Resolve path name of the file being mounted.
	 */
	if (error = lookupname(uap->spec, UIO_USERSPACE, FOLLOW, NULLVPP,
	    &svp)) {
		pn_free(&dpn);
		return (error);
	}

	error = vfs_get_lofi(vfsp, &lvp);

	if (error > 0) {
		if (error == ENOENT)
			error = ENODEV;
		goto out;
	} else if (error == 0) {
		dev = lvp->v_rdev;
	} else {
		dev = svp->v_rdev;

		if (svp->v_type != VBLK) {
			error = ENOTBLK;
			goto out;
		}
	}

	/*
	 * Ensure that this device isn't already mounted,
	 * unless this is a REMOUNT request
	 */
	if (vfs_devmounting(dev, vfsp)) {
		error = EBUSY;
		goto out;
	}
	if (vfs_devismounted(dev)) {
		if (uap->flags & MS_REMOUNT) {
			why = ROOT_REMOUNT;
		} else {
			error = EBUSY;
			goto out;
		}
	} else {
		why = ROOT_INIT;
	}
	if (getmajor(dev) >= devcnt) {
		error = ENXIO;
		goto out;
	}

	/*
	 * If the device is a tape, mount it read only
	 */
	if (devopsp[getmajor(dev)]->devo_cb_ops->cb_flag & D_TAPE) {
		vfsp->vfs_flag |= VFS_RDONLY;
	}

	if (uap->flags & MS_RDONLY) {
		vfsp->vfs_flag |= VFS_RDONLY;
	}

	/*
	 * Set mount options.
	 */
	if (uap->flags & MS_RDONLY) {
		vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
	}
	if (uap->flags & MS_NOSUID) {
		vfs_setmntopt(vfsp, MNTOPT_NOSUID, NULL, 0);
	}

	/*
	 * Verify that the caller can open the device special file as
	 * required.  It is not until this moment that we know whether
	 * we're mounting "ro" or not.
	 */
	if ((vfsp->vfs_flag & VFS_RDONLY) != 0) {
		oflag = FREAD;
		aflag = VREAD;
	} else {
		oflag = FREAD | FWRITE;
		aflag = VREAD | VWRITE;
	}

	if (lvp == NULL &&
	    (error = secpolicy_spec_open(cr, svp, oflag)) != 0)
		goto out;

	if ((error = VOP_ACCESS(svp, aflag, 0, cr, NULL)) != 0)
		goto out;

	/*
	 * Mount the filesystem.
	 */
	error = ud_mountfs(vfsp, why, dev, dpn.pn_path, cr, 0);
out:
	VN_RELE(svp);
	if (lvp != NULL)
		VN_RELE(lvp);
	pn_free(&dpn);
	return (error);
}



/*
 * unmount the file system pointed
 * to by vfsp
 */
/* ARGSUSED */
static int32_t
udf_unmount(struct vfs *vfsp, int fflag, struct cred *cr)
{
	struct udf_vfs *udf_vfsp;
	struct vnode *bvp, *rvp;
	struct ud_inode *rip;
	int32_t flag;

	ud_printf("udf_unmount\n");

	if (secpolicy_fs_unmount(cr, vfsp) != 0) {
		return (EPERM);
	}

	/*
	 * Forced unmount is not supported by this file system,
	 * so ENOTSUP is returned.
	 */
	if (fflag & MS_FORCE)
		return (ENOTSUP);

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	flag = !(udf_vfsp->udf_flags & UDF_FL_RDONLY);
	bvp = udf_vfsp->udf_devvp;

	rvp = udf_vfsp->udf_root;
	ASSERT(rvp != NULL);
	rip = VTOI(rvp);

	(void) ud_release_cache(udf_vfsp);


	/* Flush all inodes except root */
	if (ud_iflush(vfsp) < 0) {
		return (EBUSY);
	}

	rw_enter(&rip->i_contents, RW_WRITER);
	(void) ud_syncip(rip, B_INVAL, I_SYNC);
	rw_exit(&rip->i_contents);

	mutex_enter(&ud_sync_busy);
	if ((udf_vfsp->udf_flags & UDF_FL_RDONLY) == 0) {
		bflush(vfsp->vfs_dev);
		mutex_enter(&udf_vfsp->udf_lock);
		udf_vfsp->udf_clean = UDF_CLEAN;
		mutex_exit(&udf_vfsp->udf_lock);
		ud_update_superblock(vfsp);
	}
	mutex_exit(&ud_sync_busy);

	mutex_destroy(&udf_vfsp->udf_lock);
	mutex_destroy(&udf_vfsp->udf_rename_lck);

	ud_delcache(rip);
	ITIMES(rip);
	VN_RELE(rvp);

	ud_destroy_fsp(udf_vfsp);

	(void) VOP_PUTPAGE(bvp, (offset_t)0, (uint32_t)0, B_INVAL, cr, NULL);
	(void) VOP_CLOSE(bvp, flag, 1, (offset_t)0, cr, NULL);

	(void) bfinval(vfsp->vfs_dev, 1);
	VN_RELE(bvp);


	return (0);
}


/*
 * Get the root vp for the
 * file system
 */
static int32_t
udf_root(struct vfs *vfsp, struct vnode **vpp)
{
	struct udf_vfs *udf_vfsp;
	struct vnode *vp;

	ud_printf("udf_root\n");

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;

	ASSERT(udf_vfsp != NULL);
	ASSERT(udf_vfsp->udf_root != NULL);

	vp = udf_vfsp->udf_root;
	VN_HOLD(vp);
	*vpp = vp;
	return (0);
}


/*
 * Get file system statistics.
 */
static int32_t
udf_statvfs(struct vfs *vfsp, struct statvfs64 *sp)
{
	struct udf_vfs *udf_vfsp;
	struct ud_part *parts;
	dev32_t d32;
	int32_t index;

	ud_printf("udf_statvfs\n");

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	(void) bzero(sp, sizeof (struct statvfs64));

	mutex_enter(&udf_vfsp->udf_lock);
	sp->f_bsize = udf_vfsp->udf_lbsize;
	sp->f_frsize = udf_vfsp->udf_lbsize;
	sp->f_blocks = 0;
	sp->f_bfree = 0;
	parts = udf_vfsp->udf_parts;
	for (index = 0; index < udf_vfsp->udf_npart; index++) {
		sp->f_blocks += parts->udp_nblocks;
		sp->f_bfree += parts->udp_nfree;
		parts++;
	}
	sp->f_bavail = sp->f_bfree;

	/*
	 * Since there are no real inodes allocated,
	 * we approximate that each new file will occupy:
	 * 38 (overhead per dirent) + MAXNAMLEN / 2 + inode_size (== block size)
	 */
	sp->f_ffree = sp->f_favail =
	    (sp->f_bavail * sp->f_bsize) / (146 + sp->f_bsize);

	/*
	 * The total number of inodes is
	 * the sum of files + directories + free inodes
	 */
	sp->f_files = sp->f_ffree + udf_vfsp->udf_nfiles + udf_vfsp->udf_ndirs;
	(void) cmpldev(&d32, vfsp->vfs_dev);
	sp->f_fsid = d32;
	(void) strcpy(sp->f_basetype, vfssw[vfsp->vfs_fstype].vsw_name);
	sp->f_flag = vf_to_stf(vfsp->vfs_flag);
	sp->f_namemax = MAXNAMLEN;
	(void) strcpy(sp->f_fstr, udf_vfsp->udf_volid);

	mutex_exit(&udf_vfsp->udf_lock);

	return (0);
}


/*
 * Flush any pending I/O to file system vfsp.
 * The ud_update() routine will only flush *all* udf files.
 */
/* ARGSUSED */
static int32_t
udf_sync(struct vfs *vfsp, int16_t flag, struct cred *cr)
{
	ud_printf("udf_sync\n");

	ud_update(flag);
	return (0);
}



/* ARGSUSED */
static int32_t
udf_vget(struct vfs *vfsp,
    struct vnode **vpp, struct fid *fidp)
{
	int32_t error = 0;
	struct udf_fid *udfid;
	struct udf_vfs *udf_vfsp;
	struct ud_inode *ip;

	ud_printf("udf_vget\n");

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	if (udf_vfsp == NULL) {
		*vpp = NULL;
		return (0);
	}

	udfid = (struct udf_fid *)fidp;
	if ((error = ud_iget(vfsp, udfid->udfid_prn,
	    udfid->udfid_icb_lbn, &ip, NULL, CRED())) != 0) {
		*vpp = NULL;
		return (error);
	}

	rw_enter(&ip->i_contents, RW_READER);
	if ((udfid->udfid_uinq_lo != (ip->i_uniqid & 0xffffffff)) ||
	    (udfid->udfid_prn != ip->i_icb_prn)) {
		rw_exit(&ip->i_contents);
		VN_RELE(ITOV(ip));
		*vpp = NULL;
		return (EINVAL);
	}
	rw_exit(&ip->i_contents);

	*vpp = ITOV(ip);
	return (0);
}


/*
 * Mount root file system.
 * "why" is ROOT_INIT on initial call, ROOT_REMOUNT if called to
 * remount the root file system, and ROOT_UNMOUNT if called to
 * unmount the root (e.g., as part of a system shutdown).
 *
 * XXX - this may be partially machine-dependent; it, along with the VFS_SWAPVP
 * operation, goes along with auto-configuration.  A mechanism should be
 * provided by which machine-INdependent code in the kernel can say "get me the
 * right root file system" and "get me the right initial swap area", and have
 * that done in what may well be a machine-dependent fashion.
 * Unfortunately, it is also file-system-type dependent (NFS gets it via
 * bootparams calls, UFS gets it from various and sundry machine-dependent
 * mechanisms, as SPECFS does for swap).
 */
/* ARGSUSED */
static int32_t
udf_mountroot(struct vfs *vfsp, enum whymountroot why)
{
	dev_t rootdev;
	static int32_t udf_rootdone = 0;
	struct vnode *vp = NULL;
	int32_t ovflags, error;

	ud_printf("udf_mountroot\n");

	if (why == ROOT_INIT) {
		if (udf_rootdone++) {
			return (EBUSY);
		}
		rootdev = getrootdev();
		if (rootdev == (dev_t)NODEV) {
			return (ENODEV);
		}
		vfsp->vfs_dev = rootdev;
		vfsp->vfs_flag |= VFS_RDONLY;
	} else if (why == ROOT_REMOUNT) {
		vp = ((struct udf_vfs *)vfsp->vfs_data)->udf_devvp;
		(void) dnlc_purge_vfsp(vfsp, 0);
		vp = common_specvp(vp);
		(void) VOP_PUTPAGE(vp, (offset_t)0,
		    (uint32_t)0, B_INVAL, CRED(), NULL);
		binval(vfsp->vfs_dev);

		ovflags = vfsp->vfs_flag;
		vfsp->vfs_flag &= ~VFS_RDONLY;
		vfsp->vfs_flag |= VFS_REMOUNT;
		rootdev = vfsp->vfs_dev;
	} else if (why == ROOT_UNMOUNT) {
		ud_update(0);
		vp = ((struct udf_vfs *)vfsp->vfs_data)->udf_devvp;
		(void) VOP_CLOSE(vp, FREAD|FWRITE, 1,
		    (offset_t)0, CRED(), NULL);
		return (0);
	}

	if ((error = vfs_lock(vfsp)) != 0) {
		return (error);
	}

	error = ud_mountfs(vfsp, why, rootdev, "/", CRED(), 1);
	if (error) {
		vfs_unlock(vfsp);
		if (why == ROOT_REMOUNT) {
			vfsp->vfs_flag = ovflags;
		}
		if (rootvp) {
			VN_RELE(rootvp);
			rootvp = (struct vnode *)0;
		}
		return (error);
	}

	if (why == ROOT_INIT) {
		vfs_add((struct vnode *)0, vfsp,
		    (vfsp->vfs_flag & VFS_RDONLY) ? MS_RDONLY : 0);
	}
	vfs_unlock(vfsp);
	return (0);
}


/* ------------------------- local routines ------------------------- */


static int32_t
ud_mountfs(struct vfs *vfsp,
    enum whymountroot why, dev_t dev, char *name,
    struct cred *cr, int32_t isroot)
{
	struct vnode *devvp = NULL;
	int32_t error = 0;
	int32_t needclose = 0;
	struct udf_vfs *udf_vfsp = NULL;
	struct log_vol_int_desc *lvid;
	struct ud_inode *rip = NULL;
	struct vnode *rvp = NULL;
	int32_t i, lbsize;
	uint32_t avd_loc;
	struct ud_map *map;
	int32_t desc_len;

	ud_printf("ud_mountfs\n");

	if (why == ROOT_INIT) {
		/*
		 * Open the device.
		 */
		devvp = makespecvp(dev, VBLK);

		/*
		 * Open block device mounted on.
		 * When bio is fixed for vnodes this can all be vnode
		 * operations.
		 */
		error = VOP_OPEN(&devvp,
		    (vfsp->vfs_flag & VFS_RDONLY) ? FREAD : FREAD|FWRITE,
		    cr, NULL);
		if (error) {
			goto out;
		}
		needclose = 1;

		/*
		 * Refuse to go any further if this
		 * device is being used for swapping.
		 */
		if (IS_SWAPVP(devvp)) {
			error = EBUSY;
			goto out;
		}
	}

	/*
	 * check for dev already mounted on
	 */
	if (vfsp->vfs_flag & VFS_REMOUNT) {
		struct tag *ttag;
		int32_t index, count;
		struct buf *tpt = 0;
		caddr_t addr;


		/* cannot remount to RDONLY */
		if (vfsp->vfs_flag & VFS_RDONLY) {
			return (EINVAL);
		}

		if (vfsp->vfs_dev != dev) {
			return (EINVAL);
		}

		udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
		devvp = udf_vfsp->udf_devvp;

		/*
		 * fsck may have altered the file system; discard
		 * as much incore data as possible.  Don't flush
		 * if this is a rw to rw remount; it's just resetting
		 * the options.
		 */
		if (udf_vfsp->udf_flags & UDF_FL_RDONLY) {
			(void) dnlc_purge_vfsp(vfsp, 0);
			(void) VOP_PUTPAGE(devvp, (offset_t)0, (uint_t)0,
			    B_INVAL, CRED(), NULL);
			(void) ud_iflush(vfsp);
			bflush(dev);
			binval(dev);
		}

		/*
		 * We can read and write only UDF 1.50;
		 * disallow mount of any higher version.
		 */
		if ((udf_vfsp->udf_miread > UDF_150) ||
		    (udf_vfsp->udf_miwrite > UDF_150)) {
			error = EINVAL;
			goto remountout;
		}

		/*
		 * read/write to read/write; all done
		 */
		if (udf_vfsp->udf_flags & UDF_FL_RW) {
			goto remountout;
		}

		/*
		 * Does the media type allow a writable mount?
		 */
		if (udf_vfsp->udf_mtype != UDF_MT_OW) {
			error = EINVAL;
			goto remountout;
		}

		/*
		 * Read the metadata
		 * and check if it is possible to
		 * mount in rw mode
		 */
		tpt = ud_bread(vfsp->vfs_dev,
		    udf_vfsp->udf_iseq_loc << udf_vfsp->udf_l2d_shift,
		    udf_vfsp->udf_iseq_len);
		if (tpt->b_flags & B_ERROR) {
			error = EIO;
			goto remountout;
		}
		count = udf_vfsp->udf_iseq_len / DEV_BSIZE;
		addr = tpt->b_un.b_addr;
		for (index = 0; index < count; index++) {
			ttag = (struct tag *)(addr + index * DEV_BSIZE);
			desc_len = udf_vfsp->udf_iseq_len - (index * DEV_BSIZE);
			if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_INT,
			    udf_vfsp->udf_iseq_loc +
			    (index >> udf_vfsp->udf_l2d_shift),
			    1, desc_len) == 0) {
				struct log_vol_int_desc *lvid;

				lvid = (struct log_vol_int_desc *)ttag;

				if (SWAP_32(lvid->lvid_int_type) !=
				    LOG_VOL_CLOSE_INT) {
					error = EINVAL;
					goto remountout;
				}

				/*
				 * Copy new data to old data
				 */
				bcopy(udf_vfsp->udf_iseq->b_un.b_addr,
				    tpt->b_un.b_addr, udf_vfsp->udf_iseq_len);
				break;
			}
		}

		udf_vfsp->udf_flags = UDF_FL_RW;

		mutex_enter(&udf_vfsp->udf_lock);
		ud_sbwrite(udf_vfsp);
		mutex_exit(&udf_vfsp->udf_lock);
remountout:
		if (tpt != NULL) {
			tpt->b_flags = B_AGE | B_STALE;
			brelse(tpt);
		}
		return (error);
	}

	ASSERT(devvp != 0);
	/*
	 * Flush back any dirty pages on the block device to
	 * try and keep the buffer cache in sync with the page
	 * cache if someone is trying to use block devices when
	 * they really should be using the raw device.
	 */
	(void) VOP_PUTPAGE(common_specvp(devvp), (offset_t)0,
	    (uint32_t)0, B_INVAL, cr, NULL);


	/*
	 * Check if the file system
	 * is a valid udfs and fill
	 * the required fields in udf_vfs
	 */
#ifndef	__lint
	_NOTE(NO_COMPETING_THREADS_NOW);
#endif

	if ((lbsize = ud_get_lbsize(dev, &avd_loc)) == 0) {
		error = EINVAL;
		goto out;
	}

	udf_vfsp = ud_validate_and_fill_superblock(dev, lbsize, avd_loc);
	if (udf_vfsp == NULL) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Fill in vfs private data
	 */
	vfsp->vfs_fstype = udf_fstype;
	vfs_make_fsid(&vfsp->vfs_fsid, dev, udf_fstype);
	vfsp->vfs_data = (caddr_t)udf_vfsp;
	vfsp->vfs_dev = dev;
	vfsp->vfs_flag |= VFS_NOTRUNC;
	udf_vfsp->udf_devvp = devvp;

	udf_vfsp->udf_fsmnt = kmem_zalloc(strlen(name) + 1, KM_SLEEP);
	(void) strcpy(udf_vfsp->udf_fsmnt, name);

	udf_vfsp->udf_vfs = vfsp;
	udf_vfsp->udf_rdclustsz = udf_vfsp->udf_wrclustsz = maxphys;

	udf_vfsp->udf_mod = 0;


	lvid = udf_vfsp->udf_lvid;
	if (vfsp->vfs_flag & VFS_RDONLY) {
		/*
		 * We can read only UDF 1.50;
		 * disallow mount of any higher version.
		 */
		if (udf_vfsp->udf_miread > UDF_150) {
			error = EINVAL;
			goto out;
		}
		udf_vfsp->udf_flags = UDF_FL_RDONLY;
		if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
			udf_vfsp->udf_clean = UDF_CLEAN;
		} else {
			/* Do we have a VAT at the end of the recorded media */
			map = udf_vfsp->udf_maps;
			for (i = 0; i < udf_vfsp->udf_nmaps; i++) {
				if (map->udm_flags & UDM_MAP_VPM) {
					break;
				}
				map++;
			}
			if (i == udf_vfsp->udf_nmaps) {
				error = ENOSPC;
				goto out;
			}
			udf_vfsp->udf_clean = UDF_CLEAN;
		}
	} else {
		/*
		 * We can read and write only UDF 1.50;
		 * disallow mount of any higher version.
		 */
		if ((udf_vfsp->udf_miread > UDF_150) ||
		    (udf_vfsp->udf_miwrite > UDF_150)) {
			error = EINVAL;
			goto out;
		}
		/*
		 * Check if the media allows
		 * us to mount read/write
		 */
		if (udf_vfsp->udf_mtype != UDF_MT_OW) {
			error = EACCES;
			goto out;
		}

		/*
		 * Check if we have a VAT on writable media;
		 * we cannot use the media in the presence of a VAT.
		 * Deny RW mount.
		 */
		map = udf_vfsp->udf_maps;
		ASSERT(map != NULL);
		for (i = 0; i < udf_vfsp->udf_nmaps; i++) {
			if (map->udm_flags & UDM_MAP_VPM) {
				error = EACCES;
				goto out;
			}
			map++;
		}

		/*
		 * Check if the domain Id allows
		 * us to write
		 */
		if (udf_vfsp->udf_lvd->lvd_dom_id.reg_ids[2] & 0x3) {
			error = EACCES;
			goto out;
		}
		udf_vfsp->udf_flags = UDF_FL_RW;

		if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
			udf_vfsp->udf_clean = UDF_CLEAN;
		} else {
			if (isroot) {
				udf_vfsp->udf_clean = UDF_DIRTY;
			} else {
				error = ENOSPC;
				goto out;
			}
		}
	}

	mutex_init(&udf_vfsp->udf_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&udf_vfsp->udf_rename_lck, NULL, MUTEX_DEFAULT, NULL);

#ifndef	__lint
	_NOTE(COMPETING_THREADS_NOW);
#endif
	if (error = ud_iget(vfsp, udf_vfsp->udf_ricb_prn,
	    udf_vfsp->udf_ricb_loc, &rip, NULL, cr)) {
		mutex_destroy(&udf_vfsp->udf_lock);
		goto out;
	}


	/*
	 * Get the root inode and
	 * initialize the root vnode
	 */
	rvp = ITOV(rip);
	mutex_enter(&rvp->v_lock);
	rvp->v_flag |= VROOT;
	mutex_exit(&rvp->v_lock);
	udf_vfsp->udf_root = rvp;


	if (why == ROOT_INIT && isroot)
		rootvp = devvp;

	ud_vfs_add(udf_vfsp);

	if (udf_vfsp->udf_flags == UDF_FL_RW) {
		udf_vfsp->udf_clean = UDF_DIRTY;
		ud_update_superblock(vfsp);
	}

	return (0);

out:
	ud_destroy_fsp(udf_vfsp);
	if (needclose) {
		(void) VOP_CLOSE(devvp, (vfsp->vfs_flag & VFS_RDONLY) ?
		    FREAD : FREAD|FWRITE, 1, (offset_t)0, cr, NULL);
		bflush(dev);
		binval(dev);
	}
	VN_RELE(devvp);

	return (error);
}


static struct udf_vfs *
ud_validate_and_fill_superblock(dev_t dev, int32_t bsize, uint32_t avd_loc)
{
	int32_t error, count, index, shift;
	uint32_t dummy, vds_loc;
	caddr_t addr;
	daddr_t blkno, lblkno;
	struct buf *secbp, *bp;
	struct tag *ttag;
	struct anch_vol_desc_ptr *avdp;
	struct file_set_desc *fsd;
	struct udf_vfs *udf_vfsp = NULL;
	struct pmap_hdr *hdr;
	struct pmap_typ1 *typ1;
	struct pmap_typ2 *typ2;
	struct ud_map *map;
	int32_t desc_len;

	ud_printf("ud_validate_and_fill_superblock\n");

	if (bsize < DEV_BSIZE) {
		return (NULL);
	}
	shift = 0;
	while ((bsize >> shift) > DEV_BSIZE) {
		shift++;
	}

	/*
	 * Read Anchor Volume Descriptor
	 * Verify it and get the location of
	 * Main Volume Descriptor Sequence
	 */
	secbp = ud_bread(dev, avd_loc << shift, ANCHOR_VOL_DESC_LEN);
	if ((error = geterror(secbp)) != 0) {
		cmn_err(CE_NOTE, "udfs : Could not read Anchor Volume Desc %x",
		    error);
		brelse(secbp);
		return (NULL);
	}
	avdp = (struct anch_vol_desc_ptr *)secbp->b_un.b_addr;
	if (ud_verify_tag_and_desc(&avdp->avd_tag, UD_ANCH_VOL_DESC,
	    avd_loc, 1, ANCHOR_VOL_DESC_LEN) != 0) {
		brelse(secbp);
		return (NULL);
	}
	udf_vfsp = (struct udf_vfs *)
	    kmem_zalloc(sizeof (struct udf_vfs), KM_SLEEP);
	udf_vfsp->udf_mvds_loc = SWAP_32(avdp->avd_main_vdse.ext_loc);
	udf_vfsp->udf_mvds_len = SWAP_32(avdp->avd_main_vdse.ext_len);
	udf_vfsp->udf_rvds_loc = SWAP_32(avdp->avd_res_vdse.ext_loc);
	udf_vfsp->udf_rvds_len = SWAP_32(avdp->avd_res_vdse.ext_len);
	secbp->b_flags = B_AGE | B_STALE;
	brelse(secbp);

	/*
	 * Read Main Volume Descriptor Sequence
	 * and process it
	 */
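	/*
	 * Descriptive note (added): the main VDS is tried first; if it
	 * cannot be read, the code below falls back to the reserve VDS
	 * recorded in the anchor volume descriptor.
	 */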
	vds_loc = udf_vfsp->udf_mvds_loc;
	secbp = ud_bread(dev, vds_loc << shift,
	    udf_vfsp->udf_mvds_len);
	if ((error = geterror(secbp)) != 0) {
		brelse(secbp);
		cmn_err(CE_NOTE, "udfs : Could not read Main Volume Desc %x",
		    error);

		vds_loc = udf_vfsp->udf_rvds_loc;
		secbp = ud_bread(dev, vds_loc << shift,
		    udf_vfsp->udf_rvds_len);
		if ((error = geterror(secbp)) != 0) {
			brelse(secbp);
			cmn_err(CE_NOTE,
			    "udfs : Could not read Res Volume Desc %x", error);
			return (NULL);
		}
	}

	udf_vfsp->udf_vds = ngeteblk(udf_vfsp->udf_mvds_len);
	bp = udf_vfsp->udf_vds;
	bp->b_edev = dev;
	bp->b_dev = cmpdev(dev);
	bp->b_blkno = vds_loc << shift;
	bp->b_bcount = udf_vfsp->udf_mvds_len;
	bcopy(secbp->b_un.b_addr, bp->b_un.b_addr, udf_vfsp->udf_mvds_len);
	secbp->b_flags |= B_STALE | B_AGE;
	brelse(secbp);


	count = udf_vfsp->udf_mvds_len / DEV_BSIZE;
	addr = bp->b_un.b_addr;
	for (index = 0; index < count; index++) {
		ttag = (struct tag *)(addr + index * DEV_BSIZE);
		desc_len = udf_vfsp->udf_mvds_len - (index * DEV_BSIZE);
		if (ud_verify_tag_and_desc(ttag, UD_PRI_VOL_DESC,
		    vds_loc + (index >> shift),
		    1, desc_len) == 0) {
			if (udf_vfsp->udf_pvd == NULL) {
				udf_vfsp->udf_pvd =
				    (struct pri_vol_desc *)ttag;
			} else {
				struct pri_vol_desc *opvd, *npvd;

				opvd = udf_vfsp->udf_pvd;
				npvd = (struct pri_vol_desc *)ttag;

				if ((strncmp(opvd->pvd_vsi,
				    npvd->pvd_vsi, 128) == 0) &&
				    (strncmp(opvd->pvd_vol_id,
				    npvd->pvd_vol_id, 32) == 0) &&
				    (strncmp((caddr_t)&opvd->pvd_desc_cs,
				    (caddr_t)&npvd->pvd_desc_cs,
				    sizeof (charspec_t)) == 0)) {

					if (SWAP_32(opvd->pvd_vdsn) <
					    SWAP_32(npvd->pvd_vdsn)) {
						udf_vfsp->udf_pvd = npvd;
					}
				} else {
					goto out;
				}
			}
		} else if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_DESC,
		    vds_loc + (index >> shift),
		    1, desc_len) == 0) {
			struct log_vol_desc *lvd;

			lvd = (struct log_vol_desc *)ttag;
			if (strncmp(lvd->lvd_dom_id.reg_id,
			    UDF_DOMAIN_NAME, 23) != 0) {
				printf("Domain ID in lvd is not valid\n");
				goto out;
			}

			if (udf_vfsp->udf_lvd == NULL) {
				udf_vfsp->udf_lvd = lvd;
			} else {
				struct log_vol_desc *olvd;

				olvd = udf_vfsp->udf_lvd;
				if ((strncmp((caddr_t)&olvd->lvd_desc_cs,
				    (caddr_t)&lvd->lvd_desc_cs,
				    sizeof (charspec_t)) == 0) &&
				    (strncmp(olvd->lvd_lvid,
				    lvd->lvd_lvid, 128) == 0)) {
					if (SWAP_32(olvd->lvd_vdsn) <
					    SWAP_32(lvd->lvd_vdsn)) {
						udf_vfsp->udf_lvd = lvd;
					}
				} else {
					goto out;
				}
			}
		} else if (ud_verify_tag_and_desc(ttag, UD_PART_DESC,
		    vds_loc + (index >> shift),
		    1, desc_len) == 0) {
			int32_t i;
			struct phdr_desc *hdr;
			struct part_desc *pdesc;
			struct ud_part *pnew, *pold, *part;

			pdesc = (struct part_desc *)ttag;
			pold = udf_vfsp->udf_parts;
			for (i = 0; i < udf_vfsp->udf_npart; i++) {
				if (pold->udp_number !=
				    SWAP_16(pdesc->pd_pnum)) {
					pold++;
					continue;
				}

				if (SWAP_32(pdesc->pd_vdsn) >
				    pold->udp_seqno) {
					pold->udp_seqno =
					    SWAP_32(pdesc->pd_vdsn);
					pold->udp_access =
					    SWAP_32(pdesc->pd_acc_type);
					pold->udp_start =
					    SWAP_32(pdesc->pd_part_start);
					pold->udp_length =
					    SWAP_32(pdesc->pd_part_length);
				}
				goto loop_end;
			}
			pold = udf_vfsp->udf_parts;
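			/*
			 * Descriptive note (added): a matching partition
			 * number was not found above, so grow the partition
			 * array by one entry and copy over any existing
			 * entries before filling in the new one.
			 */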
			udf_vfsp->udf_npart++;
			pnew = kmem_zalloc(udf_vfsp->udf_npart *
			    sizeof (struct ud_part), KM_SLEEP);
			udf_vfsp->udf_parts = pnew;
			if (pold) {
				bcopy(pold, pnew,
				    sizeof (struct ud_part) *
				    (udf_vfsp->udf_npart - 1));
				kmem_free(pold,
				    sizeof (struct ud_part) *
				    (udf_vfsp->udf_npart - 1));
			}
			part = pnew + (udf_vfsp->udf_npart - 1);
			part->udp_number = SWAP_16(pdesc->pd_pnum);
			part->udp_seqno = SWAP_32(pdesc->pd_vdsn);
			part->udp_access = SWAP_32(pdesc->pd_acc_type);
			part->udp_start = SWAP_32(pdesc->pd_part_start);
			part->udp_length = SWAP_32(pdesc->pd_part_length);
			part->udp_last_alloc = 0;

			/*
			 * Figure out space bitmaps
			 * or space tables
			 */
			hdr = (struct phdr_desc *)pdesc->pd_pc_use;
			if (hdr->phdr_ust.sad_ext_len) {
				part->udp_flags = UDP_SPACETBLS;
				part->udp_unall_loc =
				    SWAP_32(hdr->phdr_ust.sad_ext_loc);
				part->udp_unall_len =
				    SWAP_32(hdr->phdr_ust.sad_ext_len);
				part->udp_freed_loc =
				    SWAP_32(hdr->phdr_fst.sad_ext_loc);
				part->udp_freed_len =
				    SWAP_32(hdr->phdr_fst.sad_ext_len);
			} else {
				part->udp_flags = UDP_BITMAPS;
				part->udp_unall_loc =
				    SWAP_32(hdr->phdr_usb.sad_ext_loc);
				part->udp_unall_len =
				    SWAP_32(hdr->phdr_usb.sad_ext_len);
				part->udp_freed_loc =
				    SWAP_32(hdr->phdr_fsb.sad_ext_loc);
				part->udp_freed_len =
				    SWAP_32(hdr->phdr_fsb.sad_ext_len);
			}
		} else if (ud_verify_tag_and_desc(ttag, UD_TERM_DESC,
		    vds_loc + (index >> shift),
		    1, desc_len) == 0) {

			break;
		}
loop_end:
		;
	}
	if ((udf_vfsp->udf_pvd == NULL) ||
	    (udf_vfsp->udf_lvd == NULL) ||
	    (udf_vfsp->udf_parts == NULL)) {
		goto out;
	}

	/*
	 * Process Primary Volume Descriptor
	 */
	(void) strncpy(udf_vfsp->udf_volid, udf_vfsp->udf_pvd->pvd_vol_id, 32);
	udf_vfsp->udf_volid[31] = '\0';
	udf_vfsp->udf_tsno = SWAP_16(udf_vfsp->udf_pvd->pvd_tag.tag_sno);

	/*
	 * Process Logical Volume Descriptor
	 */
	udf_vfsp->udf_lbsize =
	    SWAP_32(udf_vfsp->udf_lvd->lvd_log_bsize);
	udf_vfsp->udf_lbmask = udf_vfsp->udf_lbsize - 1;
	udf_vfsp->udf_l2d_shift = shift;
	udf_vfsp->udf_l2b_shift = shift + DEV_BSHIFT;

	/*
	 * Check if the media is in
	 * proper domain.
	 */
	if (strcmp(udf_vfsp->udf_lvd->lvd_dom_id.reg_id,
	    UDF_DOMAIN_NAME) != 0) {
		goto out;
	}

	/*
	 * The AVDS offset does not match the lbsize
	 * in the lvd
	 */
	if (udf_vfsp->udf_lbsize != bsize) {
		goto out;
	}

	udf_vfsp->udf_iseq_loc =
	    SWAP_32(udf_vfsp->udf_lvd->lvd_int_seq_ext.ext_loc);
	udf_vfsp->udf_iseq_len =
	    SWAP_32(udf_vfsp->udf_lvd->lvd_int_seq_ext.ext_len);

	udf_vfsp->udf_fsd_prn =
	    SWAP_16(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_prn);
	udf_vfsp->udf_fsd_loc =
	    SWAP_32(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_loc);
	udf_vfsp->udf_fsd_len =
	    SWAP_32(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_len);


	/*
	 * process partitions
	 */
	udf_vfsp->udf_mtype = udf_vfsp->udf_parts[0].udp_access;
	for (index = 0; index < udf_vfsp->udf_npart; index++) {
		if (udf_vfsp->udf_parts[index].udp_access <
		    udf_vfsp->udf_mtype) {
			udf_vfsp->udf_mtype =
			    udf_vfsp->udf_parts[index].udp_access;
		}
	}
	if ((udf_vfsp->udf_mtype < UDF_MT_RO) ||
	    (udf_vfsp->udf_mtype > UDF_MT_OW)) {
		udf_vfsp->udf_mtype = UDF_MT_RO;
	}

	udf_vfsp->udf_nmaps = 0;
	hdr = (struct pmap_hdr *)udf_vfsp->udf_lvd->lvd_pmaps;
	count = SWAP_32(udf_vfsp->udf_lvd->lvd_num_pmaps);
	for (index = 0; index < count; index++) {

		if ((hdr->maph_type == MAP_TYPE1) &&
		    (hdr->maph_length == MAP_TYPE1_LEN)) {
			typ1 = (struct pmap_typ1 *)hdr;

			map = udf_vfsp->udf_maps;
			udf_vfsp->udf_maps =
			    kmem_zalloc(sizeof (struct ud_map) *
			    (udf_vfsp->udf_nmaps + 1), KM_SLEEP);
			if (map != NULL) {
				bcopy(map, udf_vfsp->udf_maps,
				    sizeof (struct ud_map) *
				    udf_vfsp->udf_nmaps);
				kmem_free(map, sizeof (struct ud_map) *
				    udf_vfsp->udf_nmaps);
			}
			map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
			map->udm_flags = UDM_MAP_NORM;
			map->udm_vsn = SWAP_16(typ1->map1_vsn);
			map->udm_pn = SWAP_16(typ1->map1_pn);
			udf_vfsp->udf_nmaps++;
		} else if ((hdr->maph_type == MAP_TYPE2) &&
		    (hdr->maph_length == MAP_TYPE2_LEN)) {
			typ2 = (struct pmap_typ2 *)hdr;

			if (strncmp(typ2->map2_pti.reg_id,
			    UDF_VIRT_PART, 23) == 0) {
				/*
				 * Add this to the normal
				 * partition table so that
				 * we do not
				 */
				map = udf_vfsp->udf_maps;
				udf_vfsp->udf_maps =
				    kmem_zalloc(sizeof (struct ud_map) *
				    (udf_vfsp->udf_nmaps + 1), KM_SLEEP);
				if (map != NULL) {
					bcopy(map, udf_vfsp->udf_maps,
					    sizeof (struct ud_map) *
					    udf_vfsp->udf_nmaps);
					kmem_free(map,
					    sizeof (struct ud_map) *
					    udf_vfsp->udf_nmaps);
				}
				map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
				map->udm_flags = UDM_MAP_VPM;
				map->udm_vsn = SWAP_16(typ2->map2_vsn);
				map->udm_pn = SWAP_16(typ2->map2_pn);
				udf_vfsp->udf_nmaps++;
				if (error = ud_get_last_block(dev, &lblkno)) {
					goto out;
				}
				if (error = ud_val_get_vat(udf_vfsp, dev,
				    lblkno, map)) {
					goto out;
				}
			} else if (strncmp(typ2->map2_pti.reg_id,
			    UDF_SPAR_PART, 23) == 0) {

				if (SWAP_16(typ2->map2_pl) != 32) {
					printf(
					    "Packet Length is not valid %x\n",
					    SWAP_16(typ2->map2_pl));
					goto out;
				}
				if ((typ2->map2_nst < 1) ||
				    (typ2->map2_nst > 4)) {
					goto out;
				}
				map = udf_vfsp->udf_maps;
				udf_vfsp->udf_maps =
				    kmem_zalloc(sizeof (struct ud_map) *
				    (udf_vfsp->udf_nmaps + 1),
				    KM_SLEEP);
				if (map != NULL) {
					bcopy(map, udf_vfsp->udf_maps,
					    sizeof (struct ud_map) *
					    udf_vfsp->udf_nmaps);
					kmem_free(map,
					    sizeof (struct ud_map) *
					    udf_vfsp->udf_nmaps);
				}
				map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
				map->udm_flags = UDM_MAP_SPM;
				map->udm_vsn = SWAP_16(typ2->map2_vsn);
				map->udm_pn = SWAP_16(typ2->map2_pn);

				udf_vfsp->udf_nmaps++;

				if (error = ud_read_sparing_tbls(udf_vfsp,
				    dev, map, typ2)) {
					goto out;
				}
			} else {
				/*
				 * Unknown type of partition
				 * Bail out
				 */
				goto out;
			}
		} else {
			/*
			 * Unknown type of partition
			 * Bail out
			 */
			goto out;
		}
		hdr = (struct pmap_hdr *)(((uint8_t *)hdr) + hdr->maph_length);
	}


	/*
	 * Read Logical Volume Integrity Sequence
	 * and process it
	 */
	secbp = ud_bread(dev, udf_vfsp->udf_iseq_loc << shift,
	    udf_vfsp->udf_iseq_len);
	if ((error = geterror(secbp)) != 0) {
		cmn_err(CE_NOTE,
		    "udfs : Could not read Logical Volume Integrity Sequence %x",
		    error);
		brelse(secbp);
		goto out;
	}
	udf_vfsp->udf_iseq = ngeteblk(udf_vfsp->udf_iseq_len);
	bp = udf_vfsp->udf_iseq;
	bp->b_edev = dev;
	bp->b_dev = cmpdev(dev);
	bp->b_blkno = udf_vfsp->udf_iseq_loc << shift;
	bp->b_bcount = udf_vfsp->udf_iseq_len;
	bcopy(secbp->b_un.b_addr, bp->b_un.b_addr, udf_vfsp->udf_iseq_len);
	secbp->b_flags |= B_STALE | B_AGE;
	brelse(secbp);

	count = udf_vfsp->udf_iseq_len / DEV_BSIZE;
	addr = bp->b_un.b_addr;
	for (index = 0; index < count; index++) {
		ttag = (struct tag *)(addr + index * DEV_BSIZE);
		desc_len = udf_vfsp->udf_iseq_len - (index * DEV_BSIZE);
		if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_INT,
		    udf_vfsp->udf_iseq_loc + (index >> shift),
		    1, desc_len) == 0) {

			struct log_vol_int_desc *lvid;

			lvid = (struct log_vol_int_desc *)ttag;
			udf_vfsp->udf_lvid = lvid;

			if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
				udf_vfsp->udf_clean = UDF_CLEAN;
			} else {
				udf_vfsp->udf_clean = UDF_DIRTY;
			}

			/*
			 * update superblock with the metadata
			 */
			ud_convert_to_superblock(udf_vfsp, lvid);
			break;
		}
	}

	if (udf_vfsp->udf_lvid == NULL) {
		goto out;
	}

	if ((blkno = ud_xlate_to_daddr(udf_vfsp,
	    udf_vfsp->udf_fsd_prn, udf_vfsp->udf_fsd_loc,
	    1, &dummy)) == 0) {
		goto out;
	}
	secbp = ud_bread(dev, blkno << shift, udf_vfsp->udf_fsd_len);
	if ((error = geterror(secbp)) != 0) {
		cmn_err(CE_NOTE,
		    "udfs : Could not read File Set Descriptor %x", error);
		brelse(secbp);
		goto out;
	}
	fsd = (struct file_set_desc *)secbp->b_un.b_addr;
	if (ud_verify_tag_and_desc(&fsd->fsd_tag, UD_FILE_SET_DESC,
	    udf_vfsp->udf_fsd_loc,
	    1, udf_vfsp->udf_fsd_len) != 0) {
		secbp->b_flags = B_AGE | B_STALE;
		brelse(secbp);
		goto out;
	}
	udf_vfsp->udf_ricb_prn = SWAP_16(fsd->fsd_root_icb.lad_ext_prn);
	udf_vfsp->udf_ricb_loc = SWAP_32(fsd->fsd_root_icb.lad_ext_loc);
	udf_vfsp->udf_ricb_len = SWAP_32(fsd->fsd_root_icb.lad_ext_len);
	secbp->b_flags = B_AGE | B_STALE;
	brelse(secbp);
	udf_vfsp->udf_root_blkno = ud_xlate_to_daddr(udf_vfsp,
	    udf_vfsp->udf_ricb_prn, udf_vfsp->udf_ricb_loc,
	    1, &dummy);

	return (udf_vfsp);
out:
	ud_destroy_fsp(udf_vfsp);

	return (NULL);
}

/*
 * release/free resources from one ud_map; map data was zalloc'd in
 * ud_validate_and_fill_superblock() and fields may later point to
 * valid data
 */
static void
ud_free_map(struct ud_map *map)
{
	uint32_t n;

	if (map->udm_flags & UDM_MAP_VPM) {
		if (map->udm_count) {
			kmem_free(map->udm_count,
			    map->udm_nent * sizeof (*map->udm_count));
			map->udm_count = NULL;
		}
		if (map->udm_bp) {
			for (n = 0; n < map->udm_nent; n++) {
				if (map->udm_bp[n])
					brelse(map->udm_bp[n]);
			}
			kmem_free(map->udm_bp,
			    map->udm_nent * sizeof (*map->udm_bp));
			map->udm_bp = NULL;
		}
		if (map->udm_addr) {
			kmem_free(map->udm_addr,
			    map->udm_nent * sizeof (*map->udm_addr));
			map->udm_addr = NULL;
		}
	}
	if (map->udm_flags & UDM_MAP_SPM) {
		for (n = 0; n < MAX_SPM; n++) {
			if (map->udm_sbp[n]) {
				brelse(map->udm_sbp[n]);
				map->udm_sbp[n] = NULL;
				map->udm_spaddr[n] = NULL;
			}
		}
	}
}

void
ud_destroy_fsp(struct udf_vfs *udf_vfsp)
{
	int32_t i;

	ud_printf("ud_destroy_fsp\n");
	if (udf_vfsp == NULL)
		return;

	if (udf_vfsp->udf_maps) {
		for (i = 0; i < udf_vfsp->udf_nmaps; i++)
			ud_free_map(&udf_vfsp->udf_maps[i]);

		kmem_free(udf_vfsp->udf_maps,
		    udf_vfsp->udf_nmaps * sizeof (*udf_vfsp->udf_maps));
	}

	if (udf_vfsp->udf_parts) {
		kmem_free(udf_vfsp->udf_parts,
		    udf_vfsp->udf_npart * sizeof (*udf_vfsp->udf_parts));
	}
	if (udf_vfsp->udf_iseq) {
		udf_vfsp->udf_iseq->b_flags |= (B_STALE|B_AGE);
		brelse(udf_vfsp->udf_iseq);
	}
	if (udf_vfsp->udf_vds) {
		udf_vfsp->udf_vds->b_flags |= (B_STALE|B_AGE);
		brelse(udf_vfsp->udf_vds);
	}
	if (udf_vfsp->udf_vfs)
		ud_vfs_remove(udf_vfsp);
	if (udf_vfsp->udf_fsmnt) {
		kmem_free(udf_vfsp->udf_fsmnt,
		    strlen(udf_vfsp->udf_fsmnt) + 1);
	}
	kmem_free(udf_vfsp, sizeof (*udf_vfsp));
}

void
ud_convert_to_superblock(struct udf_vfs *udf_vfsp,
    struct log_vol_int_desc *lvid)
{
	int32_t i, c;
	uint32_t *temp;
	struct ud_part *ud_part;
	struct lvid_iu *iu;

	udf_vfsp->udf_maxuniq = SWAP_64(lvid->lvid_uniqid);
	temp = lvid->lvid_fst;
	c = SWAP_32(lvid->lvid_npart);
	ud_part = udf_vfsp->udf_parts;
	for (i = 0; i < c; i++) {
		if (i >= udf_vfsp->udf_npart) {
			continue;
		}
		ud_part->udp_nfree = SWAP_32(temp[i]);
		ud_part->udp_nblocks = SWAP_32(temp[c + i]);
		udf_vfsp->udf_freeblks += SWAP_32(temp[i]);
		udf_vfsp->udf_totalblks += SWAP_32(temp[c + i]);
		ud_part++;
	}

	iu = (struct lvid_iu *)(temp + c * 2);
	udf_vfsp->udf_nfiles = SWAP_32(iu->lvidiu_nfiles);
	udf_vfsp->udf_ndirs = SWAP_32(iu->lvidiu_ndirs);
	udf_vfsp->udf_miread = BCD2HEX_16(SWAP_16(iu->lvidiu_mread));
	udf_vfsp->udf_miwrite = BCD2HEX_16(SWAP_16(iu->lvidiu_mwrite));
	udf_vfsp->udf_mawrite = BCD2HEX_16(SWAP_16(iu->lvidiu_maxwr));
}

void
ud_update_superblock(struct vfs *vfsp)
{
	struct udf_vfs *udf_vfsp;

	ud_printf("ud_update_superblock\n");

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;

	mutex_enter(&udf_vfsp->udf_lock);
	ud_sbwrite(udf_vfsp);
	mutex_exit(&udf_vfsp->udf_lock);
}


#include <sys/dkio.h>
#include <sys/cdio.h>
#include <sys/vtoc.h>

/*
 * This part of the code is known
 * to work only on sparc.  It needs to be
 * evaluated before using it with x86
 */
int32_t
ud_get_last_block(dev_t dev, daddr_t *blkno)
{
	struct vtoc vtoc;
	struct dk_cinfo dki_info;
	int32_t rval, error;

	if ((error = cdev_ioctl(dev, DKIOCGVTOC, (intptr_t)&vtoc,
	    FKIOCTL|FREAD|FNATIVE, CRED(), &rval)) != 0) {
		cmn_err(CE_NOTE, "Could not get the vtoc information");
		return (error);
	}

	if (vtoc.v_sanity != VTOC_SANE) {
		return (EINVAL);
	}
	if ((error = cdev_ioctl(dev, DKIOCINFO, (intptr_t)&dki_info,
	    FKIOCTL|FREAD|FNATIVE, CRED(), &rval)) != 0) {
		cmn_err(CE_NOTE, "Could not get the slice information");
		return (error);
	}

	if (dki_info.dki_partition > V_NUMPAR) {
		return (EINVAL);
	}


	*blkno = vtoc.v_part[dki_info.dki_partition].p_size;

	return (0);
}

/* Search sequentially N - 152, N - 150, N - 2, N for the vat icb */
/*
 * int32_t ud_sub_blks[] = {2, 0, 152, 150};
 */
int32_t ud_sub_blks[] = {152, 150, 2, 0};
int32_t ud_sub_count = 4;

/*
 * Validate the VAT ICB
 */
static int32_t
ud_val_get_vat(struct udf_vfs *udf_vfsp, dev_t dev,
    daddr_t blkno, struct ud_map *udm)
{
	struct buf *secbp;
	struct file_entry *fe;
	int32_t end_loc, i, j, ad_type;
	struct short_ad *sad;
	struct long_ad *lad;
	uint32_t count, blk;
	struct ud_part *ud_part;
	int err = 0;

	end_loc = (blkno >> udf_vfsp->udf_l2d_shift) - 1;

	for (i = 0; i < ud_sub_count; i++) {
		udm->udm_vat_icb = end_loc - ud_sub_blks[i];

		secbp = ud_bread(dev,
		    udm->udm_vat_icb << udf_vfsp->udf_l2d_shift,
		    udf_vfsp->udf_lbsize);
		ASSERT(secbp->b_un.b_addr);

		fe = (struct file_entry *)secbp->b_un.b_addr;
		if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY, 0,
		    0, 0) == 0) {
			if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
			    SWAP_32(fe->fe_tag.tag_loc),
			    1, udf_vfsp->udf_lbsize) == 0) {
				if (fe->fe_icb_tag.itag_ftype == 0) {
					break;
				}
			}
		}
		secbp->b_flags |= B_AGE | B_STALE;
		brelse(secbp);
	}
	if (i == ud_sub_count) {
		return (EINVAL);
	}

	ad_type = SWAP_16(fe->fe_icb_tag.itag_flags) & 0x3;
	if (ad_type == ICB_FLAG_ONE_AD) {
		udm->udm_nent = 1;
	} else if (ad_type == ICB_FLAG_SHORT_AD) {
		udm->udm_nent =
		    SWAP_32(fe->fe_len_adesc) / sizeof (struct short_ad);
	} else if (ad_type == ICB_FLAG_LONG_AD) {
		udm->udm_nent =
		    SWAP_32(fe->fe_len_adesc) / sizeof (struct long_ad);
	} else {
		err = EINVAL;
		goto end;
	}

	udm->udm_count = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_count),
	    KM_SLEEP);
	udm->udm_bp = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_bp),
	    KM_SLEEP);
	udm->udm_addr = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_addr),
	    KM_SLEEP);

	if (ad_type == ICB_FLAG_ONE_AD) {
		udm->udm_count[0] = (SWAP_64(fe->fe_info_len) - 36) /
		    sizeof (uint32_t);
		udm->udm_bp[0] = secbp;
		udm->udm_addr[0] = (uint32_t *)
		    &fe->fe_spec[SWAP_32(fe->fe_len_ear)];
		return (0);
	}
	for (i = 0; i < udm->udm_nent; i++) {
		if (ad_type == ICB_FLAG_SHORT_AD) {
			sad = (struct short_ad *)
			    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
			sad += i;
			count = SWAP_32(sad->sad_ext_len);
			blk = SWAP_32(sad->sad_ext_loc);
		} else {
			lad = (struct long_ad *)
			    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
			lad += i;
			count = SWAP_32(lad->lad_ext_len);
			blk = SWAP_32(lad->lad_ext_loc);
			ASSERT(SWAP_16(lad->lad_ext_prn) == udm->udm_pn);
		}
		if ((count & 0x3FFFFFFF) == 0) {
			break;
		}
		if (i < udm->udm_nent - 1) {
			udm->udm_count[i] = count / 4;
		} else {
			udm->udm_count[i] = (count - 36) / 4;
		}
		ud_part = udf_vfsp->udf_parts;
		for (j = 0; j < udf_vfsp->udf_npart; j++) {
			if (udm->udm_pn == ud_part->udp_number) {
				blk = ud_part->udp_start + blk;
				break;
			}
		}
		if (j == udf_vfsp->udf_npart) {
			err = EINVAL;
			break;
		}

		count = (count + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		udm->udm_bp[i] = ud_bread(dev,
		    blk << udf_vfsp->udf_l2d_shift, count);
		if ((udm->udm_bp[i]->b_error != 0) ||
		    (udm->udm_bp[i]->b_resid)) {
			err = EINVAL;
			break;
		}
		udm->udm_addr[i] = (uint32_t *)udm->udm_bp[i]->b_un.b_addr;
	}

end:
	if (err)
		ud_free_map(udm);
	secbp->b_flags |= B_AGE | B_STALE;
	brelse(secbp);
	return (err);
}

int32_t
ud_read_sparing_tbls(struct udf_vfs *udf_vfsp,
    dev_t dev, struct ud_map *map, struct pmap_typ2 *typ2)
{
	int32_t index, valid = 0;
	uint32_t sz;
	struct buf *bp;
	struct stbl *stbl;

	map->udm_plen = SWAP_16(typ2->map2_pl);
	map->udm_nspm = typ2->map2_nst;
	map->udm_spsz = SWAP_32(typ2->map2_sest);
	sz = (map->udm_spsz + udf_vfsp->udf_lbmask) & ~udf_vfsp->udf_lbmask;
	if (sz == 0) {
		return (0);
	}

	for (index = 0; index < map->udm_nspm; index++) {
		map->udm_loc[index] = SWAP_32(typ2->map2_st[index]);

		bp = ud_bread(dev,
		    map->udm_loc[index] << udf_vfsp->udf_l2d_shift, sz);
		if ((bp->b_error != 0) || (bp->b_resid)) {
			brelse(bp);
			continue;
		}
		stbl = (struct stbl *)bp->b_un.b_addr;
		if (strncmp(stbl->stbl_si.reg_id, UDF_SPAR_TBL, 23) != 0) {
			printf("Sparing Identifier does not match\n");
			bp->b_flags |= B_AGE | B_STALE;
			brelse(bp);
			continue;
		}
		map->udm_sbp[index] = bp;
		map->udm_spaddr[index] = bp->b_un.b_addr;
#ifdef	UNDEF
{
		struct stbl_entry *te;
		int32_t i, tbl_len;

		te = (struct stbl_entry *)&stbl->stbl_entry;
		tbl_len = SWAP_16(stbl->stbl_len);

		printf("%x %x\n", tbl_len, SWAP_32(stbl->stbl_seqno));
		printf("%x %x\n", bp->b_un.b_addr, te);

		for (i = 0; i < tbl_len; i++) {
			printf("%x %x\n", SWAP_32(te->sent_ol),
			    SWAP_32(te->sent_ml));
			te++;
		}
}
#endif
		valid++;
	}

	if (valid) {
		return (0);
	}
	return (EINVAL);
}

uint32_t
ud_get_lbsize(dev_t dev, uint32_t *loc)
{
	int32_t bsize, shift, index, end_index;
	daddr_t last_block;
	uint32_t avd_loc;
	struct buf *bp;
	struct anch_vol_desc_ptr *avdp;
	uint32_t session_offset = 0;
	int32_t rval;

	if (ud_get_last_block(dev, &last_block) != 0) {
		end_index = 1;
	} else {
		end_index = 3;
	}

	if (cdev_ioctl(dev, CDROMREADOFFSET, (intptr_t)&session_offset,
	    FKIOCTL|FREAD|FNATIVE, CRED(), &rval) != 0) {
		session_offset = 0;
	}

	for (index = 0; index < end_index; index++) {

		for (bsize = DEV_BSIZE, shift = 0;
		    bsize <= MAXBSIZE; bsize <<= 1, shift++) {

			if (index == 0) {
				avd_loc = 256;
				if (bsize <= 2048) {
					avd_loc +=
					    session_offset * 2048 / bsize;
				} else {
					avd_loc +=
					    session_offset / (bsize / 2048);
				}
			} else if (index == 1) {
				avd_loc = last_block - (1 << shift);
			} else {
				avd_loc = last_block - (256 << shift);
			}

			bp = ud_bread(dev, avd_loc << shift,
			    ANCHOR_VOL_DESC_LEN);
			if (geterror(bp) != 0) {
				brelse(bp);
				continue;
			}

			/*
			 * Verify if we have the avdp here
			 */
			avdp = (struct anch_vol_desc_ptr *)bp->b_un.b_addr;
			if (ud_verify_tag_and_desc(&avdp->avd_tag,
			    UD_ANCH_VOL_DESC, avd_loc,
			    1, ANCHOR_VOL_DESC_LEN) != 0) {
				bp->b_flags |= B_AGE | B_STALE;
				brelse(bp);
				continue;
			}
			bp->b_flags |= B_AGE | B_STALE;
			brelse(bp);
			*loc = avd_loc;
			return (bsize);
		}
	}

	/*
	 * Did not find the AVD at any of the locations
	 */
	return (0);
}

static int
udfinit(int fstype, char *name)
{
	static const fs_operation_def_t udf_vfsops_template[] = {
		VFSNAME_MOUNT,		{ .vfs_mount = udf_mount },
		VFSNAME_UNMOUNT,	{ .vfs_unmount = udf_unmount },
		VFSNAME_ROOT,		{ .vfs_root = udf_root },
		VFSNAME_STATVFS,	{ .vfs_statvfs = udf_statvfs },
		VFSNAME_SYNC,		{ .vfs_sync = udf_sync },
		VFSNAME_VGET,		{ .vfs_vget = udf_vget },
		VFSNAME_MOUNTROOT,	{ .vfs_mountroot = udf_mountroot },
		NULL,			NULL
	};
	extern struct vnodeops *udf_vnodeops;
	extern const fs_operation_def_t udf_vnodeops_template[];
	int error;

	ud_printf("udfinit\n");

	error = vfs_setfsops(fstype, udf_vfsops_template, NULL);
	if (error != 0) {
		cmn_err(CE_WARN, "udfinit: bad vfs ops template");
		return (error);
	}

	error = vn_make_ops(name, udf_vnodeops_template, &udf_vnodeops);
	if (error != 0) {
		(void) vfs_freevfsops_by_type(fstype);
		cmn_err(CE_WARN, "udfinit: bad vnode ops template");
		return (error);
	}

	udf_fstype = fstype;

	ud_init_inodes();

	return (0);
}