/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mode.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/dnlc.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/fbuf.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/dirent.h>
#include <sys/modctl.h>
#include <sys/statvfs.h>
#include <sys/mount.h>
#include <sys/sunddi.h>
#include <sys/bootconf.h>
#include <sys/policy.h>

#include <vm/hat.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kmem.h>
#include <vm/seg_vn.h>
#include <vm/rm.h>
#include <sys/swap.h>
#include <sys/mntent.h>


#include <fs/fs_subr.h>


#include <sys/fs/udf_volume.h>
#include <sys/fs/udf_inode.h>


extern struct vnode *common_specvp(struct vnode *vp);

extern kmutex_t ud_sync_busy;
static int32_t ud_mountfs(struct vfs *,
	enum whymountroot, dev_t, char *, struct cred *, int32_t);
static struct udf_vfs *ud_validate_and_fill_superblock(dev_t,
	int32_t, uint32_t);
void ud_destroy_fsp(struct udf_vfs *);
void ud_convert_to_superblock(struct udf_vfs *,
	struct log_vol_int_desc *);
void ud_update_superblock(struct vfs *);
int32_t ud_get_last_block(dev_t, daddr_t *);
static int32_t ud_val_get_vat(struct udf_vfs *,
	dev_t, daddr_t, struct ud_map *);
int32_t ud_read_sparing_tbls(struct udf_vfs *,
	dev_t, struct ud_map *, struct pmap_typ2 *);
uint32_t ud_get_lbsize(dev_t, uint32_t *);

static int32_t udf_mount(struct vfs *,
	struct vnode *, struct mounta *, struct cred *);
static int32_t udf_unmount(struct vfs *, int, struct cred *);
static int32_t udf_root(struct vfs *, struct vnode **);
static int32_t udf_statvfs(struct vfs *, struct statvfs64 *);
static int32_t udf_sync(struct vfs *, int16_t, struct cred *);
static int32_t udf_vget(struct vfs *, struct vnode **, struct fid *);
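/*
 * The static udf_* routines declared here are the VFS entry points for
 * udfs; udfinit() registers them with the VFS framework through
 * vfs_setfsops() (see udf_vfsops_template at the end of this file).
 */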
static int32_t udf_mountroot(struct vfs *vfsp, enum whymountroot);

static int udfinit(int, char *);

static mntopts_t udfs_mntopts;

static vfsdef_t vfw = {
	VFSDEF_VERSION,
	"udfs",
	udfinit,
	VSW_HASPROTO|VSW_CANREMOUNT|VSW_STATS,
	&udfs_mntopts
};

static mntopts_t udfs_mntopts = {
	0,
	NULL
};

/*
 * Module linkage information for the kernel.
 */
extern struct mod_ops mod_fsops;

static struct modlfs modlfs = {
	&mod_fsops, "filesystem for UDFS", &vfw
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlfs, NULL
};

char _depends_on[] = "fs/specfs";

int32_t udf_fstype = -1;

int
_init()
{
	return (mod_install(&modlinkage));
}

int
_fini()
{
	return (EBUSY);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


/* -------------------- vfs routines -------------------- */

/*
 * XXX - this appears only to be used by the VM code to handle the case where
 * UNIX is running off the mini-root.  That probably wants to be done
 * differently.
 */
struct vnode *rootvp;
#ifndef	__lint
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", rootvp))
#endif

static int32_t
udf_mount(struct vfs *vfsp, struct vnode *mvp,
	struct mounta *uap, struct cred *cr)
{
	dev_t dev;
	struct vnode *bvp;
	struct pathname dpn;
	int32_t error;
	enum whymountroot why;
	int oflag, aflag;

	ud_printf("udf_mount\n");

	if ((error = secpolicy_fs_mount(cr, mvp, vfsp)) != 0) {
		return (error);
	}

	if (mvp->v_type != VDIR) {
		return (ENOTDIR);
	}

	mutex_enter(&mvp->v_lock);
	if ((uap->flags & MS_REMOUNT) == 0 &&
	    (uap->flags & MS_OVERLAY) == 0 &&
	    (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
		mutex_exit(&mvp->v_lock);
		return (EBUSY);
	}
	mutex_exit(&mvp->v_lock);

	if (error = pn_get(uap->dir, UIO_USERSPACE, &dpn)) {
		return (error);
	}

	/*
	 * Resolve path name of special file being mounted.
	 */
	if (error = lookupname(uap->spec, UIO_USERSPACE, FOLLOW, NULLVPP,
	    &bvp)) {
		pn_free(&dpn);
		return (error);
	}
	if (bvp->v_type != VBLK) {
		error = ENOTBLK;
		goto out;
	}
	dev = bvp->v_rdev;

	/*
	 * Ensure that this device isn't already mounted,
	 * unless this is a REMOUNT request
	 */
	if (vfs_devmounting(dev, vfsp)) {
		error = EBUSY;
		goto out;
	}
	if (vfs_devismounted(dev)) {
		if (uap->flags & MS_REMOUNT) {
			why = ROOT_REMOUNT;
		} else {
			error = EBUSY;
			goto out;
		}
	} else {
		why = ROOT_INIT;
	}
	if (getmajor(dev) >= devcnt) {
		error = ENXIO;
		goto out;
	}

	/*
	 * If the device is a tape, mount it read only
	 */
	if (devopsp[getmajor(dev)]->devo_cb_ops->cb_flag & D_TAPE) {
		vfsp->vfs_flag |= VFS_RDONLY;
	}

	if (uap->flags & MS_RDONLY) {
		vfsp->vfs_flag |= VFS_RDONLY;
	}

	/*
	 * Set mount options.
	 */
	if (uap->flags & MS_RDONLY) {
		vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
	}
	if (uap->flags & MS_NOSUID) {
		vfs_setmntopt(vfsp, MNTOPT_NOSUID, NULL, 0);
	}
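	/*
	 * Note that both of the checks below must pass before the file
	 * system is actually mounted: VOP_ACCESS() verifies permission on
	 * the device node itself, and secpolicy_spec_open() verifies that
	 * the caller may open the device with the chosen open mode.
	 */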
	/*
	 * Verify that the caller can open the device special file as
	 * required.  It is not until this moment that we know whether
	 * we're mounting "ro" or not.
	 */
	if ((vfsp->vfs_flag & VFS_RDONLY) != 0) {
		oflag = FREAD;
		aflag = VREAD;
	} else {
		oflag = FREAD | FWRITE;
		aflag = VREAD | VWRITE;
	}
	if ((error = VOP_ACCESS(bvp, aflag, 0, cr)) != 0 ||
	    (error = secpolicy_spec_open(cr, bvp, oflag)) != 0) {
		goto out;
	}

	/*
	 * Mount the filesystem.
	 */
	error = ud_mountfs(vfsp, why, dev, dpn.pn_path, cr, 0);
out:
	VN_RELE(bvp);
	pn_free(&dpn);

	return (error);
}


/*
 * Unmount the file system pointed to by vfsp.
 */
/* ARGSUSED */
static int32_t
udf_unmount(struct vfs *vfsp, int fflag, struct cred *cr)
{
	struct udf_vfs *udf_vfsp;
	struct vnode *bvp, *rvp;
	struct ud_inode *rip;
	int32_t flag;

	ud_printf("udf_unmount\n");

	if (secpolicy_fs_unmount(cr, vfsp) != 0) {
		return (EPERM);
	}

	/*
	 * Forced unmount is not supported by this file system,
	 * so ENOTSUP is returned.
	 */
	if (fflag & MS_FORCE)
		return (ENOTSUP);

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	flag = !(udf_vfsp->udf_flags & UDF_FL_RDONLY);
	bvp = udf_vfsp->udf_devvp;

	rvp = udf_vfsp->udf_root;
	ASSERT(rvp != NULL);
	rip = VTOI(rvp);

	(void) ud_release_cache(udf_vfsp);


	/* Flush all inodes except root */
	if (ud_iflush(vfsp) < 0) {
		return (EBUSY);
	}

	rw_enter(&rip->i_contents, RW_WRITER);
	(void) ud_syncip(rip, B_INVAL, I_SYNC);
	rw_exit(&rip->i_contents);

	mutex_enter(&ud_sync_busy);
	if ((udf_vfsp->udf_flags & UDF_FL_RDONLY) == 0) {
		bflush(vfsp->vfs_dev);
		mutex_enter(&udf_vfsp->udf_lock);
		udf_vfsp->udf_clean = UDF_CLEAN;
		mutex_exit(&udf_vfsp->udf_lock);
		ud_update_superblock(vfsp);
	}
	mutex_exit(&ud_sync_busy);

	mutex_destroy(&udf_vfsp->udf_lock);
	mutex_destroy(&udf_vfsp->udf_rename_lck);

	ud_delcache(rip);
	ITIMES(rip);
	VN_RELE(rvp);

	ud_destroy_fsp(udf_vfsp);

	(void) VOP_PUTPAGE(bvp, (offset_t)0, (uint32_t)0, B_INVAL, cr);
	(void) VOP_CLOSE(bvp, flag, 1, (offset_t)0, cr);

	(void) bfinval(vfsp->vfs_dev, 1);
	VN_RELE(bvp);


	return (0);
}


/*
 * Get the root vp for the file system.
 */
static int32_t
udf_root(struct vfs *vfsp, struct vnode **vpp)
{
	struct udf_vfs *udf_vfsp;
	struct vnode *vp;

	ud_printf("udf_root\n");

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;

	ASSERT(udf_vfsp != NULL);
	ASSERT(udf_vfsp->udf_root != NULL);

	vp = udf_vfsp->udf_root;
	VN_HOLD(vp);
	*vpp = vp;
	return (0);
}


/*
 * Get file system statistics.
 */
static int32_t
udf_statvfs(struct vfs *vfsp, struct statvfs64 *sp)
{
	struct udf_vfs *udf_vfsp;
	struct ud_part *parts;
	dev32_t d32;
	int32_t index;

	ud_printf("udf_statvfs\n");

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	(void) bzero(sp, sizeof (struct statvfs64));

	mutex_enter(&udf_vfsp->udf_lock);
	sp->f_bsize = udf_vfsp->udf_lbsize;
	sp->f_frsize = udf_vfsp->udf_lbsize;
	sp->f_blocks = 0;
	sp->f_bfree = 0;
	parts = udf_vfsp->udf_parts;
	for (index = 0; index < udf_vfsp->udf_npart; index++) {
		sp->f_blocks += parts->udp_nblocks;
		sp->f_bfree += parts->udp_nfree;
		parts++;
	}
	sp->f_bavail = sp->f_bfree;

	/*
	 * Since there are no real inodes allocated, we approximate:
	 * each new file will occupy
	 * 38 (overhead of each dirent) + MAXNAMLEN / 2 +
	 * inode_size (== block size)
	 */
	sp->f_ffree = sp->f_favail =
	    (sp->f_bavail * sp->f_bsize) / (146 + sp->f_bsize);

	/*
	 * The total number of inodes is
	 * the sum of files + directories + free inodes
	 */
	sp->f_files = sp->f_ffree +
	    udf_vfsp->udf_nfiles +
	    udf_vfsp->udf_ndirs;
	(void) cmpldev(&d32, vfsp->vfs_dev);
	sp->f_fsid = d32;
	(void) strcpy(sp->f_basetype, vfssw[vfsp->vfs_fstype].vsw_name);
	sp->f_flag = vf_to_stf(vfsp->vfs_flag);
	sp->f_namemax = MAXNAMLEN;
	(void) strcpy(sp->f_fstr, udf_vfsp->udf_volid);

	mutex_exit(&udf_vfsp->udf_lock);

	return (0);
}


/*
 * Flush any pending I/O to file system vfsp.
 * The ud_update() routine will only flush *all* udf files.
 */
/* ARGSUSED */
static int32_t
udf_sync(struct vfs *vfsp, int16_t flag, struct cred *cr)
{
	ud_printf("udf_sync\n");

	ud_update(flag);
	return (0);
}


/* ARGSUSED */
static int32_t
udf_vget(struct vfs *vfsp,
	struct vnode **vpp, struct fid *fidp)
{
	int32_t error = 0;
	struct udf_fid *udfid;
	struct udf_vfs *udf_vfsp;
	struct ud_inode *ip;

	ud_printf("udf_vget\n");

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	if (udf_vfsp == NULL) {
		*vpp = NULL;
		return (0);
	}

	udfid = (struct udf_fid *)fidp;
	if ((error = ud_iget(vfsp, udfid->udfid_prn,
	    udfid->udfid_icb_lbn, &ip, NULL, CRED())) != 0) {
		*vpp = NULL;
		return (error);
	}

	rw_enter(&ip->i_contents, RW_READER);
	if ((udfid->udfid_uinq_lo != (ip->i_uniqid & 0xffffffff)) ||
	    (udfid->udfid_prn != ip->i_icb_prn)) {
		rw_exit(&ip->i_contents);
		VN_RELE(ITOV(ip));
		*vpp = NULL;
		return (EINVAL);
	}
	rw_exit(&ip->i_contents);

	*vpp = ITOV(ip);
	return (0);
}


/*
 * Mount root file system.
 * "why" is ROOT_INIT on initial call, ROOT_REMOUNT if called to
 * remount the root file system, and ROOT_UNMOUNT if called to
 * unmount the root (e.g., as part of a system shutdown).
 *
 * XXX - this may be partially machine-dependent; it, along with the VFS_SWAPVP
 * operation, goes along with auto-configuration.  A mechanism should be
 * provided by which machine-INdependent code in the kernel can say "get me the
 * right root file system" and "get me the right initial swap area", and have
 * that done in what may well be a machine-dependent fashion.
 * Unfortunately, it is also file-system-type dependent (NFS gets it via
 * bootparams calls, UFS gets it from various and sundry machine-dependent
 * mechanisms, as SPECFS does for swap).
 */
/* ARGSUSED */
static int32_t
udf_mountroot(struct vfs *vfsp, enum whymountroot why)
{
	dev_t rootdev;
	static int32_t udf_rootdone = 0;
	struct vnode *vp = NULL;
	int32_t ovflags, error;

	ud_printf("udf_mountroot\n");

	if (why == ROOT_INIT) {
		if (udf_rootdone++) {
			return (EBUSY);
		}
		rootdev = getrootdev();
		if (rootdev == (dev_t)NODEV) {
			return (ENODEV);
		}
		vfsp->vfs_dev = rootdev;
		vfsp->vfs_flag |= VFS_RDONLY;
	} else if (why == ROOT_REMOUNT) {
		vp = ((struct udf_vfs *)vfsp->vfs_data)->udf_devvp;
		(void) dnlc_purge_vfsp(vfsp, 0);
		vp = common_specvp(vp);
		(void) VOP_PUTPAGE(vp, (offset_t)0,
		    (uint32_t)0, B_INVAL, CRED());
		binval(vfsp->vfs_dev);

		ovflags = vfsp->vfs_flag;
		vfsp->vfs_flag &= ~VFS_RDONLY;
		vfsp->vfs_flag |= VFS_REMOUNT;
		rootdev = vfsp->vfs_dev;
	} else if (why == ROOT_UNMOUNT) {
		ud_update(0);
		vp = ((struct udf_vfs *)vfsp->vfs_data)->udf_devvp;
		(void) VOP_CLOSE(vp, FREAD|FWRITE, 1,
		    (offset_t)0, CRED());
		return (0);
	}

	if ((error = vfs_lock(vfsp)) != 0) {
		return (error);
	}

	error = ud_mountfs(vfsp, why, rootdev, "/", CRED(), 1);
	if (error) {
		vfs_unlock(vfsp);
		if (why == ROOT_REMOUNT) {
			vfsp->vfs_flag = ovflags;
		}
		if (rootvp) {
			VN_RELE(rootvp);
			rootvp = (struct vnode *)0;
		}
		return (error);
	}

	if (why == ROOT_INIT) {
		vfs_add((struct vnode *)0, vfsp,
		    (vfsp->vfs_flag & VFS_RDONLY) ? MS_RDONLY : 0);
	}
	vfs_unlock(vfsp);
	return (0);
}


/* ------------------------- local routines ------------------------- */


static int32_t
ud_mountfs(struct vfs *vfsp,
	enum whymountroot why, dev_t dev, char *name,
	struct cred *cr, int32_t isroot)
{
	struct vnode *devvp = NULL;
	int32_t error = 0;
	int32_t needclose = 0;
	struct udf_vfs *udf_vfsp = NULL;
	struct log_vol_int_desc *lvid;
	struct ud_inode *rip = NULL;
	struct vnode *rvp = NULL;
	int32_t i, lbsize;
	uint32_t avd_loc;
	struct ud_map *map;
	int32_t desc_len;

	ud_printf("ud_mountfs\n");

	if (why == ROOT_INIT) {
		/*
		 * Open the device.
		 */
		devvp = makespecvp(dev, VBLK);

		/*
		 * Open block device mounted on.
		 * When bio is fixed for vnodes this can all be vnode
		 * operations.
		 */
		error = VOP_OPEN(&devvp,
		    (vfsp->vfs_flag & VFS_RDONLY) ? FREAD : FREAD|FWRITE, cr);
		if (error) {
			goto out;
		}
		needclose = 1;

		/*
		 * Refuse to go any further if this
		 * device is being used for swapping.
		 */
		if (IS_SWAPVP(devvp)) {
			error = EBUSY;
			goto out;
		}
	}

	/*
	 * check for dev already mounted on
	 */
	if (vfsp->vfs_flag & VFS_REMOUNT) {
		struct tag *ttag;
		int32_t index, count;
		struct buf *tpt = 0;
		caddr_t addr;


		/* cannot remount to RDONLY */
		if (vfsp->vfs_flag & VFS_RDONLY) {
			return (EINVAL);
		}

		if (vfsp->vfs_dev != dev) {
			return (EINVAL);
		}

		udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
		devvp = udf_vfsp->udf_devvp;
		/*
		 * fsck may have altered the file system; discard
		 * as much incore data as possible.  Don't flush
		 * if this is a rw to rw remount; it's just resetting
		 * the options.
		 */
		if (udf_vfsp->udf_flags & UDF_FL_RDONLY) {
			(void) dnlc_purge_vfsp(vfsp, 0);
			(void) VOP_PUTPAGE(devvp, (offset_t)0, (uint_t)0,
			    B_INVAL, CRED());
			(void) ud_iflush(vfsp);
			bflush(dev);
			binval(dev);
		}

		/*
		 * We can read and write only UDF 1.50;
		 * disallow remount of any higher version.
		 */
		if ((udf_vfsp->udf_miread > UDF_150) ||
		    (udf_vfsp->udf_miwrite > UDF_150)) {
			error = EINVAL;
			goto remountout;
		}

		/*
		 * read/write to read/write; all done
		 */
		if (udf_vfsp->udf_flags & UDF_FL_RW) {
			goto remountout;
		}

		/*
		 * Does the media type allow a writable mount
		 */
		if (udf_vfsp->udf_mtype != UDF_MT_OW) {
			error = EINVAL;
			goto remountout;
		}

		/*
		 * Read the metadata and check if it is
		 * possible to mount in rw mode
		 */
		tpt = ud_bread(vfsp->vfs_dev,
		    udf_vfsp->udf_iseq_loc << udf_vfsp->udf_l2d_shift,
		    udf_vfsp->udf_iseq_len);
		if (tpt->b_flags & B_ERROR) {
			error = EIO;
			goto remountout;
		}
		count = udf_vfsp->udf_iseq_len / DEV_BSIZE;
		addr = tpt->b_un.b_addr;
		for (index = 0; index < count; index++) {
			ttag = (struct tag *)(addr + index * DEV_BSIZE);
			desc_len = udf_vfsp->udf_iseq_len - (index * DEV_BSIZE);
			if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_INT,
			    udf_vfsp->udf_iseq_loc +
			    (index >> udf_vfsp->udf_l2d_shift),
			    1, desc_len) == 0) {
				struct log_vol_int_desc *lvid;

				lvid = (struct log_vol_int_desc *)ttag;

				if (SWAP_32(lvid->lvid_int_type) !=
				    LOG_VOL_CLOSE_INT) {
					error = EINVAL;
					goto remountout;
				}

				/*
				 * Copy new data to old data
				 */
				bcopy(udf_vfsp->udf_iseq->b_un.b_addr,
				    tpt->b_un.b_addr, udf_vfsp->udf_iseq_len);
				break;
			}
		}

		udf_vfsp->udf_flags = UDF_FL_RW;

		mutex_enter(&udf_vfsp->udf_lock);
		ud_sbwrite(udf_vfsp);
		mutex_exit(&udf_vfsp->udf_lock);
remountout:
		if (tpt != NULL) {
			tpt->b_flags = B_AGE | B_STALE;
			brelse(tpt);
		}
		return (error);
	}

	ASSERT(devvp != 0);
	/*
	 * Flush back any dirty pages on the block device to
	 * try and keep the buffer cache in sync with the page
	 * cache if someone is trying to use block devices when
	 * they really should be using the raw device.
	 */
	(void) VOP_PUTPAGE(common_specvp(devvp), (offset_t)0,
	    (uint32_t)0, B_INVAL, cr);


	/*
	 * Check if the file system is a valid udfs
	 * and fill the required fields in udf_vfs
	 */
#ifndef	__lint
	_NOTE(NO_COMPETING_THREADS_NOW);
#endif

	if ((lbsize = ud_get_lbsize(dev, &avd_loc)) == 0) {
		error = EINVAL;
		goto out;
	}

	udf_vfsp = ud_validate_and_fill_superblock(dev, lbsize, avd_loc);
	if (udf_vfsp == NULL) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Fill in vfs private data
	 */
	vfsp->vfs_fstype = udf_fstype;
	vfs_make_fsid(&vfsp->vfs_fsid, dev, udf_fstype);
	vfsp->vfs_data = (caddr_t)udf_vfsp;
	vfsp->vfs_dev = dev;
	vfsp->vfs_flag |= VFS_NOTRUNC;
	udf_vfsp->udf_devvp = devvp;

	udf_vfsp->udf_fsmnt = kmem_zalloc(strlen(name) + 1, KM_SLEEP);
	(void) strcpy(udf_vfsp->udf_fsmnt, name);

	udf_vfsp->udf_vfs = vfsp;
	udf_vfsp->udf_rdclustsz = udf_vfsp->udf_wrclustsz = maxphys;

	udf_vfsp->udf_mod = 0;


	lvid = udf_vfsp->udf_lvid;
	if (vfsp->vfs_flag & VFS_RDONLY) {
		/*
		 * We can read only UDF 1.50;
		 * disallow mount of any higher version.
		 */
		if (udf_vfsp->udf_miread > UDF_150) {
			error = EINVAL;
			goto out;
		}
		udf_vfsp->udf_flags = UDF_FL_RDONLY;
		if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
			udf_vfsp->udf_clean = UDF_CLEAN;
		} else {
			/* Do we have a VAT at the end of the recorded media */
			map = udf_vfsp->udf_maps;
			for (i = 0; i < udf_vfsp->udf_nmaps; i++) {
				if (map->udm_flags & UDM_MAP_VPM) {
					break;
				}
				map++;
			}
			if (i == udf_vfsp->udf_nmaps) {
				error = ENOSPC;
				goto out;
			}
			udf_vfsp->udf_clean = UDF_CLEAN;
		}
	} else {
		/*
		 * We can read and write only UDF 1.50;
		 * disallow mount of any higher version.
		 */
		if ((udf_vfsp->udf_miread > UDF_150) ||
		    (udf_vfsp->udf_miwrite > UDF_150)) {
			error = EINVAL;
			goto out;
		}
		/*
		 * Check if the media allows
		 * us to mount read/write
		 */
		if (udf_vfsp->udf_mtype != UDF_MT_OW) {
			error = EACCES;
			goto out;
		}

		/*
		 * Check if we have a VAT on writable media;
		 * we cannot use the media in the presence of a VAT,
		 * so deny the RW mount.
		 */
		map = udf_vfsp->udf_maps;
		ASSERT(map != NULL);
		for (i = 0; i < udf_vfsp->udf_nmaps; i++) {
			if (map->udm_flags & UDM_MAP_VPM) {
				error = EACCES;
				goto out;
			}
			map++;
		}

		/*
		 * Check if the domain Id allows
		 * us to write
		 */
		if (udf_vfsp->udf_lvd->lvd_dom_id.reg_ids[2] & 0x3) {
			error = EACCES;
			goto out;
		}
		udf_vfsp->udf_flags = UDF_FL_RW;

		if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
			udf_vfsp->udf_clean = UDF_CLEAN;
		} else {
			if (isroot) {
				udf_vfsp->udf_clean = UDF_DIRTY;
			} else {
				error = ENOSPC;
				goto out;
			}
		}
	}

	mutex_init(&udf_vfsp->udf_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&udf_vfsp->udf_rename_lck, NULL, MUTEX_DEFAULT, NULL);

#ifndef	__lint
	_NOTE(COMPETING_THREADS_NOW);
#endif
	if (error = ud_iget(vfsp, udf_vfsp->udf_ricb_prn,
	    udf_vfsp->udf_ricb_loc, &rip, NULL, cr)) {
		mutex_destroy(&udf_vfsp->udf_lock);
		goto out;
	}


	/*
	 * Get the root inode and
	 * initialize the root vnode
	 */
	rvp = ITOV(rip);
	mutex_enter(&rvp->v_lock);
	rvp->v_flag |= VROOT;
	mutex_exit(&rvp->v_lock);
	udf_vfsp->udf_root = rvp;


	if (why == ROOT_INIT && isroot)
		rootvp = devvp;

	ud_vfs_add(udf_vfsp);

	if (udf_vfsp->udf_flags == UDF_FL_RW) {
		udf_vfsp->udf_clean = UDF_DIRTY;
		ud_update_superblock(vfsp);
	}

	return (0);

out:
	ud_destroy_fsp(udf_vfsp);
	if (needclose) {
		(void) VOP_CLOSE(devvp, (vfsp->vfs_flag & VFS_RDONLY) ?
		    FREAD : FREAD|FWRITE, 1, (offset_t)0, cr);
		bflush(dev);
		binval(dev);
	}
	VN_RELE(devvp);

	return (error);
}


static struct udf_vfs *
ud_validate_and_fill_superblock(dev_t dev, int32_t bsize, uint32_t avd_loc)
{
	int32_t error, count, index, shift;
	uint32_t dummy, vds_loc;
	caddr_t addr;
	daddr_t blkno, lblkno;
	struct buf *secbp, *bp;
	struct tag *ttag;
	struct anch_vol_desc_ptr *avdp;
	struct file_set_desc *fsd;
	struct udf_vfs *udf_vfsp = NULL;
	struct pmap_hdr *hdr;
	struct pmap_typ1 *typ1;
	struct pmap_typ2 *typ2;
	struct ud_map *map;
	int32_t desc_len;

	ud_printf("ud_validate_and_fill_superblock\n");

	if (bsize < DEV_BSIZE) {
		return (NULL);
	}
	shift = 0;
	while ((bsize >> shift) > DEV_BSIZE) {
		shift++;
	}

	/*
	 * Read Anchor Volume Descriptor
	 * Verify it and get the location of
	 * Main Volume Descriptor Sequence
	 */
	secbp = ud_bread(dev, avd_loc << shift, ANCHOR_VOL_DESC_LEN);
	if ((error = geterror(secbp)) != 0) {
		cmn_err(CE_NOTE,
		    "udfs : Could not read Anchor Volume Desc %x", error);
		brelse(secbp);
		return (NULL);
	}
	avdp = (struct anch_vol_desc_ptr *)secbp->b_un.b_addr;
	if (ud_verify_tag_and_desc(&avdp->avd_tag, UD_ANCH_VOL_DESC,
	    avd_loc, 1, ANCHOR_VOL_DESC_LEN) != 0) {
		brelse(secbp);
		return (NULL);
	}
	udf_vfsp = (struct udf_vfs *)
	    kmem_zalloc(sizeof (struct udf_vfs), KM_SLEEP);
	udf_vfsp->udf_mvds_loc = SWAP_32(avdp->avd_main_vdse.ext_loc);
	udf_vfsp->udf_mvds_len = SWAP_32(avdp->avd_main_vdse.ext_len);
	udf_vfsp->udf_rvds_loc = SWAP_32(avdp->avd_res_vdse.ext_loc);
	udf_vfsp->udf_rvds_len = SWAP_32(avdp->avd_res_vdse.ext_len);
	secbp->b_flags = B_AGE | B_STALE;
	brelse(secbp);

	/*
	 * Read Main Volume Descriptor Sequence
	 * and process it
	 */
	vds_loc = udf_vfsp->udf_mvds_loc;
	secbp = ud_bread(dev, vds_loc << shift,
	    udf_vfsp->udf_mvds_len);
	if ((error = geterror(secbp)) != 0) {
		brelse(secbp);
		cmn_err(CE_NOTE,
		    "udfs : Could not read Main Volume Desc %x", error);

		vds_loc = udf_vfsp->udf_rvds_loc;
		secbp = ud_bread(dev, vds_loc << shift,
		    udf_vfsp->udf_rvds_len);
		if ((error = geterror(secbp)) != 0) {
			brelse(secbp);
			cmn_err(CE_NOTE,
			    "udfs : Could not read Res Volume Desc %x", error);
			return (NULL);
		}
	}

	udf_vfsp->udf_vds = ngeteblk(udf_vfsp->udf_mvds_len);
	bp = udf_vfsp->udf_vds;
	bp->b_edev = dev;
	bp->b_dev = cmpdev(dev);
	bp->b_blkno = vds_loc << shift;
	bp->b_bcount = udf_vfsp->udf_mvds_len;
	bcopy(secbp->b_un.b_addr, bp->b_un.b_addr, udf_vfsp->udf_mvds_len);
	secbp->b_flags |= B_STALE | B_AGE;
	brelse(secbp);


	count = udf_vfsp->udf_mvds_len / DEV_BSIZE;
	addr = bp->b_un.b_addr;
	for (index = 0; index < count; index++) {
		ttag = (struct tag *)(addr + index * DEV_BSIZE);
		desc_len = udf_vfsp->udf_mvds_len - (index * DEV_BSIZE);
		if (ud_verify_tag_and_desc(ttag, UD_PRI_VOL_DESC,
		    vds_loc + (index >> shift),
		    1, desc_len) == 0) {
			if (udf_vfsp->udf_pvd == NULL) {
				udf_vfsp->udf_pvd =
				    (struct pri_vol_desc *)ttag;
			} else {
				struct pri_vol_desc *opvd, *npvd;

				opvd = udf_vfsp->udf_pvd;
				npvd = (struct pri_vol_desc *)ttag;

				if ((strncmp(opvd->pvd_vsi,
				    npvd->pvd_vsi, 128) == 0) &&
				    (strncmp(opvd->pvd_vol_id,
				    npvd->pvd_vol_id, 32) == 0) &&
				    (strncmp((caddr_t)&opvd->pvd_desc_cs,
				    (caddr_t)&npvd->pvd_desc_cs,
				    sizeof (charspec_t)) == 0)) {

					if (SWAP_32(opvd->pvd_vdsn) <
					    SWAP_32(npvd->pvd_vdsn)) {
						udf_vfsp->udf_pvd = npvd;
					}
				} else {
					goto out;
				}
			}
		} else if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_DESC,
		    vds_loc + (index >> shift),
		    1, desc_len) == 0) {
			struct log_vol_desc *lvd;

			lvd = (struct log_vol_desc *)ttag;
			if (strncmp(lvd->lvd_dom_id.reg_id,
			    UDF_DOMAIN_NAME, 23) != 0) {
				printf("Domain ID in lvd is not valid\n");
				goto out;
			}

			if (udf_vfsp->udf_lvd == NULL) {
				udf_vfsp->udf_lvd = lvd;
			} else {
				struct log_vol_desc *olvd;

				olvd = udf_vfsp->udf_lvd;
				if ((strncmp((caddr_t)&olvd->lvd_desc_cs,
				    (caddr_t)&lvd->lvd_desc_cs,
				    sizeof (charspec_t)) == 0) &&
				    (strncmp(olvd->lvd_lvid,
				    lvd->lvd_lvid, 128) == 0)) {
					if (SWAP_32(olvd->lvd_vdsn) <
					    SWAP_32(lvd->lvd_vdsn)) {
						udf_vfsp->udf_lvd = lvd;
					}
				} else {
					goto out;
				}
			}
		} else if (ud_verify_tag_and_desc(ttag, UD_PART_DESC,
		    vds_loc + (index >> shift),
		    1, desc_len) == 0) {
			int32_t i;
			struct phdr_desc *hdr;
			struct part_desc *pdesc;
			struct ud_part *pnew, *pold, *part;

			pdesc = (struct part_desc *)ttag;
			pold = udf_vfsp->udf_parts;
			for (i = 0; i < udf_vfsp->udf_npart; i++) {
				if (pold->udp_number ==
				    SWAP_16(pdesc->pd_pnum)) {
					if (SWAP_32(pdesc->pd_vdsn) >
					    pold->udp_seqno) {
						pold->udp_seqno =
						    SWAP_32(pdesc->pd_vdsn);
						pold->udp_access =
						    SWAP_32(pdesc->pd_acc_type);
						pold->udp_start =
						    SWAP_32(pdesc->pd_part_start);
						pold->udp_length =
						    SWAP_32(pdesc->pd_part_length);
					}
					goto loop_end;
				}
				pold++;
			}
			pold = udf_vfsp->udf_parts;
			udf_vfsp->udf_npart++;
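			/*
			 * Grow the partition array by one entry: allocate a
			 * zeroed array large enough for udf_npart entries,
			 * copy the old entries over, free the old array and
			 * then fill in the descriptor that was just found.
			 */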
			pnew = kmem_zalloc(udf_vfsp->udf_npart *
			    sizeof (struct ud_part), KM_SLEEP);
			udf_vfsp->udf_parts = pnew;
			if (pold) {
				bcopy(pold, pnew,
				    sizeof (struct ud_part) *
				    (udf_vfsp->udf_npart - 1));
				kmem_free(pold,
				    sizeof (struct ud_part) *
				    (udf_vfsp->udf_npart - 1));
			}
			part = pnew + (udf_vfsp->udf_npart - 1);
			part->udp_number = SWAP_16(pdesc->pd_pnum);
			part->udp_seqno = SWAP_32(pdesc->pd_vdsn);
			part->udp_access = SWAP_32(pdesc->pd_acc_type);
			part->udp_start = SWAP_32(pdesc->pd_part_start);
			part->udp_length = SWAP_32(pdesc->pd_part_length);
			part->udp_last_alloc = 0;

			/*
			 * Figure out space bitmaps
			 * or space tables
			 */
			hdr = (struct phdr_desc *)pdesc->pd_pc_use;
			if (hdr->phdr_ust.sad_ext_len) {
				part->udp_flags = UDP_SPACETBLS;
				part->udp_unall_loc =
				    SWAP_32(hdr->phdr_ust.sad_ext_loc);
				part->udp_unall_len =
				    SWAP_32(hdr->phdr_ust.sad_ext_len);
				part->udp_freed_loc =
				    SWAP_32(hdr->phdr_fst.sad_ext_loc);
				part->udp_freed_len =
				    SWAP_32(hdr->phdr_fst.sad_ext_len);
			} else {
				part->udp_flags = UDP_BITMAPS;
				part->udp_unall_loc =
				    SWAP_32(hdr->phdr_usb.sad_ext_loc);
				part->udp_unall_len =
				    SWAP_32(hdr->phdr_usb.sad_ext_len);
				part->udp_freed_loc =
				    SWAP_32(hdr->phdr_fsb.sad_ext_loc);
				part->udp_freed_len =
				    SWAP_32(hdr->phdr_fsb.sad_ext_len);
			}
		} else if (ud_verify_tag_and_desc(ttag, UD_TERM_DESC,
		    vds_loc + (index >> shift),
		    1, desc_len) == 0) {

			break;
		}
loop_end:
		;
	}
	if ((udf_vfsp->udf_pvd == NULL) ||
	    (udf_vfsp->udf_lvd == NULL) ||
	    (udf_vfsp->udf_parts == NULL)) {
		goto out;
	}

	/*
	 * Process Primary Volume Descriptor
	 */
	(void) strncpy(udf_vfsp->udf_volid, udf_vfsp->udf_pvd->pvd_vol_id, 32);
	udf_vfsp->udf_volid[31] = '\0';
	udf_vfsp->udf_tsno = SWAP_16(udf_vfsp->udf_pvd->pvd_tag.tag_sno);

	/*
	 * Process Logical Volume Descriptor
	 */
	udf_vfsp->udf_lbsize =
	    SWAP_32(udf_vfsp->udf_lvd->lvd_log_bsize);
	udf_vfsp->udf_lbmask = udf_vfsp->udf_lbsize - 1;
	udf_vfsp->udf_l2d_shift = shift;
	udf_vfsp->udf_l2b_shift = shift + DEV_BSHIFT;

	/*
	 * Check if the media is in the proper domain.
	 */
	if (strcmp(udf_vfsp->udf_lvd->lvd_dom_id.reg_id,
	    UDF_DOMAIN_NAME) != 0) {
		goto out;
	}

	/*
	 * The AVDS offset does not match the lbsize
	 * recorded in the lvd; bail out.
	 */
	if (udf_vfsp->udf_lbsize != bsize) {
		goto out;
	}

	udf_vfsp->udf_iseq_loc =
	    SWAP_32(udf_vfsp->udf_lvd->lvd_int_seq_ext.ext_loc);
	udf_vfsp->udf_iseq_len =
	    SWAP_32(udf_vfsp->udf_lvd->lvd_int_seq_ext.ext_len);

	udf_vfsp->udf_fsd_prn =
	    SWAP_16(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_prn);
	udf_vfsp->udf_fsd_loc =
	    SWAP_32(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_loc);
	udf_vfsp->udf_fsd_len =
	    SWAP_32(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_len);


	/*
	 * Process partitions
	 */
	udf_vfsp->udf_mtype = udf_vfsp->udf_parts[0].udp_access;
	for (index = 0; index < udf_vfsp->udf_npart; index++) {
		if (udf_vfsp->udf_parts[index].udp_access <
		    udf_vfsp->udf_mtype) {
			udf_vfsp->udf_mtype =
			    udf_vfsp->udf_parts[index].udp_access;
		}
	}
	if ((udf_vfsp->udf_mtype < UDF_MT_RO) ||
	    (udf_vfsp->udf_mtype > UDF_MT_OW)) {
		udf_vfsp->udf_mtype = UDF_MT_RO;
	}

	udf_vfsp->udf_nmaps = 0;
	hdr = (struct pmap_hdr *)udf_vfsp->udf_lvd->lvd_pmaps;
	count = SWAP_32(udf_vfsp->udf_lvd->lvd_num_pmaps);
	for (index = 0; index < count; index++) {

		if ((hdr->maph_type == MAP_TYPE1) &&
		    (hdr->maph_length == MAP_TYPE1_LEN)) {
			typ1 = (struct pmap_typ1 *)hdr;

			map = udf_vfsp->udf_maps;
			udf_vfsp->udf_maps =
			    kmem_zalloc(sizeof (struct ud_map) *
			    (udf_vfsp->udf_nmaps + 1), KM_SLEEP);
			if (map != NULL) {
				bcopy(map, udf_vfsp->udf_maps,
				    sizeof (struct ud_map) *
				    udf_vfsp->udf_nmaps);
				kmem_free(map,
				    sizeof (struct ud_map) *
				    udf_vfsp->udf_nmaps);
			}
			map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
			map->udm_flags = UDM_MAP_NORM;
			map->udm_vsn = SWAP_16(typ1->map1_vsn);
			map->udm_pn = SWAP_16(typ1->map1_pn);
			udf_vfsp->udf_nmaps++;
		} else if ((hdr->maph_type == MAP_TYPE2) &&
		    (hdr->maph_length == MAP_TYPE2_LEN)) {
			typ2 = (struct pmap_typ2 *)hdr;

			if (strncmp(typ2->map2_pti.reg_id,
			    UDF_VIRT_PART, 23) == 0) {
				/*
				 * Add this to the normal
				 * partition table so that
				 * we do not
				 */
				map = udf_vfsp->udf_maps;
				udf_vfsp->udf_maps =
				    kmem_zalloc(sizeof (struct ud_map) *
				    (udf_vfsp->udf_nmaps + 1), KM_SLEEP);
				if (map != NULL) {
					bcopy(map, udf_vfsp->udf_maps,
					    sizeof (struct ud_map) *
					    udf_vfsp->udf_nmaps);
					kmem_free(map,
					    sizeof (struct ud_map) *
					    udf_vfsp->udf_nmaps);
				}
				map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
				map->udm_flags = UDM_MAP_VPM;
				map->udm_vsn = SWAP_16(typ2->map2_vsn);
				map->udm_pn = SWAP_16(typ2->map2_pn);
				udf_vfsp->udf_nmaps++;
				if (error = ud_get_last_block(dev, &lblkno)) {
					goto out;
				}
				if (error = ud_val_get_vat(udf_vfsp, dev,
				    lblkno, map)) {
					goto out;
				}
			} else if (strncmp(typ2->map2_pti.reg_id,
			    UDF_SPAR_PART, 23) == 0) {

				if (SWAP_16(typ2->map2_pl) != 32) {
					printf(
					    "Packet Length is not valid %x\n",
					    SWAP_16(typ2->map2_pl));
					goto out;
				}
				if ((typ2->map2_nst < 1) ||
				    (typ2->map2_nst > 4)) {
					goto out;
				}
				map = udf_vfsp->udf_maps;
				udf_vfsp->udf_maps =
				    kmem_zalloc(sizeof (struct ud_map) *
				    (udf_vfsp->udf_nmaps + 1), KM_SLEEP);
				if (map != NULL) {
					bcopy(map, udf_vfsp->udf_maps,
					    sizeof (struct ud_map) *
					    udf_vfsp->udf_nmaps);
					kmem_free(map,
					    sizeof (struct ud_map) *
					    udf_vfsp->udf_nmaps);
				}
				map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
				map->udm_flags = UDM_MAP_SPM;
				map->udm_vsn = SWAP_16(typ2->map2_vsn);
				map->udm_pn = SWAP_16(typ2->map2_pn);

				udf_vfsp->udf_nmaps++;

				if (error = ud_read_sparing_tbls(udf_vfsp,
				    dev, map, typ2)) {
					goto out;
				}
			} else {
				/*
				 * Unknown type of partition
				 * Bail out
				 */
				goto out;
			}
		} else {
			/*
			 * Unknown type of partition
			 * Bail out
			 */
			goto out;
		}
		hdr = (struct pmap_hdr *)(((uint8_t *)hdr) + hdr->maph_length);
	}


	/*
	 * Read Logical Volume Integrity Sequence
	 * and process it
	 */
	secbp = ud_bread(dev, udf_vfsp->udf_iseq_loc << shift,
	    udf_vfsp->udf_iseq_len);
	if ((error = geterror(secbp)) != 0) {
		cmn_err(CE_NOTE,
		    "udfs : Could not read Logical Volume Integrity Sequence %x",
		    error);
		brelse(secbp);
		goto out;
	}
	udf_vfsp->udf_iseq = ngeteblk(udf_vfsp->udf_iseq_len);
	bp = udf_vfsp->udf_iseq;
	bp->b_edev = dev;
	bp->b_dev = cmpdev(dev);
	bp->b_blkno = udf_vfsp->udf_iseq_loc << shift;
	bp->b_bcount = udf_vfsp->udf_iseq_len;
	bcopy(secbp->b_un.b_addr, bp->b_un.b_addr, udf_vfsp->udf_iseq_len);
	secbp->b_flags |= B_STALE | B_AGE;
	brelse(secbp);

	count = udf_vfsp->udf_iseq_len / DEV_BSIZE;
	addr = bp->b_un.b_addr;
	for (index = 0; index < count; index++) {
		ttag = (struct tag *)(addr + index * DEV_BSIZE);
		desc_len = udf_vfsp->udf_iseq_len - (index * DEV_BSIZE);
		if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_INT,
		    udf_vfsp->udf_iseq_loc + (index >> shift),
		    1, desc_len) == 0) {

			struct log_vol_int_desc *lvid;

			lvid = (struct log_vol_int_desc *)ttag;
			udf_vfsp->udf_lvid = lvid;

			if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
				udf_vfsp->udf_clean = UDF_CLEAN;
			} else {
				udf_vfsp->udf_clean = UDF_DIRTY;
			}

			/*
			 * update superblock with the metadata
			 */
			ud_convert_to_superblock(udf_vfsp, lvid);
			break;
		}
	}

	if (udf_vfsp->udf_lvid == NULL) {
		goto out;
	}

	if ((blkno = ud_xlate_to_daddr(udf_vfsp,
	    udf_vfsp->udf_fsd_prn, udf_vfsp->udf_fsd_loc,
	    1, &dummy)) == 0) {
		goto out;
	}
	secbp = ud_bread(dev, blkno << shift, udf_vfsp->udf_fsd_len);
	if ((error = geterror(secbp)) != 0) {
		cmn_err(CE_NOTE,
		    "udfs : Could not read File Set Descriptor %x", error);
		brelse(secbp);
		goto out;
	}
	fsd = (struct file_set_desc *)secbp->b_un.b_addr;
	if (ud_verify_tag_and_desc(&fsd->fsd_tag, UD_FILE_SET_DESC,
	    udf_vfsp->udf_fsd_loc,
	    1, udf_vfsp->udf_fsd_len) != 0) {
		secbp->b_flags = B_AGE | B_STALE;
		brelse(secbp);
		goto out;
	}
	udf_vfsp->udf_ricb_prn = SWAP_16(fsd->fsd_root_icb.lad_ext_prn);
	udf_vfsp->udf_ricb_loc = SWAP_32(fsd->fsd_root_icb.lad_ext_loc);
	udf_vfsp->udf_ricb_len = SWAP_32(fsd->fsd_root_icb.lad_ext_len);
	secbp->b_flags = B_AGE | B_STALE;
	brelse(secbp);
	udf_vfsp->udf_root_blkno = ud_xlate_to_daddr(udf_vfsp,
	    udf_vfsp->udf_ricb_prn, udf_vfsp->udf_ricb_loc,
	    1, &dummy);

	return (udf_vfsp);
out:
	ud_destroy_fsp(udf_vfsp);

	return (NULL);
}
/*
 * Release/free resources from one ud_map; map data was zalloc'd in
 * ud_validate_and_fill_superblock() and fields may later point to
 * valid data.
 */
static void
ud_free_map(struct ud_map *map)
{
	uint32_t n;

	if (map->udm_flags & UDM_MAP_VPM) {
		if (map->udm_count) {
			kmem_free(map->udm_count,
			    map->udm_nent * sizeof (*map->udm_count));
			map->udm_count = NULL;
		}
		if (map->udm_bp) {
			for (n = 0; n < map->udm_nent; n++) {
				if (map->udm_bp[n])
					brelse(map->udm_bp[n]);
			}
			kmem_free(map->udm_bp,
			    map->udm_nent * sizeof (*map->udm_bp));
			map->udm_bp = NULL;
		}
		if (map->udm_addr) {
			kmem_free(map->udm_addr,
			    map->udm_nent * sizeof (*map->udm_addr));
			map->udm_addr = NULL;
		}
	}
	if (map->udm_flags & UDM_MAP_SPM) {
		for (n = 0; n < MAX_SPM; n++) {
			if (map->udm_sbp[n]) {
				brelse(map->udm_sbp[n]);
				map->udm_sbp[n] = NULL;
				map->udm_spaddr[n] = NULL;
			}
		}
	}
}

void
ud_destroy_fsp(struct udf_vfs *udf_vfsp)
{
	int32_t i;

	ud_printf("ud_destroy_fsp\n");
	if (udf_vfsp == NULL)
		return;

	if (udf_vfsp->udf_maps) {
		for (i = 0; i < udf_vfsp->udf_nmaps; i++)
			ud_free_map(&udf_vfsp->udf_maps[i]);

		kmem_free(udf_vfsp->udf_maps,
		    udf_vfsp->udf_nmaps * sizeof (*udf_vfsp->udf_maps));
	}

	if (udf_vfsp->udf_parts) {
		kmem_free(udf_vfsp->udf_parts,
		    udf_vfsp->udf_npart * sizeof (*udf_vfsp->udf_parts));
	}
	if (udf_vfsp->udf_iseq) {
		udf_vfsp->udf_iseq->b_flags |= (B_STALE|B_AGE);
		brelse(udf_vfsp->udf_iseq);
	}
	if (udf_vfsp->udf_vds) {
		udf_vfsp->udf_vds->b_flags |= (B_STALE|B_AGE);
		brelse(udf_vfsp->udf_vds);
	}
	if (udf_vfsp->udf_vfs)
		ud_vfs_remove(udf_vfsp);
	if (udf_vfsp->udf_fsmnt) {
		kmem_free(udf_vfsp->udf_fsmnt,
		    strlen(udf_vfsp->udf_fsmnt) + 1);
	}
	kmem_free(udf_vfsp, sizeof (*udf_vfsp));
}

void
ud_convert_to_superblock(struct udf_vfs *udf_vfsp,
	struct log_vol_int_desc *lvid)
{
	int32_t i, c;
	uint32_t *temp;
	struct ud_part *ud_part;
	struct lvid_iu *iu;

	udf_vfsp->udf_maxuniq = SWAP_64(lvid->lvid_uniqid);
	temp = lvid->lvid_fst;
	c = SWAP_32(lvid->lvid_npart);
	ud_part = udf_vfsp->udf_parts;
	for (i = 0; i < c; i++) {
		if (i >= udf_vfsp->udf_npart) {
			continue;
		}
		ud_part->udp_nfree = SWAP_32(temp[i]);
		ud_part->udp_nblocks = SWAP_32(temp[c + i]);
		udf_vfsp->udf_freeblks += SWAP_32(temp[i]);
		udf_vfsp->udf_totalblks += SWAP_32(temp[c + i]);
		ud_part++;
	}

	iu = (struct lvid_iu *)(temp + c * 2);
	udf_vfsp->udf_nfiles = SWAP_32(iu->lvidiu_nfiles);
	udf_vfsp->udf_ndirs = SWAP_32(iu->lvidiu_ndirs);
	udf_vfsp->udf_miread = BCD2HEX_16(SWAP_16(iu->lvidiu_mread));
	udf_vfsp->udf_miwrite = BCD2HEX_16(SWAP_16(iu->lvidiu_mwrite));
	udf_vfsp->udf_mawrite = BCD2HEX_16(SWAP_16(iu->lvidiu_maxwr));
}

void
ud_update_superblock(struct vfs *vfsp)
{
	struct udf_vfs *udf_vfsp;

	ud_printf("ud_update_superblock\n");

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;

	mutex_enter(&udf_vfsp->udf_lock);
	ud_sbwrite(udf_vfsp);
	mutex_exit(&udf_vfsp->udf_lock);
}


#include <sys/dkio.h>
#include <sys/cdio.h>
#include <sys/vtoc.h>
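/*
 * The "last block" value returned below is simply the size (in 512-byte
 * sectors) of the slice being mounted, taken from the VTOC entry for the
 * slice that the DKIOCINFO ioctl identifies.  It is used to locate anchor
 * points and the VAT near the end of the recorded media.
 */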
/*
 * This part of the code is known to work only on sparc.  It needs
 * to be evaluated before using it with x86.
 */
int32_t
ud_get_last_block(dev_t dev, daddr_t *blkno)
{
	struct vtoc vtoc;
	struct dk_cinfo dki_info;
	int32_t rval, error;

	if ((error = cdev_ioctl(dev, DKIOCGVTOC, (intptr_t)&vtoc,
	    FKIOCTL|FREAD|FNATIVE, CRED(), &rval)) != 0) {
		cmn_err(CE_NOTE, "Could not get the vtoc information");
		return (error);
	}

	if (vtoc.v_sanity != VTOC_SANE) {
		return (EINVAL);
	}
	if ((error = cdev_ioctl(dev, DKIOCINFO, (intptr_t)&dki_info,
	    FKIOCTL|FREAD|FNATIVE, CRED(), &rval)) != 0) {
		cmn_err(CE_NOTE, "Could not get the slice information");
		return (error);
	}

	if (dki_info.dki_partition > V_NUMPAR) {
		return (EINVAL);
	}


	*blkno = vtoc.v_part[dki_info.dki_partition].p_size;

	return (0);
}

/* Search sequentially N - 2, N, N - 152, N - 150 for vat icb */
/*
 * int32_t ud_sub_blks[] = {2, 0, 152, 150};
 */
int32_t ud_sub_blks[] = {152, 150, 2, 0};
int32_t ud_sub_count = 4;

/*
 * Validate the VAT ICB
 */
static int32_t
ud_val_get_vat(struct udf_vfs *udf_vfsp, dev_t dev,
	daddr_t blkno, struct ud_map *udm)
{
	struct buf *secbp;
	struct file_entry *fe;
	int32_t end_loc, i, j, ad_type;
	struct short_ad *sad;
	struct long_ad *lad;
	uint32_t count, blk;
	struct ud_part *ud_part;
	int err = 0;

	end_loc = (blkno >> udf_vfsp->udf_l2d_shift) - 1;

	for (i = 0; i < ud_sub_count; i++) {
		udm->udm_vat_icb = end_loc - ud_sub_blks[i];

		secbp = ud_bread(dev,
		    udm->udm_vat_icb << udf_vfsp->udf_l2d_shift,
		    udf_vfsp->udf_lbsize);
		ASSERT(secbp->b_un.b_addr);

		fe = (struct file_entry *)secbp->b_un.b_addr;
		if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY, 0,
		    0, 0) == 0) {
			if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
			    SWAP_32(fe->fe_tag.tag_loc),
			    1, udf_vfsp->udf_lbsize) == 0) {
				if (fe->fe_icb_tag.itag_ftype == 0) {
					break;
				}
			}
		}
		secbp->b_flags |= B_AGE | B_STALE;
		brelse(secbp);
	}
	if (i == ud_sub_count) {
		return (EINVAL);
	}

	ad_type = SWAP_16(fe->fe_icb_tag.itag_flags) & 0x3;
	if (ad_type == ICB_FLAG_ONE_AD) {
		udm->udm_nent = 1;
	} else if (ad_type == ICB_FLAG_SHORT_AD) {
		udm->udm_nent =
		    SWAP_32(fe->fe_len_adesc) / sizeof (struct short_ad);
	} else if (ad_type == ICB_FLAG_LONG_AD) {
		udm->udm_nent =
		    SWAP_32(fe->fe_len_adesc) / sizeof (struct long_ad);
	} else {
		err = EINVAL;
		goto end;
	}

	udm->udm_count = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_count),
	    KM_SLEEP);
	udm->udm_bp = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_bp),
	    KM_SLEEP);
	udm->udm_addr = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_addr),
	    KM_SLEEP);

	if (ad_type == ICB_FLAG_ONE_AD) {
		udm->udm_count[0] = (SWAP_64(fe->fe_info_len) - 36) /
		    sizeof (uint32_t);
		udm->udm_bp[0] = secbp;
		udm->udm_addr[0] = (uint32_t *)
		    &fe->fe_spec[SWAP_32(fe->fe_len_ear)];
		return (0);
	}
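	/*
	 * Walk the allocation descriptors of the VAT file entry.  Each
	 * extent is read into its own buffer; udm_count[] records how many
	 * 32-bit VAT entries that extent holds (the trailing 36 bytes of
	 * the VAT are not map entries, so they are excluded for the last
	 * extent below).
	 */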
	for (i = 0; i < udm->udm_nent; i++) {
		if (ad_type == ICB_FLAG_SHORT_AD) {
			sad = (struct short_ad *)
			    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
			sad += i;
			count = SWAP_32(sad->sad_ext_len);
			blk = SWAP_32(sad->sad_ext_loc);
		} else {
			lad = (struct long_ad *)
			    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
			lad += i;
			count = SWAP_32(lad->lad_ext_len);
			blk = SWAP_32(lad->lad_ext_loc);
			ASSERT(SWAP_16(lad->lad_ext_prn) == udm->udm_pn);
		}
		if ((count & 0x3FFFFFFF) == 0) {
			break;
		}
		if (i < udm->udm_nent - 1) {
			udm->udm_count[i] = count / 4;
		} else {
			udm->udm_count[i] = (count - 36) / 4;
		}
		ud_part = udf_vfsp->udf_parts;
		for (j = 0; j < udf_vfsp->udf_npart; j++) {
			if (udm->udm_pn == ud_part->udp_number) {
				blk = ud_part->udp_start + blk;
				break;
			}
		}
		if (j == udf_vfsp->udf_npart) {
			err = EINVAL;
			break;
		}

		count = (count + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		udm->udm_bp[i] = ud_bread(dev,
		    blk << udf_vfsp->udf_l2d_shift, count);
		if ((udm->udm_bp[i]->b_error != 0) ||
		    (udm->udm_bp[i]->b_resid)) {
			err = EINVAL;
			break;
		}
		udm->udm_addr[i] = (uint32_t *)udm->udm_bp[i]->b_un.b_addr;
	}

end:
	if (err)
		ud_free_map(udm);
	secbp->b_flags |= B_AGE | B_STALE;
	brelse(secbp);
	return (err);
}

int32_t
ud_read_sparing_tbls(struct udf_vfs *udf_vfsp,
	dev_t dev, struct ud_map *map, struct pmap_typ2 *typ2)
{
	int32_t index, valid = 0;
	uint32_t sz;
	struct buf *bp;
	struct stbl *stbl;

	map->udm_plen = SWAP_16(typ2->map2_pl);
	map->udm_nspm = typ2->map2_nst;
	map->udm_spsz = SWAP_32(typ2->map2_sest);
	sz = (map->udm_spsz + udf_vfsp->udf_lbmask) & ~udf_vfsp->udf_lbmask;
	if (sz == 0) {
		return (0);
	}

	for (index = 0; index < map->udm_nspm; index++) {
		map->udm_loc[index] = SWAP_32(typ2->map2_st[index]);

		bp = ud_bread(dev,
		    map->udm_loc[index] << udf_vfsp->udf_l2d_shift, sz);
		if ((bp->b_error != 0) || (bp->b_resid)) {
			brelse(bp);
			continue;
		}
		stbl = (struct stbl *)bp->b_un.b_addr;
		if (strncmp(stbl->stbl_si.reg_id, UDF_SPAR_TBL, 23) != 0) {
			printf("Sparing Identifier does not match\n");
			bp->b_flags |= B_AGE | B_STALE;
			brelse(bp);
			continue;
		}
		map->udm_sbp[index] = bp;
		map->udm_spaddr[index] = bp->b_un.b_addr;
#ifdef	UNDEF
{
		struct stbl_entry *te;
		int32_t i, tbl_len;

		te = (struct stbl_entry *)&stbl->stbl_entry;
		tbl_len = SWAP_16(stbl->stbl_len);

		printf("%x %x\n", tbl_len, SWAP_32(stbl->stbl_seqno));
		printf("%x %x\n", bp->b_un.b_addr, te);

		for (i = 0; i < tbl_len; i++) {
			printf("%x %x\n", SWAP_32(te->sent_ol),
			    SWAP_32(te->sent_ml));
			te++;
		}
}
#endif
		valid++;
	}

	if (valid) {
		return (0);
	}
	return (EINVAL);
}

uint32_t
ud_get_lbsize(dev_t dev, uint32_t *loc)
{
	int32_t bsize, shift, index, end_index;
	daddr_t last_block;
	uint32_t avd_loc;
	struct buf *bp;
	struct anch_vol_desc_ptr *avdp;
	uint32_t session_offset = 0;
	int32_t rval;

	if (ud_get_last_block(dev, &last_block) != 0) {
		end_index = 1;
	} else {
		end_index = 3;
	}

	if (cdev_ioctl(dev, CDROMREADOFFSET, (intptr_t)&session_offset,
	    FKIOCTL|FREAD|FNATIVE, CRED(), &rval) != 0) {
		session_offset = 0;
	}
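	/*
	 * Try each candidate location for the Anchor Volume Descriptor
	 * Pointer: logical sector 256 of the session (index 0) and, when
	 * the size of the media is known, locations relative to the last
	 * recorded sector (index 1 and 2).  For each candidate, every block
	 * size from DEV_BSIZE up to MAXBSIZE is attempted.
	 */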
	for (index = 0; index < end_index; index++) {

		for (bsize = DEV_BSIZE, shift = 0;
		    bsize <= MAXBSIZE; bsize <<= 1, shift++) {

			if (index == 0) {
				avd_loc = 256;
				if (bsize <= 2048) {
					avd_loc +=
					    session_offset * 2048 / bsize;
				} else {
					avd_loc +=
					    session_offset / (bsize / 2048);
				}
			} else if (index == 1) {
				avd_loc = last_block - (1 << shift);
			} else {
				avd_loc = last_block - (256 << shift);
			}

			bp = ud_bread(dev, avd_loc << shift,
			    ANCHOR_VOL_DESC_LEN);
			if (geterror(bp) != 0) {
				brelse(bp);
				continue;
			}

			/*
			 * Verify if we have avdp here
			 */
			avdp = (struct anch_vol_desc_ptr *)bp->b_un.b_addr;
			if (ud_verify_tag_and_desc(&avdp->avd_tag,
			    UD_ANCH_VOL_DESC, avd_loc,
			    1, ANCHOR_VOL_DESC_LEN) != 0) {
				bp->b_flags |= B_AGE | B_STALE;
				brelse(bp);
				continue;
			}
			bp->b_flags |= B_AGE | B_STALE;
			brelse(bp);
			*loc = avd_loc;
			return (bsize);
		}
	}

	/*
	 * Did not find an AVD at any of the locations
	 */
	return (0);
}

static int
udfinit(int fstype, char *name)
{
	static const fs_operation_def_t udf_vfsops_template[] = {
		VFSNAME_MOUNT, udf_mount,
		VFSNAME_UNMOUNT, udf_unmount,
		VFSNAME_ROOT, udf_root,
		VFSNAME_STATVFS, udf_statvfs,
		VFSNAME_SYNC, (fs_generic_func_p) udf_sync,
		VFSNAME_VGET, udf_vget,
		VFSNAME_MOUNTROOT, udf_mountroot,
		NULL, NULL
	};
	extern struct vnodeops *udf_vnodeops;
	extern const fs_operation_def_t udf_vnodeops_template[];
	int error;

	ud_printf("udfinit\n");

	error = vfs_setfsops(fstype, udf_vfsops_template, NULL);
	if (error != 0) {
		cmn_err(CE_WARN, "udfinit: bad vfs ops template");
		return (error);
	}

	error = vn_make_ops(name, udf_vnodeops_template, &udf_vnodeops);
	if (error != 0) {
		(void) vfs_freevfsops_by_type(fstype);
		cmn_err(CE_WARN, "udfinit: bad vnode ops template");
		return (error);
	}

	udf_fstype = fstype;

	ud_init_inodes();

	return (0);
}