1 /* 2 * super.c 3 * 4 * PURPOSE 5 * Super block routines for the OSTA-UDF(tm) filesystem. 6 * 7 * DESCRIPTION 8 * OSTA-UDF(tm) = Optical Storage Technology Association 9 * Universal Disk Format. 10 * 11 * This code is based on version 2.00 of the UDF specification, 12 * and revision 3 of the ECMA 167 standard [equivalent to ISO 13346]. 13 * http://www.osta.org/ 14 * https://www.ecma.ch/ 15 * https://www.iso.org/ 16 * 17 * COPYRIGHT 18 * This file is distributed under the terms of the GNU General Public 19 * License (GPL). Copies of the GPL can be obtained from: 20 * ftp://prep.ai.mit.edu/pub/gnu/GPL 21 * Each contributing author retains all rights to their own work. 22 * 23 * (C) 1998 Dave Boynton 24 * (C) 1998-2004 Ben Fennema 25 * (C) 2000 Stelias Computing Inc 26 * 27 * HISTORY 28 * 29 * 09/24/98 dgb changed to allow compiling outside of kernel, and 30 * added some debugging. 31 * 10/01/98 dgb updated to allow (some) possibility of compiling w/2.0.34 32 * 10/16/98 attempting some multi-session support 33 * 10/17/98 added freespace count for "df" 34 * 11/11/98 gr added novrs option 35 * 11/26/98 dgb added fileset,anchor mount options 36 * 12/06/98 blf really hosed things royally. vat/sparing support. sequenced 37 * vol descs. 
rewrote option handling based on isofs 38 * 12/20/98 find the free space bitmap (if it exists) 39 */ 40 41 #include "udfdecl.h" 42 43 #include <linux/blkdev.h> 44 #include <linux/slab.h> 45 #include <linux/kernel.h> 46 #include <linux/module.h> 47 #include <linux/parser.h> 48 #include <linux/stat.h> 49 #include <linux/cdrom.h> 50 #include <linux/nls.h> 51 #include <linux/vfs.h> 52 #include <linux/vmalloc.h> 53 #include <linux/errno.h> 54 #include <linux/mount.h> 55 #include <linux/seq_file.h> 56 #include <linux/bitmap.h> 57 #include <linux/crc-itu-t.h> 58 #include <linux/log2.h> 59 #include <asm/byteorder.h> 60 #include <linux/iversion.h> 61 62 #include "udf_sb.h" 63 #include "udf_i.h" 64 65 #include <linux/init.h> 66 #include <linux/uaccess.h> 67 68 enum { 69 VDS_POS_PRIMARY_VOL_DESC, 70 VDS_POS_UNALLOC_SPACE_DESC, 71 VDS_POS_LOGICAL_VOL_DESC, 72 VDS_POS_IMP_USE_VOL_DESC, 73 VDS_POS_LENGTH 74 }; 75 76 #define VSD_FIRST_SECTOR_OFFSET 32768 77 #define VSD_MAX_SECTOR_OFFSET 0x800000 78 79 /* 80 * Maximum number of Terminating Descriptor / Logical Volume Integrity 81 * Descriptor redirections. The chosen numbers are arbitrary - just that we 82 * hopefully don't limit any real use of rewritten inode on write-once media 83 * but avoid looping for too long on corrupted media. 84 */ 85 #define UDF_MAX_TD_NESTING 64 86 #define UDF_MAX_LVID_NESTING 1000 87 88 enum { UDF_MAX_LINKS = 0xffff }; 89 /* 90 * We limit filesize to 4TB. This is arbitrary as the on-disk format supports 91 * more but because the file space is described by a linked list of extents, 92 * each of which can have at most 1GB, the creation and handling of extents 93 * gets unusably slow beyond certain point... 
94 */ 95 #define UDF_MAX_FILESIZE (1ULL << 42) 96 97 /* These are the "meat" - everything else is stuffing */ 98 static int udf_fill_super(struct super_block *, void *, int); 99 static void udf_put_super(struct super_block *); 100 static int udf_sync_fs(struct super_block *, int); 101 static int udf_remount_fs(struct super_block *, int *, char *); 102 static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad); 103 static void udf_open_lvid(struct super_block *); 104 static void udf_close_lvid(struct super_block *); 105 static unsigned int udf_count_free(struct super_block *); 106 static int udf_statfs(struct dentry *, struct kstatfs *); 107 static int udf_show_options(struct seq_file *, struct dentry *); 108 109 struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb) 110 { 111 struct logicalVolIntegrityDesc *lvid; 112 unsigned int partnum; 113 unsigned int offset; 114 115 if (!UDF_SB(sb)->s_lvid_bh) 116 return NULL; 117 lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data; 118 partnum = le32_to_cpu(lvid->numOfPartitions); 119 /* The offset is to skip freeSpaceTable and sizeTable arrays */ 120 offset = partnum * 2 * sizeof(uint32_t); 121 return (struct logicalVolIntegrityDescImpUse *) 122 (((uint8_t *)(lvid + 1)) + offset); 123 } 124 125 /* UDF filesystem type */ 126 static struct dentry *udf_mount(struct file_system_type *fs_type, 127 int flags, const char *dev_name, void *data) 128 { 129 return mount_bdev(fs_type, flags, dev_name, data, udf_fill_super); 130 } 131 132 static struct file_system_type udf_fstype = { 133 .owner = THIS_MODULE, 134 .name = "udf", 135 .mount = udf_mount, 136 .kill_sb = kill_block_super, 137 .fs_flags = FS_REQUIRES_DEV, 138 }; 139 MODULE_ALIAS_FS("udf"); 140 141 static struct kmem_cache *udf_inode_cachep; 142 143 static struct inode *udf_alloc_inode(struct super_block *sb) 144 { 145 struct udf_inode_info *ei; 146 ei = alloc_inode_sb(sb, udf_inode_cachep, GFP_KERNEL); 147 if (!ei) 
148 return NULL; 149 150 ei->i_unique = 0; 151 ei->i_lenExtents = 0; 152 ei->i_lenStreams = 0; 153 ei->i_next_alloc_block = 0; 154 ei->i_next_alloc_goal = 0; 155 ei->i_strat4096 = 0; 156 ei->i_streamdir = 0; 157 ei->i_hidden = 0; 158 init_rwsem(&ei->i_data_sem); 159 ei->cached_extent.lstart = -1; 160 spin_lock_init(&ei->i_extent_cache_lock); 161 inode_set_iversion(&ei->vfs_inode, 1); 162 163 return &ei->vfs_inode; 164 } 165 166 static void udf_free_in_core_inode(struct inode *inode) 167 { 168 kmem_cache_free(udf_inode_cachep, UDF_I(inode)); 169 } 170 171 static void init_once(void *foo) 172 { 173 struct udf_inode_info *ei = foo; 174 175 ei->i_data = NULL; 176 inode_init_once(&ei->vfs_inode); 177 } 178 179 static int __init init_inodecache(void) 180 { 181 udf_inode_cachep = kmem_cache_create("udf_inode_cache", 182 sizeof(struct udf_inode_info), 183 0, (SLAB_RECLAIM_ACCOUNT | 184 SLAB_MEM_SPREAD | 185 SLAB_ACCOUNT), 186 init_once); 187 if (!udf_inode_cachep) 188 return -ENOMEM; 189 return 0; 190 } 191 192 static void destroy_inodecache(void) 193 { 194 /* 195 * Make sure all delayed rcu free inodes are flushed before we 196 * destroy cache. 
197 */ 198 rcu_barrier(); 199 kmem_cache_destroy(udf_inode_cachep); 200 } 201 202 /* Superblock operations */ 203 static const struct super_operations udf_sb_ops = { 204 .alloc_inode = udf_alloc_inode, 205 .free_inode = udf_free_in_core_inode, 206 .write_inode = udf_write_inode, 207 .evict_inode = udf_evict_inode, 208 .put_super = udf_put_super, 209 .sync_fs = udf_sync_fs, 210 .statfs = udf_statfs, 211 .remount_fs = udf_remount_fs, 212 .show_options = udf_show_options, 213 }; 214 215 struct udf_options { 216 unsigned char novrs; 217 unsigned int blocksize; 218 unsigned int session; 219 unsigned int lastblock; 220 unsigned int anchor; 221 unsigned int flags; 222 umode_t umask; 223 kgid_t gid; 224 kuid_t uid; 225 umode_t fmode; 226 umode_t dmode; 227 struct nls_table *nls_map; 228 }; 229 230 static int __init init_udf_fs(void) 231 { 232 int err; 233 234 err = init_inodecache(); 235 if (err) 236 goto out1; 237 err = register_filesystem(&udf_fstype); 238 if (err) 239 goto out; 240 241 return 0; 242 243 out: 244 destroy_inodecache(); 245 246 out1: 247 return err; 248 } 249 250 static void __exit exit_udf_fs(void) 251 { 252 unregister_filesystem(&udf_fstype); 253 destroy_inodecache(); 254 } 255 256 static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count) 257 { 258 struct udf_sb_info *sbi = UDF_SB(sb); 259 260 sbi->s_partmaps = kcalloc(count, sizeof(*sbi->s_partmaps), GFP_KERNEL); 261 if (!sbi->s_partmaps) { 262 sbi->s_partitions = 0; 263 return -ENOMEM; 264 } 265 266 sbi->s_partitions = count; 267 return 0; 268 } 269 270 static void udf_sb_free_bitmap(struct udf_bitmap *bitmap) 271 { 272 int i; 273 int nr_groups = bitmap->s_nr_groups; 274 275 for (i = 0; i < nr_groups; i++) 276 brelse(bitmap->s_block_bitmap[i]); 277 278 kvfree(bitmap); 279 } 280 281 static void udf_free_partition(struct udf_part_map *map) 282 { 283 int i; 284 struct udf_meta_data *mdata; 285 286 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) 287 iput(map->s_uspace.s_table); 
288 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) 289 udf_sb_free_bitmap(map->s_uspace.s_bitmap); 290 if (map->s_partition_type == UDF_SPARABLE_MAP15) 291 for (i = 0; i < 4; i++) 292 brelse(map->s_type_specific.s_sparing.s_spar_map[i]); 293 else if (map->s_partition_type == UDF_METADATA_MAP25) { 294 mdata = &map->s_type_specific.s_metadata; 295 iput(mdata->s_metadata_fe); 296 mdata->s_metadata_fe = NULL; 297 298 iput(mdata->s_mirror_fe); 299 mdata->s_mirror_fe = NULL; 300 301 iput(mdata->s_bitmap_fe); 302 mdata->s_bitmap_fe = NULL; 303 } 304 } 305 306 static void udf_sb_free_partitions(struct super_block *sb) 307 { 308 struct udf_sb_info *sbi = UDF_SB(sb); 309 int i; 310 311 if (!sbi->s_partmaps) 312 return; 313 for (i = 0; i < sbi->s_partitions; i++) 314 udf_free_partition(&sbi->s_partmaps[i]); 315 kfree(sbi->s_partmaps); 316 sbi->s_partmaps = NULL; 317 } 318 319 static int udf_show_options(struct seq_file *seq, struct dentry *root) 320 { 321 struct super_block *sb = root->d_sb; 322 struct udf_sb_info *sbi = UDF_SB(sb); 323 324 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT)) 325 seq_puts(seq, ",nostrict"); 326 if (UDF_QUERY_FLAG(sb, UDF_FLAG_BLOCKSIZE_SET)) 327 seq_printf(seq, ",bs=%lu", sb->s_blocksize); 328 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE)) 329 seq_puts(seq, ",unhide"); 330 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE)) 331 seq_puts(seq, ",undelete"); 332 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB)) 333 seq_puts(seq, ",noadinicb"); 334 if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD)) 335 seq_puts(seq, ",shortad"); 336 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET)) 337 seq_puts(seq, ",uid=forget"); 338 if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET)) 339 seq_puts(seq, ",gid=forget"); 340 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET)) 341 seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid)); 342 if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET)) 343 seq_printf(seq, ",gid=%u", from_kgid(&init_user_ns, sbi->s_gid)); 344 if (sbi->s_umask != 0) 345 seq_printf(seq, 
",umask=%ho", sbi->s_umask); 346 if (sbi->s_fmode != UDF_INVALID_MODE) 347 seq_printf(seq, ",mode=%ho", sbi->s_fmode); 348 if (sbi->s_dmode != UDF_INVALID_MODE) 349 seq_printf(seq, ",dmode=%ho", sbi->s_dmode); 350 if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET)) 351 seq_printf(seq, ",session=%d", sbi->s_session); 352 if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET)) 353 seq_printf(seq, ",lastblock=%u", sbi->s_last_block); 354 if (sbi->s_anchor != 0) 355 seq_printf(seq, ",anchor=%u", sbi->s_anchor); 356 if (sbi->s_nls_map) 357 seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset); 358 else 359 seq_puts(seq, ",iocharset=utf8"); 360 361 return 0; 362 } 363 364 /* 365 * udf_parse_options 366 * 367 * PURPOSE 368 * Parse mount options. 369 * 370 * DESCRIPTION 371 * The following mount options are supported: 372 * 373 * gid= Set the default group. 374 * umask= Set the default umask. 375 * mode= Set the default file permissions. 376 * dmode= Set the default directory permissions. 377 * uid= Set the default user. 378 * bs= Set the block size. 379 * unhide Show otherwise hidden files. 380 * undelete Show deleted files in lists. 381 * adinicb Embed data in the inode (default) 382 * noadinicb Don't embed data in the inode 383 * shortad Use short ad's 384 * longad Use long ad's (default) 385 * nostrict Unset strict conformance 386 * iocharset= Set the NLS character set 387 * 388 * The remaining are for debugging and disaster recovery: 389 * 390 * novrs Skip volume sequence recognition 391 * 392 * The following expect a offset from 0. 393 * 394 * session= Set the CDROM session (default= last session) 395 * anchor= Override standard anchor location. (default= 256) 396 * volume= Override the VolumeDesc location. (unused) 397 * partition= Override the PartitionDesc location. (unused) 398 * lastblock= Set the last block of the filesystem/ 399 * 400 * The following expect a offset from the partition root. 401 * 402 * fileset= Override the fileset block location. 
(unused) 403 * rootdir= Override the root directory location. (unused) 404 * WARNING: overriding the rootdir to a non-directory may 405 * yield highly unpredictable results. 406 * 407 * PRE-CONDITIONS 408 * options Pointer to mount options string. 409 * uopts Pointer to mount options variable. 410 * 411 * POST-CONDITIONS 412 * <return> 1 Mount options parsed okay. 413 * <return> 0 Error parsing mount options. 414 * 415 * HISTORY 416 * July 1, 1997 - Andrew E. Mileski 417 * Written, tested, and released. 418 */ 419 420 enum { 421 Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete, 422 Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad, 423 Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock, 424 Opt_anchor, Opt_volume, Opt_partition, Opt_fileset, 425 Opt_rootdir, Opt_utf8, Opt_iocharset, 426 Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore, 427 Opt_fmode, Opt_dmode 428 }; 429 430 static const match_table_t tokens = { 431 {Opt_novrs, "novrs"}, 432 {Opt_nostrict, "nostrict"}, 433 {Opt_bs, "bs=%u"}, 434 {Opt_unhide, "unhide"}, 435 {Opt_undelete, "undelete"}, 436 {Opt_noadinicb, "noadinicb"}, 437 {Opt_adinicb, "adinicb"}, 438 {Opt_shortad, "shortad"}, 439 {Opt_longad, "longad"}, 440 {Opt_uforget, "uid=forget"}, 441 {Opt_uignore, "uid=ignore"}, 442 {Opt_gforget, "gid=forget"}, 443 {Opt_gignore, "gid=ignore"}, 444 {Opt_gid, "gid=%u"}, 445 {Opt_uid, "uid=%u"}, 446 {Opt_umask, "umask=%o"}, 447 {Opt_session, "session=%u"}, 448 {Opt_lastblock, "lastblock=%u"}, 449 {Opt_anchor, "anchor=%u"}, 450 {Opt_volume, "volume=%u"}, 451 {Opt_partition, "partition=%u"}, 452 {Opt_fileset, "fileset=%u"}, 453 {Opt_rootdir, "rootdir=%u"}, 454 {Opt_utf8, "utf8"}, 455 {Opt_iocharset, "iocharset=%s"}, 456 {Opt_fmode, "mode=%o"}, 457 {Opt_dmode, "dmode=%o"}, 458 {Opt_err, NULL} 459 }; 460 461 static int udf_parse_options(char *options, struct udf_options *uopt, 462 bool remount) 463 { 464 char *p; 465 int option; 466 unsigned int uv; 467 468 uopt->novrs = 0; 469 
uopt->session = 0xFFFFFFFF; 470 uopt->lastblock = 0; 471 uopt->anchor = 0; 472 473 if (!options) 474 return 1; 475 476 while ((p = strsep(&options, ",")) != NULL) { 477 substring_t args[MAX_OPT_ARGS]; 478 int token; 479 unsigned n; 480 if (!*p) 481 continue; 482 483 token = match_token(p, tokens, args); 484 switch (token) { 485 case Opt_novrs: 486 uopt->novrs = 1; 487 break; 488 case Opt_bs: 489 if (match_int(&args[0], &option)) 490 return 0; 491 n = option; 492 if (n != 512 && n != 1024 && n != 2048 && n != 4096) 493 return 0; 494 uopt->blocksize = n; 495 uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET); 496 break; 497 case Opt_unhide: 498 uopt->flags |= (1 << UDF_FLAG_UNHIDE); 499 break; 500 case Opt_undelete: 501 uopt->flags |= (1 << UDF_FLAG_UNDELETE); 502 break; 503 case Opt_noadinicb: 504 uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB); 505 break; 506 case Opt_adinicb: 507 uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB); 508 break; 509 case Opt_shortad: 510 uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD); 511 break; 512 case Opt_longad: 513 uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD); 514 break; 515 case Opt_gid: 516 if (match_uint(args, &uv)) 517 return 0; 518 uopt->gid = make_kgid(current_user_ns(), uv); 519 if (!gid_valid(uopt->gid)) 520 return 0; 521 uopt->flags |= (1 << UDF_FLAG_GID_SET); 522 break; 523 case Opt_uid: 524 if (match_uint(args, &uv)) 525 return 0; 526 uopt->uid = make_kuid(current_user_ns(), uv); 527 if (!uid_valid(uopt->uid)) 528 return 0; 529 uopt->flags |= (1 << UDF_FLAG_UID_SET); 530 break; 531 case Opt_umask: 532 if (match_octal(args, &option)) 533 return 0; 534 uopt->umask = option; 535 break; 536 case Opt_nostrict: 537 uopt->flags &= ~(1 << UDF_FLAG_STRICT); 538 break; 539 case Opt_session: 540 if (match_int(args, &option)) 541 return 0; 542 uopt->session = option; 543 if (!remount) 544 uopt->flags |= (1 << UDF_FLAG_SESSION_SET); 545 break; 546 case Opt_lastblock: 547 if (match_int(args, &option)) 548 return 0; 549 uopt->lastblock = option; 550 
if (!remount) 551 uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET); 552 break; 553 case Opt_anchor: 554 if (match_int(args, &option)) 555 return 0; 556 uopt->anchor = option; 557 break; 558 case Opt_volume: 559 case Opt_partition: 560 case Opt_fileset: 561 case Opt_rootdir: 562 /* Ignored (never implemented properly) */ 563 break; 564 case Opt_utf8: 565 if (!remount) { 566 unload_nls(uopt->nls_map); 567 uopt->nls_map = NULL; 568 } 569 break; 570 case Opt_iocharset: 571 if (!remount) { 572 unload_nls(uopt->nls_map); 573 uopt->nls_map = NULL; 574 } 575 /* When nls_map is not loaded then UTF-8 is used */ 576 if (!remount && strcmp(args[0].from, "utf8") != 0) { 577 uopt->nls_map = load_nls(args[0].from); 578 if (!uopt->nls_map) { 579 pr_err("iocharset %s not found\n", 580 args[0].from); 581 return 0; 582 } 583 } 584 break; 585 case Opt_uforget: 586 uopt->flags |= (1 << UDF_FLAG_UID_FORGET); 587 break; 588 case Opt_uignore: 589 case Opt_gignore: 590 /* These options are superseeded by uid=<number> */ 591 break; 592 case Opt_gforget: 593 uopt->flags |= (1 << UDF_FLAG_GID_FORGET); 594 break; 595 case Opt_fmode: 596 if (match_octal(args, &option)) 597 return 0; 598 uopt->fmode = option & 0777; 599 break; 600 case Opt_dmode: 601 if (match_octal(args, &option)) 602 return 0; 603 uopt->dmode = option & 0777; 604 break; 605 default: 606 pr_err("bad mount option \"%s\" or missing value\n", p); 607 return 0; 608 } 609 } 610 return 1; 611 } 612 613 static int udf_remount_fs(struct super_block *sb, int *flags, char *options) 614 { 615 struct udf_options uopt; 616 struct udf_sb_info *sbi = UDF_SB(sb); 617 int error = 0; 618 619 if (!(*flags & SB_RDONLY) && UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT)) 620 return -EACCES; 621 622 sync_filesystem(sb); 623 624 uopt.flags = sbi->s_flags; 625 uopt.uid = sbi->s_uid; 626 uopt.gid = sbi->s_gid; 627 uopt.umask = sbi->s_umask; 628 uopt.fmode = sbi->s_fmode; 629 uopt.dmode = sbi->s_dmode; 630 uopt.nls_map = NULL; 631 632 if 
(!udf_parse_options(options, &uopt, true)) 633 return -EINVAL; 634 635 write_lock(&sbi->s_cred_lock); 636 sbi->s_flags = uopt.flags; 637 sbi->s_uid = uopt.uid; 638 sbi->s_gid = uopt.gid; 639 sbi->s_umask = uopt.umask; 640 sbi->s_fmode = uopt.fmode; 641 sbi->s_dmode = uopt.dmode; 642 write_unlock(&sbi->s_cred_lock); 643 644 if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb)) 645 goto out_unlock; 646 647 if (*flags & SB_RDONLY) 648 udf_close_lvid(sb); 649 else 650 udf_open_lvid(sb); 651 652 out_unlock: 653 return error; 654 } 655 656 /* 657 * Check VSD descriptor. Returns -1 in case we are at the end of volume 658 * recognition area, 0 if the descriptor is valid but non-interesting, 1 if 659 * we found one of NSR descriptors we are looking for. 660 */ 661 static int identify_vsd(const struct volStructDesc *vsd) 662 { 663 int ret = 0; 664 665 if (!memcmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN)) { 666 switch (vsd->structType) { 667 case 0: 668 udf_debug("ISO9660 Boot Record found\n"); 669 break; 670 case 1: 671 udf_debug("ISO9660 Primary Volume Descriptor found\n"); 672 break; 673 case 2: 674 udf_debug("ISO9660 Supplementary Volume Descriptor found\n"); 675 break; 676 case 3: 677 udf_debug("ISO9660 Volume Partition Descriptor found\n"); 678 break; 679 case 255: 680 udf_debug("ISO9660 Volume Descriptor Set Terminator found\n"); 681 break; 682 default: 683 udf_debug("ISO9660 VRS (%u) found\n", vsd->structType); 684 break; 685 } 686 } else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN)) 687 ; /* ret = 0 */ 688 else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN)) 689 ret = 1; 690 else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN)) 691 ret = 1; 692 else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BOOT2, VSD_STD_ID_LEN)) 693 ; /* ret = 0 */ 694 else if (!memcmp(vsd->stdIdent, VSD_STD_ID_CDW02, VSD_STD_ID_LEN)) 695 ; /* ret = 0 */ 696 else { 697 /* TEA01 or invalid id : end of volume recognition area */ 698 ret = -1; 699 } 700 701 
return ret; 702 } 703 704 /* 705 * Check Volume Structure Descriptors (ECMA 167 2/9.1) 706 * We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) 707 * @return 1 if NSR02 or NSR03 found, 708 * -1 if first sector read error, 0 otherwise 709 */ 710 static int udf_check_vsd(struct super_block *sb) 711 { 712 struct volStructDesc *vsd = NULL; 713 loff_t sector = VSD_FIRST_SECTOR_OFFSET; 714 int sectorsize; 715 struct buffer_head *bh = NULL; 716 int nsr = 0; 717 struct udf_sb_info *sbi; 718 loff_t session_offset; 719 720 sbi = UDF_SB(sb); 721 if (sb->s_blocksize < sizeof(struct volStructDesc)) 722 sectorsize = sizeof(struct volStructDesc); 723 else 724 sectorsize = sb->s_blocksize; 725 726 session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits; 727 sector += session_offset; 728 729 udf_debug("Starting at sector %u (%lu byte sectors)\n", 730 (unsigned int)(sector >> sb->s_blocksize_bits), 731 sb->s_blocksize); 732 /* Process the sequence (if applicable). The hard limit on the sector 733 * offset is arbitrary, hopefully large enough so that all valid UDF 734 * filesystems will be recognised. There is no mention of an upper 735 * bound to the size of the volume recognition area in the standard. 736 * The limit will prevent the code to read all the sectors of a 737 * specially crafted image (like a bluray disc full of CD001 sectors), 738 * potentially causing minutes or even hours of uninterruptible I/O 739 * activity. This actually happened with uninitialised SSD partitions 740 * (all 0xFF) before the check for the limit and all valid IDs were 741 * added */ 742 for (; !nsr && sector < VSD_MAX_SECTOR_OFFSET; sector += sectorsize) { 743 /* Read a block */ 744 bh = sb_bread(sb, sector >> sb->s_blocksize_bits); 745 if (!bh) 746 break; 747 748 vsd = (struct volStructDesc *)(bh->b_data + 749 (sector & (sb->s_blocksize - 1))); 750 nsr = identify_vsd(vsd); 751 /* Found NSR or end? 
*/ 752 if (nsr) { 753 brelse(bh); 754 break; 755 } 756 /* 757 * Special handling for improperly formatted VRS (e.g., Win10) 758 * where components are separated by 2048 bytes even though 759 * sectors are 4K 760 */ 761 if (sb->s_blocksize == 4096) { 762 nsr = identify_vsd(vsd + 1); 763 /* Ignore unknown IDs... */ 764 if (nsr < 0) 765 nsr = 0; 766 } 767 brelse(bh); 768 } 769 770 if (nsr > 0) 771 return 1; 772 else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET) 773 return -1; 774 else 775 return 0; 776 } 777 778 static int udf_verify_domain_identifier(struct super_block *sb, 779 struct regid *ident, char *dname) 780 { 781 struct domainIdentSuffix *suffix; 782 783 if (memcmp(ident->ident, UDF_ID_COMPLIANT, strlen(UDF_ID_COMPLIANT))) { 784 udf_warn(sb, "Not OSTA UDF compliant %s descriptor.\n", dname); 785 goto force_ro; 786 } 787 if (ident->flags & ENTITYID_FLAGS_DIRTY) { 788 udf_warn(sb, "Possibly not OSTA UDF compliant %s descriptor.\n", 789 dname); 790 goto force_ro; 791 } 792 suffix = (struct domainIdentSuffix *)ident->identSuffix; 793 if ((suffix->domainFlags & DOMAIN_FLAGS_HARD_WRITE_PROTECT) || 794 (suffix->domainFlags & DOMAIN_FLAGS_SOFT_WRITE_PROTECT)) { 795 if (!sb_rdonly(sb)) { 796 udf_warn(sb, "Descriptor for %s marked write protected." 
797 " Forcing read only mount.\n", dname); 798 } 799 goto force_ro; 800 } 801 return 0; 802 803 force_ro: 804 if (!sb_rdonly(sb)) 805 return -EACCES; 806 UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT); 807 return 0; 808 } 809 810 static int udf_load_fileset(struct super_block *sb, struct fileSetDesc *fset, 811 struct kernel_lb_addr *root) 812 { 813 int ret; 814 815 ret = udf_verify_domain_identifier(sb, &fset->domainIdent, "file set"); 816 if (ret < 0) 817 return ret; 818 819 *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation); 820 UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum); 821 822 udf_debug("Rootdir at block=%u, partition=%u\n", 823 root->logicalBlockNum, root->partitionReferenceNum); 824 return 0; 825 } 826 827 static int udf_find_fileset(struct super_block *sb, 828 struct kernel_lb_addr *fileset, 829 struct kernel_lb_addr *root) 830 { 831 struct buffer_head *bh; 832 uint16_t ident; 833 int ret; 834 835 if (fileset->logicalBlockNum == 0xFFFFFFFF && 836 fileset->partitionReferenceNum == 0xFFFF) 837 return -EINVAL; 838 839 bh = udf_read_ptagged(sb, fileset, 0, &ident); 840 if (!bh) 841 return -EIO; 842 if (ident != TAG_IDENT_FSD) { 843 brelse(bh); 844 return -EINVAL; 845 } 846 847 udf_debug("Fileset at block=%u, partition=%u\n", 848 fileset->logicalBlockNum, fileset->partitionReferenceNum); 849 850 UDF_SB(sb)->s_partition = fileset->partitionReferenceNum; 851 ret = udf_load_fileset(sb, (struct fileSetDesc *)bh->b_data, root); 852 brelse(bh); 853 return ret; 854 } 855 856 /* 857 * Load primary Volume Descriptor Sequence 858 * 859 * Return <0 on error, 0 on success. -EAGAIN is special meaning next sequence 860 * should be tried. 
861 */ 862 static int udf_load_pvoldesc(struct super_block *sb, sector_t block) 863 { 864 struct primaryVolDesc *pvoldesc; 865 uint8_t *outstr; 866 struct buffer_head *bh; 867 uint16_t ident; 868 int ret; 869 struct timestamp *ts; 870 871 outstr = kmalloc(128, GFP_NOFS); 872 if (!outstr) 873 return -ENOMEM; 874 875 bh = udf_read_tagged(sb, block, block, &ident); 876 if (!bh) { 877 ret = -EAGAIN; 878 goto out2; 879 } 880 881 if (ident != TAG_IDENT_PVD) { 882 ret = -EIO; 883 goto out_bh; 884 } 885 886 pvoldesc = (struct primaryVolDesc *)bh->b_data; 887 888 udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time, 889 pvoldesc->recordingDateAndTime); 890 ts = &pvoldesc->recordingDateAndTime; 891 udf_debug("recording time %04u/%02u/%02u %02u:%02u (%x)\n", 892 le16_to_cpu(ts->year), ts->month, ts->day, ts->hour, 893 ts->minute, le16_to_cpu(ts->typeAndTimezone)); 894 895 ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32); 896 if (ret < 0) { 897 strcpy(UDF_SB(sb)->s_volume_ident, "InvalidName"); 898 pr_warn("incorrect volume identification, setting to " 899 "'InvalidName'\n"); 900 } else { 901 strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret); 902 } 903 udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident); 904 905 ret = udf_dstrCS0toChar(sb, outstr, 127, pvoldesc->volSetIdent, 128); 906 if (ret < 0) { 907 ret = 0; 908 goto out_bh; 909 } 910 outstr[ret] = 0; 911 udf_debug("volSetIdent[] = '%s'\n", outstr); 912 913 ret = 0; 914 out_bh: 915 brelse(bh); 916 out2: 917 kfree(outstr); 918 return ret; 919 } 920 921 struct inode *udf_find_metadata_inode_efe(struct super_block *sb, 922 u32 meta_file_loc, u32 partition_ref) 923 { 924 struct kernel_lb_addr addr; 925 struct inode *metadata_fe; 926 927 addr.logicalBlockNum = meta_file_loc; 928 addr.partitionReferenceNum = partition_ref; 929 930 metadata_fe = udf_iget_special(sb, &addr); 931 932 if (IS_ERR(metadata_fe)) { 933 udf_warn(sb, "metadata inode efe not found\n"); 934 return metadata_fe; 935 } 936 if 
(UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) { 937 udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n"); 938 iput(metadata_fe); 939 return ERR_PTR(-EIO); 940 } 941 942 return metadata_fe; 943 } 944 945 static int udf_load_metadata_files(struct super_block *sb, int partition, 946 int type1_index) 947 { 948 struct udf_sb_info *sbi = UDF_SB(sb); 949 struct udf_part_map *map; 950 struct udf_meta_data *mdata; 951 struct kernel_lb_addr addr; 952 struct inode *fe; 953 954 map = &sbi->s_partmaps[partition]; 955 mdata = &map->s_type_specific.s_metadata; 956 mdata->s_phys_partition_ref = type1_index; 957 958 /* metadata address */ 959 udf_debug("Metadata file location: block = %u part = %u\n", 960 mdata->s_meta_file_loc, mdata->s_phys_partition_ref); 961 962 fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc, 963 mdata->s_phys_partition_ref); 964 if (IS_ERR(fe)) { 965 /* mirror file entry */ 966 udf_debug("Mirror metadata file location: block = %u part = %u\n", 967 mdata->s_mirror_file_loc, mdata->s_phys_partition_ref); 968 969 fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc, 970 mdata->s_phys_partition_ref); 971 972 if (IS_ERR(fe)) { 973 udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n"); 974 return PTR_ERR(fe); 975 } 976 mdata->s_mirror_fe = fe; 977 } else 978 mdata->s_metadata_fe = fe; 979 980 981 /* 982 * bitmap file entry 983 * Note: 984 * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102) 985 */ 986 if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) { 987 addr.logicalBlockNum = mdata->s_bitmap_file_loc; 988 addr.partitionReferenceNum = mdata->s_phys_partition_ref; 989 990 udf_debug("Bitmap file location: block = %u part = %u\n", 991 addr.logicalBlockNum, addr.partitionReferenceNum); 992 993 fe = udf_iget_special(sb, &addr); 994 if (IS_ERR(fe)) { 995 if (sb_rdonly(sb)) 996 udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n"); 997 
else { 998 udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n"); 999 return PTR_ERR(fe); 1000 } 1001 } else 1002 mdata->s_bitmap_fe = fe; 1003 } 1004 1005 udf_debug("udf_load_metadata_files Ok\n"); 1006 return 0; 1007 } 1008 1009 int udf_compute_nr_groups(struct super_block *sb, u32 partition) 1010 { 1011 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; 1012 return DIV_ROUND_UP(map->s_partition_len + 1013 (sizeof(struct spaceBitmapDesc) << 3), 1014 sb->s_blocksize * 8); 1015 } 1016 1017 static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index) 1018 { 1019 struct udf_bitmap *bitmap; 1020 int nr_groups = udf_compute_nr_groups(sb, index); 1021 1022 bitmap = kvzalloc(struct_size(bitmap, s_block_bitmap, nr_groups), 1023 GFP_KERNEL); 1024 if (!bitmap) 1025 return NULL; 1026 1027 bitmap->s_nr_groups = nr_groups; 1028 return bitmap; 1029 } 1030 1031 static int check_partition_desc(struct super_block *sb, 1032 struct partitionDesc *p, 1033 struct udf_part_map *map) 1034 { 1035 bool umap, utable, fmap, ftable; 1036 struct partitionHeaderDesc *phd; 1037 1038 switch (le32_to_cpu(p->accessType)) { 1039 case PD_ACCESS_TYPE_READ_ONLY: 1040 case PD_ACCESS_TYPE_WRITE_ONCE: 1041 case PD_ACCESS_TYPE_NONE: 1042 goto force_ro; 1043 } 1044 1045 /* No Partition Header Descriptor? */ 1046 if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) && 1047 strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03)) 1048 goto force_ro; 1049 1050 phd = (struct partitionHeaderDesc *)p->partitionContentsUse; 1051 utable = phd->unallocSpaceTable.extLength; 1052 umap = phd->unallocSpaceBitmap.extLength; 1053 ftable = phd->freedSpaceTable.extLength; 1054 fmap = phd->freedSpaceBitmap.extLength; 1055 1056 /* No allocation info? 
*/ 1057 if (!utable && !umap && !ftable && !fmap) 1058 goto force_ro; 1059 1060 /* We don't support blocks that require erasing before overwrite */ 1061 if (ftable || fmap) 1062 goto force_ro; 1063 /* UDF 2.60: 2.3.3 - no mixing of tables & bitmaps, no VAT. */ 1064 if (utable && umap) 1065 goto force_ro; 1066 1067 if (map->s_partition_type == UDF_VIRTUAL_MAP15 || 1068 map->s_partition_type == UDF_VIRTUAL_MAP20 || 1069 map->s_partition_type == UDF_METADATA_MAP25) 1070 goto force_ro; 1071 1072 return 0; 1073 force_ro: 1074 if (!sb_rdonly(sb)) 1075 return -EACCES; 1076 UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT); 1077 return 0; 1078 } 1079 1080 static int udf_fill_partdesc_info(struct super_block *sb, 1081 struct partitionDesc *p, int p_index) 1082 { 1083 struct udf_part_map *map; 1084 struct udf_sb_info *sbi = UDF_SB(sb); 1085 struct partitionHeaderDesc *phd; 1086 int err; 1087 1088 map = &sbi->s_partmaps[p_index]; 1089 1090 map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */ 1091 map->s_partition_root = le32_to_cpu(p->partitionStartingLocation); 1092 1093 if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY)) 1094 map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY; 1095 if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE)) 1096 map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE; 1097 if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE)) 1098 map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE; 1099 if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE)) 1100 map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE; 1101 1102 udf_debug("Partition (%d type %x) starts at physical %u, block length %u\n", 1103 p_index, map->s_partition_type, 1104 map->s_partition_root, map->s_partition_len); 1105 1106 err = check_partition_desc(sb, p, map); 1107 if (err) 1108 return err; 1109 1110 /* 1111 * Skip loading allocation info it we cannot ever write to the fs. 
1112 * This is a correctness thing as we may have decided to force ro mount 1113 * to avoid allocation info we don't support. 1114 */ 1115 if (UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT)) 1116 return 0; 1117 1118 phd = (struct partitionHeaderDesc *)p->partitionContentsUse; 1119 if (phd->unallocSpaceTable.extLength) { 1120 struct kernel_lb_addr loc = { 1121 .logicalBlockNum = le32_to_cpu( 1122 phd->unallocSpaceTable.extPosition), 1123 .partitionReferenceNum = p_index, 1124 }; 1125 struct inode *inode; 1126 1127 inode = udf_iget_special(sb, &loc); 1128 if (IS_ERR(inode)) { 1129 udf_debug("cannot load unallocSpaceTable (part %d)\n", 1130 p_index); 1131 return PTR_ERR(inode); 1132 } 1133 map->s_uspace.s_table = inode; 1134 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE; 1135 udf_debug("unallocSpaceTable (part %d) @ %lu\n", 1136 p_index, map->s_uspace.s_table->i_ino); 1137 } 1138 1139 if (phd->unallocSpaceBitmap.extLength) { 1140 struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index); 1141 if (!bitmap) 1142 return -ENOMEM; 1143 map->s_uspace.s_bitmap = bitmap; 1144 bitmap->s_extPosition = le32_to_cpu( 1145 phd->unallocSpaceBitmap.extPosition); 1146 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP; 1147 udf_debug("unallocSpaceBitmap (part %d) @ %u\n", 1148 p_index, bitmap->s_extPosition); 1149 } 1150 1151 return 0; 1152 } 1153 1154 static void udf_find_vat_block(struct super_block *sb, int p_index, 1155 int type1_index, sector_t start_block) 1156 { 1157 struct udf_sb_info *sbi = UDF_SB(sb); 1158 struct udf_part_map *map = &sbi->s_partmaps[p_index]; 1159 sector_t vat_block; 1160 struct kernel_lb_addr ino; 1161 struct inode *inode; 1162 1163 /* 1164 * VAT file entry is in the last recorded block. Some broken disks have 1165 * it a few blocks before so try a bit harder... 
1166 */ 1167 ino.partitionReferenceNum = type1_index; 1168 for (vat_block = start_block; 1169 vat_block >= map->s_partition_root && 1170 vat_block >= start_block - 3; vat_block--) { 1171 ino.logicalBlockNum = vat_block - map->s_partition_root; 1172 inode = udf_iget_special(sb, &ino); 1173 if (!IS_ERR(inode)) { 1174 sbi->s_vat_inode = inode; 1175 break; 1176 } 1177 } 1178 } 1179 1180 static int udf_load_vat(struct super_block *sb, int p_index, int type1_index) 1181 { 1182 struct udf_sb_info *sbi = UDF_SB(sb); 1183 struct udf_part_map *map = &sbi->s_partmaps[p_index]; 1184 struct buffer_head *bh = NULL; 1185 struct udf_inode_info *vati; 1186 struct virtualAllocationTable20 *vat20; 1187 sector_t blocks = sb_bdev_nr_blocks(sb); 1188 1189 udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block); 1190 if (!sbi->s_vat_inode && 1191 sbi->s_last_block != blocks - 1) { 1192 pr_notice("Failed to read VAT inode from the last recorded block (%lu), retrying with the last block of the device (%lu).\n", 1193 (unsigned long)sbi->s_last_block, 1194 (unsigned long)blocks - 1); 1195 udf_find_vat_block(sb, p_index, type1_index, blocks - 1); 1196 } 1197 if (!sbi->s_vat_inode) 1198 return -EIO; 1199 1200 if (map->s_partition_type == UDF_VIRTUAL_MAP15) { 1201 map->s_type_specific.s_virtual.s_start_offset = 0; 1202 map->s_type_specific.s_virtual.s_num_entries = 1203 (sbi->s_vat_inode->i_size - 36) >> 2; 1204 } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) { 1205 vati = UDF_I(sbi->s_vat_inode); 1206 if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { 1207 int err = 0; 1208 1209 bh = udf_bread(sbi->s_vat_inode, 0, 0, &err); 1210 if (!bh) { 1211 if (!err) 1212 err = -EFSCORRUPTED; 1213 return err; 1214 } 1215 vat20 = (struct virtualAllocationTable20 *)bh->b_data; 1216 } else { 1217 vat20 = (struct virtualAllocationTable20 *) 1218 vati->i_data; 1219 } 1220 1221 map->s_type_specific.s_virtual.s_start_offset = 1222 le16_to_cpu(vat20->lengthHeader); 1223 
map->s_type_specific.s_virtual.s_num_entries = 1224 (sbi->s_vat_inode->i_size - 1225 map->s_type_specific.s_virtual. 1226 s_start_offset) >> 2; 1227 brelse(bh); 1228 } 1229 return 0; 1230 } 1231 1232 /* 1233 * Load partition descriptor block 1234 * 1235 * Returns <0 on error, 0 on success, -EAGAIN is special - try next descriptor 1236 * sequence. 1237 */ 1238 static int udf_load_partdesc(struct super_block *sb, sector_t block) 1239 { 1240 struct buffer_head *bh; 1241 struct partitionDesc *p; 1242 struct udf_part_map *map; 1243 struct udf_sb_info *sbi = UDF_SB(sb); 1244 int i, type1_idx; 1245 uint16_t partitionNumber; 1246 uint16_t ident; 1247 int ret; 1248 1249 bh = udf_read_tagged(sb, block, block, &ident); 1250 if (!bh) 1251 return -EAGAIN; 1252 if (ident != TAG_IDENT_PD) { 1253 ret = 0; 1254 goto out_bh; 1255 } 1256 1257 p = (struct partitionDesc *)bh->b_data; 1258 partitionNumber = le16_to_cpu(p->partitionNumber); 1259 1260 /* First scan for TYPE1 and SPARABLE partitions */ 1261 for (i = 0; i < sbi->s_partitions; i++) { 1262 map = &sbi->s_partmaps[i]; 1263 udf_debug("Searching map: (%u == %u)\n", 1264 map->s_partition_num, partitionNumber); 1265 if (map->s_partition_num == partitionNumber && 1266 (map->s_partition_type == UDF_TYPE1_MAP15 || 1267 map->s_partition_type == UDF_SPARABLE_MAP15)) 1268 break; 1269 } 1270 1271 if (i >= sbi->s_partitions) { 1272 udf_debug("Partition (%u) not found in partition map\n", 1273 partitionNumber); 1274 ret = 0; 1275 goto out_bh; 1276 } 1277 1278 ret = udf_fill_partdesc_info(sb, p, i); 1279 if (ret < 0) 1280 goto out_bh; 1281 1282 /* 1283 * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and 1284 * PHYSICAL partitions are already set up 1285 */ 1286 type1_idx = i; 1287 map = NULL; /* suppress 'maybe used uninitialized' warning */ 1288 for (i = 0; i < sbi->s_partitions; i++) { 1289 map = &sbi->s_partmaps[i]; 1290 1291 if (map->s_partition_num == partitionNumber && 1292 (map->s_partition_type == UDF_VIRTUAL_MAP15 || 
1293 map->s_partition_type == UDF_VIRTUAL_MAP20 || 1294 map->s_partition_type == UDF_METADATA_MAP25)) 1295 break; 1296 } 1297 1298 if (i >= sbi->s_partitions) { 1299 ret = 0; 1300 goto out_bh; 1301 } 1302 1303 ret = udf_fill_partdesc_info(sb, p, i); 1304 if (ret < 0) 1305 goto out_bh; 1306 1307 if (map->s_partition_type == UDF_METADATA_MAP25) { 1308 ret = udf_load_metadata_files(sb, i, type1_idx); 1309 if (ret < 0) { 1310 udf_err(sb, "error loading MetaData partition map %d\n", 1311 i); 1312 goto out_bh; 1313 } 1314 } else { 1315 /* 1316 * If we have a partition with virtual map, we don't handle 1317 * writing to it (we overwrite blocks instead of relocating 1318 * them). 1319 */ 1320 if (!sb_rdonly(sb)) { 1321 ret = -EACCES; 1322 goto out_bh; 1323 } 1324 UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT); 1325 ret = udf_load_vat(sb, i, type1_idx); 1326 if (ret < 0) 1327 goto out_bh; 1328 } 1329 ret = 0; 1330 out_bh: 1331 /* In case loading failed, we handle cleanup in udf_fill_super */ 1332 brelse(bh); 1333 return ret; 1334 } 1335 1336 static int udf_load_sparable_map(struct super_block *sb, 1337 struct udf_part_map *map, 1338 struct sparablePartitionMap *spm) 1339 { 1340 uint32_t loc; 1341 uint16_t ident; 1342 struct sparingTable *st; 1343 struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing; 1344 int i; 1345 struct buffer_head *bh; 1346 1347 map->s_partition_type = UDF_SPARABLE_MAP15; 1348 sdata->s_packet_len = le16_to_cpu(spm->packetLength); 1349 if (!is_power_of_2(sdata->s_packet_len)) { 1350 udf_err(sb, "error loading logical volume descriptor: " 1351 "Invalid packet length %u\n", 1352 (unsigned)sdata->s_packet_len); 1353 return -EIO; 1354 } 1355 if (spm->numSparingTables > 4) { 1356 udf_err(sb, "error loading logical volume descriptor: " 1357 "Too many sparing tables (%d)\n", 1358 (int)spm->numSparingTables); 1359 return -EIO; 1360 } 1361 if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) { 1362 udf_err(sb, "error loading logical volume descriptor: " 
1363 "Too big sparing table size (%u)\n", 1364 le32_to_cpu(spm->sizeSparingTable)); 1365 return -EIO; 1366 } 1367 1368 for (i = 0; i < spm->numSparingTables; i++) { 1369 loc = le32_to_cpu(spm->locSparingTable[i]); 1370 bh = udf_read_tagged(sb, loc, loc, &ident); 1371 if (!bh) 1372 continue; 1373 1374 st = (struct sparingTable *)bh->b_data; 1375 if (ident != 0 || 1376 strncmp(st->sparingIdent.ident, UDF_ID_SPARING, 1377 strlen(UDF_ID_SPARING)) || 1378 sizeof(*st) + le16_to_cpu(st->reallocationTableLen) > 1379 sb->s_blocksize) { 1380 brelse(bh); 1381 continue; 1382 } 1383 1384 sdata->s_spar_map[i] = bh; 1385 } 1386 map->s_partition_func = udf_get_pblock_spar15; 1387 return 0; 1388 } 1389 1390 static int udf_load_logicalvol(struct super_block *sb, sector_t block, 1391 struct kernel_lb_addr *fileset) 1392 { 1393 struct logicalVolDesc *lvd; 1394 int i, offset; 1395 uint8_t type; 1396 struct udf_sb_info *sbi = UDF_SB(sb); 1397 struct genericPartitionMap *gpm; 1398 uint16_t ident; 1399 struct buffer_head *bh; 1400 unsigned int table_len; 1401 int ret; 1402 1403 bh = udf_read_tagged(sb, block, block, &ident); 1404 if (!bh) 1405 return -EAGAIN; 1406 BUG_ON(ident != TAG_IDENT_LVD); 1407 lvd = (struct logicalVolDesc *)bh->b_data; 1408 table_len = le32_to_cpu(lvd->mapTableLength); 1409 if (table_len > sb->s_blocksize - sizeof(*lvd)) { 1410 udf_err(sb, "error loading logical volume descriptor: " 1411 "Partition table too long (%u > %lu)\n", table_len, 1412 sb->s_blocksize - sizeof(*lvd)); 1413 ret = -EIO; 1414 goto out_bh; 1415 } 1416 1417 ret = udf_verify_domain_identifier(sb, &lvd->domainIdent, 1418 "logical volume"); 1419 if (ret) 1420 goto out_bh; 1421 ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps)); 1422 if (ret) 1423 goto out_bh; 1424 1425 for (i = 0, offset = 0; 1426 i < sbi->s_partitions && offset < table_len; 1427 i++, offset += gpm->partitionMapLength) { 1428 struct udf_part_map *map = &sbi->s_partmaps[i]; 1429 gpm = (struct 
genericPartitionMap *) 1430 &(lvd->partitionMaps[offset]); 1431 type = gpm->partitionMapType; 1432 if (type == 1) { 1433 struct genericPartitionMap1 *gpm1 = 1434 (struct genericPartitionMap1 *)gpm; 1435 map->s_partition_type = UDF_TYPE1_MAP15; 1436 map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum); 1437 map->s_partition_num = le16_to_cpu(gpm1->partitionNum); 1438 map->s_partition_func = NULL; 1439 } else if (type == 2) { 1440 struct udfPartitionMap2 *upm2 = 1441 (struct udfPartitionMap2 *)gpm; 1442 if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL, 1443 strlen(UDF_ID_VIRTUAL))) { 1444 u16 suf = 1445 le16_to_cpu(((__le16 *)upm2->partIdent. 1446 identSuffix)[0]); 1447 if (suf < 0x0200) { 1448 map->s_partition_type = 1449 UDF_VIRTUAL_MAP15; 1450 map->s_partition_func = 1451 udf_get_pblock_virt15; 1452 } else { 1453 map->s_partition_type = 1454 UDF_VIRTUAL_MAP20; 1455 map->s_partition_func = 1456 udf_get_pblock_virt20; 1457 } 1458 } else if (!strncmp(upm2->partIdent.ident, 1459 UDF_ID_SPARABLE, 1460 strlen(UDF_ID_SPARABLE))) { 1461 ret = udf_load_sparable_map(sb, map, 1462 (struct sparablePartitionMap *)gpm); 1463 if (ret < 0) 1464 goto out_bh; 1465 } else if (!strncmp(upm2->partIdent.ident, 1466 UDF_ID_METADATA, 1467 strlen(UDF_ID_METADATA))) { 1468 struct udf_meta_data *mdata = 1469 &map->s_type_specific.s_metadata; 1470 struct metadataPartitionMap *mdm = 1471 (struct metadataPartitionMap *) 1472 &(lvd->partitionMaps[offset]); 1473 udf_debug("Parsing Logical vol part %d type %u id=%s\n", 1474 i, type, UDF_ID_METADATA); 1475 1476 map->s_partition_type = UDF_METADATA_MAP25; 1477 map->s_partition_func = udf_get_pblock_meta25; 1478 1479 mdata->s_meta_file_loc = 1480 le32_to_cpu(mdm->metadataFileLoc); 1481 mdata->s_mirror_file_loc = 1482 le32_to_cpu(mdm->metadataMirrorFileLoc); 1483 mdata->s_bitmap_file_loc = 1484 le32_to_cpu(mdm->metadataBitmapFileLoc); 1485 mdata->s_alloc_unit_size = 1486 le32_to_cpu(mdm->allocUnitSize); 1487 mdata->s_align_unit_size = 1488 
le16_to_cpu(mdm->alignUnitSize); 1489 if (mdm->flags & 0x01) 1490 mdata->s_flags |= MF_DUPLICATE_MD; 1491 1492 udf_debug("Metadata Ident suffix=0x%x\n", 1493 le16_to_cpu(*(__le16 *) 1494 mdm->partIdent.identSuffix)); 1495 udf_debug("Metadata part num=%u\n", 1496 le16_to_cpu(mdm->partitionNum)); 1497 udf_debug("Metadata part alloc unit size=%u\n", 1498 le32_to_cpu(mdm->allocUnitSize)); 1499 udf_debug("Metadata file loc=%u\n", 1500 le32_to_cpu(mdm->metadataFileLoc)); 1501 udf_debug("Mirror file loc=%u\n", 1502 le32_to_cpu(mdm->metadataMirrorFileLoc)); 1503 udf_debug("Bitmap file loc=%u\n", 1504 le32_to_cpu(mdm->metadataBitmapFileLoc)); 1505 udf_debug("Flags: %d %u\n", 1506 mdata->s_flags, mdm->flags); 1507 } else { 1508 udf_debug("Unknown ident: %s\n", 1509 upm2->partIdent.ident); 1510 continue; 1511 } 1512 map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum); 1513 map->s_partition_num = le16_to_cpu(upm2->partitionNum); 1514 } 1515 udf_debug("Partition (%d:%u) type %u on volume %u\n", 1516 i, map->s_partition_num, type, map->s_volumeseqnum); 1517 } 1518 1519 if (fileset) { 1520 struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]); 1521 1522 *fileset = lelb_to_cpu(la->extLocation); 1523 udf_debug("FileSet found in LogicalVolDesc at block=%u, partition=%u\n", 1524 fileset->logicalBlockNum, 1525 fileset->partitionReferenceNum); 1526 } 1527 if (lvd->integritySeqExt.extLength) 1528 udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt)); 1529 ret = 0; 1530 1531 if (!sbi->s_lvid_bh) { 1532 /* We can't generate unique IDs without a valid LVID */ 1533 if (sb_rdonly(sb)) { 1534 UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT); 1535 } else { 1536 udf_warn(sb, "Damaged or missing LVID, forcing " 1537 "readonly mount\n"); 1538 ret = -EACCES; 1539 } 1540 } 1541 out_bh: 1542 brelse(bh); 1543 return ret; 1544 } 1545 1546 /* 1547 * Find the prevailing Logical Volume Integrity Descriptor. 
*/ 1549 static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc) 1550 { 1551 struct buffer_head *bh, *final_bh; 1552 uint16_t ident; 1553 struct udf_sb_info *sbi = UDF_SB(sb); 1554 struct logicalVolIntegrityDesc *lvid; 1555 int indirections = 0; 1556 u32 parts, impuselen; 1557 /* Bounded walk over the LVID chain (UDF_MAX_LVID_NESTING) so corrupted media cannot make us loop forever. */ 1558 while (++indirections <= UDF_MAX_LVID_NESTING) { 1559 final_bh = NULL; /* Scan this extent block by block; the last valid LVID in it prevails. */ 1560 while (loc.extLength > 0 && 1561 (bh = udf_read_tagged(sb, loc.extLocation, 1562 loc.extLocation, &ident))) { 1563 if (ident != TAG_IDENT_LVID) { 1564 brelse(bh); 1565 break; 1566 } 1567 1568 brelse(final_bh); 1569 final_bh = bh; 1570 1571 loc.extLength -= sb->s_blocksize; 1572 loc.extLocation++; 1573 } 1574 1575 if (!final_bh) 1576 return; 1577 /* Remember the newest LVID found so far; replaces any previously cached one. */ 1578 brelse(sbi->s_lvid_bh); 1579 sbi->s_lvid_bh = final_bh; 1580 1581 lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data; 1582 if (lvid->nextIntegrityExt.extLength == 0) 1583 goto check; 1584 /* Follow the continuation extent to the next piece of the chain. */ 1585 loc = leea_to_cpu(lvid->nextIntegrityExt); 1586 } 1587 1588 udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n", 1589 UDF_MAX_LVID_NESTING); 1590 out_err: 1591 brelse(sbi->s_lvid_bh); 1592 sbi->s_lvid_bh = NULL; 1593 return; 1594 check: /* Sanity-check the prevailing LVID so its variable-length tail fits in one block. */ 1595 parts = le32_to_cpu(lvid->numOfPartitions); 1596 impuselen = le32_to_cpu(lvid->lengthOfImpUse); 1597 if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize || 1598 sizeof(struct logicalVolIntegrityDesc) + impuselen + 1599 2 * parts * sizeof(u32) > sb->s_blocksize) { 1600 udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), " 1601 "ignoring.\n", parts, impuselen); 1602 goto out_err; 1603 } 1604 } 1605 1606 /* 1607 * Step for reallocation of table of partition descriptor sequence numbers. 1608 * Must be power of 2. 
1609 */ 1610 #define PART_DESC_ALLOC_STEP 32 1611 1612 struct part_desc_seq_scan_data { 1613 struct udf_vds_record rec; 1614 u32 partnum; 1615 }; 1616 1617 struct desc_seq_scan_data { 1618 struct udf_vds_record vds[VDS_POS_LENGTH]; 1619 unsigned int size_part_descs; 1620 unsigned int num_part_descs; 1621 struct part_desc_seq_scan_data *part_descs_loc; 1622 }; 1623 1624 static struct udf_vds_record *handle_partition_descriptor( 1625 struct buffer_head *bh, 1626 struct desc_seq_scan_data *data) 1627 { 1628 struct partitionDesc *desc = (struct partitionDesc *)bh->b_data; 1629 int partnum; 1630 int i; 1631 1632 partnum = le16_to_cpu(desc->partitionNumber); 1633 for (i = 0; i < data->num_part_descs; i++) 1634 if (partnum == data->part_descs_loc[i].partnum) 1635 return &(data->part_descs_loc[i].rec); 1636 if (data->num_part_descs >= data->size_part_descs) { 1637 struct part_desc_seq_scan_data *new_loc; 1638 unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP); 1639 1640 new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL); 1641 if (!new_loc) 1642 return ERR_PTR(-ENOMEM); 1643 memcpy(new_loc, data->part_descs_loc, 1644 data->size_part_descs * sizeof(*new_loc)); 1645 kfree(data->part_descs_loc); 1646 data->part_descs_loc = new_loc; 1647 data->size_part_descs = new_size; 1648 } 1649 return &(data->part_descs_loc[data->num_part_descs++].rec); 1650 } 1651 1652 1653 static struct udf_vds_record *get_volume_descriptor_record(uint16_t ident, 1654 struct buffer_head *bh, struct desc_seq_scan_data *data) 1655 { 1656 switch (ident) { 1657 case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */ 1658 return &(data->vds[VDS_POS_PRIMARY_VOL_DESC]); 1659 case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */ 1660 return &(data->vds[VDS_POS_IMP_USE_VOL_DESC]); 1661 case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */ 1662 return &(data->vds[VDS_POS_LOGICAL_VOL_DESC]); 1663 case TAG_IDENT_USD: /* ISO 13346 3/10.8 */ 1664 return &(data->vds[VDS_POS_UNALLOC_SPACE_DESC]); 1665 case TAG_IDENT_PD: /* ISO 13346 
3/10.5 */ 1666 return handle_partition_descriptor(bh, data); 1667 } 1668 return NULL; 1669 } 1670 1671 /* 1672 * Process a main/reserve volume descriptor sequence. 1673 * @block First block of first extent of the sequence. 1674 * @lastblock Lastblock of first extent of the sequence. 1675 * @fileset There we store extent containing root fileset 1676 * 1677 * Returns <0 on error, 0 on success. -EAGAIN is special - try next descriptor 1678 * sequence 1679 */ 1680 static noinline int udf_process_sequence( 1681 struct super_block *sb, 1682 sector_t block, sector_t lastblock, 1683 struct kernel_lb_addr *fileset) 1684 { 1685 struct buffer_head *bh = NULL; 1686 struct udf_vds_record *curr; 1687 struct generic_desc *gd; 1688 struct volDescPtr *vdp; 1689 bool done = false; 1690 uint32_t vdsn; 1691 uint16_t ident; 1692 int ret; 1693 unsigned int indirections = 0; 1694 struct desc_seq_scan_data data; 1695 unsigned int i; 1696 1697 memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH); 1698 data.size_part_descs = PART_DESC_ALLOC_STEP; 1699 data.num_part_descs = 0; 1700 data.part_descs_loc = kcalloc(data.size_part_descs, 1701 sizeof(*data.part_descs_loc), 1702 GFP_KERNEL); 1703 if (!data.part_descs_loc) 1704 return -ENOMEM; 1705 1706 /* 1707 * Read the main descriptor sequence and find which descriptors 1708 * are in it. 
1709 */ 1710 for (; (!done && block <= lastblock); block++) { 1711 bh = udf_read_tagged(sb, block, block, &ident); 1712 if (!bh) 1713 break; 1714 1715 /* Process each descriptor (ISO 13346 3/8.3-8.4) */ 1716 gd = (struct generic_desc *)bh->b_data; 1717 vdsn = le32_to_cpu(gd->volDescSeqNum); 1718 switch (ident) { 1719 case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */ 1720 if (++indirections > UDF_MAX_TD_NESTING) { 1721 udf_err(sb, "too many Volume Descriptor " 1722 "Pointers (max %u supported)\n", 1723 UDF_MAX_TD_NESTING); 1724 brelse(bh); 1725 ret = -EIO; 1726 goto out; 1727 } 1728 1729 vdp = (struct volDescPtr *)bh->b_data; 1730 block = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation); 1731 lastblock = le32_to_cpu( 1732 vdp->nextVolDescSeqExt.extLength) >> 1733 sb->s_blocksize_bits; 1734 lastblock += block - 1; 1735 /* For loop is going to increment 'block' again */ 1736 block--; 1737 break; 1738 case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */ 1739 case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */ 1740 case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */ 1741 case TAG_IDENT_USD: /* ISO 13346 3/10.8 */ 1742 case TAG_IDENT_PD: /* ISO 13346 3/10.5 */ 1743 curr = get_volume_descriptor_record(ident, bh, &data); 1744 if (IS_ERR(curr)) { 1745 brelse(bh); 1746 ret = PTR_ERR(curr); 1747 goto out; 1748 } 1749 /* Descriptor we don't care about? 
*/ 1750 if (!curr) 1751 break; 1752 if (vdsn >= curr->volDescSeqNum) { 1753 curr->volDescSeqNum = vdsn; 1754 curr->block = block; 1755 } 1756 break; 1757 case TAG_IDENT_TD: /* ISO 13346 3/10.9 */ 1758 done = true; 1759 break; 1760 } 1761 brelse(bh); 1762 } 1763 /* 1764 * Now read interesting descriptors again and process them 1765 * in a suitable order 1766 */ 1767 if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) { 1768 udf_err(sb, "Primary Volume Descriptor not found!\n"); 1769 ret = -EAGAIN; 1770 goto out; 1771 } 1772 ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block); 1773 if (ret < 0) 1774 goto out; 1775 1776 if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) { 1777 ret = udf_load_logicalvol(sb, 1778 data.vds[VDS_POS_LOGICAL_VOL_DESC].block, 1779 fileset); 1780 if (ret < 0) 1781 goto out; 1782 } 1783 1784 /* Now handle prevailing Partition Descriptors */ 1785 for (i = 0; i < data.num_part_descs; i++) { 1786 ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block); 1787 if (ret < 0) 1788 goto out; 1789 } 1790 ret = 0; 1791 out: 1792 kfree(data.part_descs_loc); 1793 return ret; 1794 } 1795 1796 /* 1797 * Load Volume Descriptor Sequence described by anchor in bh 1798 * 1799 * Returns <0 on error, 0 on success 1800 */ 1801 static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh, 1802 struct kernel_lb_addr *fileset) 1803 { 1804 struct anchorVolDescPtr *anchor; 1805 sector_t main_s, main_e, reserve_s, reserve_e; 1806 int ret; 1807 1808 anchor = (struct anchorVolDescPtr *)bh->b_data; 1809 1810 /* Locate the main sequence */ 1811 main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation); 1812 main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength); 1813 main_e = main_e >> sb->s_blocksize_bits; 1814 main_e += main_s - 1; 1815 1816 /* Locate the reserve sequence */ 1817 reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation); 1818 reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength); 1819 reserve_e = reserve_e 
>> sb->s_blocksize_bits; 1820 reserve_e += reserve_s - 1; 1821 1822 /* Process the main & reserve sequences */ 1823 /* responsible for finding the PartitionDesc(s) */ 1824 ret = udf_process_sequence(sb, main_s, main_e, fileset); 1825 if (ret != -EAGAIN) 1826 return ret; 1827 udf_sb_free_partitions(sb); 1828 ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset); 1829 if (ret < 0) { 1830 udf_sb_free_partitions(sb); 1831 /* No sequence was OK, return -EIO */ 1832 if (ret == -EAGAIN) 1833 ret = -EIO; 1834 } 1835 return ret; 1836 } 1837 1838 /* 1839 * Check whether there is an anchor block in the given block and 1840 * load Volume Descriptor Sequence if so. 1841 * 1842 * Returns <0 on error, 0 on success, -EAGAIN is special - try next anchor 1843 * block 1844 */ 1845 static int udf_check_anchor_block(struct super_block *sb, sector_t block, 1846 struct kernel_lb_addr *fileset) 1847 { 1848 struct buffer_head *bh; 1849 uint16_t ident; 1850 int ret; 1851 1852 bh = udf_read_tagged(sb, block, block, &ident); 1853 if (!bh) 1854 return -EAGAIN; 1855 if (ident != TAG_IDENT_AVDP) { 1856 brelse(bh); 1857 return -EAGAIN; 1858 } 1859 ret = udf_load_sequence(sb, bh, fileset); 1860 brelse(bh); 1861 return ret; 1862 } 1863 1864 /* 1865 * Search for an anchor volume descriptor pointer. 1866 * 1867 * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set 1868 * of anchors. 
1869 */ 1870 static int udf_scan_anchors(struct super_block *sb, udf_pblk_t *lastblock, 1871 struct kernel_lb_addr *fileset) 1872 { 1873 udf_pblk_t last[6]; 1874 int i; 1875 struct udf_sb_info *sbi = UDF_SB(sb); 1876 int last_count = 0; 1877 int ret; 1878 1879 /* First try user provided anchor */ 1880 if (sbi->s_anchor) { 1881 ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset); 1882 if (ret != -EAGAIN) 1883 return ret; 1884 } 1885 /* 1886 * according to spec, anchor is in either: 1887 * block 256 1888 * lastblock-256 1889 * lastblock 1890 * however, if the disc isn't closed, it could be 512. 1891 */ 1892 ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset); 1893 if (ret != -EAGAIN) 1894 return ret; 1895 /* 1896 * The trouble is which block is the last one. Drives often misreport 1897 * this so we try various possibilities. 1898 */ 1899 last[last_count++] = *lastblock; 1900 if (*lastblock >= 1) 1901 last[last_count++] = *lastblock - 1; 1902 last[last_count++] = *lastblock + 1; 1903 if (*lastblock >= 2) 1904 last[last_count++] = *lastblock - 2; 1905 if (*lastblock >= 150) 1906 last[last_count++] = *lastblock - 150; 1907 if (*lastblock >= 152) 1908 last[last_count++] = *lastblock - 152; 1909 1910 for (i = 0; i < last_count; i++) { 1911 if (last[i] >= sb_bdev_nr_blocks(sb)) 1912 continue; 1913 ret = udf_check_anchor_block(sb, last[i], fileset); 1914 if (ret != -EAGAIN) { 1915 if (!ret) 1916 *lastblock = last[i]; 1917 return ret; 1918 } 1919 if (last[i] < 256) 1920 continue; 1921 ret = udf_check_anchor_block(sb, last[i] - 256, fileset); 1922 if (ret != -EAGAIN) { 1923 if (!ret) 1924 *lastblock = last[i]; 1925 return ret; 1926 } 1927 } 1928 1929 /* Finally try block 512 in case media is open */ 1930 return udf_check_anchor_block(sb, sbi->s_session + 512, fileset); 1931 } 1932 1933 /* 1934 * Check Volume Structure Descriptor, find Anchor block and load Volume 1935 * Descriptor Sequence. 1936 * 1937 * Returns < 0 on error, 0 on success. 
-EAGAIN is special meaning anchor 1938 * block was not found. 1939 */ 1940 static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt, 1941 int silent, struct kernel_lb_addr *fileset) 1942 { 1943 struct udf_sb_info *sbi = UDF_SB(sb); 1944 int nsr = 0; 1945 int ret; 1946 1947 if (!sb_set_blocksize(sb, uopt->blocksize)) { 1948 if (!silent) 1949 udf_warn(sb, "Bad block size\n"); 1950 return -EINVAL; 1951 } 1952 sbi->s_last_block = uopt->lastblock; 1953 if (!uopt->novrs) { 1954 /* Check that it is NSR02 compliant */ 1955 nsr = udf_check_vsd(sb); 1956 if (!nsr) { 1957 if (!silent) 1958 udf_warn(sb, "No VRS found\n"); 1959 return -EINVAL; 1960 } 1961 if (nsr == -1) 1962 udf_debug("Failed to read sector at offset %d. " 1963 "Assuming open disc. Skipping validity " 1964 "check\n", VSD_FIRST_SECTOR_OFFSET); 1965 if (!sbi->s_last_block) 1966 sbi->s_last_block = udf_get_last_block(sb); 1967 } else { 1968 udf_debug("Validity check skipped because of novrs option\n"); 1969 } 1970 1971 /* Look for anchor block and load Volume Descriptor Sequence */ 1972 sbi->s_anchor = uopt->anchor; 1973 ret = udf_scan_anchors(sb, &sbi->s_last_block, fileset); 1974 if (ret < 0) { 1975 if (!silent && ret == -EAGAIN) 1976 udf_warn(sb, "No anchor found\n"); 1977 return ret; 1978 } 1979 return 0; 1980 } 1981 1982 static void udf_finalize_lvid(struct logicalVolIntegrityDesc *lvid) 1983 { 1984 struct timespec64 ts; 1985 1986 ktime_get_real_ts64(&ts); 1987 udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts); 1988 lvid->descTag.descCRC = cpu_to_le16( 1989 crc_itu_t(0, (char *)lvid + sizeof(struct tag), 1990 le16_to_cpu(lvid->descTag.descCRCLength))); 1991 lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag); 1992 } 1993 1994 static void udf_open_lvid(struct super_block *sb) 1995 { 1996 struct udf_sb_info *sbi = UDF_SB(sb); 1997 struct buffer_head *bh = sbi->s_lvid_bh; 1998 struct logicalVolIntegrityDesc *lvid; 1999 struct logicalVolIntegrityDescImpUse *lvidiu; 2000 2001 if 
(!bh) 2002 return; 2003 lvid = (struct logicalVolIntegrityDesc *)bh->b_data; 2004 lvidiu = udf_sb_lvidiu(sb); 2005 if (!lvidiu) 2006 return; 2007 2008 mutex_lock(&sbi->s_alloc_mutex); 2009 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 2010 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 2011 if (le32_to_cpu(lvid->integrityType) == LVID_INTEGRITY_TYPE_CLOSE) 2012 lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN); 2013 else 2014 UDF_SET_FLAG(sb, UDF_FLAG_INCONSISTENT); 2015 2016 udf_finalize_lvid(lvid); 2017 mark_buffer_dirty(bh); 2018 sbi->s_lvid_dirty = 0; 2019 mutex_unlock(&sbi->s_alloc_mutex); 2020 /* Make opening of filesystem visible on the media immediately */ 2021 sync_dirty_buffer(bh); 2022 } 2023 2024 static void udf_close_lvid(struct super_block *sb) 2025 { 2026 struct udf_sb_info *sbi = UDF_SB(sb); 2027 struct buffer_head *bh = sbi->s_lvid_bh; 2028 struct logicalVolIntegrityDesc *lvid; 2029 struct logicalVolIntegrityDescImpUse *lvidiu; 2030 2031 if (!bh) 2032 return; 2033 lvid = (struct logicalVolIntegrityDesc *)bh->b_data; 2034 lvidiu = udf_sb_lvidiu(sb); 2035 if (!lvidiu) 2036 return; 2037 2038 mutex_lock(&sbi->s_alloc_mutex); 2039 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; 2040 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; 2041 if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev)) 2042 lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION); 2043 if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev)) 2044 lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev); 2045 if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev)) 2046 lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev); 2047 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_INCONSISTENT)) 2048 lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE); 2049 2050 /* 2051 * We set buffer uptodate unconditionally here to avoid spurious 2052 * warnings from mark_buffer_dirty() when previous EIO has marked 2053 * the buffer as !uptodate 2054 */ 2055 
set_buffer_uptodate(bh); 2056 udf_finalize_lvid(lvid); 2057 mark_buffer_dirty(bh); 2058 sbi->s_lvid_dirty = 0; 2059 mutex_unlock(&sbi->s_alloc_mutex); 2060 /* Make closing of filesystem visible on the media immediately */ 2061 sync_dirty_buffer(bh); 2062 } 2063 2064 u64 lvid_get_unique_id(struct super_block *sb) 2065 { 2066 struct buffer_head *bh; 2067 struct udf_sb_info *sbi = UDF_SB(sb); 2068 struct logicalVolIntegrityDesc *lvid; 2069 struct logicalVolHeaderDesc *lvhd; 2070 u64 uniqueID; 2071 u64 ret; 2072 2073 bh = sbi->s_lvid_bh; 2074 if (!bh) 2075 return 0; 2076 2077 lvid = (struct logicalVolIntegrityDesc *)bh->b_data; 2078 lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse; 2079 2080 mutex_lock(&sbi->s_alloc_mutex); 2081 ret = uniqueID = le64_to_cpu(lvhd->uniqueID); 2082 if (!(++uniqueID & 0xFFFFFFFF)) 2083 uniqueID += 16; 2084 lvhd->uniqueID = cpu_to_le64(uniqueID); 2085 udf_updated_lvid(sb); 2086 mutex_unlock(&sbi->s_alloc_mutex); 2087 2088 return ret; 2089 } 2090 2091 static int udf_fill_super(struct super_block *sb, void *options, int silent) 2092 { 2093 int ret = -EINVAL; 2094 struct inode *inode = NULL; 2095 struct udf_options uopt; 2096 struct kernel_lb_addr rootdir, fileset; 2097 struct udf_sb_info *sbi; 2098 bool lvid_open = false; 2099 2100 uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT); 2101 /* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */ 2102 uopt.uid = make_kuid(current_user_ns(), overflowuid); 2103 uopt.gid = make_kgid(current_user_ns(), overflowgid); 2104 uopt.umask = 0; 2105 uopt.fmode = UDF_INVALID_MODE; 2106 uopt.dmode = UDF_INVALID_MODE; 2107 uopt.nls_map = NULL; 2108 2109 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 2110 if (!sbi) 2111 return -ENOMEM; 2112 2113 sb->s_fs_info = sbi; 2114 2115 mutex_init(&sbi->s_alloc_mutex); 2116 2117 if (!udf_parse_options((char *)options, &uopt, false)) 2118 goto parse_options_failure; 2119 2120 fileset.logicalBlockNum = 0xFFFFFFFF; 2121 
fileset.partitionReferenceNum = 0xFFFF; 2122 2123 sbi->s_flags = uopt.flags; 2124 sbi->s_uid = uopt.uid; 2125 sbi->s_gid = uopt.gid; 2126 sbi->s_umask = uopt.umask; 2127 sbi->s_fmode = uopt.fmode; 2128 sbi->s_dmode = uopt.dmode; 2129 sbi->s_nls_map = uopt.nls_map; 2130 rwlock_init(&sbi->s_cred_lock); 2131 2132 if (uopt.session == 0xFFFFFFFF) 2133 sbi->s_session = udf_get_last_session(sb); 2134 else 2135 sbi->s_session = uopt.session; 2136 2137 udf_debug("Multi-session=%d\n", sbi->s_session); 2138 2139 /* Fill in the rest of the superblock */ 2140 sb->s_op = &udf_sb_ops; 2141 sb->s_export_op = &udf_export_ops; 2142 2143 sb->s_magic = UDF_SUPER_MAGIC; 2144 sb->s_time_gran = 1000; 2145 2146 if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) { 2147 ret = udf_load_vrs(sb, &uopt, silent, &fileset); 2148 } else { 2149 uopt.blocksize = bdev_logical_block_size(sb->s_bdev); 2150 while (uopt.blocksize <= 4096) { 2151 ret = udf_load_vrs(sb, &uopt, silent, &fileset); 2152 if (ret < 0) { 2153 if (!silent && ret != -EACCES) { 2154 pr_notice("Scanning with blocksize %u failed\n", 2155 uopt.blocksize); 2156 } 2157 brelse(sbi->s_lvid_bh); 2158 sbi->s_lvid_bh = NULL; 2159 /* 2160 * EACCES is special - we want to propagate to 2161 * upper layers that we cannot handle RW mount. 
2162 */ 2163 if (ret == -EACCES) 2164 break; 2165 } else 2166 break; 2167 2168 uopt.blocksize <<= 1; 2169 } 2170 } 2171 if (ret < 0) { 2172 if (ret == -EAGAIN) { 2173 udf_warn(sb, "No partition found (1)\n"); 2174 ret = -EINVAL; 2175 } 2176 goto error_out; 2177 } 2178 2179 udf_debug("Lastblock=%u\n", sbi->s_last_block); 2180 2181 if (sbi->s_lvid_bh) { 2182 struct logicalVolIntegrityDescImpUse *lvidiu = 2183 udf_sb_lvidiu(sb); 2184 uint16_t minUDFReadRev; 2185 uint16_t minUDFWriteRev; 2186 2187 if (!lvidiu) { 2188 ret = -EINVAL; 2189 goto error_out; 2190 } 2191 minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev); 2192 minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev); 2193 if (minUDFReadRev > UDF_MAX_READ_VERSION) { 2194 udf_err(sb, "minUDFReadRev=%x (max is %x)\n", 2195 minUDFReadRev, 2196 UDF_MAX_READ_VERSION); 2197 ret = -EINVAL; 2198 goto error_out; 2199 } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) { 2200 if (!sb_rdonly(sb)) { 2201 ret = -EACCES; 2202 goto error_out; 2203 } 2204 UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT); 2205 } 2206 2207 sbi->s_udfrev = minUDFWriteRev; 2208 2209 if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE) 2210 UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE); 2211 if (minUDFReadRev >= UDF_VERS_USE_STREAMS) 2212 UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS); 2213 } 2214 2215 if (!sbi->s_partitions) { 2216 udf_warn(sb, "No partition found (2)\n"); 2217 ret = -EINVAL; 2218 goto error_out; 2219 } 2220 2221 if (sbi->s_partmaps[sbi->s_partition].s_partition_flags & 2222 UDF_PART_FLAG_READ_ONLY) { 2223 if (!sb_rdonly(sb)) { 2224 ret = -EACCES; 2225 goto error_out; 2226 } 2227 UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT); 2228 } 2229 2230 ret = udf_find_fileset(sb, &fileset, &rootdir); 2231 if (ret < 0) { 2232 udf_warn(sb, "No fileset found\n"); 2233 goto error_out; 2234 } 2235 2236 if (!silent) { 2237 struct timestamp ts; 2238 udf_time_to_disk_stamp(&ts, sbi->s_record_time); 2239 udf_info("Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n", 2240 
sbi->s_volume_ident, 2241 le16_to_cpu(ts.year), ts.month, ts.day, 2242 ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone)); 2243 } 2244 if (!sb_rdonly(sb)) { 2245 udf_open_lvid(sb); 2246 lvid_open = true; 2247 } 2248 2249 /* Assign the root inode */ 2250 /* assign inodes by physical block number */ 2251 /* perhaps it's not extensible enough, but for now ... */ 2252 inode = udf_iget(sb, &rootdir); 2253 if (IS_ERR(inode)) { 2254 udf_err(sb, "Error in udf_iget, block=%u, partition=%u\n", 2255 rootdir.logicalBlockNum, rootdir.partitionReferenceNum); 2256 ret = PTR_ERR(inode); 2257 goto error_out; 2258 } 2259 2260 /* Allocate a dentry for the root inode */ 2261 sb->s_root = d_make_root(inode); 2262 if (!sb->s_root) { 2263 udf_err(sb, "Couldn't allocate root dentry\n"); 2264 ret = -ENOMEM; 2265 goto error_out; 2266 } 2267 sb->s_maxbytes = UDF_MAX_FILESIZE; 2268 sb->s_max_links = UDF_MAX_LINKS; 2269 return 0; 2270 2271 error_out: 2272 iput(sbi->s_vat_inode); 2273 parse_options_failure: 2274 unload_nls(uopt.nls_map); 2275 if (lvid_open) 2276 udf_close_lvid(sb); 2277 brelse(sbi->s_lvid_bh); 2278 udf_sb_free_partitions(sb); 2279 kfree(sbi); 2280 sb->s_fs_info = NULL; 2281 2282 return ret; 2283 } 2284 2285 void _udf_err(struct super_block *sb, const char *function, 2286 const char *fmt, ...) 2287 { 2288 struct va_format vaf; 2289 va_list args; 2290 2291 va_start(args, fmt); 2292 2293 vaf.fmt = fmt; 2294 vaf.va = &args; 2295 2296 pr_err("error (device %s): %s: %pV", sb->s_id, function, &vaf); 2297 2298 va_end(args); 2299 } 2300 2301 void _udf_warn(struct super_block *sb, const char *function, 2302 const char *fmt, ...) 
2303 { 2304 struct va_format vaf; 2305 va_list args; 2306 2307 va_start(args, fmt); 2308 2309 vaf.fmt = fmt; 2310 vaf.va = &args; 2311 2312 pr_warn("warning (device %s): %s: %pV", sb->s_id, function, &vaf); 2313 2314 va_end(args); 2315 } 2316 2317 static void udf_put_super(struct super_block *sb) 2318 { 2319 struct udf_sb_info *sbi; 2320 2321 sbi = UDF_SB(sb); 2322 2323 iput(sbi->s_vat_inode); 2324 unload_nls(sbi->s_nls_map); 2325 if (!sb_rdonly(sb)) 2326 udf_close_lvid(sb); 2327 brelse(sbi->s_lvid_bh); 2328 udf_sb_free_partitions(sb); 2329 mutex_destroy(&sbi->s_alloc_mutex); 2330 kfree(sb->s_fs_info); 2331 sb->s_fs_info = NULL; 2332 } 2333 2334 static int udf_sync_fs(struct super_block *sb, int wait) 2335 { 2336 struct udf_sb_info *sbi = UDF_SB(sb); 2337 2338 mutex_lock(&sbi->s_alloc_mutex); 2339 if (sbi->s_lvid_dirty) { 2340 struct buffer_head *bh = sbi->s_lvid_bh; 2341 struct logicalVolIntegrityDesc *lvid; 2342 2343 lvid = (struct logicalVolIntegrityDesc *)bh->b_data; 2344 udf_finalize_lvid(lvid); 2345 2346 /* 2347 * Blockdevice will be synced later so we don't have to submit 2348 * the buffer for IO 2349 */ 2350 mark_buffer_dirty(bh); 2351 sbi->s_lvid_dirty = 0; 2352 } 2353 mutex_unlock(&sbi->s_alloc_mutex); 2354 2355 return 0; 2356 } 2357 2358 static int udf_statfs(struct dentry *dentry, struct kstatfs *buf) 2359 { 2360 struct super_block *sb = dentry->d_sb; 2361 struct udf_sb_info *sbi = UDF_SB(sb); 2362 struct logicalVolIntegrityDescImpUse *lvidiu; 2363 u64 id = huge_encode_dev(sb->s_bdev->bd_dev); 2364 2365 lvidiu = udf_sb_lvidiu(sb); 2366 buf->f_type = UDF_SUPER_MAGIC; 2367 buf->f_bsize = sb->s_blocksize; 2368 buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len; 2369 buf->f_bfree = udf_count_free(sb); 2370 buf->f_bavail = buf->f_bfree; 2371 /* 2372 * Let's pretend each free block is also a free 'inode' since UDF does 2373 * not have separate preallocated table of inodes. 2374 */ 2375 buf->f_files = (lvidiu != NULL ? 
(le32_to_cpu(lvidiu->numFiles) + 2376 le32_to_cpu(lvidiu->numDirs)) : 0) 2377 + buf->f_bfree; 2378 buf->f_ffree = buf->f_bfree; 2379 buf->f_namelen = UDF_NAME_LEN; 2380 buf->f_fsid = u64_to_fsid(id); 2381 2382 return 0; 2383 } 2384 2385 static unsigned int udf_count_free_bitmap(struct super_block *sb, 2386 struct udf_bitmap *bitmap) 2387 { 2388 struct buffer_head *bh = NULL; 2389 unsigned int accum = 0; 2390 int index; 2391 udf_pblk_t block = 0, newblock; 2392 struct kernel_lb_addr loc; 2393 uint32_t bytes; 2394 uint8_t *ptr; 2395 uint16_t ident; 2396 struct spaceBitmapDesc *bm; 2397 2398 loc.logicalBlockNum = bitmap->s_extPosition; 2399 loc.partitionReferenceNum = UDF_SB(sb)->s_partition; 2400 bh = udf_read_ptagged(sb, &loc, 0, &ident); 2401 2402 if (!bh) { 2403 udf_err(sb, "udf_count_free failed\n"); 2404 goto out; 2405 } else if (ident != TAG_IDENT_SBD) { 2406 brelse(bh); 2407 udf_err(sb, "udf_count_free failed\n"); 2408 goto out; 2409 } 2410 2411 bm = (struct spaceBitmapDesc *)bh->b_data; 2412 bytes = le32_to_cpu(bm->numOfBytes); 2413 index = sizeof(struct spaceBitmapDesc); /* offset in first block only */ 2414 ptr = (uint8_t *)bh->b_data; 2415 2416 while (bytes > 0) { 2417 u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index); 2418 accum += bitmap_weight((const unsigned long *)(ptr + index), 2419 cur_bytes * 8); 2420 bytes -= cur_bytes; 2421 if (bytes) { 2422 brelse(bh); 2423 newblock = udf_get_lb_pblock(sb, &loc, ++block); 2424 bh = sb_bread(sb, newblock); 2425 if (!bh) { 2426 udf_debug("read failed\n"); 2427 goto out; 2428 } 2429 index = 0; 2430 ptr = (uint8_t *)bh->b_data; 2431 } 2432 } 2433 brelse(bh); 2434 out: 2435 return accum; 2436 } 2437 2438 static unsigned int udf_count_free_table(struct super_block *sb, 2439 struct inode *table) 2440 { 2441 unsigned int accum = 0; 2442 uint32_t elen; 2443 struct kernel_lb_addr eloc; 2444 struct extent_position epos; 2445 2446 mutex_lock(&UDF_SB(sb)->s_alloc_mutex); 2447 epos.block = UDF_I(table)->i_location; 
2448 epos.offset = sizeof(struct unallocSpaceEntry); 2449 epos.bh = NULL; 2450 2451 while (udf_next_aext(table, &epos, &eloc, &elen, 1) != -1) 2452 accum += (elen >> table->i_sb->s_blocksize_bits); 2453 2454 brelse(epos.bh); 2455 mutex_unlock(&UDF_SB(sb)->s_alloc_mutex); 2456 2457 return accum; 2458 } 2459 2460 static unsigned int udf_count_free(struct super_block *sb) 2461 { 2462 unsigned int accum = 0; 2463 struct udf_sb_info *sbi = UDF_SB(sb); 2464 struct udf_part_map *map; 2465 unsigned int part = sbi->s_partition; 2466 int ptype = sbi->s_partmaps[part].s_partition_type; 2467 2468 if (ptype == UDF_METADATA_MAP25) { 2469 part = sbi->s_partmaps[part].s_type_specific.s_metadata. 2470 s_phys_partition_ref; 2471 } else if (ptype == UDF_VIRTUAL_MAP15 || ptype == UDF_VIRTUAL_MAP20) { 2472 /* 2473 * Filesystems with VAT are append-only and we cannot write to 2474 * them. Let's just report 0 here. 2475 */ 2476 return 0; 2477 } 2478 2479 if (sbi->s_lvid_bh) { 2480 struct logicalVolIntegrityDesc *lvid = 2481 (struct logicalVolIntegrityDesc *) 2482 sbi->s_lvid_bh->b_data; 2483 if (le32_to_cpu(lvid->numOfPartitions) > part) { 2484 accum = le32_to_cpu( 2485 lvid->freeSpaceTable[part]); 2486 if (accum == 0xFFFFFFFF) 2487 accum = 0; 2488 } 2489 } 2490 2491 if (accum) 2492 return accum; 2493 2494 map = &sbi->s_partmaps[part]; 2495 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) { 2496 accum += udf_count_free_bitmap(sb, 2497 map->s_uspace.s_bitmap); 2498 } 2499 if (accum) 2500 return accum; 2501 2502 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) { 2503 accum += udf_count_free_table(sb, 2504 map->s_uspace.s_table); 2505 } 2506 return accum; 2507 } 2508 2509 MODULE_AUTHOR("Ben Fennema"); 2510 MODULE_DESCRIPTION("Universal Disk Format Filesystem"); 2511 MODULE_LICENSE("GPL"); 2512 module_init(init_udf_fs) 2513 module_exit(exit_udf_fs) 2514