/*
 * super.c
 *
 * PURPOSE
 *	Super block routines for the OSTA-UDF(tm) filesystem.
 *
 * DESCRIPTION
 *	OSTA-UDF(tm) = Optical Storage Technology Association
 *	Universal Disk Format.
 *
 *	This code is based on version 2.00 of the UDF specification,
 *	and revision 3 of the ECMA 167 standard [equivalent to ISO 13346].
 *	http://www.osta.org/
 *	http://www.ecma.ch/
 *	http://www.iso.org/
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1998 Dave Boynton
 *  (C) 1998-2004 Ben Fennema
 *  (C) 2000 Stelias Computing Inc
 *
 * HISTORY
 *
 *  09/24/98 dgb  changed to allow compiling outside of kernel, and
 *                added some debugging.
 *  10/01/98 dgb  updated to allow (some) possibility of compiling w/2.0.34
 *  10/16/98      attempting some multi-session support
 *  10/17/98      added freespace count for "df"
 *  11/11/98 gr   added novrs option
 *  11/26/98 dgb  added fileset,anchor mount options
 *  12/06/98 blf  really hosed things royally. vat/sparing support. sequenced
 *                vol descs. rewrote option handling based on isofs
 *  12/20/98      find the free space bitmap (if it exists)
 */

#include "udfdecl.h"

#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/stat.h>
#include <linux/cdrom.h>
#include <linux/nls.h>
#include <linux/vfs.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/bitmap.h>
#include <linux/crc-itu-t.h>
#include <linux/log2.h>
#include <asm/byteorder.h>

#include "udf_sb.h"
#include "udf_i.h"

#include <linux/init.h>
#include <linux/uaccess.h>

#define VDS_POS_PRIMARY_VOL_DESC	0
#define VDS_POS_UNALLOC_SPACE_DESC	1
#define VDS_POS_LOGICAL_VOL_DESC	2
#define VDS_POS_PARTITION_DESC		3
#define VDS_POS_IMP_USE_VOL_DESC	4
#define VDS_POS_VOL_DESC_PTR		5
#define VDS_POS_TERMINATING_DESC	6
#define VDS_POS_LENGTH			7

#define VSD_FIRST_SECTOR_OFFSET		32768
#define VSD_MAX_SECTOR_OFFSET		0x800000

/*
 * Maximum number of Terminating Descriptor / Logical Volume Integrity
 * Descriptor redirections. The chosen numbers are arbitrary - just that we
 * hopefully don't limit any real use of rewritten inode on write-once media
 * but avoid looping for too long on corrupted media.
84 */ 85 #define UDF_MAX_TD_NESTING 64 86 #define UDF_MAX_LVID_NESTING 1000 87 88 enum { UDF_MAX_LINKS = 0xffff }; 89 90 /* These are the "meat" - everything else is stuffing */ 91 static int udf_fill_super(struct super_block *, void *, int); 92 static void udf_put_super(struct super_block *); 93 static int udf_sync_fs(struct super_block *, int); 94 static int udf_remount_fs(struct super_block *, int *, char *); 95 static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad); 96 static int udf_find_fileset(struct super_block *, struct kernel_lb_addr *, 97 struct kernel_lb_addr *); 98 static void udf_load_fileset(struct super_block *, struct buffer_head *, 99 struct kernel_lb_addr *); 100 static void udf_open_lvid(struct super_block *); 101 static void udf_close_lvid(struct super_block *); 102 static unsigned int udf_count_free(struct super_block *); 103 static int udf_statfs(struct dentry *, struct kstatfs *); 104 static int udf_show_options(struct seq_file *, struct dentry *); 105 106 struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb) 107 { 108 struct logicalVolIntegrityDesc *lvid; 109 unsigned int partnum; 110 unsigned int offset; 111 112 if (!UDF_SB(sb)->s_lvid_bh) 113 return NULL; 114 lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data; 115 partnum = le32_to_cpu(lvid->numOfPartitions); 116 if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) - 117 offsetof(struct logicalVolIntegrityDesc, impUse)) / 118 (2 * sizeof(uint32_t)) < partnum) { 119 udf_err(sb, "Logical volume integrity descriptor corrupted " 120 "(numOfPartitions = %u)!\n", partnum); 121 return NULL; 122 } 123 /* The offset is to skip freeSpaceTable and sizeTable arrays */ 124 offset = partnum * 2 * sizeof(uint32_t); 125 return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]); 126 } 127 128 /* UDF filesystem type */ 129 static struct dentry *udf_mount(struct file_system_type *fs_type, 130 int flags, const char *dev_name, void *data) 131 { 132 return mount_bdev(fs_type, flags, dev_name, data, udf_fill_super); 133 } 134 135 static struct file_system_type udf_fstype = { 136 .owner = THIS_MODULE, 137 .name = "udf", 138 .mount = udf_mount, 139 .kill_sb = kill_block_super, 140 .fs_flags = FS_REQUIRES_DEV, 141 }; 142 MODULE_ALIAS_FS("udf"); 143 144 static struct kmem_cache *udf_inode_cachep; 145 146 static struct inode *udf_alloc_inode(struct super_block *sb) 147 { 148 struct udf_inode_info *ei; 149 ei = kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL); 150 if (!ei) 151 return NULL; 152 153 ei->i_unique = 0; 154 ei->i_lenExtents = 0; 155 ei->i_next_alloc_block = 0; 156 ei->i_next_alloc_goal = 0; 157 ei->i_strat4096 = 0; 158 init_rwsem(&ei->i_data_sem); 159 ei->cached_extent.lstart = -1; 160 spin_lock_init(&ei->i_extent_cache_lock); 161 162 return &ei->vfs_inode; 163 } 164 165 static void udf_i_callback(struct rcu_head *head) 166 { 167 struct inode *inode = container_of(head, struct inode, i_rcu); 168 kmem_cache_free(udf_inode_cachep, UDF_I(inode)); 169 } 170 171 static void udf_destroy_inode(struct inode *inode) 172 { 173 call_rcu(&inode->i_rcu, udf_i_callback); 174 } 175 176 static void init_once(void *foo) 177 { 178 struct udf_inode_info *ei = (struct udf_inode_info *)foo; 179 180 ei->i_ext.i_data = NULL; 181 inode_init_once(&ei->vfs_inode); 182 } 183 184 static int __init init_inodecache(void) 185 { 186 udf_inode_cachep = kmem_cache_create("udf_inode_cache", 187 sizeof(struct udf_inode_info), 188 0, (SLAB_RECLAIM_ACCOUNT | 189 
SLAB_MEM_SPREAD | 190 SLAB_ACCOUNT), 191 init_once); 192 if (!udf_inode_cachep) 193 return -ENOMEM; 194 return 0; 195 } 196 197 static void destroy_inodecache(void) 198 { 199 /* 200 * Make sure all delayed rcu free inodes are flushed before we 201 * destroy cache. 202 */ 203 rcu_barrier(); 204 kmem_cache_destroy(udf_inode_cachep); 205 } 206 207 /* Superblock operations */ 208 static const struct super_operations udf_sb_ops = { 209 .alloc_inode = udf_alloc_inode, 210 .destroy_inode = udf_destroy_inode, 211 .write_inode = udf_write_inode, 212 .evict_inode = udf_evict_inode, 213 .put_super = udf_put_super, 214 .sync_fs = udf_sync_fs, 215 .statfs = udf_statfs, 216 .remount_fs = udf_remount_fs, 217 .show_options = udf_show_options, 218 }; 219 220 struct udf_options { 221 unsigned char novrs; 222 unsigned int blocksize; 223 unsigned int session; 224 unsigned int lastblock; 225 unsigned int anchor; 226 unsigned int volume; 227 unsigned short partition; 228 unsigned int fileset; 229 unsigned int rootdir; 230 unsigned int flags; 231 umode_t umask; 232 kgid_t gid; 233 kuid_t uid; 234 umode_t fmode; 235 umode_t dmode; 236 struct nls_table *nls_map; 237 }; 238 239 static int __init init_udf_fs(void) 240 { 241 int err; 242 243 err = init_inodecache(); 244 if (err) 245 goto out1; 246 err = register_filesystem(&udf_fstype); 247 if (err) 248 goto out; 249 250 return 0; 251 252 out: 253 destroy_inodecache(); 254 255 out1: 256 return err; 257 } 258 259 static void __exit exit_udf_fs(void) 260 { 261 unregister_filesystem(&udf_fstype); 262 destroy_inodecache(); 263 } 264 265 static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count) 266 { 267 struct udf_sb_info *sbi = UDF_SB(sb); 268 269 sbi->s_partmaps = kcalloc(count, sizeof(struct udf_part_map), 270 GFP_KERNEL); 271 if (!sbi->s_partmaps) { 272 udf_err(sb, "Unable to allocate space for %d partition maps\n", 273 count); 274 sbi->s_partitions = 0; 275 return -ENOMEM; 276 } 277 278 sbi->s_partitions = count; 279 return 0; 280 } 281 282 static void udf_sb_free_bitmap(struct udf_bitmap *bitmap) 283 { 284 int i; 285 int nr_groups = bitmap->s_nr_groups; 286 287 for (i = 0; i < nr_groups; i++) 288 if (bitmap->s_block_bitmap[i]) 289 brelse(bitmap->s_block_bitmap[i]); 290 291 kvfree(bitmap); 292 } 293 294 static void udf_free_partition(struct udf_part_map *map) 295 { 296 int i; 297 struct udf_meta_data *mdata; 298 299 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) 300 iput(map->s_uspace.s_table); 301 if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) 302 iput(map->s_fspace.s_table); 303 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) 304 udf_sb_free_bitmap(map->s_uspace.s_bitmap); 305 if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) 306 udf_sb_free_bitmap(map->s_fspace.s_bitmap); 307 if (map->s_partition_type == UDF_SPARABLE_MAP15) 308 for (i = 0; i < 4; i++) 309 brelse(map->s_type_specific.s_sparing.s_spar_map[i]); 310 else if (map->s_partition_type == UDF_METADATA_MAP25) { 311 mdata = &map->s_type_specific.s_metadata; 312 iput(mdata->s_metadata_fe); 313 mdata->s_metadata_fe = NULL; 314 315 iput(mdata->s_mirror_fe); 316 mdata->s_mirror_fe = NULL; 317 318 iput(mdata->s_bitmap_fe); 319 mdata->s_bitmap_fe = NULL; 320 } 321 } 322 323 static void udf_sb_free_partitions(struct super_block *sb) 324 { 325 struct udf_sb_info *sbi = UDF_SB(sb); 326 int i; 327 if (sbi->s_partmaps == NULL) 328 return; 329 for (i = 0; i < sbi->s_partitions; i++) 330 udf_free_partition(&sbi->s_partmaps[i]); 331 kfree(sbi->s_partmaps); 332 
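	/*
	 * Clearing the pointer makes a repeated call (e.g. from the error
	 * cleanup path) a harmless no-op.
	 */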
	sbi->s_partmaps = NULL;
}

static int udf_show_options(struct seq_file *seq, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct udf_sb_info *sbi = UDF_SB(sb);

	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
		seq_puts(seq, ",nostrict");
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_BLOCKSIZE_SET))
		seq_printf(seq, ",bs=%lu", sb->s_blocksize);
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
		seq_puts(seq, ",unhide");
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
		seq_puts(seq, ",undelete");
	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB))
		seq_puts(seq, ",noadinicb");
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD))
		seq_puts(seq, ",shortad");
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET))
		seq_puts(seq, ",uid=forget");
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_IGNORE))
		seq_puts(seq, ",uid=ignore");
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET))
		seq_puts(seq, ",gid=forget");
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_IGNORE))
		seq_puts(seq, ",gid=ignore");
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
		seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid));
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
		seq_printf(seq, ",gid=%u", from_kgid(&init_user_ns, sbi->s_gid));
	if (sbi->s_umask != 0)
		seq_printf(seq, ",umask=%ho", sbi->s_umask);
	if (sbi->s_fmode != UDF_INVALID_MODE)
		seq_printf(seq, ",mode=%ho", sbi->s_fmode);
	if (sbi->s_dmode != UDF_INVALID_MODE)
		seq_printf(seq, ",dmode=%ho", sbi->s_dmode);
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
		seq_printf(seq, ",session=%u", sbi->s_session);
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
		seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
	if (sbi->s_anchor != 0)
		seq_printf(seq, ",anchor=%u", sbi->s_anchor);
	/*
	 * volume, partition, fileset and rootdir seem to be ignored
	 * currently
	 */
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
		seq_puts(seq, ",utf8");
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map)
		seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);

	return 0;
}

/*
 * udf_parse_options
 *
 * PURPOSE
 *	Parse mount options.
 *
 * DESCRIPTION
 *	The following mount options are supported:
 *
 *	gid=		Set the default group.
 *	umask=		Set the default umask.
 *	mode=		Set the default file permissions.
 *	dmode=		Set the default directory permissions.
 *	uid=		Set the default user.
 *	bs=		Set the block size.
 *	unhide		Show otherwise hidden files.
 *	undelete	Show deleted files in lists.
 *	adinicb		Embed data in the inode (default)
 *	noadinicb	Don't embed data in the inode
 *	shortad		Use short ad's
 *	longad		Use long ad's (default)
 *	nostrict	Unset strict conformance
 *	iocharset=	Set the NLS character set
 *
 *	The remaining options are for debugging and disaster recovery:
 *
 *	novrs		Skip volume sequence recognition
 *
 *	The following expect an offset from 0.
 *
 *	session=	Set the CDROM session (default= last session)
 *	anchor=		Override standard anchor location. (default= 256)
 *	volume=		Override the VolumeDesc location. (unused)
 *	partition=	Override the PartitionDesc location. (unused)
 *	lastblock=	Set the last block of the filesystem.
 *
 *	The following expect an offset from the partition root.
 *
 *	fileset=	Override the fileset block location. (unused)
 *	rootdir=	Override the root directory location.
(unused) 428 * WARNING: overriding the rootdir to a non-directory may 429 * yield highly unpredictable results. 430 * 431 * PRE-CONDITIONS 432 * options Pointer to mount options string. 433 * uopts Pointer to mount options variable. 434 * 435 * POST-CONDITIONS 436 * <return> 1 Mount options parsed okay. 437 * <return> 0 Error parsing mount options. 438 * 439 * HISTORY 440 * July 1, 1997 - Andrew E. Mileski 441 * Written, tested, and released. 442 */ 443 444 enum { 445 Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete, 446 Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad, 447 Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock, 448 Opt_anchor, Opt_volume, Opt_partition, Opt_fileset, 449 Opt_rootdir, Opt_utf8, Opt_iocharset, 450 Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore, 451 Opt_fmode, Opt_dmode 452 }; 453 454 static const match_table_t tokens = { 455 {Opt_novrs, "novrs"}, 456 {Opt_nostrict, "nostrict"}, 457 {Opt_bs, "bs=%u"}, 458 {Opt_unhide, "unhide"}, 459 {Opt_undelete, "undelete"}, 460 {Opt_noadinicb, "noadinicb"}, 461 {Opt_adinicb, "adinicb"}, 462 {Opt_shortad, "shortad"}, 463 {Opt_longad, "longad"}, 464 {Opt_uforget, "uid=forget"}, 465 {Opt_uignore, "uid=ignore"}, 466 {Opt_gforget, "gid=forget"}, 467 {Opt_gignore, "gid=ignore"}, 468 {Opt_gid, "gid=%u"}, 469 {Opt_uid, "uid=%u"}, 470 {Opt_umask, "umask=%o"}, 471 {Opt_session, "session=%u"}, 472 {Opt_lastblock, "lastblock=%u"}, 473 {Opt_anchor, "anchor=%u"}, 474 {Opt_volume, "volume=%u"}, 475 {Opt_partition, "partition=%u"}, 476 {Opt_fileset, "fileset=%u"}, 477 {Opt_rootdir, "rootdir=%u"}, 478 {Opt_utf8, "utf8"}, 479 {Opt_iocharset, "iocharset=%s"}, 480 {Opt_fmode, "mode=%o"}, 481 {Opt_dmode, "dmode=%o"}, 482 {Opt_err, NULL} 483 }; 484 485 static int udf_parse_options(char *options, struct udf_options *uopt, 486 bool remount) 487 { 488 char *p; 489 int option; 490 491 uopt->novrs = 0; 492 uopt->partition = 0xFFFF; 493 uopt->session = 0xFFFFFFFF; 494 uopt->lastblock = 0; 495 uopt->anchor = 0; 496 uopt->volume = 0xFFFFFFFF; 497 uopt->rootdir = 0xFFFFFFFF; 498 uopt->fileset = 0xFFFFFFFF; 499 uopt->nls_map = NULL; 500 501 if (!options) 502 return 1; 503 504 while ((p = strsep(&options, ",")) != NULL) { 505 substring_t args[MAX_OPT_ARGS]; 506 int token; 507 unsigned n; 508 if (!*p) 509 continue; 510 511 token = match_token(p, tokens, args); 512 switch (token) { 513 case Opt_novrs: 514 uopt->novrs = 1; 515 break; 516 case Opt_bs: 517 if (match_int(&args[0], &option)) 518 return 0; 519 n = option; 520 if (n != 512 && n != 1024 && n != 2048 && n != 4096) 521 return 0; 522 uopt->blocksize = n; 523 uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET); 524 break; 525 case Opt_unhide: 526 uopt->flags |= (1 << UDF_FLAG_UNHIDE); 527 break; 528 case Opt_undelete: 529 uopt->flags |= (1 << UDF_FLAG_UNDELETE); 530 break; 531 case Opt_noadinicb: 532 uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB); 533 break; 534 case Opt_adinicb: 535 uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB); 536 break; 537 case Opt_shortad: 538 uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD); 539 break; 540 case Opt_longad: 541 uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD); 542 break; 543 case Opt_gid: 544 if (match_int(args, &option)) 545 return 0; 546 uopt->gid = make_kgid(current_user_ns(), option); 547 if (!gid_valid(uopt->gid)) 548 return 0; 549 uopt->flags |= (1 << UDF_FLAG_GID_SET); 550 break; 551 case Opt_uid: 552 if (match_int(args, &option)) 553 return 0; 554 uopt->uid = make_kuid(current_user_ns(), option); 555 if (!uid_valid(uopt->uid)) 556 return 0; 557 
uopt->flags |= (1 << UDF_FLAG_UID_SET); 558 break; 559 case Opt_umask: 560 if (match_octal(args, &option)) 561 return 0; 562 uopt->umask = option; 563 break; 564 case Opt_nostrict: 565 uopt->flags &= ~(1 << UDF_FLAG_STRICT); 566 break; 567 case Opt_session: 568 if (match_int(args, &option)) 569 return 0; 570 uopt->session = option; 571 if (!remount) 572 uopt->flags |= (1 << UDF_FLAG_SESSION_SET); 573 break; 574 case Opt_lastblock: 575 if (match_int(args, &option)) 576 return 0; 577 uopt->lastblock = option; 578 if (!remount) 579 uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET); 580 break; 581 case Opt_anchor: 582 if (match_int(args, &option)) 583 return 0; 584 uopt->anchor = option; 585 break; 586 case Opt_volume: 587 if (match_int(args, &option)) 588 return 0; 589 uopt->volume = option; 590 break; 591 case Opt_partition: 592 if (match_int(args, &option)) 593 return 0; 594 uopt->partition = option; 595 break; 596 case Opt_fileset: 597 if (match_int(args, &option)) 598 return 0; 599 uopt->fileset = option; 600 break; 601 case Opt_rootdir: 602 if (match_int(args, &option)) 603 return 0; 604 uopt->rootdir = option; 605 break; 606 case Opt_utf8: 607 uopt->flags |= (1 << UDF_FLAG_UTF8); 608 break; 609 #ifdef CONFIG_UDF_NLS 610 case Opt_iocharset: 611 uopt->nls_map = load_nls(args[0].from); 612 uopt->flags |= (1 << UDF_FLAG_NLS_MAP); 613 break; 614 #endif 615 case Opt_uignore: 616 uopt->flags |= (1 << UDF_FLAG_UID_IGNORE); 617 break; 618 case Opt_uforget: 619 uopt->flags |= (1 << UDF_FLAG_UID_FORGET); 620 break; 621 case Opt_gignore: 622 uopt->flags |= (1 << UDF_FLAG_GID_IGNORE); 623 break; 624 case Opt_gforget: 625 uopt->flags |= (1 << UDF_FLAG_GID_FORGET); 626 break; 627 case Opt_fmode: 628 if (match_octal(args, &option)) 629 return 0; 630 uopt->fmode = option & 0777; 631 break; 632 case Opt_dmode: 633 if (match_octal(args, &option)) 634 return 0; 635 uopt->dmode = option & 0777; 636 break; 637 default: 638 pr_err("bad mount option \"%s\" or missing value\n", p); 639 return 0; 640 } 641 } 642 return 1; 643 } 644 645 static int udf_remount_fs(struct super_block *sb, int *flags, char *options) 646 { 647 struct udf_options uopt; 648 struct udf_sb_info *sbi = UDF_SB(sb); 649 int error = 0; 650 struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb); 651 652 sync_filesystem(sb); 653 if (lvidiu) { 654 int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev); 655 if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY)) 656 return -EACCES; 657 } 658 659 uopt.flags = sbi->s_flags; 660 uopt.uid = sbi->s_uid; 661 uopt.gid = sbi->s_gid; 662 uopt.umask = sbi->s_umask; 663 uopt.fmode = sbi->s_fmode; 664 uopt.dmode = sbi->s_dmode; 665 666 if (!udf_parse_options(options, &uopt, true)) 667 return -EINVAL; 668 669 write_lock(&sbi->s_cred_lock); 670 sbi->s_flags = uopt.flags; 671 sbi->s_uid = uopt.uid; 672 sbi->s_gid = uopt.gid; 673 sbi->s_umask = uopt.umask; 674 sbi->s_fmode = uopt.fmode; 675 sbi->s_dmode = uopt.dmode; 676 write_unlock(&sbi->s_cred_lock); 677 678 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) 679 goto out_unlock; 680 681 if (*flags & MS_RDONLY) 682 udf_close_lvid(sb); 683 else 684 udf_open_lvid(sb); 685 686 out_unlock: 687 return error; 688 } 689 690 /* Check Volume Structure Descriptors (ECMA 167 2/9.1) */ 691 /* We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */ 692 static loff_t udf_check_vsd(struct super_block *sb) 693 { 694 struct volStructDesc *vsd = NULL; 695 loff_t sector = VSD_FIRST_SECTOR_OFFSET; 696 int sectorsize; 697 struct buffer_head *bh = NULL; 
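	/*
	 * Byte offsets of the NSR02 / NSR03 descriptors, if one was seen;
	 * 0 means not found.
	 */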
698 int nsr02 = 0; 699 int nsr03 = 0; 700 struct udf_sb_info *sbi; 701 702 sbi = UDF_SB(sb); 703 if (sb->s_blocksize < sizeof(struct volStructDesc)) 704 sectorsize = sizeof(struct volStructDesc); 705 else 706 sectorsize = sb->s_blocksize; 707 708 sector += (sbi->s_session << sb->s_blocksize_bits); 709 710 udf_debug("Starting at sector %u (%ld byte sectors)\n", 711 (unsigned int)(sector >> sb->s_blocksize_bits), 712 sb->s_blocksize); 713 /* Process the sequence (if applicable). The hard limit on the sector 714 * offset is arbitrary, hopefully large enough so that all valid UDF 715 * filesystems will be recognised. There is no mention of an upper 716 * bound to the size of the volume recognition area in the standard. 717 * The limit will prevent the code to read all the sectors of a 718 * specially crafted image (like a bluray disc full of CD001 sectors), 719 * potentially causing minutes or even hours of uninterruptible I/O 720 * activity. This actually happened with uninitialised SSD partitions 721 * (all 0xFF) before the check for the limit and all valid IDs were 722 * added */ 723 for (; !nsr02 && !nsr03 && sector < VSD_MAX_SECTOR_OFFSET; 724 sector += sectorsize) { 725 /* Read a block */ 726 bh = udf_tread(sb, sector >> sb->s_blocksize_bits); 727 if (!bh) 728 break; 729 730 /* Look for ISO descriptors */ 731 vsd = (struct volStructDesc *)(bh->b_data + 732 (sector & (sb->s_blocksize - 1))); 733 734 if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001, 735 VSD_STD_ID_LEN)) { 736 switch (vsd->structType) { 737 case 0: 738 udf_debug("ISO9660 Boot Record found\n"); 739 break; 740 case 1: 741 udf_debug("ISO9660 Primary Volume Descriptor found\n"); 742 break; 743 case 2: 744 udf_debug("ISO9660 Supplementary Volume Descriptor found\n"); 745 break; 746 case 3: 747 udf_debug("ISO9660 Volume Partition Descriptor found\n"); 748 break; 749 case 255: 750 udf_debug("ISO9660 Volume Descriptor Set Terminator found\n"); 751 break; 752 default: 753 udf_debug("ISO9660 VRS (%u) found\n", 754 vsd->structType); 755 break; 756 } 757 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01, 758 VSD_STD_ID_LEN)) 759 ; /* nothing */ 760 else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01, 761 VSD_STD_ID_LEN)) { 762 brelse(bh); 763 break; 764 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02, 765 VSD_STD_ID_LEN)) 766 nsr02 = sector; 767 else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03, 768 VSD_STD_ID_LEN)) 769 nsr03 = sector; 770 else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BOOT2, 771 VSD_STD_ID_LEN)) 772 ; /* nothing */ 773 else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CDW02, 774 VSD_STD_ID_LEN)) 775 ; /* nothing */ 776 else { 777 /* invalid id : end of volume recognition area */ 778 brelse(bh); 779 break; 780 } 781 brelse(bh); 782 } 783 784 if (nsr03) 785 return nsr03; 786 else if (nsr02) 787 return nsr02; 788 else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) == 789 VSD_FIRST_SECTOR_OFFSET) 790 return -1; 791 else 792 return 0; 793 } 794 795 static int udf_find_fileset(struct super_block *sb, 796 struct kernel_lb_addr *fileset, 797 struct kernel_lb_addr *root) 798 { 799 struct buffer_head *bh = NULL; 800 long lastblock; 801 uint16_t ident; 802 struct udf_sb_info *sbi; 803 804 if (fileset->logicalBlockNum != 0xFFFFFFFF || 805 fileset->partitionReferenceNum != 0xFFFF) { 806 bh = udf_read_ptagged(sb, fileset, 0, &ident); 807 808 if (!bh) { 809 return 1; 810 } else if (ident != TAG_IDENT_FSD) { 811 brelse(bh); 812 return 1; 813 } 814 815 } 816 817 sbi = UDF_SB(sb); 818 if (!bh) { 819 /* Search backwards through the 
partitions */ 820 struct kernel_lb_addr newfileset; 821 822 /* --> cvg: FIXME - is it reasonable? */ 823 return 1; 824 825 for (newfileset.partitionReferenceNum = sbi->s_partitions - 1; 826 (newfileset.partitionReferenceNum != 0xFFFF && 827 fileset->logicalBlockNum == 0xFFFFFFFF && 828 fileset->partitionReferenceNum == 0xFFFF); 829 newfileset.partitionReferenceNum--) { 830 lastblock = sbi->s_partmaps 831 [newfileset.partitionReferenceNum] 832 .s_partition_len; 833 newfileset.logicalBlockNum = 0; 834 835 do { 836 bh = udf_read_ptagged(sb, &newfileset, 0, 837 &ident); 838 if (!bh) { 839 newfileset.logicalBlockNum++; 840 continue; 841 } 842 843 switch (ident) { 844 case TAG_IDENT_SBD: 845 { 846 struct spaceBitmapDesc *sp; 847 sp = (struct spaceBitmapDesc *) 848 bh->b_data; 849 newfileset.logicalBlockNum += 1 + 850 ((le32_to_cpu(sp->numOfBytes) + 851 sizeof(struct spaceBitmapDesc) 852 - 1) >> sb->s_blocksize_bits); 853 brelse(bh); 854 break; 855 } 856 case TAG_IDENT_FSD: 857 *fileset = newfileset; 858 break; 859 default: 860 newfileset.logicalBlockNum++; 861 brelse(bh); 862 bh = NULL; 863 break; 864 } 865 } while (newfileset.logicalBlockNum < lastblock && 866 fileset->logicalBlockNum == 0xFFFFFFFF && 867 fileset->partitionReferenceNum == 0xFFFF); 868 } 869 } 870 871 if ((fileset->logicalBlockNum != 0xFFFFFFFF || 872 fileset->partitionReferenceNum != 0xFFFF) && bh) { 873 udf_debug("Fileset at block=%d, partition=%d\n", 874 fileset->logicalBlockNum, 875 fileset->partitionReferenceNum); 876 877 sbi->s_partition = fileset->partitionReferenceNum; 878 udf_load_fileset(sb, bh, root); 879 brelse(bh); 880 return 0; 881 } 882 return 1; 883 } 884 885 /* 886 * Load primary Volume Descriptor Sequence 887 * 888 * Return <0 on error, 0 on success. -EAGAIN is special meaning next sequence 889 * should be tried. 
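 * (udf_process_sequence() passes -EAGAIN up so that udf_load_sequence() can
 * retry with the reserve descriptor sequence.)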
 */
static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
{
	struct primaryVolDesc *pvoldesc;
	uint8_t *outstr;
	struct buffer_head *bh;
	uint16_t ident;
	int ret = -ENOMEM;

	outstr = kmalloc(128, GFP_NOFS);
	if (!outstr)
		return -ENOMEM;

	bh = udf_read_tagged(sb, block, block, &ident);
	if (!bh) {
		ret = -EAGAIN;
		goto out2;
	}

	if (ident != TAG_IDENT_PVD) {
		ret = -EIO;
		goto out_bh;
	}

	pvoldesc = (struct primaryVolDesc *)bh->b_data;

	if (udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
				   pvoldesc->recordingDateAndTime)) {
#ifdef UDFFS_DEBUG
		struct timestamp *ts = &pvoldesc->recordingDateAndTime;
		udf_debug("recording time %04u/%02u/%02u %02u:%02u (%x)\n",
			  le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
			  ts->minute, le16_to_cpu(ts->typeAndTimezone));
#endif
	}

	ret = udf_dstrCS0toUTF8(outstr, 31, pvoldesc->volIdent, 32);
	if (ret < 0)
		goto out_bh;

	strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
	udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);

	ret = udf_dstrCS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128);
	if (ret < 0)
		goto out_bh;

	outstr[ret] = 0;
	udf_debug("volSetIdent[] = '%s'\n", outstr);

	ret = 0;
out_bh:
	brelse(bh);
out2:
	kfree(outstr);
	return ret;
}

struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
					  u32 meta_file_loc, u32 partition_ref)
{
	struct kernel_lb_addr addr;
	struct inode *metadata_fe;

	addr.logicalBlockNum = meta_file_loc;
	addr.partitionReferenceNum = partition_ref;

	metadata_fe = udf_iget_special(sb, &addr);

	if (IS_ERR(metadata_fe)) {
		udf_warn(sb, "metadata inode efe not found\n");
		return metadata_fe;
	}
	if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
		udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
		iput(metadata_fe);
		return ERR_PTR(-EIO);
	}

	return metadata_fe;
}

static int udf_load_metadata_files(struct super_block *sb, int partition,
				   int type1_index)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	struct udf_meta_data *mdata;
	struct kernel_lb_addr addr;
	struct inode *fe;

	map = &sbi->s_partmaps[partition];
	mdata = &map->s_type_specific.s_metadata;
	mdata->s_phys_partition_ref = type1_index;

	/* metadata address */
	udf_debug("Metadata file location: block = %d part = %d\n",
		  mdata->s_meta_file_loc, mdata->s_phys_partition_ref);

	fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
					 mdata->s_phys_partition_ref);
	if (IS_ERR(fe)) {
		/* mirror file entry */
		udf_debug("Mirror metadata file location: block = %d part = %d\n",
			  mdata->s_mirror_file_loc, mdata->s_phys_partition_ref);

		fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
						 mdata->s_phys_partition_ref);

		if (IS_ERR(fe)) {
			udf_err(sb, "Both metadata and mirror metadata inode efe cannot be found\n");
			return PTR_ERR(fe);
		}
		mdata->s_mirror_fe = fe;
	} else
		mdata->s_metadata_fe = fe;

	/*
	 * bitmap file entry
	 * Note:
	 * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102)
	 */
	if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
		addr.logicalBlockNum = mdata->s_bitmap_file_loc;
		addr.partitionReferenceNum =
mdata->s_phys_partition_ref; 1016 1017 udf_debug("Bitmap file location: block = %d part = %d\n", 1018 addr.logicalBlockNum, addr.partitionReferenceNum); 1019 1020 fe = udf_iget_special(sb, &addr); 1021 if (IS_ERR(fe)) { 1022 if (sb->s_flags & MS_RDONLY) 1023 udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n"); 1024 else { 1025 udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n"); 1026 return PTR_ERR(fe); 1027 } 1028 } else 1029 mdata->s_bitmap_fe = fe; 1030 } 1031 1032 udf_debug("udf_load_metadata_files Ok\n"); 1033 return 0; 1034 } 1035 1036 static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh, 1037 struct kernel_lb_addr *root) 1038 { 1039 struct fileSetDesc *fset; 1040 1041 fset = (struct fileSetDesc *)bh->b_data; 1042 1043 *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation); 1044 1045 UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum); 1046 1047 udf_debug("Rootdir at block=%d, partition=%d\n", 1048 root->logicalBlockNum, root->partitionReferenceNum); 1049 } 1050 1051 int udf_compute_nr_groups(struct super_block *sb, u32 partition) 1052 { 1053 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; 1054 return DIV_ROUND_UP(map->s_partition_len + 1055 (sizeof(struct spaceBitmapDesc) << 3), 1056 sb->s_blocksize * 8); 1057 } 1058 1059 static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index) 1060 { 1061 struct udf_bitmap *bitmap; 1062 int nr_groups; 1063 int size; 1064 1065 nr_groups = udf_compute_nr_groups(sb, index); 1066 size = sizeof(struct udf_bitmap) + 1067 (sizeof(struct buffer_head *) * nr_groups); 1068 1069 if (size <= PAGE_SIZE) 1070 bitmap = kzalloc(size, GFP_KERNEL); 1071 else 1072 bitmap = vzalloc(size); /* TODO: get rid of vzalloc */ 1073 1074 if (bitmap == NULL) 1075 return NULL; 1076 1077 bitmap->s_nr_groups = nr_groups; 1078 return bitmap; 1079 } 1080 1081 static int udf_fill_partdesc_info(struct super_block *sb, 1082 struct partitionDesc *p, int p_index) 1083 { 1084 struct udf_part_map *map; 1085 struct udf_sb_info *sbi = UDF_SB(sb); 1086 struct partitionHeaderDesc *phd; 1087 1088 map = &sbi->s_partmaps[p_index]; 1089 1090 map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */ 1091 map->s_partition_root = le32_to_cpu(p->partitionStartingLocation); 1092 1093 if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY)) 1094 map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY; 1095 if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE)) 1096 map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE; 1097 if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE)) 1098 map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE; 1099 if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE)) 1100 map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE; 1101 1102 udf_debug("Partition (%d type %x) starts at physical %d, block length %d\n", 1103 p_index, map->s_partition_type, 1104 map->s_partition_root, map->s_partition_len); 1105 1106 if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) && 1107 strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03)) 1108 return 0; 1109 1110 phd = (struct partitionHeaderDesc *)p->partitionContentsUse; 1111 if (phd->unallocSpaceTable.extLength) { 1112 struct kernel_lb_addr loc = { 1113 .logicalBlockNum = le32_to_cpu( 1114 phd->unallocSpaceTable.extPosition), 1115 .partitionReferenceNum = p_index, 1116 }; 1117 struct inode *inode; 1118 1119 inode = udf_iget_special(sb, &loc); 1120 if 
(IS_ERR(inode)) { 1121 udf_debug("cannot load unallocSpaceTable (part %d)\n", 1122 p_index); 1123 return PTR_ERR(inode); 1124 } 1125 map->s_uspace.s_table = inode; 1126 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE; 1127 udf_debug("unallocSpaceTable (part %d) @ %ld\n", 1128 p_index, map->s_uspace.s_table->i_ino); 1129 } 1130 1131 if (phd->unallocSpaceBitmap.extLength) { 1132 struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index); 1133 if (!bitmap) 1134 return -ENOMEM; 1135 map->s_uspace.s_bitmap = bitmap; 1136 bitmap->s_extPosition = le32_to_cpu( 1137 phd->unallocSpaceBitmap.extPosition); 1138 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP; 1139 udf_debug("unallocSpaceBitmap (part %d) @ %d\n", 1140 p_index, bitmap->s_extPosition); 1141 } 1142 1143 if (phd->partitionIntegrityTable.extLength) 1144 udf_debug("partitionIntegrityTable (part %d)\n", p_index); 1145 1146 if (phd->freedSpaceTable.extLength) { 1147 struct kernel_lb_addr loc = { 1148 .logicalBlockNum = le32_to_cpu( 1149 phd->freedSpaceTable.extPosition), 1150 .partitionReferenceNum = p_index, 1151 }; 1152 struct inode *inode; 1153 1154 inode = udf_iget_special(sb, &loc); 1155 if (IS_ERR(inode)) { 1156 udf_debug("cannot load freedSpaceTable (part %d)\n", 1157 p_index); 1158 return PTR_ERR(inode); 1159 } 1160 map->s_fspace.s_table = inode; 1161 map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE; 1162 udf_debug("freedSpaceTable (part %d) @ %ld\n", 1163 p_index, map->s_fspace.s_table->i_ino); 1164 } 1165 1166 if (phd->freedSpaceBitmap.extLength) { 1167 struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index); 1168 if (!bitmap) 1169 return -ENOMEM; 1170 map->s_fspace.s_bitmap = bitmap; 1171 bitmap->s_extPosition = le32_to_cpu( 1172 phd->freedSpaceBitmap.extPosition); 1173 map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP; 1174 udf_debug("freedSpaceBitmap (part %d) @ %d\n", 1175 p_index, bitmap->s_extPosition); 1176 } 1177 return 0; 1178 } 1179 1180 static void udf_find_vat_block(struct super_block *sb, int p_index, 1181 int type1_index, sector_t start_block) 1182 { 1183 struct udf_sb_info *sbi = UDF_SB(sb); 1184 struct udf_part_map *map = &sbi->s_partmaps[p_index]; 1185 sector_t vat_block; 1186 struct kernel_lb_addr ino; 1187 struct inode *inode; 1188 1189 /* 1190 * VAT file entry is in the last recorded block. Some broken disks have 1191 * it a few blocks before so try a bit harder... 
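	 * (we look at most four blocks back from start_block, never going
	 * below the start of the partition)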
1192 */ 1193 ino.partitionReferenceNum = type1_index; 1194 for (vat_block = start_block; 1195 vat_block >= map->s_partition_root && 1196 vat_block >= start_block - 3; vat_block--) { 1197 ino.logicalBlockNum = vat_block - map->s_partition_root; 1198 inode = udf_iget_special(sb, &ino); 1199 if (!IS_ERR(inode)) { 1200 sbi->s_vat_inode = inode; 1201 break; 1202 } 1203 } 1204 } 1205 1206 static int udf_load_vat(struct super_block *sb, int p_index, int type1_index) 1207 { 1208 struct udf_sb_info *sbi = UDF_SB(sb); 1209 struct udf_part_map *map = &sbi->s_partmaps[p_index]; 1210 struct buffer_head *bh = NULL; 1211 struct udf_inode_info *vati; 1212 uint32_t pos; 1213 struct virtualAllocationTable20 *vat20; 1214 sector_t blocks = i_size_read(sb->s_bdev->bd_inode) >> 1215 sb->s_blocksize_bits; 1216 1217 udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block); 1218 if (!sbi->s_vat_inode && 1219 sbi->s_last_block != blocks - 1) { 1220 pr_notice("Failed to read VAT inode from the last recorded block (%lu), retrying with the last block of the device (%lu).\n", 1221 (unsigned long)sbi->s_last_block, 1222 (unsigned long)blocks - 1); 1223 udf_find_vat_block(sb, p_index, type1_index, blocks - 1); 1224 } 1225 if (!sbi->s_vat_inode) 1226 return -EIO; 1227 1228 if (map->s_partition_type == UDF_VIRTUAL_MAP15) { 1229 map->s_type_specific.s_virtual.s_start_offset = 0; 1230 map->s_type_specific.s_virtual.s_num_entries = 1231 (sbi->s_vat_inode->i_size - 36) >> 2; 1232 } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) { 1233 vati = UDF_I(sbi->s_vat_inode); 1234 if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { 1235 pos = udf_block_map(sbi->s_vat_inode, 0); 1236 bh = sb_bread(sb, pos); 1237 if (!bh) 1238 return -EIO; 1239 vat20 = (struct virtualAllocationTable20 *)bh->b_data; 1240 } else { 1241 vat20 = (struct virtualAllocationTable20 *) 1242 vati->i_ext.i_data; 1243 } 1244 1245 map->s_type_specific.s_virtual.s_start_offset = 1246 le16_to_cpu(vat20->lengthHeader); 1247 map->s_type_specific.s_virtual.s_num_entries = 1248 (sbi->s_vat_inode->i_size - 1249 map->s_type_specific.s_virtual. 1250 s_start_offset) >> 2; 1251 brelse(bh); 1252 } 1253 return 0; 1254 } 1255 1256 /* 1257 * Load partition descriptor block 1258 * 1259 * Returns <0 on error, 0 on success, -EAGAIN is special - try next descriptor 1260 * sequence. 
 */
static int udf_load_partdesc(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh;
	struct partitionDesc *p;
	struct udf_part_map *map;
	struct udf_sb_info *sbi = UDF_SB(sb);
	int i, type1_idx;
	uint16_t partitionNumber;
	uint16_t ident;
	int ret;

	bh = udf_read_tagged(sb, block, block, &ident);
	if (!bh)
		return -EAGAIN;
	if (ident != TAG_IDENT_PD) {
		ret = 0;
		goto out_bh;
	}

	p = (struct partitionDesc *)bh->b_data;
	partitionNumber = le16_to_cpu(p->partitionNumber);

	/* First scan for TYPE1 and SPARABLE partitions */
	for (i = 0; i < sbi->s_partitions; i++) {
		map = &sbi->s_partmaps[i];
		udf_debug("Searching map: (%d == %d)\n",
			  map->s_partition_num, partitionNumber);
		if (map->s_partition_num == partitionNumber &&
		    (map->s_partition_type == UDF_TYPE1_MAP15 ||
		     map->s_partition_type == UDF_SPARABLE_MAP15))
			break;
	}

	if (i >= sbi->s_partitions) {
		udf_debug("Partition (%d) not found in partition map\n",
			  partitionNumber);
		ret = 0;
		goto out_bh;
	}

	ret = udf_fill_partdesc_info(sb, p, i);
	if (ret < 0)
		goto out_bh;

	/*
	 * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
	 * PHYSICAL partitions are already set up
	 */
	type1_idx = i;
#ifdef UDFFS_DEBUG
	map = NULL; /* suppress 'maybe used uninitialized' warning */
#endif
	for (i = 0; i < sbi->s_partitions; i++) {
		map = &sbi->s_partmaps[i];

		if (map->s_partition_num == partitionNumber &&
		    (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
		     map->s_partition_type == UDF_VIRTUAL_MAP20 ||
		     map->s_partition_type == UDF_METADATA_MAP25))
			break;
	}

	if (i >= sbi->s_partitions) {
		ret = 0;
		goto out_bh;
	}

	ret = udf_fill_partdesc_info(sb, p, i);
	if (ret < 0)
		goto out_bh;

	if (map->s_partition_type == UDF_METADATA_MAP25) {
		ret = udf_load_metadata_files(sb, i, type1_idx);
		if (ret < 0) {
			udf_err(sb, "error loading MetaData partition map %d\n",
				i);
			goto out_bh;
		}
	} else {
		/*
		 * If we have a partition with virtual map, we don't handle
		 * writing to it (we overwrite blocks instead of relocating
		 * them).
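		 * Such media can therefore only be mounted read-only;
		 * otherwise we refuse the mount with -EACCES below.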
1345 */ 1346 if (!(sb->s_flags & MS_RDONLY)) { 1347 ret = -EACCES; 1348 goto out_bh; 1349 } 1350 ret = udf_load_vat(sb, i, type1_idx); 1351 if (ret < 0) 1352 goto out_bh; 1353 } 1354 ret = 0; 1355 out_bh: 1356 /* In case loading failed, we handle cleanup in udf_fill_super */ 1357 brelse(bh); 1358 return ret; 1359 } 1360 1361 static int udf_load_sparable_map(struct super_block *sb, 1362 struct udf_part_map *map, 1363 struct sparablePartitionMap *spm) 1364 { 1365 uint32_t loc; 1366 uint16_t ident; 1367 struct sparingTable *st; 1368 struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing; 1369 int i; 1370 struct buffer_head *bh; 1371 1372 map->s_partition_type = UDF_SPARABLE_MAP15; 1373 sdata->s_packet_len = le16_to_cpu(spm->packetLength); 1374 if (!is_power_of_2(sdata->s_packet_len)) { 1375 udf_err(sb, "error loading logical volume descriptor: " 1376 "Invalid packet length %u\n", 1377 (unsigned)sdata->s_packet_len); 1378 return -EIO; 1379 } 1380 if (spm->numSparingTables > 4) { 1381 udf_err(sb, "error loading logical volume descriptor: " 1382 "Too many sparing tables (%d)\n", 1383 (int)spm->numSparingTables); 1384 return -EIO; 1385 } 1386 1387 for (i = 0; i < spm->numSparingTables; i++) { 1388 loc = le32_to_cpu(spm->locSparingTable[i]); 1389 bh = udf_read_tagged(sb, loc, loc, &ident); 1390 if (!bh) 1391 continue; 1392 1393 st = (struct sparingTable *)bh->b_data; 1394 if (ident != 0 || 1395 strncmp(st->sparingIdent.ident, UDF_ID_SPARING, 1396 strlen(UDF_ID_SPARING)) || 1397 sizeof(*st) + le16_to_cpu(st->reallocationTableLen) > 1398 sb->s_blocksize) { 1399 brelse(bh); 1400 continue; 1401 } 1402 1403 sdata->s_spar_map[i] = bh; 1404 } 1405 map->s_partition_func = udf_get_pblock_spar15; 1406 return 0; 1407 } 1408 1409 static int udf_load_logicalvol(struct super_block *sb, sector_t block, 1410 struct kernel_lb_addr *fileset) 1411 { 1412 struct logicalVolDesc *lvd; 1413 int i, offset; 1414 uint8_t type; 1415 struct udf_sb_info *sbi = UDF_SB(sb); 1416 struct genericPartitionMap *gpm; 1417 uint16_t ident; 1418 struct buffer_head *bh; 1419 unsigned int table_len; 1420 int ret; 1421 1422 bh = udf_read_tagged(sb, block, block, &ident); 1423 if (!bh) 1424 return -EAGAIN; 1425 BUG_ON(ident != TAG_IDENT_LVD); 1426 lvd = (struct logicalVolDesc *)bh->b_data; 1427 table_len = le32_to_cpu(lvd->mapTableLength); 1428 if (table_len > sb->s_blocksize - sizeof(*lvd)) { 1429 udf_err(sb, "error loading logical volume descriptor: " 1430 "Partition table too long (%u > %lu)\n", table_len, 1431 sb->s_blocksize - sizeof(*lvd)); 1432 ret = -EIO; 1433 goto out_bh; 1434 } 1435 1436 ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps)); 1437 if (ret) 1438 goto out_bh; 1439 1440 for (i = 0, offset = 0; 1441 i < sbi->s_partitions && offset < table_len; 1442 i++, offset += gpm->partitionMapLength) { 1443 struct udf_part_map *map = &sbi->s_partmaps[i]; 1444 gpm = (struct genericPartitionMap *) 1445 &(lvd->partitionMaps[offset]); 1446 type = gpm->partitionMapType; 1447 if (type == 1) { 1448 struct genericPartitionMap1 *gpm1 = 1449 (struct genericPartitionMap1 *)gpm; 1450 map->s_partition_type = UDF_TYPE1_MAP15; 1451 map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum); 1452 map->s_partition_num = le16_to_cpu(gpm1->partitionNum); 1453 map->s_partition_func = NULL; 1454 } else if (type == 2) { 1455 struct udfPartitionMap2 *upm2 = 1456 (struct udfPartitionMap2 *)gpm; 1457 if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL, 1458 strlen(UDF_ID_VIRTUAL))) { 1459 u16 suf = 1460 le16_to_cpu(((__le16 
*)upm2->partIdent. 1461 identSuffix)[0]); 1462 if (suf < 0x0200) { 1463 map->s_partition_type = 1464 UDF_VIRTUAL_MAP15; 1465 map->s_partition_func = 1466 udf_get_pblock_virt15; 1467 } else { 1468 map->s_partition_type = 1469 UDF_VIRTUAL_MAP20; 1470 map->s_partition_func = 1471 udf_get_pblock_virt20; 1472 } 1473 } else if (!strncmp(upm2->partIdent.ident, 1474 UDF_ID_SPARABLE, 1475 strlen(UDF_ID_SPARABLE))) { 1476 ret = udf_load_sparable_map(sb, map, 1477 (struct sparablePartitionMap *)gpm); 1478 if (ret < 0) 1479 goto out_bh; 1480 } else if (!strncmp(upm2->partIdent.ident, 1481 UDF_ID_METADATA, 1482 strlen(UDF_ID_METADATA))) { 1483 struct udf_meta_data *mdata = 1484 &map->s_type_specific.s_metadata; 1485 struct metadataPartitionMap *mdm = 1486 (struct metadataPartitionMap *) 1487 &(lvd->partitionMaps[offset]); 1488 udf_debug("Parsing Logical vol part %d type %d id=%s\n", 1489 i, type, UDF_ID_METADATA); 1490 1491 map->s_partition_type = UDF_METADATA_MAP25; 1492 map->s_partition_func = udf_get_pblock_meta25; 1493 1494 mdata->s_meta_file_loc = 1495 le32_to_cpu(mdm->metadataFileLoc); 1496 mdata->s_mirror_file_loc = 1497 le32_to_cpu(mdm->metadataMirrorFileLoc); 1498 mdata->s_bitmap_file_loc = 1499 le32_to_cpu(mdm->metadataBitmapFileLoc); 1500 mdata->s_alloc_unit_size = 1501 le32_to_cpu(mdm->allocUnitSize); 1502 mdata->s_align_unit_size = 1503 le16_to_cpu(mdm->alignUnitSize); 1504 if (mdm->flags & 0x01) 1505 mdata->s_flags |= MF_DUPLICATE_MD; 1506 1507 udf_debug("Metadata Ident suffix=0x%x\n", 1508 le16_to_cpu(*(__le16 *) 1509 mdm->partIdent.identSuffix)); 1510 udf_debug("Metadata part num=%d\n", 1511 le16_to_cpu(mdm->partitionNum)); 1512 udf_debug("Metadata part alloc unit size=%d\n", 1513 le32_to_cpu(mdm->allocUnitSize)); 1514 udf_debug("Metadata file loc=%d\n", 1515 le32_to_cpu(mdm->metadataFileLoc)); 1516 udf_debug("Mirror file loc=%d\n", 1517 le32_to_cpu(mdm->metadataMirrorFileLoc)); 1518 udf_debug("Bitmap file loc=%d\n", 1519 le32_to_cpu(mdm->metadataBitmapFileLoc)); 1520 udf_debug("Flags: %d %d\n", 1521 mdata->s_flags, mdm->flags); 1522 } else { 1523 udf_debug("Unknown ident: %s\n", 1524 upm2->partIdent.ident); 1525 continue; 1526 } 1527 map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum); 1528 map->s_partition_num = le16_to_cpu(upm2->partitionNum); 1529 } 1530 udf_debug("Partition (%d:%d) type %d on volume %d\n", 1531 i, map->s_partition_num, type, map->s_volumeseqnum); 1532 } 1533 1534 if (fileset) { 1535 struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]); 1536 1537 *fileset = lelb_to_cpu(la->extLocation); 1538 udf_debug("FileSet found in LogicalVolDesc at block=%d, partition=%d\n", 1539 fileset->logicalBlockNum, 1540 fileset->partitionReferenceNum); 1541 } 1542 if (lvd->integritySeqExt.extLength) 1543 udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt)); 1544 ret = 0; 1545 out_bh: 1546 brelse(bh); 1547 return ret; 1548 } 1549 1550 /* 1551 * Find the prevailing Logical Volume Integrity Descriptor. 
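 * We walk the integrity extent, keep the last LVID we could read, and follow
 * nextIntegrityExt pointers for at most UDF_MAX_LVID_NESTING indirections.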
1552 */ 1553 static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc) 1554 { 1555 struct buffer_head *bh, *final_bh; 1556 uint16_t ident; 1557 struct udf_sb_info *sbi = UDF_SB(sb); 1558 struct logicalVolIntegrityDesc *lvid; 1559 int indirections = 0; 1560 1561 while (++indirections <= UDF_MAX_LVID_NESTING) { 1562 final_bh = NULL; 1563 while (loc.extLength > 0 && 1564 (bh = udf_read_tagged(sb, loc.extLocation, 1565 loc.extLocation, &ident))) { 1566 if (ident != TAG_IDENT_LVID) { 1567 brelse(bh); 1568 break; 1569 } 1570 1571 brelse(final_bh); 1572 final_bh = bh; 1573 1574 loc.extLength -= sb->s_blocksize; 1575 loc.extLocation++; 1576 } 1577 1578 if (!final_bh) 1579 return; 1580 1581 brelse(sbi->s_lvid_bh); 1582 sbi->s_lvid_bh = final_bh; 1583 1584 lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data; 1585 if (lvid->nextIntegrityExt.extLength == 0) 1586 return; 1587 1588 loc = leea_to_cpu(lvid->nextIntegrityExt); 1589 } 1590 1591 udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n", 1592 UDF_MAX_LVID_NESTING); 1593 brelse(sbi->s_lvid_bh); 1594 sbi->s_lvid_bh = NULL; 1595 } 1596 1597 1598 /* 1599 * Process a main/reserve volume descriptor sequence. 1600 * @block First block of first extent of the sequence. 1601 * @lastblock Lastblock of first extent of the sequence. 1602 * @fileset There we store extent containing root fileset 1603 * 1604 * Returns <0 on error, 0 on success. -EAGAIN is special - try next descriptor 1605 * sequence 1606 */ 1607 static noinline int udf_process_sequence( 1608 struct super_block *sb, 1609 sector_t block, sector_t lastblock, 1610 struct kernel_lb_addr *fileset) 1611 { 1612 struct buffer_head *bh = NULL; 1613 struct udf_vds_record vds[VDS_POS_LENGTH]; 1614 struct udf_vds_record *curr; 1615 struct generic_desc *gd; 1616 struct volDescPtr *vdp; 1617 bool done = false; 1618 uint32_t vdsn; 1619 uint16_t ident; 1620 long next_s = 0, next_e = 0; 1621 int ret; 1622 unsigned int indirections = 0; 1623 1624 memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH); 1625 1626 /* 1627 * Read the main descriptor sequence and find which descriptors 1628 * are in it. 
1629 */ 1630 for (; (!done && block <= lastblock); block++) { 1631 1632 bh = udf_read_tagged(sb, block, block, &ident); 1633 if (!bh) { 1634 udf_err(sb, 1635 "Block %llu of volume descriptor sequence is corrupted or we could not read it\n", 1636 (unsigned long long)block); 1637 return -EAGAIN; 1638 } 1639 1640 /* Process each descriptor (ISO 13346 3/8.3-8.4) */ 1641 gd = (struct generic_desc *)bh->b_data; 1642 vdsn = le32_to_cpu(gd->volDescSeqNum); 1643 switch (ident) { 1644 case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */ 1645 curr = &vds[VDS_POS_PRIMARY_VOL_DESC]; 1646 if (vdsn >= curr->volDescSeqNum) { 1647 curr->volDescSeqNum = vdsn; 1648 curr->block = block; 1649 } 1650 break; 1651 case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */ 1652 curr = &vds[VDS_POS_VOL_DESC_PTR]; 1653 if (vdsn >= curr->volDescSeqNum) { 1654 curr->volDescSeqNum = vdsn; 1655 curr->block = block; 1656 1657 vdp = (struct volDescPtr *)bh->b_data; 1658 next_s = le32_to_cpu( 1659 vdp->nextVolDescSeqExt.extLocation); 1660 next_e = le32_to_cpu( 1661 vdp->nextVolDescSeqExt.extLength); 1662 next_e = next_e >> sb->s_blocksize_bits; 1663 next_e += next_s; 1664 } 1665 break; 1666 case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */ 1667 curr = &vds[VDS_POS_IMP_USE_VOL_DESC]; 1668 if (vdsn >= curr->volDescSeqNum) { 1669 curr->volDescSeqNum = vdsn; 1670 curr->block = block; 1671 } 1672 break; 1673 case TAG_IDENT_PD: /* ISO 13346 3/10.5 */ 1674 curr = &vds[VDS_POS_PARTITION_DESC]; 1675 if (!curr->block) 1676 curr->block = block; 1677 break; 1678 case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */ 1679 curr = &vds[VDS_POS_LOGICAL_VOL_DESC]; 1680 if (vdsn >= curr->volDescSeqNum) { 1681 curr->volDescSeqNum = vdsn; 1682 curr->block = block; 1683 } 1684 break; 1685 case TAG_IDENT_USD: /* ISO 13346 3/10.8 */ 1686 curr = &vds[VDS_POS_UNALLOC_SPACE_DESC]; 1687 if (vdsn >= curr->volDescSeqNum) { 1688 curr->volDescSeqNum = vdsn; 1689 curr->block = block; 1690 } 1691 break; 1692 case TAG_IDENT_TD: /* ISO 13346 3/10.9 */ 1693 if (++indirections > UDF_MAX_TD_NESTING) { 1694 udf_err(sb, "too many TDs (max %u supported)\n", UDF_MAX_TD_NESTING); 1695 brelse(bh); 1696 return -EIO; 1697 } 1698 1699 vds[VDS_POS_TERMINATING_DESC].block = block; 1700 if (next_e) { 1701 block = next_s; 1702 lastblock = next_e; 1703 next_s = next_e = 0; 1704 } else 1705 done = true; 1706 break; 1707 } 1708 brelse(bh); 1709 } 1710 /* 1711 * Now read interesting descriptors again and process them 1712 * in a suitable order 1713 */ 1714 if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) { 1715 udf_err(sb, "Primary Volume Descriptor not found!\n"); 1716 return -EAGAIN; 1717 } 1718 ret = udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block); 1719 if (ret < 0) 1720 return ret; 1721 1722 if (vds[VDS_POS_LOGICAL_VOL_DESC].block) { 1723 ret = udf_load_logicalvol(sb, 1724 vds[VDS_POS_LOGICAL_VOL_DESC].block, 1725 fileset); 1726 if (ret < 0) 1727 return ret; 1728 } 1729 1730 if (vds[VDS_POS_PARTITION_DESC].block) { 1731 /* 1732 * We rescan the whole descriptor sequence to find 1733 * partition descriptor blocks and process them. 
1734 */ 1735 for (block = vds[VDS_POS_PARTITION_DESC].block; 1736 block < vds[VDS_POS_TERMINATING_DESC].block; 1737 block++) { 1738 ret = udf_load_partdesc(sb, block); 1739 if (ret < 0) 1740 return ret; 1741 } 1742 } 1743 1744 return 0; 1745 } 1746 1747 /* 1748 * Load Volume Descriptor Sequence described by anchor in bh 1749 * 1750 * Returns <0 on error, 0 on success 1751 */ 1752 static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh, 1753 struct kernel_lb_addr *fileset) 1754 { 1755 struct anchorVolDescPtr *anchor; 1756 sector_t main_s, main_e, reserve_s, reserve_e; 1757 int ret; 1758 1759 anchor = (struct anchorVolDescPtr *)bh->b_data; 1760 1761 /* Locate the main sequence */ 1762 main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation); 1763 main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength); 1764 main_e = main_e >> sb->s_blocksize_bits; 1765 main_e += main_s; 1766 1767 /* Locate the reserve sequence */ 1768 reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation); 1769 reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength); 1770 reserve_e = reserve_e >> sb->s_blocksize_bits; 1771 reserve_e += reserve_s; 1772 1773 /* Process the main & reserve sequences */ 1774 /* responsible for finding the PartitionDesc(s) */ 1775 ret = udf_process_sequence(sb, main_s, main_e, fileset); 1776 if (ret != -EAGAIN) 1777 return ret; 1778 udf_sb_free_partitions(sb); 1779 ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset); 1780 if (ret < 0) { 1781 udf_sb_free_partitions(sb); 1782 /* No sequence was OK, return -EIO */ 1783 if (ret == -EAGAIN) 1784 ret = -EIO; 1785 } 1786 return ret; 1787 } 1788 1789 /* 1790 * Check whether there is an anchor block in the given block and 1791 * load Volume Descriptor Sequence if so. 1792 * 1793 * Returns <0 on error, 0 on success, -EAGAIN is special - try next anchor 1794 * block 1795 */ 1796 static int udf_check_anchor_block(struct super_block *sb, sector_t block, 1797 struct kernel_lb_addr *fileset) 1798 { 1799 struct buffer_head *bh; 1800 uint16_t ident; 1801 int ret; 1802 1803 if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) && 1804 udf_fixed_to_variable(block) >= 1805 i_size_read(sb->s_bdev->bd_inode) >> sb->s_blocksize_bits) 1806 return -EAGAIN; 1807 1808 bh = udf_read_tagged(sb, block, block, &ident); 1809 if (!bh) 1810 return -EAGAIN; 1811 if (ident != TAG_IDENT_AVDP) { 1812 brelse(bh); 1813 return -EAGAIN; 1814 } 1815 ret = udf_load_sequence(sb, bh, fileset); 1816 brelse(bh); 1817 return ret; 1818 } 1819 1820 /* 1821 * Search for an anchor volume descriptor pointer. 1822 * 1823 * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set 1824 * of anchors. 1825 */ 1826 static int udf_scan_anchors(struct super_block *sb, sector_t *lastblock, 1827 struct kernel_lb_addr *fileset) 1828 { 1829 sector_t last[6]; 1830 int i; 1831 struct udf_sb_info *sbi = UDF_SB(sb); 1832 int last_count = 0; 1833 int ret; 1834 1835 /* First try user provided anchor */ 1836 if (sbi->s_anchor) { 1837 ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset); 1838 if (ret != -EAGAIN) 1839 return ret; 1840 } 1841 /* 1842 * according to spec, anchor is in either: 1843 * block 256 1844 * lastblock-256 1845 * lastblock 1846 * however, if the disc isn't closed, it could be 512. 1847 */ 1848 ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset); 1849 if (ret != -EAGAIN) 1850 return ret; 1851 /* 1852 * The trouble is which block is the last one. Drives often misreport 1853 * this so we try various possibilities. 
1854 */ 1855 last[last_count++] = *lastblock; 1856 if (*lastblock >= 1) 1857 last[last_count++] = *lastblock - 1; 1858 last[last_count++] = *lastblock + 1; 1859 if (*lastblock >= 2) 1860 last[last_count++] = *lastblock - 2; 1861 if (*lastblock >= 150) 1862 last[last_count++] = *lastblock - 150; 1863 if (*lastblock >= 152) 1864 last[last_count++] = *lastblock - 152; 1865 1866 for (i = 0; i < last_count; i++) { 1867 if (last[i] >= i_size_read(sb->s_bdev->bd_inode) >> 1868 sb->s_blocksize_bits) 1869 continue; 1870 ret = udf_check_anchor_block(sb, last[i], fileset); 1871 if (ret != -EAGAIN) { 1872 if (!ret) 1873 *lastblock = last[i]; 1874 return ret; 1875 } 1876 if (last[i] < 256) 1877 continue; 1878 ret = udf_check_anchor_block(sb, last[i] - 256, fileset); 1879 if (ret != -EAGAIN) { 1880 if (!ret) 1881 *lastblock = last[i]; 1882 return ret; 1883 } 1884 } 1885 1886 /* Finally try block 512 in case media is open */ 1887 return udf_check_anchor_block(sb, sbi->s_session + 512, fileset); 1888 } 1889 1890 /* 1891 * Find an anchor volume descriptor and load Volume Descriptor Sequence from 1892 * area specified by it. The function expects sbi->s_lastblock to be the last 1893 * block on the media. 1894 * 1895 * Return <0 on error, 0 if anchor found. -EAGAIN is special meaning anchor 1896 * was not found. 1897 */ 1898 static int udf_find_anchor(struct super_block *sb, 1899 struct kernel_lb_addr *fileset) 1900 { 1901 struct udf_sb_info *sbi = UDF_SB(sb); 1902 sector_t lastblock = sbi->s_last_block; 1903 int ret; 1904 1905 ret = udf_scan_anchors(sb, &lastblock, fileset); 1906 if (ret != -EAGAIN) 1907 goto out; 1908 1909 /* No anchor found? Try VARCONV conversion of block numbers */ 1910 UDF_SET_FLAG(sb, UDF_FLAG_VARCONV); 1911 lastblock = udf_variable_to_fixed(sbi->s_last_block); 1912 /* Firstly, we try to not convert number of the last block */ 1913 ret = udf_scan_anchors(sb, &lastblock, fileset); 1914 if (ret != -EAGAIN) 1915 goto out; 1916 1917 lastblock = sbi->s_last_block; 1918 /* Secondly, we try with converted number of the last block */ 1919 ret = udf_scan_anchors(sb, &lastblock, fileset); 1920 if (ret < 0) { 1921 /* VARCONV didn't help. Clear it. */ 1922 UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV); 1923 } 1924 out: 1925 if (ret == 0) 1926 sbi->s_last_block = lastblock; 1927 return ret; 1928 } 1929 1930 /* 1931 * Check Volume Structure Descriptor, find Anchor block and load Volume 1932 * Descriptor Sequence. 1933 * 1934 * Returns < 0 on error, 0 on success. -EAGAIN is special meaning anchor 1935 * block was not found. 1936 */ 1937 static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt, 1938 int silent, struct kernel_lb_addr *fileset) 1939 { 1940 struct udf_sb_info *sbi = UDF_SB(sb); 1941 loff_t nsr_off; 1942 int ret; 1943 1944 if (!sb_set_blocksize(sb, uopt->blocksize)) { 1945 if (!silent) 1946 udf_warn(sb, "Bad block size\n"); 1947 return -EINVAL; 1948 } 1949 sbi->s_last_block = uopt->lastblock; 1950 if (!uopt->novrs) { 1951 /* Check that it is NSR02 compliant */ 1952 nsr_off = udf_check_vsd(sb); 1953 if (!nsr_off) { 1954 if (!silent) 1955 udf_warn(sb, "No VRS found\n"); 1956 return -EINVAL; 1957 } 1958 if (nsr_off == -1) 1959 udf_debug("Failed to read sector at offset %d. " 1960 "Assuming open disc. 
static void udf_open_lvid(struct super_block *sb)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = sbi->s_lvid_bh;
	struct logicalVolIntegrityDesc *lvid;
	struct logicalVolIntegrityDescImpUse *lvidiu;
	struct timespec ts;

	if (!bh)
		return;
	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
	lvidiu = udf_sb_lvidiu(sb);
	if (!lvidiu)
		return;

	mutex_lock(&sbi->s_alloc_mutex);
	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
	lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
	ktime_get_real_ts(&ts);
	udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts);
	lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);

	lvid->descTag.descCRC = cpu_to_le16(
		crc_itu_t(0, (char *)lvid + sizeof(struct tag),
			le16_to_cpu(lvid->descTag.descCRCLength)));

	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
	mark_buffer_dirty(bh);
	sbi->s_lvid_dirty = 0;
	mutex_unlock(&sbi->s_alloc_mutex);
	/* Make opening of filesystem visible on the media immediately */
	sync_dirty_buffer(bh);
}

static void udf_close_lvid(struct super_block *sb)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = sbi->s_lvid_bh;
	struct logicalVolIntegrityDesc *lvid;
	struct logicalVolIntegrityDescImpUse *lvidiu;
	struct timespec ts;

	if (!bh)
		return;
	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
	lvidiu = udf_sb_lvidiu(sb);
	if (!lvidiu)
		return;

	mutex_lock(&sbi->s_alloc_mutex);
	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
	lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
	ktime_get_real_ts(&ts);
	udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts);
	if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
		lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
	if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
		lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
	if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
		lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
	lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);

	lvid->descTag.descCRC = cpu_to_le16(
		crc_itu_t(0, (char *)lvid + sizeof(struct tag),
			le16_to_cpu(lvid->descTag.descCRCLength)));

	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
	/*
	 * We set buffer uptodate unconditionally here to avoid spurious
	 * warnings from mark_buffer_dirty() when previous EIO has marked
	 * the buffer as !uptodate
	 */
	set_buffer_uptodate(bh);
	mark_buffer_dirty(bh);
	sbi->s_lvid_dirty = 0;
	mutex_unlock(&sbi->s_alloc_mutex);
	/* Make closing of filesystem visible on the media immediately */
	sync_dirty_buffer(bh);
}

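/*
 * Hand out the next unique file identifier from the Logical Volume Header
 * Descriptor stored in the LVID and advance the on-disk counter. When the
 * low 32 bits would wrap to zero the counter skips ahead by 16, keeping the
 * low values (reserved by the UDF specification) out of normal use.
 */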
u64 lvid_get_unique_id(struct super_block *sb)
{
	struct buffer_head *bh;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct logicalVolIntegrityDesc *lvid;
	struct logicalVolHeaderDesc *lvhd;
	u64 uniqueID;
	u64 ret;

	bh = sbi->s_lvid_bh;
	if (!bh)
		return 0;

	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
	lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;

	mutex_lock(&sbi->s_alloc_mutex);
	ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
	if (!(++uniqueID & 0xFFFFFFFF))
		uniqueID += 16;
	lvhd->uniqueID = cpu_to_le64(uniqueID);
	mutex_unlock(&sbi->s_alloc_mutex);
	mark_buffer_dirty(bh);

	return ret;
}

static int udf_fill_super(struct super_block *sb, void *options, int silent)
{
	int ret = -EINVAL;
	struct inode *inode = NULL;
	struct udf_options uopt;
	struct kernel_lb_addr rootdir, fileset;
	struct udf_sb_info *sbi;
	bool lvid_open = false;

	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
	uopt.uid = INVALID_UID;
	uopt.gid = INVALID_GID;
	uopt.umask = 0;
	uopt.fmode = UDF_INVALID_MODE;
	uopt.dmode = UDF_INVALID_MODE;

	sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;

	mutex_init(&sbi->s_alloc_mutex);

	if (!udf_parse_options((char *)options, &uopt, false))
		goto parse_options_failure;

	if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
	    uopt.flags & (1 << UDF_FLAG_NLS_MAP)) {
		udf_err(sb, "utf8 cannot be combined with iocharset\n");
		goto parse_options_failure;
	}
#ifdef CONFIG_UDF_NLS
	if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) {
		uopt.nls_map = load_nls_default();
		if (!uopt.nls_map)
			uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
		else
			udf_debug("Using default NLS map\n");
	}
#endif
	if (!(uopt.flags & (1 << UDF_FLAG_NLS_MAP)))
		uopt.flags |= (1 << UDF_FLAG_UTF8);

	fileset.logicalBlockNum = 0xFFFFFFFF;
	fileset.partitionReferenceNum = 0xFFFF;

	sbi->s_flags = uopt.flags;
	sbi->s_uid = uopt.uid;
	sbi->s_gid = uopt.gid;
	sbi->s_umask = uopt.umask;
	sbi->s_fmode = uopt.fmode;
	sbi->s_dmode = uopt.dmode;
	sbi->s_nls_map = uopt.nls_map;
	rwlock_init(&sbi->s_cred_lock);

	if (uopt.session == 0xFFFFFFFF)
		sbi->s_session = udf_get_last_session(sb);
	else
		sbi->s_session = uopt.session;

	udf_debug("Multi-session=%d\n", sbi->s_session);

	/* Fill in the rest of the superblock */
	sb->s_op = &udf_sb_ops;
	sb->s_export_op = &udf_export_ops;

	sb->s_magic = UDF_SUPER_MAGIC;
	sb->s_time_gran = 1000;

	if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
		ret = udf_load_vrs(sb, &uopt, silent, &fileset);
	} else {
		uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
		while (uopt.blocksize <= 4096) {
			ret = udf_load_vrs(sb, &uopt, silent, &fileset);
			if (ret < 0) {
				if (!silent && ret != -EACCES) {
					pr_notice("Scanning with blocksize %d failed\n",
						  uopt.blocksize);
				}
				brelse(sbi->s_lvid_bh);
				sbi->s_lvid_bh = NULL;
				/*
				 * EACCES is special - we want to propagate to
				 * upper layers that we cannot handle RW mount.
				 */
				if (ret == -EACCES)
					break;
			} else
				break;

			uopt.blocksize <<= 1;
		}
	}
	if (ret < 0) {
		if (ret == -EAGAIN) {
			udf_warn(sb, "No partition found (1)\n");
			ret = -EINVAL;
		}
		goto error_out;
	}

	udf_debug("Lastblock=%d\n", sbi->s_last_block);

	if (sbi->s_lvid_bh) {
		struct logicalVolIntegrityDescImpUse *lvidiu =
							udf_sb_lvidiu(sb);
		uint16_t minUDFReadRev;
		uint16_t minUDFWriteRev;

		if (!lvidiu) {
			ret = -EINVAL;
			goto error_out;
		}
		minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
		minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
		if (minUDFReadRev > UDF_MAX_READ_VERSION) {
			udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
				minUDFReadRev,
				UDF_MAX_READ_VERSION);
			ret = -EINVAL;
			goto error_out;
		} else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION &&
			   !(sb->s_flags & MS_RDONLY)) {
			ret = -EACCES;
			goto error_out;
		}

		sbi->s_udfrev = minUDFWriteRev;

		if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE)
			UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE);
		if (minUDFReadRev >= UDF_VERS_USE_STREAMS)
			UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
	}

	if (!sbi->s_partitions) {
		udf_warn(sb, "No partition found (2)\n");
		ret = -EINVAL;
		goto error_out;
	}

	if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
			UDF_PART_FLAG_READ_ONLY &&
	    !(sb->s_flags & MS_RDONLY)) {
		ret = -EACCES;
		goto error_out;
	}

	if (udf_find_fileset(sb, &fileset, &rootdir)) {
		udf_warn(sb, "No fileset found\n");
		ret = -EINVAL;
		goto error_out;
	}

	if (!silent) {
		struct timestamp ts;
		udf_time_to_disk_stamp(&ts, sbi->s_record_time);
		udf_info("Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
			 sbi->s_volume_ident,
			 le16_to_cpu(ts.year), ts.month, ts.day,
			 ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
	}
	if (!(sb->s_flags & MS_RDONLY)) {
		udf_open_lvid(sb);
		lvid_open = true;
	}

	/* Assign the root inode */
	/* assign inodes by physical block number */
	/* perhaps it's not extensible enough, but for now ... */
	inode = udf_iget(sb, &rootdir);
	if (IS_ERR(inode)) {
		udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
			rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
		ret = PTR_ERR(inode);
		goto error_out;
	}

	/* Allocate a dentry for the root inode */
	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		udf_err(sb, "Couldn't allocate root dentry\n");
		ret = -ENOMEM;
		goto error_out;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_max_links = UDF_MAX_LINKS;
	return 0;

error_out:
	iput(sbi->s_vat_inode);
parse_options_failure:
#ifdef CONFIG_UDF_NLS
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
		unload_nls(sbi->s_nls_map);
#endif
	if (lvid_open)
		udf_close_lvid(sb);
	brelse(sbi->s_lvid_bh);
	udf_sb_free_partitions(sb);
	kfree(sbi);
	sb->s_fs_info = NULL;

	return ret;
}

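/*
 * Printf-style message helpers. These are normally reached through the
 * udf_err()/udf_warn() wrapper macros (see udfdecl.h), which supply the
 * calling function's name so messages identify both the device and the
 * code path that produced them.
 */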
void _udf_err(struct super_block *sb, const char *function,
	      const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("error (device %s): %s: %pV", sb->s_id, function, &vaf);

	va_end(args);
}

void _udf_warn(struct super_block *sb, const char *function,
	       const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_warn("warning (device %s): %s: %pV", sb->s_id, function, &vaf);

	va_end(args);
}

static void udf_put_super(struct super_block *sb)
{
	struct udf_sb_info *sbi;

	sbi = UDF_SB(sb);

	iput(sbi->s_vat_inode);
#ifdef CONFIG_UDF_NLS
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
		unload_nls(sbi->s_nls_map);
#endif
	if (!(sb->s_flags & MS_RDONLY))
		udf_close_lvid(sb);
	brelse(sbi->s_lvid_bh);
	udf_sb_free_partitions(sb);
	mutex_destroy(&sbi->s_alloc_mutex);
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}

static int udf_sync_fs(struct super_block *sb, int wait)
{
	struct udf_sb_info *sbi = UDF_SB(sb);

	mutex_lock(&sbi->s_alloc_mutex);
	if (sbi->s_lvid_dirty) {
		/*
		 * Blockdevice will be synced later so we don't have to submit
		 * the buffer for IO
		 */
		mark_buffer_dirty(sbi->s_lvid_bh);
		sbi->s_lvid_dirty = 0;
	}
	mutex_unlock(&sbi->s_alloc_mutex);

	return 0;
}

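/*
 * UDF does not preallocate a fixed inode table, so the file counts reported
 * here are synthesized: f_files is the number of files and directories
 * recorded in the LVID (when one is available) plus the free block count,
 * and f_ffree simply mirrors the free block count.
 */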
static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct logicalVolIntegrityDescImpUse *lvidiu;
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	lvidiu = udf_sb_lvidiu(sb);
	buf->f_type = UDF_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
	buf->f_bfree = udf_count_free(sb);
	buf->f_bavail = buf->f_bfree;
	buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) +
					  le32_to_cpu(lvidiu->numDirs)) : 0)
			+ buf->f_bfree;
	buf->f_ffree = buf->f_bfree;
	buf->f_namelen = UDF_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	return 0;
}

static unsigned int udf_count_free_bitmap(struct super_block *sb,
					  struct udf_bitmap *bitmap)
{
	struct buffer_head *bh = NULL;
	unsigned int accum = 0;
	int index;
	int block = 0, newblock;
	struct kernel_lb_addr loc;
	uint32_t bytes;
	uint8_t *ptr;
	uint16_t ident;
	struct spaceBitmapDesc *bm;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
	bh = udf_read_ptagged(sb, &loc, 0, &ident);

	if (!bh) {
		udf_err(sb, "udf_count_free failed\n");
		goto out;
	} else if (ident != TAG_IDENT_SBD) {
		brelse(bh);
		udf_err(sb, "udf_count_free failed\n");
		goto out;
	}

	bm = (struct spaceBitmapDesc *)bh->b_data;
	bytes = le32_to_cpu(bm->numOfBytes);
	index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
	ptr = (uint8_t *)bh->b_data;

	while (bytes > 0) {
		u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index);
		accum += bitmap_weight((const unsigned long *)(ptr + index),
					cur_bytes * 8);
		bytes -= cur_bytes;
		if (bytes) {
			brelse(bh);
			newblock = udf_get_lb_pblock(sb, &loc, ++block);
			bh = udf_tread(sb, newblock);
			if (!bh) {
				udf_debug("read failed\n");
				goto out;
			}
			index = 0;
			ptr = (uint8_t *)bh->b_data;
		}
	}
	brelse(bh);
out:
	return accum;
}

static unsigned int udf_count_free_table(struct super_block *sb,
					 struct inode *table)
{
	unsigned int accum = 0;
	uint32_t elen;
	struct kernel_lb_addr eloc;
	int8_t etype;
	struct extent_position epos;

	mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
	epos.block = UDF_I(table)->i_location;
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.bh = NULL;

	while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
		accum += (elen >> table->i_sb->s_blocksize_bits);

	brelse(epos.bh);
	mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);

	return accum;
}

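/*
 * Free block count for statfs. The cheap path is the freeSpaceTable entry
 * in the LVID; a value of 0xFFFFFFFF there is treated as unknown, in which
 * case the count is recomputed from the partition's unallocated/freed space
 * bitmaps or, failing that, from its unallocated/freed space tables.
 */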
static unsigned int udf_count_free(struct super_block *sb)
{
	unsigned int accum = 0;
	struct udf_sb_info *sbi;
	struct udf_part_map *map;

	sbi = UDF_SB(sb);
	if (sbi->s_lvid_bh) {
		struct logicalVolIntegrityDesc *lvid =
			(struct logicalVolIntegrityDesc *)
			sbi->s_lvid_bh->b_data;
		if (le32_to_cpu(lvid->numOfPartitions) > sbi->s_partition) {
			accum = le32_to_cpu(
					lvid->freeSpaceTable[sbi->s_partition]);
			if (accum == 0xFFFFFFFF)
				accum = 0;
		}
	}

	if (accum)
		return accum;

	map = &sbi->s_partmaps[sbi->s_partition];
	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		accum += udf_count_free_bitmap(sb,
					       map->s_uspace.s_bitmap);
	}
	if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
		accum += udf_count_free_bitmap(sb,
					       map->s_fspace.s_bitmap);
	}
	if (accum)
		return accum;

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		accum += udf_count_free_table(sb,
					      map->s_uspace.s_table);
	}
	if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
		accum += udf_count_free_table(sb,
					      map->s_fspace.s_table);
	}

	return accum;
}

MODULE_AUTHOR("Ben Fennema");
MODULE_DESCRIPTION("Universal Disk Format Filesystem");
MODULE_LICENSE("GPL");
module_init(init_udf_fs)
module_exit(exit_udf_fs)