xref: /linux/fs/udf/super.c (revision 988b0c541ed8b1c633c4d4df7169010635942e18)
1 /*
2  * super.c
3  *
4  * PURPOSE
5  *  Super block routines for the OSTA-UDF(tm) filesystem.
6  *
7  * DESCRIPTION
8  *  OSTA-UDF(tm) = Optical Storage Technology Association
9  *  Universal Disk Format.
10  *
11  *  This code is based on version 2.00 of the UDF specification,
12  *  and revision 3 of the ECMA 167 standard [equivalent to ISO 13346].
13  *    http://www.osta.org/
14  *    http://www.ecma.ch/
15  *    http://www.iso.org/
16  *
17  * COPYRIGHT
18  *  This file is distributed under the terms of the GNU General Public
19  *  License (GPL). Copies of the GPL can be obtained from:
20  *    ftp://prep.ai.mit.edu/pub/gnu/GPL
21  *  Each contributing author retains all rights to their own work.
22  *
23  *  (C) 1998 Dave Boynton
24  *  (C) 1998-2004 Ben Fennema
25  *  (C) 2000 Stelias Computing Inc
26  *
27  * HISTORY
28  *
29  *  09/24/98 dgb  changed to allow compiling outside of kernel, and
30  *                added some debugging.
31  *  10/01/98 dgb  updated to allow (some) possibility of compiling w/2.0.34
32  *  10/16/98      attempting some multi-session support
33  *  10/17/98      added freespace count for "df"
34  *  11/11/98 gr   added novrs option
35  *  11/26/98 dgb  added fileset,anchor mount options
36  *  12/06/98 blf  really hosed things royally. vat/sparing support. sequenced
37  *                vol descs. rewrote option handling based on isofs
38  *  12/20/98      find the free space bitmap (if it exists)
39  */
40 
41 #include "udfdecl.h"
42 
43 #include <linux/blkdev.h>
44 #include <linux/slab.h>
45 #include <linux/kernel.h>
46 #include <linux/module.h>
47 #include <linux/parser.h>
48 #include <linux/stat.h>
49 #include <linux/cdrom.h>
50 #include <linux/nls.h>
51 #include <linux/buffer_head.h>
52 #include <linux/vfs.h>
53 #include <linux/vmalloc.h>
54 #include <linux/errno.h>
55 #include <linux/mount.h>
56 #include <linux/seq_file.h>
57 #include <linux/bitmap.h>
58 #include <linux/crc-itu-t.h>
59 #include <linux/log2.h>
60 #include <asm/byteorder.h>
61 
62 #include "udf_sb.h"
63 #include "udf_i.h"
64 
65 #include <linux/init.h>
66 #include <asm/uaccess.h>
67 
68 #define VDS_POS_PRIMARY_VOL_DESC	0
69 #define VDS_POS_UNALLOC_SPACE_DESC	1
70 #define VDS_POS_LOGICAL_VOL_DESC	2
71 #define VDS_POS_PARTITION_DESC		3
72 #define VDS_POS_IMP_USE_VOL_DESC	4
73 #define VDS_POS_VOL_DESC_PTR		5
74 #define VDS_POS_TERMINATING_DESC	6
75 #define VDS_POS_LENGTH			7
76 
77 #define UDF_DEFAULT_BLOCKSIZE 2048
78 
79 #define VSD_FIRST_SECTOR_OFFSET		32768
80 #define VSD_MAX_SECTOR_OFFSET		0x800000
81 
82 enum { UDF_MAX_LINKS = 0xffff };
83 
84 /* These are the "meat" - everything else is stuffing */
85 static int udf_fill_super(struct super_block *, void *, int);
86 static void udf_put_super(struct super_block *);
87 static int udf_sync_fs(struct super_block *, int);
88 static int udf_remount_fs(struct super_block *, int *, char *);
89 static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad);
90 static int udf_find_fileset(struct super_block *, struct kernel_lb_addr *,
91 			    struct kernel_lb_addr *);
92 static void udf_load_fileset(struct super_block *, struct buffer_head *,
93 			     struct kernel_lb_addr *);
94 static void udf_open_lvid(struct super_block *);
95 static void udf_close_lvid(struct super_block *);
96 static unsigned int udf_count_free(struct super_block *);
97 static int udf_statfs(struct dentry *, struct kstatfs *);
98 static int udf_show_options(struct seq_file *, struct dentry *);
99 
100 struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
101 {
102 	struct logicalVolIntegrityDesc *lvid;
103 	unsigned int partnum;
104 	unsigned int offset;
105 
106 	if (!UDF_SB(sb)->s_lvid_bh)
107 		return NULL;
108 	lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
109 	partnum = le32_to_cpu(lvid->numOfPartitions);
110 	if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
111 	     offsetof(struct logicalVolIntegrityDesc, impUse)) /
112 	     (2 * sizeof(uint32_t)) < partnum) {
113 		udf_err(sb, "Logical volume integrity descriptor corrupted "
114 			"(numOfPartitions = %u)!\n", partnum);
115 		return NULL;
116 	}
117 	/* The offset is to skip freeSpaceTable and sizeTable arrays */
118 	offset = partnum * 2 * sizeof(uint32_t);
119 	return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
120 }
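/*
 * Illustrative arithmetic for the bound checked above: lvid->impUse[]
 * begins with numOfPartitions freeSpaceTable[] entries followed by
 * numOfPartitions sizeTable[] entries, each a uint32_t.  With
 * numOfPartitions == 2, the implementation use area therefore starts
 * 2 * 2 * sizeof(uint32_t) == 16 bytes into impUse[], and the check above
 * ensures that this offset plus struct logicalVolIntegrityDescImpUse still
 * fits within one block.
 */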
121 
122 /* UDF filesystem type */
123 static struct dentry *udf_mount(struct file_system_type *fs_type,
124 		      int flags, const char *dev_name, void *data)
125 {
126 	return mount_bdev(fs_type, flags, dev_name, data, udf_fill_super);
127 }
128 
129 static struct file_system_type udf_fstype = {
130 	.owner		= THIS_MODULE,
131 	.name		= "udf",
132 	.mount		= udf_mount,
133 	.kill_sb	= kill_block_super,
134 	.fs_flags	= FS_REQUIRES_DEV,
135 };
136 MODULE_ALIAS_FS("udf");
137 
138 static struct kmem_cache *udf_inode_cachep;
139 
140 static struct inode *udf_alloc_inode(struct super_block *sb)
141 {
142 	struct udf_inode_info *ei;
143 	ei = kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
144 	if (!ei)
145 		return NULL;
146 
147 	ei->i_unique = 0;
148 	ei->i_lenExtents = 0;
149 	ei->i_next_alloc_block = 0;
150 	ei->i_next_alloc_goal = 0;
151 	ei->i_strat4096 = 0;
152 	init_rwsem(&ei->i_data_sem);
153 	ei->cached_extent.lstart = -1;
154 	spin_lock_init(&ei->i_extent_cache_lock);
155 
156 	return &ei->vfs_inode;
157 }
158 
159 static void udf_i_callback(struct rcu_head *head)
160 {
161 	struct inode *inode = container_of(head, struct inode, i_rcu);
162 	kmem_cache_free(udf_inode_cachep, UDF_I(inode));
163 }
164 
165 static void udf_destroy_inode(struct inode *inode)
166 {
167 	call_rcu(&inode->i_rcu, udf_i_callback);
168 }
169 
170 static void init_once(void *foo)
171 {
172 	struct udf_inode_info *ei = (struct udf_inode_info *)foo;
173 
174 	ei->i_ext.i_data = NULL;
175 	inode_init_once(&ei->vfs_inode);
176 }
177 
178 static int __init init_inodecache(void)
179 {
180 	udf_inode_cachep = kmem_cache_create("udf_inode_cache",
181 					     sizeof(struct udf_inode_info),
182 					     0, (SLAB_RECLAIM_ACCOUNT |
183 						 SLAB_MEM_SPREAD),
184 					     init_once);
185 	if (!udf_inode_cachep)
186 		return -ENOMEM;
187 	return 0;
188 }
189 
190 static void destroy_inodecache(void)
191 {
192 	/*
193 	 * Make sure all delayed rcu free inodes are flushed before we
194 	 * destroy cache.
195 	 */
196 	rcu_barrier();
197 	kmem_cache_destroy(udf_inode_cachep);
198 }
199 
200 /* Superblock operations */
201 static const struct super_operations udf_sb_ops = {
202 	.alloc_inode	= udf_alloc_inode,
203 	.destroy_inode	= udf_destroy_inode,
204 	.write_inode	= udf_write_inode,
205 	.evict_inode	= udf_evict_inode,
206 	.put_super	= udf_put_super,
207 	.sync_fs	= udf_sync_fs,
208 	.statfs		= udf_statfs,
209 	.remount_fs	= udf_remount_fs,
210 	.show_options	= udf_show_options,
211 };
212 
213 struct udf_options {
214 	unsigned char novrs;
215 	unsigned int blocksize;
216 	unsigned int session;
217 	unsigned int lastblock;
218 	unsigned int anchor;
219 	unsigned int volume;
220 	unsigned short partition;
221 	unsigned int fileset;
222 	unsigned int rootdir;
223 	unsigned int flags;
224 	umode_t umask;
225 	kgid_t gid;
226 	kuid_t uid;
227 	umode_t fmode;
228 	umode_t dmode;
229 	struct nls_table *nls_map;
230 };
231 
232 static int __init init_udf_fs(void)
233 {
234 	int err;
235 
236 	err = init_inodecache();
237 	if (err)
238 		goto out1;
239 	err = register_filesystem(&udf_fstype);
240 	if (err)
241 		goto out;
242 
243 	return 0;
244 
245 out:
246 	destroy_inodecache();
247 
248 out1:
249 	return err;
250 }
251 
252 static void __exit exit_udf_fs(void)
253 {
254 	unregister_filesystem(&udf_fstype);
255 	destroy_inodecache();
256 }
257 
258 module_init(init_udf_fs)
259 module_exit(exit_udf_fs)
260 
261 static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count)
262 {
263 	struct udf_sb_info *sbi = UDF_SB(sb);
264 
265 	sbi->s_partmaps = kcalloc(count, sizeof(struct udf_part_map),
266 				  GFP_KERNEL);
267 	if (!sbi->s_partmaps) {
268 		udf_err(sb, "Unable to allocate space for %d partition maps\n",
269 			count);
270 		sbi->s_partitions = 0;
271 		return -ENOMEM;
272 	}
273 
274 	sbi->s_partitions = count;
275 	return 0;
276 }
277 
278 static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
279 {
280 	int i;
281 	int nr_groups = bitmap->s_nr_groups;
282 	int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) *
283 						nr_groups);
284 
285 	for (i = 0; i < nr_groups; i++)
286 		if (bitmap->s_block_bitmap[i])
287 			brelse(bitmap->s_block_bitmap[i]);
288 
289 	if (size <= PAGE_SIZE)
290 		kfree(bitmap);
291 	else
292 		vfree(bitmap);
293 }
294 
295 static void udf_free_partition(struct udf_part_map *map)
296 {
297 	int i;
298 	struct udf_meta_data *mdata;
299 
300 	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
301 		iput(map->s_uspace.s_table);
302 	if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
303 		iput(map->s_fspace.s_table);
304 	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
305 		udf_sb_free_bitmap(map->s_uspace.s_bitmap);
306 	if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
307 		udf_sb_free_bitmap(map->s_fspace.s_bitmap);
308 	if (map->s_partition_type == UDF_SPARABLE_MAP15)
309 		for (i = 0; i < 4; i++)
310 			brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
311 	else if (map->s_partition_type == UDF_METADATA_MAP25) {
312 		mdata = &map->s_type_specific.s_metadata;
313 		iput(mdata->s_metadata_fe);
314 		mdata->s_metadata_fe = NULL;
315 
316 		iput(mdata->s_mirror_fe);
317 		mdata->s_mirror_fe = NULL;
318 
319 		iput(mdata->s_bitmap_fe);
320 		mdata->s_bitmap_fe = NULL;
321 	}
322 }
323 
324 static void udf_sb_free_partitions(struct super_block *sb)
325 {
326 	struct udf_sb_info *sbi = UDF_SB(sb);
327 	int i;
328 	if (sbi->s_partmaps == NULL)
329 		return;
330 	for (i = 0; i < sbi->s_partitions; i++)
331 		udf_free_partition(&sbi->s_partmaps[i]);
332 	kfree(sbi->s_partmaps);
333 	sbi->s_partmaps = NULL;
334 }
335 
336 static int udf_show_options(struct seq_file *seq, struct dentry *root)
337 {
338 	struct super_block *sb = root->d_sb;
339 	struct udf_sb_info *sbi = UDF_SB(sb);
340 
341 	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
342 		seq_puts(seq, ",nostrict");
343 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_BLOCKSIZE_SET))
344 		seq_printf(seq, ",bs=%lu", sb->s_blocksize);
345 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
346 		seq_puts(seq, ",unhide");
347 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
348 		seq_puts(seq, ",undelete");
349 	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB))
350 		seq_puts(seq, ",noadinicb");
351 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD))
352 		seq_puts(seq, ",shortad");
353 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET))
354 		seq_puts(seq, ",uid=forget");
355 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_IGNORE))
356 		seq_puts(seq, ",uid=ignore");
357 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET))
358 		seq_puts(seq, ",gid=forget");
359 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_IGNORE))
360 		seq_puts(seq, ",gid=ignore");
361 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
362 		seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid));
363 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
364 		seq_printf(seq, ",gid=%u", from_kgid(&init_user_ns, sbi->s_gid));
365 	if (sbi->s_umask != 0)
366 		seq_printf(seq, ",umask=%ho", sbi->s_umask);
367 	if (sbi->s_fmode != UDF_INVALID_MODE)
368 		seq_printf(seq, ",mode=%ho", sbi->s_fmode);
369 	if (sbi->s_dmode != UDF_INVALID_MODE)
370 		seq_printf(seq, ",dmode=%ho", sbi->s_dmode);
371 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
372 		seq_printf(seq, ",session=%u", sbi->s_session);
373 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
374 		seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
375 	if (sbi->s_anchor != 0)
376 		seq_printf(seq, ",anchor=%u", sbi->s_anchor);
377 	/*
378 	 * volume, partition, fileset and rootdir seem to be ignored
379 	 * currently
380 	 */
381 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
382 		seq_puts(seq, ",utf8");
383 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map)
384 		seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
385 
386 	return 0;
387 }
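/*
 * Illustrative output (assumed, not taken from this file): a filesystem
 * mounted with "uid=1000,utf8" would have ",uid=1000,utf8" appended to its
 * line in /proc/mounts by the callback above.
 */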
388 
389 /*
390  * udf_parse_options
391  *
392  * PURPOSE
393  *	Parse mount options.
394  *
395  * DESCRIPTION
396  *	The following mount options are supported:
397  *
398  *	gid=		Set the default group.
399  *	umask=		Set the default umask.
400  *	mode=		Set the default file permissions.
401  *	dmode=		Set the default directory permissions.
402  *	uid=		Set the default user.
403  *	bs=		Set the block size.
404  *	unhide		Show otherwise hidden files.
405  *	undelete	Show deleted files in lists.
406  *	adinicb		Embed data in the inode (default)
407  *	noadinicb	Don't embed data in the inode
408  *	shortad		Use short allocation descriptors
409  *	longad		Use long allocation descriptors (default)
410  *	nostrict	Unset strict conformance
411  *	iocharset=	Set the NLS character set
412  *
413  *	The remaining options are for debugging and disaster recovery:
414  *
415  *	novrs		Skip volume sequence recognition
416  *
417  *	The following expect an offset from 0.
418  *
419  *	session=	Set the CDROM session (default= last session)
420  *	anchor=		Override standard anchor location. (default= 256)
421  *	volume=		Override the VolumeDesc location. (unused)
422  *	partition=	Override the PartitionDesc location. (unused)
423  *	lastblock=	Set the last block of the filesystem.
424  *
425  *	The following expect an offset from the partition root.
426  *
427  *	fileset=	Override the fileset block location. (unused)
428  *	rootdir=	Override the root directory location. (unused)
429  *		WARNING: overriding the rootdir to a non-directory may
430  *		yield highly unpredictable results.
431  *
432  * PRE-CONDITIONS
433  *	options		Pointer to mount options string.
434  *	uopts		Pointer to mount options variable.
435  *
436  * POST-CONDITIONS
437  *	<return>	1	Mount options parsed okay.
438  *	<return>	0	Error parsing mount options.
439  *
440  * HISTORY
441  *	July 1, 1997 - Andrew E. Mileski
442  *	Written, tested, and released.
443  */
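/*
 * Illustrative usage (assumed typical invocation, not taken from this file):
 *
 *	mount -t udf -o uid=1000,gid=1000,umask=022,iocharset=utf8 /dev/sr0 /mnt
 *
 * hands udf_parse_options() the string
 * "uid=1000,gid=1000,umask=022,iocharset=utf8", which is split on ',' and
 * matched against the token table below.
 */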
444 
445 enum {
446 	Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete,
447 	Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad,
448 	Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock,
449 	Opt_anchor, Opt_volume, Opt_partition, Opt_fileset,
450 	Opt_rootdir, Opt_utf8, Opt_iocharset,
451 	Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore,
452 	Opt_fmode, Opt_dmode
453 };
454 
455 static const match_table_t tokens = {
456 	{Opt_novrs,	"novrs"},
457 	{Opt_nostrict,	"nostrict"},
458 	{Opt_bs,	"bs=%u"},
459 	{Opt_unhide,	"unhide"},
460 	{Opt_undelete,	"undelete"},
461 	{Opt_noadinicb,	"noadinicb"},
462 	{Opt_adinicb,	"adinicb"},
463 	{Opt_shortad,	"shortad"},
464 	{Opt_longad,	"longad"},
465 	{Opt_uforget,	"uid=forget"},
466 	{Opt_uignore,	"uid=ignore"},
467 	{Opt_gforget,	"gid=forget"},
468 	{Opt_gignore,	"gid=ignore"},
469 	{Opt_gid,	"gid=%u"},
470 	{Opt_uid,	"uid=%u"},
471 	{Opt_umask,	"umask=%o"},
472 	{Opt_session,	"session=%u"},
473 	{Opt_lastblock,	"lastblock=%u"},
474 	{Opt_anchor,	"anchor=%u"},
475 	{Opt_volume,	"volume=%u"},
476 	{Opt_partition,	"partition=%u"},
477 	{Opt_fileset,	"fileset=%u"},
478 	{Opt_rootdir,	"rootdir=%u"},
479 	{Opt_utf8,	"utf8"},
480 	{Opt_iocharset,	"iocharset=%s"},
481 	{Opt_fmode,     "mode=%o"},
482 	{Opt_dmode,     "dmode=%o"},
483 	{Opt_err,	NULL}
484 };
485 
486 static int udf_parse_options(char *options, struct udf_options *uopt,
487 			     bool remount)
488 {
489 	char *p;
490 	int option;
491 
492 	uopt->novrs = 0;
493 	uopt->partition = 0xFFFF;
494 	uopt->session = 0xFFFFFFFF;
495 	uopt->lastblock = 0;
496 	uopt->anchor = 0;
497 	uopt->volume = 0xFFFFFFFF;
498 	uopt->rootdir = 0xFFFFFFFF;
499 	uopt->fileset = 0xFFFFFFFF;
500 	uopt->nls_map = NULL;
501 
502 	if (!options)
503 		return 1;
504 
505 	while ((p = strsep(&options, ",")) != NULL) {
506 		substring_t args[MAX_OPT_ARGS];
507 		int token;
508 		unsigned n;
509 		if (!*p)
510 			continue;
511 
512 		token = match_token(p, tokens, args);
513 		switch (token) {
514 		case Opt_novrs:
515 			uopt->novrs = 1;
516 			break;
517 		case Opt_bs:
518 			if (match_int(&args[0], &option))
519 				return 0;
520 			n = option;
521 			if (n != 512 && n != 1024 && n != 2048 && n != 4096)
522 				return 0;
523 			uopt->blocksize = n;
524 			uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET);
525 			break;
526 		case Opt_unhide:
527 			uopt->flags |= (1 << UDF_FLAG_UNHIDE);
528 			break;
529 		case Opt_undelete:
530 			uopt->flags |= (1 << UDF_FLAG_UNDELETE);
531 			break;
532 		case Opt_noadinicb:
533 			uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
534 			break;
535 		case Opt_adinicb:
536 			uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
537 			break;
538 		case Opt_shortad:
539 			uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
540 			break;
541 		case Opt_longad:
542 			uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
543 			break;
544 		case Opt_gid:
545 			if (match_int(args, &option))
546 				return 0;
547 			uopt->gid = make_kgid(current_user_ns(), option);
548 			if (!gid_valid(uopt->gid))
549 				return 0;
550 			uopt->flags |= (1 << UDF_FLAG_GID_SET);
551 			break;
552 		case Opt_uid:
553 			if (match_int(args, &option))
554 				return 0;
555 			uopt->uid = make_kuid(current_user_ns(), option);
556 			if (!uid_valid(uopt->uid))
557 				return 0;
558 			uopt->flags |= (1 << UDF_FLAG_UID_SET);
559 			break;
560 		case Opt_umask:
561 			if (match_octal(args, &option))
562 				return 0;
563 			uopt->umask = option;
564 			break;
565 		case Opt_nostrict:
566 			uopt->flags &= ~(1 << UDF_FLAG_STRICT);
567 			break;
568 		case Opt_session:
569 			if (match_int(args, &option))
570 				return 0;
571 			uopt->session = option;
572 			if (!remount)
573 				uopt->flags |= (1 << UDF_FLAG_SESSION_SET);
574 			break;
575 		case Opt_lastblock:
576 			if (match_int(args, &option))
577 				return 0;
578 			uopt->lastblock = option;
579 			if (!remount)
580 				uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET);
581 			break;
582 		case Opt_anchor:
583 			if (match_int(args, &option))
584 				return 0;
585 			uopt->anchor = option;
586 			break;
587 		case Opt_volume:
588 			if (match_int(args, &option))
589 				return 0;
590 			uopt->volume = option;
591 			break;
592 		case Opt_partition:
593 			if (match_int(args, &option))
594 				return 0;
595 			uopt->partition = option;
596 			break;
597 		case Opt_fileset:
598 			if (match_int(args, &option))
599 				return 0;
600 			uopt->fileset = option;
601 			break;
602 		case Opt_rootdir:
603 			if (match_int(args, &option))
604 				return 0;
605 			uopt->rootdir = option;
606 			break;
607 		case Opt_utf8:
608 			uopt->flags |= (1 << UDF_FLAG_UTF8);
609 			break;
610 #ifdef CONFIG_UDF_NLS
611 		case Opt_iocharset:
612 			uopt->nls_map = load_nls(args[0].from);
613 			uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
614 			break;
615 #endif
616 		case Opt_uignore:
617 			uopt->flags |= (1 << UDF_FLAG_UID_IGNORE);
618 			break;
619 		case Opt_uforget:
620 			uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
621 			break;
622 		case Opt_gignore:
623 			uopt->flags |= (1 << UDF_FLAG_GID_IGNORE);
624 			break;
625 		case Opt_gforget:
626 			uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
627 			break;
628 		case Opt_fmode:
629 			if (match_octal(args, &option))
630 				return 0;
631 			uopt->fmode = option & 0777;
632 			break;
633 		case Opt_dmode:
634 			if (match_octal(args, &option))
635 				return 0;
636 			uopt->dmode = option & 0777;
637 			break;
638 		default:
639 			pr_err("bad mount option \"%s\" or missing value\n", p);
640 			return 0;
641 		}
642 	}
643 	return 1;
644 }
645 
646 static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
647 {
648 	struct udf_options uopt;
649 	struct udf_sb_info *sbi = UDF_SB(sb);
650 	int error = 0;
651 	struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
652 
653 	sync_filesystem(sb);
654 	if (lvidiu) {
655 		int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
656 		if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
657 			return -EACCES;
658 	}
659 
660 	uopt.flags = sbi->s_flags;
661 	uopt.uid   = sbi->s_uid;
662 	uopt.gid   = sbi->s_gid;
663 	uopt.umask = sbi->s_umask;
664 	uopt.fmode = sbi->s_fmode;
665 	uopt.dmode = sbi->s_dmode;
666 
667 	if (!udf_parse_options(options, &uopt, true))
668 		return -EINVAL;
669 
670 	write_lock(&sbi->s_cred_lock);
671 	sbi->s_flags = uopt.flags;
672 	sbi->s_uid   = uopt.uid;
673 	sbi->s_gid   = uopt.gid;
674 	sbi->s_umask = uopt.umask;
675 	sbi->s_fmode = uopt.fmode;
676 	sbi->s_dmode = uopt.dmode;
677 	write_unlock(&sbi->s_cred_lock);
678 
679 	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
680 		goto out_unlock;
681 
682 	if (*flags & MS_RDONLY)
683 		udf_close_lvid(sb);
684 	else
685 		udf_open_lvid(sb);
686 
687 out_unlock:
688 	return error;
689 }
690 
691 /* Check Volume Structure Descriptors (ECMA 167 2/9.1) */
692 /* We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */
693 static loff_t udf_check_vsd(struct super_block *sb)
694 {
695 	struct volStructDesc *vsd = NULL;
696 	loff_t sector = VSD_FIRST_SECTOR_OFFSET;
697 	int sectorsize;
698 	struct buffer_head *bh = NULL;
699 	int nsr02 = 0;
700 	int nsr03 = 0;
701 	struct udf_sb_info *sbi;
702 
703 	sbi = UDF_SB(sb);
704 	if (sb->s_blocksize < sizeof(struct volStructDesc))
705 		sectorsize = sizeof(struct volStructDesc);
706 	else
707 		sectorsize = sb->s_blocksize;
708 
709 	sector += (sbi->s_session << sb->s_blocksize_bits);
710 
711 	udf_debug("Starting at sector %u (%ld byte sectors)\n",
712 		  (unsigned int)(sector >> sb->s_blocksize_bits),
713 		  sb->s_blocksize);
714 	/* Process the sequence (if applicable). The hard limit on the sector
715 	 * offset is arbitrary, hopefully large enough so that all valid UDF
716 	 * filesystems will be recognised. There is no mention of an upper
717 	 * bound to the size of the volume recognition area in the standard.
718 	 * The limit will prevent the code from reading all the sectors of a
719 	 * specially crafted image (like a Blu-ray disc full of CD001 sectors),
720 	 * potentially causing minutes or even hours of uninterruptible I/O
721 	 * activity. This actually happened with uninitialised SSD partitions
722 	 * (all 0xFF) before the limit check and the full set of valid IDs were
723 	 * added. */
724 	for (; !nsr02 && !nsr03 && sector < VSD_MAX_SECTOR_OFFSET;
725 	     sector += sectorsize) {
726 		/* Read a block */
727 		bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
728 		if (!bh)
729 			break;
730 
731 		/* Look for ISO descriptors */
732 		vsd = (struct volStructDesc *)(bh->b_data +
733 					      (sector & (sb->s_blocksize - 1)));
734 
735 		if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001,
736 				    VSD_STD_ID_LEN)) {
737 			switch (vsd->structType) {
738 			case 0:
739 				udf_debug("ISO9660 Boot Record found\n");
740 				break;
741 			case 1:
742 				udf_debug("ISO9660 Primary Volume Descriptor found\n");
743 				break;
744 			case 2:
745 				udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
746 				break;
747 			case 3:
748 				udf_debug("ISO9660 Volume Partition Descriptor found\n");
749 				break;
750 			case 255:
751 				udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
752 				break;
753 			default:
754 				udf_debug("ISO9660 VRS (%u) found\n",
755 					  vsd->structType);
756 				break;
757 			}
758 		} else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01,
759 				    VSD_STD_ID_LEN))
760 			; /* nothing */
761 		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01,
762 				    VSD_STD_ID_LEN)) {
763 			brelse(bh);
764 			break;
765 		} else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02,
766 				    VSD_STD_ID_LEN))
767 			nsr02 = sector;
768 		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03,
769 				    VSD_STD_ID_LEN))
770 			nsr03 = sector;
771 		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BOOT2,
772 				    VSD_STD_ID_LEN))
773 			; /* nothing */
774 		else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CDW02,
775 				    VSD_STD_ID_LEN))
776 			; /* nothing */
777 		else {
778 			/* invalid id : end of volume recognition area */
779 			brelse(bh);
780 			break;
781 		}
782 		brelse(bh);
783 	}
784 
785 	if (nsr03)
786 		return nsr03;
787 	else if (nsr02)
788 		return nsr02;
789 	else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) ==
790 			VSD_FIRST_SECTOR_OFFSET)
791 		return -1;
792 	else
793 		return 0;
794 }
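/*
 * Illustrative layout of a volume recognition sequence as handled above
 * (a sketch, assuming a typical UDF disc): starting at byte offset 32768,
 * consecutive sectors carry "BEA01", optional "CD001" descriptors of a
 * bridge ISO 9660 image, "NSR02" or "NSR03", and finally "TEA01".
 * udf_check_vsd() returns the byte offset of the NSR descriptor found, 0 if
 * the sequence terminates without one, or -1 if not even the first sector
 * could be read.
 */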
795 
796 static int udf_find_fileset(struct super_block *sb,
797 			    struct kernel_lb_addr *fileset,
798 			    struct kernel_lb_addr *root)
799 {
800 	struct buffer_head *bh = NULL;
801 	long lastblock;
802 	uint16_t ident;
803 	struct udf_sb_info *sbi;
804 
805 	if (fileset->logicalBlockNum != 0xFFFFFFFF ||
806 	    fileset->partitionReferenceNum != 0xFFFF) {
807 		bh = udf_read_ptagged(sb, fileset, 0, &ident);
808 
809 		if (!bh) {
810 			return 1;
811 		} else if (ident != TAG_IDENT_FSD) {
812 			brelse(bh);
813 			return 1;
814 		}
815 
816 	}
817 
818 	sbi = UDF_SB(sb);
819 	if (!bh) {
820 		/* Search backwards through the partitions */
821 		struct kernel_lb_addr newfileset;
822 
823 /* --> cvg: FIXME - is it reasonable? */
824 		return 1;
825 
826 		for (newfileset.partitionReferenceNum = sbi->s_partitions - 1;
827 		     (newfileset.partitionReferenceNum != 0xFFFF &&
828 		      fileset->logicalBlockNum == 0xFFFFFFFF &&
829 		      fileset->partitionReferenceNum == 0xFFFF);
830 		     newfileset.partitionReferenceNum--) {
831 			lastblock = sbi->s_partmaps
832 					[newfileset.partitionReferenceNum]
833 						.s_partition_len;
834 			newfileset.logicalBlockNum = 0;
835 
836 			do {
837 				bh = udf_read_ptagged(sb, &newfileset, 0,
838 						      &ident);
839 				if (!bh) {
840 					newfileset.logicalBlockNum++;
841 					continue;
842 				}
843 
844 				switch (ident) {
845 				case TAG_IDENT_SBD:
846 				{
847 					struct spaceBitmapDesc *sp;
848 					sp = (struct spaceBitmapDesc *)
849 								bh->b_data;
850 					newfileset.logicalBlockNum += 1 +
851 						((le32_to_cpu(sp->numOfBytes) +
852 						  sizeof(struct spaceBitmapDesc)
853 						  - 1) >> sb->s_blocksize_bits);
854 					brelse(bh);
855 					break;
856 				}
857 				case TAG_IDENT_FSD:
858 					*fileset = newfileset;
859 					break;
860 				default:
861 					newfileset.logicalBlockNum++;
862 					brelse(bh);
863 					bh = NULL;
864 					break;
865 				}
866 			} while (newfileset.logicalBlockNum < lastblock &&
867 				 fileset->logicalBlockNum == 0xFFFFFFFF &&
868 				 fileset->partitionReferenceNum == 0xFFFF);
869 		}
870 	}
871 
872 	if ((fileset->logicalBlockNum != 0xFFFFFFFF ||
873 	     fileset->partitionReferenceNum != 0xFFFF) && bh) {
874 		udf_debug("Fileset at block=%d, partition=%d\n",
875 			  fileset->logicalBlockNum,
876 			  fileset->partitionReferenceNum);
877 
878 		sbi->s_partition = fileset->partitionReferenceNum;
879 		udf_load_fileset(sb, bh, root);
880 		brelse(bh);
881 		return 0;
882 	}
883 	return 1;
884 }
885 
886 /*
887  * Load primary Volume Descriptor Sequence
888  *
889  * Return <0 on error, 0 on success. -EAGAIN has the special meaning that the
890  * next sequence should be tried.
891  */
892 static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
893 {
894 	struct primaryVolDesc *pvoldesc;
895 	struct ustr *instr, *outstr;
896 	struct buffer_head *bh;
897 	uint16_t ident;
898 	int ret = -ENOMEM;
899 
900 	instr = kmalloc(sizeof(struct ustr), GFP_NOFS);
901 	if (!instr)
902 		return -ENOMEM;
903 
904 	outstr = kmalloc(sizeof(struct ustr), GFP_NOFS);
905 	if (!outstr)
906 		goto out1;
907 
908 	bh = udf_read_tagged(sb, block, block, &ident);
909 	if (!bh) {
910 		ret = -EAGAIN;
911 		goto out2;
912 	}
913 
914 	if (ident != TAG_IDENT_PVD) {
915 		ret = -EIO;
916 		goto out_bh;
917 	}
918 
919 	pvoldesc = (struct primaryVolDesc *)bh->b_data;
920 
921 	if (udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
922 			      pvoldesc->recordingDateAndTime)) {
923 #ifdef UDFFS_DEBUG
924 		struct timestamp *ts = &pvoldesc->recordingDateAndTime;
925 		udf_debug("recording time %04u/%02u/%02u %02u:%02u (%x)\n",
926 			  le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
927 			  ts->minute, le16_to_cpu(ts->typeAndTimezone));
928 #endif
929 	}
930 
931 	if (!udf_build_ustr(instr, pvoldesc->volIdent, 32))
932 		if (udf_CS0toUTF8(outstr, instr)) {
933 			strncpy(UDF_SB(sb)->s_volume_ident, outstr->u_name,
934 				outstr->u_len > 31 ? 31 : outstr->u_len);
935 			udf_debug("volIdent[] = '%s'\n",
936 				  UDF_SB(sb)->s_volume_ident);
937 		}
938 
939 	if (!udf_build_ustr(instr, pvoldesc->volSetIdent, 128))
940 		if (udf_CS0toUTF8(outstr, instr))
941 			udf_debug("volSetIdent[] = '%s'\n", outstr->u_name);
942 
943 	ret = 0;
944 out_bh:
945 	brelse(bh);
946 out2:
947 	kfree(outstr);
948 out1:
949 	kfree(instr);
950 	return ret;
951 }
952 
953 struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
954 					u32 meta_file_loc, u32 partition_num)
955 {
956 	struct kernel_lb_addr addr;
957 	struct inode *metadata_fe;
958 
959 	addr.logicalBlockNum = meta_file_loc;
960 	addr.partitionReferenceNum = partition_num;
961 
962 	metadata_fe = udf_iget(sb, &addr);
963 
964 	if (metadata_fe == NULL)
965 		udf_warn(sb, "metadata inode efe not found\n");
966 	else if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
967 		udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
968 		iput(metadata_fe);
969 		metadata_fe = NULL;
970 	}
971 
972 	return metadata_fe;
973 }
974 
975 static int udf_load_metadata_files(struct super_block *sb, int partition)
976 {
977 	struct udf_sb_info *sbi = UDF_SB(sb);
978 	struct udf_part_map *map;
979 	struct udf_meta_data *mdata;
980 	struct kernel_lb_addr addr;
981 
982 	map = &sbi->s_partmaps[partition];
983 	mdata = &map->s_type_specific.s_metadata;
984 
985 	/* metadata address */
986 	udf_debug("Metadata file location: block = %d part = %d\n",
987 		  mdata->s_meta_file_loc, map->s_partition_num);
988 
989 	mdata->s_metadata_fe = udf_find_metadata_inode_efe(sb,
990 		mdata->s_meta_file_loc, map->s_partition_num);
991 
992 	if (mdata->s_metadata_fe == NULL) {
993 		/* mirror file entry */
994 		udf_debug("Mirror metadata file location: block = %d part = %d\n",
995 			  mdata->s_mirror_file_loc, map->s_partition_num);
996 
997 		mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
998 			mdata->s_mirror_file_loc, map->s_partition_num);
999 
1000 		if (mdata->s_mirror_fe == NULL) {
1001 			udf_err(sb, "Both metadata and mirror metadata inode efe cannot be found\n");
1002 			return -EIO;
1003 		}
1004 	}
1005 
1006 	/*
1007 	 * bitmap file entry
1008 	 * Note:
1009 	 * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102)
1010 	 */
1011 	if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
1012 		addr.logicalBlockNum = mdata->s_bitmap_file_loc;
1013 		addr.partitionReferenceNum = map->s_partition_num;
1014 
1015 		udf_debug("Bitmap file location: block = %d part = %d\n",
1016 			  addr.logicalBlockNum, addr.partitionReferenceNum);
1017 
1018 		mdata->s_bitmap_fe = udf_iget(sb, &addr);
1019 		if (mdata->s_bitmap_fe == NULL) {
1020 			if (sb->s_flags & MS_RDONLY)
1021 				udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
1022 			else {
1023 				udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
1024 				return -EIO;
1025 			}
1026 		}
1027 	}
1028 
1029 	udf_debug("udf_load_metadata_files Ok\n");
1030 	return 0;
1031 }
1032 
1033 static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
1034 			     struct kernel_lb_addr *root)
1035 {
1036 	struct fileSetDesc *fset;
1037 
1038 	fset = (struct fileSetDesc *)bh->b_data;
1039 
1040 	*root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
1041 
1042 	UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);
1043 
1044 	udf_debug("Rootdir at block=%d, partition=%d\n",
1045 		  root->logicalBlockNum, root->partitionReferenceNum);
1046 }
1047 
1048 int udf_compute_nr_groups(struct super_block *sb, u32 partition)
1049 {
1050 	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
1051 	return DIV_ROUND_UP(map->s_partition_len +
1052 			    (sizeof(struct spaceBitmapDesc) << 3),
1053 			    sb->s_blocksize * 8);
1054 }
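/*
 * Worked example for the computation above (illustrative; assumes a 24-byte
 * struct spaceBitmapDesc): with 2048-byte blocks each bitmap block covers
 * 2048 * 8 = 16384 partition blocks and the descriptor header consumes
 * 24 << 3 = 192 bits, so a 1,000,000-block partition needs
 * DIV_ROUND_UP(1000000 + 192, 16384) = 62 groups.
 */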
1055 
1056 static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
1057 {
1058 	struct udf_bitmap *bitmap;
1059 	int nr_groups;
1060 	int size;
1061 
1062 	nr_groups = udf_compute_nr_groups(sb, index);
1063 	size = sizeof(struct udf_bitmap) +
1064 		(sizeof(struct buffer_head *) * nr_groups);
1065 
1066 	if (size <= PAGE_SIZE)
1067 		bitmap = kzalloc(size, GFP_KERNEL);
1068 	else
1069 		bitmap = vzalloc(size); /* TODO: get rid of vzalloc */
1070 
1071 	if (bitmap == NULL)
1072 		return NULL;
1073 
1074 	bitmap->s_nr_groups = nr_groups;
1075 	return bitmap;
1076 }
1077 
1078 static int udf_fill_partdesc_info(struct super_block *sb,
1079 		struct partitionDesc *p, int p_index)
1080 {
1081 	struct udf_part_map *map;
1082 	struct udf_sb_info *sbi = UDF_SB(sb);
1083 	struct partitionHeaderDesc *phd;
1084 
1085 	map = &sbi->s_partmaps[p_index];
1086 
1087 	map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
1088 	map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
1089 
1090 	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
1091 		map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
1092 	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
1093 		map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE;
1094 	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
1095 		map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE;
1096 	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
1097 		map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;
1098 
1099 	udf_debug("Partition (%d type %x) starts at physical %d, block length %d\n",
1100 		  p_index, map->s_partition_type,
1101 		  map->s_partition_root, map->s_partition_len);
1102 
1103 	if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
1104 	    strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
1105 		return 0;
1106 
1107 	phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
1108 	if (phd->unallocSpaceTable.extLength) {
1109 		struct kernel_lb_addr loc = {
1110 			.logicalBlockNum = le32_to_cpu(
1111 				phd->unallocSpaceTable.extPosition),
1112 			.partitionReferenceNum = p_index,
1113 		};
1114 
1115 		map->s_uspace.s_table = udf_iget(sb, &loc);
1116 		if (!map->s_uspace.s_table) {
1117 			udf_debug("cannot load unallocSpaceTable (part %d)\n",
1118 				  p_index);
1119 			return -EIO;
1120 		}
1121 		map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
1122 		udf_debug("unallocSpaceTable (part %d) @ %ld\n",
1123 			  p_index, map->s_uspace.s_table->i_ino);
1124 	}
1125 
1126 	if (phd->unallocSpaceBitmap.extLength) {
1127 		struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
1128 		if (!bitmap)
1129 			return -ENOMEM;
1130 		map->s_uspace.s_bitmap = bitmap;
1131 		bitmap->s_extPosition = le32_to_cpu(
1132 				phd->unallocSpaceBitmap.extPosition);
1133 		map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
1134 		udf_debug("unallocSpaceBitmap (part %d) @ %d\n",
1135 			  p_index, bitmap->s_extPosition);
1136 	}
1137 
1138 	if (phd->partitionIntegrityTable.extLength)
1139 		udf_debug("partitionIntegrityTable (part %d)\n", p_index);
1140 
1141 	if (phd->freedSpaceTable.extLength) {
1142 		struct kernel_lb_addr loc = {
1143 			.logicalBlockNum = le32_to_cpu(
1144 				phd->freedSpaceTable.extPosition),
1145 			.partitionReferenceNum = p_index,
1146 		};
1147 
1148 		map->s_fspace.s_table = udf_iget(sb, &loc);
1149 		if (!map->s_fspace.s_table) {
1150 			udf_debug("cannot load freedSpaceTable (part %d)\n",
1151 				  p_index);
1152 			return -EIO;
1153 		}
1154 
1155 		map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
1156 		udf_debug("freedSpaceTable (part %d) @ %ld\n",
1157 			  p_index, map->s_fspace.s_table->i_ino);
1158 	}
1159 
1160 	if (phd->freedSpaceBitmap.extLength) {
1161 		struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
1162 		if (!bitmap)
1163 			return -ENOMEM;
1164 		map->s_fspace.s_bitmap = bitmap;
1165 		bitmap->s_extPosition = le32_to_cpu(
1166 				phd->freedSpaceBitmap.extPosition);
1167 		map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
1168 		udf_debug("freedSpaceBitmap (part %d) @ %d\n",
1169 			  p_index, bitmap->s_extPosition);
1170 	}
1171 	return 0;
1172 }
1173 
1174 static void udf_find_vat_block(struct super_block *sb, int p_index,
1175 			       int type1_index, sector_t start_block)
1176 {
1177 	struct udf_sb_info *sbi = UDF_SB(sb);
1178 	struct udf_part_map *map = &sbi->s_partmaps[p_index];
1179 	sector_t vat_block;
1180 	struct kernel_lb_addr ino;
1181 
1182 	/*
1183 	 * VAT file entry is in the last recorded block. Some broken disks have
1184 	 * it a few blocks before so try a bit harder...
1185 	 */
1186 	ino.partitionReferenceNum = type1_index;
1187 	for (vat_block = start_block;
1188 	     vat_block >= map->s_partition_root &&
1189 	     vat_block >= start_block - 3 &&
1190 	     !sbi->s_vat_inode; vat_block--) {
1191 		ino.logicalBlockNum = vat_block - map->s_partition_root;
1192 		sbi->s_vat_inode = udf_iget(sb, &ino);
1193 	}
1194 }
1195 
1196 static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
1197 {
1198 	struct udf_sb_info *sbi = UDF_SB(sb);
1199 	struct udf_part_map *map = &sbi->s_partmaps[p_index];
1200 	struct buffer_head *bh = NULL;
1201 	struct udf_inode_info *vati;
1202 	uint32_t pos;
1203 	struct virtualAllocationTable20 *vat20;
1204 	sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
1205 
1206 	udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
1207 	if (!sbi->s_vat_inode &&
1208 	    sbi->s_last_block != blocks - 1) {
1209 		pr_notice("Failed to read VAT inode from the last recorded block (%lu), retrying with the last block of the device (%lu).\n",
1210 			  (unsigned long)sbi->s_last_block,
1211 			  (unsigned long)blocks - 1);
1212 		udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
1213 	}
1214 	if (!sbi->s_vat_inode)
1215 		return -EIO;
1216 
1217 	if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
1218 		map->s_type_specific.s_virtual.s_start_offset = 0;
1219 		map->s_type_specific.s_virtual.s_num_entries =
1220 			(sbi->s_vat_inode->i_size - 36) >> 2;
1221 	} else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
1222 		vati = UDF_I(sbi->s_vat_inode);
1223 		if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
1224 			pos = udf_block_map(sbi->s_vat_inode, 0);
1225 			bh = sb_bread(sb, pos);
1226 			if (!bh)
1227 				return -EIO;
1228 			vat20 = (struct virtualAllocationTable20 *)bh->b_data;
1229 		} else {
1230 			vat20 = (struct virtualAllocationTable20 *)
1231 							vati->i_ext.i_data;
1232 		}
1233 
1234 		map->s_type_specific.s_virtual.s_start_offset =
1235 			le16_to_cpu(vat20->lengthHeader);
1236 		map->s_type_specific.s_virtual.s_num_entries =
1237 			(sbi->s_vat_inode->i_size -
1238 				map->s_type_specific.s_virtual.
1239 					s_start_offset) >> 2;
1240 		brelse(bh);
1241 	}
1242 	return 0;
1243 }
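/*
 * Illustrative note on the arithmetic above (assumed from the UDF layout):
 * the VAT is an array of 32-bit entries mapping virtual block numbers to
 * logical blocks, hence the ">> 2" when deriving s_num_entries.  For a
 * UDF 1.50 style VAT the 36 bytes subtracted are trailing identification
 * data; for UDF 2.00 the header length comes from the VAT itself
 * (vat20->lengthHeader).
 */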
1244 
1245 /*
1246  * Load partition descriptor block
1247  *
1248  * Returns <0 on error, 0 on success, -EAGAIN is special - try next descriptor
1249  * sequence.
1250  */
1251 static int udf_load_partdesc(struct super_block *sb, sector_t block)
1252 {
1253 	struct buffer_head *bh;
1254 	struct partitionDesc *p;
1255 	struct udf_part_map *map;
1256 	struct udf_sb_info *sbi = UDF_SB(sb);
1257 	int i, type1_idx;
1258 	uint16_t partitionNumber;
1259 	uint16_t ident;
1260 	int ret;
1261 
1262 	bh = udf_read_tagged(sb, block, block, &ident);
1263 	if (!bh)
1264 		return -EAGAIN;
1265 	if (ident != TAG_IDENT_PD) {
1266 		ret = 0;
1267 		goto out_bh;
1268 	}
1269 
1270 	p = (struct partitionDesc *)bh->b_data;
1271 	partitionNumber = le16_to_cpu(p->partitionNumber);
1272 
1273 	/* First scan for TYPE1, SPARABLE and METADATA partitions */
1274 	for (i = 0; i < sbi->s_partitions; i++) {
1275 		map = &sbi->s_partmaps[i];
1276 		udf_debug("Searching map: (%d == %d)\n",
1277 			  map->s_partition_num, partitionNumber);
1278 		if (map->s_partition_num == partitionNumber &&
1279 		    (map->s_partition_type == UDF_TYPE1_MAP15 ||
1280 		     map->s_partition_type == UDF_SPARABLE_MAP15))
1281 			break;
1282 	}
1283 
1284 	if (i >= sbi->s_partitions) {
1285 		udf_debug("Partition (%d) not found in partition map\n",
1286 			  partitionNumber);
1287 		ret = 0;
1288 		goto out_bh;
1289 	}
1290 
1291 	ret = udf_fill_partdesc_info(sb, p, i);
1292 	if (ret < 0)
1293 		goto out_bh;
1294 
1295 	/*
1296 	 * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
1297 	 * PHYSICAL partitions are already set up
1298 	 */
1299 	type1_idx = i;
1300 #ifdef UDFFS_DEBUG
1301 	map = NULL; /* suppress 'maybe used uninitialized' warning */
1302 #endif
1303 	for (i = 0; i < sbi->s_partitions; i++) {
1304 		map = &sbi->s_partmaps[i];
1305 
1306 		if (map->s_partition_num == partitionNumber &&
1307 		    (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
1308 		     map->s_partition_type == UDF_VIRTUAL_MAP20 ||
1309 		     map->s_partition_type == UDF_METADATA_MAP25))
1310 			break;
1311 	}
1312 
1313 	if (i >= sbi->s_partitions) {
1314 		ret = 0;
1315 		goto out_bh;
1316 	}
1317 
1318 	ret = udf_fill_partdesc_info(sb, p, i);
1319 	if (ret < 0)
1320 		goto out_bh;
1321 
1322 	if (map->s_partition_type == UDF_METADATA_MAP25) {
1323 		ret = udf_load_metadata_files(sb, i);
1324 		if (ret < 0) {
1325 			udf_err(sb, "error loading MetaData partition map %d\n",
1326 				i);
1327 			goto out_bh;
1328 		}
1329 	} else {
1330 		/*
1331 		 * If we have a partition with virtual map, we don't handle
1332 		 * writing to it (we overwrite blocks instead of relocating
1333 		 * them).
1334 		 */
1335 		if (!(sb->s_flags & MS_RDONLY)) {
1336 			ret = -EACCES;
1337 			goto out_bh;
1338 		}
1339 		ret = udf_load_vat(sb, i, type1_idx);
1340 		if (ret < 0)
1341 			goto out_bh;
1342 	}
1343 	ret = 0;
1344 out_bh:
1345 	/* In case loading failed, we handle cleanup in udf_fill_super */
1346 	brelse(bh);
1347 	return ret;
1348 }
1349 
1350 static int udf_load_sparable_map(struct super_block *sb,
1351 				 struct udf_part_map *map,
1352 				 struct sparablePartitionMap *spm)
1353 {
1354 	uint32_t loc;
1355 	uint16_t ident;
1356 	struct sparingTable *st;
1357 	struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
1358 	int i;
1359 	struct buffer_head *bh;
1360 
1361 	map->s_partition_type = UDF_SPARABLE_MAP15;
1362 	sdata->s_packet_len = le16_to_cpu(spm->packetLength);
1363 	if (!is_power_of_2(sdata->s_packet_len)) {
1364 		udf_err(sb, "error loading logical volume descriptor: "
1365 			"Invalid packet length %u\n",
1366 			(unsigned)sdata->s_packet_len);
1367 		return -EIO;
1368 	}
1369 	if (spm->numSparingTables > 4) {
1370 		udf_err(sb, "error loading logical volume descriptor: "
1371 			"Too many sparing tables (%d)\n",
1372 			(int)spm->numSparingTables);
1373 		return -EIO;
1374 	}
1375 
1376 	for (i = 0; i < spm->numSparingTables; i++) {
1377 		loc = le32_to_cpu(spm->locSparingTable[i]);
1378 		bh = udf_read_tagged(sb, loc, loc, &ident);
1379 		if (!bh)
1380 			continue;
1381 
1382 		st = (struct sparingTable *)bh->b_data;
1383 		if (ident != 0 ||
1384 		    strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
1385 			    strlen(UDF_ID_SPARING)) ||
1386 		    sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
1387 							sb->s_blocksize) {
1388 			brelse(bh);
1389 			continue;
1390 		}
1391 
1392 		sdata->s_spar_map[i] = bh;
1393 	}
1394 	map->s_partition_func = udf_get_pblock_spar15;
1395 	return 0;
1396 }
1397 
1398 static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1399 			       struct kernel_lb_addr *fileset)
1400 {
1401 	struct logicalVolDesc *lvd;
1402 	int i, offset;
1403 	uint8_t type;
1404 	struct udf_sb_info *sbi = UDF_SB(sb);
1405 	struct genericPartitionMap *gpm;
1406 	uint16_t ident;
1407 	struct buffer_head *bh;
1408 	unsigned int table_len;
1409 	int ret;
1410 
1411 	bh = udf_read_tagged(sb, block, block, &ident);
1412 	if (!bh)
1413 		return -EAGAIN;
1414 	BUG_ON(ident != TAG_IDENT_LVD);
1415 	lvd = (struct logicalVolDesc *)bh->b_data;
1416 	table_len = le32_to_cpu(lvd->mapTableLength);
1417 	if (table_len > sb->s_blocksize - sizeof(*lvd)) {
1418 		udf_err(sb, "error loading logical volume descriptor: "
1419 			"Partition table too long (%u > %lu)\n", table_len,
1420 			sb->s_blocksize - sizeof(*lvd));
1421 		ret = -EIO;
1422 		goto out_bh;
1423 	}
1424 
1425 	ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
1426 	if (ret)
1427 		goto out_bh;
1428 
1429 	for (i = 0, offset = 0;
1430 	     i < sbi->s_partitions && offset < table_len;
1431 	     i++, offset += gpm->partitionMapLength) {
1432 		struct udf_part_map *map = &sbi->s_partmaps[i];
1433 		gpm = (struct genericPartitionMap *)
1434 				&(lvd->partitionMaps[offset]);
1435 		type = gpm->partitionMapType;
1436 		if (type == 1) {
1437 			struct genericPartitionMap1 *gpm1 =
1438 				(struct genericPartitionMap1 *)gpm;
1439 			map->s_partition_type = UDF_TYPE1_MAP15;
1440 			map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum);
1441 			map->s_partition_num = le16_to_cpu(gpm1->partitionNum);
1442 			map->s_partition_func = NULL;
1443 		} else if (type == 2) {
1444 			struct udfPartitionMap2 *upm2 =
1445 						(struct udfPartitionMap2 *)gpm;
1446 			if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL,
1447 						strlen(UDF_ID_VIRTUAL))) {
1448 				u16 suf =
1449 					le16_to_cpu(((__le16 *)upm2->partIdent.
1450 							identSuffix)[0]);
1451 				if (suf < 0x0200) {
1452 					map->s_partition_type =
1453 							UDF_VIRTUAL_MAP15;
1454 					map->s_partition_func =
1455 							udf_get_pblock_virt15;
1456 				} else {
1457 					map->s_partition_type =
1458 							UDF_VIRTUAL_MAP20;
1459 					map->s_partition_func =
1460 							udf_get_pblock_virt20;
1461 				}
1462 			} else if (!strncmp(upm2->partIdent.ident,
1463 						UDF_ID_SPARABLE,
1464 						strlen(UDF_ID_SPARABLE))) {
1465 				ret = udf_load_sparable_map(sb, map,
1466 					(struct sparablePartitionMap *)gpm);
1467 				if (ret < 0)
1468 					goto out_bh;
1469 			} else if (!strncmp(upm2->partIdent.ident,
1470 						UDF_ID_METADATA,
1471 						strlen(UDF_ID_METADATA))) {
1472 				struct udf_meta_data *mdata =
1473 					&map->s_type_specific.s_metadata;
1474 				struct metadataPartitionMap *mdm =
1475 						(struct metadataPartitionMap *)
1476 						&(lvd->partitionMaps[offset]);
1477 				udf_debug("Parsing Logical vol part %d type %d  id=%s\n",
1478 					  i, type, UDF_ID_METADATA);
1479 
1480 				map->s_partition_type = UDF_METADATA_MAP25;
1481 				map->s_partition_func = udf_get_pblock_meta25;
1482 
1483 				mdata->s_meta_file_loc   =
1484 					le32_to_cpu(mdm->metadataFileLoc);
1485 				mdata->s_mirror_file_loc =
1486 					le32_to_cpu(mdm->metadataMirrorFileLoc);
1487 				mdata->s_bitmap_file_loc =
1488 					le32_to_cpu(mdm->metadataBitmapFileLoc);
1489 				mdata->s_alloc_unit_size =
1490 					le32_to_cpu(mdm->allocUnitSize);
1491 				mdata->s_align_unit_size =
1492 					le16_to_cpu(mdm->alignUnitSize);
1493 				if (mdm->flags & 0x01)
1494 					mdata->s_flags |= MF_DUPLICATE_MD;
1495 
1496 				udf_debug("Metadata Ident suffix=0x%x\n",
1497 					  le16_to_cpu(*(__le16 *)
1498 						      mdm->partIdent.identSuffix));
1499 				udf_debug("Metadata part num=%d\n",
1500 					  le16_to_cpu(mdm->partitionNum));
1501 				udf_debug("Metadata part alloc unit size=%d\n",
1502 					  le32_to_cpu(mdm->allocUnitSize));
1503 				udf_debug("Metadata file loc=%d\n",
1504 					  le32_to_cpu(mdm->metadataFileLoc));
1505 				udf_debug("Mirror file loc=%d\n",
1506 					  le32_to_cpu(mdm->metadataMirrorFileLoc));
1507 				udf_debug("Bitmap file loc=%d\n",
1508 					  le32_to_cpu(mdm->metadataBitmapFileLoc));
1509 				udf_debug("Flags: %d %d\n",
1510 					  mdata->s_flags, mdm->flags);
1511 			} else {
1512 				udf_debug("Unknown ident: %s\n",
1513 					  upm2->partIdent.ident);
1514 				continue;
1515 			}
1516 			map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum);
1517 			map->s_partition_num = le16_to_cpu(upm2->partitionNum);
1518 		}
1519 		udf_debug("Partition (%d:%d) type %d on volume %d\n",
1520 			  i, map->s_partition_num, type, map->s_volumeseqnum);
1521 	}
1522 
1523 	if (fileset) {
1524 		struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]);
1525 
1526 		*fileset = lelb_to_cpu(la->extLocation);
1527 		udf_debug("FileSet found in LogicalVolDesc at block=%d, partition=%d\n",
1528 			  fileset->logicalBlockNum,
1529 			  fileset->partitionReferenceNum);
1530 	}
1531 	if (lvd->integritySeqExt.extLength)
1532 		udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
1533 	ret = 0;
1534 out_bh:
1535 	brelse(bh);
1536 	return ret;
1537 }
1538 
1539 /*
1540  * udf_load_logicalvolint
1541  *
1542  */
1543 static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc)
1544 {
1545 	struct buffer_head *bh = NULL;
1546 	uint16_t ident;
1547 	struct udf_sb_info *sbi = UDF_SB(sb);
1548 	struct logicalVolIntegrityDesc *lvid;
1549 
1550 	while (loc.extLength > 0 &&
1551 	       (bh = udf_read_tagged(sb, loc.extLocation,
1552 				     loc.extLocation, &ident)) &&
1553 	       ident == TAG_IDENT_LVID) {
1554 		sbi->s_lvid_bh = bh;
1555 		lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1556 
1557 		if (lvid->nextIntegrityExt.extLength)
1558 			udf_load_logicalvolint(sb,
1559 				leea_to_cpu(lvid->nextIntegrityExt));
1560 
1561 		if (sbi->s_lvid_bh != bh)
1562 			brelse(bh);
1563 		loc.extLength -= sb->s_blocksize;
1564 		loc.extLocation++;
1565 	}
1566 	if (sbi->s_lvid_bh != bh)
1567 		brelse(bh);
1568 }
1569 
1570 /*
1571  * Process a main/reserve volume descriptor sequence.
1572  *   @block		First block of first extent of the sequence.
1573  *   @lastblock		Last block of the first extent of the sequence.
1574  *   @fileset		Where we store the extent containing the root fileset.
1575  *
1576  * Returns <0 on error, 0 on success. -EAGAIN is special - try the next
1577  * descriptor sequence.
1578  */
1579 static noinline int udf_process_sequence(
1580 		struct super_block *sb,
1581 		sector_t block, sector_t lastblock,
1582 		struct kernel_lb_addr *fileset)
1583 {
1584 	struct buffer_head *bh = NULL;
1585 	struct udf_vds_record vds[VDS_POS_LENGTH];
1586 	struct udf_vds_record *curr;
1587 	struct generic_desc *gd;
1588 	struct volDescPtr *vdp;
1589 	int done = 0;
1590 	uint32_t vdsn;
1591 	uint16_t ident;
1592 	long next_s = 0, next_e = 0;
1593 	int ret;
1594 
1595 	memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
1596 
1597 	/*
1598 	 * Read the main descriptor sequence and find which descriptors
1599 	 * are in it.
1600 	 */
1601 	for (; (!done && block <= lastblock); block++) {
1602 
1603 		bh = udf_read_tagged(sb, block, block, &ident);
1604 		if (!bh) {
1605 			udf_err(sb,
1606 				"Block %llu of volume descriptor sequence is corrupted or we could not read it\n",
1607 				(unsigned long long)block);
1608 			return -EAGAIN;
1609 		}
1610 
1611 		/* Process each descriptor (ISO 13346 3/8.3-8.4) */
1612 		gd = (struct generic_desc *)bh->b_data;
1613 		vdsn = le32_to_cpu(gd->volDescSeqNum);
1614 		switch (ident) {
1615 		case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1616 			curr = &vds[VDS_POS_PRIMARY_VOL_DESC];
1617 			if (vdsn >= curr->volDescSeqNum) {
1618 				curr->volDescSeqNum = vdsn;
1619 				curr->block = block;
1620 			}
1621 			break;
1622 		case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
1623 			curr = &vds[VDS_POS_VOL_DESC_PTR];
1624 			if (vdsn >= curr->volDescSeqNum) {
1625 				curr->volDescSeqNum = vdsn;
1626 				curr->block = block;
1627 
1628 				vdp = (struct volDescPtr *)bh->b_data;
1629 				next_s = le32_to_cpu(
1630 					vdp->nextVolDescSeqExt.extLocation);
1631 				next_e = le32_to_cpu(
1632 					vdp->nextVolDescSeqExt.extLength);
1633 				next_e = next_e >> sb->s_blocksize_bits;
1634 				next_e += next_s;
1635 			}
1636 			break;
1637 		case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1638 			curr = &vds[VDS_POS_IMP_USE_VOL_DESC];
1639 			if (vdsn >= curr->volDescSeqNum) {
1640 				curr->volDescSeqNum = vdsn;
1641 				curr->block = block;
1642 			}
1643 			break;
1644 		case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1645 			curr = &vds[VDS_POS_PARTITION_DESC];
1646 			if (!curr->block)
1647 				curr->block = block;
1648 			break;
1649 		case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1650 			curr = &vds[VDS_POS_LOGICAL_VOL_DESC];
1651 			if (vdsn >= curr->volDescSeqNum) {
1652 				curr->volDescSeqNum = vdsn;
1653 				curr->block = block;
1654 			}
1655 			break;
1656 		case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1657 			curr = &vds[VDS_POS_UNALLOC_SPACE_DESC];
1658 			if (vdsn >= curr->volDescSeqNum) {
1659 				curr->volDescSeqNum = vdsn;
1660 				curr->block = block;
1661 			}
1662 			break;
1663 		case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
1664 			vds[VDS_POS_TERMINATING_DESC].block = block;
1665 			if (next_e) {
1666 				block = next_s;
1667 				lastblock = next_e;
1668 				next_s = next_e = 0;
1669 			} else
1670 				done = 1;
1671 			break;
1672 		}
1673 		brelse(bh);
1674 	}
1675 	/*
1676 	 * Now read interesting descriptors again and process them
1677 	 * in a suitable order
1678 	 */
1679 	if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) {
1680 		udf_err(sb, "Primary Volume Descriptor not found!\n");
1681 		return -EAGAIN;
1682 	}
1683 	ret = udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block);
1684 	if (ret < 0)
1685 		return ret;
1686 
1687 	if (vds[VDS_POS_LOGICAL_VOL_DESC].block) {
1688 		ret = udf_load_logicalvol(sb,
1689 					  vds[VDS_POS_LOGICAL_VOL_DESC].block,
1690 					  fileset);
1691 		if (ret < 0)
1692 			return ret;
1693 	}
1694 
1695 	if (vds[VDS_POS_PARTITION_DESC].block) {
1696 		/*
1697 		 * We rescan the whole descriptor sequence to find
1698 		 * partition descriptor blocks and process them.
1699 		 */
1700 		for (block = vds[VDS_POS_PARTITION_DESC].block;
1701 		     block < vds[VDS_POS_TERMINATING_DESC].block;
1702 		     block++) {
1703 			ret = udf_load_partdesc(sb, block);
1704 			if (ret < 0)
1705 				return ret;
1706 		}
1707 	}
1708 
1709 	return 0;
1710 }
1711 
1712 /*
1713  * Load Volume Descriptor Sequence described by anchor in bh
1714  *
1715  * Returns <0 on error, 0 on success
1716  */
1717 static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
1718 			     struct kernel_lb_addr *fileset)
1719 {
1720 	struct anchorVolDescPtr *anchor;
1721 	sector_t main_s, main_e, reserve_s, reserve_e;
1722 	int ret;
1723 
1724 	anchor = (struct anchorVolDescPtr *)bh->b_data;
1725 
1726 	/* Locate the main sequence */
1727 	main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
1728 	main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
1729 	main_e = main_e >> sb->s_blocksize_bits;
1730 	main_e += main_s;
1731 
1732 	/* Locate the reserve sequence */
1733 	reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation);
1734 	reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
1735 	reserve_e = reserve_e >> sb->s_blocksize_bits;
1736 	reserve_e += reserve_s;
1737 
1738 	/* Process the main & reserve sequences */
1739 	/* responsible for finding the PartitionDesc(s) */
1740 	ret = udf_process_sequence(sb, main_s, main_e, fileset);
1741 	if (ret != -EAGAIN)
1742 		return ret;
1743 	udf_sb_free_partitions(sb);
1744 	ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset);
1745 	if (ret < 0) {
1746 		udf_sb_free_partitions(sb);
1747 		/* No sequence was OK, return -EIO */
1748 		if (ret == -EAGAIN)
1749 			ret = -EIO;
1750 	}
1751 	return ret;
1752 }
1753 
1754 /*
1755  * Check whether there is an anchor block in the given block and
1756  * load Volume Descriptor Sequence if so.
1757  *
1758  * Returns <0 on error, 0 on success, -EAGAIN is special - try next anchor
1759  * block
1760  */
1761 static int udf_check_anchor_block(struct super_block *sb, sector_t block,
1762 				  struct kernel_lb_addr *fileset)
1763 {
1764 	struct buffer_head *bh;
1765 	uint16_t ident;
1766 	int ret;
1767 
1768 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) &&
1769 	    udf_fixed_to_variable(block) >=
1770 	    sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits)
1771 		return -EAGAIN;
1772 
1773 	bh = udf_read_tagged(sb, block, block, &ident);
1774 	if (!bh)
1775 		return -EAGAIN;
1776 	if (ident != TAG_IDENT_AVDP) {
1777 		brelse(bh);
1778 		return -EAGAIN;
1779 	}
1780 	ret = udf_load_sequence(sb, bh, fileset);
1781 	brelse(bh);
1782 	return ret;
1783 }
1784 
1785 /*
1786  * Search for an anchor volume descriptor pointer.
1787  *
1788  * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set
1789  * of anchors.
1790  */
1791 static int udf_scan_anchors(struct super_block *sb, sector_t *lastblock,
1792 			    struct kernel_lb_addr *fileset)
1793 {
1794 	sector_t last[6];
1795 	int i;
1796 	struct udf_sb_info *sbi = UDF_SB(sb);
1797 	int last_count = 0;
1798 	int ret;
1799 
1800 	/* First try the user-provided anchor */
1801 	if (sbi->s_anchor) {
1802 		ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset);
1803 		if (ret != -EAGAIN)
1804 			return ret;
1805 	}
1806 	/*
1807 	 * According to the specification, the anchor is in one of:
1808 	 *     block 256
1809 	 *     lastblock - 256
1810 	 *     lastblock
1811 	 * However, if the disc isn't closed, it could also be at block 512.
1812 	 */
1813 	ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset);
1814 	if (ret != -EAGAIN)
1815 		return ret;
1816 	/*
1817 	 * The trouble is which block is the last one. Drives often misreport
1818 	 * this so we try various possibilities.
1819 	 */
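	/*
	 * Heuristic candidates: +-1 and -2 cover simple off-by-one and
	 * off-by-two reporting, while -150 and -152 appear to account for
	 * drives that include lead-out / post-gap sectors of the last
	 * session in the size they report.
	 */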
1820 	last[last_count++] = *lastblock;
1821 	if (*lastblock >= 1)
1822 		last[last_count++] = *lastblock - 1;
1823 	last[last_count++] = *lastblock + 1;
1824 	if (*lastblock >= 2)
1825 		last[last_count++] = *lastblock - 2;
1826 	if (*lastblock >= 150)
1827 		last[last_count++] = *lastblock - 150;
1828 	if (*lastblock >= 152)
1829 		last[last_count++] = *lastblock - 152;
1830 
1831 	for (i = 0; i < last_count; i++) {
1832 		if (last[i] >= sb->s_bdev->bd_inode->i_size >>
1833 				sb->s_blocksize_bits)
1834 			continue;
1835 		ret = udf_check_anchor_block(sb, last[i], fileset);
1836 		if (ret != -EAGAIN) {
1837 			if (!ret)
1838 				*lastblock = last[i];
1839 			return ret;
1840 		}
1841 		if (last[i] < 256)
1842 			continue;
1843 		ret = udf_check_anchor_block(sb, last[i] - 256, fileset);
1844 		if (ret != -EAGAIN) {
1845 			if (!ret)
1846 				*lastblock = last[i];
1847 			return ret;
1848 		}
1849 	}
1850 
1851 	/* Finally try block 512 in case the media is open */
1852 	return udf_check_anchor_block(sb, sbi->s_session + 512, fileset);
1853 }
1854 
1855 /*
1856  * Find an anchor volume descriptor and load Volume Descriptor Sequence from
1857  * the area specified by it. The function expects sbi->s_last_block to be
1858  * the last block on the media.
1859  *
1860  * Returns <0 on error, 0 if an anchor was found. -EAGAIN is special, meaning
1861  * the anchor was not found.
1862  */
1863 static int udf_find_anchor(struct super_block *sb,
1864 			   struct kernel_lb_addr *fileset)
1865 {
1866 	struct udf_sb_info *sbi = UDF_SB(sb);
1867 	sector_t lastblock = sbi->s_last_block;
1868 	int ret;
1869 
1870 	ret = udf_scan_anchors(sb, &lastblock, fileset);
1871 	if (ret != -EAGAIN)
1872 		goto out;
1873 
1874 	/* No anchor found? Try VARCONV conversion of block numbers */
1875 	UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
1876 	lastblock = udf_variable_to_fixed(sbi->s_last_block);
1877 	/* First, try without converting the number of the last block */
1878 	ret = udf_scan_anchors(sb, &lastblock, fileset);
1879 	if (ret != -EAGAIN)
1880 		goto out;
1881 
1882 	lastblock = sbi->s_last_block;
1883 	/* Then try with the converted number of the last block */
1884 	ret = udf_scan_anchors(sb, &lastblock, fileset);
1885 	if (ret < 0) {
1886 		/* VARCONV didn't help. Clear it. */
1887 		UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV);
1888 	}
1889 out:
1890 	if (ret == 0)
1891 		sbi->s_last_block = lastblock;
1892 	return ret;
1893 }
1894 
1895 /*
1896  * Check Volume Structure Descriptor, find Anchor block and load Volume
1897  * Descriptor Sequence.
1898  *
1899  * Returns < 0 on error, 0 on success. -EAGAIN is special meaning anchor
1900  * block was not found.
1901  */
1902 static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
1903 			int silent, struct kernel_lb_addr *fileset)
1904 {
1905 	struct udf_sb_info *sbi = UDF_SB(sb);
1906 	loff_t nsr_off;
1907 	int ret;
1908 
1909 	if (!sb_set_blocksize(sb, uopt->blocksize)) {
1910 		if (!silent)
1911 			udf_warn(sb, "Bad block size\n");
1912 		return -EINVAL;
1913 	}
1914 	sbi->s_last_block = uopt->lastblock;
1915 	if (!uopt->novrs) {
1916 		/* Check that it is NSR02 compliant */
1917 		nsr_off = udf_check_vsd(sb);
1918 		if (!nsr_off) {
1919 			if (!silent)
1920 				udf_warn(sb, "No VRS found\n");
1921 			return 0;
1922 		}
1923 		if (nsr_off == -1)
1924 			udf_debug("Failed to read sector at offset %d. "
1925 				  "Assuming open disc. Skipping validity "
1926 				  "check\n", VSD_FIRST_SECTOR_OFFSET);
1927 		if (!sbi->s_last_block)
1928 			sbi->s_last_block = udf_get_last_block(sb);
1929 	} else {
1930 		udf_debug("Validity check skipped because of novrs option\n");
1931 	}
1932 
1933 	/* Look for anchor block and load Volume Descriptor Sequence */
1934 	sbi->s_anchor = uopt->anchor;
1935 	ret = udf_find_anchor(sb, fileset);
1936 	if (ret < 0) {
1937 		if (!silent && ret == -EAGAIN)
1938 			udf_warn(sb, "No anchor found\n");
1939 		return ret;
1940 	}
1941 	return 0;
1942 }
1943 
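/*
 * Mark the Logical Volume Integrity Descriptor as OPEN on the media so that
 * a later mount can tell the filesystem was not cleanly unmounted.  The
 * implementation identifier suffix is stamped with the Linux OS class/ID,
 * the recording time is refreshed, and the descriptor CRC and tag checksum
 * are recomputed before the buffer is synced out immediately.
 */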
1944 static void udf_open_lvid(struct super_block *sb)
1945 {
1946 	struct udf_sb_info *sbi = UDF_SB(sb);
1947 	struct buffer_head *bh = sbi->s_lvid_bh;
1948 	struct logicalVolIntegrityDesc *lvid;
1949 	struct logicalVolIntegrityDescImpUse *lvidiu;
1950 
1951 	if (!bh)
1952 		return;
1953 	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1954 	lvidiu = udf_sb_lvidiu(sb);
1955 	if (!lvidiu)
1956 		return;
1957 
1958 	mutex_lock(&sbi->s_alloc_mutex);
1959 	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1960 	lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1961 	udf_time_to_disk_stamp(&lvid->recordingDateAndTime,
1962 				CURRENT_TIME);
1963 	lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);
1964 
1965 	lvid->descTag.descCRC = cpu_to_le16(
1966 		crc_itu_t(0, (char *)lvid + sizeof(struct tag),
1967 			le16_to_cpu(lvid->descTag.descCRCLength)));
1968 
1969 	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
1970 	mark_buffer_dirty(bh);
1971 	sbi->s_lvid_dirty = 0;
1972 	mutex_unlock(&sbi->s_alloc_mutex);
1973 	/* Make opening of filesystem visible on the media immediately */
1974 	sync_dirty_buffer(bh);
1975 }
1976 
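/*
 * Mark the Logical Volume Integrity Descriptor as CLOSE again (clean state).
 * Besides refreshing the recording time, the minimum/maximum UDF read and
 * write revision fields are raised if needed to cover what this
 * implementation may have written, and the descriptor CRC and tag checksum
 * are recomputed before the buffer is synced out immediately.
 */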
1977 static void udf_close_lvid(struct super_block *sb)
1978 {
1979 	struct udf_sb_info *sbi = UDF_SB(sb);
1980 	struct buffer_head *bh = sbi->s_lvid_bh;
1981 	struct logicalVolIntegrityDesc *lvid;
1982 	struct logicalVolIntegrityDescImpUse *lvidiu;
1983 
1984 	if (!bh)
1985 		return;
1986 	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1987 	lvidiu = udf_sb_lvidiu(sb);
1988 	if (!lvidiu)
1989 		return;
1990 
1991 	mutex_lock(&sbi->s_alloc_mutex);
1992 	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1993 	lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1994 	udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME);
1995 	if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
1996 		lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
1997 	if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
1998 		lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
1999 	if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
2000 		lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
2001 	lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
2002 
2003 	lvid->descTag.descCRC = cpu_to_le16(
2004 			crc_itu_t(0, (char *)lvid + sizeof(struct tag),
2005 				le16_to_cpu(lvid->descTag.descCRCLength)));
2006 
2007 	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
2008 	/*
2009 	 * We set buffer uptodate unconditionally here to avoid spurious
2010 	 * warnings from mark_buffer_dirty() when previous EIO has marked
2011 	 * the buffer as !uptodate
2012 	 */
2013 	set_buffer_uptodate(bh);
2014 	mark_buffer_dirty(bh);
2015 	sbi->s_lvid_dirty = 0;
2016 	mutex_unlock(&sbi->s_alloc_mutex);
2017 	/* Make closing of filesystem visible on the media immediately */
2018 	sync_dirty_buffer(bh);
2019 }
2020 
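/*
 * Return the current uniqueID from the Logical Volume Header Descriptor
 * stored in the LVID and advance it for the next caller.  When the low
 * 32 bits of the incremented value wrap to zero, it is bumped by 16,
 * skipping the values 0-15 that UDF reserves.
 */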
2021 u64 lvid_get_unique_id(struct super_block *sb)
2022 {
2023 	struct buffer_head *bh;
2024 	struct udf_sb_info *sbi = UDF_SB(sb);
2025 	struct logicalVolIntegrityDesc *lvid;
2026 	struct logicalVolHeaderDesc *lvhd;
2027 	u64 uniqueID;
2028 	u64 ret;
2029 
2030 	bh = sbi->s_lvid_bh;
2031 	if (!bh)
2032 		return 0;
2033 
2034 	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2035 	lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;
2036 
2037 	mutex_lock(&sbi->s_alloc_mutex);
2038 	ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
2039 	if (!(++uniqueID & 0xFFFFFFFF))
2040 		uniqueID += 16;
2041 	lvhd->uniqueID = cpu_to_le64(uniqueID);
2042 	mutex_unlock(&sbi->s_alloc_mutex);
2043 	mark_buffer_dirty(bh);
2044 
2045 	return ret;
2046 }
2047 
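/*
 * Mount-time entry point: parse the mount options, locate the Volume
 * Recognition Sequence and anchor (retrying with the 2048-byte default
 * block size if needed), load the Volume Descriptor Sequence, enforce the
 * minimum UDF read/write revisions recorded in the LVID, find the fileset
 * and root directory, mark the LVID open for read-write mounts, and finally
 * read the root inode and allocate the root dentry.
 */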
2048 static int udf_fill_super(struct super_block *sb, void *options, int silent)
2049 {
2050 	int ret = -EINVAL;
2051 	struct inode *inode = NULL;
2052 	struct udf_options uopt;
2053 	struct kernel_lb_addr rootdir, fileset;
2054 	struct udf_sb_info *sbi;
2055 
2056 	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
2057 	uopt.uid = INVALID_UID;
2058 	uopt.gid = INVALID_GID;
2059 	uopt.umask = 0;
2060 	uopt.fmode = UDF_INVALID_MODE;
2061 	uopt.dmode = UDF_INVALID_MODE;
2062 
2063 	sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL);
2064 	if (!sbi)
2065 		return -ENOMEM;
2066 
2067 	sb->s_fs_info = sbi;
2068 
2069 	mutex_init(&sbi->s_alloc_mutex);
2070 
2071 	if (!udf_parse_options((char *)options, &uopt, false))
2072 		goto error_out;
2073 
2074 	if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
2075 	    uopt.flags & (1 << UDF_FLAG_NLS_MAP)) {
2076 		udf_err(sb, "utf8 cannot be combined with iocharset\n");
2077 		goto error_out;
2078 	}
2079 #ifdef CONFIG_UDF_NLS
2080 	if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) {
2081 		uopt.nls_map = load_nls_default();
2082 		if (!uopt.nls_map)
2083 			uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
2084 		else
2085 			udf_debug("Using default NLS map\n");
2086 	}
2087 #endif
2088 	if (!(uopt.flags & (1 << UDF_FLAG_NLS_MAP)))
2089 		uopt.flags |= (1 << UDF_FLAG_UTF8);
2090 
2091 	fileset.logicalBlockNum = 0xFFFFFFFF;
2092 	fileset.partitionReferenceNum = 0xFFFF;
2093 
2094 	sbi->s_flags = uopt.flags;
2095 	sbi->s_uid = uopt.uid;
2096 	sbi->s_gid = uopt.gid;
2097 	sbi->s_umask = uopt.umask;
2098 	sbi->s_fmode = uopt.fmode;
2099 	sbi->s_dmode = uopt.dmode;
2100 	sbi->s_nls_map = uopt.nls_map;
2101 	rwlock_init(&sbi->s_cred_lock);
2102 
2103 	if (uopt.session == 0xFFFFFFFF)
2104 		sbi->s_session = udf_get_last_session(sb);
2105 	else
2106 		sbi->s_session = uopt.session;
2107 
2108 	udf_debug("Multi-session=%d\n", sbi->s_session);
2109 
2110 	/* Fill in the rest of the superblock */
2111 	sb->s_op = &udf_sb_ops;
2112 	sb->s_export_op = &udf_export_ops;
2113 
2114 	sb->s_magic = UDF_SUPER_MAGIC;
2115 	sb->s_time_gran = 1000;
2116 
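	/*
	 * If the user forced a block size, use only that.  Otherwise start
	 * with the logical block size of the device and, when no volume is
	 * recognized, retry once with the 2048-byte default used by optical
	 * media.
	 */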
2117 	if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
2118 		ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2119 	} else {
2120 		uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
2121 		ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2122 		if (ret == -EAGAIN && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
2123 			if (!silent)
2124 				pr_notice("Rescanning with blocksize %d\n",
2125 					  UDF_DEFAULT_BLOCKSIZE);
2126 			brelse(sbi->s_lvid_bh);
2127 			sbi->s_lvid_bh = NULL;
2128 			uopt.blocksize = UDF_DEFAULT_BLOCKSIZE;
2129 			ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2130 		}
2131 	}
2132 	if (ret < 0) {
2133 		if (ret == -EAGAIN) {
2134 			udf_warn(sb, "No partition found (1)\n");
2135 			ret = -EINVAL;
2136 		}
2137 		goto error_out;
2138 	}
2139 
2140 	udf_debug("Lastblock=%d\n", sbi->s_last_block);
2141 
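	/*
	 * Enforce the revision limits recorded in the LVID: refuse the mount
	 * if the media requires a newer read revision than we support, and
	 * refuse a read-write mount if it requires a newer write revision.
	 */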
2142 	if (sbi->s_lvid_bh) {
2143 		struct logicalVolIntegrityDescImpUse *lvidiu =
2144 							udf_sb_lvidiu(sb);
2145 		uint16_t minUDFReadRev;
2146 		uint16_t minUDFWriteRev;
2147 
2148 		if (!lvidiu) {
2149 			ret = -EINVAL;
2150 			goto error_out;
2151 		}
2152 		minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
2153 		minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
2154 		if (minUDFReadRev > UDF_MAX_READ_VERSION) {
2155 			udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
2156 				minUDFReadRev,
2157 				UDF_MAX_READ_VERSION);
2158 			ret = -EINVAL;
2159 			goto error_out;
2160 		} else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION &&
2161 			   !(sb->s_flags & MS_RDONLY)) {
2162 			ret = -EACCES;
2163 			goto error_out;
2164 		}
2165 
2166 		sbi->s_udfrev = minUDFWriteRev;
2167 
2168 		if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE)
2169 			UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE);
2170 		if (minUDFReadRev >= UDF_VERS_USE_STREAMS)
2171 			UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
2172 	}
2173 
2174 	if (!sbi->s_partitions) {
2175 		udf_warn(sb, "No partition found (2)\n");
2176 		ret = -EINVAL;
2177 		goto error_out;
2178 	}
2179 
2180 	if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
2181 			UDF_PART_FLAG_READ_ONLY &&
2182 	    !(sb->s_flags & MS_RDONLY)) {
2183 		ret = -EACCES;
2184 		goto error_out;
2185 	}
2186 
2187 	if (udf_find_fileset(sb, &fileset, &rootdir)) {
2188 		udf_warn(sb, "No fileset found\n");
2189 		ret = -EINVAL;
2190 		goto error_out;
2191 	}
2192 
2193 	if (!silent) {
2194 		struct timestamp ts;
2195 		udf_time_to_disk_stamp(&ts, sbi->s_record_time);
2196 		udf_info("Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
2197 			 sbi->s_volume_ident,
2198 			 le16_to_cpu(ts.year), ts.month, ts.day,
2199 			 ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
2200 	}
2201 	if (!(sb->s_flags & MS_RDONLY))
2202 		udf_open_lvid(sb);
2203 
2204 	/* Assign the root inode */
2205 	/* Inodes are addressed by physical block number */
2206 	/* Perhaps that is not extensible enough, but it works for now */
2207 	inode = udf_iget(sb, &rootdir);
2208 	if (!inode) {
2209 		udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
2210 		       rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
2211 		ret = -EIO;
2212 		goto error_out;
2213 	}
2214 
2215 	/* Allocate a dentry for the root inode */
2216 	sb->s_root = d_make_root(inode);
2217 	if (!sb->s_root) {
2218 		udf_err(sb, "Couldn't allocate root dentry\n");
2219 		ret = -ENOMEM;
2220 		goto error_out;
2221 	}
2222 	sb->s_maxbytes = MAX_LFS_FILESIZE;
2223 	sb->s_max_links = UDF_MAX_LINKS;
2224 	return 0;
2225 
2226 error_out:
2227 	if (sbi->s_vat_inode)
2228 		iput(sbi->s_vat_inode);
2229 #ifdef CONFIG_UDF_NLS
2230 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
2231 		unload_nls(sbi->s_nls_map);
2232 #endif
2233 	if (!(sb->s_flags & MS_RDONLY))
2234 		udf_close_lvid(sb);
2235 	brelse(sbi->s_lvid_bh);
2236 	udf_sb_free_partitions(sb);
2237 	kfree(sbi);
2238 	sb->s_fs_info = NULL;
2239 
2240 	return ret;
2241 }
2242 
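/*
 * Printing helpers behind the udf_err() and udf_warn() macros: they prefix
 * each message with the device name and the calling function.
 */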
2243 void _udf_err(struct super_block *sb, const char *function,
2244 	      const char *fmt, ...)
2245 {
2246 	struct va_format vaf;
2247 	va_list args;
2248 
2249 	va_start(args, fmt);
2250 
2251 	vaf.fmt = fmt;
2252 	vaf.va = &args;
2253 
2254 	pr_err("error (device %s): %s: %pV", sb->s_id, function, &vaf);
2255 
2256 	va_end(args);
2257 }
2258 
2259 void _udf_warn(struct super_block *sb, const char *function,
2260 	       const char *fmt, ...)
2261 {
2262 	struct va_format vaf;
2263 	va_list args;
2264 
2265 	va_start(args, fmt);
2266 
2267 	vaf.fmt = fmt;
2268 	vaf.va = &args;
2269 
2270 	pr_warn("warning (device %s): %s: %pV", sb->s_id, function, &vaf);
2271 
2272 	va_end(args);
2273 }
2274 
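/*
 * Release everything set up at mount time: the VAT inode, the NLS mapping,
 * the LVID buffer (closing the LVID first on read-write mounts), the
 * partition maps, and the sb private info itself.
 */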
2275 static void udf_put_super(struct super_block *sb)
2276 {
2277 	struct udf_sb_info *sbi;
2278 
2279 	sbi = UDF_SB(sb);
2280 
2281 	if (sbi->s_vat_inode)
2282 		iput(sbi->s_vat_inode);
2283 #ifdef CONFIG_UDF_NLS
2284 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
2285 		unload_nls(sbi->s_nls_map);
2286 #endif
2287 	if (!(sb->s_flags & MS_RDONLY))
2288 		udf_close_lvid(sb);
2289 	brelse(sbi->s_lvid_bh);
2290 	udf_sb_free_partitions(sb);
2291 	kfree(sb->s_fs_info);
2292 	sb->s_fs_info = NULL;
2293 }
2294 
2295 static int udf_sync_fs(struct super_block *sb, int wait)
2296 {
2297 	struct udf_sb_info *sbi = UDF_SB(sb);
2298 
2299 	mutex_lock(&sbi->s_alloc_mutex);
2300 	if (sbi->s_lvid_dirty) {
2301 		/*
2302 		 * The block device will be synced later, so we don't have to
2303 		 * submit the buffer for I/O.
2304 		 */
2305 		mark_buffer_dirty(sbi->s_lvid_bh);
2306 		sbi->s_lvid_dirty = 0;
2307 	}
2308 	mutex_unlock(&sbi->s_alloc_mutex);
2309 
2310 	return 0;
2311 }
2312 
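/*
 * Fill in statfs data for the mounted partition.  Free blocks come from
 * udf_count_free(); since UDF has no preallocated inode table, the file
 * counts are estimates built from the LVID file/directory counters plus the
 * number of free blocks.
 */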
2313 static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
2314 {
2315 	struct super_block *sb = dentry->d_sb;
2316 	struct udf_sb_info *sbi = UDF_SB(sb);
2317 	struct logicalVolIntegrityDescImpUse *lvidiu;
2318 	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
2319 
2320 	lvidiu = udf_sb_lvidiu(sb);
2321 	buf->f_type = UDF_SUPER_MAGIC;
2322 	buf->f_bsize = sb->s_blocksize;
2323 	buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
2324 	buf->f_bfree = udf_count_free(sb);
2325 	buf->f_bavail = buf->f_bfree;
2326 	buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) +
2327 					  le32_to_cpu(lvidiu->numDirs)) : 0)
2328 			+ buf->f_bfree;
2329 	buf->f_ffree = buf->f_bfree;
2330 	buf->f_namelen = UDF_NAME_LEN - 2;
2331 	buf->f_fsid.val[0] = (u32)id;
2332 	buf->f_fsid.val[1] = (u32)(id >> 32);
2333 
2334 	return 0;
2335 }
2336 
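/*
 * Count free blocks recorded in a Space Bitmap Descriptor: read the
 * descriptor at bitmap->s_extPosition and sum the set bits over all the
 * blocks covered by numOfBytes, skipping the descriptor header in the
 * first block.
 */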
2337 static unsigned int udf_count_free_bitmap(struct super_block *sb,
2338 					  struct udf_bitmap *bitmap)
2339 {
2340 	struct buffer_head *bh = NULL;
2341 	unsigned int accum = 0;
2342 	int index;
2343 	int block = 0, newblock;
2344 	struct kernel_lb_addr loc;
2345 	uint32_t bytes;
2346 	uint8_t *ptr;
2347 	uint16_t ident;
2348 	struct spaceBitmapDesc *bm;
2349 
2350 	loc.logicalBlockNum = bitmap->s_extPosition;
2351 	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
2352 	bh = udf_read_ptagged(sb, &loc, 0, &ident);
2353 
2354 	if (!bh) {
2355 		udf_err(sb, "udf_count_free failed\n");
2356 		goto out;
2357 	} else if (ident != TAG_IDENT_SBD) {
2358 		brelse(bh);
2359 		udf_err(sb, "udf_count_free failed\n");
2360 		goto out;
2361 	}
2362 
2363 	bm = (struct spaceBitmapDesc *)bh->b_data;
2364 	bytes = le32_to_cpu(bm->numOfBytes);
2365 	index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
2366 	ptr = (uint8_t *)bh->b_data;
2367 
2368 	while (bytes > 0) {
2369 		u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index);
2370 		accum += bitmap_weight((const unsigned long *)(ptr + index),
2371 					cur_bytes * 8);
2372 		bytes -= cur_bytes;
2373 		if (bytes) {
2374 			brelse(bh);
2375 			newblock = udf_get_lb_pblock(sb, &loc, ++block);
2376 			bh = udf_tread(sb, newblock);
2377 			if (!bh) {
2378 				udf_debug("read failed\n");
2379 				goto out;
2380 			}
2381 			index = 0;
2382 			ptr = (uint8_t *)bh->b_data;
2383 		}
2384 	}
2385 	brelse(bh);
2386 out:
2387 	return accum;
2388 }
2389 
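/*
 * Count free blocks recorded in an Unallocated Space Entry: walk the extent
 * list of the table inode and sum the extent lengths in blocks.
 */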
2390 static unsigned int udf_count_free_table(struct super_block *sb,
2391 					 struct inode *table)
2392 {
2393 	unsigned int accum = 0;
2394 	uint32_t elen;
2395 	struct kernel_lb_addr eloc;
2396 	int8_t etype;
2397 	struct extent_position epos;
2398 
2399 	mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
2400 	epos.block = UDF_I(table)->i_location;
2401 	epos.offset = sizeof(struct unallocSpaceEntry);
2402 	epos.bh = NULL;
2403 
2404 	while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
2405 		accum += (elen >> table->i_sb->s_blocksize_bits);
2406 
2407 	brelse(epos.bh);
2408 	mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);
2409 
2410 	return accum;
2411 }
2412 
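/*
 * Free block count for statfs: prefer the cached value from the LVID free
 * space table (0xFFFFFFFF there means "not maintained"), then fall back to
 * counting the unallocated/freed space bitmaps, and finally the
 * unallocated/freed space tables of the partition.
 */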
2413 static unsigned int udf_count_free(struct super_block *sb)
2414 {
2415 	unsigned int accum = 0;
2416 	struct udf_sb_info *sbi;
2417 	struct udf_part_map *map;
2418 
2419 	sbi = UDF_SB(sb);
2420 	if (sbi->s_lvid_bh) {
2421 		struct logicalVolIntegrityDesc *lvid =
2422 			(struct logicalVolIntegrityDesc *)
2423 			sbi->s_lvid_bh->b_data;
2424 		if (le32_to_cpu(lvid->numOfPartitions) > sbi->s_partition) {
2425 			accum = le32_to_cpu(
2426 					lvid->freeSpaceTable[sbi->s_partition]);
2427 			if (accum == 0xFFFFFFFF)
2428 				accum = 0;
2429 		}
2430 	}
2431 
2432 	if (accum)
2433 		return accum;
2434 
2435 	map = &sbi->s_partmaps[sbi->s_partition];
2436 	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
2437 		accum += udf_count_free_bitmap(sb,
2438 					       map->s_uspace.s_bitmap);
2439 	}
2440 	if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
2441 		accum += udf_count_free_bitmap(sb,
2442 					       map->s_fspace.s_bitmap);
2443 	}
2444 	if (accum)
2445 		return accum;
2446 
2447 	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
2448 		accum += udf_count_free_table(sb,
2449 					      map->s_uspace.s_table);
2450 	}
2451 	if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
2452 		accum += udf_count_free_table(sb,
2453 					      map->s_fspace.s_table);
2454 	}
2455 
2456 	return accum;
2457 }
2458