xref: /linux/fs/udf/inode.c (revision de2fe5e07d58424bc286fff3fd3c1b0bf933cd58)
1 /*
2  * inode.c
3  *
4  * PURPOSE
5  *  Inode handling routines for the OSTA-UDF(tm) filesystem.
6  *
7  * COPYRIGHT
8  *  This file is distributed under the terms of the GNU General Public
9  *  License (GPL). Copies of the GPL can be obtained from:
10  *    ftp://prep.ai.mit.edu/pub/gnu/GPL
11  *  Each contributing author retains all rights to their own work.
12  *
13  *  (C) 1998 Dave Boynton
14  *  (C) 1998-2004 Ben Fennema
15  *  (C) 1999-2000 Stelias Computing Inc
16  *
17  * HISTORY
18  *
19  *  10/04/98 dgb  Added rudimentary directory functions
20  *  10/07/98      Fully working udf_block_map! It works!
21  *  11/25/98      bmap altered to better support extents
22  *  12/06/98 blf  partition support in udf_iget, udf_block_map and udf_read_inode
23  *  12/12/98      rewrote udf_block_map to handle next extents and descs across
24  *                block boundaries (which is not actually allowed)
25  *  12/20/98      added support for strategy 4096
26  *  03/07/99      rewrote udf_block_map (again)
27  *                New funcs, inode_bmap, udf_next_aext
28  *  04/19/99      Support for writing device EA's for major/minor #
29  */
30 
31 #include "udfdecl.h"
32 #include <linux/mm.h>
33 #include <linux/smp_lock.h>
34 #include <linux/module.h>
35 #include <linux/pagemap.h>
36 #include <linux/buffer_head.h>
37 #include <linux/writeback.h>
38 #include <linux/slab.h>
39 
40 #include "udf_i.h"
41 #include "udf_sb.h"
42 
MODULE_AUTHOR("Ben Fennema");
MODULE_DESCRIPTION("Universal Disk Format Filesystem");
MODULE_LICENSE("GPL");

/* Max number of extents handled at once by inode_getblk() and the
 * udf_{split,prealloc,merge,update}_extents() helpers below. */
#define EXTENT_MERGE_SIZE 5
48 
/* Forward declarations for the block-mapping and extent-manipulation
 * helpers defined later in this file. */
static mode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
static void udf_fill_inode(struct inode *, struct buffer_head *);
static struct buffer_head *inode_getblk(struct inode *, long, int *,
	long *, int *);
static int8_t udf_insert_aext(struct inode *, kernel_lb_addr, int,
	kernel_lb_addr, uint32_t, struct buffer_head *);
static void udf_split_extents(struct inode *, int *, int, int,
	kernel_long_ad [EXTENT_MERGE_SIZE], int *);
static void udf_prealloc_extents(struct inode *, int, int,
	 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
static void udf_merge_extents(struct inode *,
	 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
static void udf_update_extents(struct inode *,
	kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
	kernel_lb_addr, uint32_t, struct buffer_head **);
static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
66 
67 /*
68  * udf_delete_inode
69  *
70  * PURPOSE
71  *	Clean-up before the specified inode is destroyed.
72  *
73  * DESCRIPTION
74  *	This routine is called when the kernel destroys an inode structure
75  *	ie. when iput() finds i_count == 0.
76  *
77  * HISTORY
78  *	July 1, 1997 - Andrew E. Mileski
79  *	Written, tested, and released.
80  *
81  *  Called at the last iput() if i_nlink is zero.
82  */
void udf_delete_inode(struct inode * inode)
{
	/* Drop all cached pages before touching on-disk metadata. */
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	/* Release every data extent.  udf_truncate() takes the BKL
	 * itself, so it must run before our own lock_kernel(). */
	inode->i_size = 0;
	udf_truncate(inode);
	lock_kernel();

	/* Write the (now empty) inode back and free its on-disk slot. */
	udf_update_inode(inode, IS_SYNC(inode));
	udf_free_inode(inode);

	unlock_kernel();
	return;
no_delete:
	/* Bad inode: nothing on disk to clean up, just detach it. */
	clear_inode(inode);
}
102 
void udf_clear_inode(struct inode *inode)
{
	/* On a writable mount, return any preallocated blocks this inode
	 * still holds; the discard is done under the BKL. */
	if (!(inode->i_sb->s_flags & MS_RDONLY)) {
		lock_kernel();
		udf_discard_prealloc(inode);
		unlock_kernel();
	}

	/* Free the in-core copy of the extended attributes / allocation
	 * descriptors and clear the stale pointer. */
	kfree(UDF_I_DATA(inode));
	UDF_I_DATA(inode) = NULL;
}
114 
115 static int udf_writepage(struct page *page, struct writeback_control *wbc)
116 {
117 	return block_write_full_page(page, udf_get_block, wbc);
118 }
119 
120 static int udf_readpage(struct file *file, struct page *page)
121 {
122 	return block_read_full_page(page, udf_get_block);
123 }
124 
125 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
126 {
127 	return block_prepare_write(page, from, to, udf_get_block);
128 }
129 
130 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
131 {
132 	return generic_block_bmap(mapping,block,udf_get_block);
133 }
134 
/* Address-space operations for extent-mapped UDF files.  All data
 * paths resolve blocks through udf_get_block(). */
struct address_space_operations udf_aops = {
	.readpage		= udf_readpage,
	.writepage		= udf_writepage,
	.sync_page		= block_sync_page,
	.prepare_write		= udf_prepare_write,
	.commit_write		= generic_commit_write,
	.bmap			= udf_bmap,
};
143 
/*
 * Convert a file whose data is embedded in the ICB into a normal
 * extent-mapped file: copy the embedded payload into page 0 of the
 * page cache, erase the in-ICB copy, switch the allocation type to
 * short/long ADs, and write the page back through the regular path.
 * Note: the newsize/err parameters are not used by this function.
 */
void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
{
	struct page *page;
	char *kaddr;
	struct writeback_control udf_wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
	};

	/* from now on we have normal address_space methods */
	inode->i_data.a_ops = &udf_aops;

	if (!UDF_I_LENALLOC(inode))
	{
		/* No embedded data: just flip the allocation type. */
		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
		else
			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
		mark_inode_dirty(inode);
		return;
	}

	page = grab_cache_page(inode->i_mapping, 0);
	BUG_ON(!PageLocked(page));

	if (!PageUptodate(page))
	{
		/* Populate page 0 from the data stored after the extended
		 * attributes, zero-filling the rest of the page. */
		kaddr = kmap(page);
		memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
			PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
		memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
			UDF_I_LENALLOC(inode));
		flush_dcache_page(page);
		SetPageUptodate(page);
		kunmap(page);
	}
	/* Wipe the in-ICB copy and switch to extent-based allocation. */
	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
		UDF_I_LENALLOC(inode));
	UDF_I_LENALLOC(inode) = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
	else
		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;

	/* Write the page out through the (now extent-based) writepage
	 * path so the data gets mapped to real blocks. */
	inode->i_data.a_ops->writepage(page, &udf_wbc);
	page_cache_release(page);

	mark_inode_dirty(inode);
}
193 
/*
 * Convert an in-ICB directory into an extent-mapped one: allocate a
 * fresh directory block, replay every embedded file identifier into
 * it, then record that block as the directory's single extent.
 *
 * Returns the buffer head of the new directory block, or NULL on
 * failure / when the directory is empty (*block receives the new
 * logical block number; *err may be set by the block allocator).
 */
struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
{
	int newblock;
	struct buffer_head *sbh = NULL, *dbh = NULL;
	kernel_lb_addr bloc, eloc;
	uint32_t elen, extoffset;
	uint8_t alloctype;

	struct udf_fileident_bh sfibh, dfibh;
	loff_t f_pos = udf_ext0_offset(inode) >> 2;
	int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
	struct fileIdentDesc cfi, *sfi, *dfi;

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		alloctype = ICBTAG_FLAG_AD_SHORT;
	else
		alloctype = ICBTAG_FLAG_AD_LONG;

	/* An empty directory only needs its allocation type switched. */
	if (!inode->i_size)
	{
		UDF_I_ALLOCTYPE(inode) = alloctype;
		mark_inode_dirty(inode);
		return NULL;
	}

	/* alloc block, and copy data to it */
	*block = udf_new_block(inode->i_sb, inode,
		UDF_I_LOCATION(inode).partitionReferenceNum,
		UDF_I_LOCATION(inode).logicalBlockNum, err);

	if (!(*block))
		return NULL;
	newblock = udf_get_pblock(inode->i_sb, *block,
		UDF_I_LOCATION(inode).partitionReferenceNum, 0);
	if (!newblock)
		return NULL;
	dbh = udf_tgetblk(inode->i_sb, newblock);
	if (!dbh)
		return NULL;
	lock_buffer(dbh);
	memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
	set_buffer_uptodate(dbh);
	unlock_buffer(dbh);
	mark_buffer_dirty_inode(dbh, inode);

	/* Walk the embedded directory entries and rewrite each into the
	 * new block.  The allocation type is temporarily set back to
	 * IN_ICB around each udf_fileident_read() call, and restored to
	 * the target type for udf_write_fi(). */
	sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
	sbh = sfibh.sbh = sfibh.ebh = NULL;
	dfibh.soffset = dfibh.eoffset = 0;
	dfibh.sbh = dfibh.ebh = dbh;
	while ( (f_pos < size) )
	{
		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
		sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
		if (!sfi)
		{
			udf_release_data(dbh);
			return NULL;
		}
		UDF_I_ALLOCTYPE(inode) = alloctype;
		sfi->descTag.tagLocation = cpu_to_le32(*block);
		dfibh.soffset = dfibh.eoffset;
		dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
		dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
		if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
			sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
		{
			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
			udf_release_data(dbh);
			return NULL;
		}
	}
	mark_buffer_dirty_inode(dbh, inode);

	/* Clear the embedded data and record the new block as the
	 * directory's single allocation extent. */
	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
	UDF_I_LENALLOC(inode) = 0;
	bloc = UDF_I_LOCATION(inode);
	eloc.logicalBlockNum = *block;
	eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
	elen = inode->i_size;
	UDF_I_LENEXTENTS(inode) = elen;
	extoffset = udf_file_entry_alloc_offset(inode);
	udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
	/* UniqueID stuff */

	udf_release_data(sbh);
	mark_inode_dirty(inode);
	return dbh;
}
282 
283 static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
284 {
285 	int err, new;
286 	struct buffer_head *bh;
287 	unsigned long phys;
288 
289 	if (!create)
290 	{
291 		phys = udf_block_map(inode, block);
292 		if (phys)
293 			map_bh(bh_result, inode->i_sb, phys);
294 		return 0;
295 	}
296 
297 	err = -EIO;
298 	new = 0;
299 	bh = NULL;
300 
301 	lock_kernel();
302 
303 	if (block < 0)
304 		goto abort_negative;
305 
306 	if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
307 	{
308 		UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
309 		UDF_I_NEXT_ALLOC_GOAL(inode) ++;
310 	}
311 
312 	err = 0;
313 
314 	bh = inode_getblk(inode, block, &err, &phys, &new);
315 	if (bh)
316 		BUG();
317 	if (err)
318 		goto abort;
319 	if (!phys)
320 		BUG();
321 
322 	if (new)
323 		set_buffer_new(bh_result);
324 	map_bh(bh_result, inode->i_sb, phys);
325 abort:
326 	unlock_kernel();
327 	return err;
328 
329 abort_negative:
330 	udf_warning(inode->i_sb, "udf_get_block", "block < 0");
331 	goto abort;
332 }
333 
334 static struct buffer_head *
335 udf_getblk(struct inode *inode, long block, int create, int *err)
336 {
337 	struct buffer_head dummy;
338 
339 	dummy.b_state = 0;
340 	dummy.b_blocknr = -1000;
341 	*err = udf_get_block(inode, block, &dummy, create);
342 	if (!*err && buffer_mapped(&dummy))
343 	{
344 		struct buffer_head *bh;
345 		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
346 		if (buffer_new(&dummy))
347 		{
348 			lock_buffer(bh);
349 			memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
350 			set_buffer_uptodate(bh);
351 			unlock_buffer(bh);
352 			mark_buffer_dirty_inode(bh, inode);
353 		}
354 		return bh;
355 	}
356 	return NULL;
357 }
358 
/*
 * Core block-mapping workhorse: locate (or allocate) the physical
 * block backing file block @block.
 *
 * On success *phys receives the physical block number, *new is set
 * when a block was freshly allocated, and NULL is returned (this
 * function never hands a buffer head back to the caller; see the
 * BUG() check in udf_get_block()).  On allocation failure *err is set
 * and NULL is returned with *phys untouched.
 */
static struct buffer_head * inode_getblk(struct inode * inode, long block,
	int *err, long *phys, int *new)
{
	struct buffer_head *pbh = NULL, *cbh = NULL, *nbh = NULL, *result = NULL;
	kernel_long_ad laarr[EXTENT_MERGE_SIZE];
	uint32_t pextoffset = 0, cextoffset = 0, nextoffset = 0;
	int count = 0, startnum = 0, endnum = 0;
	uint32_t elen = 0;
	kernel_lb_addr eloc, pbloc, cbloc, nbloc;
	int c = 1;
	uint64_t lbcount = 0, b_off = 0;
	uint32_t newblocknum, newblock, offset = 0;
	int8_t etype;
	int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
	char lastblock = 0;

	pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
	b_off = (uint64_t)block << inode->i_sb->s_blocksize_bits;
	pbloc = cbloc = nbloc = UDF_I_LOCATION(inode);

	/* find the extent which contains the block we are looking for.
       alternate between laarr[0] and laarr[1] for locations of the
       current extent, and the previous extent */
	do
	{
		/* Shift the previous/current/next buffer-head, location
		 * and offset triples one step along the extent list. */
		if (pbh != cbh)
		{
			udf_release_data(pbh);
			atomic_inc(&cbh->b_count);
			pbh = cbh;
		}
		if (cbh != nbh)
		{
			udf_release_data(cbh);
			atomic_inc(&nbh->b_count);
			cbh = nbh;
		}

		lbcount += elen;

		pbloc = cbloc;
		cbloc = nbloc;

		pextoffset = cextoffset;
		cextoffset = nextoffset;

		if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) == -1)
			break;

		c = !c;

		laarr[c].extLength = (etype << 30) | elen;
		laarr[c].extLocation = eloc;

		/* Track the end of the last real extent as an allocation
		 * goal for any new block we may need. */
		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			pgoal = eloc.logicalBlockNum +
				((elen + inode->i_sb->s_blocksize - 1) >>
				inode->i_sb->s_blocksize_bits);

		count ++;
	} while (lbcount + elen <= b_off);

	/* offset = block index of the target within the found extent. */
	b_off -= lbcount;
	offset = b_off >> inode->i_sb->s_blocksize_bits;

	/* if the extent is allocated and recorded, return the block
       if the extent is not a multiple of the blocksize, round up */

	if (etype == (EXT_RECORDED_ALLOCATED >> 30))
	{
		if (elen & (inode->i_sb->s_blocksize - 1))
		{
			elen = EXT_RECORDED_ALLOCATED |
				((elen + inode->i_sb->s_blocksize - 1) &
				~(inode->i_sb->s_blocksize - 1));
			etype = udf_write_aext(inode, nbloc, &cextoffset, eloc, elen, nbh, 1);
		}
		udf_release_data(pbh);
		udf_release_data(cbh);
		udf_release_data(nbh);
		newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
		*phys = newblock;
		return NULL;
	}

	if (etype == -1)
	{
		/* Ran off the end of the extent list: we are extending the
		 * file, so append an unrecorded extent covering the target. */
		endnum = startnum = ((count > 1) ? 1 : count);
		if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
		{
			laarr[c].extLength =
				(laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
				(((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) &
				~(inode->i_sb->s_blocksize - 1));
			UDF_I_LENEXTENTS(inode) =
				(UDF_I_LENEXTENTS(inode) + inode->i_sb->s_blocksize - 1) &
					~(inode->i_sb->s_blocksize - 1);
		}
		c = !c;
		laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
			((offset + 1) << inode->i_sb->s_blocksize_bits);
		memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
		count ++;
		endnum ++;
		lastblock = 1;
	}
	else
		endnum = startnum = ((count > 2) ? 2 : count);

	/* if the current extent is in position 0, swap it with the previous */
	if (!c && count != 1)
	{
		laarr[2] = laarr[0];
		laarr[0] = laarr[1];
		laarr[1] = laarr[2];
		c = 1;
	}

	/* if the current block is located in a extent, read the next extent */
	if (etype != -1)
	{
		if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 0)) != -1)
		{
			laarr[c+1].extLength = (etype << 30) | elen;
			laarr[c+1].extLocation = eloc;
			count ++;
			startnum ++;
			endnum ++;
		}
		else
			lastblock = 1;
	}
	udf_release_data(cbh);
	udf_release_data(nbh);

	/* if the current extent is not recorded but allocated, get the
		block in the extent corresponding to the requested block */
	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
	else /* otherwise, allocate a new block */
	{
		/* Prefer the recorded goal for sequential writes, then the
		 * end of the last extent, then just past the inode itself. */
		if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
			goal = UDF_I_NEXT_ALLOC_GOAL(inode);

		if (!goal)
		{
			if (!(goal = pgoal))
				goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
		}

		if (!(newblocknum = udf_new_block(inode->i_sb, inode,
			UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
		{
			udf_release_data(pbh);
			*err = -ENOSPC;
			return NULL;
		}
		UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
	}

	/* if the extent the requsted block is located in contains multiple blocks,
       split the extent into at most three extents. blocks prior to requested
       block, requested block, and blocks after requested block */
	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);

#ifdef UDF_PREALLOCATE
	/* preallocate blocks */
	udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
#endif

	/* merge any continuous blocks in laarr */
	udf_merge_extents(inode, laarr, &endnum);

	/* write back the new extents, inserting new extents if the new number
       of extents is greater than the old number, and deleting extents if
       the new number of extents is less than the old number */
	udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);

	udf_release_data(pbh);

	if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
		UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
	{
		return NULL;
	}
	*phys = newblock;
	*err = 0;
	*new = 1;
	/* Remember where we allocated so the next sequential write can
	 * reuse the goal. */
	UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
	UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
	inode->i_ctime = current_fs_time(inode->i_sb);

	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	/* result is always NULL here; see the function comment. */
	return result;
}
558 
/*
 * Split the (un)recorded extent at laarr[*c] around the requested
 * block: up to one extent for the blocks before @offset, one single
 * recorded+allocated extent for the new block itself, and one extent
 * for the blocks after it.  *c and *endnum are updated to reflect the
 * new layout of laarr.
 */
static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
		(laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
	{
		int curr = *c;
		/* Extent length in blocks, rounded up. */
		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
		int8_t etype = (laarr[curr].extLength >> 30);

		/* Shift following entries to make room for the split
		 * (none needed for a single-block extent). */
		if (blen == 1)
			;
		else if (!offset || blen == offset + 1)
		{
			laarr[curr+2] = laarr[curr+1];
			laarr[curr+1] = laarr[curr];
		}
		else
		{
			laarr[curr+3] = laarr[curr+1];
			laarr[curr+2] = laarr[curr+1] = laarr[curr];
		}

		if (offset)
		{
			/* Head extent: the blocks before the requested one.
			 * If they were allocated (but unrecorded), free them
			 * and downgrade the head to not-allocated. */
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
			{
				udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
				laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
					(offset << inode->i_sb->s_blocksize_bits);
				laarr[curr].extLocation.logicalBlockNum = 0;
				laarr[curr].extLocation.partitionReferenceNum = 0;
			}
			else
				laarr[curr].extLength = (etype << 30) |
					(offset << inode->i_sb->s_blocksize_bits);
			curr ++;
			(*c) ++;
			(*endnum) ++;
		}

		/* Middle extent: the requested block, now recorded+allocated. */
		laarr[curr].extLocation.logicalBlockNum = newblocknum;
		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			laarr[curr].extLocation.partitionReferenceNum =
				UDF_I_LOCATION(inode).partitionReferenceNum;
		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
			inode->i_sb->s_blocksize;
		curr ++;

		/* Tail extent: any blocks after the requested one. */
		if (blen != offset + 1)
		{
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
				laarr[curr].extLocation.logicalBlockNum += (offset + 1);
			laarr[curr].extLength = (etype << 30) |
				((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
			curr ++;
			(*endnum) ++;
		}
	}
}
620 
/*
 * Preallocate blocks following the just-allocated extent at laarr[c]
 * (up to UDF_DEFAULT_PREALLOC_BLOCKS), extending an adjacent
 * not-recorded-allocated extent when possible and trimming the
 * not-recorded extents that the preallocation consumes.  *endnum is
 * adjusted as entries are inserted or removed.
 */
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
	 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	int start, length = 0, currlength = 0, i;

	/* Decide where preallocation starts: at laarr[c] itself, or at
	 * an existing not-recorded-allocated extent right after it. */
	if (*endnum >= (c+1))
	{
		if (!lastblock)
			return;
		else
			start = c;
	}
	else
	{
		if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		{
			start = c+1;
			length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
		}
		else
			start = c;
	}

	/* Count how many blocks worth of not-recorded extents (plus the
	 * default chunk at end-of-file) could absorb a preallocation. */
	for (i=start+1; i<=*endnum; i++)
	{
		if (i == *endnum)
		{
			if (lastblock)
				length += UDF_DEFAULT_PREALLOC_BLOCKS;
		}
		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
		else
			break;
	}

	if (length)
	{
		/* Ask the allocator for contiguous blocks right after the
		 * extent at @start. */
		int next = laarr[start].extLocation.logicalBlockNum +
			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
			laarr[start].extLocation.partitionReferenceNum,
			next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
				UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);

		if (numalloc)
		{
			/* Either grow the existing allocated extent or insert
			 * a fresh not-recorded-allocated one after laarr[c]. */
			if (start == (c+1))
				laarr[start].extLength +=
					(numalloc << inode->i_sb->s_blocksize_bits);
			else
			{
				memmove(&laarr[c+2], &laarr[c+1],
					sizeof(long_ad) * (*endnum - (c+1)));
				(*endnum) ++;
				laarr[c+1].extLocation.logicalBlockNum = next;
				laarr[c+1].extLocation.partitionReferenceNum =
					laarr[c].extLocation.partitionReferenceNum;
				laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
					(numalloc << inode->i_sb->s_blocksize_bits);
				start = c+1;
			}

			/* Shrink or drop the not-recorded extents that the
			 * preallocated blocks now cover. */
			for (i=start+1; numalloc && i<*endnum; i++)
			{
				int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;

				if (elen > numalloc)
				{
					laarr[i].extLength -=
						(numalloc << inode->i_sb->s_blocksize_bits);
					numalloc = 0;
				}
				else
				{
					numalloc -= elen;
					if (*endnum > (i+1))
						memmove(&laarr[i], &laarr[i+1],
							sizeof(long_ad) * (*endnum - (i+1)));
					i --;
					(*endnum) --;
				}
			}
			UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
		}
	}
}
712 
/*
 * Coalesce adjacent compatible extents in laarr: merge same-type
 * neighbours that are contiguous on disk (splitting again only when a
 * merged length would overflow UDF_EXTENT_LENGTH_MASK), and convert
 * leftover not-recorded-allocated extents back to not-allocated,
 * freeing their blocks.  *endnum shrinks as entries are merged away.
 */
static void udf_merge_extents(struct inode *inode,
	 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	int i;

	for (i=0; i<(*endnum-1); i++)
	{
		if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
		{
			/* Same type: mergeable if both are unrecorded holes or
			 * if the two extents are physically contiguous. */
			if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
				((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
				(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
			{
				if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
				{
					/* Combined length would overflow the extent
					 * length field: grow laarr[i] to the maximum
					 * and move the remainder into laarr[i+1]. */
					laarr[i+1].extLength = (laarr[i+1].extLength -
						(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
						UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
					laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
						(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
					laarr[i+1].extLocation.logicalBlockNum =
						laarr[i].extLocation.logicalBlockNum +
						((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
							inode->i_sb->s_blocksize_bits);
				}
				else
				{
					/* Full merge: fold laarr[i+1] into laarr[i] and
					 * close the gap in the array. */
					laarr[i].extLength = laarr[i+1].extLength +
						(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
						inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
					if (*endnum > (i+2))
						memmove(&laarr[i+1], &laarr[i+2],
							sizeof(long_ad) * (*endnum - (i+2)));
					i --;
					(*endnum) --;
				}
			}
		}
		else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
			((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
		{
			/* Allocated-but-unrecorded followed by a hole: free the
			 * allocated blocks and merge both as a hole. */
			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
			laarr[i].extLocation.logicalBlockNum = 0;
			laarr[i].extLocation.partitionReferenceNum = 0;

			if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
			{
				laarr[i+1].extLength = (laarr[i+1].extLength -
					(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
				laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
					(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
			}
			else
			{
				laarr[i].extLength = laarr[i+1].extLength +
					(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
				if (*endnum > (i+2))
					memmove(&laarr[i+1], &laarr[i+2],
						sizeof(long_ad) * (*endnum - (i+2)));
				i --;
				(*endnum) --;
			}
		}
		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		{
			/* Lone allocated-but-unrecorded extent: give the blocks
			 * back and turn it into a plain hole. */
			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
			       inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
			laarr[i].extLocation.logicalBlockNum = 0;
			laarr[i].extLocation.partitionReferenceNum = 0;
			laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
				EXT_NOT_RECORDED_NOT_ALLOCATED;
		}
	}
}
797 
/*
 * Write the reworked extent list in laarr back to the on-disk
 * allocation descriptors starting at (pbloc, pextoffset): delete
 * surplus old extents when the list shrank (startnum > endnum),
 * insert placeholders when it grew (startnum < endnum), then
 * overwrite the remaining slots in place.
 */
static void udf_update_extents(struct inode *inode,
	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
	kernel_lb_addr pbloc, uint32_t pextoffset, struct buffer_head **pbh)
{
	int start = 0, i;
	kernel_lb_addr tmploc;
	uint32_t tmplen;

	if (startnum > endnum)
	{
		/* List shrank: remove the extra on-disk descriptors. */
		for (i=0; i<(startnum-endnum); i++)
		{
			udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
				laarr[i].extLength, *pbh);
		}
	}
	else if (startnum < endnum)
	{
		/* List grew: insert the new leading descriptors, advancing
		 * the write position past each one. */
		for (i=0; i<(endnum-startnum); i++)
		{
			udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
				laarr[i].extLength, *pbh);
			udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
				&laarr[i].extLength, pbh, 1);
			start ++;
		}
	}

	/* Overwrite the remaining descriptors one slot at a time. */
	for (i=start; i<endnum; i++)
	{
		udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
		udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
			laarr[i].extLength, *pbh, 1);
	}
}
833 
834 struct buffer_head * udf_bread(struct inode * inode, int block,
835 	int create, int * err)
836 {
837 	struct buffer_head * bh = NULL;
838 
839 	bh = udf_getblk(inode, block, create, err);
840 	if (!bh)
841 		return NULL;
842 
843 	if (buffer_uptodate(bh))
844 		return bh;
845 	ll_rw_block(READ, 1, &bh);
846 	wait_on_buffer(bh);
847 	if (buffer_uptodate(bh))
848 		return bh;
849 	brelse(bh);
850 	*err = -EIO;
851 	return NULL;
852 }
853 
/*
 * Truncate (or extend) the inode's data to i_size.  Handles both
 * in-ICB inodes (embedded data is zeroed / expanded out of the ICB)
 * and extent-mapped ones (page + extents truncated).  Runs under the
 * big kernel lock.
 */
void udf_truncate(struct inode * inode)
{
	int offset;
	int err;

	/* Only regular files, directories and symlinks carry data. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
			S_ISLNK(inode->i_mode)))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	lock_kernel();
	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
	{
		if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
			inode->i_size))
		{
			/* New size no longer fits in the ICB: convert to
			 * extent-mapped.  If the conversion did not happen,
			 * clamp i_size to what actually fits and bail out. */
			udf_expand_file_adinicb(inode, inode->i_size, &err);
			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
			{
				inode->i_size = UDF_I_LENALLOC(inode);
				unlock_kernel();
				return;
			}
			else
				udf_truncate_extents(inode);
		}
		else
		{
			/* Still fits in the ICB: zero the tail of the
			 * embedded area beyond the new size. */
			offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
			memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
			UDF_I_LENALLOC(inode) = inode->i_size;
		}
	}
	else
	{
		/* Extent-mapped: zero the partial last page, then drop
		 * the extents beyond the new size. */
		block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
		udf_truncate_extents(inode);
	}

	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
	if (IS_SYNC(inode))
		udf_sync_inode (inode);
	else
		mark_inode_dirty(inode);
	unlock_kernel();
}
901 
/*
 * Read the (extended) file entry for the inode's on-disk location,
 * following strategy-4096 indirect ICBs (via recursion on the
 * redirected location) before handing the descriptor to
 * udf_fill_inode().  On any failure the inode is marked bad.
 */
static void
__udf_read_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	uint16_t ident;

	/*
	 * Set defaults, but the inode is still incomplete!
	 * Note: get_new_inode() sets the following on a new inode:
	 *      i_sb = sb
	 *      i_no = ino
	 *      i_flags = sb->s_flags
	 *      i_state = 0
	 * clean_inode(): zero fills and sets
	 *      i_count = 1
	 *      i_nlink = 1
	 *      i_op = NULL;
	 */
	inode->i_blksize = PAGE_SIZE;

	bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);

	if (!bh)
	{
		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
			inode->i_ino);
		make_bad_inode(inode);
		return;
	}

	/* Only (extended) file entries and unallocated-space entries are
	 * acceptable descriptors here. */
	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
		ident != TAG_IDENT_USE)
	{
		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
			inode->i_ino, ident);
		udf_release_data(bh);
		make_bad_inode(inode);
		return;
	}

	fe = (struct fileEntry *)bh->b_data;

	if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
	{
		/* Strategy 4096: an indirect entry may redirect us to the
		 * most recent copy of the file entry; if so, retarget the
		 * inode location and re-read from scratch. */
		struct buffer_head *ibh = NULL, *nbh = NULL;
		struct indirectEntry *ie;

		ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
		if (ident == TAG_IDENT_IE)
		{
			if (ibh)
			{
				kernel_lb_addr loc;
				ie = (struct indirectEntry *)ibh->b_data;

				loc = lelb_to_cpu(ie->indirectICB.extLocation);

				if (ie->indirectICB.extLength &&
					(nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
				{
					if (ident == TAG_IDENT_FE ||
						ident == TAG_IDENT_EFE)
					{
						memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
						udf_release_data(bh);
						udf_release_data(ibh);
						udf_release_data(nbh);
						__udf_read_inode(inode);
						return;
					}
					else
					{
						udf_release_data(nbh);
						udf_release_data(ibh);
					}
				}
				else
					udf_release_data(ibh);
			}
		}
		else
			udf_release_data(ibh);
	}
	else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
	{
		printk(KERN_ERR "udf: unsupported strategy type: %d\n",
			le16_to_cpu(fe->icbTag.strategyType));
		udf_release_data(bh);
		make_bad_inode(inode);
		return;
	}
	udf_fill_inode(inode, bh);
	udf_release_data(bh);
}
997 
/*
 * udf_fill_inode
 *
 * Decode the on-disc file entry / extended file entry held in bh into
 * the in-core inode: ownership, permissions, link count, sizes,
 * timestamps and the extended-attribute/allocation-descriptor area,
 * then hook up the inode/file/address-space operations appropriate for
 * the file type.  An unknown file type marks the inode bad.
 */
static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
{
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	time_t convtime;
	long convtime_usec;
	int offset;

	/* One block backs both layouts; the descriptor tag decides which. */
	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (le16_to_cpu(fe->icbTag.strategyType) == 4)
		UDF_I_STRAT4096(inode) = 0;
	else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
		UDF_I_STRAT4096(inode) = 1;

	UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
	UDF_I_UNIQUE(inode) = 0;
	UDF_I_LENEATTR(inode) = 0;
	UDF_I_LENEXTENTS(inode) = 0;
	UDF_I_LENALLOC(inode) = 0;
	UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
	UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
	/*
	 * Keep a private copy of everything past the fixed-size part of the
	 * descriptor (extended attributes + allocation descriptors).
	 * NOTE(review): the kmalloc results below are not checked; an
	 * allocation failure would oops in the following memcpy.
	 */
	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
	{
		UDF_I_EFE(inode) = 1;
		UDF_I_USE(inode) = 0;
		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
	}
	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
	{
		UDF_I_EFE(inode) = 0;
		UDF_I_USE(inode) = 0;
		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
	}
	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
	{
		/* Unallocated space entry: allocation descriptors only, no
		 * file attributes -- remember the flag and return early. */
		UDF_I_EFE(inode) = 0;
		UDF_I_USE(inode) = 1;
		UDF_I_LENALLOC(inode) =
			le32_to_cpu(
				((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
		return;
	}

	/* -1 (invalid) or mount-forced ids fall back to the mount options. */
	inode->i_uid = le32_to_cpu(fe->uid);
	if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
					UDF_FLAG_UID_IGNORE))
		inode->i_uid = UDF_SB(inode->i_sb)->s_uid;

	inode->i_gid = le32_to_cpu(fe->gid);
	if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
					UDF_FLAG_GID_IGNORE))
		inode->i_gid = UDF_SB(inode->i_sb)->s_gid;

	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
	if (!inode->i_nlink)
		inode->i_nlink = 1;

	inode->i_size = le64_to_cpu(fe->informationLength);
	UDF_I_LENEXTENTS(inode) = inode->i_size;

	inode->i_mode = udf_convert_permissions(fe);
	inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;

	/* FE and EFE keep their common fields at different offsets, hence
	 * the duplicated decoding below.  Timestamps that cannot be
	 * converted fall back to the volume recording time. */
	if (UDF_I_EFE(inode) == 0)
	{
		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
			(inode->i_sb->s_blocksize_bits - 9);

		if ( udf_stamp_to_time(&convtime, &convtime_usec,
			lets_to_cpu(fe->accessTime)) )
		{
			inode->i_atime.tv_sec = convtime;
			inode->i_atime.tv_nsec = convtime_usec * 1000;
		}
		else
		{
			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		if ( udf_stamp_to_time(&convtime, &convtime_usec,
			lets_to_cpu(fe->modificationTime)) )
		{
			inode->i_mtime.tv_sec = convtime;
			inode->i_mtime.tv_nsec = convtime_usec * 1000;
		}
		else
		{
			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		if ( udf_stamp_to_time(&convtime, &convtime_usec,
			lets_to_cpu(fe->attrTime)) )
		{
			inode->i_ctime.tv_sec = convtime;
			inode->i_ctime.tv_nsec = convtime_usec * 1000;
		}
		else
		{
			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
		UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
		UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
		offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
	}
	else
	{
		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
			(inode->i_sb->s_blocksize_bits - 9);

		if ( udf_stamp_to_time(&convtime, &convtime_usec,
			lets_to_cpu(efe->accessTime)) )
		{
			inode->i_atime.tv_sec = convtime;
			inode->i_atime.tv_nsec = convtime_usec * 1000;
		}
		else
		{
			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		if ( udf_stamp_to_time(&convtime, &convtime_usec,
			lets_to_cpu(efe->modificationTime)) )
		{
			inode->i_mtime.tv_sec = convtime;
			inode->i_mtime.tv_nsec = convtime_usec * 1000;
		}
		else
		{
			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		/* EFE additionally records a creation time. */
		if ( udf_stamp_to_time(&convtime, &convtime_usec,
			lets_to_cpu(efe->createTime)) )
		{
			UDF_I_CRTIME(inode).tv_sec = convtime;
			UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
		}
		else
		{
			UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
		}

		if ( udf_stamp_to_time(&convtime, &convtime_usec,
			lets_to_cpu(efe->attrTime)) )
		{
			inode->i_ctime.tv_sec = convtime;
			inode->i_ctime.tv_nsec = convtime_usec * 1000;
		}
		else
		{
			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
		UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
		UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
		offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
	}

	/* Wire up operations per on-disc file type. */
	switch (fe->icbTag.fileType)
	{
		case ICBTAG_FILE_TYPE_DIRECTORY:
		{
			inode->i_op = &udf_dir_inode_operations;
			inode->i_fop = &udf_dir_operations;
			inode->i_mode |= S_IFDIR;
			/* account the "." self-link not stored on disc */
			inode->i_nlink ++;
			break;
		}
		case ICBTAG_FILE_TYPE_REALTIME:
		case ICBTAG_FILE_TYPE_REGULAR:
		case ICBTAG_FILE_TYPE_UNDEF:
		{
			/* in-ICB files need the special address space ops */
			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
				inode->i_data.a_ops = &udf_adinicb_aops;
			else
				inode->i_data.a_ops = &udf_aops;
			inode->i_op = &udf_file_inode_operations;
			inode->i_fop = &udf_file_operations;
			inode->i_mode |= S_IFREG;
			break;
		}
		case ICBTAG_FILE_TYPE_BLOCK:
		{
			inode->i_mode |= S_IFBLK;
			break;
		}
		case ICBTAG_FILE_TYPE_CHAR:
		{
			inode->i_mode |= S_IFCHR;
			break;
		}
		case ICBTAG_FILE_TYPE_FIFO:
		{
			init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
			break;
		}
		case ICBTAG_FILE_TYPE_SOCKET:
		{
			init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
			break;
		}
		case ICBTAG_FILE_TYPE_SYMLINK:
		{
			inode->i_data.a_ops = &udf_symlink_aops;
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mode = S_IFLNK|S_IRWXUGO;
			break;
		}
		default:
		{
			printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
				inode->i_ino, fe->icbTag.fileType);
			make_bad_inode(inode);
			return;
		}
	}
	/* Device nodes keep major/minor in a deviceSpec extended attribute
	 * (attribute type 12); without it the inode is unusable. */
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
	{
		struct deviceSpec *dsea =
			(struct deviceSpec *)
				udf_get_extendedattr(inode, 12, 1);

		if (dsea)
		{
			init_special_inode(inode, inode->i_mode, MKDEV(
				le32_to_cpu(dsea->majorDeviceIdent),
				le32_to_cpu(dsea->minorDeviceIdent)));
			/* Developer ID ??? */
		}
		else
		{
			make_bad_inode(inode);
		}
	}
}
1242 
1243 static mode_t
1244 udf_convert_permissions(struct fileEntry *fe)
1245 {
1246 	mode_t mode;
1247 	uint32_t permissions;
1248 	uint32_t flags;
1249 
1250 	permissions = le32_to_cpu(fe->permissions);
1251 	flags = le16_to_cpu(fe->icbTag.flags);
1252 
1253 	mode =	(( permissions      ) & S_IRWXO) |
1254 		(( permissions >> 2 ) & S_IRWXG) |
1255 		(( permissions >> 4 ) & S_IRWXU) |
1256 		(( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1257 		(( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1258 		(( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1259 
1260 	return mode;
1261 }
1262 
1263 /*
1264  * udf_write_inode
1265  *
1266  * PURPOSE
1267  *	Write out the specified inode.
1268  *
1269  * DESCRIPTION
1270  *	This routine is called whenever an inode is synced.
1271  *	Currently this routine is just a placeholder.
1272  *
1273  * HISTORY
1274  *	July 1, 1997 - Andrew E. Mileski
1275  *	Written, tested, and released.
1276  */
1277 
1278 int udf_write_inode(struct inode * inode, int sync)
1279 {
1280 	int ret;
1281 	lock_kernel();
1282 	ret = udf_update_inode(inode, sync);
1283 	unlock_kernel();
1284 	return ret;
1285 }
1286 
/* Write the inode to disc and wait for the write to complete. */
int udf_sync_inode(struct inode * inode)
{
	return udf_update_inode(inode, 1);
}
1291 
1292 static int
1293 udf_update_inode(struct inode *inode, int do_sync)
1294 {
1295 	struct buffer_head *bh = NULL;
1296 	struct fileEntry *fe;
1297 	struct extendedFileEntry *efe;
1298 	uint32_t udfperms;
1299 	uint16_t icbflags;
1300 	uint16_t crclen;
1301 	int i;
1302 	kernel_timestamp cpu_time;
1303 	int err = 0;
1304 
1305 	bh = udf_tread(inode->i_sb,
1306 		udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1307 
1308 	if (!bh)
1309 	{
1310 		udf_debug("bread failure\n");
1311 		return -EIO;
1312 	}
1313 
1314 	memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
1315 
1316 	fe = (struct fileEntry *)bh->b_data;
1317 	efe = (struct extendedFileEntry *)bh->b_data;
1318 
1319 	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1320 	{
1321 		struct unallocSpaceEntry *use =
1322 			(struct unallocSpaceEntry *)bh->b_data;
1323 
1324 		use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1325 		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1326 		crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1327 			sizeof(tag);
1328 		use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1329 		use->descTag.descCRCLength = cpu_to_le16(crclen);
1330 		use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
1331 
1332 		use->descTag.tagChecksum = 0;
1333 		for (i=0; i<16; i++)
1334 			if (i != 4)
1335 				use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1336 
1337 		mark_buffer_dirty(bh);
1338 		udf_release_data(bh);
1339 		return err;
1340 	}
1341 
1342 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1343 		fe->uid = cpu_to_le32(-1);
1344 	else fe->uid = cpu_to_le32(inode->i_uid);
1345 
1346 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1347 		fe->gid = cpu_to_le32(-1);
1348 	else fe->gid = cpu_to_le32(inode->i_gid);
1349 
1350 	udfperms =	((inode->i_mode & S_IRWXO)     ) |
1351 			((inode->i_mode & S_IRWXG) << 2) |
1352 			((inode->i_mode & S_IRWXU) << 4);
1353 
1354 	udfperms |=	(le32_to_cpu(fe->permissions) &
1355 			(FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1356 			 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1357 			 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1358 	fe->permissions = cpu_to_le32(udfperms);
1359 
1360 	if (S_ISDIR(inode->i_mode))
1361 		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1362 	else
1363 		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1364 
1365 	fe->informationLength = cpu_to_le64(inode->i_size);
1366 
1367 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1368 	{
1369 		regid *eid;
1370 		struct deviceSpec *dsea =
1371 			(struct deviceSpec *)
1372 				udf_get_extendedattr(inode, 12, 1);
1373 
1374 		if (!dsea)
1375 		{
1376 			dsea = (struct deviceSpec *)
1377 				udf_add_extendedattr(inode,
1378 					sizeof(struct deviceSpec) +
1379 					sizeof(regid), 12, 0x3);
1380 			dsea->attrType = cpu_to_le32(12);
1381 			dsea->attrSubtype = 1;
1382 			dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
1383 				sizeof(regid));
1384 			dsea->impUseLength = cpu_to_le32(sizeof(regid));
1385 		}
1386 		eid = (regid *)dsea->impUse;
1387 		memset(eid, 0, sizeof(regid));
1388 		strcpy(eid->ident, UDF_ID_DEVELOPER);
1389 		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1390 		eid->identSuffix[1] = UDF_OS_ID_LINUX;
1391 		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1392 		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
1393 	}
1394 
1395 	if (UDF_I_EFE(inode) == 0)
1396 	{
1397 		memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1398 		fe->logicalBlocksRecorded = cpu_to_le64(
1399 			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1400 			(inode->i_sb->s_blocksize_bits - 9));
1401 
1402 		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1403 			fe->accessTime = cpu_to_lets(cpu_time);
1404 		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1405 			fe->modificationTime = cpu_to_lets(cpu_time);
1406 		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1407 			fe->attrTime = cpu_to_lets(cpu_time);
1408 		memset(&(fe->impIdent), 0, sizeof(regid));
1409 		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1410 		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1411 		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1412 		fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1413 		fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1414 		fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1415 		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1416 		crclen = sizeof(struct fileEntry);
1417 	}
1418 	else
1419 	{
1420 		memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1421 		efe->objectSize = cpu_to_le64(inode->i_size);
1422 		efe->logicalBlocksRecorded = cpu_to_le64(
1423 			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1424 			(inode->i_sb->s_blocksize_bits - 9));
1425 
1426 		if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
1427 			(UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
1428 			 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
1429 		{
1430 			UDF_I_CRTIME(inode) = inode->i_atime;
1431 		}
1432 		if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
1433 			(UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
1434 			 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
1435 		{
1436 			UDF_I_CRTIME(inode) = inode->i_mtime;
1437 		}
1438 		if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
1439 			(UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
1440 			 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
1441 		{
1442 			UDF_I_CRTIME(inode) = inode->i_ctime;
1443 		}
1444 
1445 		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1446 			efe->accessTime = cpu_to_lets(cpu_time);
1447 		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1448 			efe->modificationTime = cpu_to_lets(cpu_time);
1449 		if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
1450 			efe->createTime = cpu_to_lets(cpu_time);
1451 		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1452 			efe->attrTime = cpu_to_lets(cpu_time);
1453 
1454 		memset(&(efe->impIdent), 0, sizeof(regid));
1455 		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1456 		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1457 		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1458 		efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1459 		efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1460 		efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1461 		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1462 		crclen = sizeof(struct extendedFileEntry);
1463 	}
1464 	if (UDF_I_STRAT4096(inode))
1465 	{
1466 		fe->icbTag.strategyType = cpu_to_le16(4096);
1467 		fe->icbTag.strategyParameter = cpu_to_le16(1);
1468 		fe->icbTag.numEntries = cpu_to_le16(2);
1469 	}
1470 	else
1471 	{
1472 		fe->icbTag.strategyType = cpu_to_le16(4);
1473 		fe->icbTag.numEntries = cpu_to_le16(1);
1474 	}
1475 
1476 	if (S_ISDIR(inode->i_mode))
1477 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1478 	else if (S_ISREG(inode->i_mode))
1479 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1480 	else if (S_ISLNK(inode->i_mode))
1481 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1482 	else if (S_ISBLK(inode->i_mode))
1483 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1484 	else if (S_ISCHR(inode->i_mode))
1485 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1486 	else if (S_ISFIFO(inode->i_mode))
1487 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1488 	else if (S_ISSOCK(inode->i_mode))
1489 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1490 
1491 	icbflags =	UDF_I_ALLOCTYPE(inode) |
1492 			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1493 			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1494 			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1495 			(le16_to_cpu(fe->icbTag.flags) &
1496 				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1497 				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1498 
1499 	fe->icbTag.flags = cpu_to_le16(icbflags);
1500 	if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1501 		fe->descTag.descVersion = cpu_to_le16(3);
1502 	else
1503 		fe->descTag.descVersion = cpu_to_le16(2);
1504 	fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1505 	fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1506 	crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1507 	fe->descTag.descCRCLength = cpu_to_le16(crclen);
1508 	fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
1509 
1510 	fe->descTag.tagChecksum = 0;
1511 	for (i=0; i<16; i++)
1512 		if (i != 4)
1513 			fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1514 
1515 	/* write the data blocks */
1516 	mark_buffer_dirty(bh);
1517 	if (do_sync)
1518 	{
1519 		sync_dirty_buffer(bh);
1520 		if (buffer_req(bh) && !buffer_uptodate(bh))
1521 		{
1522 			printk("IO error syncing udf inode [%s:%08lx]\n",
1523 				inode->i_sb->s_id, inode->i_ino);
1524 			err = -EIO;
1525 		}
1526 	}
1527 	udf_release_data(bh);
1528 	return err;
1529 }
1530 
1531 struct inode *
1532 udf_iget(struct super_block *sb, kernel_lb_addr ino)
1533 {
1534 	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1535 	struct inode *inode = iget_locked(sb, block);
1536 
1537 	if (!inode)
1538 		return NULL;
1539 
1540 	if (inode->i_state & I_NEW) {
1541 		memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
1542 		__udf_read_inode(inode);
1543 		unlock_new_inode(inode);
1544 	}
1545 
1546 	if (is_bad_inode(inode))
1547 		goto out_iput;
1548 
1549 	if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
1550 		udf_debug("block=%d, partition=%d out of range\n",
1551 			ino.logicalBlockNum, ino.partitionReferenceNum);
1552 		make_bad_inode(inode);
1553 		goto out_iput;
1554 	}
1555 
1556 	return inode;
1557 
1558  out_iput:
1559 	iput(inode);
1560 	return NULL;
1561 }
1562 
/*
 * udf_add_aext
 *
 * Append one allocation extent (eloc/elen) at the position described by
 * *bloc / *extoffset / *bh (*bh NULL means the descriptors live inside
 * the ICB itself).  If the current descriptor area cannot hold two more
 * descriptors, a new allocation extent descriptor block is allocated
 * and chained in first, after which *bloc/*extoffset/*bh track the new
 * block.  Returns the 2-bit type of the extent written, or -1 on
 * allocation failure / unsupported allocation type.
 */
int8_t udf_add_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
	kernel_lb_addr eloc, uint32_t elen, struct buffer_head **bh, int inc)
{
	int adsize;
	short_ad *sad = NULL;
	long_ad *lad = NULL;
	struct allocExtDesc *aed;
	int8_t etype;
	uint8_t *ptr;

	/* Locate the raw descriptor slot: in-ICB copy or external block. */
	if (!*bh)
		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
	else
		ptr = (*bh)->b_data + *extoffset;

	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return -1;

	/* Need room for the new descriptor AND a continuation descriptor;
	 * otherwise chain a fresh allocation extent descriptor block. */
	if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
	{
		char *sptr, *dptr;
		struct buffer_head *nbh;
		int err, loffset;
		kernel_lb_addr obloc = *bloc;

		if (!(bloc->logicalBlockNum = udf_new_block(inode->i_sb, NULL,
			obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
		{
			return -1;
		}
		if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
			*bloc, 0))))
		{
			return -1;
		}
		lock_buffer(nbh);
		memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
		set_buffer_uptodate(nbh);
		unlock_buffer(nbh);
		mark_buffer_dirty_inode(nbh, inode);

		aed = (struct allocExtDesc *)(nbh->b_data);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
			aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
		if (*extoffset + adsize > inode->i_sb->s_blocksize)
		{
			/* The last descriptor would not fit in the old area:
			 * move it over into the new block, and overwrite its
			 * old slot with the continuation descriptor below. */
			loffset = *extoffset;
			aed->lengthAllocDescs = cpu_to_le32(adsize);
			sptr = ptr - adsize;
			dptr = nbh->b_data + sizeof(struct allocExtDesc);
			memcpy(dptr, sptr, adsize);
			*extoffset = sizeof(struct allocExtDesc) + adsize;
		}
		else
		{
			/* The continuation descriptor itself still fits in the
			 * old area; account for it there. */
			loffset = *extoffset + adsize;
			aed->lengthAllocDescs = cpu_to_le32(0);
			sptr = ptr;
			*extoffset = sizeof(struct allocExtDesc);

			if (*bh)
			{
				aed = (struct allocExtDesc *)(*bh)->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
			}
			else
			{
				UDF_I_LENALLOC(inode) += adsize;
				mark_inode_dirty(inode);
			}
		}
		/* Tag the new AED block (NSR03 for UDF 2.00+, else NSR02). */
		if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
				bloc->logicalBlockNum, sizeof(tag));
		else
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
				bloc->logicalBlockNum, sizeof(tag));
		/* Write the continuation descriptor pointing at the new block. */
		switch (UDF_I_ALLOCTYPE(inode))
		{
			case ICBTAG_FLAG_AD_SHORT:
			{
				sad = (short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					inode->i_sb->s_blocksize);
				sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
				break;
			}
			case ICBTAG_FLAG_AD_LONG:
			{
				lad = (long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					inode->i_sb->s_blocksize);
				lad->extLocation = cpu_to_lelb(*bloc);
				memset(lad->impUse, 0x00, sizeof(lad->impUse));
				break;
			}
		}
		if (*bh)
		{
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((*bh)->b_data, loffset);
			else
				udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(*bh, inode);
			udf_release_data(*bh);
		}
		else
			mark_inode_dirty(inode);
		/* From here on the caller's cursor is in the new block. */
		*bh = nbh;
	}

	/* Write the requested extent at the (possibly relocated) cursor. */
	etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);

	/* Account for the descriptor just added. */
	if (!*bh)
	{
		UDF_I_LENALLOC(inode) += adsize;
		mark_inode_dirty(inode);
	}
	else
	{
		aed = (struct allocExtDesc *)(*bh)->b_data;
		aed->lengthAllocDescs =
			cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
			udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
		else
			udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
		mark_buffer_dirty_inode(*bh, inode);
	}

	return etype;
}
1702 
1703 int8_t udf_write_aext(struct inode *inode, kernel_lb_addr bloc, int *extoffset,
1704     kernel_lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
1705 {
1706 	int adsize;
1707 	uint8_t *ptr;
1708 
1709 	if (!bh)
1710 		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1711 	else
1712 	{
1713 		ptr = bh->b_data + *extoffset;
1714 		atomic_inc(&bh->b_count);
1715 	}
1716 
1717 	switch (UDF_I_ALLOCTYPE(inode))
1718 	{
1719 		case ICBTAG_FLAG_AD_SHORT:
1720 		{
1721 			short_ad *sad = (short_ad *)ptr;
1722 			sad->extLength = cpu_to_le32(elen);
1723 			sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1724 			adsize = sizeof(short_ad);
1725 			break;
1726 		}
1727 		case ICBTAG_FLAG_AD_LONG:
1728 		{
1729 			long_ad *lad = (long_ad *)ptr;
1730 			lad->extLength = cpu_to_le32(elen);
1731 			lad->extLocation = cpu_to_lelb(eloc);
1732 			memset(lad->impUse, 0x00, sizeof(lad->impUse));
1733 			adsize = sizeof(long_ad);
1734 			break;
1735 		}
1736 		default:
1737 			return -1;
1738 	}
1739 
1740 	if (bh)
1741 	{
1742 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1743 		{
1744 			struct allocExtDesc *aed = (struct allocExtDesc *)(bh)->b_data;
1745 			udf_update_tag((bh)->b_data,
1746 				le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1747 		}
1748 		mark_buffer_dirty_inode(bh, inode);
1749 		udf_release_data(bh);
1750 	}
1751 	else
1752 		mark_inode_dirty(inode);
1753 
1754 	if (inc)
1755 		*extoffset += adsize;
1756 	return (elen >> 30);
1757 }
1758 
1759 int8_t udf_next_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1760 	kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
1761 {
1762 	int8_t etype;
1763 
1764 	while ((etype = udf_current_aext(inode, bloc, extoffset, eloc, elen, bh, inc)) ==
1765 		(EXT_NEXT_EXTENT_ALLOCDECS >> 30))
1766 	{
1767 		*bloc = *eloc;
1768 		*extoffset = sizeof(struct allocExtDesc);
1769 		udf_release_data(*bh);
1770 		if (!(*bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
1771 		{
1772 			udf_debug("reading block %d failed!\n",
1773 				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
1774 			return -1;
1775 		}
1776 	}
1777 
1778 	return etype;
1779 }
1780 
/*
 * udf_current_aext
 *
 * Read the allocation descriptor at *extoffset into *eloc / *elen --
 * from the inode's in-ICB descriptor copy when *bh is NULL, otherwise
 * from the allocation extent descriptor block in *bh.  A zero
 * *extoffset positions at the first descriptor.  When inc is set,
 * *extoffset is advanced past the descriptor read.  Returns the 2-bit
 * extent type, or -1 at the end of the descriptor area or for an
 * unsupported allocation type.
 */
int8_t udf_current_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
	kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
{
	int alen;
	int8_t etype;
	uint8_t *ptr;

	if (!*bh)
	{
		/* descriptors stored inside the ICB itself */
		if (!(*extoffset))
			*extoffset = udf_file_entry_alloc_offset(inode);
		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
		alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
	}
	else
	{
		/* descriptors in an external allocation extent block */
		if (!(*extoffset))
			*extoffset = sizeof(struct allocExtDesc);
		ptr = (*bh)->b_data + *extoffset;
		alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)(*bh)->b_data)->lengthAllocDescs);
	}

	switch (UDF_I_ALLOCTYPE(inode))
	{
		case ICBTAG_FLAG_AD_SHORT:
		{
			short_ad *sad;

			if (!(sad = udf_get_fileshortad(ptr, alen, extoffset, inc)))
				return -1;

			/* top 2 bits of extLength encode the extent type */
			etype = le32_to_cpu(sad->extLength) >> 30;
			eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
			eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
			*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
			break;
		}
		case ICBTAG_FLAG_AD_LONG:
		{
			long_ad *lad;

			if (!(lad = udf_get_filelongad(ptr, alen, extoffset, inc)))
				return -1;

			etype = le32_to_cpu(lad->extLength) >> 30;
			*eloc = lelb_to_cpu(lad->extLocation);
			*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
			break;
		}
		default:
		{
			udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
			return -1;
		}
	}

	return etype;
}
1839 
1840 static int8_t
1841 udf_insert_aext(struct inode *inode, kernel_lb_addr bloc, int extoffset,
1842 		kernel_lb_addr neloc, uint32_t nelen, struct buffer_head *bh)
1843 {
1844 	kernel_lb_addr oeloc;
1845 	uint32_t oelen;
1846 	int8_t etype;
1847 
1848 	if (bh)
1849 		atomic_inc(&bh->b_count);
1850 
1851 	while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
1852 	{
1853 		udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);
1854 
1855 		neloc = oeloc;
1856 		nelen = (etype << 30) | oelen;
1857 	}
1858 	udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
1859 	udf_release_data(bh);
1860 	return (nelen >> 30);
1861 }
1862 
/*
 * udf_delete_aext
 *
 * Remove the extent at nbloc/nextoffset by shifting every following
 * descriptor one slot back (tracked with a trailing cursor obh/obloc/
 * oextoffset) and terminating the shortened list with zero descriptors.
 * If the trailing and leading cursors end up in different blocks, the
 * now-empty last allocation extent block is freed and both the
 * terminator and the old continuation descriptor are blanked (hence
 * 2 * adsize of accounting).  Returns the type of the terminating
 * (zero) extent, or -1 if no extent exists at the given position.
 */
int8_t udf_delete_aext(struct inode *inode, kernel_lb_addr nbloc, int nextoffset,
	kernel_lb_addr eloc, uint32_t elen, struct buffer_head *nbh)
{
	struct buffer_head *obh;
	kernel_lb_addr obloc;
	int oextoffset, adsize;
	int8_t etype;
	struct allocExtDesc *aed;

	/* two references: one for the leading cursor (nbh) and one for the
	 * trailing cursor alias (obh) taken just below */
	if (nbh)
	{
		atomic_inc(&nbh->b_count);
		atomic_inc(&nbh->b_count);
	}

	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		adsize = 0;

	obh = nbh;
	obloc = nbloc;
	oextoffset = nextoffset;

	/* skip past the extent being deleted */
	if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
		return -1;

	/* copy each following extent back over its predecessor's slot */
	while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
	{
		udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
		if (obh != nbh)
		{
			/* leading cursor crossed into the next block: advance
			 * the trailing cursor to it as well */
			obloc = nbloc;
			udf_release_data(obh);
			atomic_inc(&nbh->b_count);
			obh = nbh;
			oextoffset = nextoffset - adsize;
		}
	}
	/* zero descriptor used as the new list terminator */
	memset(&eloc, 0x00, sizeof(kernel_lb_addr));
	elen = 0;

	if (nbh != obh)
	{
		/* the last AED block is now empty: free it and blank both the
		 * terminator and the continuation descriptor pointing at it */
		udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		if (!obh)
		{
			UDF_I_LENALLOC(inode) -= (adsize * 2);
			mark_inode_dirty(inode);
		}
		else
		{
			aed = (struct allocExtDesc *)(obh)->b_data;
			aed->lengthAllocDescs =
				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
			else
				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(obh, inode);
		}
	}
	else
	{
		/* single descriptor removed: blank just the terminator */
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		if (!obh)
		{
			UDF_I_LENALLOC(inode) -= adsize;
			mark_inode_dirty(inode);
		}
		else
		{
			aed = (struct allocExtDesc *)(obh)->b_data;
			aed->lengthAllocDescs =
				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((obh)->b_data, oextoffset - adsize);
			else
				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(obh, inode);
		}
	}

	udf_release_data(nbh);
	udf_release_data(obh);
	return (elen >> 30);
}
1953 }
1954 
1955 int8_t inode_bmap(struct inode *inode, int block, kernel_lb_addr *bloc, uint32_t *extoffset,
1956 	kernel_lb_addr *eloc, uint32_t *elen, uint32_t *offset, struct buffer_head **bh)
1957 {
1958 	uint64_t lbcount = 0, bcount = (uint64_t)block << inode->i_sb->s_blocksize_bits;
1959 	int8_t etype;
1960 
1961 	if (block < 0)
1962 	{
1963 		printk(KERN_ERR "udf: inode_bmap: block < 0\n");
1964 		return -1;
1965 	}
1966 
1967 	*extoffset = 0;
1968 	*elen = 0;
1969 	*bloc = UDF_I_LOCATION(inode);
1970 
1971 	do
1972 	{
1973 		if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
1974 		{
1975 			*offset = bcount - lbcount;
1976 			UDF_I_LENEXTENTS(inode) = lbcount;
1977 			return -1;
1978 		}
1979 		lbcount += *elen;
1980 	} while (lbcount <= bcount);
1981 
1982 	*offset = bcount + *elen - lbcount;
1983 
1984 	return etype;
1985 }
1986 
1987 long udf_block_map(struct inode *inode, long block)
1988 {
1989 	kernel_lb_addr eloc, bloc;
1990 	uint32_t offset, extoffset, elen;
1991 	struct buffer_head *bh = NULL;
1992 	int ret;
1993 
1994 	lock_kernel();
1995 
1996 	if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
1997 		ret = udf_get_lb_pblock(inode->i_sb, eloc, offset >> inode->i_sb->s_blocksize_bits);
1998 	else
1999 		ret = 0;
2000 
2001 	unlock_kernel();
2002 	udf_release_data(bh);
2003 
2004 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2005 		return udf_fixed_to_variable(ret);
2006 	else
2007 		return ret;
2008 }
2009