xref: /linux/fs/ext4/extents.c (revision f7d1331f16a869c76a5102caebb58e840e1d509c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
4  * Written by Alex Tomas <alex@clusterfs.com>
5  *
6  * Architecture independence:
7  *   Copyright (c) 2005, Bull S.A.
8  *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
9  */
10 
11 /*
12  * Extents support for EXT4
13  *
14  * TODO:
15  *   - ext4*_error() should be used in some situations
16  *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
17  *   - smart tree reduction
18  */
19 
20 #include <linux/fs.h>
21 #include <linux/time.h>
22 #include <linux/jbd2.h>
23 #include <linux/highuid.h>
24 #include <linux/pagemap.h>
25 #include <linux/quotaops.h>
26 #include <linux/string.h>
27 #include <linux/slab.h>
28 #include <linux/uaccess.h>
29 #include <linux/fiemap.h>
30 #include <linux/iomap.h>
31 #include <linux/sched/mm.h>
32 #include "ext4_jbd2.h"
33 #include "ext4_extents.h"
34 #include "xattr.h"
35 
36 #include <trace/events/ext4.h>
37 
38 /*
39  * used by extent splitting.
40  */
41 #define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
42 					due to ENOSPC */
43 #define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
44 #define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */
45 
46 #define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
47 #define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */
48 
49 static __le32 ext4_extent_block_csum(struct inode *inode,
50 				     struct ext4_extent_header *eh)
51 {
52 	struct ext4_inode_info *ei = EXT4_I(inode);
53 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
54 	__u32 csum;
55 
56 	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
57 			   EXT4_EXTENT_TAIL_OFFSET(eh));
58 	return cpu_to_le32(csum);
59 }
60 
61 static int ext4_extent_block_csum_verify(struct inode *inode,
62 					 struct ext4_extent_header *eh)
63 {
64 	struct ext4_extent_tail *et;
65 
66 	if (!ext4_has_metadata_csum(inode->i_sb))
67 		return 1;
68 
69 	et = find_ext4_extent_tail(eh);
70 	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
71 		return 0;
72 	return 1;
73 }
74 
75 static void ext4_extent_block_csum_set(struct inode *inode,
76 				       struct ext4_extent_header *eh)
77 {
78 	struct ext4_extent_tail *et;
79 
80 	if (!ext4_has_metadata_csum(inode->i_sb))
81 		return;
82 
83 	et = find_ext4_extent_tail(eh);
84 	et->et_checksum = ext4_extent_block_csum(inode, eh);
85 }
86 
87 static int ext4_split_extent_at(handle_t *handle,
88 			     struct inode *inode,
89 			     struct ext4_ext_path **ppath,
90 			     ext4_lblk_t split,
91 			     int split_flag,
92 			     int flags);
93 
94 static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
95 {
96 	/*
97 	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
98 	 * moment, get_block can be called only for blocks inside i_size since
99 	 * page cache has been already dropped and writes are blocked by
100 	 * i_rwsem. So we can safely drop the i_data_sem here.
101 	 */
102 	BUG_ON(EXT4_JOURNAL(inode) == NULL);
103 	ext4_discard_preallocations(inode);
104 	up_write(&EXT4_I(inode)->i_data_sem);
105 	*dropped = 1;
106 	return 0;
107 }
108 
109 static inline void ext4_ext_path_brelse(struct ext4_ext_path *path)
110 {
111 	brelse(path->p_bh);
112 	path->p_bh = NULL;
113 }
114 
115 static void ext4_ext_drop_refs(struct ext4_ext_path *path)
116 {
117 	int depth, i;
118 
119 	if (IS_ERR_OR_NULL(path))
120 		return;
121 	depth = path->p_depth;
122 	for (i = 0; i <= depth; i++, path++)
123 		ext4_ext_path_brelse(path);
124 }
125 
126 void ext4_free_ext_path(struct ext4_ext_path *path)
127 {
128 	if (IS_ERR_OR_NULL(path))
129 		return;
130 	ext4_ext_drop_refs(path);
131 	kfree(path);
132 }
133 
134 /*
135  * Make sure 'handle' has at least 'check_cred' credits. If not, restart
136  * transaction with 'restart_cred' credits. The function drops i_data_sem
137  * when restarting transaction and gets it after transaction is restarted.
138  *
139  * The function returns 0 on success, 1 if transaction had to be restarted,
140  * and < 0 in case of fatal error.
141  */
142 int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
143 				int check_cred, int restart_cred,
144 				int revoke_cred)
145 {
146 	int ret;
147 	int dropped = 0;
148 
149 	ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
150 		revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
151 	if (dropped)
152 		down_write(&EXT4_I(inode)->i_data_sem);
153 	return ret;
154 }
155 
156 /*
157  * could return:
158  *  - EROFS
159  *  - ENOMEM
160  */
161 static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
162 				struct ext4_ext_path *path)
163 {
164 	int err = 0;
165 
166 	if (path->p_bh) {
167 		/* path points to block */
168 		BUFFER_TRACE(path->p_bh, "get_write_access");
169 		err = ext4_journal_get_write_access(handle, inode->i_sb,
170 						    path->p_bh, EXT4_JTR_NONE);
171 		/*
172 		 * The extent buffer's verified bit will be set again in
173 		 * __ext4_ext_dirty(). We could leave an inconsistent
174 		 * buffer if the extents updating procudure break off du
175 		 * to some error happens, force to check it again.
176 		 */
177 		if (!err)
178 			clear_buffer_verified(path->p_bh);
179 	}
180 	/* path points to leaf/index in inode body */
181 	/* we use in-core data, no need to protect them */
182 	return err;
183 }
184 
185 /*
186  * could return:
187  *  - EROFS
188  *  - ENOMEM
189  *  - EIO
190  */
191 static int __ext4_ext_dirty(const char *where, unsigned int line,
192 			    handle_t *handle, struct inode *inode,
193 			    struct ext4_ext_path *path)
194 {
195 	int err;
196 
197 	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
198 	if (path->p_bh) {
199 		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
200 		/* path points to block */
201 		err = __ext4_handle_dirty_metadata(where, line, handle,
202 						   inode, path->p_bh);
203 		/* Extents updating done, re-set verified flag */
204 		if (!err)
205 			set_buffer_verified(path->p_bh);
206 	} else {
207 		/* path points to leaf/index in inode body */
208 		err = ext4_mark_inode_dirty(handle, inode);
209 	}
210 	return err;
211 }
212 
213 #define ext4_ext_dirty(handle, inode, path) \
214 		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
215 
216 static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
217 			      struct ext4_ext_path *path,
218 			      ext4_lblk_t block)
219 {
220 	if (path) {
221 		int depth = path->p_depth;
222 		struct ext4_extent *ex;
223 
224 		/*
225 		 * Try to predict block placement assuming that we are
226 		 * filling in a file which will eventually be
227 		 * non-sparse --- i.e., in the case of libbfd writing
228 		 * an ELF object sections out-of-order but in a way
229 		 * the eventually results in a contiguous object or
230 		 * executable file, or some database extending a table
231 		 * space file.  However, this is actually somewhat
232 		 * non-ideal if we are writing a sparse file such as
233 		 * qemu or KVM writing a raw image file that is going
234 		 * to stay fairly sparse, since it will end up
235 		 * fragmenting the file system's free space.  Maybe we
236 		 * should have some hueristics or some way to allow
237 		 * userspace to pass a hint to file system,
238 		 * especially if the latter case turns out to be
239 		 * common.
240 		 */
241 		ex = path[depth].p_ext;
242 		if (ex) {
243 			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
244 			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
245 
246 			if (block > ext_block)
247 				return ext_pblk + (block - ext_block);
248 			else
249 				return ext_pblk - (ext_block - block);
250 		}
251 
252 		/* it looks like index is empty;
253 		 * try to find starting block from index itself */
254 		if (path[depth].p_bh)
255 			return path[depth].p_bh->b_blocknr;
256 	}
257 
258 	/* OK. use inode's group */
259 	return ext4_inode_to_goal_block(inode);
260 }
261 
262 /*
263  * Allocation for a meta data block
264  */
265 static ext4_fsblk_t
266 ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
267 			struct ext4_ext_path *path,
268 			struct ext4_extent *ex, int *err, unsigned int flags)
269 {
270 	ext4_fsblk_t goal, newblock;
271 
272 	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
273 	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
274 					NULL, err);
275 	return newblock;
276 }
277 
278 static inline int ext4_ext_space_block(struct inode *inode, int check)
279 {
280 	int size;
281 
282 	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
283 			/ sizeof(struct ext4_extent);
284 #ifdef AGGRESSIVE_TEST
285 	if (!check && size > 6)
286 		size = 6;
287 #endif
288 	return size;
289 }
290 
291 static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
292 {
293 	int size;
294 
295 	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
296 			/ sizeof(struct ext4_extent_idx);
297 #ifdef AGGRESSIVE_TEST
298 	if (!check && size > 5)
299 		size = 5;
300 #endif
301 	return size;
302 }
303 
304 static inline int ext4_ext_space_root(struct inode *inode, int check)
305 {
306 	int size;
307 
308 	size = sizeof(EXT4_I(inode)->i_data);
309 	size -= sizeof(struct ext4_extent_header);
310 	size /= sizeof(struct ext4_extent);
311 #ifdef AGGRESSIVE_TEST
312 	if (!check && size > 3)
313 		size = 3;
314 #endif
315 	return size;
316 }
317 
318 static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
319 {
320 	int size;
321 
322 	size = sizeof(EXT4_I(inode)->i_data);
323 	size -= sizeof(struct ext4_extent_header);
324 	size /= sizeof(struct ext4_extent_idx);
325 #ifdef AGGRESSIVE_TEST
326 	if (!check && size > 4)
327 		size = 4;
328 #endif
329 	return size;
330 }
331 
332 static inline int
333 ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
334 			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
335 			   int nofail)
336 {
337 	struct ext4_ext_path *path = *ppath;
338 	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
339 	int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;
340 
341 	if (nofail)
342 		flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;
343 
344 	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
345 			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
346 			flags);
347 }
348 
349 static int
350 ext4_ext_max_entries(struct inode *inode, int depth)
351 {
352 	int max;
353 
354 	if (depth == ext_depth(inode)) {
355 		if (depth == 0)
356 			max = ext4_ext_space_root(inode, 1);
357 		else
358 			max = ext4_ext_space_root_idx(inode, 1);
359 	} else {
360 		if (depth == 0)
361 			max = ext4_ext_space_block(inode, 1);
362 		else
363 			max = ext4_ext_space_block_idx(inode, 1);
364 	}
365 
366 	return max;
367 }
368 
369 static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
370 {
371 	ext4_fsblk_t block = ext4_ext_pblock(ext);
372 	int len = ext4_ext_get_actual_len(ext);
373 	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
374 
375 	/*
376 	 * We allow neither:
377 	 *  - zero length
378 	 *  - overflow/wrap-around
379 	 */
380 	if (lblock + len <= lblock)
381 		return 0;
382 	return ext4_inode_block_valid(inode, block, len);
383 }
384 
385 static int ext4_valid_extent_idx(struct inode *inode,
386 				struct ext4_extent_idx *ext_idx)
387 {
388 	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
389 
390 	return ext4_inode_block_valid(inode, block, 1);
391 }
392 
393 static int ext4_valid_extent_entries(struct inode *inode,
394 				     struct ext4_extent_header *eh,
395 				     ext4_lblk_t lblk, ext4_fsblk_t *pblk,
396 				     int depth)
397 {
398 	unsigned short entries;
399 	ext4_lblk_t lblock = 0;
400 	ext4_lblk_t cur = 0;
401 
402 	if (eh->eh_entries == 0)
403 		return 1;
404 
405 	entries = le16_to_cpu(eh->eh_entries);
406 
407 	if (depth == 0) {
408 		/* leaf entries */
409 		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
410 
411 		/*
412 		 * The logical block in the first entry should equal to
413 		 * the number in the index block.
414 		 */
415 		if (depth != ext_depth(inode) &&
416 		    lblk != le32_to_cpu(ext->ee_block))
417 			return 0;
418 		while (entries) {
419 			if (!ext4_valid_extent(inode, ext))
420 				return 0;
421 
422 			/* Check for overlapping extents */
423 			lblock = le32_to_cpu(ext->ee_block);
424 			if (lblock < cur) {
425 				*pblk = ext4_ext_pblock(ext);
426 				return 0;
427 			}
428 			cur = lblock + ext4_ext_get_actual_len(ext);
429 			ext++;
430 			entries--;
431 		}
432 	} else {
433 		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
434 
435 		/*
436 		 * The logical block in the first entry should equal to
437 		 * the number in the parent index block.
438 		 */
439 		if (depth != ext_depth(inode) &&
440 		    lblk != le32_to_cpu(ext_idx->ei_block))
441 			return 0;
442 		while (entries) {
443 			if (!ext4_valid_extent_idx(inode, ext_idx))
444 				return 0;
445 
446 			/* Check for overlapping index extents */
447 			lblock = le32_to_cpu(ext_idx->ei_block);
448 			if (lblock < cur) {
449 				*pblk = ext4_idx_pblock(ext_idx);
450 				return 0;
451 			}
452 			ext_idx++;
453 			entries--;
454 			cur = lblock + 1;
455 		}
456 	}
457 	return 1;
458 }
459 
460 static int __ext4_ext_check(const char *function, unsigned int line,
461 			    struct inode *inode, struct ext4_extent_header *eh,
462 			    int depth, ext4_fsblk_t pblk, ext4_lblk_t lblk)
463 {
464 	const char *error_msg;
465 	int max = 0, err = -EFSCORRUPTED;
466 
467 	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
468 		error_msg = "invalid magic";
469 		goto corrupted;
470 	}
471 	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
472 		error_msg = "unexpected eh_depth";
473 		goto corrupted;
474 	}
475 	if (unlikely(eh->eh_max == 0)) {
476 		error_msg = "invalid eh_max";
477 		goto corrupted;
478 	}
479 	max = ext4_ext_max_entries(inode, depth);
480 	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
481 		error_msg = "too large eh_max";
482 		goto corrupted;
483 	}
484 	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
485 		error_msg = "invalid eh_entries";
486 		goto corrupted;
487 	}
488 	if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
489 		error_msg = "eh_entries is 0 but eh_depth is > 0";
490 		goto corrupted;
491 	}
492 	if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
493 		error_msg = "invalid extent entries";
494 		goto corrupted;
495 	}
496 	if (unlikely(depth > 32)) {
497 		error_msg = "too large eh_depth";
498 		goto corrupted;
499 	}
500 	/* Verify checksum on non-root extent tree nodes */
501 	if (ext_depth(inode) != depth &&
502 	    !ext4_extent_block_csum_verify(inode, eh)) {
503 		error_msg = "extent tree corrupted";
504 		err = -EFSBADCRC;
505 		goto corrupted;
506 	}
507 	return 0;
508 
509 corrupted:
510 	ext4_error_inode_err(inode, function, line, 0, -err,
511 			     "pblk %llu bad header/extent: %s - magic %x, "
512 			     "entries %u, max %u(%u), depth %u(%u)",
513 			     (unsigned long long) pblk, error_msg,
514 			     le16_to_cpu(eh->eh_magic),
515 			     le16_to_cpu(eh->eh_entries),
516 			     le16_to_cpu(eh->eh_max),
517 			     max, le16_to_cpu(eh->eh_depth), depth);
518 	return err;
519 }
520 
521 #define ext4_ext_check(inode, eh, depth, pblk)			\
522 	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk), 0)
523 
524 int ext4_ext_check_inode(struct inode *inode)
525 {
526 	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
527 }
528 
529 static void ext4_cache_extents(struct inode *inode,
530 			       struct ext4_extent_header *eh)
531 {
532 	struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
533 	ext4_lblk_t prev = 0;
534 	int i;
535 
536 	for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
537 		unsigned int status = EXTENT_STATUS_WRITTEN;
538 		ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
539 		int len = ext4_ext_get_actual_len(ex);
540 
541 		if (prev && (prev != lblk))
542 			ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
543 					     EXTENT_STATUS_HOLE);
544 
545 		if (ext4_ext_is_unwritten(ex))
546 			status = EXTENT_STATUS_UNWRITTEN;
547 		ext4_es_cache_extent(inode, lblk, len,
548 				     ext4_ext_pblock(ex), status);
549 		prev = lblk + len;
550 	}
551 }
552 
553 static struct buffer_head *
554 __read_extent_tree_block(const char *function, unsigned int line,
555 			 struct inode *inode, struct ext4_extent_idx *idx,
556 			 int depth, int flags)
557 {
558 	struct buffer_head		*bh;
559 	int				err;
560 	gfp_t				gfp_flags = __GFP_MOVABLE | GFP_NOFS;
561 	ext4_fsblk_t			pblk;
562 
563 	if (flags & EXT4_EX_NOFAIL)
564 		gfp_flags |= __GFP_NOFAIL;
565 
566 	pblk = ext4_idx_pblock(idx);
567 	bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
568 	if (unlikely(!bh))
569 		return ERR_PTR(-ENOMEM);
570 
571 	if (!bh_uptodate_or_lock(bh)) {
572 		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
573 		err = ext4_read_bh(bh, 0, NULL);
574 		if (err < 0)
575 			goto errout;
576 	}
577 	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
578 		return bh;
579 	err = __ext4_ext_check(function, line, inode, ext_block_hdr(bh),
580 			       depth, pblk, le32_to_cpu(idx->ei_block));
581 	if (err)
582 		goto errout;
583 	set_buffer_verified(bh);
584 	/*
585 	 * If this is a leaf block, cache all of its entries
586 	 */
587 	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
588 		struct ext4_extent_header *eh = ext_block_hdr(bh);
589 		ext4_cache_extents(inode, eh);
590 	}
591 	return bh;
592 errout:
593 	put_bh(bh);
594 	return ERR_PTR(err);
595 
596 }
597 
598 #define read_extent_tree_block(inode, idx, depth, flags)		\
599 	__read_extent_tree_block(__func__, __LINE__, (inode), (idx),	\
600 				 (depth), (flags))
601 
602 /*
603  * This function is called to cache a file's extent information in the
604  * extent status tree
605  */
606 int ext4_ext_precache(struct inode *inode)
607 {
608 	struct ext4_inode_info *ei = EXT4_I(inode);
609 	struct ext4_ext_path *path = NULL;
610 	struct buffer_head *bh;
611 	int i = 0, depth, ret = 0;
612 
613 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
614 		return 0;	/* not an extent-mapped inode */
615 
616 	down_read(&ei->i_data_sem);
617 	depth = ext_depth(inode);
618 
619 	/* Don't cache anything if there are no external extent blocks */
620 	if (!depth) {
621 		up_read(&ei->i_data_sem);
622 		return ret;
623 	}
624 
625 	path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
626 		       GFP_NOFS);
627 	if (path == NULL) {
628 		up_read(&ei->i_data_sem);
629 		return -ENOMEM;
630 	}
631 
632 	path[0].p_hdr = ext_inode_hdr(inode);
633 	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
634 	if (ret)
635 		goto out;
636 	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
637 	while (i >= 0) {
638 		/*
639 		 * If this is a leaf block or we've reached the end of
640 		 * the index block, go up
641 		 */
642 		if ((i == depth) ||
643 		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
644 			ext4_ext_path_brelse(path + i);
645 			i--;
646 			continue;
647 		}
648 		bh = read_extent_tree_block(inode, path[i].p_idx++,
649 					    depth - i - 1,
650 					    EXT4_EX_FORCE_CACHE);
651 		if (IS_ERR(bh)) {
652 			ret = PTR_ERR(bh);
653 			break;
654 		}
655 		i++;
656 		path[i].p_bh = bh;
657 		path[i].p_hdr = ext_block_hdr(bh);
658 		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
659 	}
660 	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
661 out:
662 	up_read(&ei->i_data_sem);
663 	ext4_free_ext_path(path);
664 	return ret;
665 }
666 
667 #ifdef EXT_DEBUG
668 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
669 {
670 	int k, l = path->p_depth;
671 
672 	ext_debug(inode, "path:");
673 	for (k = 0; k <= l; k++, path++) {
674 		if (path->p_idx) {
675 			ext_debug(inode, "  %d->%llu",
676 				  le32_to_cpu(path->p_idx->ei_block),
677 				  ext4_idx_pblock(path->p_idx));
678 		} else if (path->p_ext) {
679 			ext_debug(inode, "  %d:[%d]%d:%llu ",
680 				  le32_to_cpu(path->p_ext->ee_block),
681 				  ext4_ext_is_unwritten(path->p_ext),
682 				  ext4_ext_get_actual_len(path->p_ext),
683 				  ext4_ext_pblock(path->p_ext));
684 		} else
685 			ext_debug(inode, "  []");
686 	}
687 	ext_debug(inode, "\n");
688 }
689 
690 static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
691 {
692 	int depth = ext_depth(inode);
693 	struct ext4_extent_header *eh;
694 	struct ext4_extent *ex;
695 	int i;
696 
697 	if (!path)
698 		return;
699 
700 	eh = path[depth].p_hdr;
701 	ex = EXT_FIRST_EXTENT(eh);
702 
703 	ext_debug(inode, "Displaying leaf extents\n");
704 
705 	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
706 		ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
707 			  ext4_ext_is_unwritten(ex),
708 			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
709 	}
710 	ext_debug(inode, "\n");
711 }
712 
713 static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
714 			ext4_fsblk_t newblock, int level)
715 {
716 	int depth = ext_depth(inode);
717 	struct ext4_extent *ex;
718 
719 	if (depth != level) {
720 		struct ext4_extent_idx *idx;
721 		idx = path[level].p_idx;
722 		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
723 			ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
724 				  level, le32_to_cpu(idx->ei_block),
725 				  ext4_idx_pblock(idx), newblock);
726 			idx++;
727 		}
728 
729 		return;
730 	}
731 
732 	ex = path[depth].p_ext;
733 	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
734 		ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
735 				le32_to_cpu(ex->ee_block),
736 				ext4_ext_pblock(ex),
737 				ext4_ext_is_unwritten(ex),
738 				ext4_ext_get_actual_len(ex),
739 				newblock);
740 		ex++;
741 	}
742 }
743 
744 #else
745 #define ext4_ext_show_path(inode, path)
746 #define ext4_ext_show_leaf(inode, path)
747 #define ext4_ext_show_move(inode, path, newblock, level)
748 #endif
749 
750 /*
751  * ext4_ext_binsearch_idx:
752  * binary search for the closest index of the given block
753  * the header must be checked before calling this
754  */
755 static void
756 ext4_ext_binsearch_idx(struct inode *inode,
757 			struct ext4_ext_path *path, ext4_lblk_t block)
758 {
759 	struct ext4_extent_header *eh = path->p_hdr;
760 	struct ext4_extent_idx *r, *l, *m;
761 
762 
763 	ext_debug(inode, "binsearch for %u(idx):  ", block);
764 
765 	l = EXT_FIRST_INDEX(eh) + 1;
766 	r = EXT_LAST_INDEX(eh);
767 	while (l <= r) {
768 		m = l + (r - l) / 2;
769 		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
770 			  le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
771 			  r, le32_to_cpu(r->ei_block));
772 
773 		if (block < le32_to_cpu(m->ei_block))
774 			r = m - 1;
775 		else
776 			l = m + 1;
777 	}
778 
779 	path->p_idx = l - 1;
780 	ext_debug(inode, "  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
781 		  ext4_idx_pblock(path->p_idx));
782 
783 #ifdef CHECK_BINSEARCH
784 	{
785 		struct ext4_extent_idx *chix, *ix;
786 		int k;
787 
788 		chix = ix = EXT_FIRST_INDEX(eh);
789 		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
790 			if (k != 0 && le32_to_cpu(ix->ei_block) <=
791 			    le32_to_cpu(ix[-1].ei_block)) {
792 				printk(KERN_DEBUG "k=%d, ix=0x%p, "
793 				       "first=0x%p\n", k,
794 				       ix, EXT_FIRST_INDEX(eh));
795 				printk(KERN_DEBUG "%u <= %u\n",
796 				       le32_to_cpu(ix->ei_block),
797 				       le32_to_cpu(ix[-1].ei_block));
798 			}
799 			BUG_ON(k && le32_to_cpu(ix->ei_block)
800 					   <= le32_to_cpu(ix[-1].ei_block));
801 			if (block < le32_to_cpu(ix->ei_block))
802 				break;
803 			chix = ix;
804 		}
805 		BUG_ON(chix != path->p_idx);
806 	}
807 #endif
808 
809 }
810 
811 /*
812  * ext4_ext_binsearch:
813  * binary search for closest extent of the given block
814  * the header must be checked before calling this
815  */
816 static void
817 ext4_ext_binsearch(struct inode *inode,
818 		struct ext4_ext_path *path, ext4_lblk_t block)
819 {
820 	struct ext4_extent_header *eh = path->p_hdr;
821 	struct ext4_extent *r, *l, *m;
822 
823 	if (eh->eh_entries == 0) {
824 		/*
825 		 * this leaf is empty:
826 		 * we get such a leaf in split/add case
827 		 */
828 		return;
829 	}
830 
831 	ext_debug(inode, "binsearch for %u:  ", block);
832 
833 	l = EXT_FIRST_EXTENT(eh) + 1;
834 	r = EXT_LAST_EXTENT(eh);
835 
836 	while (l <= r) {
837 		m = l + (r - l) / 2;
838 		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
839 			  le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
840 			  r, le32_to_cpu(r->ee_block));
841 
842 		if (block < le32_to_cpu(m->ee_block))
843 			r = m - 1;
844 		else
845 			l = m + 1;
846 	}
847 
848 	path->p_ext = l - 1;
849 	ext_debug(inode, "  -> %d:%llu:[%d]%d ",
850 			le32_to_cpu(path->p_ext->ee_block),
851 			ext4_ext_pblock(path->p_ext),
852 			ext4_ext_is_unwritten(path->p_ext),
853 			ext4_ext_get_actual_len(path->p_ext));
854 
855 #ifdef CHECK_BINSEARCH
856 	{
857 		struct ext4_extent *chex, *ex;
858 		int k;
859 
860 		chex = ex = EXT_FIRST_EXTENT(eh);
861 		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
862 			BUG_ON(k && le32_to_cpu(ex->ee_block)
863 					  <= le32_to_cpu(ex[-1].ee_block));
864 			if (block < le32_to_cpu(ex->ee_block))
865 				break;
866 			chex = ex;
867 		}
868 		BUG_ON(chex != path->p_ext);
869 	}
870 #endif
871 
872 }
873 
874 void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
875 {
876 	struct ext4_extent_header *eh;
877 
878 	eh = ext_inode_hdr(inode);
879 	eh->eh_depth = 0;
880 	eh->eh_entries = 0;
881 	eh->eh_magic = EXT4_EXT_MAGIC;
882 	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
883 	eh->eh_generation = 0;
884 	ext4_mark_inode_dirty(handle, inode);
885 }
886 
887 struct ext4_ext_path *
888 ext4_find_extent(struct inode *inode, ext4_lblk_t block,
889 		 struct ext4_ext_path *path, int flags)
890 {
891 	struct ext4_extent_header *eh;
892 	struct buffer_head *bh;
893 	short int depth, i, ppos = 0;
894 	int ret;
895 	gfp_t gfp_flags = GFP_NOFS;
896 
897 	if (flags & EXT4_EX_NOFAIL)
898 		gfp_flags |= __GFP_NOFAIL;
899 
900 	eh = ext_inode_hdr(inode);
901 	depth = ext_depth(inode);
902 	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
903 		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
904 				 depth);
905 		ret = -EFSCORRUPTED;
906 		goto err;
907 	}
908 
909 	if (path) {
910 		ext4_ext_drop_refs(path);
911 		if (depth > path[0].p_maxdepth) {
912 			kfree(path);
913 			path = NULL;
914 		}
915 	}
916 	if (!path) {
917 		/* account possible depth increase */
918 		path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
919 				gfp_flags);
920 		if (unlikely(!path))
921 			return ERR_PTR(-ENOMEM);
922 		path[0].p_maxdepth = depth + 1;
923 	}
924 	path[0].p_hdr = eh;
925 	path[0].p_bh = NULL;
926 
927 	i = depth;
928 	if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
929 		ext4_cache_extents(inode, eh);
930 	/* walk through the tree */
931 	while (i) {
932 		ext_debug(inode, "depth %d: num %d, max %d\n",
933 			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
934 
935 		ext4_ext_binsearch_idx(inode, path + ppos, block);
936 		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
937 		path[ppos].p_depth = i;
938 		path[ppos].p_ext = NULL;
939 
940 		bh = read_extent_tree_block(inode, path[ppos].p_idx, --i, flags);
941 		if (IS_ERR(bh)) {
942 			ret = PTR_ERR(bh);
943 			goto err;
944 		}
945 
946 		eh = ext_block_hdr(bh);
947 		ppos++;
948 		path[ppos].p_bh = bh;
949 		path[ppos].p_hdr = eh;
950 	}
951 
952 	path[ppos].p_depth = i;
953 	path[ppos].p_ext = NULL;
954 	path[ppos].p_idx = NULL;
955 
956 	/* find extent */
957 	ext4_ext_binsearch(inode, path + ppos, block);
958 	/* if not an empty leaf */
959 	if (path[ppos].p_ext)
960 		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
961 
962 	ext4_ext_show_path(inode, path);
963 
964 	return path;
965 
966 err:
967 	ext4_free_ext_path(path);
968 	return ERR_PTR(ret);
969 }
970 
971 /*
972  * ext4_ext_insert_index:
973  * insert new index [@logical;@ptr] into the block at @curp;
974  * check where to insert: before @curp or after @curp
975  */
976 static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
977 				 struct ext4_ext_path *curp,
978 				 int logical, ext4_fsblk_t ptr)
979 {
980 	struct ext4_extent_idx *ix;
981 	int len, err;
982 
983 	err = ext4_ext_get_access(handle, inode, curp);
984 	if (err)
985 		return err;
986 
987 	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
988 		EXT4_ERROR_INODE(inode,
989 				 "logical %d == ei_block %d!",
990 				 logical, le32_to_cpu(curp->p_idx->ei_block));
991 		return -EFSCORRUPTED;
992 	}
993 
994 	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
995 			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
996 		EXT4_ERROR_INODE(inode,
997 				 "eh_entries %d >= eh_max %d!",
998 				 le16_to_cpu(curp->p_hdr->eh_entries),
999 				 le16_to_cpu(curp->p_hdr->eh_max));
1000 		return -EFSCORRUPTED;
1001 	}
1002 
1003 	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
1004 		/* insert after */
1005 		ext_debug(inode, "insert new index %d after: %llu\n",
1006 			  logical, ptr);
1007 		ix = curp->p_idx + 1;
1008 	} else {
1009 		/* insert before */
1010 		ext_debug(inode, "insert new index %d before: %llu\n",
1011 			  logical, ptr);
1012 		ix = curp->p_idx;
1013 	}
1014 
1015 	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
1016 		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
1017 		return -EFSCORRUPTED;
1018 	}
1019 
1020 	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
1021 	BUG_ON(len < 0);
1022 	if (len > 0) {
1023 		ext_debug(inode, "insert new index %d: "
1024 				"move %d indices from 0x%p to 0x%p\n",
1025 				logical, len, ix, ix + 1);
1026 		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
1027 	}
1028 
1029 	ix->ei_block = cpu_to_le32(logical);
1030 	ext4_idx_store_pblock(ix, ptr);
1031 	le16_add_cpu(&curp->p_hdr->eh_entries, 1);
1032 
1033 	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
1034 		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
1035 		return -EFSCORRUPTED;
1036 	}
1037 
1038 	err = ext4_ext_dirty(handle, inode, curp);
1039 	ext4_std_error(inode->i_sb, err);
1040 
1041 	return err;
1042 }
1043 
1044 /*
1045  * ext4_ext_split:
1046  * inserts new subtree into the path, using free index entry
1047  * at depth @at:
1048  * - allocates all needed blocks (new leaf and all intermediate index blocks)
1049  * - makes decision where to split
1050  * - moves remaining extents and index entries (right to the split point)
1051  *   into the newly allocated blocks
1052  * - initializes subtree
1053  */
1054 static int ext4_ext_split(handle_t *handle, struct inode *inode,
1055 			  unsigned int flags,
1056 			  struct ext4_ext_path *path,
1057 			  struct ext4_extent *newext, int at)
1058 {
1059 	struct buffer_head *bh = NULL;
1060 	int depth = ext_depth(inode);
1061 	struct ext4_extent_header *neh;
1062 	struct ext4_extent_idx *fidx;
1063 	int i = at, k, m, a;
1064 	ext4_fsblk_t newblock, oldblock;
1065 	__le32 border;
1066 	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
1067 	gfp_t gfp_flags = GFP_NOFS;
1068 	int err = 0;
1069 	size_t ext_size = 0;
1070 
1071 	if (flags & EXT4_EX_NOFAIL)
1072 		gfp_flags |= __GFP_NOFAIL;
1073 
1074 	/* make decision: where to split? */
1075 	/* FIXME: now decision is simplest: at current extent */
1076 
1077 	/* if current leaf will be split, then we should use
1078 	 * border from split point */
1079 	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
1080 		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
1081 		return -EFSCORRUPTED;
1082 	}
1083 	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
1084 		border = path[depth].p_ext[1].ee_block;
1085 		ext_debug(inode, "leaf will be split."
1086 				" next leaf starts at %d\n",
1087 				  le32_to_cpu(border));
1088 	} else {
1089 		border = newext->ee_block;
1090 		ext_debug(inode, "leaf will be added."
1091 				" next leaf starts at %d\n",
1092 				le32_to_cpu(border));
1093 	}
1094 
1095 	/*
1096 	 * If error occurs, then we break processing
1097 	 * and mark filesystem read-only. index won't
1098 	 * be inserted and tree will be in consistent
1099 	 * state. Next mount will repair buffers too.
1100 	 */
1101 
1102 	/*
1103 	 * Get array to track all allocated blocks.
1104 	 * We need this to handle errors and free blocks
1105 	 * upon them.
1106 	 */
1107 	ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);
1108 	if (!ablocks)
1109 		return -ENOMEM;
1110 
1111 	/* allocate all needed blocks */
1112 	ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
1113 	for (a = 0; a < depth - at; a++) {
1114 		newblock = ext4_ext_new_meta_block(handle, inode, path,
1115 						   newext, &err, flags);
1116 		if (newblock == 0)
1117 			goto cleanup;
1118 		ablocks[a] = newblock;
1119 	}
1120 
1121 	/* initialize new leaf */
1122 	newblock = ablocks[--a];
1123 	if (unlikely(newblock == 0)) {
1124 		EXT4_ERROR_INODE(inode, "newblock == 0!");
1125 		err = -EFSCORRUPTED;
1126 		goto cleanup;
1127 	}
1128 	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
1129 	if (unlikely(!bh)) {
1130 		err = -ENOMEM;
1131 		goto cleanup;
1132 	}
1133 	lock_buffer(bh);
1134 
1135 	err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
1136 					     EXT4_JTR_NONE);
1137 	if (err)
1138 		goto cleanup;
1139 
1140 	neh = ext_block_hdr(bh);
1141 	neh->eh_entries = 0;
1142 	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1143 	neh->eh_magic = EXT4_EXT_MAGIC;
1144 	neh->eh_depth = 0;
1145 	neh->eh_generation = 0;
1146 
1147 	/* move remainder of path[depth] to the new leaf */
1148 	if (unlikely(path[depth].p_hdr->eh_entries !=
1149 		     path[depth].p_hdr->eh_max)) {
1150 		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
1151 				 path[depth].p_hdr->eh_entries,
1152 				 path[depth].p_hdr->eh_max);
1153 		err = -EFSCORRUPTED;
1154 		goto cleanup;
1155 	}
1156 	/* start copy from next extent */
1157 	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
1158 	ext4_ext_show_move(inode, path, newblock, depth);
1159 	if (m) {
1160 		struct ext4_extent *ex;
1161 		ex = EXT_FIRST_EXTENT(neh);
1162 		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
1163 		le16_add_cpu(&neh->eh_entries, m);
1164 	}
1165 
1166 	/* zero out unused area in the extent block */
1167 	ext_size = sizeof(struct ext4_extent_header) +
1168 		sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
1169 	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
1170 	ext4_extent_block_csum_set(inode, neh);
1171 	set_buffer_uptodate(bh);
1172 	unlock_buffer(bh);
1173 
1174 	err = ext4_handle_dirty_metadata(handle, inode, bh);
1175 	if (err)
1176 		goto cleanup;
1177 	brelse(bh);
1178 	bh = NULL;
1179 
1180 	/* correct old leaf */
1181 	if (m) {
1182 		err = ext4_ext_get_access(handle, inode, path + depth);
1183 		if (err)
1184 			goto cleanup;
1185 		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
1186 		err = ext4_ext_dirty(handle, inode, path + depth);
1187 		if (err)
1188 			goto cleanup;
1189 
1190 	}
1191 
1192 	/* create intermediate indexes */
1193 	k = depth - at - 1;
1194 	if (unlikely(k < 0)) {
1195 		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
1196 		err = -EFSCORRUPTED;
1197 		goto cleanup;
1198 	}
1199 	if (k)
1200 		ext_debug(inode, "create %d intermediate indices\n", k);
1201 	/* insert new index into current index block */
1202 	/* current depth stored in i var */
1203 	i = depth - 1;
1204 	while (k--) {
1205 		oldblock = newblock;
1206 		newblock = ablocks[--a];
1207 		bh = sb_getblk(inode->i_sb, newblock);
1208 		if (unlikely(!bh)) {
1209 			err = -ENOMEM;
1210 			goto cleanup;
1211 		}
1212 		lock_buffer(bh);
1213 
1214 		err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
1215 						     EXT4_JTR_NONE);
1216 		if (err)
1217 			goto cleanup;
1218 
1219 		neh = ext_block_hdr(bh);
1220 		neh->eh_entries = cpu_to_le16(1);
1221 		neh->eh_magic = EXT4_EXT_MAGIC;
1222 		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1223 		neh->eh_depth = cpu_to_le16(depth - i);
1224 		neh->eh_generation = 0;
1225 		fidx = EXT_FIRST_INDEX(neh);
1226 		fidx->ei_block = border;
1227 		ext4_idx_store_pblock(fidx, oldblock);
1228 
1229 		ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n",
1230 				i, newblock, le32_to_cpu(border), oldblock);
1231 
1232 		/* move remainder of path[i] to the new index block */
1233 		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
1234 					EXT_LAST_INDEX(path[i].p_hdr))) {
1235 			EXT4_ERROR_INODE(inode,
1236 					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
1237 					 le32_to_cpu(path[i].p_ext->ee_block));
1238 			err = -EFSCORRUPTED;
1239 			goto cleanup;
1240 		}
1241 		/* start copy indexes */
1242 		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
1243 		ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
1244 				EXT_MAX_INDEX(path[i].p_hdr));
1245 		ext4_ext_show_move(inode, path, newblock, i);
1246 		if (m) {
1247 			memmove(++fidx, path[i].p_idx,
1248 				sizeof(struct ext4_extent_idx) * m);
1249 			le16_add_cpu(&neh->eh_entries, m);
1250 		}
1251 		/* zero out unused area in the extent block */
1252 		ext_size = sizeof(struct ext4_extent_header) +
1253 		   (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
1254 		memset(bh->b_data + ext_size, 0,
1255 			inode->i_sb->s_blocksize - ext_size);
1256 		ext4_extent_block_csum_set(inode, neh);
1257 		set_buffer_uptodate(bh);
1258 		unlock_buffer(bh);
1259 
1260 		err = ext4_handle_dirty_metadata(handle, inode, bh);
1261 		if (err)
1262 			goto cleanup;
1263 		brelse(bh);
1264 		bh = NULL;
1265 
1266 		/* correct old index */
1267 		if (m) {
1268 			err = ext4_ext_get_access(handle, inode, path + i);
1269 			if (err)
1270 				goto cleanup;
1271 			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
1272 			err = ext4_ext_dirty(handle, inode, path + i);
1273 			if (err)
1274 				goto cleanup;
1275 		}
1276 
1277 		i--;
1278 	}
1279 
1280 	/* insert new index */
1281 	err = ext4_ext_insert_index(handle, inode, path + at,
1282 				    le32_to_cpu(border), newblock);
1283 
1284 cleanup:
1285 	if (bh) {
1286 		if (buffer_locked(bh))
1287 			unlock_buffer(bh);
1288 		brelse(bh);
1289 	}
1290 
1291 	if (err) {
1292 		/* free all allocated blocks in error case */
1293 		for (i = 0; i < depth; i++) {
1294 			if (!ablocks[i])
1295 				continue;
1296 			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
1297 					 EXT4_FREE_BLOCKS_METADATA);
1298 		}
1299 	}
1300 	kfree(ablocks);
1301 
1302 	return err;
1303 }
1304 
1305 /*
1306  * ext4_ext_grow_indepth:
1307  * implements tree growing procedure:
1308  * - allocates new block
1309  * - moves top-level data (index block or leaf) into the new block
1310  * - initializes new top-level, creating index that points to the
1311  *   just created block
1312  */
1313 static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1314 				 unsigned int flags)
1315 {
1316 	struct ext4_extent_header *neh;
1317 	struct buffer_head *bh;
1318 	ext4_fsblk_t newblock, goal = 0;
1319 	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
1320 	int err = 0;
1321 	size_t ext_size = 0;
1322 
1323 	/* Try to prepend new index to old one */
1324 	if (ext_depth(inode))
1325 		goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
1326 	if (goal > le32_to_cpu(es->s_first_data_block)) {
1327 		flags |= EXT4_MB_HINT_TRY_GOAL;
1328 		goal--;
1329 	} else
1330 		goal = ext4_inode_to_goal_block(inode);
1331 	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
1332 					NULL, &err);
1333 	if (newblock == 0)
1334 		return err;
1335 
1336 	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
1337 	if (unlikely(!bh))
1338 		return -ENOMEM;
1339 	lock_buffer(bh);
1340 
1341 	err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
1342 					     EXT4_JTR_NONE);
1343 	if (err) {
1344 		unlock_buffer(bh);
1345 		goto out;
1346 	}
1347 
1348 	ext_size = sizeof(EXT4_I(inode)->i_data);
1349 	/* move top-level index/leaf into new block */
1350 	memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
1351 	/* zero out unused area in the extent block */
1352 	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
1353 
1354 	/* set size of new block */
1355 	neh = ext_block_hdr(bh);
1356 	/* old root could have indexes or leaves
1357 	 * so calculate e_max right way */
1358 	if (ext_depth(inode))
1359 		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1360 	else
1361 		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1362 	neh->eh_magic = EXT4_EXT_MAGIC;
1363 	ext4_extent_block_csum_set(inode, neh);
1364 	set_buffer_uptodate(bh);
1365 	set_buffer_verified(bh);
1366 	unlock_buffer(bh);
1367 
1368 	err = ext4_handle_dirty_metadata(handle, inode, bh);
1369 	if (err)
1370 		goto out;
1371 
1372 	/* Update top-level index: num,max,pointer */
1373 	neh = ext_inode_hdr(inode);
1374 	neh->eh_entries = cpu_to_le16(1);
1375 	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
1376 	if (neh->eh_depth == 0) {
1377 		/* Root extent block becomes index block */
1378 		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
1379 		EXT_FIRST_INDEX(neh)->ei_block =
1380 			EXT_FIRST_EXTENT(neh)->ee_block;
1381 	}
1382 	ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n",
1383 		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
1384 		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
1385 		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
1386 
1387 	le16_add_cpu(&neh->eh_depth, 1);
1388 	err = ext4_mark_inode_dirty(handle, inode);
1389 out:
1390 	brelse(bh);
1391 
1392 	return err;
1393 }
1394 
1395 /*
1396  * ext4_ext_create_new_leaf:
1397  * finds empty index and adds new leaf.
1398  * if no free index is found, then it requests in-depth growing.
1399  */
1400 static struct ext4_ext_path *
1401 ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
1402 			 unsigned int mb_flags, unsigned int gb_flags,
1403 			 struct ext4_ext_path *path,
1404 			 struct ext4_extent *newext)
1405 {
1406 	struct ext4_ext_path *curp;
1407 	int depth, i, err = 0;
1408 
1409 repeat:
1410 	i = depth = ext_depth(inode);
1411 
1412 	/* walk up to the tree and look for free index entry */
1413 	curp = path + depth;
1414 	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
1415 		i--;
1416 		curp--;
1417 	}
1418 
1419 	/* we use already allocated block for index block,
1420 	 * so subsequent data blocks should be contiguous */
1421 	if (EXT_HAS_FREE_INDEX(curp)) {
1422 		/* if we found index with free entry, then use that
1423 		 * entry: create all needed subtree and add new leaf */
1424 		err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
1425 		if (err)
1426 			goto errout;
1427 
1428 		/* refill path */
1429 		path = ext4_find_extent(inode,
1430 				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1431 				    path, gb_flags);
1432 		return path;
1433 	} else {
1434 		/* tree is full, time to grow in depth */
1435 		err = ext4_ext_grow_indepth(handle, inode, mb_flags);
1436 		if (err)
1437 			goto errout;
1438 
1439 		/* refill path */
1440 		path = ext4_find_extent(inode,
1441 				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1442 				    path, gb_flags);
1443 		if (IS_ERR(path))
1444 			return path;
1445 
1446 		/*
1447 		 * only first (depth 0 -> 1) produces free space;
1448 		 * in all other cases we have to split the grown tree
1449 		 */
1450 		depth = ext_depth(inode);
1451 		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1452 			/* now we need to split */
1453 			goto repeat;
1454 		}
1455 	}
1456 	return path;
1457 
1458 errout:
1459 	ext4_free_ext_path(path);
1460 	return ERR_PTR(err);
1461 }
1462 
1463 /*
1464  * search the closest allocated block to the left for *logical
1465  * and returns it at @logical + it's physical address at @phys
1466  * if *logical is the smallest allocated block, the function
1467  * returns 0 at @phys
1468  * return value contains 0 (success) or error code
1469  */
1470 static int ext4_ext_search_left(struct inode *inode,
1471 				struct ext4_ext_path *path,
1472 				ext4_lblk_t *logical, ext4_fsblk_t *phys)
1473 {
1474 	struct ext4_extent_idx *ix;
1475 	struct ext4_extent *ex;
1476 	int depth, ee_len;
1477 
1478 	if (unlikely(path == NULL)) {
1479 		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1480 		return -EFSCORRUPTED;
1481 	}
1482 	depth = path->p_depth;
1483 	*phys = 0;
1484 
1485 	if (depth == 0 && path->p_ext == NULL)
1486 		return 0;
1487 
1488 	/* usually extent in the path covers blocks smaller
1489 	 * then *logical, but it can be that extent is the
1490 	 * first one in the file */
1491 
1492 	ex = path[depth].p_ext;
1493 	ee_len = ext4_ext_get_actual_len(ex);
1494 	if (*logical < le32_to_cpu(ex->ee_block)) {
1495 		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1496 			EXT4_ERROR_INODE(inode,
1497 					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1498 					 *logical, le32_to_cpu(ex->ee_block));
1499 			return -EFSCORRUPTED;
1500 		}
1501 		while (--depth >= 0) {
1502 			ix = path[depth].p_idx;
1503 			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1504 				EXT4_ERROR_INODE(inode,
1505 				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
1506 				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
1507 				  le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block),
1508 				  depth);
1509 				return -EFSCORRUPTED;
1510 			}
1511 		}
1512 		return 0;
1513 	}
1514 
1515 	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1516 		EXT4_ERROR_INODE(inode,
1517 				 "logical %d < ee_block %d + ee_len %d!",
1518 				 *logical, le32_to_cpu(ex->ee_block), ee_len);
1519 		return -EFSCORRUPTED;
1520 	}
1521 
1522 	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1523 	*phys = ext4_ext_pblock(ex) + ee_len - 1;
1524 	return 0;
1525 }
1526 
1527 /*
1528  * Search the closest allocated block to the right for *logical
1529  * and returns it at @logical + it's physical address at @phys.
1530  * If not exists, return 0 and @phys is set to 0. We will return
1531  * 1 which means we found an allocated block and ret_ex is valid.
1532  * Or return a (< 0) error code.
1533  */
1534 static int ext4_ext_search_right(struct inode *inode,
1535 				 struct ext4_ext_path *path,
1536 				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
1537 				 struct ext4_extent *ret_ex)
1538 {
1539 	struct buffer_head *bh = NULL;
1540 	struct ext4_extent_header *eh;
1541 	struct ext4_extent_idx *ix;
1542 	struct ext4_extent *ex;
1543 	int depth;	/* Note, NOT eh_depth; depth from top of tree */
1544 	int ee_len;
1545 
1546 	if (unlikely(path == NULL)) {
1547 		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1548 		return -EFSCORRUPTED;
1549 	}
1550 	depth = path->p_depth;
1551 	*phys = 0;
1552 
1553 	if (depth == 0 && path->p_ext == NULL)
1554 		return 0;
1555 
1556 	/* usually extent in the path covers blocks smaller
1557 	 * then *logical, but it can be that extent is the
1558 	 * first one in the file */
1559 
1560 	ex = path[depth].p_ext;
1561 	ee_len = ext4_ext_get_actual_len(ex);
1562 	if (*logical < le32_to_cpu(ex->ee_block)) {
1563 		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1564 			EXT4_ERROR_INODE(inode,
1565 					 "first_extent(path[%d].p_hdr) != ex",
1566 					 depth);
1567 			return -EFSCORRUPTED;
1568 		}
1569 		while (--depth >= 0) {
1570 			ix = path[depth].p_idx;
1571 			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1572 				EXT4_ERROR_INODE(inode,
1573 						 "ix != EXT_FIRST_INDEX *logical %d!",
1574 						 *logical);
1575 				return -EFSCORRUPTED;
1576 			}
1577 		}
1578 		goto found_extent;
1579 	}
1580 
1581 	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1582 		EXT4_ERROR_INODE(inode,
1583 				 "logical %d < ee_block %d + ee_len %d!",
1584 				 *logical, le32_to_cpu(ex->ee_block), ee_len);
1585 		return -EFSCORRUPTED;
1586 	}
1587 
1588 	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1589 		/* next allocated block in this leaf */
1590 		ex++;
1591 		goto found_extent;
1592 	}
1593 
1594 	/* go up and search for index to the right */
1595 	while (--depth >= 0) {
1596 		ix = path[depth].p_idx;
1597 		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
1598 			goto got_index;
1599 	}
1600 
1601 	/* we've gone up to the root and found no index to the right */
1602 	return 0;
1603 
1604 got_index:
1605 	/* we've found index to the right, let's
1606 	 * follow it and find the closest allocated
1607 	 * block to the right */
1608 	ix++;
1609 	while (++depth < path->p_depth) {
1610 		/* subtract from p_depth to get proper eh_depth */
1611 		bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
1612 		if (IS_ERR(bh))
1613 			return PTR_ERR(bh);
1614 		eh = ext_block_hdr(bh);
1615 		ix = EXT_FIRST_INDEX(eh);
1616 		put_bh(bh);
1617 	}
1618 
1619 	bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
1620 	if (IS_ERR(bh))
1621 		return PTR_ERR(bh);
1622 	eh = ext_block_hdr(bh);
1623 	ex = EXT_FIRST_EXTENT(eh);
1624 found_extent:
1625 	*logical = le32_to_cpu(ex->ee_block);
1626 	*phys = ext4_ext_pblock(ex);
1627 	if (ret_ex)
1628 		*ret_ex = *ex;
1629 	if (bh)
1630 		put_bh(bh);
1631 	return 1;
1632 }
1633 
1634 /*
1635  * ext4_ext_next_allocated_block:
1636  * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
1637  * NOTE: it considers block number from index entry as
1638  * allocated block. Thus, index entries have to be consistent
1639  * with leaves.
1640  */
1641 ext4_lblk_t
1642 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1643 {
1644 	int depth;
1645 
1646 	BUG_ON(path == NULL);
1647 	depth = path->p_depth;
1648 
1649 	if (depth == 0 && path->p_ext == NULL)
1650 		return EXT_MAX_BLOCKS;
1651 
1652 	while (depth >= 0) {
1653 		struct ext4_ext_path *p = &path[depth];
1654 
1655 		if (depth == path->p_depth) {
1656 			/* leaf */
1657 			if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr))
1658 				return le32_to_cpu(p->p_ext[1].ee_block);
1659 		} else {
1660 			/* index */
1661 			if (p->p_idx != EXT_LAST_INDEX(p->p_hdr))
1662 				return le32_to_cpu(p->p_idx[1].ei_block);
1663 		}
1664 		depth--;
1665 	}
1666 
1667 	return EXT_MAX_BLOCKS;
1668 }
1669 
1670 /*
1671  * ext4_ext_next_leaf_block:
1672  * returns first allocated block from next leaf or EXT_MAX_BLOCKS
1673  */
1674 static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
1675 {
1676 	int depth;
1677 
1678 	BUG_ON(path == NULL);
1679 	depth = path->p_depth;
1680 
1681 	/* zero-tree has no leaf blocks at all */
1682 	if (depth == 0)
1683 		return EXT_MAX_BLOCKS;
1684 
1685 	/* go to index block */
1686 	depth--;
1687 
1688 	while (depth >= 0) {
1689 		if (path[depth].p_idx !=
1690 				EXT_LAST_INDEX(path[depth].p_hdr))
1691 			return (ext4_lblk_t)
1692 				le32_to_cpu(path[depth].p_idx[1].ei_block);
1693 		depth--;
1694 	}
1695 
1696 	return EXT_MAX_BLOCKS;
1697 }
1698 
1699 /*
1700  * ext4_ext_correct_indexes:
1701  * if leaf gets modified and modified extent is first in the leaf,
1702  * then we have to correct all indexes above.
1703  * TODO: do we need to correct tree in all cases?
1704  */
1705 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1706 				struct ext4_ext_path *path)
1707 {
1708 	struct ext4_extent_header *eh;
1709 	int depth = ext_depth(inode);
1710 	struct ext4_extent *ex;
1711 	__le32 border;
1712 	int k, err = 0;
1713 
1714 	eh = path[depth].p_hdr;
1715 	ex = path[depth].p_ext;
1716 
1717 	if (unlikely(ex == NULL || eh == NULL)) {
1718 		EXT4_ERROR_INODE(inode,
1719 				 "ex %p == NULL or eh %p == NULL", ex, eh);
1720 		return -EFSCORRUPTED;
1721 	}
1722 
1723 	if (depth == 0) {
1724 		/* there is no tree at all */
1725 		return 0;
1726 	}
1727 
1728 	if (ex != EXT_FIRST_EXTENT(eh)) {
1729 		/* we correct tree if first leaf got modified only */
1730 		return 0;
1731 	}
1732 
1733 	/*
1734 	 * TODO: we need correction if border is smaller than current one
1735 	 */
1736 	k = depth - 1;
1737 	border = path[depth].p_ext->ee_block;
1738 	err = ext4_ext_get_access(handle, inode, path + k);
1739 	if (err)
1740 		return err;
1741 	path[k].p_idx->ei_block = border;
1742 	err = ext4_ext_dirty(handle, inode, path + k);
1743 	if (err)
1744 		return err;
1745 
1746 	while (k--) {
1747 		/* change all left-side indexes */
1748 		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1749 			break;
1750 		err = ext4_ext_get_access(handle, inode, path + k);
1751 		if (err)
1752 			goto clean;
1753 		path[k].p_idx->ei_block = border;
1754 		err = ext4_ext_dirty(handle, inode, path + k);
1755 		if (err)
1756 			goto clean;
1757 	}
1758 	return 0;
1759 
1760 clean:
1761 	/*
1762 	 * The path[k].p_bh is either unmodified or with no verified bit
1763 	 * set (see ext4_ext_get_access()). So just clear the verified bit
1764 	 * of the successfully modified extents buffers, which will force
1765 	 * these extents to be checked to avoid using inconsistent data.
1766 	 */
1767 	while (++k < depth)
1768 		clear_buffer_verified(path[k].p_bh);
1769 
1770 	return err;
1771 }
1772 
1773 static int ext4_can_extents_be_merged(struct inode *inode,
1774 				      struct ext4_extent *ex1,
1775 				      struct ext4_extent *ex2)
1776 {
1777 	unsigned short ext1_ee_len, ext2_ee_len;
1778 
1779 	if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
1780 		return 0;
1781 
1782 	ext1_ee_len = ext4_ext_get_actual_len(ex1);
1783 	ext2_ee_len = ext4_ext_get_actual_len(ex2);
1784 
1785 	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1786 			le32_to_cpu(ex2->ee_block))
1787 		return 0;
1788 
1789 	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
1790 		return 0;
1791 
1792 	if (ext4_ext_is_unwritten(ex1) &&
1793 	    ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)
1794 		return 0;
1795 #ifdef AGGRESSIVE_TEST
1796 	if (ext1_ee_len >= 4)
1797 		return 0;
1798 #endif
1799 
1800 	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1801 		return 1;
1802 	return 0;
1803 }
1804 
1805 /*
1806  * This function tries to merge the "ex" extent to the next extent in the tree.
1807  * It always tries to merge towards right. If you want to merge towards
1808  * left, pass "ex - 1" as argument instead of "ex".
1809  * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1810  * 1 if they got merged.
1811  */
1812 static int ext4_ext_try_to_merge_right(struct inode *inode,
1813 				 struct ext4_ext_path *path,
1814 				 struct ext4_extent *ex)
1815 {
1816 	struct ext4_extent_header *eh;
1817 	unsigned int depth, len;
1818 	int merge_done = 0, unwritten;
1819 
1820 	depth = ext_depth(inode);
1821 	BUG_ON(path[depth].p_hdr == NULL);
1822 	eh = path[depth].p_hdr;
1823 
1824 	while (ex < EXT_LAST_EXTENT(eh)) {
1825 		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1826 			break;
1827 		/* merge with next extent! */
1828 		unwritten = ext4_ext_is_unwritten(ex);
1829 		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1830 				+ ext4_ext_get_actual_len(ex + 1));
1831 		if (unwritten)
1832 			ext4_ext_mark_unwritten(ex);
1833 
1834 		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1835 			len = (EXT_LAST_EXTENT(eh) - ex - 1)
1836 				* sizeof(struct ext4_extent);
1837 			memmove(ex + 1, ex + 2, len);
1838 		}
1839 		le16_add_cpu(&eh->eh_entries, -1);
1840 		merge_done = 1;
1841 		WARN_ON(eh->eh_entries == 0);
1842 		if (!eh->eh_entries)
1843 			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
1844 	}
1845 
1846 	return merge_done;
1847 }
1848 
1849 /*
1850  * This function does a very simple check to see if we can collapse
1851  * an extent tree with a single extent tree leaf block into the inode.
1852  */
1853 static void ext4_ext_try_to_merge_up(handle_t *handle,
1854 				     struct inode *inode,
1855 				     struct ext4_ext_path *path)
1856 {
1857 	size_t s;
1858 	unsigned max_root = ext4_ext_space_root(inode, 0);
1859 	ext4_fsblk_t blk;
1860 
1861 	if ((path[0].p_depth != 1) ||
1862 	    (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
1863 	    (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
1864 		return;
1865 
1866 	/*
1867 	 * We need to modify the block allocation bitmap and the block
1868 	 * group descriptor to release the extent tree block.  If we
1869 	 * can't get the journal credits, give up.
1870 	 */
1871 	if (ext4_journal_extend(handle, 2,
1872 			ext4_free_metadata_revoke_credits(inode->i_sb, 1)))
1873 		return;
1874 
1875 	/*
1876 	 * Copy the extent data up to the inode
1877 	 */
1878 	blk = ext4_idx_pblock(path[0].p_idx);
1879 	s = le16_to_cpu(path[1].p_hdr->eh_entries) *
1880 		sizeof(struct ext4_extent_idx);
1881 	s += sizeof(struct ext4_extent_header);
1882 
1883 	path[1].p_maxdepth = path[0].p_maxdepth;
1884 	memcpy(path[0].p_hdr, path[1].p_hdr, s);
1885 	path[0].p_depth = 0;
1886 	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
1887 		(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
1888 	path[0].p_hdr->eh_max = cpu_to_le16(max_root);
1889 
1890 	ext4_ext_path_brelse(path + 1);
1891 	ext4_free_blocks(handle, inode, NULL, blk, 1,
1892 			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
1893 }
1894 
1895 /*
1896  * This function tries to merge the @ex extent to neighbours in the tree, then
1897  * tries to collapse the extent tree into the inode.
1898  */
1899 static void ext4_ext_try_to_merge(handle_t *handle,
1900 				  struct inode *inode,
1901 				  struct ext4_ext_path *path,
1902 				  struct ext4_extent *ex)
1903 {
1904 	struct ext4_extent_header *eh;
1905 	unsigned int depth;
1906 	int merge_done = 0;
1907 
1908 	depth = ext_depth(inode);
1909 	BUG_ON(path[depth].p_hdr == NULL);
1910 	eh = path[depth].p_hdr;
1911 
1912 	if (ex > EXT_FIRST_EXTENT(eh))
1913 		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1914 
1915 	if (!merge_done)
1916 		(void) ext4_ext_try_to_merge_right(inode, path, ex);
1917 
1918 	ext4_ext_try_to_merge_up(handle, inode, path);
1919 }
1920 
1921 /*
1922  * check if a portion of the "newext" extent overlaps with an
1923  * existing extent.
1924  *
1925  * If there is an overlap discovered, it updates the length of the newext
1926  * such that there will be no overlap, and then returns 1.
1927  * If there is no overlap found, it returns 0.
1928  */
1929 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1930 					   struct inode *inode,
1931 					   struct ext4_extent *newext,
1932 					   struct ext4_ext_path *path)
1933 {
1934 	ext4_lblk_t b1, b2;
1935 	unsigned int depth, len1;
1936 	unsigned int ret = 0;
1937 
1938 	b1 = le32_to_cpu(newext->ee_block);
1939 	len1 = ext4_ext_get_actual_len(newext);
1940 	depth = ext_depth(inode);
1941 	if (!path[depth].p_ext)
1942 		goto out;
1943 	b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
1944 
1945 	/*
1946 	 * get the next allocated block if the extent in the path
1947 	 * is before the requested block(s)
1948 	 */
1949 	if (b2 < b1) {
1950 		b2 = ext4_ext_next_allocated_block(path);
1951 		if (b2 == EXT_MAX_BLOCKS)
1952 			goto out;
1953 		b2 = EXT4_LBLK_CMASK(sbi, b2);
1954 	}
1955 
1956 	/* check for wrap through zero on extent logical start block*/
1957 	if (b1 + len1 < b1) {
1958 		len1 = EXT_MAX_BLOCKS - b1;
1959 		newext->ee_len = cpu_to_le16(len1);
1960 		ret = 1;
1961 	}
1962 
1963 	/* check for overlap */
1964 	if (b1 + len1 > b2) {
1965 		newext->ee_len = cpu_to_le16(b2 - b1);
1966 		ret = 1;
1967 	}
1968 out:
1969 	return ret;
1970 }
1971 
1972 /*
1973  * ext4_ext_insert_extent:
1974  * tries to merge requested extent into the existing extent or
1975  * inserts requested extent as new one into the tree,
1976  * creating new leaf in the no-space case.
1977  */
1978 struct ext4_ext_path *
1979 ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1980 		       struct ext4_ext_path *path,
1981 		       struct ext4_extent *newext, int gb_flags)
1982 {
1983 	struct ext4_extent_header *eh;
1984 	struct ext4_extent *ex, *fex;
1985 	struct ext4_extent *nearex; /* nearest extent */
1986 	int depth, len, err = 0;
1987 	ext4_lblk_t next;
1988 	int mb_flags = 0, unwritten;
1989 
1990 	if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1991 		mb_flags |= EXT4_MB_DELALLOC_RESERVED;
1992 	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1993 		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1994 		err = -EFSCORRUPTED;
1995 		goto errout;
1996 	}
1997 	depth = ext_depth(inode);
1998 	ex = path[depth].p_ext;
1999 	eh = path[depth].p_hdr;
2000 	if (unlikely(path[depth].p_hdr == NULL)) {
2001 		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2002 		err = -EFSCORRUPTED;
2003 		goto errout;
2004 	}
2005 
2006 	/* try to insert block into found extent and return */
2007 	if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
2008 
2009 		/*
2010 		 * Try to see whether we should rather test the extent on
2011 		 * right from ex, or from the left of ex. This is because
2012 		 * ext4_find_extent() can return either extent on the
2013 		 * left, or on the right from the searched position. This
2014 		 * will make merging more effective.
2015 		 */
2016 		if (ex < EXT_LAST_EXTENT(eh) &&
2017 		    (le32_to_cpu(ex->ee_block) +
2018 		    ext4_ext_get_actual_len(ex) <
2019 		    le32_to_cpu(newext->ee_block))) {
2020 			ex += 1;
2021 			goto prepend;
2022 		} else if ((ex > EXT_FIRST_EXTENT(eh)) &&
2023 			   (le32_to_cpu(newext->ee_block) +
2024 			   ext4_ext_get_actual_len(newext) <
2025 			   le32_to_cpu(ex->ee_block)))
2026 			ex -= 1;
2027 
2028 		/* Try to append newex to the ex */
2029 		if (ext4_can_extents_be_merged(inode, ex, newext)) {
2030 			ext_debug(inode, "append [%d]%d block to %u:[%d]%d"
2031 				  "(from %llu)\n",
2032 				  ext4_ext_is_unwritten(newext),
2033 				  ext4_ext_get_actual_len(newext),
2034 				  le32_to_cpu(ex->ee_block),
2035 				  ext4_ext_is_unwritten(ex),
2036 				  ext4_ext_get_actual_len(ex),
2037 				  ext4_ext_pblock(ex));
2038 			err = ext4_ext_get_access(handle, inode,
2039 						  path + depth);
2040 			if (err)
2041 				goto errout;
2042 			unwritten = ext4_ext_is_unwritten(ex);
2043 			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
2044 					+ ext4_ext_get_actual_len(newext));
2045 			if (unwritten)
2046 				ext4_ext_mark_unwritten(ex);
2047 			nearex = ex;
2048 			goto merge;
2049 		}
2050 
2051 prepend:
2052 		/* Try to prepend newex to the ex */
2053 		if (ext4_can_extents_be_merged(inode, newext, ex)) {
2054 			ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d"
2055 				  "(from %llu)\n",
2056 				  le32_to_cpu(newext->ee_block),
2057 				  ext4_ext_is_unwritten(newext),
2058 				  ext4_ext_get_actual_len(newext),
2059 				  le32_to_cpu(ex->ee_block),
2060 				  ext4_ext_is_unwritten(ex),
2061 				  ext4_ext_get_actual_len(ex),
2062 				  ext4_ext_pblock(ex));
2063 			err = ext4_ext_get_access(handle, inode,
2064 						  path + depth);
2065 			if (err)
2066 				goto errout;
2067 
2068 			unwritten = ext4_ext_is_unwritten(ex);
2069 			ex->ee_block = newext->ee_block;
2070 			ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
2071 			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
2072 					+ ext4_ext_get_actual_len(newext));
2073 			if (unwritten)
2074 				ext4_ext_mark_unwritten(ex);
2075 			nearex = ex;
2076 			goto merge;
2077 		}
2078 	}
2079 
2080 	depth = ext_depth(inode);
2081 	eh = path[depth].p_hdr;
2082 	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
2083 		goto has_space;
2084 
2085 	/* probably next leaf has space for us? */
2086 	fex = EXT_LAST_EXTENT(eh);
2087 	next = EXT_MAX_BLOCKS;
2088 	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
2089 		next = ext4_ext_next_leaf_block(path);
2090 	if (next != EXT_MAX_BLOCKS) {
2091 		struct ext4_ext_path *npath;
2092 
2093 		ext_debug(inode, "next leaf block - %u\n", next);
2094 		npath = ext4_find_extent(inode, next, NULL, gb_flags);
2095 		if (IS_ERR(npath)) {
2096 			err = PTR_ERR(npath);
2097 			goto errout;
2098 		}
2099 		BUG_ON(npath->p_depth != path->p_depth);
2100 		eh = npath[depth].p_hdr;
2101 		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
2102 			ext_debug(inode, "next leaf isn't full(%d)\n",
2103 				  le16_to_cpu(eh->eh_entries));
2104 			ext4_free_ext_path(path);
2105 			path = npath;
2106 			goto has_space;
2107 		}
2108 		ext_debug(inode, "next leaf has no free space(%d,%d)\n",
2109 			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
2110 		ext4_free_ext_path(npath);
2111 	}
2112 
2113 	/*
2114 	 * There is no free space in the found leaf.
2115 	 * We're gonna add a new leaf in the tree.
2116 	 */
2117 	if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
2118 		mb_flags |= EXT4_MB_USE_RESERVED;
2119 	path = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
2120 					path, newext);
2121 	if (IS_ERR(path))
2122 		return path;
2123 	depth = ext_depth(inode);
2124 	eh = path[depth].p_hdr;
2125 
2126 has_space:
2127 	nearex = path[depth].p_ext;
2128 
2129 	err = ext4_ext_get_access(handle, inode, path + depth);
2130 	if (err)
2131 		goto errout;
2132 
2133 	if (!nearex) {
2134 		/* there is no extent in this leaf, create first one */
2135 		ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n",
2136 				le32_to_cpu(newext->ee_block),
2137 				ext4_ext_pblock(newext),
2138 				ext4_ext_is_unwritten(newext),
2139 				ext4_ext_get_actual_len(newext));
2140 		nearex = EXT_FIRST_EXTENT(eh);
2141 	} else {
2142 		if (le32_to_cpu(newext->ee_block)
2143 			   > le32_to_cpu(nearex->ee_block)) {
2144 			/* Insert after */
2145 			ext_debug(inode, "insert %u:%llu:[%d]%d before: "
2146 					"nearest %p\n",
2147 					le32_to_cpu(newext->ee_block),
2148 					ext4_ext_pblock(newext),
2149 					ext4_ext_is_unwritten(newext),
2150 					ext4_ext_get_actual_len(newext),
2151 					nearex);
2152 			nearex++;
2153 		} else {
2154 			/* Insert before */
2155 			BUG_ON(newext->ee_block == nearex->ee_block);
2156 			ext_debug(inode, "insert %u:%llu:[%d]%d after: "
2157 					"nearest %p\n",
2158 					le32_to_cpu(newext->ee_block),
2159 					ext4_ext_pblock(newext),
2160 					ext4_ext_is_unwritten(newext),
2161 					ext4_ext_get_actual_len(newext),
2162 					nearex);
2163 		}
2164 		len = EXT_LAST_EXTENT(eh) - nearex + 1;
2165 		if (len > 0) {
2166 			ext_debug(inode, "insert %u:%llu:[%d]%d: "
2167 					"move %d extents from 0x%p to 0x%p\n",
2168 					le32_to_cpu(newext->ee_block),
2169 					ext4_ext_pblock(newext),
2170 					ext4_ext_is_unwritten(newext),
2171 					ext4_ext_get_actual_len(newext),
2172 					len, nearex, nearex + 1);
2173 			memmove(nearex + 1, nearex,
2174 				len * sizeof(struct ext4_extent));
2175 		}
2176 	}
2177 
2178 	le16_add_cpu(&eh->eh_entries, 1);
2179 	path[depth].p_ext = nearex;
2180 	nearex->ee_block = newext->ee_block;
2181 	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
2182 	nearex->ee_len = newext->ee_len;
2183 
2184 merge:
2185 	/* try to merge extents */
2186 	if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
2187 		ext4_ext_try_to_merge(handle, inode, path, nearex);
2188 
2189 	/* time to correct all indexes above */
2190 	err = ext4_ext_correct_indexes(handle, inode, path);
2191 	if (err)
2192 		goto errout;
2193 
2194 	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2195 	if (err)
2196 		goto errout;
2197 
2198 	return path;
2199 
2200 errout:
2201 	ext4_free_ext_path(path);
2202 	return ERR_PTR(err);
2203 }
2204 
2205 static int ext4_fill_es_cache_info(struct inode *inode,
2206 				   ext4_lblk_t block, ext4_lblk_t num,
2207 				   struct fiemap_extent_info *fieinfo)
2208 {
2209 	ext4_lblk_t next, end = block + num - 1;
2210 	struct extent_status es;
2211 	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
2212 	unsigned int flags;
2213 	int err;
2214 
2215 	while (block <= end) {
2216 		next = 0;
2217 		flags = 0;
2218 		if (!ext4_es_lookup_extent(inode, block, &next, &es))
2219 			break;
2220 		if (ext4_es_is_unwritten(&es))
2221 			flags |= FIEMAP_EXTENT_UNWRITTEN;
2222 		if (ext4_es_is_delayed(&es))
2223 			flags |= (FIEMAP_EXTENT_DELALLOC |
2224 				  FIEMAP_EXTENT_UNKNOWN);
2225 		if (ext4_es_is_hole(&es))
2226 			flags |= EXT4_FIEMAP_EXTENT_HOLE;
2227 		if (next == 0)
2228 			flags |= FIEMAP_EXTENT_LAST;
2229 		if (flags & (FIEMAP_EXTENT_DELALLOC|
2230 			     EXT4_FIEMAP_EXTENT_HOLE))
2231 			es.es_pblk = 0;
2232 		else
2233 			es.es_pblk = ext4_es_pblock(&es);
2234 		err = fiemap_fill_next_extent(fieinfo,
2235 				(__u64)es.es_lblk << blksize_bits,
2236 				(__u64)es.es_pblk << blksize_bits,
2237 				(__u64)es.es_len << blksize_bits,
2238 				flags);
2239 		if (next == 0)
2240 			break;
2241 		block = next;
2242 		if (err < 0)
2243 			return err;
2244 		if (err == 1)
2245 			return 0;
2246 	}
2247 	return 0;
2248 }
2249 
2250 
2251 /*
2252  * ext4_ext_find_hole - find hole around given block according to the given path
2253  * @inode:	inode we lookup in
2254  * @path:	path in extent tree to @lblk
2255  * @lblk:	pointer to logical block around which we want to determine hole
2256  *
2257  * Determine hole length (and start if easily possible) around given logical
2258  * block. We don't try too hard to find the beginning of the hole but @path
2259  * actually points to extent before @lblk, we provide it.
2260  *
2261  * The function returns the length of a hole starting at @lblk. We update @lblk
2262  * to the beginning of the hole if we managed to find it.
2263  */
2264 static ext4_lblk_t ext4_ext_find_hole(struct inode *inode,
2265 				      struct ext4_ext_path *path,
2266 				      ext4_lblk_t *lblk)
2267 {
2268 	int depth = ext_depth(inode);
2269 	struct ext4_extent *ex;
2270 	ext4_lblk_t len;
2271 
2272 	ex = path[depth].p_ext;
2273 	if (ex == NULL) {
2274 		/* there is no extent yet, so gap is [0;-] */
2275 		*lblk = 0;
2276 		len = EXT_MAX_BLOCKS;
2277 	} else if (*lblk < le32_to_cpu(ex->ee_block)) {
2278 		len = le32_to_cpu(ex->ee_block) - *lblk;
2279 	} else if (*lblk >= le32_to_cpu(ex->ee_block)
2280 			+ ext4_ext_get_actual_len(ex)) {
2281 		ext4_lblk_t next;
2282 
2283 		*lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
2284 		next = ext4_ext_next_allocated_block(path);
2285 		BUG_ON(next == *lblk);
2286 		len = next - *lblk;
2287 	} else {
2288 		BUG();
2289 	}
2290 	return len;
2291 }
2292 
2293 /*
2294  * ext4_ext_rm_idx:
2295  * removes index from the index block.
2296  */
2297 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2298 			struct ext4_ext_path *path, int depth)
2299 {
2300 	int err;
2301 	ext4_fsblk_t leaf;
2302 	int k = depth - 1;
2303 
2304 	/* free index block */
2305 	leaf = ext4_idx_pblock(path[k].p_idx);
2306 	if (unlikely(path[k].p_hdr->eh_entries == 0)) {
2307 		EXT4_ERROR_INODE(inode, "path[%d].p_hdr->eh_entries == 0", k);
2308 		return -EFSCORRUPTED;
2309 	}
2310 	err = ext4_ext_get_access(handle, inode, path + k);
2311 	if (err)
2312 		return err;
2313 
2314 	if (path[k].p_idx != EXT_LAST_INDEX(path[k].p_hdr)) {
2315 		int len = EXT_LAST_INDEX(path[k].p_hdr) - path[k].p_idx;
2316 		len *= sizeof(struct ext4_extent_idx);
2317 		memmove(path[k].p_idx, path[k].p_idx + 1, len);
2318 	}
2319 
2320 	le16_add_cpu(&path[k].p_hdr->eh_entries, -1);
2321 	err = ext4_ext_dirty(handle, inode, path + k);
2322 	if (err)
2323 		return err;
2324 	ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf);
2325 	trace_ext4_ext_rm_idx(inode, leaf);
2326 
2327 	ext4_free_blocks(handle, inode, NULL, leaf, 1,
2328 			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2329 
2330 	while (--k >= 0) {
2331 		if (path[k + 1].p_idx != EXT_FIRST_INDEX(path[k + 1].p_hdr))
2332 			break;
2333 		err = ext4_ext_get_access(handle, inode, path + k);
2334 		if (err)
2335 			goto clean;
2336 		path[k].p_idx->ei_block = path[k + 1].p_idx->ei_block;
2337 		err = ext4_ext_dirty(handle, inode, path + k);
2338 		if (err)
2339 			goto clean;
2340 	}
2341 	return 0;
2342 
2343 clean:
2344 	/*
2345 	 * The path[k].p_bh is either unmodified or with no verified bit
2346 	 * set (see ext4_ext_get_access()). So just clear the verified bit
2347 	 * of the successfully modified extents buffers, which will force
2348 	 * these extents to be checked to avoid using inconsistent data.
2349 	 */
2350 	while (++k < depth)
2351 		clear_buffer_verified(path[k].p_bh);
2352 
2353 	return err;
2354 }
2355 
2356 /*
2357  * ext4_ext_calc_credits_for_single_extent:
2358  * This routine returns max. credits that needed to insert an extent
2359  * to the extent tree.
2360  * When pass the actual path, the caller should calculate credits
2361  * under i_data_sem.
2362  */
2363 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2364 						struct ext4_ext_path *path)
2365 {
2366 	if (path) {
2367 		int depth = ext_depth(inode);
2368 		int ret = 0;
2369 
2370 		/* probably there is space in leaf? */
2371 		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2372 				< le16_to_cpu(path[depth].p_hdr->eh_max)) {
2373 
2374 			/*
2375 			 *  There are some space in the leaf tree, no
2376 			 *  need to account for leaf block credit
2377 			 *
2378 			 *  bitmaps and block group descriptor blocks
2379 			 *  and other metadata blocks still need to be
2380 			 *  accounted.
2381 			 */
2382 			/* 1 bitmap, 1 block group descriptor */
2383 			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2384 			return ret;
2385 		}
2386 	}
2387 
2388 	return ext4_chunk_trans_blocks(inode, nrblocks);
2389 }
2390 
2391 /*
2392  * How many index/leaf blocks need to change/allocate to add @extents extents?
2393  *
2394  * If we add a single extent, then in the worse case, each tree level
2395  * index/leaf need to be changed in case of the tree split.
2396  *
2397  * If more extents are inserted, they could cause the whole tree split more
2398  * than once, but this is really rare.
2399  */
2400 int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
2401 {
2402 	int index;
2403 	int depth;
2404 
2405 	/* If we are converting the inline data, only one is needed here. */
2406 	if (ext4_has_inline_data(inode))
2407 		return 1;
2408 
2409 	depth = ext_depth(inode);
2410 
2411 	if (extents <= 1)
2412 		index = depth * 2;
2413 	else
2414 		index = depth * 3;
2415 
2416 	return index;
2417 }
2418 
2419 static inline int get_default_free_blocks_flags(struct inode *inode)
2420 {
2421 	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
2422 	    ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
2423 		return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
2424 	else if (ext4_should_journal_data(inode))
2425 		return EXT4_FREE_BLOCKS_FORGET;
2426 	return 0;
2427 }
2428 
2429 /*
2430  * ext4_rereserve_cluster - increment the reserved cluster count when
2431  *                          freeing a cluster with a pending reservation
2432  *
2433  * @inode - file containing the cluster
2434  * @lblk - logical block in cluster to be reserved
2435  *
2436  * Increments the reserved cluster count and adjusts quota in a bigalloc
2437  * file system when freeing a partial cluster containing at least one
2438  * delayed and unwritten block.  A partial cluster meeting that
2439  * requirement will have a pending reservation.  If so, the
2440  * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to
2441  * defer reserved and allocated space accounting to a subsequent call
2442  * to this function.
2443  */
2444 static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk)
2445 {
2446 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2447 	struct ext4_inode_info *ei = EXT4_I(inode);
2448 
2449 	dquot_reclaim_block(inode, EXT4_C2B(sbi, 1));
2450 
2451 	spin_lock(&ei->i_block_reservation_lock);
2452 	ei->i_reserved_data_blocks++;
2453 	percpu_counter_add(&sbi->s_dirtyclusters_counter, 1);
2454 	spin_unlock(&ei->i_block_reservation_lock);
2455 
2456 	percpu_counter_add(&sbi->s_freeclusters_counter, 1);
2457 	ext4_remove_pending(inode, lblk);
2458 }
2459 
2460 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2461 			      struct ext4_extent *ex,
2462 			      struct partial_cluster *partial,
2463 			      ext4_lblk_t from, ext4_lblk_t to)
2464 {
2465 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2466 	unsigned short ee_len = ext4_ext_get_actual_len(ex);
2467 	ext4_fsblk_t last_pblk, pblk;
2468 	ext4_lblk_t num;
2469 	int flags;
2470 
2471 	/* only extent tail removal is allowed */
2472 	if (from < le32_to_cpu(ex->ee_block) ||
2473 	    to != le32_to_cpu(ex->ee_block) + ee_len - 1) {
2474 		ext4_error(sbi->s_sb,
2475 			   "strange request: removal(2) %u-%u from %u:%u",
2476 			   from, to, le32_to_cpu(ex->ee_block), ee_len);
2477 		return 0;
2478 	}
2479 
2480 #ifdef EXTENTS_STATS
2481 	spin_lock(&sbi->s_ext_stats_lock);
2482 	sbi->s_ext_blocks += ee_len;
2483 	sbi->s_ext_extents++;
2484 	if (ee_len < sbi->s_ext_min)
2485 		sbi->s_ext_min = ee_len;
2486 	if (ee_len > sbi->s_ext_max)
2487 		sbi->s_ext_max = ee_len;
2488 	if (ext_depth(inode) > sbi->s_depth_max)
2489 		sbi->s_depth_max = ext_depth(inode);
2490 	spin_unlock(&sbi->s_ext_stats_lock);
2491 #endif
2492 
2493 	trace_ext4_remove_blocks(inode, ex, from, to, partial);
2494 
2495 	/*
2496 	 * if we have a partial cluster, and it's different from the
2497 	 * cluster of the last block in the extent, we free it
2498 	 */
2499 	last_pblk = ext4_ext_pblock(ex) + ee_len - 1;
2500 
2501 	if (partial->state != initial &&
2502 	    partial->pclu != EXT4_B2C(sbi, last_pblk)) {
2503 		if (partial->state == tofree) {
2504 			flags = get_default_free_blocks_flags(inode);
2505 			if (ext4_is_pending(inode, partial->lblk))
2506 				flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2507 			ext4_free_blocks(handle, inode, NULL,
2508 					 EXT4_C2B(sbi, partial->pclu),
2509 					 sbi->s_cluster_ratio, flags);
2510 			if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2511 				ext4_rereserve_cluster(inode, partial->lblk);
2512 		}
2513 		partial->state = initial;
2514 	}
2515 
2516 	num = le32_to_cpu(ex->ee_block) + ee_len - from;
2517 	pblk = ext4_ext_pblock(ex) + ee_len - num;
2518 
2519 	/*
2520 	 * We free the partial cluster at the end of the extent (if any),
2521 	 * unless the cluster is used by another extent (partial_cluster
2522 	 * state is nofree).  If a partial cluster exists here, it must be
2523 	 * shared with the last block in the extent.
2524 	 */
2525 	flags = get_default_free_blocks_flags(inode);
2526 
2527 	/* partial, left end cluster aligned, right end unaligned */
2528 	if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) &&
2529 	    (EXT4_LBLK_CMASK(sbi, to) >= from) &&
2530 	    (partial->state != nofree)) {
2531 		if (ext4_is_pending(inode, to))
2532 			flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2533 		ext4_free_blocks(handle, inode, NULL,
2534 				 EXT4_PBLK_CMASK(sbi, last_pblk),
2535 				 sbi->s_cluster_ratio, flags);
2536 		if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2537 			ext4_rereserve_cluster(inode, to);
2538 		partial->state = initial;
2539 		flags = get_default_free_blocks_flags(inode);
2540 	}
2541 
2542 	flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
2543 
2544 	/*
2545 	 * For bigalloc file systems, we never free a partial cluster
2546 	 * at the beginning of the extent.  Instead, we check to see if we
2547 	 * need to free it on a subsequent call to ext4_remove_blocks,
2548 	 * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
2549 	 */
2550 	flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2551 	ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2552 
2553 	/* reset the partial cluster if we've freed past it */
2554 	if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk))
2555 		partial->state = initial;
2556 
2557 	/*
2558 	 * If we've freed the entire extent but the beginning is not left
2559 	 * cluster aligned and is not marked as ineligible for freeing we
2560 	 * record the partial cluster at the beginning of the extent.  It
2561 	 * wasn't freed by the preceding ext4_free_blocks() call, and we
2562 	 * need to look farther to the left to determine if it's to be freed
2563 	 * (not shared with another extent). Else, reset the partial
2564 	 * cluster - we're either  done freeing or the beginning of the
2565 	 * extent is left cluster aligned.
2566 	 */
2567 	if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) {
2568 		if (partial->state == initial) {
2569 			partial->pclu = EXT4_B2C(sbi, pblk);
2570 			partial->lblk = from;
2571 			partial->state = tofree;
2572 		}
2573 	} else {
2574 		partial->state = initial;
2575 	}
2576 
2577 	return 0;
2578 }
2579 
2580 /*
2581  * ext4_ext_rm_leaf() Removes the extents associated with the
2582  * blocks appearing between "start" and "end".  Both "start"
2583  * and "end" must appear in the same extent or EIO is returned.
2584  *
2585  * @handle: The journal handle
2586  * @inode:  The files inode
2587  * @path:   The path to the leaf
2588  * @partial_cluster: The cluster which we'll have to free if all extents
2589  *                   has been released from it.  However, if this value is
2590  *                   negative, it's a cluster just to the right of the
2591  *                   punched region and it must not be freed.
2592  * @start:  The first block to remove
2593  * @end:   The last block to remove
2594  */
2595 static int
2596 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2597 		 struct ext4_ext_path *path,
2598 		 struct partial_cluster *partial,
2599 		 ext4_lblk_t start, ext4_lblk_t end)
2600 {
2601 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2602 	int err = 0, correct_index = 0;
2603 	int depth = ext_depth(inode), credits, revoke_credits;
2604 	struct ext4_extent_header *eh;
2605 	ext4_lblk_t a, b;
2606 	unsigned num;
2607 	ext4_lblk_t ex_ee_block;
2608 	unsigned short ex_ee_len;
2609 	unsigned unwritten = 0;
2610 	struct ext4_extent *ex;
2611 	ext4_fsblk_t pblk;
2612 
2613 	/* the header must be checked already in ext4_ext_remove_space() */
2614 	ext_debug(inode, "truncate since %u in leaf to %u\n", start, end);
2615 	if (!path[depth].p_hdr)
2616 		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2617 	eh = path[depth].p_hdr;
2618 	if (unlikely(path[depth].p_hdr == NULL)) {
2619 		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2620 		return -EFSCORRUPTED;
2621 	}
2622 	/* find where to start removing */
2623 	ex = path[depth].p_ext;
2624 	if (!ex)
2625 		ex = EXT_LAST_EXTENT(eh);
2626 
2627 	ex_ee_block = le32_to_cpu(ex->ee_block);
2628 	ex_ee_len = ext4_ext_get_actual_len(ex);
2629 
2630 	trace_ext4_ext_rm_leaf(inode, start, ex, partial);
2631 
2632 	while (ex >= EXT_FIRST_EXTENT(eh) &&
2633 			ex_ee_block + ex_ee_len > start) {
2634 
2635 		if (ext4_ext_is_unwritten(ex))
2636 			unwritten = 1;
2637 		else
2638 			unwritten = 0;
2639 
2640 		ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block,
2641 			  unwritten, ex_ee_len);
2642 		path[depth].p_ext = ex;
2643 
2644 		a = max(ex_ee_block, start);
2645 		b = min(ex_ee_block + ex_ee_len - 1, end);
2646 
2647 		ext_debug(inode, "  border %u:%u\n", a, b);
2648 
2649 		/* If this extent is beyond the end of the hole, skip it */
2650 		if (end < ex_ee_block) {
2651 			/*
2652 			 * We're going to skip this extent and move to another,
2653 			 * so note that its first cluster is in use to avoid
2654 			 * freeing it when removing blocks.  Eventually, the
2655 			 * right edge of the truncated/punched region will
2656 			 * be just to the left.
2657 			 */
2658 			if (sbi->s_cluster_ratio > 1) {
2659 				pblk = ext4_ext_pblock(ex);
2660 				partial->pclu = EXT4_B2C(sbi, pblk);
2661 				partial->state = nofree;
2662 			}
2663 			ex--;
2664 			ex_ee_block = le32_to_cpu(ex->ee_block);
2665 			ex_ee_len = ext4_ext_get_actual_len(ex);
2666 			continue;
2667 		} else if (b != ex_ee_block + ex_ee_len - 1) {
2668 			EXT4_ERROR_INODE(inode,
2669 					 "can not handle truncate %u:%u "
2670 					 "on extent %u:%u",
2671 					 start, end, ex_ee_block,
2672 					 ex_ee_block + ex_ee_len - 1);
2673 			err = -EFSCORRUPTED;
2674 			goto out;
2675 		} else if (a != ex_ee_block) {
2676 			/* remove tail of the extent */
2677 			num = a - ex_ee_block;
2678 		} else {
2679 			/* remove whole extent: excellent! */
2680 			num = 0;
2681 		}
2682 		/*
2683 		 * 3 for leaf, sb, and inode plus 2 (bmap and group
2684 		 * descriptor) for each block group; assume two block
2685 		 * groups plus ex_ee_len/blocks_per_block_group for
2686 		 * the worst case
2687 		 */
2688 		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2689 		if (ex == EXT_FIRST_EXTENT(eh)) {
2690 			correct_index = 1;
2691 			credits += (ext_depth(inode)) + 1;
2692 		}
2693 		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2694 		/*
2695 		 * We may end up freeing some index blocks and data from the
2696 		 * punched range. Note that partial clusters are accounted for
2697 		 * by ext4_free_data_revoke_credits().
2698 		 */
2699 		revoke_credits =
2700 			ext4_free_metadata_revoke_credits(inode->i_sb,
2701 							  ext_depth(inode)) +
2702 			ext4_free_data_revoke_credits(inode, b - a + 1);
2703 
2704 		err = ext4_datasem_ensure_credits(handle, inode, credits,
2705 						  credits, revoke_credits);
2706 		if (err) {
2707 			if (err > 0)
2708 				err = -EAGAIN;
2709 			goto out;
2710 		}
2711 
2712 		err = ext4_ext_get_access(handle, inode, path + depth);
2713 		if (err)
2714 			goto out;
2715 
2716 		err = ext4_remove_blocks(handle, inode, ex, partial, a, b);
2717 		if (err)
2718 			goto out;
2719 
2720 		if (num == 0)
2721 			/* this extent is removed; mark slot entirely unused */
2722 			ext4_ext_store_pblock(ex, 0);
2723 
2724 		ex->ee_len = cpu_to_le16(num);
2725 		/*
2726 		 * Do not mark unwritten if all the blocks in the
2727 		 * extent have been removed.
2728 		 */
2729 		if (unwritten && num)
2730 			ext4_ext_mark_unwritten(ex);
2731 		/*
2732 		 * If the extent was completely released,
2733 		 * we need to remove it from the leaf
2734 		 */
2735 		if (num == 0) {
2736 			if (end != EXT_MAX_BLOCKS - 1) {
2737 				/*
2738 				 * For hole punching, we need to scoot all the
2739 				 * extents up when an extent is removed so that
2740 				 * we dont have blank extents in the middle
2741 				 */
2742 				memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2743 					sizeof(struct ext4_extent));
2744 
2745 				/* Now get rid of the one at the end */
2746 				memset(EXT_LAST_EXTENT(eh), 0,
2747 					sizeof(struct ext4_extent));
2748 			}
2749 			le16_add_cpu(&eh->eh_entries, -1);
2750 		}
2751 
2752 		err = ext4_ext_dirty(handle, inode, path + depth);
2753 		if (err)
2754 			goto out;
2755 
2756 		ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num,
2757 				ext4_ext_pblock(ex));
2758 		ex--;
2759 		ex_ee_block = le32_to_cpu(ex->ee_block);
2760 		ex_ee_len = ext4_ext_get_actual_len(ex);
2761 	}
2762 
2763 	if (correct_index && eh->eh_entries)
2764 		err = ext4_ext_correct_indexes(handle, inode, path);
2765 
2766 	/*
2767 	 * If there's a partial cluster and at least one extent remains in
2768 	 * the leaf, free the partial cluster if it isn't shared with the
2769 	 * current extent.  If it is shared with the current extent
2770 	 * we reset the partial cluster because we've reached the start of the
2771 	 * truncated/punched region and we're done removing blocks.
2772 	 */
2773 	if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) {
2774 		pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
2775 		if (partial->pclu != EXT4_B2C(sbi, pblk)) {
2776 			int flags = get_default_free_blocks_flags(inode);
2777 
2778 			if (ext4_is_pending(inode, partial->lblk))
2779 				flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
2780 			ext4_free_blocks(handle, inode, NULL,
2781 					 EXT4_C2B(sbi, partial->pclu),
2782 					 sbi->s_cluster_ratio, flags);
2783 			if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
2784 				ext4_rereserve_cluster(inode, partial->lblk);
2785 		}
2786 		partial->state = initial;
2787 	}
2788 
2789 	/* if this leaf is free, then we should
2790 	 * remove it from index block above */
2791 	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2792 		err = ext4_ext_rm_idx(handle, inode, path, depth);
2793 
2794 out:
2795 	return err;
2796 }
2797 
2798 /*
2799  * ext4_ext_more_to_rm:
2800  * returns 1 if current index has to be freed (even partial)
2801  */
2802 static int
2803 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2804 {
2805 	BUG_ON(path->p_idx == NULL);
2806 
2807 	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2808 		return 0;
2809 
2810 	/*
2811 	 * if truncate on deeper level happened, it wasn't partial,
2812 	 * so we have to consider current index for truncation
2813 	 */
2814 	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2815 		return 0;
2816 	return 1;
2817 }
2818 
2819 int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2820 			  ext4_lblk_t end)
2821 {
2822 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2823 	int depth = ext_depth(inode);
2824 	struct ext4_ext_path *path = NULL;
2825 	struct partial_cluster partial;
2826 	handle_t *handle;
2827 	int i = 0, err = 0;
2828 
2829 	partial.pclu = 0;
2830 	partial.lblk = 0;
2831 	partial.state = initial;
2832 
2833 	ext_debug(inode, "truncate since %u to %u\n", start, end);
2834 
2835 	/* probably first extent we're gonna free will be last in block */
2836 	handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE,
2837 			depth + 1,
2838 			ext4_free_metadata_revoke_credits(inode->i_sb, depth));
2839 	if (IS_ERR(handle))
2840 		return PTR_ERR(handle);
2841 
2842 again:
2843 	trace_ext4_ext_remove_space(inode, start, end, depth);
2844 
2845 	/*
2846 	 * Check if we are removing extents inside the extent tree. If that
2847 	 * is the case, we are going to punch a hole inside the extent tree
2848 	 * so we have to check whether we need to split the extent covering
2849 	 * the last block to remove so we can easily remove the part of it
2850 	 * in ext4_ext_rm_leaf().
2851 	 */
2852 	if (end < EXT_MAX_BLOCKS - 1) {
2853 		struct ext4_extent *ex;
2854 		ext4_lblk_t ee_block, ex_end, lblk;
2855 		ext4_fsblk_t pblk;
2856 
2857 		/* find extent for or closest extent to this block */
2858 		path = ext4_find_extent(inode, end, NULL,
2859 					EXT4_EX_NOCACHE | EXT4_EX_NOFAIL);
2860 		if (IS_ERR(path)) {
2861 			ext4_journal_stop(handle);
2862 			return PTR_ERR(path);
2863 		}
2864 		depth = ext_depth(inode);
2865 		/* Leaf not may not exist only if inode has no blocks at all */
2866 		ex = path[depth].p_ext;
2867 		if (!ex) {
2868 			if (depth) {
2869 				EXT4_ERROR_INODE(inode,
2870 						 "path[%d].p_hdr == NULL",
2871 						 depth);
2872 				err = -EFSCORRUPTED;
2873 			}
2874 			goto out;
2875 		}
2876 
2877 		ee_block = le32_to_cpu(ex->ee_block);
2878 		ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;
2879 
2880 		/*
2881 		 * See if the last block is inside the extent, if so split
2882 		 * the extent at 'end' block so we can easily remove the
2883 		 * tail of the first part of the split extent in
2884 		 * ext4_ext_rm_leaf().
2885 		 */
2886 		if (end >= ee_block && end < ex_end) {
2887 
2888 			/*
2889 			 * If we're going to split the extent, note that
2890 			 * the cluster containing the block after 'end' is
2891 			 * in use to avoid freeing it when removing blocks.
2892 			 */
2893 			if (sbi->s_cluster_ratio > 1) {
2894 				pblk = ext4_ext_pblock(ex) + end - ee_block + 1;
2895 				partial.pclu = EXT4_B2C(sbi, pblk);
2896 				partial.state = nofree;
2897 			}
2898 
2899 			/*
2900 			 * Split the extent in two so that 'end' is the last
2901 			 * block in the first new extent. Also we should not
2902 			 * fail removing space due to ENOSPC so try to use
2903 			 * reserved block if that happens.
2904 			 */
2905 			err = ext4_force_split_extent_at(handle, inode, &path,
2906 							 end + 1, 1);
2907 			if (err < 0)
2908 				goto out;
2909 
2910 		} else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
2911 			   partial.state == initial) {
2912 			/*
2913 			 * If we're punching, there's an extent to the right.
2914 			 * If the partial cluster hasn't been set, set it to
2915 			 * that extent's first cluster and its state to nofree
2916 			 * so it won't be freed should it contain blocks to be
2917 			 * removed. If it's already set (tofree/nofree), we're
2918 			 * retrying and keep the original partial cluster info
2919 			 * so a cluster marked tofree as a result of earlier
2920 			 * extent removal is not lost.
2921 			 */
2922 			lblk = ex_end + 1;
2923 			err = ext4_ext_search_right(inode, path, &lblk, &pblk,
2924 						    NULL);
2925 			if (err < 0)
2926 				goto out;
2927 			if (pblk) {
2928 				partial.pclu = EXT4_B2C(sbi, pblk);
2929 				partial.state = nofree;
2930 			}
2931 		}
2932 	}
2933 	/*
2934 	 * We start scanning from right side, freeing all the blocks
2935 	 * after i_size and walking into the tree depth-wise.
2936 	 */
2937 	depth = ext_depth(inode);
2938 	if (path) {
2939 		int k = i = depth;
2940 		while (--k > 0)
2941 			path[k].p_block =
2942 				le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2943 	} else {
2944 		path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
2945 			       GFP_NOFS | __GFP_NOFAIL);
2946 		if (path == NULL) {
2947 			ext4_journal_stop(handle);
2948 			return -ENOMEM;
2949 		}
2950 		path[0].p_maxdepth = path[0].p_depth = depth;
2951 		path[0].p_hdr = ext_inode_hdr(inode);
2952 		i = 0;
2953 
2954 		if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
2955 			err = -EFSCORRUPTED;
2956 			goto out;
2957 		}
2958 	}
2959 	err = 0;
2960 
2961 	while (i >= 0 && err == 0) {
2962 		if (i == depth) {
2963 			/* this is leaf block */
2964 			err = ext4_ext_rm_leaf(handle, inode, path,
2965 					       &partial, start, end);
2966 			/* root level has p_bh == NULL, brelse() eats this */
2967 			ext4_ext_path_brelse(path + i);
2968 			i--;
2969 			continue;
2970 		}
2971 
2972 		/* this is index block */
2973 		if (!path[i].p_hdr) {
2974 			ext_debug(inode, "initialize header\n");
2975 			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2976 		}
2977 
2978 		if (!path[i].p_idx) {
2979 			/* this level hasn't been touched yet */
2980 			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2981 			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2982 			ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n",
2983 				  path[i].p_hdr,
2984 				  le16_to_cpu(path[i].p_hdr->eh_entries));
2985 		} else {
2986 			/* we were already here, see at next index */
2987 			path[i].p_idx--;
2988 		}
2989 
2990 		ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n",
2991 				i, EXT_FIRST_INDEX(path[i].p_hdr),
2992 				path[i].p_idx);
2993 		if (ext4_ext_more_to_rm(path + i)) {
2994 			struct buffer_head *bh;
2995 			/* go to the next level */
2996 			ext_debug(inode, "move to level %d (block %llu)\n",
2997 				  i + 1, ext4_idx_pblock(path[i].p_idx));
2998 			memset(path + i + 1, 0, sizeof(*path));
2999 			bh = read_extent_tree_block(inode, path[i].p_idx,
3000 						    depth - i - 1,
3001 						    EXT4_EX_NOCACHE);
3002 			if (IS_ERR(bh)) {
3003 				/* should we reset i_size? */
3004 				err = PTR_ERR(bh);
3005 				break;
3006 			}
3007 			/* Yield here to deal with large extent trees.
3008 			 * Should be a no-op if we did IO above. */
3009 			cond_resched();
3010 			if (WARN_ON(i + 1 > depth)) {
3011 				err = -EFSCORRUPTED;
3012 				break;
3013 			}
3014 			path[i + 1].p_bh = bh;
3015 
3016 			/* save actual number of indexes since this
3017 			 * number is changed at the next iteration */
3018 			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
3019 			i++;
3020 		} else {
3021 			/* we finished processing this index, go up */
3022 			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
3023 				/* index is empty, remove it;
3024 				 * handle must be already prepared by the
3025 				 * truncatei_leaf() */
3026 				err = ext4_ext_rm_idx(handle, inode, path, i);
3027 			}
3028 			/* root level has p_bh == NULL, brelse() eats this */
3029 			ext4_ext_path_brelse(path + i);
3030 			i--;
3031 			ext_debug(inode, "return to level %d\n", i);
3032 		}
3033 	}
3034 
3035 	trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial,
3036 					 path->p_hdr->eh_entries);
3037 
3038 	/*
3039 	 * if there's a partial cluster and we have removed the first extent
3040 	 * in the file, then we also free the partial cluster, if any
3041 	 */
3042 	if (partial.state == tofree && err == 0) {
3043 		int flags = get_default_free_blocks_flags(inode);
3044 
3045 		if (ext4_is_pending(inode, partial.lblk))
3046 			flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
3047 		ext4_free_blocks(handle, inode, NULL,
3048 				 EXT4_C2B(sbi, partial.pclu),
3049 				 sbi->s_cluster_ratio, flags);
3050 		if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
3051 			ext4_rereserve_cluster(inode, partial.lblk);
3052 		partial.state = initial;
3053 	}
3054 
3055 	/* TODO: flexible tree reduction should be here */
3056 	if (path->p_hdr->eh_entries == 0) {
3057 		/*
3058 		 * truncate to zero freed all the tree,
3059 		 * so we need to correct eh_depth
3060 		 */
3061 		err = ext4_ext_get_access(handle, inode, path);
3062 		if (err == 0) {
3063 			ext_inode_hdr(inode)->eh_depth = 0;
3064 			ext_inode_hdr(inode)->eh_max =
3065 				cpu_to_le16(ext4_ext_space_root(inode, 0));
3066 			err = ext4_ext_dirty(handle, inode, path);
3067 		}
3068 	}
3069 out:
3070 	ext4_free_ext_path(path);
3071 	path = NULL;
3072 	if (err == -EAGAIN)
3073 		goto again;
3074 	ext4_journal_stop(handle);
3075 
3076 	return err;
3077 }
3078 
3079 /*
3080  * called at mount time
3081  */
3082 void ext4_ext_init(struct super_block *sb)
3083 {
3084 	/*
3085 	 * possible initialization would be here
3086 	 */
3087 
3088 	if (ext4_has_feature_extents(sb)) {
3089 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
3090 		printk(KERN_INFO "EXT4-fs: file extents enabled"
3091 #ifdef AGGRESSIVE_TEST
3092 		       ", aggressive tests"
3093 #endif
3094 #ifdef CHECK_BINSEARCH
3095 		       ", check binsearch"
3096 #endif
3097 #ifdef EXTENTS_STATS
3098 		       ", stats"
3099 #endif
3100 		       "\n");
3101 #endif
3102 #ifdef EXTENTS_STATS
3103 		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
3104 		EXT4_SB(sb)->s_ext_min = 1 << 30;
3105 		EXT4_SB(sb)->s_ext_max = 0;
3106 #endif
3107 	}
3108 }
3109 
3110 /*
3111  * called at umount time
3112  */
3113 void ext4_ext_release(struct super_block *sb)
3114 {
3115 	if (!ext4_has_feature_extents(sb))
3116 		return;
3117 
3118 #ifdef EXTENTS_STATS
3119 	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
3120 		struct ext4_sb_info *sbi = EXT4_SB(sb);
3121 		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
3122 			sbi->s_ext_blocks, sbi->s_ext_extents,
3123 			sbi->s_ext_blocks / sbi->s_ext_extents);
3124 		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
3125 			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
3126 	}
3127 #endif
3128 }
3129 
3130 static void ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
3131 {
3132 	ext4_lblk_t  ee_block;
3133 	ext4_fsblk_t ee_pblock;
3134 	unsigned int ee_len;
3135 
3136 	ee_block  = le32_to_cpu(ex->ee_block);
3137 	ee_len    = ext4_ext_get_actual_len(ex);
3138 	ee_pblock = ext4_ext_pblock(ex);
3139 
3140 	if (ee_len == 0)
3141 		return;
3142 
3143 	ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
3144 			      EXTENT_STATUS_WRITTEN, 0);
3145 }
3146 
3147 /* FIXME!! we need to try to merge to left or right after zero-out  */
3148 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3149 {
3150 	ext4_fsblk_t ee_pblock;
3151 	unsigned int ee_len;
3152 
3153 	ee_len    = ext4_ext_get_actual_len(ex);
3154 	ee_pblock = ext4_ext_pblock(ex);
3155 	return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
3156 				  ee_len);
3157 }
3158 
3159 /*
3160  * ext4_split_extent_at() splits an extent at given block.
3161  *
3162  * @handle: the journal handle
3163  * @inode: the file inode
3164  * @path: the path to the extent
3165  * @split: the logical block where the extent is splitted.
3166  * @split_flags: indicates if the extent could be zeroout if split fails, and
3167  *		 the states(init or unwritten) of new extents.
3168  * @flags: flags used to insert new extent to extent tree.
3169  *
3170  *
3171  * Splits extent [a, b] into two extents [a, @split) and [@split, b], states
3172  * of which are determined by split_flag.
3173  *
3174  * There are two cases:
3175  *  a> the extent are splitted into two extent.
3176  *  b> split is not needed, and just mark the extent.
3177  *
3178  * return 0 on success.
3179  */
3180 static int ext4_split_extent_at(handle_t *handle,
3181 			     struct inode *inode,
3182 			     struct ext4_ext_path **ppath,
3183 			     ext4_lblk_t split,
3184 			     int split_flag,
3185 			     int flags)
3186 {
3187 	struct ext4_ext_path *path = *ppath;
3188 	ext4_fsblk_t newblock;
3189 	ext4_lblk_t ee_block;
3190 	struct ext4_extent *ex, newex, orig_ex, zero_ex;
3191 	struct ext4_extent *ex2 = NULL;
3192 	unsigned int ee_len, depth;
3193 	int err = 0;
3194 
3195 	BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
3196 	       (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
3197 
3198 	ext_debug(inode, "logical block %llu\n", (unsigned long long)split);
3199 
3200 	ext4_ext_show_leaf(inode, path);
3201 
3202 	depth = ext_depth(inode);
3203 	ex = path[depth].p_ext;
3204 	ee_block = le32_to_cpu(ex->ee_block);
3205 	ee_len = ext4_ext_get_actual_len(ex);
3206 	newblock = split - ee_block + ext4_ext_pblock(ex);
3207 
3208 	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
3209 	BUG_ON(!ext4_ext_is_unwritten(ex) &&
3210 	       split_flag & (EXT4_EXT_MAY_ZEROOUT |
3211 			     EXT4_EXT_MARK_UNWRIT1 |
3212 			     EXT4_EXT_MARK_UNWRIT2));
3213 
3214 	err = ext4_ext_get_access(handle, inode, path + depth);
3215 	if (err)
3216 		goto out;
3217 
3218 	if (split == ee_block) {
3219 		/*
3220 		 * case b: block @split is the block that the extent begins with
3221 		 * then we just change the state of the extent, and splitting
3222 		 * is not needed.
3223 		 */
3224 		if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3225 			ext4_ext_mark_unwritten(ex);
3226 		else
3227 			ext4_ext_mark_initialized(ex);
3228 
3229 		if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3230 			ext4_ext_try_to_merge(handle, inode, path, ex);
3231 
3232 		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3233 		goto out;
3234 	}
3235 
3236 	/* case a */
3237 	memcpy(&orig_ex, ex, sizeof(orig_ex));
3238 	ex->ee_len = cpu_to_le16(split - ee_block);
3239 	if (split_flag & EXT4_EXT_MARK_UNWRIT1)
3240 		ext4_ext_mark_unwritten(ex);
3241 
3242 	/*
3243 	 * path may lead to new leaf, not to original leaf any more
3244 	 * after ext4_ext_insert_extent() returns,
3245 	 */
3246 	err = ext4_ext_dirty(handle, inode, path + depth);
3247 	if (err)
3248 		goto fix_extent_len;
3249 
3250 	ex2 = &newex;
3251 	ex2->ee_block = cpu_to_le32(split);
3252 	ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
3253 	ext4_ext_store_pblock(ex2, newblock);
3254 	if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3255 		ext4_ext_mark_unwritten(ex2);
3256 
3257 	path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3258 	if (!IS_ERR(path)) {
3259 		*ppath = path;
3260 		goto out;
3261 	}
3262 	*ppath = NULL;
3263 	err = PTR_ERR(path);
3264 	if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
3265 		return err;
3266 
3267 	/*
3268 	 * Get a new path to try to zeroout or fix the extent length.
3269 	 * Using EXT4_EX_NOFAIL guarantees that ext4_find_extent()
3270 	 * will not return -ENOMEM, otherwise -ENOMEM will cause a
3271 	 * retry in do_writepages(), and a WARN_ON may be triggered
3272 	 * in ext4_da_update_reserve_space() due to an incorrect
3273 	 * ee_len causing the i_reserved_data_blocks exception.
3274 	 */
3275 	path = ext4_find_extent(inode, ee_block, NULL,
3276 				flags | EXT4_EX_NOFAIL);
3277 	if (IS_ERR(path)) {
3278 		EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
3279 				 split, PTR_ERR(path));
3280 		return PTR_ERR(path);
3281 	}
3282 	depth = ext_depth(inode);
3283 	ex = path[depth].p_ext;
3284 	*ppath = path;
3285 
3286 	if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
3287 		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3288 			if (split_flag & EXT4_EXT_DATA_VALID1) {
3289 				err = ext4_ext_zeroout(inode, ex2);
3290 				zero_ex.ee_block = ex2->ee_block;
3291 				zero_ex.ee_len = cpu_to_le16(
3292 						ext4_ext_get_actual_len(ex2));
3293 				ext4_ext_store_pblock(&zero_ex,
3294 						      ext4_ext_pblock(ex2));
3295 			} else {
3296 				err = ext4_ext_zeroout(inode, ex);
3297 				zero_ex.ee_block = ex->ee_block;
3298 				zero_ex.ee_len = cpu_to_le16(
3299 						ext4_ext_get_actual_len(ex));
3300 				ext4_ext_store_pblock(&zero_ex,
3301 						      ext4_ext_pblock(ex));
3302 			}
3303 		} else {
3304 			err = ext4_ext_zeroout(inode, &orig_ex);
3305 			zero_ex.ee_block = orig_ex.ee_block;
3306 			zero_ex.ee_len = cpu_to_le16(
3307 						ext4_ext_get_actual_len(&orig_ex));
3308 			ext4_ext_store_pblock(&zero_ex,
3309 					      ext4_ext_pblock(&orig_ex));
3310 		}
3311 
3312 		if (!err) {
3313 			/* update the extent length and mark as initialized */
3314 			ex->ee_len = cpu_to_le16(ee_len);
3315 			ext4_ext_try_to_merge(handle, inode, path, ex);
3316 			err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3317 			if (!err)
3318 				/* update extent status tree */
3319 				ext4_zeroout_es(inode, &zero_ex);
3320 			/* If we failed at this point, we don't know in which
3321 			 * state the extent tree exactly is so don't try to fix
3322 			 * length of the original extent as it may do even more
3323 			 * damage.
3324 			 */
3325 			goto out;
3326 		}
3327 	}
3328 
3329 fix_extent_len:
3330 	ex->ee_len = orig_ex.ee_len;
3331 	/*
3332 	 * Ignore ext4_ext_dirty return value since we are already in error path
3333 	 * and err is a non-zero error code.
3334 	 */
3335 	ext4_ext_dirty(handle, inode, path + path->p_depth);
3336 	return err;
3337 out:
3338 	ext4_ext_show_leaf(inode, path);
3339 	return err;
3340 }
3341 
3342 /*
3343  * ext4_split_extent() splits an extent and mark extent which is covered
3344  * by @map as split_flags indicates
3345  *
3346  * It may result in splitting the extent into multiple extents (up to three)
3347  * There are three possibilities:
3348  *   a> There is no split required
3349  *   b> Splits in two extents: Split is happening at either end of the extent
3350  *   c> Splits in three extents: Somone is splitting in middle of the extent
3351  *
3352  */
3353 static int ext4_split_extent(handle_t *handle,
3354 			      struct inode *inode,
3355 			      struct ext4_ext_path **ppath,
3356 			      struct ext4_map_blocks *map,
3357 			      int split_flag,
3358 			      int flags)
3359 {
3360 	struct ext4_ext_path *path = *ppath;
3361 	ext4_lblk_t ee_block;
3362 	struct ext4_extent *ex;
3363 	unsigned int ee_len, depth;
3364 	int err = 0;
3365 	int unwritten;
3366 	int split_flag1, flags1;
3367 	int allocated = map->m_len;
3368 
3369 	depth = ext_depth(inode);
3370 	ex = path[depth].p_ext;
3371 	ee_block = le32_to_cpu(ex->ee_block);
3372 	ee_len = ext4_ext_get_actual_len(ex);
3373 	unwritten = ext4_ext_is_unwritten(ex);
3374 
3375 	if (map->m_lblk + map->m_len < ee_block + ee_len) {
3376 		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3377 		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3378 		if (unwritten)
3379 			split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
3380 				       EXT4_EXT_MARK_UNWRIT2;
3381 		if (split_flag & EXT4_EXT_DATA_VALID2)
3382 			split_flag1 |= EXT4_EXT_DATA_VALID1;
3383 		err = ext4_split_extent_at(handle, inode, ppath,
3384 				map->m_lblk + map->m_len, split_flag1, flags1);
3385 		if (err)
3386 			goto out;
3387 	} else {
3388 		allocated = ee_len - (map->m_lblk - ee_block);
3389 	}
3390 	/*
3391 	 * Update path is required because previous ext4_split_extent_at() may
3392 	 * result in split of original leaf or extent zeroout.
3393 	 */
3394 	path = ext4_find_extent(inode, map->m_lblk, *ppath, flags);
3395 	if (IS_ERR(path)) {
3396 		*ppath = NULL;
3397 		return PTR_ERR(path);
3398 	}
3399 	*ppath = path;
3400 	depth = ext_depth(inode);
3401 	ex = path[depth].p_ext;
3402 	if (!ex) {
3403 		EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3404 				 (unsigned long) map->m_lblk);
3405 		return -EFSCORRUPTED;
3406 	}
3407 	unwritten = ext4_ext_is_unwritten(ex);
3408 
3409 	if (map->m_lblk >= ee_block) {
3410 		split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
3411 		if (unwritten) {
3412 			split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
3413 			split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
3414 						     EXT4_EXT_MARK_UNWRIT2);
3415 		}
3416 		err = ext4_split_extent_at(handle, inode, ppath,
3417 				map->m_lblk, split_flag1, flags);
3418 		if (err)
3419 			goto out;
3420 	}
3421 
3422 	ext4_ext_show_leaf(inode, *ppath);
3423 out:
3424 	return err ? err : allocated;
3425 }
3426 
3427 /*
3428  * This function is called by ext4_ext_map_blocks() if someone tries to write
3429  * to an unwritten extent. It may result in splitting the unwritten
3430  * extent into multiple extents (up to three - one initialized and two
3431  * unwritten).
3432  * There are three possibilities:
3433  *   a> There is no split required: Entire extent should be initialized
3434  *   b> Splits in two extents: Write is happening at either end of the extent
3435  *   c> Splits in three extents: Somone is writing in middle of the extent
3436  *
3437  * Pre-conditions:
3438  *  - The extent pointed to by 'path' is unwritten.
3439  *  - The extent pointed to by 'path' contains a superset
3440  *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3441  *
3442  * Post-conditions on success:
3443  *  - the returned value is the number of blocks beyond map->l_lblk
3444  *    that are allocated and initialized.
3445  *    It is guaranteed to be >= map->m_len.
3446  */
3447 static int ext4_ext_convert_to_initialized(handle_t *handle,
3448 					   struct inode *inode,
3449 					   struct ext4_map_blocks *map,
3450 					   struct ext4_ext_path **ppath,
3451 					   int flags)
3452 {
3453 	struct ext4_ext_path *path = *ppath;
3454 	struct ext4_sb_info *sbi;
3455 	struct ext4_extent_header *eh;
3456 	struct ext4_map_blocks split_map;
3457 	struct ext4_extent zero_ex1, zero_ex2;
3458 	struct ext4_extent *ex, *abut_ex;
3459 	ext4_lblk_t ee_block, eof_block;
3460 	unsigned int ee_len, depth, map_len = map->m_len;
3461 	int err = 0;
3462 	int split_flag = EXT4_EXT_DATA_VALID2;
3463 	int allocated = 0;
3464 	unsigned int max_zeroout = 0;
3465 
3466 	ext_debug(inode, "logical block %llu, max_blocks %u\n",
3467 		  (unsigned long long)map->m_lblk, map_len);
3468 
3469 	sbi = EXT4_SB(inode->i_sb);
3470 	eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
3471 			>> inode->i_sb->s_blocksize_bits;
3472 	if (eof_block < map->m_lblk + map_len)
3473 		eof_block = map->m_lblk + map_len;
3474 
3475 	depth = ext_depth(inode);
3476 	eh = path[depth].p_hdr;
3477 	ex = path[depth].p_ext;
3478 	ee_block = le32_to_cpu(ex->ee_block);
3479 	ee_len = ext4_ext_get_actual_len(ex);
3480 	zero_ex1.ee_len = 0;
3481 	zero_ex2.ee_len = 0;
3482 
3483 	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3484 
3485 	/* Pre-conditions */
3486 	BUG_ON(!ext4_ext_is_unwritten(ex));
3487 	BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3488 
3489 	/*
3490 	 * Attempt to transfer newly initialized blocks from the currently
3491 	 * unwritten extent to its neighbor. This is much cheaper
3492 	 * than an insertion followed by a merge as those involve costly
3493 	 * memmove() calls. Transferring to the left is the common case in
3494 	 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
3495 	 * followed by append writes.
3496 	 *
3497 	 * Limitations of the current logic:
3498 	 *  - L1: we do not deal with writes covering the whole extent.
3499 	 *    This would require removing the extent if the transfer
3500 	 *    is possible.
3501 	 *  - L2: we only attempt to merge with an extent stored in the
3502 	 *    same extent tree node.
3503 	 */
3504 	if ((map->m_lblk == ee_block) &&
3505 		/* See if we can merge left */
3506 		(map_len < ee_len) &&		/*L1*/
3507 		(ex > EXT_FIRST_EXTENT(eh))) {	/*L2*/
3508 		ext4_lblk_t prev_lblk;
3509 		ext4_fsblk_t prev_pblk, ee_pblk;
3510 		unsigned int prev_len;
3511 
3512 		abut_ex = ex - 1;
3513 		prev_lblk = le32_to_cpu(abut_ex->ee_block);
3514 		prev_len = ext4_ext_get_actual_len(abut_ex);
3515 		prev_pblk = ext4_ext_pblock(abut_ex);
3516 		ee_pblk = ext4_ext_pblock(ex);
3517 
3518 		/*
3519 		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3520 		 * upon those conditions:
3521 		 * - C1: abut_ex is initialized,
3522 		 * - C2: abut_ex is logically abutting ex,
3523 		 * - C3: abut_ex is physically abutting ex,
3524 		 * - C4: abut_ex can receive the additional blocks without
3525 		 *   overflowing the (initialized) length limit.
3526 		 */
3527 		if ((!ext4_ext_is_unwritten(abut_ex)) &&		/*C1*/
3528 			((prev_lblk + prev_len) == ee_block) &&		/*C2*/
3529 			((prev_pblk + prev_len) == ee_pblk) &&		/*C3*/
3530 			(prev_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
3531 			err = ext4_ext_get_access(handle, inode, path + depth);
3532 			if (err)
3533 				goto out;
3534 
3535 			trace_ext4_ext_convert_to_initialized_fastpath(inode,
3536 				map, ex, abut_ex);
3537 
3538 			/* Shift the start of ex by 'map_len' blocks */
3539 			ex->ee_block = cpu_to_le32(ee_block + map_len);
3540 			ext4_ext_store_pblock(ex, ee_pblk + map_len);
3541 			ex->ee_len = cpu_to_le16(ee_len - map_len);
3542 			ext4_ext_mark_unwritten(ex); /* Restore the flag */
3543 
3544 			/* Extend abut_ex by 'map_len' blocks */
3545 			abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
3546 
3547 			/* Result: number of initialized blocks past m_lblk */
3548 			allocated = map_len;
3549 		}
3550 	} else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
3551 		   (map_len < ee_len) &&	/*L1*/
3552 		   ex < EXT_LAST_EXTENT(eh)) {	/*L2*/
3553 		/* See if we can merge right */
3554 		ext4_lblk_t next_lblk;
3555 		ext4_fsblk_t next_pblk, ee_pblk;
3556 		unsigned int next_len;
3557 
3558 		abut_ex = ex + 1;
3559 		next_lblk = le32_to_cpu(abut_ex->ee_block);
3560 		next_len = ext4_ext_get_actual_len(abut_ex);
3561 		next_pblk = ext4_ext_pblock(abut_ex);
3562 		ee_pblk = ext4_ext_pblock(ex);
3563 
3564 		/*
3565 		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3566 		 * upon those conditions:
3567 		 * - C1: abut_ex is initialized,
3568 		 * - C2: abut_ex is logically abutting ex,
3569 		 * - C3: abut_ex is physically abutting ex,
3570 		 * - C4: abut_ex can receive the additional blocks without
3571 		 *   overflowing the (initialized) length limit.
3572 		 */
3573 		if ((!ext4_ext_is_unwritten(abut_ex)) &&		/*C1*/
3574 		    ((map->m_lblk + map_len) == next_lblk) &&		/*C2*/
3575 		    ((ee_pblk + ee_len) == next_pblk) &&		/*C3*/
3576 		    (next_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
3577 			err = ext4_ext_get_access(handle, inode, path + depth);
3578 			if (err)
3579 				goto out;
3580 
3581 			trace_ext4_ext_convert_to_initialized_fastpath(inode,
3582 				map, ex, abut_ex);
3583 
3584 			/* Shift the start of abut_ex by 'map_len' blocks */
3585 			abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
3586 			ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
3587 			ex->ee_len = cpu_to_le16(ee_len - map_len);
3588 			ext4_ext_mark_unwritten(ex); /* Restore the flag */
3589 
3590 			/* Extend abut_ex by 'map_len' blocks */
3591 			abut_ex->ee_len = cpu_to_le16(next_len + map_len);
3592 
3593 			/* Result: number of initialized blocks past m_lblk */
3594 			allocated = map_len;
3595 		}
3596 	}
3597 	if (allocated) {
3598 		/* Mark the block containing both extents as dirty */
3599 		err = ext4_ext_dirty(handle, inode, path + depth);
3600 
3601 		/* Update path to point to the right extent */
3602 		path[depth].p_ext = abut_ex;
3603 		goto out;
3604 	} else
3605 		allocated = ee_len - (map->m_lblk - ee_block);
3606 
3607 	WARN_ON(map->m_lblk < ee_block);
3608 	/*
3609 	 * It is safe to convert extent to initialized via explicit
3610 	 * zeroout only if extent is fully inside i_size or new_size.
3611 	 */
3612 	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3613 
3614 	if (EXT4_EXT_MAY_ZEROOUT & split_flag)
3615 		max_zeroout = sbi->s_extent_max_zeroout_kb >>
3616 			(inode->i_sb->s_blocksize_bits - 10);
3617 
3618 	/*
3619 	 * five cases:
3620 	 * 1. split the extent into three extents.
3621 	 * 2. split the extent into two extents, zeroout the head of the first
3622 	 *    extent.
3623 	 * 3. split the extent into two extents, zeroout the tail of the second
3624 	 *    extent.
3625 	 * 4. split the extent into two extents with out zeroout.
3626 	 * 5. no splitting needed, just possibly zeroout the head and / or the
3627 	 *    tail of the extent.
3628 	 */
3629 	split_map.m_lblk = map->m_lblk;
3630 	split_map.m_len = map->m_len;
3631 
3632 	if (max_zeroout && (allocated > split_map.m_len)) {
3633 		if (allocated <= max_zeroout) {
3634 			/* case 3 or 5 */
3635 			zero_ex1.ee_block =
3636 				 cpu_to_le32(split_map.m_lblk +
3637 					     split_map.m_len);
3638 			zero_ex1.ee_len =
3639 				cpu_to_le16(allocated - split_map.m_len);
3640 			ext4_ext_store_pblock(&zero_ex1,
3641 				ext4_ext_pblock(ex) + split_map.m_lblk +
3642 				split_map.m_len - ee_block);
3643 			err = ext4_ext_zeroout(inode, &zero_ex1);
3644 			if (err)
3645 				goto fallback;
3646 			split_map.m_len = allocated;
3647 		}
3648 		if (split_map.m_lblk - ee_block + split_map.m_len <
3649 								max_zeroout) {
3650 			/* case 2 or 5 */
3651 			if (split_map.m_lblk != ee_block) {
3652 				zero_ex2.ee_block = ex->ee_block;
3653 				zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
3654 							ee_block);
3655 				ext4_ext_store_pblock(&zero_ex2,
3656 						      ext4_ext_pblock(ex));
3657 				err = ext4_ext_zeroout(inode, &zero_ex2);
3658 				if (err)
3659 					goto fallback;
3660 			}
3661 
3662 			split_map.m_len += split_map.m_lblk - ee_block;
3663 			split_map.m_lblk = ee_block;
3664 			allocated = map->m_len;
3665 		}
3666 	}
3667 
3668 fallback:
3669 	err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
3670 				flags);
3671 	if (err > 0)
3672 		err = 0;
3673 out:
3674 	/* If we have gotten a failure, don't zero out status tree */
3675 	if (!err) {
3676 		ext4_zeroout_es(inode, &zero_ex1);
3677 		ext4_zeroout_es(inode, &zero_ex2);
3678 	}
3679 	return err ? err : allocated;
3680 }
3681 
3682 /*
3683  * This function is called by ext4_ext_map_blocks() from
3684  * ext4_get_blocks_dio_write() when DIO to write
3685  * to an unwritten extent.
3686  *
3687  * Writing to an unwritten extent may result in splitting the unwritten
3688  * extent into multiple initialized/unwritten extents (up to three)
3689  * There are three possibilities:
3690  *   a> There is no split required: Entire extent should be unwritten
3691  *   b> Splits in two extents: Write is happening at either end of the extent
3692  *   c> Splits in three extents: Somone is writing in middle of the extent
3693  *
3694  * This works the same way in the case of initialized -> unwritten conversion.
3695  *
3696  * One of more index blocks maybe needed if the extent tree grow after
3697  * the unwritten extent split. To prevent ENOSPC occur at the IO
3698  * complete, we need to split the unwritten extent before DIO submit
3699  * the IO. The unwritten extent called at this time will be split
3700  * into three unwritten extent(at most). After IO complete, the part
3701  * being filled will be convert to initialized by the end_io callback function
3702  * via ext4_convert_unwritten_extents().
3703  *
3704  * Returns the size of unwritten extent to be written on success.
3705  */
3706 static int ext4_split_convert_extents(handle_t *handle,
3707 					struct inode *inode,
3708 					struct ext4_map_blocks *map,
3709 					struct ext4_ext_path **ppath,
3710 					int flags)
3711 {
3712 	struct ext4_ext_path *path = *ppath;
3713 	ext4_lblk_t eof_block;
3714 	ext4_lblk_t ee_block;
3715 	struct ext4_extent *ex;
3716 	unsigned int ee_len;
3717 	int split_flag = 0, depth;
3718 
3719 	ext_debug(inode, "logical block %llu, max_blocks %u\n",
3720 		  (unsigned long long)map->m_lblk, map->m_len);
3721 
3722 	eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
3723 			>> inode->i_sb->s_blocksize_bits;
3724 	if (eof_block < map->m_lblk + map->m_len)
3725 		eof_block = map->m_lblk + map->m_len;
3726 	/*
3727 	 * It is safe to convert extent to initialized via explicit
3728 	 * zeroout only if extent is fully inside i_size or new_size.
3729 	 */
3730 	depth = ext_depth(inode);
3731 	ex = path[depth].p_ext;
3732 	ee_block = le32_to_cpu(ex->ee_block);
3733 	ee_len = ext4_ext_get_actual_len(ex);
3734 
3735 	/* Convert to unwritten */
3736 	if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
3737 		split_flag |= EXT4_EXT_DATA_VALID1;
3738 	/* Convert to initialized */
3739 	} else if (flags & EXT4_GET_BLOCKS_CONVERT) {
3740 		split_flag |= ee_block + ee_len <= eof_block ?
3741 			      EXT4_EXT_MAY_ZEROOUT : 0;
3742 		split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
3743 	}
3744 	flags |= EXT4_GET_BLOCKS_PRE_IO;
3745 	return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
3746 }
3747 
3748 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3749 						struct inode *inode,
3750 						struct ext4_map_blocks *map,
3751 						struct ext4_ext_path **ppath)
3752 {
3753 	struct ext4_ext_path *path = *ppath;
3754 	struct ext4_extent *ex;
3755 	ext4_lblk_t ee_block;
3756 	unsigned int ee_len;
3757 	int depth;
3758 	int err = 0;
3759 
3760 	depth = ext_depth(inode);
3761 	ex = path[depth].p_ext;
3762 	ee_block = le32_to_cpu(ex->ee_block);
3763 	ee_len = ext4_ext_get_actual_len(ex);
3764 
3765 	ext_debug(inode, "logical block %llu, max_blocks %u\n",
3766 		  (unsigned long long)ee_block, ee_len);
3767 
3768 	/* If extent is larger than requested it is a clear sign that we still
3769 	 * have some extent state machine issues left. So extent_split is still
3770 	 * required.
3771 	 * TODO: Once all related issues will be fixed this situation should be
3772 	 * illegal.
3773 	 */
3774 	if (ee_block != map->m_lblk || ee_len > map->m_len) {
3775 #ifdef CONFIG_EXT4_DEBUG
3776 		ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
3777 			     " len %u; IO logical block %llu, len %u",
3778 			     inode->i_ino, (unsigned long long)ee_block, ee_len,
3779 			     (unsigned long long)map->m_lblk, map->m_len);
3780 #endif
3781 		err = ext4_split_convert_extents(handle, inode, map, ppath,
3782 						 EXT4_GET_BLOCKS_CONVERT);
3783 		if (err < 0)
3784 			return err;
3785 		path = ext4_find_extent(inode, map->m_lblk, *ppath, 0);
3786 		if (IS_ERR(path)) {
3787 			*ppath = NULL;
3788 			return PTR_ERR(path);
3789 		}
3790 		*ppath = path;
3791 		depth = ext_depth(inode);
3792 		ex = path[depth].p_ext;
3793 	}
3794 
3795 	err = ext4_ext_get_access(handle, inode, path + depth);
3796 	if (err)
3797 		goto out;
3798 	/* first mark the extent as initialized */
3799 	ext4_ext_mark_initialized(ex);
3800 
3801 	/* note: ext4_ext_correct_indexes() isn't needed here because
3802 	 * borders are not changed
3803 	 */
3804 	ext4_ext_try_to_merge(handle, inode, path, ex);
3805 
3806 	/* Mark modified extent as dirty */
3807 	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3808 out:
3809 	ext4_ext_show_leaf(inode, path);
3810 	return err;
3811 }
3812 
3813 static int
3814 convert_initialized_extent(handle_t *handle, struct inode *inode,
3815 			   struct ext4_map_blocks *map,
3816 			   struct ext4_ext_path **ppath,
3817 			   unsigned int *allocated)
3818 {
3819 	struct ext4_ext_path *path = *ppath;
3820 	struct ext4_extent *ex;
3821 	ext4_lblk_t ee_block;
3822 	unsigned int ee_len;
3823 	int depth;
3824 	int err = 0;
3825 
3826 	/*
3827 	 * Make sure that the extent is no bigger than we support with
3828 	 * unwritten extent
3829 	 */
3830 	if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
3831 		map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
3832 
3833 	depth = ext_depth(inode);
3834 	ex = path[depth].p_ext;
3835 	ee_block = le32_to_cpu(ex->ee_block);
3836 	ee_len = ext4_ext_get_actual_len(ex);
3837 
3838 	ext_debug(inode, "logical block %llu, max_blocks %u\n",
3839 		  (unsigned long long)ee_block, ee_len);
3840 
3841 	if (ee_block != map->m_lblk || ee_len > map->m_len) {
3842 		err = ext4_split_convert_extents(handle, inode, map, ppath,
3843 				EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
3844 		if (err < 0)
3845 			return err;
3846 		path = ext4_find_extent(inode, map->m_lblk, *ppath, 0);
3847 		if (IS_ERR(path)) {
3848 			*ppath = NULL;
3849 			return PTR_ERR(path);
3850 		}
3851 		*ppath = path;
3852 		depth = ext_depth(inode);
3853 		ex = path[depth].p_ext;
3854 		if (!ex) {
3855 			EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3856 					 (unsigned long) map->m_lblk);
3857 			return -EFSCORRUPTED;
3858 		}
3859 	}
3860 
3861 	err = ext4_ext_get_access(handle, inode, path + depth);
3862 	if (err)
3863 		return err;
3864 	/* first mark the extent as unwritten */
3865 	ext4_ext_mark_unwritten(ex);
3866 
3867 	/* note: ext4_ext_correct_indexes() isn't needed here because
3868 	 * borders are not changed
3869 	 */
3870 	ext4_ext_try_to_merge(handle, inode, path, ex);
3871 
3872 	/* Mark modified extent as dirty */
3873 	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3874 	if (err)
3875 		return err;
3876 	ext4_ext_show_leaf(inode, path);
3877 
3878 	ext4_update_inode_fsync_trans(handle, inode, 1);
3879 
3880 	map->m_flags |= EXT4_MAP_UNWRITTEN;
3881 	if (*allocated > map->m_len)
3882 		*allocated = map->m_len;
3883 	map->m_len = *allocated;
3884 	return 0;
3885 }
3886 
3887 static int
3888 ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
3889 			struct ext4_map_blocks *map,
3890 			struct ext4_ext_path **ppath, int flags,
3891 			unsigned int allocated, ext4_fsblk_t newblock)
3892 {
3893 	int ret = 0;
3894 	int err = 0;
3895 
3896 	ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
3897 		  (unsigned long long)map->m_lblk, map->m_len, flags,
3898 		  allocated);
3899 	ext4_ext_show_leaf(inode, *ppath);
3900 
3901 	/*
3902 	 * When writing into unwritten space, we should not fail to
3903 	 * allocate metadata blocks for the new extent block if needed.
3904 	 */
3905 	flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
3906 
3907 	trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
3908 						    allocated, newblock);
3909 
3910 	/* get_block() before submitting IO, split the extent */
3911 	if (flags & EXT4_GET_BLOCKS_PRE_IO) {
3912 		ret = ext4_split_convert_extents(handle, inode, map, ppath,
3913 					 flags | EXT4_GET_BLOCKS_CONVERT);
3914 		if (ret < 0) {
3915 			err = ret;
3916 			goto out2;
3917 		}
3918 		/*
3919 		 * shouldn't get a 0 return when splitting an extent unless
3920 		 * m_len is 0 (bug) or extent has been corrupted
3921 		 */
3922 		if (unlikely(ret == 0)) {
3923 			EXT4_ERROR_INODE(inode,
3924 					 "unexpected ret == 0, m_len = %u",
3925 					 map->m_len);
3926 			err = -EFSCORRUPTED;
3927 			goto out2;
3928 		}
3929 		map->m_flags |= EXT4_MAP_UNWRITTEN;
3930 		goto out;
3931 	}
3932 	/* IO end_io complete, convert the filled extent to written */
3933 	if (flags & EXT4_GET_BLOCKS_CONVERT) {
3934 		err = ext4_convert_unwritten_extents_endio(handle, inode, map,
3935 							   ppath);
3936 		if (err < 0)
3937 			goto out2;
3938 		ext4_update_inode_fsync_trans(handle, inode, 1);
3939 		goto map_out;
3940 	}
3941 	/* buffered IO cases */
3942 	/*
3943 	 * repeat fallocate creation request
3944 	 * we already have an unwritten extent
3945 	 */
3946 	if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
3947 		map->m_flags |= EXT4_MAP_UNWRITTEN;
3948 		goto map_out;
3949 	}
3950 
3951 	/* buffered READ or buffered write_begin() lookup */
3952 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3953 		/*
3954 		 * We have blocks reserved already.  We
3955 		 * return allocated blocks so that delalloc
3956 		 * won't do block reservation for us.  But
3957 		 * the buffer head will be unmapped so that
3958 		 * a read from the block returns 0s.
3959 		 */
3960 		map->m_flags |= EXT4_MAP_UNWRITTEN;
3961 		goto out1;
3962 	}
3963 
3964 	/*
3965 	 * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1.
3966 	 * For buffered writes, at writepage time, etc.  Convert a
3967 	 * discovered unwritten extent to written.
3968 	 */
3969 	ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
3970 	if (ret < 0) {
3971 		err = ret;
3972 		goto out2;
3973 	}
3974 	ext4_update_inode_fsync_trans(handle, inode, 1);
3975 	/*
3976 	 * shouldn't get a 0 return when converting an unwritten extent
3977 	 * unless m_len is 0 (bug) or extent has been corrupted
3978 	 */
3979 	if (unlikely(ret == 0)) {
3980 		EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u",
3981 				 map->m_len);
3982 		err = -EFSCORRUPTED;
3983 		goto out2;
3984 	}
3985 
3986 out:
3987 	allocated = ret;
3988 	map->m_flags |= EXT4_MAP_NEW;
3989 map_out:
3990 	map->m_flags |= EXT4_MAP_MAPPED;
3991 out1:
3992 	map->m_pblk = newblock;
3993 	if (allocated > map->m_len)
3994 		allocated = map->m_len;
3995 	map->m_len = allocated;
3996 	ext4_ext_show_leaf(inode, *ppath);
3997 out2:
3998 	return err ? err : allocated;
3999 }
4000 
4001 /*
4002  * get_implied_cluster_alloc - check to see if the requested
4003  * allocation (in the map structure) overlaps with a cluster already
4004  * allocated in an extent.
4005  *	@sb	The filesystem superblock structure
4006  *	@map	The requested lblk->pblk mapping
4007  *	@ex	The extent structure which might contain an implied
4008  *			cluster allocation
4009  *
4010  * This function is called by ext4_ext_map_blocks() after we failed to
4011  * find blocks that were already in the inode's extent tree.  Hence,
4012  * we know that the beginning of the requested region cannot overlap
4013  * the extent from the inode's extent tree.  There are three cases we
4014  * want to catch.  The first is this case:
4015  *
4016  *		 |--- cluster # N--|
4017  *    |--- extent ---|	|---- requested region ---|
4018  *			|==========|
4019  *
4020  * The second case that we need to test for is this one:
4021  *
4022  *   |--------- cluster # N ----------------|
4023  *	   |--- requested region --|   |------- extent ----|
4024  *	   |=======================|
4025  *
4026  * The third case is when the requested region lies between two extents
4027  * within the same cluster:
4028  *          |------------- cluster # N-------------|
4029  * |----- ex -----|                  |---- ex_right ----|
4030  *                  |------ requested region ------|
4031  *                  |================|
4032  *
4033  * In each of the above cases, we need to set the map->m_pblk and
4034  * map->m_len so it corresponds to the return the extent labelled as
4035  * "|====|" from cluster #N, since it is already in use for data in
4036  * cluster EXT4_B2C(sbi, map->m_lblk).	We will then return 1 to
4037  * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
4038  * as a new "allocated" block region.  Otherwise, we will return 0 and
4039  * ext4_ext_map_blocks() will then allocate one or more new clusters
4040  * by calling ext4_mb_new_blocks().
4041  */
4042 static int get_implied_cluster_alloc(struct super_block *sb,
4043 				     struct ext4_map_blocks *map,
4044 				     struct ext4_extent *ex,
4045 				     struct ext4_ext_path *path)
4046 {
4047 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4048 	ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4049 	ext4_lblk_t ex_cluster_start, ex_cluster_end;
4050 	ext4_lblk_t rr_cluster_start;
4051 	ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4052 	ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4053 	unsigned short ee_len = ext4_ext_get_actual_len(ex);
4054 
4055 	/* The extent passed in that we are trying to match */
4056 	ex_cluster_start = EXT4_B2C(sbi, ee_block);
4057 	ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
4058 
4059 	/* The requested region passed into ext4_map_blocks() */
4060 	rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
4061 
4062 	if ((rr_cluster_start == ex_cluster_end) ||
4063 	    (rr_cluster_start == ex_cluster_start)) {
4064 		if (rr_cluster_start == ex_cluster_end)
4065 			ee_start += ee_len - 1;
4066 		map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
4067 		map->m_len = min(map->m_len,
4068 				 (unsigned) sbi->s_cluster_ratio - c_offset);
4069 		/*
4070 		 * Check for and handle this case:
4071 		 *
4072 		 *   |--------- cluster # N-------------|
4073 		 *		       |------- extent ----|
4074 		 *	   |--- requested region ---|
4075 		 *	   |===========|
4076 		 */
4077 
4078 		if (map->m_lblk < ee_block)
4079 			map->m_len = min(map->m_len, ee_block - map->m_lblk);
4080 
4081 		/*
4082 		 * Check for the case where there is already another allocated
4083 		 * block to the right of 'ex' but before the end of the cluster.
4084 		 *
4085 		 *          |------------- cluster # N-------------|
4086 		 * |----- ex -----|                  |---- ex_right ----|
4087 		 *                  |------ requested region ------|
4088 		 *                  |================|
4089 		 */
4090 		if (map->m_lblk > ee_block) {
4091 			ext4_lblk_t next = ext4_ext_next_allocated_block(path);
4092 			map->m_len = min(map->m_len, next - map->m_lblk);
4093 		}
4094 
4095 		trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
4096 		return 1;
4097 	}
4098 
4099 	trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
4100 	return 0;
4101 }
4102 
4103 /*
4104  * Determine hole length around the given logical block, first try to
4105  * locate and expand the hole from the given @path, and then adjust it
4106  * if it's partially or completely converted to delayed extents, insert
4107  * it into the extent cache tree if it's indeed a hole, finally return
4108  * the length of the determined extent.
4109  */
4110 static ext4_lblk_t ext4_ext_determine_insert_hole(struct inode *inode,
4111 						  struct ext4_ext_path *path,
4112 						  ext4_lblk_t lblk)
4113 {
4114 	ext4_lblk_t hole_start, len;
4115 	struct extent_status es;
4116 
4117 	hole_start = lblk;
4118 	len = ext4_ext_find_hole(inode, path, &hole_start);
4119 again:
4120 	ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
4121 				  hole_start + len - 1, &es);
4122 	if (!es.es_len)
4123 		goto insert_hole;
4124 
4125 	/*
4126 	 * There's a delalloc extent in the hole, handle it if the delalloc
4127 	 * extent is in front of, behind and straddle the queried range.
4128 	 */
4129 	if (lblk >= es.es_lblk + es.es_len) {
4130 		/*
4131 		 * The delalloc extent is in front of the queried range,
4132 		 * find again from the queried start block.
4133 		 */
4134 		len -= lblk - hole_start;
4135 		hole_start = lblk;
4136 		goto again;
4137 	} else if (in_range(lblk, es.es_lblk, es.es_len)) {
4138 		/*
4139 		 * The delalloc extent containing lblk, it must have been
4140 		 * added after ext4_map_blocks() checked the extent status
4141 		 * tree so we are not holding i_rwsem and delalloc info is
4142 		 * only stabilized by i_data_sem we are going to release
4143 		 * soon. Don't modify the extent status tree and report
4144 		 * extent as a hole, just adjust the length to the delalloc
4145 		 * extent's after lblk.
4146 		 */
4147 		len = es.es_lblk + es.es_len - lblk;
4148 		return len;
4149 	} else {
4150 		/*
4151 		 * The delalloc extent is partially or completely behind
4152 		 * the queried range, update hole length until the
4153 		 * beginning of the delalloc extent.
4154 		 */
4155 		len = min(es.es_lblk - hole_start, len);
4156 	}
4157 
4158 insert_hole:
4159 	/* Put just found gap into cache to speed up subsequent requests */
4160 	ext_debug(inode, " -> %u:%u\n", hole_start, len);
4161 	ext4_es_insert_extent(inode, hole_start, len, ~0,
4162 			      EXTENT_STATUS_HOLE, 0);
4163 
4164 	/* Update hole_len to reflect hole size after lblk */
4165 	if (hole_start != lblk)
4166 		len -= lblk - hole_start;
4167 
4168 	return len;
4169 }
4170 
4171 /*
4172  * Block allocation/map/preallocation routine for extents based files
4173  *
4174  *
4175  * Need to be called with
4176  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
4177  * (ie, flags is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
4178  *
4179  * return > 0, number of blocks already mapped/allocated
4180  *          if flags doesn't contain EXT4_GET_BLOCKS_CREATE and these are pre-allocated blocks
4181  *          	buffer head is unmapped
4182  *          otherwise blocks are mapped
4183  *
4184  * return = 0, if plain look up failed (blocks have not been allocated)
4185  *          buffer head is unmapped
4186  *
4187  * return < 0, error case.
4188  */
4189 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4190 			struct ext4_map_blocks *map, int flags)
4191 {
4192 	struct ext4_ext_path *path = NULL;
4193 	struct ext4_extent newex, *ex, ex2;
4194 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4195 	ext4_fsblk_t newblock = 0, pblk;
4196 	int err = 0, depth, ret;
4197 	unsigned int allocated = 0, offset = 0;
4198 	unsigned int allocated_clusters = 0;
4199 	struct ext4_allocation_request ar;
4200 	ext4_lblk_t cluster_offset;
4201 
4202 	ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len);
4203 	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
4204 
4205 	/* find extent for this block */
4206 	path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
4207 	if (IS_ERR(path)) {
4208 		err = PTR_ERR(path);
4209 		goto out;
4210 	}
4211 
4212 	depth = ext_depth(inode);
4213 
4214 	/*
4215 	 * consistent leaf must not be empty;
4216 	 * this situation is possible, though, _during_ tree modification;
4217 	 * this is why assert can't be put in ext4_find_extent()
4218 	 */
4219 	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4220 		EXT4_ERROR_INODE(inode, "bad extent address "
4221 				 "lblock: %lu, depth: %d pblock %lld",
4222 				 (unsigned long) map->m_lblk, depth,
4223 				 path[depth].p_block);
4224 		err = -EFSCORRUPTED;
4225 		goto out;
4226 	}
4227 
4228 	ex = path[depth].p_ext;
4229 	if (ex) {
4230 		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4231 		ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4232 		unsigned short ee_len;
4233 
4234 
4235 		/*
4236 		 * unwritten extents are treated as holes, except that
4237 		 * we split out initialized portions during a write.
4238 		 */
4239 		ee_len = ext4_ext_get_actual_len(ex);
4240 
4241 		trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
4242 
4243 		/* if found extent covers block, simply return it */
4244 		if (in_range(map->m_lblk, ee_block, ee_len)) {
4245 			newblock = map->m_lblk - ee_block + ee_start;
4246 			/* number of remaining blocks in the extent */
4247 			allocated = ee_len - (map->m_lblk - ee_block);
4248 			ext_debug(inode, "%u fit into %u:%d -> %llu\n",
4249 				  map->m_lblk, ee_block, ee_len, newblock);
4250 
4251 			/*
4252 			 * If the extent is initialized check whether the
4253 			 * caller wants to convert it to unwritten.
4254 			 */
4255 			if ((!ext4_ext_is_unwritten(ex)) &&
4256 			    (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
4257 				err = convert_initialized_extent(handle,
4258 					inode, map, &path, &allocated);
4259 				goto out;
4260 			} else if (!ext4_ext_is_unwritten(ex)) {
4261 				map->m_flags |= EXT4_MAP_MAPPED;
4262 				map->m_pblk = newblock;
4263 				if (allocated > map->m_len)
4264 					allocated = map->m_len;
4265 				map->m_len = allocated;
4266 				ext4_ext_show_leaf(inode, path);
4267 				goto out;
4268 			}
4269 
4270 			ret = ext4_ext_handle_unwritten_extents(
4271 				handle, inode, map, &path, flags,
4272 				allocated, newblock);
4273 			if (ret < 0)
4274 				err = ret;
4275 			else
4276 				allocated = ret;
4277 			goto out;
4278 		}
4279 	}
4280 
4281 	/*
4282 	 * requested block isn't allocated yet;
4283 	 * we couldn't try to create block if flags doesn't contain EXT4_GET_BLOCKS_CREATE
4284 	 */
4285 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4286 		ext4_lblk_t len;
4287 
4288 		len = ext4_ext_determine_insert_hole(inode, path, map->m_lblk);
4289 
4290 		map->m_pblk = 0;
4291 		map->m_len = min_t(unsigned int, map->m_len, len);
4292 		goto out;
4293 	}
4294 
4295 	/*
4296 	 * Okay, we need to do block allocation.
4297 	 */
4298 	newex.ee_block = cpu_to_le32(map->m_lblk);
4299 	cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4300 
4301 	/*
4302 	 * If we are doing bigalloc, check to see if the extent returned
4303 	 * by ext4_find_extent() implies a cluster we can use.
4304 	 */
4305 	if (cluster_offset && ex &&
4306 	    get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4307 		ar.len = allocated = map->m_len;
4308 		newblock = map->m_pblk;
4309 		goto got_allocated_blocks;
4310 	}
4311 
4312 	/* find neighbour allocated blocks */
4313 	ar.lleft = map->m_lblk;
4314 	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4315 	if (err)
4316 		goto out;
4317 	ar.lright = map->m_lblk;
4318 	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4319 	if (err < 0)
4320 		goto out;
4321 
4322 	/* Check if the extent after searching to the right implies a
4323 	 * cluster we can use. */
4324 	if ((sbi->s_cluster_ratio > 1) && err &&
4325 	    get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
4326 		ar.len = allocated = map->m_len;
4327 		newblock = map->m_pblk;
4328 		err = 0;
4329 		goto got_allocated_blocks;
4330 	}
4331 
4332 	/*
4333 	 * See if request is beyond maximum number of blocks we can have in
4334 	 * a single extent. For an initialized extent this limit is
4335 	 * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
4336 	 * EXT_UNWRITTEN_MAX_LEN.
4337 	 */
4338 	if (map->m_len > EXT_INIT_MAX_LEN &&
4339 	    !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4340 		map->m_len = EXT_INIT_MAX_LEN;
4341 	else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
4342 		 (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4343 		map->m_len = EXT_UNWRITTEN_MAX_LEN;
4344 
4345 	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4346 	newex.ee_len = cpu_to_le16(map->m_len);
4347 	err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4348 	if (err)
4349 		allocated = ext4_ext_get_actual_len(&newex);
4350 	else
4351 		allocated = map->m_len;
4352 
4353 	/* allocate new block */
4354 	ar.inode = inode;
4355 	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4356 	ar.logical = map->m_lblk;
4357 	/*
4358 	 * We calculate the offset from the beginning of the cluster
4359 	 * for the logical block number, since when we allocate a
4360 	 * physical cluster, the physical block should start at the
4361 	 * same offset from the beginning of the cluster.  This is
4362 	 * needed so that future calls to get_implied_cluster_alloc()
4363 	 * work correctly.
4364 	 */
4365 	offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4366 	ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4367 	ar.goal -= offset;
4368 	ar.logical -= offset;
4369 	if (S_ISREG(inode->i_mode))
4370 		ar.flags = EXT4_MB_HINT_DATA;
4371 	else
4372 		/* disable in-core preallocation for non-regular files */
4373 		ar.flags = 0;
4374 	if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4375 		ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4376 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4377 		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
4378 	if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
4379 		ar.flags |= EXT4_MB_USE_RESERVED;
4380 	newblock = ext4_mb_new_blocks(handle, &ar, &err);
4381 	if (!newblock)
4382 		goto out;
4383 	allocated_clusters = ar.len;
4384 	ar.len = EXT4_C2B(sbi, ar.len) - offset;
4385 	ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n",
4386 		  ar.goal, newblock, ar.len, allocated);
4387 	if (ar.len > allocated)
4388 		ar.len = allocated;
4389 
4390 got_allocated_blocks:
4391 	/* try to insert new extent into found leaf and return */
4392 	pblk = newblock + offset;
4393 	ext4_ext_store_pblock(&newex, pblk);
4394 	newex.ee_len = cpu_to_le16(ar.len);
4395 	/* Mark unwritten */
4396 	if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
4397 		ext4_ext_mark_unwritten(&newex);
4398 		map->m_flags |= EXT4_MAP_UNWRITTEN;
4399 	}
4400 
4401 	path = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
4402 	if (IS_ERR(path)) {
4403 		err = PTR_ERR(path);
4404 		if (allocated_clusters) {
4405 			int fb_flags = 0;
4406 
4407 			/*
4408 			 * free data blocks we just allocated.
4409 			 * not a good idea to call discard here directly,
4410 			 * but otherwise we'd need to call it every free().
4411 			 */
4412 			ext4_discard_preallocations(inode);
4413 			if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4414 				fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
4415 			ext4_free_blocks(handle, inode, NULL, newblock,
4416 					 EXT4_C2B(sbi, allocated_clusters),
4417 					 fb_flags);
4418 		}
4419 		goto out;
4420 	}
4421 
4422 	/*
4423 	 * Cache the extent and update transaction to commit on fdatasync only
4424 	 * when it is _not_ an unwritten extent.
4425 	 */
4426 	if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
4427 		ext4_update_inode_fsync_trans(handle, inode, 1);
4428 	else
4429 		ext4_update_inode_fsync_trans(handle, inode, 0);
4430 
4431 	map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED);
4432 	map->m_pblk = pblk;
4433 	map->m_len = ar.len;
4434 	allocated = map->m_len;
4435 	ext4_ext_show_leaf(inode, path);
4436 out:
4437 	ext4_free_ext_path(path);
4438 
4439 	trace_ext4_ext_map_blocks_exit(inode, flags, map,
4440 				       err ? err : allocated);
4441 	return err ? err : allocated;
4442 }
4443 
4444 int ext4_ext_truncate(handle_t *handle, struct inode *inode)
4445 {
4446 	struct super_block *sb = inode->i_sb;
4447 	ext4_lblk_t last_block;
4448 	int err = 0;
4449 
4450 	/*
4451 	 * TODO: optimization is possible here.
4452 	 * Probably we need not scan at all,
4453 	 * because page truncation is enough.
4454 	 */
4455 
4456 	/* we have to know where to truncate from in crash case */
4457 	EXT4_I(inode)->i_disksize = inode->i_size;
4458 	err = ext4_mark_inode_dirty(handle, inode);
4459 	if (err)
4460 		return err;
4461 
4462 	last_block = (inode->i_size + sb->s_blocksize - 1)
4463 			>> EXT4_BLOCK_SIZE_BITS(sb);
4464 	ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);
4465 
4466 retry_remove_space:
4467 	err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4468 	if (err == -ENOMEM) {
4469 		memalloc_retry_wait(GFP_ATOMIC);
4470 		goto retry_remove_space;
4471 	}
4472 	return err;
4473 }
4474 
4475 static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
4476 				  ext4_lblk_t len, loff_t new_size,
4477 				  int flags)
4478 {
4479 	struct inode *inode = file_inode(file);
4480 	handle_t *handle;
4481 	int ret = 0, ret2 = 0, ret3 = 0;
4482 	int retries = 0;
4483 	int depth = 0;
4484 	struct ext4_map_blocks map;
4485 	unsigned int credits;
4486 	loff_t epos;
4487 
4488 	BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
4489 	map.m_lblk = offset;
4490 	map.m_len = len;
4491 	/*
4492 	 * Don't normalize the request if it can fit in one extent so
4493 	 * that it doesn't get unnecessarily split into multiple
4494 	 * extents.
4495 	 */
4496 	if (len <= EXT_UNWRITTEN_MAX_LEN)
4497 		flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4498 
4499 	/*
4500 	 * credits to insert 1 extent into extent tree
4501 	 */
4502 	credits = ext4_chunk_trans_blocks(inode, len);
4503 	depth = ext_depth(inode);
4504 
4505 retry:
4506 	while (len) {
4507 		/*
4508 		 * Recalculate credits when extent tree depth changes.
4509 		 */
4510 		if (depth != ext_depth(inode)) {
4511 			credits = ext4_chunk_trans_blocks(inode, len);
4512 			depth = ext_depth(inode);
4513 		}
4514 
4515 		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4516 					    credits);
4517 		if (IS_ERR(handle)) {
4518 			ret = PTR_ERR(handle);
4519 			break;
4520 		}
4521 		ret = ext4_map_blocks(handle, inode, &map, flags);
4522 		if (ret <= 0) {
4523 			ext4_debug("inode #%lu: block %u: len %u: "
4524 				   "ext4_ext_map_blocks returned %d",
4525 				   inode->i_ino, map.m_lblk,
4526 				   map.m_len, ret);
4527 			ext4_mark_inode_dirty(handle, inode);
4528 			ext4_journal_stop(handle);
4529 			break;
4530 		}
4531 		/*
4532 		 * allow a full retry cycle for any remaining allocations
4533 		 */
4534 		retries = 0;
4535 		map.m_lblk += ret;
4536 		map.m_len = len = len - ret;
4537 		epos = (loff_t)map.m_lblk << inode->i_blkbits;
4538 		inode_set_ctime_current(inode);
4539 		if (new_size) {
4540 			if (epos > new_size)
4541 				epos = new_size;
4542 			if (ext4_update_inode_size(inode, epos) & 0x1)
4543 				inode_set_mtime_to_ts(inode,
4544 						      inode_get_ctime(inode));
4545 		}
4546 		ret2 = ext4_mark_inode_dirty(handle, inode);
4547 		ext4_update_inode_fsync_trans(handle, inode, 1);
4548 		ret3 = ext4_journal_stop(handle);
4549 		ret2 = ret3 ? ret3 : ret2;
4550 		if (unlikely(ret2))
4551 			break;
4552 	}
4553 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
4554 		goto retry;
4555 
4556 	return ret > 0 ? ret2 : ret;
4557 }
4558 
4559 static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len);
4560 
4561 static int ext4_insert_range(struct file *file, loff_t offset, loff_t len);
4562 
4563 static long ext4_zero_range(struct file *file, loff_t offset,
4564 			    loff_t len, int mode)
4565 {
4566 	struct inode *inode = file_inode(file);
4567 	struct address_space *mapping = file->f_mapping;
4568 	handle_t *handle = NULL;
4569 	unsigned int max_blocks;
4570 	loff_t new_size = 0;
4571 	int ret = 0;
4572 	int flags;
4573 	int credits;
4574 	int partial_begin, partial_end;
4575 	loff_t start, end;
4576 	ext4_lblk_t lblk;
4577 	unsigned int blkbits = inode->i_blkbits;
4578 
4579 	trace_ext4_zero_range(inode, offset, len, mode);
4580 
4581 	/*
4582 	 * Round up offset. This is not fallocate, we need to zero out
4583 	 * blocks, so convert interior block aligned part of the range to
4584 	 * unwritten and possibly manually zero out unaligned parts of the
4585 	 * range. Here, start and partial_begin are inclusive, end and
4586 	 * partial_end are exclusive.
4587 	 */
4588 	start = round_up(offset, 1 << blkbits);
4589 	end = round_down((offset + len), 1 << blkbits);
4590 
4591 	if (start < offset || end > offset + len)
4592 		return -EINVAL;
4593 	partial_begin = offset & ((1 << blkbits) - 1);
4594 	partial_end = (offset + len) & ((1 << blkbits) - 1);
4595 
4596 	lblk = start >> blkbits;
4597 	max_blocks = (end >> blkbits);
4598 	if (max_blocks < lblk)
4599 		max_blocks = 0;
4600 	else
4601 		max_blocks -= lblk;
4602 
4603 	inode_lock(inode);
4604 
4605 	/*
4606 	 * Indirect files do not support unwritten extents
4607 	 */
4608 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4609 		ret = -EOPNOTSUPP;
4610 		goto out_mutex;
4611 	}
4612 
4613 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4614 	    (offset + len > inode->i_size ||
4615 	     offset + len > EXT4_I(inode)->i_disksize)) {
4616 		new_size = offset + len;
4617 		ret = inode_newsize_ok(inode, new_size);
4618 		if (ret)
4619 			goto out_mutex;
4620 	}
4621 
4622 	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
4623 
4624 	/* Wait all existing dio workers, newcomers will block on i_rwsem */
4625 	inode_dio_wait(inode);
4626 
4627 	ret = file_modified(file);
4628 	if (ret)
4629 		goto out_mutex;
4630 
4631 	/* Preallocate the range including the unaligned edges */
4632 	if (partial_begin || partial_end) {
4633 		ret = ext4_alloc_file_blocks(file,
4634 				round_down(offset, 1 << blkbits) >> blkbits,
4635 				(round_up((offset + len), 1 << blkbits) -
4636 				 round_down(offset, 1 << blkbits)) >> blkbits,
4637 				new_size, flags);
4638 		if (ret)
4639 			goto out_mutex;
4640 
4641 	}
4642 
4643 	/* Zero range excluding the unaligned edges */
4644 	if (max_blocks > 0) {
4645 		flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
4646 			  EXT4_EX_NOCACHE);
4647 
4648 		/*
4649 		 * Prevent page faults from reinstantiating pages we have
4650 		 * released from page cache.
4651 		 */
4652 		filemap_invalidate_lock(mapping);
4653 
4654 		ret = ext4_break_layouts(inode);
4655 		if (ret) {
4656 			filemap_invalidate_unlock(mapping);
4657 			goto out_mutex;
4658 		}
4659 
4660 		ret = ext4_update_disksize_before_punch(inode, offset, len);
4661 		if (ret) {
4662 			filemap_invalidate_unlock(mapping);
4663 			goto out_mutex;
4664 		}
4665 
4666 		/*
4667 		 * For journalled data we need to write (and checkpoint) pages
4668 		 * before discarding page cache to avoid inconsitent data on
4669 		 * disk in case of crash before zeroing trans is committed.
4670 		 */
4671 		if (ext4_should_journal_data(inode)) {
4672 			ret = filemap_write_and_wait_range(mapping, start,
4673 							   end - 1);
4674 			if (ret) {
4675 				filemap_invalidate_unlock(mapping);
4676 				goto out_mutex;
4677 			}
4678 		}
4679 
4680 		/* Now release the pages and zero block aligned part of pages */
4681 		truncate_pagecache_range(inode, start, end - 1);
4682 		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
4683 
4684 		ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
4685 					     flags);
4686 		filemap_invalidate_unlock(mapping);
4687 		if (ret)
4688 			goto out_mutex;
4689 	}
4690 	if (!partial_begin && !partial_end)
4691 		goto out_mutex;
4692 
4693 	/*
4694 	 * In worst case we have to writeout two nonadjacent unwritten
4695 	 * blocks and update the inode
4696 	 */
4697 	credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
4698 	if (ext4_should_journal_data(inode))
4699 		credits += 2;
4700 	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
4701 	if (IS_ERR(handle)) {
4702 		ret = PTR_ERR(handle);
4703 		ext4_std_error(inode->i_sb, ret);
4704 		goto out_mutex;
4705 	}
4706 
4707 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
4708 	if (new_size)
4709 		ext4_update_inode_size(inode, new_size);
4710 	ret = ext4_mark_inode_dirty(handle, inode);
4711 	if (unlikely(ret))
4712 		goto out_handle;
4713 	/* Zero out partial block at the edges of the range */
4714 	ret = ext4_zero_partial_blocks(handle, inode, offset, len);
4715 	if (ret >= 0)
4716 		ext4_update_inode_fsync_trans(handle, inode, 1);
4717 
4718 	if (file->f_flags & O_SYNC)
4719 		ext4_handle_sync(handle);
4720 
4721 out_handle:
4722 	ext4_journal_stop(handle);
4723 out_mutex:
4724 	inode_unlock(inode);
4725 	return ret;
4726 }
4727 
4728 /*
4729  * preallocate space for a file. This implements ext4's fallocate file
4730  * operation, which gets called from sys_fallocate system call.
4731  * For block-mapped files, posix_fallocate should fall back to the method
4732  * of writing zeroes to the required new blocks (the same behavior which is
4733  * expected for file systems which do not support fallocate() system call).
4734  */
4735 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4736 {
4737 	struct inode *inode = file_inode(file);
4738 	loff_t new_size = 0;
4739 	unsigned int max_blocks;
4740 	int ret = 0;
4741 	int flags;
4742 	ext4_lblk_t lblk;
4743 	unsigned int blkbits = inode->i_blkbits;
4744 
4745 	/*
4746 	 * Encrypted inodes can't handle collapse range or insert
4747 	 * range since we would need to re-encrypt blocks with a
4748 	 * different IV or XTS tweak (which are based on the logical
4749 	 * block number).
4750 	 */
4751 	if (IS_ENCRYPTED(inode) &&
4752 	    (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
4753 		return -EOPNOTSUPP;
4754 
4755 	/* Return error if mode is not supported */
4756 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
4757 		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
4758 		     FALLOC_FL_INSERT_RANGE))
4759 		return -EOPNOTSUPP;
4760 
4761 	inode_lock(inode);
4762 	ret = ext4_convert_inline_data(inode);
4763 	inode_unlock(inode);
4764 	if (ret)
4765 		goto exit;
4766 
4767 	if (mode & FALLOC_FL_PUNCH_HOLE) {
4768 		ret = ext4_punch_hole(file, offset, len);
4769 		goto exit;
4770 	}
4771 
4772 	if (mode & FALLOC_FL_COLLAPSE_RANGE) {
4773 		ret = ext4_collapse_range(file, offset, len);
4774 		goto exit;
4775 	}
4776 
4777 	if (mode & FALLOC_FL_INSERT_RANGE) {
4778 		ret = ext4_insert_range(file, offset, len);
4779 		goto exit;
4780 	}
4781 
4782 	if (mode & FALLOC_FL_ZERO_RANGE) {
4783 		ret = ext4_zero_range(file, offset, len, mode);
4784 		goto exit;
4785 	}
4786 	trace_ext4_fallocate_enter(inode, offset, len, mode);
4787 	lblk = offset >> blkbits;
4788 
4789 	max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
4790 	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
4791 
4792 	inode_lock(inode);
4793 
4794 	/*
4795 	 * We only support preallocation for extent-based files only
4796 	 */
4797 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4798 		ret = -EOPNOTSUPP;
4799 		goto out;
4800 	}
4801 
4802 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4803 	    (offset + len > inode->i_size ||
4804 	     offset + len > EXT4_I(inode)->i_disksize)) {
4805 		new_size = offset + len;
4806 		ret = inode_newsize_ok(inode, new_size);
4807 		if (ret)
4808 			goto out;
4809 	}
4810 
4811 	/* Wait all existing dio workers, newcomers will block on i_rwsem */
4812 	inode_dio_wait(inode);
4813 
4814 	ret = file_modified(file);
4815 	if (ret)
4816 		goto out;
4817 
4818 	ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
4819 	if (ret)
4820 		goto out;
4821 
4822 	if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
4823 		ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
4824 					EXT4_I(inode)->i_sync_tid);
4825 	}
4826 out:
4827 	inode_unlock(inode);
4828 	trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4829 exit:
4830 	return ret;
4831 }
4832 
4833 /*
4834  * This function convert a range of blocks to written extents
4835  * The caller of this function will pass the start offset and the size.
4836  * all unwritten extents within this range will be converted to
4837  * written extents.
4838  *
4839  * This function is called from the direct IO end io call back
4840  * function, to convert the fallocated extents after IO is completed.
4841  * Returns 0 on success.
4842  */
4843 int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
4844 				   loff_t offset, ssize_t len)
4845 {
4846 	unsigned int max_blocks;
4847 	int ret = 0, ret2 = 0, ret3 = 0;
4848 	struct ext4_map_blocks map;
4849 	unsigned int blkbits = inode->i_blkbits;
4850 	unsigned int credits = 0;
4851 
4852 	map.m_lblk = offset >> blkbits;
4853 	max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
4854 
4855 	if (!handle) {
4856 		/*
4857 		 * credits to insert 1 extent into extent tree
4858 		 */
4859 		credits = ext4_chunk_trans_blocks(inode, max_blocks);
4860 	}
4861 	while (ret >= 0 && ret < max_blocks) {
4862 		map.m_lblk += ret;
4863 		map.m_len = (max_blocks -= ret);
4864 		if (credits) {
4865 			handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4866 						    credits);
4867 			if (IS_ERR(handle)) {
4868 				ret = PTR_ERR(handle);
4869 				break;
4870 			}
4871 		}
4872 		ret = ext4_map_blocks(handle, inode, &map,
4873 				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4874 		if (ret <= 0)
4875 			ext4_warning(inode->i_sb,
4876 				     "inode #%lu: block %u: len %u: "
4877 				     "ext4_ext_map_blocks returned %d",
4878 				     inode->i_ino, map.m_lblk,
4879 				     map.m_len, ret);
4880 		ret2 = ext4_mark_inode_dirty(handle, inode);
4881 		if (credits) {
4882 			ret3 = ext4_journal_stop(handle);
4883 			if (unlikely(ret3))
4884 				ret2 = ret3;
4885 		}
4886 
4887 		if (ret <= 0 || ret2)
4888 			break;
4889 	}
4890 	return ret > 0 ? ret2 : ret;
4891 }
4892 
4893 int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end)
4894 {
4895 	int ret = 0, err = 0;
4896 	struct ext4_io_end_vec *io_end_vec;
4897 
4898 	/*
4899 	 * This is somewhat ugly but the idea is clear: When transaction is
4900 	 * reserved, everything goes into it. Otherwise we rather start several
4901 	 * smaller transactions for conversion of each extent separately.
4902 	 */
4903 	if (handle) {
4904 		handle = ext4_journal_start_reserved(handle,
4905 						     EXT4_HT_EXT_CONVERT);
4906 		if (IS_ERR(handle))
4907 			return PTR_ERR(handle);
4908 	}
4909 
4910 	list_for_each_entry(io_end_vec, &io_end->list_vec, list) {
4911 		ret = ext4_convert_unwritten_extents(handle, io_end->inode,
4912 						     io_end_vec->offset,
4913 						     io_end_vec->size);
4914 		if (ret)
4915 			break;
4916 	}
4917 
4918 	if (handle)
4919 		err = ext4_journal_stop(handle);
4920 
4921 	return ret < 0 ? ret : err;
4922 }
4923 
4924 static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap)
4925 {
4926 	__u64 physical = 0;
4927 	__u64 length = 0;
4928 	int blockbits = inode->i_sb->s_blocksize_bits;
4929 	int error = 0;
4930 	u16 iomap_type;
4931 
4932 	/* in-inode? */
4933 	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4934 		struct ext4_iloc iloc;
4935 		int offset;	/* offset of xattr in inode */
4936 
4937 		error = ext4_get_inode_loc(inode, &iloc);
4938 		if (error)
4939 			return error;
4940 		physical = (__u64)iloc.bh->b_blocknr << blockbits;
4941 		offset = EXT4_GOOD_OLD_INODE_SIZE +
4942 				EXT4_I(inode)->i_extra_isize;
4943 		physical += offset;
4944 		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4945 		brelse(iloc.bh);
4946 		iomap_type = IOMAP_INLINE;
4947 	} else if (EXT4_I(inode)->i_file_acl) { /* external block */
4948 		physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
4949 		length = inode->i_sb->s_blocksize;
4950 		iomap_type = IOMAP_MAPPED;
4951 	} else {
4952 		/* no in-inode or external block for xattr, so return -ENOENT */
4953 		error = -ENOENT;
4954 		goto out;
4955 	}
4956 
4957 	iomap->addr = physical;
4958 	iomap->offset = 0;
4959 	iomap->length = length;
4960 	iomap->type = iomap_type;
4961 	iomap->flags = 0;
4962 out:
4963 	return error;
4964 }
4965 
4966 static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset,
4967 				  loff_t length, unsigned flags,
4968 				  struct iomap *iomap, struct iomap *srcmap)
4969 {
4970 	int error;
4971 
4972 	error = ext4_iomap_xattr_fiemap(inode, iomap);
4973 	if (error == 0 && (offset >= iomap->length))
4974 		error = -ENOENT;
4975 	return error;
4976 }
4977 
4978 static const struct iomap_ops ext4_iomap_xattr_ops = {
4979 	.iomap_begin		= ext4_iomap_xattr_begin,
4980 };
4981 
4982 static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len)
4983 {
4984 	u64 maxbytes;
4985 
4986 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4987 		maxbytes = inode->i_sb->s_maxbytes;
4988 	else
4989 		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
4990 
4991 	if (*len == 0)
4992 		return -EINVAL;
4993 	if (start > maxbytes)
4994 		return -EFBIG;
4995 
4996 	/*
4997 	 * Shrink request scope to what the fs can actually handle.
4998 	 */
4999 	if (*len > maxbytes || (maxbytes - *len) < start)
5000 		*len = maxbytes - start;
5001 	return 0;
5002 }
5003 
5004 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
5005 		u64 start, u64 len)
5006 {
5007 	int error = 0;
5008 
5009 	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
5010 		error = ext4_ext_precache(inode);
5011 		if (error)
5012 			return error;
5013 		fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
5014 	}
5015 
5016 	/*
5017 	 * For bitmap files the maximum size limit could be smaller than
5018 	 * s_maxbytes, so check len here manually instead of just relying on the
5019 	 * generic check.
5020 	 */
5021 	error = ext4_fiemap_check_ranges(inode, start, &len);
5022 	if (error)
5023 		return error;
5024 
5025 	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
5026 		fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
5027 		return iomap_fiemap(inode, fieinfo, start, len,
5028 				    &ext4_iomap_xattr_ops);
5029 	}
5030 
5031 	return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
5032 }
5033 
5034 int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
5035 		      __u64 start, __u64 len)
5036 {
5037 	ext4_lblk_t start_blk, len_blks;
5038 	__u64 last_blk;
5039 	int error = 0;
5040 
5041 	if (ext4_has_inline_data(inode)) {
5042 		int has_inline;
5043 
5044 		down_read(&EXT4_I(inode)->xattr_sem);
5045 		has_inline = ext4_has_inline_data(inode);
5046 		up_read(&EXT4_I(inode)->xattr_sem);
5047 		if (has_inline)
5048 			return 0;
5049 	}
5050 
5051 	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
5052 		error = ext4_ext_precache(inode);
5053 		if (error)
5054 			return error;
5055 		fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
5056 	}
5057 
5058 	error = fiemap_prep(inode, fieinfo, start, &len, 0);
5059 	if (error)
5060 		return error;
5061 
5062 	error = ext4_fiemap_check_ranges(inode, start, &len);
5063 	if (error)
5064 		return error;
5065 
5066 	start_blk = start >> inode->i_sb->s_blocksize_bits;
5067 	last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
5068 	if (last_blk >= EXT_MAX_BLOCKS)
5069 		last_blk = EXT_MAX_BLOCKS-1;
5070 	len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
5071 
5072 	/*
5073 	 * Walk the extent tree gathering extent information
5074 	 * and pushing extents back to the user.
5075 	 */
5076 	return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo);
5077 }
5078 
5079 /*
5080  * ext4_ext_shift_path_extents:
5081  * Shift the extents of a path structure lying between path[depth].p_ext
5082  * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells
5083  * if it is right shift or left shift operation.
5084  */
5085 static int
5086 ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
5087 			    struct inode *inode, handle_t *handle,
5088 			    enum SHIFT_DIRECTION SHIFT)
5089 {
5090 	int depth, err = 0;
5091 	struct ext4_extent *ex_start, *ex_last;
5092 	bool update = false;
5093 	int credits, restart_credits;
5094 	depth = path->p_depth;
5095 
5096 	while (depth >= 0) {
5097 		if (depth == path->p_depth) {
5098 			ex_start = path[depth].p_ext;
5099 			if (!ex_start)
5100 				return -EFSCORRUPTED;
5101 
5102 			ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
5103 			/* leaf + sb + inode */
5104 			credits = 3;
5105 			if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) {
5106 				update = true;
5107 				/* extent tree + sb + inode */
5108 				credits = depth + 2;
5109 			}
5110 
5111 			restart_credits = ext4_writepage_trans_blocks(inode);
5112 			err = ext4_datasem_ensure_credits(handle, inode, credits,
5113 					restart_credits, 0);
5114 			if (err) {
5115 				if (err > 0)
5116 					err = -EAGAIN;
5117 				goto out;
5118 			}
5119 
5120 			err = ext4_ext_get_access(handle, inode, path + depth);
5121 			if (err)
5122 				goto out;
5123 
5124 			while (ex_start <= ex_last) {
5125 				if (SHIFT == SHIFT_LEFT) {
5126 					le32_add_cpu(&ex_start->ee_block,
5127 						-shift);
5128 					/* Try to merge to the left. */
5129 					if ((ex_start >
5130 					    EXT_FIRST_EXTENT(path[depth].p_hdr))
5131 					    &&
5132 					    ext4_ext_try_to_merge_right(inode,
5133 					    path, ex_start - 1))
5134 						ex_last--;
5135 					else
5136 						ex_start++;
5137 				} else {
5138 					le32_add_cpu(&ex_last->ee_block, shift);
5139 					ext4_ext_try_to_merge_right(inode, path,
5140 						ex_last);
5141 					ex_last--;
5142 				}
5143 			}
5144 			err = ext4_ext_dirty(handle, inode, path + depth);
5145 			if (err)
5146 				goto out;
5147 
5148 			if (--depth < 0 || !update)
5149 				break;
5150 		}
5151 
5152 		/* Update index too */
5153 		err = ext4_ext_get_access(handle, inode, path + depth);
5154 		if (err)
5155 			goto out;
5156 
5157 		if (SHIFT == SHIFT_LEFT)
5158 			le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
5159 		else
5160 			le32_add_cpu(&path[depth].p_idx->ei_block, shift);
5161 		err = ext4_ext_dirty(handle, inode, path + depth);
5162 		if (err)
5163 			goto out;
5164 
5165 		/* we are done if current index is not a starting index */
5166 		if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
5167 			break;
5168 
5169 		depth--;
5170 	}
5171 
5172 out:
5173 	return err;
5174 }
5175 
5176 /*
5177  * ext4_ext_shift_extents:
5178  * All the extents which lies in the range from @start to the last allocated
5179  * block for the @inode are shifted either towards left or right (depending
5180  * upon @SHIFT) by @shift blocks.
5181  * On success, 0 is returned, error otherwise.
5182  */
5183 static int
5184 ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5185 		       ext4_lblk_t start, ext4_lblk_t shift,
5186 		       enum SHIFT_DIRECTION SHIFT)
5187 {
5188 	struct ext4_ext_path *path;
5189 	int ret = 0, depth;
5190 	struct ext4_extent *extent;
5191 	ext4_lblk_t stop, *iterator, ex_start, ex_end;
5192 	ext4_lblk_t tmp = EXT_MAX_BLOCKS;
5193 
5194 	/* Let path point to the last extent */
5195 	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
5196 				EXT4_EX_NOCACHE);
5197 	if (IS_ERR(path))
5198 		return PTR_ERR(path);
5199 
5200 	depth = path->p_depth;
5201 	extent = path[depth].p_ext;
5202 	if (!extent)
5203 		goto out;
5204 
5205 	stop = le32_to_cpu(extent->ee_block);
5206 
5207        /*
5208 	* For left shifts, make sure the hole on the left is big enough to
5209 	* accommodate the shift.  For right shifts, make sure the last extent
5210 	* won't be shifted beyond EXT_MAX_BLOCKS.
5211 	*/
5212 	if (SHIFT == SHIFT_LEFT) {
5213 		path = ext4_find_extent(inode, start - 1, path,
5214 					EXT4_EX_NOCACHE);
5215 		if (IS_ERR(path))
5216 			return PTR_ERR(path);
5217 		depth = path->p_depth;
5218 		extent =  path[depth].p_ext;
5219 		if (extent) {
5220 			ex_start = le32_to_cpu(extent->ee_block);
5221 			ex_end = le32_to_cpu(extent->ee_block) +
5222 				ext4_ext_get_actual_len(extent);
5223 		} else {
5224 			ex_start = 0;
5225 			ex_end = 0;
5226 		}
5227 
5228 		if ((start == ex_start && shift > ex_start) ||
5229 		    (shift > start - ex_end)) {
5230 			ret = -EINVAL;
5231 			goto out;
5232 		}
5233 	} else {
5234 		if (shift > EXT_MAX_BLOCKS -
5235 		    (stop + ext4_ext_get_actual_len(extent))) {
5236 			ret = -EINVAL;
5237 			goto out;
5238 		}
5239 	}
5240 
5241 	/*
5242 	 * In case of left shift, iterator points to start and it is increased
5243 	 * till we reach stop. In case of right shift, iterator points to stop
5244 	 * and it is decreased till we reach start.
5245 	 */
5246 again:
5247 	ret = 0;
5248 	if (SHIFT == SHIFT_LEFT)
5249 		iterator = &start;
5250 	else
5251 		iterator = &stop;
5252 
5253 	if (tmp != EXT_MAX_BLOCKS)
5254 		*iterator = tmp;
5255 
5256 	/*
5257 	 * Its safe to start updating extents.  Start and stop are unsigned, so
5258 	 * in case of right shift if extent with 0 block is reached, iterator
5259 	 * becomes NULL to indicate the end of the loop.
5260 	 */
5261 	while (iterator && start <= stop) {
5262 		path = ext4_find_extent(inode, *iterator, path,
5263 					EXT4_EX_NOCACHE);
5264 		if (IS_ERR(path))
5265 			return PTR_ERR(path);
5266 		depth = path->p_depth;
5267 		extent = path[depth].p_ext;
5268 		if (!extent) {
5269 			EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
5270 					 (unsigned long) *iterator);
5271 			return -EFSCORRUPTED;
5272 		}
5273 		if (SHIFT == SHIFT_LEFT && *iterator >
5274 		    le32_to_cpu(extent->ee_block)) {
5275 			/* Hole, move to the next extent */
5276 			if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
5277 				path[depth].p_ext++;
5278 			} else {
5279 				*iterator = ext4_ext_next_allocated_block(path);
5280 				continue;
5281 			}
5282 		}
5283 
5284 		tmp = *iterator;
5285 		if (SHIFT == SHIFT_LEFT) {
5286 			extent = EXT_LAST_EXTENT(path[depth].p_hdr);
5287 			*iterator = le32_to_cpu(extent->ee_block) +
5288 					ext4_ext_get_actual_len(extent);
5289 		} else {
5290 			extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
5291 			if (le32_to_cpu(extent->ee_block) > start)
5292 				*iterator = le32_to_cpu(extent->ee_block) - 1;
5293 			else if (le32_to_cpu(extent->ee_block) == start)
5294 				iterator = NULL;
5295 			else {
5296 				extent = EXT_LAST_EXTENT(path[depth].p_hdr);
5297 				while (le32_to_cpu(extent->ee_block) >= start)
5298 					extent--;
5299 
5300 				if (extent == EXT_LAST_EXTENT(path[depth].p_hdr))
5301 					break;
5302 
5303 				extent++;
5304 				iterator = NULL;
5305 			}
5306 			path[depth].p_ext = extent;
5307 		}
5308 		ret = ext4_ext_shift_path_extents(path, shift, inode,
5309 				handle, SHIFT);
5310 		/* iterator can be NULL which means we should break */
5311 		if (ret == -EAGAIN)
5312 			goto again;
5313 		if (ret)
5314 			break;
5315 	}
5316 out:
5317 	ext4_free_ext_path(path);
5318 	return ret;
5319 }
5320 
5321 /*
5322  * ext4_collapse_range:
5323  * This implements the fallocate's collapse range functionality for ext4
5324  * Returns: 0 and non-zero on error.
5325  */
5326 static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
5327 {
5328 	struct inode *inode = file_inode(file);
5329 	struct super_block *sb = inode->i_sb;
5330 	struct address_space *mapping = inode->i_mapping;
5331 	ext4_lblk_t punch_start, punch_stop;
5332 	handle_t *handle;
5333 	unsigned int credits;
5334 	loff_t new_size, ioffset;
5335 	int ret;
5336 
5337 	/*
5338 	 * We need to test this early because xfstests assumes that a
5339 	 * collapse range of (0, 1) will return EOPNOTSUPP if the file
5340 	 * system does not support collapse range.
5341 	 */
5342 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5343 		return -EOPNOTSUPP;
5344 
5345 	/* Collapse range works only on fs cluster size aligned regions. */
5346 	if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
5347 		return -EINVAL;
5348 
5349 	trace_ext4_collapse_range(inode, offset, len);
5350 
5351 	punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
5352 	punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
5353 
5354 	inode_lock(inode);
5355 	/*
5356 	 * There is no need to overlap collapse range with EOF, in which case
5357 	 * it is effectively a truncate operation
5358 	 */
5359 	if (offset + len >= inode->i_size) {
5360 		ret = -EINVAL;
5361 		goto out_mutex;
5362 	}
5363 
5364 	/* Currently just for extent based files */
5365 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
5366 		ret = -EOPNOTSUPP;
5367 		goto out_mutex;
5368 	}
5369 
5370 	/* Wait for existing dio to complete */
5371 	inode_dio_wait(inode);
5372 
5373 	ret = file_modified(file);
5374 	if (ret)
5375 		goto out_mutex;
5376 
5377 	/*
5378 	 * Prevent page faults from reinstantiating pages we have released from
5379 	 * page cache.
5380 	 */
5381 	filemap_invalidate_lock(mapping);
5382 
5383 	ret = ext4_break_layouts(inode);
5384 	if (ret)
5385 		goto out_mmap;
5386 
5387 	/*
5388 	 * Need to round down offset to be aligned with page size boundary
5389 	 * for page size > block size.
5390 	 */
5391 	ioffset = round_down(offset, PAGE_SIZE);
5392 	/*
5393 	 * Write tail of the last page before removed range since it will get
5394 	 * removed from the page cache below.
5395 	 */
5396 	ret = filemap_write_and_wait_range(mapping, ioffset, offset);
5397 	if (ret)
5398 		goto out_mmap;
5399 	/*
5400 	 * Write data that will be shifted to preserve them when discarding
5401 	 * page cache below. We are also protected from pages becoming dirty
5402 	 * by i_rwsem and invalidate_lock.
5403 	 */
5404 	ret = filemap_write_and_wait_range(mapping, offset + len,
5405 					   LLONG_MAX);
5406 	if (ret)
5407 		goto out_mmap;
5408 	truncate_pagecache(inode, ioffset);
5409 
5410 	credits = ext4_writepage_trans_blocks(inode);
5411 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
5412 	if (IS_ERR(handle)) {
5413 		ret = PTR_ERR(handle);
5414 		goto out_mmap;
5415 	}
5416 	ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
5417 
5418 	down_write(&EXT4_I(inode)->i_data_sem);
5419 	ext4_discard_preallocations(inode);
5420 	ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start);
5421 
5422 	ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
5423 	if (ret) {
5424 		up_write(&EXT4_I(inode)->i_data_sem);
5425 		goto out_stop;
5426 	}
5427 	ext4_discard_preallocations(inode);
5428 
5429 	ret = ext4_ext_shift_extents(inode, handle, punch_stop,
5430 				     punch_stop - punch_start, SHIFT_LEFT);
5431 	if (ret) {
5432 		up_write(&EXT4_I(inode)->i_data_sem);
5433 		goto out_stop;
5434 	}
5435 
5436 	new_size = inode->i_size - len;
5437 	i_size_write(inode, new_size);
5438 	EXT4_I(inode)->i_disksize = new_size;
5439 
5440 	up_write(&EXT4_I(inode)->i_data_sem);
5441 	if (IS_SYNC(inode))
5442 		ext4_handle_sync(handle);
5443 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
5444 	ret = ext4_mark_inode_dirty(handle, inode);
5445 	ext4_update_inode_fsync_trans(handle, inode, 1);
5446 
5447 out_stop:
5448 	ext4_journal_stop(handle);
5449 out_mmap:
5450 	filemap_invalidate_unlock(mapping);
5451 out_mutex:
5452 	inode_unlock(inode);
5453 	return ret;
5454 }
5455 
5456 /*
5457  * ext4_insert_range:
5458  * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate.
5459  * The data blocks starting from @offset to the EOF are shifted by @len
5460  * towards right to create a hole in the @inode. Inode size is increased
5461  * by len bytes.
5462  * Returns 0 on success, error otherwise.
5463  */
5464 static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
5465 {
5466 	struct inode *inode = file_inode(file);
5467 	struct super_block *sb = inode->i_sb;
5468 	struct address_space *mapping = inode->i_mapping;
5469 	handle_t *handle;
5470 	struct ext4_ext_path *path;
5471 	struct ext4_extent *extent;
5472 	ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
5473 	unsigned int credits, ee_len;
5474 	int ret = 0, depth, split_flag = 0;
5475 	loff_t ioffset;
5476 
5477 	/*
5478 	 * We need to test this early because xfstests assumes that an
5479 	 * insert range of (0, 1) will return EOPNOTSUPP if the file
5480 	 * system does not support insert range.
5481 	 */
5482 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5483 		return -EOPNOTSUPP;
5484 
5485 	/* Insert range works only on fs cluster size aligned regions. */
5486 	if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
5487 		return -EINVAL;
5488 
5489 	trace_ext4_insert_range(inode, offset, len);
5490 
5491 	offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
5492 	len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);
5493 
5494 	inode_lock(inode);
5495 	/* Currently just for extent based files */
5496 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
5497 		ret = -EOPNOTSUPP;
5498 		goto out_mutex;
5499 	}
5500 
5501 	/* Check whether the maximum file size would be exceeded */
5502 	if (len > inode->i_sb->s_maxbytes - inode->i_size) {
5503 		ret = -EFBIG;
5504 		goto out_mutex;
5505 	}
5506 
5507 	/* Offset must be less than i_size */
5508 	if (offset >= inode->i_size) {
5509 		ret = -EINVAL;
5510 		goto out_mutex;
5511 	}
5512 
5513 	/* Wait for existing dio to complete */
5514 	inode_dio_wait(inode);
5515 
5516 	ret = file_modified(file);
5517 	if (ret)
5518 		goto out_mutex;
5519 
5520 	/*
5521 	 * Prevent page faults from reinstantiating pages we have released from
5522 	 * page cache.
5523 	 */
5524 	filemap_invalidate_lock(mapping);
5525 
5526 	ret = ext4_break_layouts(inode);
5527 	if (ret)
5528 		goto out_mmap;
5529 
5530 	/*
5531 	 * Need to round down to align start offset to page size boundary
5532 	 * for page size > block size.
5533 	 */
5534 	ioffset = round_down(offset, PAGE_SIZE);
5535 	/* Write out all dirty pages */
5536 	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
5537 			LLONG_MAX);
5538 	if (ret)
5539 		goto out_mmap;
5540 	truncate_pagecache(inode, ioffset);
5541 
5542 	credits = ext4_writepage_trans_blocks(inode);
5543 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
5544 	if (IS_ERR(handle)) {
5545 		ret = PTR_ERR(handle);
5546 		goto out_mmap;
5547 	}
5548 	ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
5549 
5550 	/* Expand file to avoid data loss if there is error while shifting */
5551 	inode->i_size += len;
5552 	EXT4_I(inode)->i_disksize += len;
5553 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
5554 	ret = ext4_mark_inode_dirty(handle, inode);
5555 	if (ret)
5556 		goto out_stop;
5557 
5558 	down_write(&EXT4_I(inode)->i_data_sem);
5559 	ext4_discard_preallocations(inode);
5560 
5561 	path = ext4_find_extent(inode, offset_lblk, NULL, 0);
5562 	if (IS_ERR(path)) {
5563 		up_write(&EXT4_I(inode)->i_data_sem);
5564 		ret = PTR_ERR(path);
5565 		goto out_stop;
5566 	}
5567 
5568 	depth = ext_depth(inode);
5569 	extent = path[depth].p_ext;
5570 	if (extent) {
5571 		ee_start_lblk = le32_to_cpu(extent->ee_block);
5572 		ee_len = ext4_ext_get_actual_len(extent);
5573 
5574 		/*
5575 		 * If offset_lblk is not the starting block of extent, split
5576 		 * the extent @offset_lblk
5577 		 */
5578 		if ((offset_lblk > ee_start_lblk) &&
5579 				(offset_lblk < (ee_start_lblk + ee_len))) {
5580 			if (ext4_ext_is_unwritten(extent))
5581 				split_flag = EXT4_EXT_MARK_UNWRIT1 |
5582 					EXT4_EXT_MARK_UNWRIT2;
5583 			ret = ext4_split_extent_at(handle, inode, &path,
5584 					offset_lblk, split_flag,
5585 					EXT4_EX_NOCACHE |
5586 					EXT4_GET_BLOCKS_PRE_IO |
5587 					EXT4_GET_BLOCKS_METADATA_NOFAIL);
5588 		}
5589 
5590 		ext4_free_ext_path(path);
5591 		if (ret < 0) {
5592 			up_write(&EXT4_I(inode)->i_data_sem);
5593 			goto out_stop;
5594 		}
5595 	} else {
5596 		ext4_free_ext_path(path);
5597 	}
5598 
5599 	ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk);
5600 
5601 	/*
5602 	 * if offset_lblk lies in a hole which is at start of file, use
5603 	 * ee_start_lblk to shift extents
5604 	 */
5605 	ret = ext4_ext_shift_extents(inode, handle,
5606 		max(ee_start_lblk, offset_lblk), len_lblk, SHIFT_RIGHT);
5607 
5608 	up_write(&EXT4_I(inode)->i_data_sem);
5609 	if (IS_SYNC(inode))
5610 		ext4_handle_sync(handle);
5611 	if (ret >= 0)
5612 		ext4_update_inode_fsync_trans(handle, inode, 1);
5613 
5614 out_stop:
5615 	ext4_journal_stop(handle);
5616 out_mmap:
5617 	filemap_invalidate_unlock(mapping);
5618 out_mutex:
5619 	inode_unlock(inode);
5620 	return ret;
5621 }
5622 
5623 /**
5624  * ext4_swap_extents() - Swap extents between two inodes
5625  * @handle: handle for this transaction
5626  * @inode1:	First inode
5627  * @inode2:	Second inode
5628  * @lblk1:	Start block for first inode
5629  * @lblk2:	Start block for second inode
5630  * @count:	Number of blocks to swap
5631  * @unwritten: Mark second inode's extents as unwritten after swap
5632  * @erp:	Pointer to save error value
5633  *
5634  * This helper routine does exactly what is promise "swap extents". All other
5635  * stuff such as page-cache locking consistency, bh mapping consistency or
5636  * extent's data copying must be performed by caller.
5637  * Locking:
5638  *		i_rwsem is held for both inodes
5639  * 		i_data_sem is locked for write for both inodes
5640  * Assumptions:
5641  *		All pages from requested range are locked for both inodes
5642  */
5643 int
5644 ext4_swap_extents(handle_t *handle, struct inode *inode1,
5645 		  struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
5646 		  ext4_lblk_t count, int unwritten, int *erp)
5647 {
5648 	struct ext4_ext_path *path1 = NULL;
5649 	struct ext4_ext_path *path2 = NULL;
5650 	int replaced_count = 0;
5651 
5652 	BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
5653 	BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
5654 	BUG_ON(!inode_is_locked(inode1));
5655 	BUG_ON(!inode_is_locked(inode2));
5656 
5657 	ext4_es_remove_extent(inode1, lblk1, count);
5658 	ext4_es_remove_extent(inode2, lblk2, count);
5659 
5660 	while (count) {
5661 		struct ext4_extent *ex1, *ex2, tmp_ex;
5662 		ext4_lblk_t e1_blk, e2_blk;
5663 		int e1_len, e2_len, len;
5664 		int split = 0;
5665 
5666 		path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
5667 		if (IS_ERR(path1)) {
5668 			*erp = PTR_ERR(path1);
5669 			path1 = NULL;
5670 		finish:
5671 			count = 0;
5672 			goto repeat;
5673 		}
5674 		path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
5675 		if (IS_ERR(path2)) {
5676 			*erp = PTR_ERR(path2);
5677 			path2 = NULL;
5678 			goto finish;
5679 		}
5680 		ex1 = path1[path1->p_depth].p_ext;
5681 		ex2 = path2[path2->p_depth].p_ext;
5682 		/* Do we have something to swap ? */
5683 		if (unlikely(!ex2 || !ex1))
5684 			goto finish;
5685 
5686 		e1_blk = le32_to_cpu(ex1->ee_block);
5687 		e2_blk = le32_to_cpu(ex2->ee_block);
5688 		e1_len = ext4_ext_get_actual_len(ex1);
5689 		e2_len = ext4_ext_get_actual_len(ex2);
5690 
5691 		/* Hole handling */
5692 		if (!in_range(lblk1, e1_blk, e1_len) ||
5693 		    !in_range(lblk2, e2_blk, e2_len)) {
5694 			ext4_lblk_t next1, next2;
5695 
5696 			/* if hole after extent, then go to next extent */
5697 			next1 = ext4_ext_next_allocated_block(path1);
5698 			next2 = ext4_ext_next_allocated_block(path2);
5699 			/* If hole before extent, then shift to that extent */
5700 			if (e1_blk > lblk1)
5701 				next1 = e1_blk;
5702 			if (e2_blk > lblk2)
5703 				next2 = e2_blk;
5704 			/* Do we have something to swap */
5705 			if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
5706 				goto finish;
5707 			/* Move to the rightest boundary */
5708 			len = next1 - lblk1;
5709 			if (len < next2 - lblk2)
5710 				len = next2 - lblk2;
5711 			if (len > count)
5712 				len = count;
5713 			lblk1 += len;
5714 			lblk2 += len;
5715 			count -= len;
5716 			goto repeat;
5717 		}
5718 
5719 		/* Prepare left boundary */
5720 		if (e1_blk < lblk1) {
5721 			split = 1;
5722 			*erp = ext4_force_split_extent_at(handle, inode1,
5723 						&path1, lblk1, 0);
5724 			if (unlikely(*erp))
5725 				goto finish;
5726 		}
5727 		if (e2_blk < lblk2) {
5728 			split = 1;
5729 			*erp = ext4_force_split_extent_at(handle, inode2,
5730 						&path2,  lblk2, 0);
5731 			if (unlikely(*erp))
5732 				goto finish;
5733 		}
5734 		/* ext4_split_extent_at() may result in leaf extent split,
5735 		 * path must to be revalidated. */
5736 		if (split)
5737 			goto repeat;
5738 
5739 		/* Prepare right boundary */
5740 		len = count;
5741 		if (len > e1_blk + e1_len - lblk1)
5742 			len = e1_blk + e1_len - lblk1;
5743 		if (len > e2_blk + e2_len - lblk2)
5744 			len = e2_blk + e2_len - lblk2;
5745 
5746 		if (len != e1_len) {
5747 			split = 1;
5748 			*erp = ext4_force_split_extent_at(handle, inode1,
5749 						&path1, lblk1 + len, 0);
5750 			if (unlikely(*erp))
5751 				goto finish;
5752 		}
5753 		if (len != e2_len) {
5754 			split = 1;
5755 			*erp = ext4_force_split_extent_at(handle, inode2,
5756 						&path2, lblk2 + len, 0);
5757 			if (*erp)
5758 				goto finish;
5759 		}
5760 		/* ext4_split_extent_at() may result in leaf extent split,
5761 		 * path must to be revalidated. */
5762 		if (split)
5763 			goto repeat;
5764 
5765 		BUG_ON(e2_len != e1_len);
5766 		*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
5767 		if (unlikely(*erp))
5768 			goto finish;
5769 		*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
5770 		if (unlikely(*erp))
5771 			goto finish;
5772 
5773 		/* Both extents are fully inside boundaries. Swap it now */
5774 		tmp_ex = *ex1;
5775 		ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
5776 		ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
5777 		ex1->ee_len = cpu_to_le16(e2_len);
5778 		ex2->ee_len = cpu_to_le16(e1_len);
5779 		if (unwritten)
5780 			ext4_ext_mark_unwritten(ex2);
5781 		if (ext4_ext_is_unwritten(&tmp_ex))
5782 			ext4_ext_mark_unwritten(ex1);
5783 
5784 		ext4_ext_try_to_merge(handle, inode2, path2, ex2);
5785 		ext4_ext_try_to_merge(handle, inode1, path1, ex1);
5786 		*erp = ext4_ext_dirty(handle, inode2, path2 +
5787 				      path2->p_depth);
5788 		if (unlikely(*erp))
5789 			goto finish;
5790 		*erp = ext4_ext_dirty(handle, inode1, path1 +
5791 				      path1->p_depth);
5792 		/*
5793 		 * Looks scarry ah..? second inode already points to new blocks,
5794 		 * and it was successfully dirtied. But luckily error may happen
5795 		 * only due to journal error, so full transaction will be
5796 		 * aborted anyway.
5797 		 */
5798 		if (unlikely(*erp))
5799 			goto finish;
5800 		lblk1 += len;
5801 		lblk2 += len;
5802 		replaced_count += len;
5803 		count -= len;
5804 
5805 	repeat:
5806 		ext4_free_ext_path(path1);
5807 		ext4_free_ext_path(path2);
5808 		path1 = path2 = NULL;
5809 	}
5810 	return replaced_count;
5811 }
5812 
5813 /*
5814  * ext4_clu_mapped - determine whether any block in a logical cluster has
5815  *                   been mapped to a physical cluster
5816  *
5817  * @inode - file containing the logical cluster
5818  * @lclu - logical cluster of interest
5819  *
5820  * Returns 1 if any block in the logical cluster is mapped, signifying
5821  * that a physical cluster has been allocated for it.  Otherwise,
5822  * returns 0.  Can also return negative error codes.  Derived from
5823  * ext4_ext_map_blocks().
5824  */
5825 int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
5826 {
5827 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5828 	struct ext4_ext_path *path;
5829 	int depth, mapped = 0, err = 0;
5830 	struct ext4_extent *extent;
5831 	ext4_lblk_t first_lblk, first_lclu, last_lclu;
5832 
5833 	/*
5834 	 * if data can be stored inline, the logical cluster isn't
5835 	 * mapped - no physical clusters have been allocated, and the
5836 	 * file has no extents
5837 	 */
5838 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) ||
5839 	    ext4_has_inline_data(inode))
5840 		return 0;
5841 
5842 	/* search for the extent closest to the first block in the cluster */
5843 	path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
5844 	if (IS_ERR(path))
5845 		return PTR_ERR(path);
5846 
5847 	depth = ext_depth(inode);
5848 
5849 	/*
5850 	 * A consistent leaf must not be empty.  This situation is possible,
5851 	 * though, _during_ tree modification, and it's why an assert can't
5852 	 * be put in ext4_find_extent().
5853 	 */
5854 	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
5855 		EXT4_ERROR_INODE(inode,
5856 		    "bad extent address - lblock: %lu, depth: %d, pblock: %lld",
5857 				 (unsigned long) EXT4_C2B(sbi, lclu),
5858 				 depth, path[depth].p_block);
5859 		err = -EFSCORRUPTED;
5860 		goto out;
5861 	}
5862 
5863 	extent = path[depth].p_ext;
5864 
5865 	/* can't be mapped if the extent tree is empty */
5866 	if (extent == NULL)
5867 		goto out;
5868 
5869 	first_lblk = le32_to_cpu(extent->ee_block);
5870 	first_lclu = EXT4_B2C(sbi, first_lblk);
5871 
5872 	/*
5873 	 * Three possible outcomes at this point - found extent spanning
5874 	 * the target cluster, to the left of the target cluster, or to the
5875 	 * right of the target cluster.  The first two cases are handled here.
5876 	 * The last case indicates the target cluster is not mapped.
5877 	 */
5878 	if (lclu >= first_lclu) {
5879 		last_lclu = EXT4_B2C(sbi, first_lblk +
5880 				     ext4_ext_get_actual_len(extent) - 1);
5881 		if (lclu <= last_lclu) {
5882 			mapped = 1;
5883 		} else {
5884 			first_lblk = ext4_ext_next_allocated_block(path);
5885 			first_lclu = EXT4_B2C(sbi, first_lblk);
5886 			if (lclu == first_lclu)
5887 				mapped = 1;
5888 		}
5889 	}
5890 
5891 out:
5892 	ext4_free_ext_path(path);
5893 
5894 	return err ? err : mapped;
5895 }
5896 
5897 /*
5898  * Updates physical block address and unwritten status of extent
5899  * starting at lblk start and of len. If such an extent doesn't exist,
5900  * this function splits the extent tree appropriately to create an
5901  * extent like this.  This function is called in the fast commit
5902  * replay path.  Returns 0 on success and error on failure.
5903  */
5904 int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
5905 			      int len, int unwritten, ext4_fsblk_t pblk)
5906 {
5907 	struct ext4_ext_path *path;
5908 	struct ext4_extent *ex;
5909 	int ret;
5910 
5911 	path = ext4_find_extent(inode, start, NULL, 0);
5912 	if (IS_ERR(path))
5913 		return PTR_ERR(path);
5914 	ex = path[path->p_depth].p_ext;
5915 	if (!ex) {
5916 		ret = -EFSCORRUPTED;
5917 		goto out;
5918 	}
5919 
5920 	if (le32_to_cpu(ex->ee_block) != start ||
5921 		ext4_ext_get_actual_len(ex) != len) {
5922 		/* We need to split this extent to match our extent first */
5923 		down_write(&EXT4_I(inode)->i_data_sem);
5924 		ret = ext4_force_split_extent_at(NULL, inode, &path, start, 1);
5925 		up_write(&EXT4_I(inode)->i_data_sem);
5926 		if (ret)
5927 			goto out;
5928 
5929 		path = ext4_find_extent(inode, start, path, 0);
5930 		if (IS_ERR(path))
5931 			return PTR_ERR(path);
5932 		ex = path[path->p_depth].p_ext;
5933 		WARN_ON(le32_to_cpu(ex->ee_block) != start);
5934 
5935 		if (ext4_ext_get_actual_len(ex) != len) {
5936 			down_write(&EXT4_I(inode)->i_data_sem);
5937 			ret = ext4_force_split_extent_at(NULL, inode, &path,
5938 							 start + len, 1);
5939 			up_write(&EXT4_I(inode)->i_data_sem);
5940 			if (ret)
5941 				goto out;
5942 
5943 			path = ext4_find_extent(inode, start, path, 0);
5944 			if (IS_ERR(path))
5945 				return PTR_ERR(path);
5946 			ex = path[path->p_depth].p_ext;
5947 		}
5948 	}
5949 	if (unwritten)
5950 		ext4_ext_mark_unwritten(ex);
5951 	else
5952 		ext4_ext_mark_initialized(ex);
5953 	ext4_ext_store_pblock(ex, pblk);
5954 	down_write(&EXT4_I(inode)->i_data_sem);
5955 	ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
5956 	up_write(&EXT4_I(inode)->i_data_sem);
5957 out:
5958 	ext4_free_ext_path(path);
5959 	ext4_mark_inode_dirty(NULL, inode);
5960 	return ret;
5961 }
5962 
5963 /* Try to shrink the extent tree */
5964 void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
5965 {
5966 	struct ext4_ext_path *path = NULL;
5967 	struct ext4_extent *ex;
5968 	ext4_lblk_t old_cur, cur = 0;
5969 
5970 	while (cur < end) {
5971 		path = ext4_find_extent(inode, cur, NULL, 0);
5972 		if (IS_ERR(path))
5973 			return;
5974 		ex = path[path->p_depth].p_ext;
5975 		if (!ex) {
5976 			ext4_free_ext_path(path);
5977 			ext4_mark_inode_dirty(NULL, inode);
5978 			return;
5979 		}
5980 		old_cur = cur;
5981 		cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
5982 		if (cur <= old_cur)
5983 			cur = old_cur + 1;
5984 		ext4_ext_try_to_merge(NULL, inode, path, ex);
5985 		down_write(&EXT4_I(inode)->i_data_sem);
5986 		ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
5987 		up_write(&EXT4_I(inode)->i_data_sem);
5988 		ext4_mark_inode_dirty(NULL, inode);
5989 		ext4_free_ext_path(path);
5990 	}
5991 }
5992 
5993 /* Check if *cur is a hole and if it is, skip it */
5994 static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
5995 {
5996 	int ret;
5997 	struct ext4_map_blocks map;
5998 
5999 	map.m_lblk = *cur;
6000 	map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
6001 
6002 	ret = ext4_map_blocks(NULL, inode, &map, 0);
6003 	if (ret < 0)
6004 		return ret;
6005 	if (ret != 0)
6006 		return 0;
6007 	*cur = *cur + map.m_len;
6008 	return 0;
6009 }
6010 
6011 /* Count number of blocks used by this inode and update i_blocks */
6012 int ext4_ext_replay_set_iblocks(struct inode *inode)
6013 {
6014 	struct ext4_ext_path *path = NULL, *path2 = NULL;
6015 	struct ext4_extent *ex;
6016 	ext4_lblk_t cur = 0, end;
6017 	int numblks = 0, i, ret = 0;
6018 	ext4_fsblk_t cmp1, cmp2;
6019 	struct ext4_map_blocks map;
6020 
6021 	/* Determin the size of the file first */
6022 	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
6023 					EXT4_EX_NOCACHE);
6024 	if (IS_ERR(path))
6025 		return PTR_ERR(path);
6026 	ex = path[path->p_depth].p_ext;
6027 	if (!ex) {
6028 		ext4_free_ext_path(path);
6029 		goto out;
6030 	}
6031 	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
6032 	ext4_free_ext_path(path);
6033 
6034 	/* Count the number of data blocks */
6035 	cur = 0;
6036 	while (cur < end) {
6037 		map.m_lblk = cur;
6038 		map.m_len = end - cur;
6039 		ret = ext4_map_blocks(NULL, inode, &map, 0);
6040 		if (ret < 0)
6041 			break;
6042 		if (ret > 0)
6043 			numblks += ret;
6044 		cur = cur + map.m_len;
6045 	}
6046 
6047 	/*
6048 	 * Count the number of extent tree blocks. We do it by looking up
6049 	 * two successive extents and determining the difference between
6050 	 * their paths. When path is different for 2 successive extents
6051 	 * we compare the blocks in the path at each level and increment
6052 	 * iblocks by total number of differences found.
6053 	 */
6054 	cur = 0;
6055 	ret = skip_hole(inode, &cur);
6056 	if (ret < 0)
6057 		goto out;
6058 	path = ext4_find_extent(inode, cur, NULL, 0);
6059 	if (IS_ERR(path))
6060 		goto out;
6061 	numblks += path->p_depth;
6062 	ext4_free_ext_path(path);
6063 	while (cur < end) {
6064 		path = ext4_find_extent(inode, cur, NULL, 0);
6065 		if (IS_ERR(path))
6066 			break;
6067 		ex = path[path->p_depth].p_ext;
6068 		if (!ex) {
6069 			ext4_free_ext_path(path);
6070 			return 0;
6071 		}
6072 		cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
6073 					ext4_ext_get_actual_len(ex));
6074 		ret = skip_hole(inode, &cur);
6075 		if (ret < 0) {
6076 			ext4_free_ext_path(path);
6077 			break;
6078 		}
6079 		path2 = ext4_find_extent(inode, cur, NULL, 0);
6080 		if (IS_ERR(path2)) {
6081 			ext4_free_ext_path(path);
6082 			break;
6083 		}
6084 		for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
6085 			cmp1 = cmp2 = 0;
6086 			if (i <= path->p_depth)
6087 				cmp1 = path[i].p_bh ?
6088 					path[i].p_bh->b_blocknr : 0;
6089 			if (i <= path2->p_depth)
6090 				cmp2 = path2[i].p_bh ?
6091 					path2[i].p_bh->b_blocknr : 0;
6092 			if (cmp1 != cmp2 && cmp2 != 0)
6093 				numblks++;
6094 		}
6095 		ext4_free_ext_path(path);
6096 		ext4_free_ext_path(path2);
6097 	}
6098 
6099 out:
6100 	inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
6101 	ext4_mark_inode_dirty(NULL, inode);
6102 	return 0;
6103 }
6104 
6105 int ext4_ext_clear_bb(struct inode *inode)
6106 {
6107 	struct ext4_ext_path *path = NULL;
6108 	struct ext4_extent *ex;
6109 	ext4_lblk_t cur = 0, end;
6110 	int j, ret = 0;
6111 	struct ext4_map_blocks map;
6112 
6113 	if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
6114 		return 0;
6115 
6116 	/* Determin the size of the file first */
6117 	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
6118 					EXT4_EX_NOCACHE);
6119 	if (IS_ERR(path))
6120 		return PTR_ERR(path);
6121 	ex = path[path->p_depth].p_ext;
6122 	if (!ex) {
6123 		ext4_free_ext_path(path);
6124 		return 0;
6125 	}
6126 	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
6127 	ext4_free_ext_path(path);
6128 
6129 	cur = 0;
6130 	while (cur < end) {
6131 		map.m_lblk = cur;
6132 		map.m_len = end - cur;
6133 		ret = ext4_map_blocks(NULL, inode, &map, 0);
6134 		if (ret < 0)
6135 			break;
6136 		if (ret > 0) {
6137 			path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
6138 			if (!IS_ERR_OR_NULL(path)) {
6139 				for (j = 0; j < path->p_depth; j++) {
6140 
6141 					ext4_mb_mark_bb(inode->i_sb,
6142 							path[j].p_block, 1, false);
6143 					ext4_fc_record_regions(inode->i_sb, inode->i_ino,
6144 							0, path[j].p_block, 1, 1);
6145 				}
6146 				ext4_free_ext_path(path);
6147 			}
6148 			ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, false);
6149 			ext4_fc_record_regions(inode->i_sb, inode->i_ino,
6150 					map.m_lblk, map.m_pblk, map.m_len, 1);
6151 		}
6152 		cur = cur + map.m_len;
6153 	}
6154 
6155 	return 0;
6156 }
6157