xref: /freebsd/sys/fs/ext2fs/ext2_extents.c (revision b37f6c9805edb4b89f0a8c2b78f78a3dcfc0647b)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2010 Zheng Liu <lz@freebsd.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/types.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/vnode.h>
37 #include <sys/bio.h>
38 #include <sys/buf.h>
39 #include <sys/conf.h>
40 #include <sys/stat.h>
41 
42 #include <fs/ext2fs/ext2_mount.h>
43 #include <fs/ext2fs/fs.h>
44 #include <fs/ext2fs/inode.h>
45 #include <fs/ext2fs/ext2fs.h>
46 #include <fs/ext2fs/ext2_extents.h>
47 #include <fs/ext2fs/ext2_extern.h>
48 
49 static MALLOC_DEFINE(M_EXT2EXTENTS, "ext2_extents", "EXT2 extents");
50 
51 #ifdef EXT2FS_DEBUG
52 static void
53 ext4_ext_print_extent(struct ext4_extent *ep)
54 {
55 
56 	printf("    ext %p => (blk %u len %u start %ju)\n",
57 	    ep, ep->e_blk, ep->e_len,
58 	    (uintmax_t)((uint64_t)ep->e_start_hi << 32 | ep->e_start_lo));
59 }
60 
61 static void ext4_ext_print_header(struct inode *ip, struct ext4_extent_header *ehp);
62 
63 static void
64 ext4_ext_print_index(struct inode *ip, struct ext4_extent_index *ex, int do_walk)
65 {
66 	struct m_ext2fs *fs;
67 	struct buf *bp;
68 	int error;
69 
70 	fs = ip->i_e2fs;
71 
72 	printf("    index %p => (blk %u pblk %ju)\n", ex, ex->ei_blk,
73 	    (uintmax_t)((uint64_t)ex->ei_leaf_hi << 32 | ex->ei_leaf_lo));
74 
75 	if (!do_walk)
76 		return;
77 
78 	if ((error = bread(ip->i_devvp,
79 	    fsbtodb(fs, ((uint64_t)ex->ei_leaf_hi << 32 | ex->ei_leaf_lo)),
80 	    (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) {
81 		brelse(bp);
82 		return;
83 	}
84 
85 	ext4_ext_print_header(ip, (struct ext4_extent_header *)bp->b_data);
86 
87 	brelse(bp);
88 
89 }
90 
91 static void
92 ext4_ext_print_header(struct inode *ip, struct ext4_extent_header *ehp)
93 {
94 	int i;
95 
96 	printf("header %p => (magic 0x%x entries %d max %d depth %d gen %d)\n",
97 	    ehp, ehp->eh_magic, ehp->eh_ecount, ehp->eh_max, ehp->eh_depth,
98 	    ehp->eh_gen);
99 
100 	for (i = 0; i < ehp->eh_ecount; i++)
101 		if (ehp->eh_depth != 0)
102 			ext4_ext_print_index(ip,
103 			    (struct ext4_extent_index *)(ehp + 1 + i), 1);
104 		else
105 			ext4_ext_print_extent((struct ext4_extent *)(ehp + 1 + i));
106 }
107 
108 static void
109 ext4_ext_print_path(struct inode *ip, struct ext4_extent_path *path)
110 {
111 	int k, l;
112 
113 	l = path->ep_depth;
114 
115 	printf("ip=%d, Path:\n", ip->i_number);
116 	for (k = 0; k <= l; k++, path++) {
117 		if (path->ep_index) {
118 			ext4_ext_print_index(ip, path->ep_index, 0);
119 		} else if (path->ep_ext) {
120 			ext4_ext_print_extent(path->ep_ext);
121 		}
122 	}
123 }
124 
125 void
126 ext4_ext_print_extent_tree_status(struct inode * ip)
127 {
128 	struct m_ext2fs *fs;
129 	struct ext4_extent_header *ehp;
130 
131 	fs = ip->i_e2fs;
132 	ehp = (struct ext4_extent_header *)(char *)ip->i_db;
133 
134 	printf("Extent status:ip=%d\n", ip->i_number);
135 	if (!(ip->i_flag & IN_E4EXTENTS))
136 		return;
137 
138 	ext4_ext_print_header(ip, ehp);
139 
140 	return;
141 }
142 #endif
143 
144 static inline struct ext4_extent_header *
145 ext4_ext_inode_header(struct inode *ip)
146 {
147 
148 	return ((struct ext4_extent_header *)ip->i_db);
149 }
150 
151 static inline struct ext4_extent_header *
152 ext4_ext_block_header(char *bdata)
153 {
154 
155 	return ((struct ext4_extent_header *)bdata);
156 }
157 
158 static inline unsigned short
159 ext4_ext_inode_depth(struct inode *ip)
160 {
161 	struct ext4_extent_header *ehp;
162 
163 	ehp = (struct ext4_extent_header *)ip->i_data;
164 	return (ehp->eh_depth);
165 }
166 
167 static inline e4fs_daddr_t
168 ext4_ext_index_pblock(struct ext4_extent_index *index)
169 {
170 	e4fs_daddr_t blk;
171 
172 	blk = index->ei_leaf_lo;
173 	blk |= (e4fs_daddr_t)index->ei_leaf_hi << 32;
174 
175 	return (blk);
176 }
177 
178 static inline void
179 ext4_index_store_pblock(struct ext4_extent_index *index, e4fs_daddr_t pb)
180 {
181 
182 	index->ei_leaf_lo = pb & 0xffffffff;
183 	index->ei_leaf_hi = (pb >> 32) & 0xffff;
184 }
185 
186 
187 static inline e4fs_daddr_t
188 ext4_ext_extent_pblock(struct ext4_extent *extent)
189 {
190 	e4fs_daddr_t blk;
191 
192 	blk = extent->e_start_lo;
193 	blk |= (e4fs_daddr_t)extent->e_start_hi << 32;
194 
195 	return (blk);
196 }
197 
198 static inline void
199 ext4_ext_store_pblock(struct ext4_extent *ex, e4fs_daddr_t pb)
200 {
201 
202 	ex->e_start_lo = pb & 0xffffffff;
203 	ex->e_start_hi = (pb >> 32) & 0xffff;
204 }
205 
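/*
 * Check whether logical block lbn is covered by the inode's one-entry
 * extent cache.  If it is, synthesize the covering extent in *ep and
 * return the cached type; otherwise return EXT4_EXT_CACHE_NO.
 */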
206 int
207 ext4_ext_in_cache(struct inode *ip, daddr_t lbn, struct ext4_extent *ep)
208 {
209 	struct ext4_extent_cache *ecp;
210 	int ret = EXT4_EXT_CACHE_NO;
211 
212 	ecp = &ip->i_ext_cache;
213 	if (ecp->ec_type == EXT4_EXT_CACHE_NO)
214 		return (ret);
215 
216 	if (lbn >= ecp->ec_blk && lbn < ecp->ec_blk + ecp->ec_len) {
217 		ep->e_blk = ecp->ec_blk;
218 		ep->e_start_lo = ecp->ec_start & 0xffffffff;
219 		ep->e_start_hi = ecp->ec_start >> 32 & 0xffff;
220 		ep->e_len = ecp->ec_len;
221 		ret = ecp->ec_type;
222 	}
223 	return (ret);
224 }
225 
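/*
 * Sanity-check an extent header: magic number, non-zero eh_max and
 * eh_ecount not exceeding eh_max.  Corruption is reported through
 * ext2_fserr() and EIO is returned.
 */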
226 static int
227 ext4_ext_check_header(struct inode *ip, struct ext4_extent_header *eh)
228 {
229 	struct m_ext2fs *fs;
230 	char *error_msg;
231 
232 	fs = ip->i_e2fs;
233 
234 	if (eh->eh_magic != EXT4_EXT_MAGIC) {
235 		error_msg = "invalid magic";
236 		goto corrupted;
237 	}
238 	if (eh->eh_max == 0) {
239 		error_msg = "invalid eh_max";
240 		goto corrupted;
241 	}
242 	if (eh->eh_ecount > eh->eh_max) {
243 		error_msg = "invalid eh_entries";
244 		goto corrupted;
245 	}
246 
247 	return (0);
248 
249 corrupted:
250 	ext2_fserr(fs, ip->i_uid, error_msg);
251 	return (EIO);
252 }
253 
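/*
 * Binary search of an index node: set path->ep_index to the last index
 * entry whose ei_blk is less than or equal to blk.
 */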
254 static void
255 ext4_ext_binsearch_index(struct ext4_extent_path *path, int blk)
256 {
257 	struct ext4_extent_header *eh;
258 	struct ext4_extent_index *r, *l, *m;
259 
260 	eh = path->ep_header;
261 
262 	KASSERT(eh->eh_ecount <= eh->eh_max && eh->eh_ecount > 0,
263 	    ("ext4_ext_binsearch_index: bad args"));
264 
265 	l = EXT_FIRST_INDEX(eh) + 1;
266 	r = EXT_FIRST_INDEX(eh) + eh->eh_ecount - 1;
267 	while (l <= r) {
268 		m = l + (r - l) / 2;
269 		if (blk < m->ei_blk)
270 			r = m - 1;
271 		else
272 			l = m + 1;
273 	}
274 
275 	path->ep_index = l - 1;
276 }
277 
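/*
 * Binary search of a leaf node: set path->ep_ext to the last extent
 * whose e_blk is less than or equal to blk.  An empty leaf leaves
 * path->ep_ext untouched.
 */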
278 static void
279 ext4_ext_binsearch_ext(struct ext4_extent_path *path, int blk)
280 {
281 	struct ext4_extent_header *eh;
282 	struct ext4_extent *r, *l, *m;
283 
284 	eh = path->ep_header;
285 
286 	KASSERT(eh->eh_ecount <= eh->eh_max,
287 	    ("ext4_ext_binsearch_ext: bad args"));
288 
289 	if (eh->eh_ecount == 0)
290 		return;
291 
292 	l = EXT_FIRST_EXTENT(eh) + 1;
293 	r = EXT_FIRST_EXTENT(eh) + eh->eh_ecount - 1;
294 
295 	while (l <= r) {
296 		m = l + (r - l) / 2;
297 		if (blk < m->e_blk)
298 			r = m - 1;
299 		else
300 			l = m + 1;
301 	}
302 
303 	path->ep_ext = l - 1;
304 }
305 
306 static int
307 ext4_ext_fill_path_bdata(struct ext4_extent_path *path,
308     struct buf *bp, uint64_t blk)
309 {
310 
311 	KASSERT(path->ep_data == NULL,
312 	    ("ext4_ext_fill_path_bdata: bad ep_data"));
313 
314 	path->ep_data = malloc(bp->b_bufsize, M_EXT2EXTENTS, M_WAITOK);
315 	if (!path->ep_data)
316 		return (ENOMEM);
317 
318 	memcpy(path->ep_data, bp->b_data, bp->b_bufsize);
319 	path->ep_blk = blk;
320 
321 	return (0);
322 }
323 
324 static void
325 ext4_ext_fill_path_buf(struct ext4_extent_path *path, struct buf *bp)
326 {
327 
328 	KASSERT(path->ep_data != NULL,
329 	    ("ext4_ext_fill_path_buf: bad ep_data"));
330 
331 	memcpy(bp->b_data, path->ep_data, bp->b_bufsize);
332 }
333 
334 static void
335 ext4_ext_drop_refs(struct ext4_extent_path *path)
336 {
337 	int depth, i;
338 
339 	if (!path)
340 		return;
341 
342 	depth = path->ep_depth;
343 	for (i = 0; i <= depth; i++, path++)
344 		if (path->ep_data) {
345 			free(path->ep_data, M_EXT2EXTENTS);
346 			path->ep_data = NULL;
347 		}
348 }
349 
350 void
351 ext4_ext_path_free(struct ext4_extent_path *path)
352 {
353 
354 	if (!path)
355 		return;
356 
357 	ext4_ext_drop_refs(path);
358 	free(path, M_EXT2EXTENTS);
359 }
360 
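/*
 * Walk the extent tree from the root stored in the inode down to the
 * leaf that should describe logical block 'block', recording one
 * ext4_extent_path entry per level in *ppath.  If *ppath is NULL the
 * path array is allocated here; the caller releases it with
 * ext4_ext_path_free() (or ext4_ext_drop_refs() plus free()).
 */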
361 int
362 ext4_ext_find_extent(struct inode *ip, daddr_t block,
363     struct ext4_extent_path **ppath)
364 {
365 	struct m_ext2fs *fs;
366 	struct ext4_extent_header *eh;
367 	struct ext4_extent_path *path;
368 	struct buf *bp;
369 	uint64_t blk;
370 	int error, depth, i, ppos, alloc;
371 
372 	fs = ip->i_e2fs;
373 	eh = ext4_ext_inode_header(ip);
374 	depth = ext4_ext_inode_depth(ip);
375 	ppos = 0;
376 	alloc = 0;
377 
378 	error = ext4_ext_check_header(ip, eh);
379 	if (error)
380 		return (error);
381 
382 	if (ppath == NULL)
383 		return (EINVAL);
384 
385 	path = *ppath;
386 	if (path == NULL) {
387 		path = malloc(EXT4_EXT_DEPTH_MAX *
388 		    sizeof(struct ext4_extent_path),
389 		    M_EXT2EXTENTS, M_WAITOK | M_ZERO);
390 		if (!path)
391 			return (ENOMEM);
392 
393 		*ppath = path;
394 		alloc = 1;
395 	}
396 
397 	path[0].ep_header = eh;
398 	path[0].ep_data = NULL;
399 
400 	/* Walk through the tree. */
401 	i = depth;
402 	while (i) {
403 		ext4_ext_binsearch_index(&path[ppos], block);
404 		blk = ext4_ext_index_pblock(path[ppos].ep_index);
405 		path[ppos].ep_depth = i;
406 		path[ppos].ep_ext = NULL;
407 
408 		error = bread(ip->i_devvp, fsbtodb(ip->i_e2fs, blk),
409 		    ip->i_e2fs->e2fs_bsize, NOCRED, &bp);
410 		if (error) {
411 			brelse(bp);
412 			goto error;
413 		}
414 
415 		ppos++;
416 		if (ppos > depth) {
417 			ext2_fserr(fs, ip->i_uid,
418 			    "ppos > depth => extent corrupted");
419 			error = EIO;
420 			brelse(bp);
421 			goto error;
422 		}
423 
424 		ext4_ext_fill_path_bdata(&path[ppos], bp, blk);
425 		bqrelse(bp);
426 
427 		eh = ext4_ext_block_header(path[ppos].ep_data);
428 		if (ext4_ext_check_header(ip, eh) ||
429 		    ext2_extent_blk_csum_verify(ip, path[ppos].ep_data)) {
430 			error = EIO;
431 			goto error;
432 		}
433 
434 		path[ppos].ep_header = eh;
435 
436 		i--;
437 	}
438 
439 	error = ext4_ext_check_header(ip, eh);
440 	if (error)
441 		goto error;
442 
443 	/* Find extent. */
444 	path[ppos].ep_depth = i;
445 	path[ppos].ep_header = eh;
446 	path[ppos].ep_ext = NULL;
447 	path[ppos].ep_index = NULL;
448 	ext4_ext_binsearch_ext(&path[ppos], block);
449 	return (0);
450 
451 error:
452 	ext4_ext_drop_refs(path);
453 	if (alloc)
454 		free(path, M_EXT2EXTENTS);
455 
456 	*ppath = NULL;
457 
458 	return (error);
459 }
460 
461 static inline int
462 ext4_ext_space_root(struct inode *ip)
463 {
464 	int size;
465 
466 	size = sizeof(ip->i_data);
467 	size -= sizeof(struct ext4_extent_header);
468 	size /= sizeof(struct ext4_extent);
469 
470 	return (size);
471 }
472 
473 static inline int
474 ext4_ext_space_block(struct inode *ip)
475 {
476 	struct m_ext2fs *fs;
477 	int size;
478 
479 	fs = ip->i_e2fs;
480 
481 	size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) /
482 	    sizeof(struct ext4_extent);
483 
484 	return (size);
485 }
486 
487 static inline int
488 ext4_ext_space_block_index(struct inode *ip)
489 {
490 	struct m_ext2fs *fs;
491 	int size;
492 
493 	fs = ip->i_e2fs;
494 
495 	size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) /
496 	    sizeof(struct ext4_extent_index);
497 
498 	return (size);
499 }
500 
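/*
 * Initialize an empty extent tree in the inode: clear the block-pointer
 * area, install a root header of depth 0 and flush the inode to disk.
 */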
501 void
502 ext4_ext_tree_init(struct inode *ip)
503 {
504 	struct ext4_extent_header *ehp;
505 
506 	ip->i_flag |= IN_E4EXTENTS;
507 
508 	memset(ip->i_data, 0, sizeof(ip->i_data));
509 	ehp = (struct ext4_extent_header *)ip->i_data;
510 	ehp->eh_magic = EXT4_EXT_MAGIC;
511 	ehp->eh_max = ext4_ext_space_root(ip);
512 	ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;
513 	ip->i_flag |= IN_CHANGE | IN_UPDATE;
514 	ext2_update(ip->i_vnode, 1);
515 }
516 
517 static inline void
518 ext4_ext_put_in_cache(struct inode *ip, uint32_t blk,
519 			uint32_t len, uint32_t start, int type)
520 {
521 
522 	KASSERT(len != 0, ("ext4_ext_put_in_cache: bad input"));
523 
524 	ip->i_ext_cache.ec_type = type;
525 	ip->i_ext_cache.ec_blk = blk;
526 	ip->i_ext_cache.ec_len = len;
527 	ip->i_ext_cache.ec_start = start;
528 }
529 
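/*
 * Choose a preferred physical block for a new allocation: next to the
 * extent found in the path if there is one, otherwise near the block
 * holding the leaf itself, and as a last resort relative to the start
 * of the inode's block group.
 */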
530 static e4fs_daddr_t
531 ext4_ext_blkpref(struct inode *ip, struct ext4_extent_path *path,
532     e4fs_daddr_t block)
533 {
534 	struct m_ext2fs *fs;
535 	struct ext4_extent *ex;
536 	e4fs_daddr_t bg_start;
537 	int depth;
538 
539 	fs = ip->i_e2fs;
540 
541 	if (path) {
542 		depth = path->ep_depth;
543 		ex = path[depth].ep_ext;
544 		if (ex) {
545 			e4fs_daddr_t pblk = ext4_ext_extent_pblock(ex);
546 			e2fs_daddr_t blk = ex->e_blk;
547 
548 			if (block > blk)
549 				return (pblk + (block - blk));
550 			else
551 				return (pblk - (blk - block));
552 		}
553 
554 		/* Try to get block from index itself. */
555 		if (path[depth].ep_data)
556 			return (path[depth].ep_blk);
557 	}
558 
559 	/* Use inode's group. */
560 	bg_start = (ip->i_block_group * EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) +
561 	    fs->e2fs->e2fs_first_dblock;
562 
563 	return (bg_start + block);
564 }
565 
566 static inline int
567 ext4_can_extents_be_merged(struct ext4_extent *ex1,
568     struct ext4_extent *ex2)
569 {
570 
571 	if (ex1->e_blk + ex1->e_len != ex2->e_blk)
572 		return (0);
573 
574 	if (ex1->e_len + ex2->e_len > EXT4_MAX_LEN)
575 		return (0);
576 
577 	if (ext4_ext_extent_pblock(ex1) + ex1->e_len ==
578 	    ext4_ext_extent_pblock(ex2))
579 		return (1);
580 
581 	return (0);
582 }
583 
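/*
 * Return the first logical block covered by the leaf to the right of
 * 'path', or EXT4_MAX_BLOCKS if there is no next leaf.
 */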
584 static unsigned
585 ext4_ext_next_leaf_block(struct inode *ip, struct ext4_extent_path *path)
586 {
587 	int depth = path->ep_depth;
588 
589 	/* Empty tree */
590 	if (depth == 0)
591 		return (EXT4_MAX_BLOCKS);
592 
593 	/* Go to indexes. */
594 	depth--;
595 
596 	while (depth >= 0) {
597 		if (path[depth].ep_index !=
598 		    EXT_LAST_INDEX(path[depth].ep_header))
599 			return (path[depth].ep_index[1].ei_blk);
600 
601 		depth--;
602 	}
603 
604 	return (EXT4_MAX_BLOCKS);
605 }
606 
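/*
 * Write a modified tree node back.  Nodes that were read from disk
 * (ep_data != NULL) are copied into a buffer, checksummed and written
 * synchronously; the in-inode root is flushed through ext2_update().
 */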
607 static int
608 ext4_ext_dirty(struct inode *ip, struct ext4_extent_path *path)
609 {
610 	struct m_ext2fs *fs;
611 	struct buf *bp;
612 	uint64_t blk;
613 	int error;
614 
615 	fs = ip->i_e2fs;
616 
617 	if (!path)
618 		return (EINVAL);
619 
620 	if (path->ep_data) {
621 		blk = path->ep_blk;
622 		bp = getblk(ip->i_devvp, fsbtodb(fs, blk),
623 		    fs->e2fs_bsize, 0, 0, 0);
624 		if (!bp)
625 			return (EIO);
626 		ext4_ext_fill_path_buf(path, bp);
627 		ext2_extent_blk_csum_set(ip, bp->b_data);
628 		error = bwrite(bp);
629 	} else {
630 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
631 		error = ext2_update(ip->i_vnode, 1);
632 	}
633 
634 	return (error);
635 }
636 
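/*
 * Insert a new index entry mapping logical block 'lblk' to physical
 * block 'blk' into the index node referenced by 'path', keeping the
 * entries sorted by logical block.
 */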
637 static int
638 ext4_ext_insert_index(struct inode *ip, struct ext4_extent_path *path,
639     uint32_t lblk, e4fs_daddr_t blk)
640 {
641 	struct m_ext2fs *fs;
642 	struct ext4_extent_index *idx;
643 	int len;
644 
645 	fs = ip->i_e2fs;
646 
647 	if (lblk == path->ep_index->ei_blk) {
648 		ext2_fserr(fs, ip->i_uid,
649 		    "lblk == index blk => extent corrupted");
650 		return (EIO);
651 	}
652 
653 	if (path->ep_header->eh_ecount >= path->ep_header->eh_max) {
654 		ext2_fserr(fs, ip->i_uid,
655 		    "ecount >= maxcount => extent corrupted");
656 		return (EIO);
657 	}
658 
659 	if (lblk > path->ep_index->ei_blk) {
660 		/* Insert after. */
661 		idx = path->ep_index + 1;
662 	} else {
663 		/* Insert before. */
664 		idx = path->ep_index;
665 	}
666 
667 	len = EXT_LAST_INDEX(path->ep_header) - idx + 1;
668 	if (len > 0)
669 		memmove(idx + 1, idx, len * sizeof(struct ext4_extent_index));
670 
671 	if (idx > EXT_MAX_INDEX(path->ep_header)) {
672 		ext2_fserr(fs, ip->i_uid,
673 		    "index is out of range => extent corrupted");
674 		return (EIO);
675 	}
676 
677 	idx->ei_blk = lblk;
678 	ext4_index_store_pblock(idx, blk);
679 	path->ep_header->eh_ecount++;
680 
681 	return (ext4_ext_dirty(ip, path));
682 }
683 
684 static e4fs_daddr_t
685 ext4_ext_alloc_meta(struct inode *ip)
686 {
687 	e4fs_daddr_t blk = ext2_alloc_meta(ip);
688 	if (blk) {
689 		ip->i_blocks += btodb(ip->i_e2fs->e2fs_bsize);
690 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
691 		ext2_update(ip->i_vnode, 1);
692 	}
693 
694 	return (blk);
695 }
696 
697 static void
698 ext4_ext_blkfree(struct inode *ip, uint64_t blk, int count, int flags)
699 {
700 	struct m_ext2fs *fs;
701 	int i, blocksreleased;
702 
703 	fs = ip->i_e2fs;
704 	blocksreleased = count;
705 
706 	for (i = 0; i < count; i++)
707 		ext2_blkfree(ip, blk + i, fs->e2fs_bsize);
708 
709 	if (ip->i_blocks >= blocksreleased)
710 		ip->i_blocks -= (btodb(fs->e2fs_bsize) * blocksreleased);
711 	else
712 		ip->i_blocks = 0;
713 
714 	ip->i_flag |= IN_CHANGE | IN_UPDATE;
715 	ext2_update(ip->i_vnode, 1);
716 }
717 
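/*
 * Split the tree so that a new extent can be inserted: allocate one new
 * block for every level below 'at', move the entries to the right of the
 * split point into the new blocks, and link the new subtree into the
 * index node at level 'at' with an entry for the split block 'border'.
 */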
718 static int
719 ext4_ext_split(struct inode *ip, struct ext4_extent_path *path,
720     struct ext4_extent *newext, int at)
721 {
722 	struct m_ext2fs *fs;
723 	struct  buf *bp;
724 	int depth = ext4_ext_inode_depth(ip);
725 	struct ext4_extent_header *neh;
726 	struct ext4_extent_index *fidx;
727 	struct ext4_extent *ex;
728 	int i = at, k, m, a;
729 	e4fs_daddr_t newblk, oldblk;
730 	uint32_t border;
731 	e4fs_daddr_t *ablks = NULL;
732 	int error = 0;
733 
734 	fs = ip->i_e2fs;
735 	bp = NULL;
736 
737 	/*
738 	 * We will split at the current extent for now.
739 	 */
740 	if (path[depth].ep_ext > EXT_MAX_EXTENT(path[depth].ep_header)) {
741 		ext2_fserr(fs, ip->i_uid,
742 		    "extent is out of range => extent corrupted");
743 		return (EIO);
744 	}
745 
746 	if (path[depth].ep_ext != EXT_MAX_EXTENT(path[depth].ep_header))
747 		border = path[depth].ep_ext[1].e_blk;
748 	else
749 		border = newext->e_blk;
750 
751 	/* Allocate new blocks. */
752 	ablks = malloc(sizeof(e4fs_daddr_t) * depth,
753 	    M_EXT2EXTENTS, M_WAITOK | M_ZERO);
754 	if (!ablks)
755 		return (ENOMEM);
756 	for (a = 0; a < depth - at; a++) {
757 		newblk = ext4_ext_alloc_meta(ip);
758 		if (newblk == 0)
759 			goto cleanup;
760 		ablks[a] = newblk;
761 	}
762 
763 	newblk = ablks[--a];
764 	bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0);
765 	if (!bp) {
766 		error = EIO;
767 		goto cleanup;
768 	}
769 
770 	neh = ext4_ext_block_header(bp->b_data);
771 	neh->eh_ecount = 0;
772 	neh->eh_max = ext4_ext_space_block(ip);
773 	neh->eh_magic = EXT4_EXT_MAGIC;
774 	neh->eh_depth = 0;
775 	ex = EXT_FIRST_EXTENT(neh);
776 
777 	if (path[depth].ep_header->eh_ecount != path[depth].ep_header->eh_max) {
778 		ext2_fserr(fs, ip->i_uid,
779 		    "extents count out of range => extent corrupted");
780 		error = EIO;
781 		goto cleanup;
782 	}
783 
784 	/* Start copy from next extent. */
785 	m = 0;
786 	path[depth].ep_ext++;
787 	while (path[depth].ep_ext <= EXT_MAX_EXTENT(path[depth].ep_header)) {
788 		path[depth].ep_ext++;
789 		m++;
790 	}
791 	if (m) {
792 		memmove(ex, path[depth].ep_ext - m,
793 		    sizeof(struct ext4_extent) * m);
794 		neh->eh_ecount = neh->eh_ecount + m;
795 	}
796 
797 	ext2_extent_blk_csum_set(ip, bp->b_data);
798 	bwrite(bp);
799 	bp = NULL;
800 
801 	/* Fix old leaf. */
802 	if (m) {
803 		path[depth].ep_header->eh_ecount =
804 		    path[depth].ep_header->eh_ecount - m;
805 		ext4_ext_dirty(ip, path + depth);
806 	}
807 
808 	/* Create intermediate indexes. */
809 	k = depth - at - 1;
810 	KASSERT(k >= 0, ("ext4_ext_split: negative k"));
811 
812 	/* Insert new index into current index block. */
813 	i = depth - 1;
814 	while (k--) {
815 		oldblk = newblk;
816 		newblk = ablks[--a];
817 		error = bread(ip->i_devvp, fsbtodb(fs, newblk),
818 		    (int)fs->e2fs_bsize, NOCRED, &bp);
819 		if (error) {
820 			brelse(bp);
821 			goto cleanup;
822 		}
823 
824 		neh = (struct ext4_extent_header *)bp->b_data;
825 		neh->eh_ecount = 1;
826 		neh->eh_magic = EXT4_EXT_MAGIC;
827 		neh->eh_max = ext4_ext_space_block_index(ip);
828 		neh->eh_depth = depth - i;
829 		fidx = EXT_FIRST_INDEX(neh);
830 		fidx->ei_blk = border;
831 		ext4_index_store_pblock(fidx, oldblk);
832 
833 		m = 0;
834 		path[i].ep_index++;
835 		while (path[i].ep_index <= EXT_MAX_INDEX(path[i].ep_header)) {
836 			path[i].ep_index++;
837 			m++;
838 		}
839 		if (m) {
840 			memmove(++fidx, path[i].ep_index - m,
841 			    sizeof(struct ext4_extent_index) * m);
842 			neh->eh_ecount = neh->eh_ecount + m;
843 		}
844 
845 		ext2_extent_blk_csum_set(ip, bp->b_data);
846 		bwrite(bp);
847 		bp = NULL;
848 
849 		/* Fix old index. */
850 		if (m) {
851 			path[i].ep_header->eh_ecount =
852 			    path[i].ep_header->eh_ecount - m;
853 			ext4_ext_dirty(ip, path + i);
854 		}
855 
856 		i--;
857 	}
858 
859 	error = ext4_ext_insert_index(ip, path + at, border, newblk);
860 
861 cleanup:
862 	if (bp)
863 		brelse(bp);
864 
865 	if (error) {
866 		for (i = 0; i < depth; i++) {
867 			if (!ablks[i])
868 				continue;
869 			ext4_ext_blkfree(ip, ablks[i], 1, 0);
870 		}
871 	}
872 
873 	free(ablks, M_EXT2EXTENTS);
874 
875 	return (error);
876 }
877 
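/*
 * Grow the tree by one level: copy the current root (kept inside the
 * inode) into a newly allocated block and turn the in-inode root into a
 * single index entry that points at that block.
 */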
878 static int
879 ext4_ext_grow_indepth(struct inode *ip, struct ext4_extent_path *path,
880     struct ext4_extent *newext)
881 {
882 	struct m_ext2fs *fs;
883 	struct ext4_extent_path *curpath;
884 	struct ext4_extent_header *neh;
885 	struct ext4_extent_index *fidx;
886 	struct buf *bp;
887 	e4fs_daddr_t newblk;
888 	int error = 0;
889 
890 	fs = ip->i_e2fs;
891 	curpath = path;
892 
893 	newblk = ext4_ext_alloc_meta(ip);
894 	if (newblk == 0)
895 		return (ENOSPC);
896 
897 	bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0);
898 	if (!bp)
899 		return (EIO);
900 
901 	/* Move top-level index/leaf into new block. */
902 	memmove(bp->b_data, curpath->ep_header, sizeof(ip->i_data));
903 
904 	/* Set size of new block */
905 	neh = ext4_ext_block_header(bp->b_data);
906 	neh->eh_magic = EXT4_EXT_MAGIC;
907 
908 	if (ext4_ext_inode_depth(ip))
909 		neh->eh_max = ext4_ext_space_block_index(ip);
910 	else
911 		neh->eh_max = ext4_ext_space_block(ip);
912 
913 	ext2_extent_blk_csum_set(ip, bp->b_data);
914 	error = bwrite(bp);
915 	bp = NULL;	/* bwrite() releases the buffer even on error. */
916 	if (error)
917 		goto out;
919 
920 	curpath->ep_header->eh_magic = EXT4_EXT_MAGIC;
921 	curpath->ep_header->eh_max = ext4_ext_space_root(ip);
922 	curpath->ep_header->eh_ecount = 1;
923 	curpath->ep_index = EXT_FIRST_INDEX(curpath->ep_header);
924 	curpath->ep_index->ei_blk = EXT_FIRST_EXTENT(path[0].ep_header)->e_blk;
925 	ext4_index_store_pblock(curpath->ep_index, newblk);
926 
927 	neh = ext4_ext_inode_header(ip);
928 	fidx = EXT_FIRST_INDEX(neh);
929 	neh->eh_depth = path->ep_depth + 1;
930 	ext4_ext_dirty(ip, curpath);
931 out:
932 	if (bp)
933 		brelse(bp);
934 	return (error);
935 }
936 
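/*
 * Make room for a new extent: split the tree at the deepest level that
 * still has a free index slot, or grow the tree in depth when every
 * level is full, then re-read the path for newext->e_blk.
 */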
937 static int
938 ext4_ext_create_new_leaf(struct inode *ip, struct ext4_extent_path *path,
939     struct ext4_extent *newext)
940 {
941 	struct m_ext2fs *fs;
942 	struct ext4_extent_path *curpath;
943 	int depth, i, error;
944 
945 	fs = ip->i_e2fs;
946 
947 repeat:
948 	i = depth = ext4_ext_inode_depth(ip);
949 
950 	/* Look for a free index entry in the tree. */
951 	curpath = path + depth;
952 	while (i > 0 && !EXT_HAS_FREE_INDEX(curpath)) {
953 		i--;
954 		curpath--;
955 	}
956 
957 	/*
958 	 * We use an already allocated block for the index block,
959 	 * so subsequent data blocks should be contiguous.
960 	 */
961 	if (EXT_HAS_FREE_INDEX(curpath)) {
962 		error = ext4_ext_split(ip, path, newext, i);
963 		if (error)
964 			goto out;
965 
966 		/* Refill path. */
967 		ext4_ext_drop_refs(path);
968 		error = ext4_ext_find_extent(ip, newext->e_blk, &path);
969 		if (error)
970 			goto out;
971 	} else {
972 		/* Tree is full, do grow in depth. */
973 		error = ext4_ext_grow_indepth(ip, path, newext);
974 		if (error)
975 			goto out;
976 
977 		/* Refill path. */
978 		ext4_ext_drop_refs(path);
979 		error = ext4_ext_find_extent(ip, newext->e_blk, &path);
980 		if (error)
981 			goto out;
982 
983 		/* Check and split tree if required. */
984 		depth = ext4_ext_inode_depth(ip);
985 		if (path[depth].ep_header->eh_ecount ==
986 		    path[depth].ep_header->eh_max)
987 			goto repeat;
988 	}
989 
990 out:
991 	return (error);
992 }
993 
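/*
 * Propagate the logical block of the first extent in a leaf into the
 * index entries above it; needed after an insert at the front of a leaf.
 */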
994 static int
995 ext4_ext_correct_indexes(struct inode *ip, struct ext4_extent_path *path)
996 {
997 	struct ext4_extent_header *eh;
998 	struct ext4_extent *ex;
999 	int32_t border;
1000 	int depth, k;
1001 
1002 	depth = ext4_ext_inode_depth(ip);
1003 	eh = path[depth].ep_header;
1004 	ex = path[depth].ep_ext;
1005 
1006 	if (ex == NULL || eh == NULL)
1007 		return (EIO);
1008 
1009 	if (!depth)
1010 		return (0);
1011 
1012 	/* Correct the tree only if the first extent in the leaf was modified. */
1013 	if (ex != EXT_FIRST_EXTENT(eh))
1014 		return (0);
1015 
1016 	k = depth - 1;
1017 	border = path[depth].ep_ext->e_blk;
1018 	path[k].ep_index->ei_blk = border;
1019 	ext4_ext_dirty(ip, path + k);
1020 	while (k--) {
1021 		/* Change all left-side indexes. */
1022 		if (path[k+1].ep_index != EXT_FIRST_INDEX(path[k+1].ep_header))
1023 			break;
1024 
1025 		path[k].ep_index->ei_blk = border;
1026 		ext4_ext_dirty(ip, path + k);
1027 	}
1028 
1029 	return (0);
1030 }
1031 
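/*
 * Insert 'newext' into the tree at 'path': merge it with the extent
 * found by the lookup when possible, otherwise find (or create) a leaf
 * with free space, place the extent in sorted order, merge with its
 * right neighbours and fix up the index entries above.
 */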
1032 static int
1033 ext4_ext_insert_extent(struct inode *ip, struct ext4_extent_path *path,
1034     struct ext4_extent *newext)
1035 {
1036 	struct m_ext2fs *fs;
1037 	struct ext4_extent_header * eh;
1038 	struct ext4_extent *ex, *nex, *nearex;
1039 	struct ext4_extent_path *npath;
1040 	int depth, len, error, next;
1041 
1042 	fs = ip->i_e2fs;
1043 	depth = ext4_ext_inode_depth(ip);
1044 	ex = path[depth].ep_ext;
1045 	npath = NULL;
1046 
1047 	if (newext->e_len == 0 || path[depth].ep_header == NULL)
1048 		return (EINVAL);
1049 
1050 	/* Insert block into found extent. */
1051 	if (ex && ext4_can_extents_be_merged(ex, newext)) {
1052 		ex->e_len = ex->e_len + newext->e_len;
1053 		eh = path[depth].ep_header;
1054 		nearex = ex;
1055 		goto merge;
1056 	}
1057 
1058 repeat:
1059 	depth = ext4_ext_inode_depth(ip);
1060 	eh = path[depth].ep_header;
1061 	if (eh->eh_ecount < eh->eh_max)
1062 		goto has_space;
1063 
1064 	/* Try next leaf */
1065 	nex = EXT_LAST_EXTENT(eh);
1066 	next = ext4_ext_next_leaf_block(ip, path);
1067 	if (newext->e_blk > nex->e_blk && next != EXT4_MAX_BLOCKS) {
1068 		KASSERT(npath == NULL,
1069 		    ("ext4_ext_insert_extent: bad path"));
1070 
1071 		error = ext4_ext_find_extent(ip, next, &npath);
1072 		if (error)
1073 			goto cleanup;
1074 
1075 		if (npath->ep_depth != path->ep_depth) {
1076 			error = EIO;
1077 			goto cleanup;
1078 		}
1079 
1080 		eh = npath[depth].ep_header;
1081 		if (eh->eh_ecount < eh->eh_max) {
1082 			path = npath;
1083 			goto repeat;
1084 		}
1085 	}
1086 
1087 	/*
1088 	 * There is no free space in the found leaf,
1089 	 * try to add a new leaf to the tree.
1090 	 */
1091 	error = ext4_ext_create_new_leaf(ip, path, newext);
1092 	if (error)
1093 		goto cleanup;
1094 
1095 	depth = ext4_ext_inode_depth(ip);
1096 	eh = path[depth].ep_header;
1097 
1098 has_space:
1099 	nearex = path[depth].ep_ext;
1100 	if (!nearex) {
1101 		/* Create new extent in the leaf. */
1102 		path[depth].ep_ext = EXT_FIRST_EXTENT(eh);
1103 	} else if (newext->e_blk > nearex->e_blk) {
1104 		if (nearex != EXT_LAST_EXTENT(eh)) {
1105 			len = EXT_MAX_EXTENT(eh) - nearex;
1106 			len = (len - 1) * sizeof(struct ext4_extent);
1107 			len = len < 0 ? 0 : len;
1108 			memmove(nearex + 2, nearex + 1, len);
1109 		}
1110 		path[depth].ep_ext = nearex + 1;
1111 	} else {
1112 		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
1113 		len = len < 0 ? 0 : len;
1114 		memmove(nearex + 1, nearex, len);
1115 		path[depth].ep_ext = nearex;
1116 	}
1117 
1118 	eh->eh_ecount = eh->eh_ecount + 1;
1119 	nearex = path[depth].ep_ext;
1120 	nearex->e_blk = newext->e_blk;
1121 	nearex->e_start_lo = newext->e_start_lo;
1122 	nearex->e_start_hi = newext->e_start_hi;
1123 	nearex->e_len = newext->e_len;
1124 
1125 merge:
1126 	/* Try to merge extents to the right. */
1127 	while (nearex < EXT_LAST_EXTENT(eh)) {
1128 		if (!ext4_can_extents_be_merged(nearex, nearex + 1))
1129 			break;
1130 
1131 		/* Merge with next extent. */
1132 		nearex->e_len = nearex->e_len + nearex[1].e_len;
1133 		if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
1134 			len = (EXT_LAST_EXTENT(eh) - nearex - 1) *
1135 			    sizeof(struct ext4_extent);
1136 			memmove(nearex + 1, nearex + 2, len);
1137 		}
1138 
1139 		eh->eh_ecount = eh->eh_ecount - 1;
1140 		KASSERT(eh->eh_ecount != 0,
1141 		    ("ext4_ext_insert_extent: bad ecount"));
1142 	}
1143 
1144 	/*
1145 	 * Try to merge extents to the left,
1146 	 * starting from index correction.
1147 	 */
1148 	error = ext4_ext_correct_indexes(ip, path);
1149 	if (error)
1150 		goto cleanup;
1151 
1152 	ext4_ext_dirty(ip, path + depth);
1153 
1154 cleanup:
1155 	if (npath) {
1156 		ext4_ext_drop_refs(npath);
1157 		free(npath, M_EXT2EXTENTS);
1158 	}
1159 
1160 	ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;
1161 	return (error);
1162 }
1163 
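/*
 * Allocate the physical blocks backing a new extent.  Only a single
 * block is allocated per call for now; larger requests return 0.
 */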
1164 static e4fs_daddr_t
1165 ext4_new_blocks(struct inode *ip, daddr_t lbn, e4fs_daddr_t pref,
1166     struct ucred *cred, unsigned long *count, int *perror)
1167 {
1168 	struct m_ext2fs *fs;
1169 	struct ext2mount *ump;
1170 	e4fs_daddr_t newblk;
1171 
1172 	fs = ip->i_e2fs;
1173 	ump = ip->i_ump;
1174 
1175 	/*
1176 	 * We will allocate only a single block for now.
1177 	 */
1178 	if (*count > 1)
1179 		return (0);
1180 
1181 	EXT2_LOCK(ip->i_ump);
1182 	*perror = ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newblk);
1183 	if (*perror)
1184 		return (0);
1185 
1186 	if (newblk) {
1187 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
1188 		ext2_update(ip->i_vnode, 1);
1189 	}
1190 
1191 	return (newblk);
1192 }
1193 
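/*
 * Map logical block 'iblk' of the inode to a physical block, allocating
 * a new block and inserting a new extent when no mapping exists yet.
 * The physical block is returned in *nb, *pallocated reports whether a
 * fresh allocation was made, and *bpp (when not NULL) receives a buffer
 * containing the block.
 */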
1194 int
1195 ext4_ext_get_blocks(struct inode *ip, e4fs_daddr_t iblk,
1196     unsigned long max_blocks, struct ucred *cred, struct buf **bpp,
1197     int *pallocated, daddr_t *nb)
1198 {
1199 	struct m_ext2fs *fs;
1200 	struct buf *bp = NULL;
1201 	struct ext4_extent_path *path;
1202 	struct ext4_extent newex, *ex;
1203 	e4fs_daddr_t bpref, newblk = 0;
1204 	unsigned long allocated = 0;
1205 	int error = 0, depth;
1206 
1207 	fs = ip->i_e2fs;
1208 	*pallocated = 0;
1209 	path = NULL;
1210 	if (bpp)
1211 		*bpp = NULL;
1212 
1213 	/* Check cache. */
1214 	if ((bpref = ext4_ext_in_cache(ip, iblk, &newex))) {
1215 		if (bpref == EXT4_EXT_CACHE_IN) {
1216 			/* Block is already allocated. */
1217 			newblk = iblk - newex.e_blk +
1218 			    ext4_ext_extent_pblock(&newex);
1219 			allocated = newex.e_len - (iblk - newex.e_blk);
1220 			goto out;
1221 		} else {
1222 			error = EIO;
1223 			goto out2;
1224 		}
1225 	}
1226 
1227 	error = ext4_ext_find_extent(ip, iblk, &path);
1228 	if (error) {
1229 		goto out2;
1230 	}
1231 
1232 	depth = ext4_ext_inode_depth(ip);
1233 	if (path[depth].ep_ext == NULL && depth != 0) {
1234 		error = EIO;
1235 		goto out2;
1236 	}
1237 
1238 	if ((ex = path[depth].ep_ext)) {
1239 		uint64_t lblk = ex->e_blk;
1240 		uint16_t e_len  = ex->e_len;
1241 		e4fs_daddr_t e_start = ext4_ext_extent_pblock(ex);
1242 
1243 		if (e_len > EXT4_MAX_LEN)
1244 			goto out2;
1245 
1246 		/* If the found extent covers the block, simply return it. */
1247 		if (iblk >= lblk && iblk < lblk + e_len) {
1248 			newblk = iblk - lblk + e_start;
1249 			allocated = e_len - (iblk - lblk);
1250 			ext4_ext_put_in_cache(ip, lblk, e_len,
1251 			    e_start, EXT4_EXT_CACHE_IN);
1252 			goto out;
1253 		}
1254 	}
1255 
1256 	/* Allocate the new block. */
1257 	if (S_ISREG(ip->i_mode) && (!ip->i_next_alloc_block)) {
1258 		ip->i_next_alloc_goal = 0;
1259 	}
1260 
1261 	bpref = ext4_ext_blkpref(ip, path, iblk);
1262 	allocated = max_blocks;
1263 	newblk = ext4_new_blocks(ip, iblk, bpref, cred, &allocated, &error);
1264 	if (!newblk)
1265 		goto out2;
1266 
1267 	/* Try to insert new extent into found leaf and return. */
1268 	newex.e_blk = iblk;
1269 	ext4_ext_store_pblock(&newex, newblk);
1270 	newex.e_len = allocated;
1271 	error = ext4_ext_insert_extent(ip, path, &newex);
1272 	if (error)
1273 		goto out2;
1274 
1275 	newblk = ext4_ext_extent_pblock(&newex);
1276 	ext4_ext_put_in_cache(ip, iblk, allocated, newblk, EXT4_EXT_CACHE_IN);
1277 	*pallocated = 1;
1278 
1279 out:
1280 	if (allocated > max_blocks)
1281 		allocated = max_blocks;
1282 
1283 	if (bpp) {
1285 		error = bread(ip->i_devvp, fsbtodb(fs, newblk),
1286 		    fs->e2fs_bsize, cred, &bp);
1287 		if (error) {
1288 			brelse(bp);
1289 		} else {
1290 			*bpp = bp;
1291 		}
1292 	}
1293 
1294 out2:
1295 	if (path) {
1296 		ext4_ext_drop_refs(path);
1297 		free(path, M_EXT2EXTENTS);
1298 	}
1299 
1300 	if (nb)
1301 		*nb = newblk;
1302 
1303 	return (error);
1304 }
1305 
1306 static inline uint16_t
1307 ext4_ext_get_actual_len(struct ext4_extent *ext)
1308 {
1309 
1310 	return (ext->e_len <= EXT_INIT_MAX_LEN ?
1311 	    ext->e_len : (ext->e_len - EXT_INIT_MAX_LEN));
1312 }
1313 
1314 static inline struct ext4_extent_header *
1315 ext4_ext_header(struct inode *ip)
1316 {
1317 
1318 	return ((struct ext4_extent_header *)ip->i_db);
1319 }
1320 
1321 static int
1322 ext4_remove_blocks(struct inode *ip, struct ext4_extent *ex,
1323     unsigned long from, unsigned long to)
1324 {
1325 	unsigned long num, start;
1326 
1327 	if (from >= ex->e_blk &&
1328 	    to == ex->e_blk + ext4_ext_get_actual_len(ex) - 1) {
1329 		/* Tail cleanup. */
1330 		num = ex->e_blk + ext4_ext_get_actual_len(ex) - from;
1331 		start = ext4_ext_extent_pblock(ex) +
1332 		    ext4_ext_get_actual_len(ex) - num;
1333 		ext4_ext_blkfree(ip, start, num, 0);
1334 	}
1335 
1336 	return (0);
1337 }
1338 
1339 static int
1340 ext4_ext_rm_index(struct inode *ip, struct ext4_extent_path *path)
1341 {
1342 	e4fs_daddr_t leaf;
1343 
1344 	/* Free index block. */
1345 	path--;
1346 	leaf = ext4_ext_index_pblock(path->ep_index);
1347 	KASSERT(path->ep_header->eh_ecount != 0,
1348 	    ("ext4_ext_rm_index: bad ecount"));
1349 	path->ep_header->eh_ecount--;
1350 	ext4_ext_dirty(ip, path);
1351 	ext4_ext_blkfree(ip, leaf, 1, 0);
1352 	return (0);
1353 }
1354 
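/*
 * Remove from the leaf at 'path' every extent that lies at or beyond
 * logical block 'start', freeing the underlying physical blocks.  Only
 * whole extents and extent tails can be removed; removing the head of
 * an extent is not implemented.
 */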
1355 static int
1356 ext4_ext_rm_leaf(struct inode *ip, struct ext4_extent_path *path,
1357     uint64_t start)
1358 {
1359 	struct m_ext2fs *fs;
1360 	int depth;
1361 	struct ext4_extent_header *eh;
1362 	unsigned int a, b, block, num;
1363 	unsigned long ex_blk;
1364 	unsigned short ex_len;
1365 	struct ext4_extent *ex;
1366 	int error, correct_index;
1367 
1368 	fs = ip->i_e2fs;
1369 	depth = ext4_ext_inode_depth(ip);
1370 	correct_index = 0;
1371 
1372 	if (!path[depth].ep_header) {
1373 		if (path[depth].ep_data == NULL)
1374 			return (EINVAL);
1375 		path[depth].ep_header =
1376 		    (struct ext4_extent_header* )path[depth].ep_data;
1377 	}
1378 
1379 	eh = path[depth].ep_header;
1380 	if (!eh) {
1381 		ext2_fserr(fs, ip->i_uid, "bad header => extent corrupted");
1382 		return (EIO);
1383 	}
1384 
1385 	ex = EXT_LAST_EXTENT(eh);
1386 	ex_blk = ex->e_blk;
1387 	ex_len = ext4_ext_get_actual_len(ex);
1388 
1389 	while (ex >= EXT_FIRST_EXTENT(eh) && ex_blk + ex_len > start) {
1390 		path[depth].ep_ext = ex;
1391 		a = ex_blk > start ? ex_blk : start;
1392 		b = (uint64_t)ex_blk + ex_len - 1 <
1393 		    EXT4_MAX_BLOCKS ? ex_blk + ex_len - 1 : EXT4_MAX_BLOCKS;
1394 
1395 		if (a != ex_blk && b != ex_blk + ex_len - 1)
1396 			return (EINVAL);
1397 		else if (a != ex_blk) {
1398 			/* Remove tail of the extent. */
1399 			block = ex_blk;
1400 			num = a - block;
1401 		} else if (b != ex_blk + ex_len - 1) {
1402 			/* Remove head of the extent, not implemented. */
1403 			return (EINVAL);
1404 		} else {
1405 			/* Remove whole extent. */
1406 			block = ex_blk;
1407 			num = 0;
1408 		}
1409 
1410 		if (ex == EXT_FIRST_EXTENT(eh))
1411 			correct_index = 1;
1412 
1413 		error = ext4_remove_blocks(ip, ex, a, b);
1414 		if (error)
1415 			goto out;
1416 
1417 		if (num == 0) {
1418 			ext4_ext_store_pblock(ex, 0);
1419 			eh->eh_ecount--;
1420 		}
1421 
1422 		ex->e_blk = block;
1423 		ex->e_len = num;
1424 
1425 		ext4_ext_dirty(ip, path + depth);
1426 
1427 		ex--;
1428 		ex_blk = ex->e_blk;
1429 		ex_len = ext4_ext_get_actual_len(ex);
1430 	}
1431 
1432 	if (correct_index && eh->eh_ecount)
1433 		error = ext4_ext_correct_indexes(ip, path);
1434 
1435 	/*
1436 	 * If this leaf is free, we should
1437 	 * remove it from index block above.
1438 	 */
1439 	if (error == 0 && eh->eh_ecount == 0 && path[depth].ep_data != NULL)
1440 		error = ext4_ext_rm_index(ip, path + depth);
1441 
1442 out:
1443 	return (error);
1444 }
1445 
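/*
 * Read an extent tree node from disk and validate its header and depth;
 * NULL is returned (and the error logged) when the node looks corrupted
 * or the read fails.
 */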
1446 static struct buf *
1447 ext4_read_extent_tree_block(struct inode *ip, e4fs_daddr_t pblk,
1448     int depth, int flags)
1449 {
1450 	struct m_ext2fs *fs;
1451 	struct ext4_extent_header *eh;
1452 	struct buf *bp;
1453 	int error;
1454 
1455 	fs = ip->i_e2fs;
1456 
1457 	error = bread(ip->i_devvp, fsbtodb(fs, pblk),
1458 	    fs->e2fs_bsize, NOCRED, &bp);
1459 	if (error) {
1460 		brelse(bp);
1461 		return (NULL);
1462 	}
1463 
1464 	eh = ext4_ext_block_header(bp->b_data);
1465 	if (eh->eh_depth != depth) {
1466 		ext2_fserr(fs, ip->i_uid, "unexpected eh_depth");
1467 		goto err;
1468 	}
1469 
1470 	error = ext4_ext_check_header(ip, eh);
1471 	if (error)
1472 		goto err;
1473 
1474 	return (bp);
1475 
1476 err:
1477 	brelse(bp);
1478 	return (NULL);
1479 
1480 }
1481 
1482 static inline int
1483 ext4_ext_more_to_rm(struct ext4_extent_path *path)
1484 {
1485 
1486 	KASSERT(path->ep_index != NULL,
1487 	    ("ext4_ext_more_to_rm: bad index from path"));
1488 
1489 	if (path->ep_index < EXT_FIRST_INDEX(path->ep_header))
1490 		return (0);
1491 
1492 	if (path->ep_header->eh_ecount == path->index_count)
1493 		return (0);
1494 
1495 	return (1);
1496 }
1497 
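/*
 * Truncate the extent tree: walk it iteratively from the root, remove
 * every extent at or beyond logical block 'length' from each leaf, free
 * index nodes that become empty, and reset the root to an empty
 * depth-0 tree when nothing is left.
 */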
1498 int
1499 ext4_ext_remove_space(struct inode *ip, off_t length, int flags,
1500     struct ucred *cred, struct thread *td)
1501 {
1502 	struct buf *bp;
1503 	struct ext4_extent_header *ehp;
1504 	struct ext4_extent_path *path;
1505 	int depth;
1506 	int i, error;
1507 
1508 	ehp = (struct ext4_extent_header *)ip->i_db;
1509 	depth = ext4_ext_inode_depth(ip);
1510 
1511 	error = ext4_ext_check_header(ip, ehp);
1512 	if (error)
1513 		return (error);
1514 
1515 	path = malloc(sizeof(struct ext4_extent_path) * (depth + 1),
1516 	    M_EXT2EXTENTS, M_WAITOK | M_ZERO);
1517 	if (!path)
1518 		return (ENOMEM);
1519 
1520 	i = 0;
1521 	path[0].ep_header = ehp;
1522 	path[0].ep_depth = depth;
1523 	while (i >= 0 && error == 0) {
1524 		if (i == depth) {
1525 			/* This is a leaf. */
1526 			error = ext4_ext_rm_leaf(ip, path, length);
1527 			if (error)
1528 				break;
1529 			free(path[i].ep_data, M_EXT2EXTENTS);
1530 			path[i].ep_data = NULL;
1531 			i--;
1532 			continue;
1533 		}
1534 
1535 		/* This is an index node. */
1536 		if (!path[i].ep_header)
1537 			path[i].ep_header =
1538 			    (struct ext4_extent_header *)path[i].ep_data;
1539 
1540 		if (!path[i].ep_index) {
1541 			/* This level hasn't been touched yet. */
1542 			path[i].ep_index = EXT_LAST_INDEX(path[i].ep_header);
1543 			path[i].index_count = path[i].ep_header->eh_ecount + 1;
1544 		} else {
1545 			/* We were already here; look at the next index. */
1546 			path[i].ep_index--;
1547 		}
1548 
1549 		if (ext4_ext_more_to_rm(path + i)) {
1550 			memset(path + i + 1, 0, sizeof(*path));
1551 			bp = ext4_read_extent_tree_block(ip,
1552 			    ext4_ext_index_pblock(path[i].ep_index),
1553 			    path[0].ep_depth - (i + 1), 0);
1554 			if (!bp) {
1555 				error = EIO;
1556 				break;
1557 			}
1558 
1559 			ext4_ext_fill_path_bdata(&path[i+1], bp,
1560 			    ext4_ext_index_pblock(path[i].ep_index));
1561 			brelse(bp);
1562 			path[i].index_count = path[i].ep_header->eh_ecount;
1563 			i++;
1564 		} else {
1565 			if (path[i].ep_header->eh_ecount == 0 && i > 0) {
1566 				/* Index is empty, remove it. */
1567 				error = ext4_ext_rm_index(ip, path + i);
1568 			}
1569 			free(path[i].ep_data, M_EXT2EXTENTS);
1570 			path[i].ep_data = NULL;
1571 			i--;
1572 		}
1573 	}
1574 
1575 	if (path->ep_header->eh_ecount == 0) {
1576 		/*
1577 		 * Truncate the tree to zero.
1578 		 */
1579 		ext4_ext_header(ip)->eh_depth = 0;
1580 		ext4_ext_header(ip)->eh_max = ext4_ext_space_root(ip);
1581 		ext4_ext_dirty(ip, path);
1583 	}
1584 
1585 	ext4_ext_drop_refs(path);
1586 	free(path, M_EXT2EXTENTS);
1587 
1588 	return (error);
1589 }
1590