xref: /freebsd/sys/fs/ext2fs/ext2_extents.c (revision 8a272653d9fbd9fc37691c9aad6a05089b4ecb4d)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2010 Zheng Liu <lz@freebsd.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/types.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/vnode.h>
37 #include <sys/bio.h>
38 #include <sys/buf.h>
39 #include <sys/endian.h>
40 #include <sys/conf.h>
41 #include <sys/sdt.h>
42 #include <sys/stat.h>
43 
44 #include <fs/ext2fs/ext2_mount.h>
45 #include <fs/ext2fs/fs.h>
46 #include <fs/ext2fs/inode.h>
47 #include <fs/ext2fs/ext2fs.h>
48 #include <fs/ext2fs/ext2_extents.h>
49 #include <fs/ext2fs/ext2_extern.h>
50 
51 SDT_PROVIDER_DECLARE(ext2fs);
52 /*
53  * ext2fs trace probe:
54  * arg0: verbosity. Higher numbers give more verbose messages
55  * arg1: Textual message
56  */
57 SDT_PROBE_DEFINE2(ext2fs, , trace, extents, "int", "char*");
58 
59 static MALLOC_DEFINE(M_EXT2EXTENTS, "ext2_extents", "EXT2 extents");
60 
61 #ifdef EXT2FS_PRINT_EXTENTS
62 static void
63 ext4_ext_print_extent(struct ext4_extent *ep)
64 {
65 
66 	printf("    ext %p => (blk %u len %u start %ju)\n",
67 	    ep, le32toh(ep->e_blk), le16toh(ep->e_len),
68 	    (uint64_t)le16toh(ep->e_start_hi) << 32 | le32toh(ep->e_start_lo));
69 }
70 
71 static void ext4_ext_print_header(struct inode *ip, struct ext4_extent_header *ehp);
72 
73 static void
74 ext4_ext_print_index(struct inode *ip, struct ext4_extent_index *ex, int do_walk)
75 {
76 	struct m_ext2fs *fs;
77 	struct buf *bp;
78 	int error;
79 
80 	fs = ip->i_e2fs;
81 
82 	printf("    index %p => (blk %u pblk %ju)\n",
83 	    ex, le32toh(ex->ei_blk), (uint64_t)le16toh(ex->ei_leaf_hi) << 32 |
84 	    le32toh(ex->ei_leaf_lo));
85 
86 	if (!do_walk)
87 		return;
88 
89 	if ((error = bread(ip->i_devvp,
90 	    fsbtodb(fs, ((uint64_t)le16toh(ex->ei_leaf_hi) << 32 |
91 	    le32toh(ex->ei_leaf_lo))), (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) {
92 		brelse(bp);
93 		return;
94 	}
95 
96 	ext4_ext_print_header(ip, (struct ext4_extent_header *)bp->b_data);
97 
98 	brelse(bp);
99 
100 }
101 
102 static void
103 ext4_ext_print_header(struct inode *ip, struct ext4_extent_header *ehp)
104 {
105 	int i;
106 
107 	printf("header %p => (magic 0x%x entries %d max %d depth %d gen %d)\n",
108 	    ehp, le16toh(ehp->eh_magic), le16toh(ehp->eh_ecount),
109 	    le16toh(ehp->eh_max), le16toh(ehp->eh_depth), le32toh(ehp->eh_gen));
110 
111 	for (i = 0; i < le16toh(ehp->eh_ecount); i++)
112 		if (ehp->eh_depth != 0)
113 			ext4_ext_print_index(ip,
114 			    (struct ext4_extent_index *)(ehp + 1 + i), 1);
115 		else
116 			ext4_ext_print_extent((struct ext4_extent *)(ehp + 1 + i));
117 }
118 
119 static void
120 ext4_ext_print_path(struct inode *ip, struct ext4_extent_path *path)
121 {
122 	int k, l;
123 
124 	l = path->ep_depth;
125 
126 	printf("ip=%ju, Path:\n", ip->i_number);
127 	for (k = 0; k <= l; k++, path++) {
128 		if (path->ep_index) {
129 			ext4_ext_print_index(ip, path->ep_index, 0);
130 		} else if (path->ep_ext) {
131 			ext4_ext_print_extent(path->ep_ext);
132 		}
133 	}
134 }
135 
136 void
137 ext4_ext_print_extent_tree_status(struct inode *ip)
138 {
139 	struct ext4_extent_header *ehp;
140 
141 	ehp = (struct ext4_extent_header *)(char *)ip->i_db;
142 
143 	printf("Extent status:ip=%ju\n", ip->i_number);
144 	if (!(ip->i_flag & IN_E4EXTENTS))
145 		return;
146 
147 	ext4_ext_print_header(ip, ehp);
148 
149 	return;
150 }
151 #endif
152 
153 static inline struct ext4_extent_header *
154 ext4_ext_inode_header(struct inode *ip)
155 {
156 
157 	return ((struct ext4_extent_header *)ip->i_db);
158 }
159 
160 static inline struct ext4_extent_header *
161 ext4_ext_block_header(char *bdata)
162 {
163 
164 	return ((struct ext4_extent_header *)bdata);
165 }
166 
167 static inline unsigned short
168 ext4_ext_inode_depth(struct inode *ip)
169 {
170 	struct ext4_extent_header *ehp;
171 
172 	ehp = (struct ext4_extent_header *)ip->i_data;
173 	return (le16toh(ehp->eh_depth));
174 }
175 
176 static inline e4fs_daddr_t
177 ext4_ext_index_pblock(struct ext4_extent_index *index)
178 {
179 	e4fs_daddr_t blk;
180 
181 	blk = le32toh(index->ei_leaf_lo);
182 	blk |= (e4fs_daddr_t)le16toh(index->ei_leaf_hi) << 32;
183 
184 	return (blk);
185 }
186 
187 static inline void
188 ext4_index_store_pblock(struct ext4_extent_index *index, e4fs_daddr_t pb)
189 {
190 
191 	index->ei_leaf_lo = htole32(pb & 0xffffffff);
192 	index->ei_leaf_hi = htole16((pb >> 32) & 0xffff);
193 }
194 
195 static inline e4fs_daddr_t
196 ext4_ext_extent_pblock(struct ext4_extent *extent)
197 {
198 	e4fs_daddr_t blk;
199 
200 	blk = le32toh(extent->e_start_lo);
201 	blk |= (e4fs_daddr_t)le16toh(extent->e_start_hi) << 32;
202 
203 	return (blk);
204 }
205 
206 static inline void
207 ext4_ext_store_pblock(struct ext4_extent *ex, e4fs_daddr_t pb)
208 {
209 
210 	ex->e_start_lo = htole32(pb & 0xffffffff);
211 	ex->e_start_hi = htole16((pb >> 32) & 0xffff);
212 }
213 
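/*
 * Look up lbn in the inode's one-entry extent cache.  If the cached range
 * covers lbn, fill *ep from the cache and return the cache type; otherwise
 * return EXT4_EXT_CACHE_NO.
 */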
214 int
215 ext4_ext_in_cache(struct inode *ip, daddr_t lbn, struct ext4_extent *ep)
216 {
217 	struct ext4_extent_cache *ecp;
218 	int ret = EXT4_EXT_CACHE_NO;
219 
220 	ecp = &ip->i_ext_cache;
221 	if (ecp->ec_type == EXT4_EXT_CACHE_NO)
222 		return (ret);
223 
224 	if (lbn >= ecp->ec_blk && lbn < ecp->ec_blk + ecp->ec_len) {
225 		ep->e_blk = htole32(ecp->ec_blk);
226 		ep->e_start_lo = htole32(ecp->ec_start & 0xffffffff);
227 		ep->e_start_hi = htole16(ecp->ec_start >> 32 & 0xffff);
228 		ep->e_len = htole16(ecp->ec_len);
229 		ret = ecp->ec_type;
230 	}
231 	return (ret);
232 }
233 
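/*
 * Sanity-check an extent tree node header: verify the magic number, a
 * non-zero eh_max and eh_ecount <= eh_max.  Returns 0 on success or EIO
 * (after firing the trace probe) if the header looks corrupted.
 */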
234 static int
235 ext4_ext_check_header(struct inode *ip, struct ext4_extent_header *eh)
236 {
237 	struct m_ext2fs *fs;
238 	char *error_msg;
239 
240 	fs = ip->i_e2fs;
241 
242 	if (le16toh(eh->eh_magic) != EXT4_EXT_MAGIC) {
243 		error_msg = "header: invalid magic";
244 		goto corrupted;
245 	}
246 	if (eh->eh_max == 0) {
247 		error_msg = "header: invalid eh_max";
248 		goto corrupted;
249 	}
250 	if (le16toh(eh->eh_ecount) > le16toh(eh->eh_max)) {
251 		error_msg = "header: invalid eh_entries";
252 		goto corrupted;
253 	}
254 
255 	return (0);
256 
257 corrupted:
258 	SDT_PROBE2(ext2fs, , trace, extents, 1, error_msg);
259 	return (EIO);
260 }
261 
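/*
 * Binary search helpers: in an index node (and, in the leaf variant below,
 * in a leaf node) pick the last entry that starts at or before blk, falling
 * back to the first entry, and record it in the path entry.
 */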
262 static void
263 ext4_ext_binsearch_index(struct ext4_extent_path *path, int blk)
264 {
265 	struct ext4_extent_header *eh;
266 	struct ext4_extent_index *r, *l, *m;
267 
268 	eh = path->ep_header;
269 
270 	KASSERT(le16toh(eh->eh_ecount) <= le16toh(eh->eh_max) &&
271 	    le16toh(eh->eh_ecount) > 0,
272 	    ("ext4_ext_binsearch_index: bad args"));
273 
274 	l = EXT_FIRST_INDEX(eh) + 1;
275 	r = EXT_FIRST_INDEX(eh) + le16toh(eh->eh_ecount) - 1;
276 	while (l <= r) {
277 		m = l + (r - l) / 2;
278 		if (blk < le32toh(m->ei_blk))
279 			r = m - 1;
280 		else
281 			l = m + 1;
282 	}
283 
284 	path->ep_index = l - 1;
285 }
286 
287 static void
288 ext4_ext_binsearch_ext(struct ext4_extent_path *path, int blk)
289 {
290 	struct ext4_extent_header *eh;
291 	struct ext4_extent *r, *l, *m;
292 
293 	eh = path->ep_header;
294 
295 	KASSERT(le16toh(eh->eh_ecount) <= le16toh(eh->eh_max),
296 	    ("ext4_ext_binsearch_ext: bad args"));
297 
298 	if (eh->eh_ecount == 0)
299 		return;
300 
301 	l = EXT_FIRST_EXTENT(eh) + 1;
302 	r = EXT_FIRST_EXTENT(eh) + le16toh(eh->eh_ecount) - 1;
303 
304 	while (l <= r) {
305 		m = l + (r - l) / 2;
306 		if (blk < le32toh(m->e_blk))
307 			r = m - 1;
308 		else
309 			l = m + 1;
310 	}
311 
312 	path->ep_ext = l - 1;
313 }
314 
315 static int
316 ext4_ext_fill_path_bdata(struct ext4_extent_path *path,
317     struct buf *bp, uint64_t blk)
318 {
319 
320 	KASSERT(path->ep_data == NULL,
321 	    ("ext4_ext_fill_path_bdata: bad ep_data"));
322 
323 	path->ep_data = malloc(bp->b_bufsize, M_EXT2EXTENTS, M_WAITOK);
324 	memcpy(path->ep_data, bp->b_data, bp->b_bufsize);
325 	path->ep_blk = blk;
326 
327 	return (0);
328 }
329 
330 static void
331 ext4_ext_fill_path_buf(struct ext4_extent_path *path, struct buf *bp)
332 {
333 
334 	KASSERT(path->ep_data != NULL,
335 	    ("ext4_ext_fill_path_buf: bad ep_data"));
336 
337 	memcpy(bp->b_data, path->ep_data, bp->b_bufsize);
338 }
339 
340 static void
341 ext4_ext_drop_refs(struct ext4_extent_path *path)
342 {
343 	int depth, i;
344 
345 	if (!path)
346 		return;
347 
348 	depth = path->ep_depth;
349 	for (i = 0; i <= depth; i++, path++)
350 		if (path->ep_data) {
351 			free(path->ep_data, M_EXT2EXTENTS);
352 			path->ep_data = NULL;
353 		}
354 }
355 
356 void
357 ext4_ext_path_free(struct ext4_extent_path *path)
358 {
359 
360 	if (!path)
361 		return;
362 
363 	ext4_ext_drop_refs(path);
364 	free(path, M_EXT2EXTENTS);
365 }
366 
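/*
 * Walk the extent tree from the inode root down to the leaf that should
 * contain 'block', validating each node header and checksum on the way.
 * The resulting path (one entry per level) is returned via *ppath; the
 * path array is allocated here when *ppath is NULL.
 */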
367 int
368 ext4_ext_find_extent(struct inode *ip, daddr_t block,
369     struct ext4_extent_path **ppath)
370 {
371 	struct m_ext2fs *fs;
372 	struct ext4_extent_header *eh;
373 	struct ext4_extent_path *path;
374 	struct buf *bp;
375 	uint64_t blk;
376 	int error, depth, i, ppos, alloc;
377 
378 	fs = ip->i_e2fs;
379 	eh = ext4_ext_inode_header(ip);
380 	depth = ext4_ext_inode_depth(ip);
381 	ppos = 0;
382 	alloc = 0;
383 
384 	error = ext4_ext_check_header(ip, eh);
385 	if (error)
386 		return (error);
387 
388 	if (ppath == NULL)
389 		return (EINVAL);
390 
391 	path = *ppath;
392 	if (path == NULL) {
393 		path = malloc(EXT4_EXT_DEPTH_MAX *
394 		    sizeof(struct ext4_extent_path),
395 		    M_EXT2EXTENTS, M_WAITOK | M_ZERO);
396 		*ppath = path;
397 		alloc = 1;
398 	}
399 
400 	path[0].ep_header = eh;
401 	path[0].ep_data = NULL;
402 
403 	/* Walk through the tree. */
404 	i = depth;
405 	while (i) {
406 		ext4_ext_binsearch_index(&path[ppos], block);
407 		blk = ext4_ext_index_pblock(path[ppos].ep_index);
408 		path[ppos].ep_depth = i;
409 		path[ppos].ep_ext = NULL;
410 
411 		error = bread(ip->i_devvp, fsbtodb(ip->i_e2fs, blk),
412 		    ip->i_e2fs->e2fs_bsize, NOCRED, &bp);
413 		if (error) {
414 			goto error;
415 		}
416 
417 		ppos++;
418 		if (ppos > depth) {
419 			SDT_PROBE2(ext2fs, , trace, extents, 1,
420 			    "ppos > depth => extent corrupted");
421 			error = EIO;
422 			brelse(bp);
423 			goto error;
424 		}
425 
426 		ext4_ext_fill_path_bdata(&path[ppos], bp, blk);
427 		bqrelse(bp);
428 
429 		eh = ext4_ext_block_header(path[ppos].ep_data);
430 		if (ext4_ext_check_header(ip, eh) ||
431 		    ext2_extent_blk_csum_verify(ip, path[ppos].ep_data)) {
432 			error = EIO;
433 			goto error;
434 		}
435 
436 		path[ppos].ep_header = eh;
437 
438 		i--;
439 	}
440 
441 	error = ext4_ext_check_header(ip, eh);
442 	if (error)
443 		goto error;
444 
445 	/* Find extent. */
446 	path[ppos].ep_depth = i;
447 	path[ppos].ep_header = eh;
448 	path[ppos].ep_ext = NULL;
449 	path[ppos].ep_index = NULL;
450 	ext4_ext_binsearch_ext(&path[ppos], block);
451 	return (0);
452 
453 error:
454 	ext4_ext_drop_refs(path);
455 	if (alloc)
456 		free(path, M_EXT2EXTENTS);
457 
458 	*ppath = NULL;
459 
460 	return (error);
461 }
462 
463 static inline int
464 ext4_ext_space_root(struct inode *ip)
465 {
466 	int size;
467 
468 	size = sizeof(ip->i_data);
469 	size -= sizeof(struct ext4_extent_header);
470 	size /= sizeof(struct ext4_extent);
471 
472 	return (size);
473 }
474 
475 static inline int
476 ext4_ext_space_block(struct inode *ip)
477 {
478 	struct m_ext2fs *fs;
479 	int size;
480 
481 	fs = ip->i_e2fs;
482 
483 	size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) /
484 	    sizeof(struct ext4_extent);
485 
486 	return (size);
487 }
488 
489 static inline int
490 ext4_ext_space_block_index(struct inode *ip)
491 {
492 	struct m_ext2fs *fs;
493 	int size;
494 
495 	fs = ip->i_e2fs;
496 
497 	size = (fs->e2fs_bsize - sizeof(struct ext4_extent_header)) /
498 	    sizeof(struct ext4_extent_index);
499 
500 	return (size);
501 }
502 
503 void
504 ext4_ext_tree_init(struct inode *ip)
505 {
506 	struct ext4_extent_header *ehp;
507 
508 	ip->i_flag |= IN_E4EXTENTS;
509 
510 	memset(ip->i_data, 0, sizeof(ip->i_data));
511 	ehp = (struct ext4_extent_header *)ip->i_data;
512 	ehp->eh_magic = htole16(EXT4_EXT_MAGIC);
513 	ehp->eh_max = htole16(ext4_ext_space_root(ip));
514 	ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;
515 	ip->i_flag |= IN_CHANGE | IN_UPDATE;
516 	ext2_update(ip->i_vnode, 1);
517 }
518 
519 static inline void
520 ext4_ext_put_in_cache(struct inode *ip, uint32_t blk,
521 			uint32_t len, uint32_t start, int type)
522 {
523 
524 	KASSERT(len != 0, ("ext4_ext_put_in_cache: bad input"));
525 
526 	ip->i_ext_cache.ec_type = type;
527 	ip->i_ext_cache.ec_blk = blk;
528 	ip->i_ext_cache.ec_len = len;
529 	ip->i_ext_cache.ec_start = start;
530 }
531 
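/*
 * Choose a preferred physical block for an allocation at logical block
 * 'block': bias towards the closest extent found in the path if there is
 * one, otherwise fall back to the start of the inode's block group.
 */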
532 static e4fs_daddr_t
533 ext4_ext_blkpref(struct inode *ip, struct ext4_extent_path *path,
534     e4fs_daddr_t block)
535 {
536 	struct m_ext2fs *fs;
537 	struct ext4_extent *ex;
538 	e4fs_daddr_t bg_start;
539 	int depth;
540 
541 	fs = ip->i_e2fs;
542 
543 	if (path) {
544 		depth = path->ep_depth;
545 		ex = path[depth].ep_ext;
546 		if (ex) {
547 			e4fs_daddr_t pblk = ext4_ext_extent_pblock(ex);
548 			e2fs_daddr_t blk = le32toh(ex->e_blk);
549 
550 			if (block > blk)
551 				return (pblk + (block - blk));
552 			else
553 				return (pblk - (blk - block));
554 		}
555 
556 		/* Try to get block from index itself. */
557 		if (path[depth].ep_data)
558 			return (path[depth].ep_blk);
559 	}
560 
561 	/* Use inode's group. */
562 	bg_start = (ip->i_block_group * EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) +
563 	    le32toh(fs->e2fs->e2fs_first_dblock);
564 
565 	return (bg_start + block);
566 }
567 
568 static int inline
569 ext4_can_extents_be_merged(struct ext4_extent *ex1,
570     struct ext4_extent *ex2)
571 {
572 
573 	if (le32toh(ex1->e_blk) + le16toh(ex1->e_len) != le32toh(ex2->e_blk))
574 		return (0);
575 
576 	if (le16toh(ex1->e_len) + le16toh(ex2->e_len) > EXT4_MAX_LEN)
577 		return (0);
578 
579 	if (ext4_ext_extent_pblock(ex1) + le16toh(ex1->e_len) ==
580 	    ext4_ext_extent_pblock(ex2))
581 		return (1);
582 
583 	return (0);
584 }
585 
586 static unsigned
587 ext4_ext_next_leaf_block(struct inode *ip, struct ext4_extent_path *path)
588 {
589 	int depth = path->ep_depth;
590 
591 	/* Empty tree */
592 	if (depth == 0)
593 		return (EXT4_MAX_BLOCKS);
594 
595 	/* Go to indexes. */
596 	depth--;
597 
598 	while (depth >= 0) {
599 		if (path[depth].ep_index !=
600 		    EXT_LAST_INDEX(path[depth].ep_header))
601 			return (le32toh(path[depth].ep_index[1].ei_blk));
602 
603 		depth--;
604 	}
605 
606 	return (EXT4_MAX_BLOCKS);
607 }
608 
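/*
 * Write a modified path node back: for an on-disk node, copy the cached
 * node data into a buffer, recompute its checksum and write it out; for
 * the root node stored in the inode, just mark the inode for update.
 */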
609 static int
610 ext4_ext_dirty(struct inode *ip, struct ext4_extent_path *path)
611 {
612 	struct m_ext2fs *fs;
613 	struct buf *bp;
614 	uint64_t blk;
615 	int error;
616 
617 	fs = ip->i_e2fs;
618 
619 	if (!path)
620 		return (EINVAL);
621 
622 	if (path->ep_data) {
623 		blk = path->ep_blk;
624 		bp = getblk(ip->i_devvp, fsbtodb(fs, blk),
625 		    fs->e2fs_bsize, 0, 0, 0);
626 		if (!bp)
627 			return (EIO);
628 		ext4_ext_fill_path_buf(path, bp);
629 		ext2_extent_blk_csum_set(ip, bp->b_data);
630 		error = bwrite(bp);
631 	} else {
632 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
633 		error = ext2_update(ip->i_vnode, 1);
634 	}
635 
636 	return (error);
637 }
638 
639 static int
640 ext4_ext_insert_index(struct inode *ip, struct ext4_extent_path *path,
641     uint32_t lblk, e4fs_daddr_t blk)
642 {
643 	struct m_ext2fs *fs;
644 	struct ext4_extent_index *idx;
645 	int len;
646 
647 	fs = ip->i_e2fs;
648 
649 	if (lblk == le32toh(path->ep_index->ei_blk)) {
650 		SDT_PROBE2(ext2fs, , trace, extents, 1,
651 		    "lblk == index blk => extent corrupted");
652 		return (EIO);
653 	}
654 
655 	if (le16toh(path->ep_header->eh_ecount) >=
656 	    le16toh(path->ep_header->eh_max)) {
657 		SDT_PROBE2(ext2fs, , trace, extents, 1,
658 		    "ecount > maxcount => extent corrupted");
659 		return (EIO);
660 	}
661 
662 	if (lblk > le32toh(path->ep_index->ei_blk)) {
663 		/* Insert after. */
664 		idx = path->ep_index + 1;
665 	} else {
666 		/* Insert before. */
667 		idx = path->ep_index;
668 	}
669 
670 	len = EXT_LAST_INDEX(path->ep_header) - idx + 1;
671 	if (len > 0)
672 		memmove(idx + 1, idx, len * sizeof(struct ext4_extent_index));
673 
674 	if (idx > EXT_MAX_INDEX(path->ep_header)) {
675 		SDT_PROBE2(ext2fs, , trace, extents, 1,
676 		    "index is out of range => extent corrupted");
677 		return (EIO);
678 	}
679 
680 	idx->ei_blk = htole32(lblk);
681 	ext4_index_store_pblock(idx, blk);
682 	path->ep_header->eh_ecount =
683 	    htole16(le16toh(path->ep_header->eh_ecount) + 1);
684 
685 	return (ext4_ext_dirty(ip, path));
686 }
687 
688 static e4fs_daddr_t
689 ext4_ext_alloc_meta(struct inode *ip)
690 {
691 	e4fs_daddr_t blk = ext2_alloc_meta(ip);
692 	if (blk) {
693 		ip->i_blocks += btodb(ip->i_e2fs->e2fs_bsize);
694 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
695 		ext2_update(ip->i_vnode, 1);
696 	}
697 
698 	return (blk);
699 }
700 
701 static void
702 ext4_ext_blkfree(struct inode *ip, uint64_t blk, int count, int flags)
703 {
704 	struct m_ext2fs *fs;
705 	int i, blocksreleased;
706 
707 	fs = ip->i_e2fs;
708 	blocksreleased = count;
709 
710 	for (i = 0; i < count; i++)
711 		ext2_blkfree(ip, blk + i, fs->e2fs_bsize);
712 
713 	if (ip->i_blocks >= btodb(fs->e2fs_bsize) * blocksreleased)
714 		ip->i_blocks -= btodb(fs->e2fs_bsize) * blocksreleased;
715 	else
716 		ip->i_blocks = 0;
717 
718 	ip->i_flag |= IN_CHANGE | IN_UPDATE;
719 	ext2_update(ip->i_vnode, 1);
720 }
721 
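/*
 * Split the tree along path[] at level 'at': allocate new node blocks,
 * move the extents/indexes to the right of the insertion point into them,
 * and link the new subtree in with a new index entry at level 'at'.  On
 * failure, any blocks already allocated are released again.
 */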
722 static int
723 ext4_ext_split(struct inode *ip, struct ext4_extent_path *path,
724     struct ext4_extent *newext, int at)
725 {
726 	struct m_ext2fs *fs;
727 	struct  buf *bp;
728 	int depth = ext4_ext_inode_depth(ip);
729 	struct ext4_extent_header *neh;
730 	struct ext4_extent_index *fidx;
731 	struct ext4_extent *ex;
732 	int i = at, k, m, a;
733 	e4fs_daddr_t newblk, oldblk;
734 	uint32_t border;
735 	e4fs_daddr_t *ablks = NULL;
736 	int error = 0;
737 
738 	fs = ip->i_e2fs;
739 	bp = NULL;
740 
741 	/*
742 	 * We will split at current extent for now.
743 	 */
744 	if (path[depth].ep_ext > EXT_MAX_EXTENT(path[depth].ep_header)) {
745 		SDT_PROBE2(ext2fs, , trace, extents, 1,
746 		    "extent is out of range => extent corrupted");
747 		return (EIO);
748 	}
749 
750 	if (path[depth].ep_ext != EXT_MAX_EXTENT(path[depth].ep_header))
751 		border = le32toh(path[depth].ep_ext[1].e_blk);
752 	else
753 		border = le32toh(newext->e_blk);
754 
755 	/* Allocate new blocks. */
756 	ablks = malloc(sizeof(e4fs_daddr_t) * depth,
757 	    M_EXT2EXTENTS, M_WAITOK | M_ZERO);
758 	for (a = 0; a < depth - at; a++) {
759 		newblk = ext4_ext_alloc_meta(ip);
760 		if (newblk == 0)
761 			goto cleanup;
762 		ablks[a] = newblk;
763 	}
764 
765 	newblk = ablks[--a];
766 	bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0);
767 	if (!bp) {
768 		error = EIO;
769 		goto cleanup;
770 	}
771 
772 	neh = ext4_ext_block_header(bp->b_data);
773 	neh->eh_ecount = 0;
774 	neh->eh_max = htole16(ext4_ext_space_block(ip));
775 	neh->eh_magic = htole16(EXT4_EXT_MAGIC);
776 	neh->eh_depth = 0;
777 	ex = EXT_FIRST_EXTENT(neh);
778 
779 	if (le16toh(path[depth].ep_header->eh_ecount) !=
780 	    le16toh(path[depth].ep_header->eh_max)) {
781 		SDT_PROBE2(ext2fs, , trace, extents, 1,
782 		    "extents count out of range => extent corrupted");
783 		error = EIO;
784 		goto cleanup;
785 	}
786 
787 	/* Start copy from next extent. */
788 	m = 0;
789 	path[depth].ep_ext++;
790 	while (path[depth].ep_ext <= EXT_MAX_EXTENT(path[depth].ep_header)) {
791 		path[depth].ep_ext++;
792 		m++;
793 	}
794 	if (m) {
795 		memmove(ex, path[depth].ep_ext - m,
796 		    sizeof(struct ext4_extent) * m);
797 		neh->eh_ecount = htole16(le16toh(neh->eh_ecount) + m);
798 	}
799 
800 	ext2_extent_blk_csum_set(ip, bp->b_data);
801 	bwrite(bp);
802 	bp = NULL;
803 
804 	/* Fix old leaf. */
805 	if (m) {
806 		path[depth].ep_header->eh_ecount =
807 		    htole16(le16toh(path[depth].ep_header->eh_ecount) - m);
808 		ext4_ext_dirty(ip, path + depth);
809 	}
810 
811 	/* Create intermediate indexes. */
812 	k = depth - at - 1;
813 	KASSERT(k >= 0, ("ext4_ext_split: negative k"));
814 
815 	/* Insert new index into current index block. */
816 	i = depth - 1;
817 	while (k--) {
818 		oldblk = newblk;
819 		newblk = ablks[--a];
820 		error = bread(ip->i_devvp, fsbtodb(fs, newblk),
821 		    (int)fs->e2fs_bsize, NOCRED, &bp);
822 		if (error) {
823 			goto cleanup;
824 		}
825 
826 		neh = (struct ext4_extent_header *)bp->b_data;
827 		neh->eh_ecount = htole16(1);
828 		neh->eh_magic = htole16(EXT4_EXT_MAGIC);
829 		neh->eh_max = htole16(ext4_ext_space_block_index(ip));
830 		neh->eh_depth = htole16(depth - i);
831 		fidx = EXT_FIRST_INDEX(neh);
832 		fidx->ei_blk = htole32(border);
833 		ext4_index_store_pblock(fidx, oldblk);
834 
835 		m = 0;
836 		path[i].ep_index++;
837 		while (path[i].ep_index <= EXT_MAX_INDEX(path[i].ep_header)) {
838 			path[i].ep_index++;
839 			m++;
840 		}
841 		if (m) {
842 			memmove(++fidx, path[i].ep_index - m,
843 			    sizeof(struct ext4_extent_index) * m);
844 			neh->eh_ecount = htole16(le16toh(neh->eh_ecount) + m);
845 		}
846 
847 		ext2_extent_blk_csum_set(ip, bp->b_data);
848 		bwrite(bp);
849 		bp = NULL;
850 
851 		/* Fix old index. */
852 		if (m) {
853 			path[i].ep_header->eh_ecount =
854 			    htole16(le16toh(path[i].ep_header->eh_ecount) - m);
855 			ext4_ext_dirty(ip, path + i);
856 		}
857 
858 		i--;
859 	}
860 
861 	error = ext4_ext_insert_index(ip, path + at, border, newblk);
862 
863 cleanup:
864 	if (bp)
865 		brelse(bp);
866 
867 	if (error) {
868 		for (i = 0; i < depth; i++) {
869 			if (!ablks[i])
870 				continue;
871 			ext4_ext_blkfree(ip, ablks[i], 1, 0);
872 		}
873 	}
874 
875 	free(ablks, M_EXT2EXTENTS);
876 
877 	return (error);
878 }
879 
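/*
 * Grow the tree by one level: copy the current root (stored in the inode)
 * into a newly allocated block and turn the root into a single index
 * entry pointing at that block.
 */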
880 static int
881 ext4_ext_grow_indepth(struct inode *ip, struct ext4_extent_path *path,
882     struct ext4_extent *newext)
883 {
884 	struct m_ext2fs *fs;
885 	struct ext4_extent_path *curpath;
886 	struct ext4_extent_header *neh;
887 	struct buf *bp;
888 	e4fs_daddr_t newblk;
889 	int error = 0;
890 
891 	fs = ip->i_e2fs;
892 	curpath = path;
893 
894 	newblk = ext4_ext_alloc_meta(ip);
895 	if (newblk == 0)
896 		return (ENOSPC);
897 
898 	bp = getblk(ip->i_devvp, fsbtodb(fs, newblk), fs->e2fs_bsize, 0, 0, 0);
899 	if (!bp)
900 		return (EIO);
901 
902 	/* Move top-level index/leaf into new block. */
903 	memmove(bp->b_data, curpath->ep_header, sizeof(ip->i_data));
904 
905 	/* Set size of new block */
906 	neh = ext4_ext_block_header(bp->b_data);
907 	neh->eh_magic = htole16(EXT4_EXT_MAGIC);
908 
909 	if (ext4_ext_inode_depth(ip))
910 		neh->eh_max = htole16(ext4_ext_space_block_index(ip));
911 	else
912 		neh->eh_max = htole16(ext4_ext_space_block(ip));
913 
914 	ext2_extent_blk_csum_set(ip, bp->b_data);
915 	error = bwrite(bp);
916 	if (error)
917 		goto out;
918 
919 	bp = NULL;
920 
921 	curpath->ep_header->eh_magic = htole16(EXT4_EXT_MAGIC);
922 	curpath->ep_header->eh_max = htole16(ext4_ext_space_root(ip));
923 	curpath->ep_header->eh_ecount = htole16(1);
924 	curpath->ep_index = EXT_FIRST_INDEX(curpath->ep_header);
925 	curpath->ep_index->ei_blk = EXT_FIRST_EXTENT(path[0].ep_header)->e_blk;
926 	ext4_index_store_pblock(curpath->ep_index, newblk);
927 
928 	neh = ext4_ext_inode_header(ip);
929 	neh->eh_depth = htole16(path->ep_depth + 1);
930 	ext4_ext_dirty(ip, curpath);
931 out:
932 	brelse(bp);
933 
934 	return (error);
935 }
936 
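/*
 * Make room for 'newext': split the tree at the deepest level that still
 * has a free index slot, or grow the tree in depth if there is none, then
 * re-read the path to reflect the new layout.
 */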
937 static int
938 ext4_ext_create_new_leaf(struct inode *ip, struct ext4_extent_path *path,
939     struct ext4_extent *newext)
940 {
941 	struct ext4_extent_path *curpath;
942 	int depth, i, error;
943 
944 repeat:
945 	i = depth = ext4_ext_inode_depth(ip);
946 
947 	/* Look for a free index entry in the tree. */
948 	curpath = path + depth;
949 	while (i > 0 && !EXT_HAS_FREE_INDEX(curpath)) {
950 		i--;
951 		curpath--;
952 	}
953 
954 	/*
955 	 * We use an already allocated block for the index block,
956 	 * so subsequent data blocks should be contiguous.
957 	 */
958 	if (EXT_HAS_FREE_INDEX(curpath)) {
959 		error = ext4_ext_split(ip, path, newext, i);
960 		if (error)
961 			goto out;
962 
963 		/* Refill path. */
964 		ext4_ext_drop_refs(path);
965 		error = ext4_ext_find_extent(ip, le32toh(newext->e_blk), &path);
966 		if (error)
967 			goto out;
968 	} else {
969 		/* Tree is full, grow it in depth. */
970 		error = ext4_ext_grow_indepth(ip, path, newext);
971 		if (error)
972 			goto out;
973 
974 		/* Refill path. */
975 		ext4_ext_drop_refs(path);
976 		error = ext4_ext_find_extent(ip, le32toh(newext->e_blk), &path);
977 		if (error)
978 			goto out;
979 
980 		/* Check and split tree if required. */
981 		depth = ext4_ext_inode_depth(ip);
982 		if (le16toh(path[depth].ep_header->eh_ecount) ==
983 		    le16toh(path[depth].ep_header->eh_max))
984 			goto repeat;
985 	}
986 
987 out:
988 	return (error);
989 }
990 
991 static int
992 ext4_ext_correct_indexes(struct inode *ip, struct ext4_extent_path *path)
993 {
994 	struct ext4_extent_header *eh;
995 	struct ext4_extent *ex;
996 	int32_t border;
997 	int depth, k;
998 
999 	depth = ext4_ext_inode_depth(ip);
1000 	eh = path[depth].ep_header;
1001 	ex = path[depth].ep_ext;
1002 
1003 	if (ex == NULL || eh == NULL)
1004 		return (EIO);
1005 
1006 	if (!depth)
1007 		return (0);
1008 
1009 	/* Correct the tree only if the first leaf was modified. */
1010 	if (ex != EXT_FIRST_EXTENT(eh))
1011 		return (0);
1012 
1013 	k = depth - 1;
1014 	border = le32toh(path[depth].ep_ext->e_blk);
1015 	path[k].ep_index->ei_blk = htole32(border);
1016 	ext4_ext_dirty(ip, path + k);
1017 	while (k--) {
1018 		/* Change all left-side indexes. */
1019 		if (path[k+1].ep_index != EXT_FIRST_INDEX(path[k+1].ep_header))
1020 			break;
1021 
1022 		path[k].ep_index->ei_blk = htole32(border);
1023 		ext4_ext_dirty(ip, path + k);
1024 	}
1025 
1026 	return (0);
1027 }
1028 
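/*
 * Insert 'newext' into the leaf referenced by 'path', merging it with a
 * neighbouring extent when the logical and physical ranges are contiguous,
 * and creating a new leaf when the current one is full.  The inode's
 * extent cache is invalidated on return.
 */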
1029 static int
1030 ext4_ext_insert_extent(struct inode *ip, struct ext4_extent_path *path,
1031     struct ext4_extent *newext)
1032 {
1033 	struct ext4_extent_header * eh;
1034 	struct ext4_extent *ex, *nex, *nearex;
1035 	struct ext4_extent_path *npath;
1036 	int depth, len, error, next;
1037 
1038 	depth = ext4_ext_inode_depth(ip);
1039 	ex = path[depth].ep_ext;
1040 	npath = NULL;
1041 
1042 	if (le16toh(newext->e_len) == 0 || path[depth].ep_header == NULL)
1043 		return (EINVAL);
1044 
1045 	/* Insert block into found extent. */
1046 	if (ex && ext4_can_extents_be_merged(ex, newext)) {
1047 		ex->e_len = htole16(le16toh(ex->e_len) + le16toh(newext->e_len));
1048 		eh = path[depth].ep_header;
1049 		nearex = ex;
1050 		goto merge;
1051 	}
1052 
1053 repeat:
1054 	depth = ext4_ext_inode_depth(ip);
1055 	eh = path[depth].ep_header;
1056 	if (le16toh(eh->eh_ecount) < le16toh(eh->eh_max))
1057 		goto has_space;
1058 
1059 	/* Try next leaf */
1060 	nex = EXT_LAST_EXTENT(eh);
1061 	next = ext4_ext_next_leaf_block(ip, path);
1062 	if (le32toh(newext->e_blk) > le32toh(nex->e_blk) && next !=
1063 	    EXT4_MAX_BLOCKS) {
1064 		KASSERT(npath == NULL,
1065 		    ("ext4_ext_insert_extent: bad path"));
1066 
1067 		error = ext4_ext_find_extent(ip, next, &npath);
1068 		if (error)
1069 			goto cleanup;
1070 
1071 		if (npath->ep_depth != path->ep_depth) {
1072 			error = EIO;
1073 			goto cleanup;
1074 		}
1075 
1076 		eh = npath[depth].ep_header;
1077 		if (le16toh(eh->eh_ecount) < le16toh(eh->eh_max)) {
1078 			path = npath;
1079 			goto repeat;
1080 		}
1081 	}
1082 
1083 	/*
1084 	 * There is no free space in the found leaf,
1085 	 * try to add a new leaf to the tree.
1086 	 */
1087 	error = ext4_ext_create_new_leaf(ip, path, newext);
1088 	if (error)
1089 		goto cleanup;
1090 
1091 	depth = ext4_ext_inode_depth(ip);
1092 	eh = path[depth].ep_header;
1093 
1094 has_space:
1095 	nearex = path[depth].ep_ext;
1096 	if (!nearex) {
1097 		/* Create new extent in the leaf. */
1098 		path[depth].ep_ext = EXT_FIRST_EXTENT(eh);
1099 	} else if (le32toh(newext->e_blk) > le32toh(nearex->e_blk)) {
1100 		if (nearex != EXT_LAST_EXTENT(eh)) {
1101 			len = EXT_MAX_EXTENT(eh) - nearex;
1102 			len = (len - 1) * sizeof(struct ext4_extent);
1103 			len = len < 0 ? 0 : len;
1104 			memmove(nearex + 2, nearex + 1, len);
1105 		}
1106 		path[depth].ep_ext = nearex + 1;
1107 	} else {
1108 		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
1109 		len = len < 0 ? 0 : len;
1110 		memmove(nearex + 1, nearex, len);
1111 		path[depth].ep_ext = nearex;
1112 	}
1113 
1114 	eh->eh_ecount = htole16(le16toh(eh->eh_ecount) + 1);
1115 	nearex = path[depth].ep_ext;
1116 	nearex->e_blk = newext->e_blk;
1117 	nearex->e_start_lo = newext->e_start_lo;
1118 	nearex->e_start_hi = newext->e_start_hi;
1119 	nearex->e_len = newext->e_len;
1120 
1121 merge:
1122 	/* Try to merge extents to the right. */
1123 	while (nearex < EXT_LAST_EXTENT(eh)) {
1124 		if (!ext4_can_extents_be_merged(nearex, nearex + 1))
1125 			break;
1126 
1127 		/* Merge with next extent. */
1128 		nearex->e_len = htole16(le16toh(nearex->e_len) +
1129 		    le16toh(nearex[1].e_len));
1130 		if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
1131 			len = (EXT_LAST_EXTENT(eh) - nearex - 1) *
1132 			    sizeof(struct ext4_extent);
1133 			memmove(nearex + 1, nearex + 2, len);
1134 		}
1135 
1136 		eh->eh_ecount = htole16(le16toh(eh->eh_ecount) - 1);
1137 		KASSERT(le16toh(eh->eh_ecount) != 0,
1138 		    ("ext4_ext_insert_extent: bad ecount"));
1139 	}
1140 
1141 	/*
1142 	 * Try to merge extents to the left,
1143 	 * starting with index correction.
1144 	 */
1145 	error = ext4_ext_correct_indexes(ip, path);
1146 	if (error)
1147 		goto cleanup;
1148 
1149 	ext4_ext_dirty(ip, path + depth);
1150 
1151 cleanup:
1152 	if (npath) {
1153 		ext4_ext_drop_refs(npath);
1154 		free(npath, M_EXT2EXTENTS);
1155 	}
1156 
1157 	ip->i_ext_cache.ec_type = EXT4_EXT_CACHE_NO;
1158 	return (error);
1159 }
1160 
1161 static e4fs_daddr_t
1162 ext4_new_blocks(struct inode *ip, daddr_t lbn, e4fs_daddr_t pref,
1163     struct ucred *cred, unsigned long *count, int *perror)
1164 {
1165 	struct m_ext2fs *fs;
1166 	e4fs_daddr_t newblk;
1167 
1168 	/*
1169 	 * We will allocate only a single block for now.
1170 	 */
1171 	if (*count > 1)
1172 		return (0);
1173 
1174 	fs = ip->i_e2fs;
1175 	EXT2_LOCK(ip->i_ump);
1176 	*perror = ext2_alloc(ip, lbn, pref, (int)fs->e2fs_bsize, cred, &newblk);
1177 	if (*perror)
1178 		return (0);
1179 
1180 	if (newblk) {
1181 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
1182 		ext2_update(ip->i_vnode, 1);
1183 	}
1184 
1185 	return (newblk);
1186 }
1187 
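/*
 * Map logical block 'iblk' to a physical block, consulting the extent
 * cache and then the tree, and allocating a new block (currently a single
 * one) when no mapping exists.  '*pallocated' is set when a new extent was
 * inserted; '*bpp' and '*nb' optionally return a buffer for the block and
 * its physical block number.
 */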
1188 int
1189 ext4_ext_get_blocks(struct inode *ip, e4fs_daddr_t iblk,
1190     unsigned long max_blocks, struct ucred *cred, struct buf **bpp,
1191     int *pallocated, daddr_t *nb)
1192 {
1193 	struct m_ext2fs *fs;
1194 	struct buf *bp = NULL;
1195 	struct ext4_extent_path *path;
1196 	struct ext4_extent newex, *ex;
1197 	e4fs_daddr_t bpref, newblk = 0;
1198 	unsigned long allocated = 0;
1199 	int error = 0, depth;
1200 
1201 	if (bpp)
1202 		*bpp = NULL;
1203 	*pallocated = 0;
1204 
1205 	/* Check cache. */
1206 	path = NULL;
1207 	if ((bpref = ext4_ext_in_cache(ip, iblk, &newex))) {
1208 		if (bpref == EXT4_EXT_CACHE_IN) {
1209 			/* Block is already allocated. */
1210 			newblk = iblk - le32toh(newex.e_blk) +
1211 			    ext4_ext_extent_pblock(&newex);
1212 			allocated = le16toh(newex.e_len) - (iblk - le32toh(newex.e_blk));
1213 			goto out;
1214 		} else {
1215 			error = EIO;
1216 			goto out2;
1217 		}
1218 	}
1219 
1220 	error = ext4_ext_find_extent(ip, iblk, &path);
1221 	if (error) {
1222 		goto out2;
1223 	}
1224 
1225 	depth = ext4_ext_inode_depth(ip);
1226 	if (path[depth].ep_ext == NULL && depth != 0) {
1227 		error = EIO;
1228 		goto out2;
1229 	}
1230 
1231 	if ((ex = path[depth].ep_ext)) {
1232 		uint64_t lblk = le32toh(ex->e_blk);
1233 		uint16_t e_len  = le16toh(ex->e_len);
1234 		e4fs_daddr_t e_start = ext4_ext_extent_pblock(ex);
1235 
1236 		if (e_len > EXT4_MAX_LEN)
1237 			goto out2;
1238 
1239 		/* If the found extent covers the block, simply return it. */
1240 		if (iblk >= lblk && iblk < lblk + e_len) {
1241 			newblk = iblk - lblk + e_start;
1242 			allocated = e_len - (iblk - lblk);
1243 			ext4_ext_put_in_cache(ip, lblk, e_len,
1244 			    e_start, EXT4_EXT_CACHE_IN);
1245 			goto out;
1246 		}
1247 	}
1248 
1249 	/* Allocate the new block. */
1250 	if (S_ISREG(ip->i_mode) && (!ip->i_next_alloc_block)) {
1251 		ip->i_next_alloc_goal = 0;
1252 	}
1253 
1254 	bpref = ext4_ext_blkpref(ip, path, iblk);
1255 	allocated = max_blocks;
1256 	newblk = ext4_new_blocks(ip, iblk, bpref, cred, &allocated, &error);
1257 	if (!newblk)
1258 		goto out2;
1259 
1260 	/* Try to insert new extent into found leaf and return. */
1261 	newex.e_blk = htole32(iblk);
1262 	ext4_ext_store_pblock(&newex, newblk);
1263 	newex.e_len = htole16(allocated);
1264 	error = ext4_ext_insert_extent(ip, path, &newex);
1265 	if (error)
1266 		goto out2;
1267 
1268 	newblk = ext4_ext_extent_pblock(&newex);
1269 	ext4_ext_put_in_cache(ip, iblk, allocated, newblk, EXT4_EXT_CACHE_IN);
1270 	*pallocated = 1;
1271 
1272 out:
1273 	if (allocated > max_blocks)
1274 		allocated = max_blocks;
1275 
1276 	if (bpp)
1277 	{
1278 		fs = ip->i_e2fs;
1279 		error = bread(ip->i_devvp, fsbtodb(fs, newblk),
1280 		    fs->e2fs_bsize, cred, &bp);
1281 		if (error) {
1282 			brelse(bp);
1283 		} else {
1284 			*bpp = bp;
1285 		}
1286 	}
1287 
1288 out2:
1289 	if (path) {
1290 		ext4_ext_drop_refs(path);
1291 		free(path, M_EXT2EXTENTS);
1292 	}
1293 
1294 	if (nb)
1295 		*nb = newblk;
1296 
1297 	return (error);
1298 }
1299 
1300 static inline uint16_t
1301 ext4_ext_get_actual_len(struct ext4_extent *ext)
1302 {
1303 
1304 	return (le16toh(ext->e_len) <= EXT_INIT_MAX_LEN ?
1305 	    le16toh(ext->e_len) : (le16toh(ext->e_len) - EXT_INIT_MAX_LEN));
1306 }
1307 
1308 static inline struct ext4_extent_header *
1309 ext4_ext_header(struct inode *ip)
1310 {
1311 
1312 	return ((struct ext4_extent_header *)ip->i_db);
1313 }
1314 
1315 static int
1316 ext4_remove_blocks(struct inode *ip, struct ext4_extent *ex,
1317     unsigned long from, unsigned long to)
1318 {
1319 	unsigned long num, start;
1320 
1321 	if (from >= le32toh(ex->e_blk) &&
1322 	    to == le32toh(ex->e_blk) + ext4_ext_get_actual_len(ex) - 1) {
1323 		/* Tail cleanup. */
1324 		num = le32toh(ex->e_blk) + ext4_ext_get_actual_len(ex) - from;
1325 		start = ext4_ext_extent_pblock(ex) +
1326 		    ext4_ext_get_actual_len(ex) - num;
1327 		ext4_ext_blkfree(ip, start, num, 0);
1328 	}
1329 
1330 	return (0);
1331 }
1332 
1333 static int
1334 ext4_ext_rm_index(struct inode *ip, struct ext4_extent_path *path)
1335 {
1336 	e4fs_daddr_t leaf;
1337 
1338 	/* Free index block. */
1339 	path--;
1340 	leaf = ext4_ext_index_pblock(path->ep_index);
1341 	KASSERT(path->ep_header->eh_ecount != 0,
1342 	    ("ext4_ext_rm_index: bad ecount"));
1343 	path->ep_header->eh_ecount =
1344 	    htole16(le16toh(path->ep_header->eh_ecount) - 1);
1345 	ext4_ext_dirty(ip, path);
1346 	ext4_ext_blkfree(ip, leaf, 1, 0);
1347 	return (0);
1348 }
1349 
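/*
 * Remove extents from a leaf node, walking backwards from the last extent,
 * for all blocks at or beyond 'start'.  Only whole-extent and tail removal
 * are supported; the freed blocks are returned to the allocator and an
 * empty leaf is unlinked from the index level above.
 */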
1350 static int
1351 ext4_ext_rm_leaf(struct inode *ip, struct ext4_extent_path *path,
1352     uint64_t start)
1353 {
1354 	struct ext4_extent_header *eh;
1355 	struct ext4_extent *ex;
1356 	unsigned int a, b, block, num;
1357 	unsigned long ex_blk;
1358 	unsigned short ex_len;
1359 	int depth;
1360 	int error, correct_index;
1361 
1362 	depth = ext4_ext_inode_depth(ip);
1363 	if (!path[depth].ep_header) {
1364 		if (path[depth].ep_data == NULL)
1365 			return (EINVAL);
1366 		path[depth].ep_header =
1367 		    (struct ext4_extent_header* )path[depth].ep_data;
1368 	}
1369 
1370 	eh = path[depth].ep_header;
1371 	if (!eh) {
1372 		SDT_PROBE2(ext2fs, , trace, extents, 1,
1373 		    "bad header => extent corrupted");
1374 		return (EIO);
1375 	}
1376 
1377 	ex = EXT_LAST_EXTENT(eh);
1378 	ex_blk = le32toh(ex->e_blk);
1379 	ex_len = ext4_ext_get_actual_len(ex);
1380 
1381 	error = 0;
1382 	correct_index = 0;
1383 	while (ex >= EXT_FIRST_EXTENT(eh) && ex_blk + ex_len > start) {
1384 		path[depth].ep_ext = ex;
1385 		a = ex_blk > start ? ex_blk : start;
1386 		b = (uint64_t)ex_blk + ex_len - 1 <
1387 		    EXT4_MAX_BLOCKS ? ex_blk + ex_len - 1 : EXT4_MAX_BLOCKS;
1388 
1389 		if (a != ex_blk && b != ex_blk + ex_len - 1)
1390 			return (EINVAL);
1391 		else if (a != ex_blk) {
1392 			/* Remove tail of the extent. */
1393 			block = ex_blk;
1394 			num = a - block;
1395 		} else if (b != ex_blk + ex_len - 1) {
1396 			/* Remove head of the extent, not implemented. */
1397 			return (EINVAL);
1398 		} else {
1399 			/* Remove whole extent. */
1400 			block = ex_blk;
1401 			num = 0;
1402 		}
1403 
1404 		if (ex == EXT_FIRST_EXTENT(eh))
1405 			correct_index = 1;
1406 
1407 		error = ext4_remove_blocks(ip, ex, a, b);
1408 		if (error)
1409 			goto out;
1410 
1411 		if (num == 0) {
1412 			ext4_ext_store_pblock(ex, 0);
1413 			eh->eh_ecount = htole16(le16toh(eh->eh_ecount) - 1);
1414 		}
1415 
1416 		ex->e_blk = htole32(block);
1417 		ex->e_len = htole16(num);
1418 
1419 		ext4_ext_dirty(ip, path + depth);
1420 
1421 		ex--;
1422 		ex_blk = le32toh(ex->e_blk);
1423 		ex_len = ext4_ext_get_actual_len(ex);
1424 	}
1425 
1426 	if (correct_index && le16toh(eh->eh_ecount))
1427 		error = ext4_ext_correct_indexes(ip, path);
1428 
1429 	/*
1430 	 * If this leaf is free, we should
1431 	 * remove it from index block above.
1432 	 */
1433 	if (error == 0 && eh->eh_ecount == 0 &&
1434 	    path[depth].ep_data != NULL)
1435 		error = ext4_ext_rm_index(ip, path + depth);
1436 
1437 out:
1438 	return (error);
1439 }
1440 
1441 static struct buf *
1442 ext4_read_extent_tree_block(struct inode *ip, e4fs_daddr_t pblk,
1443     int depth, int flags)
1444 {
1445 	struct m_ext2fs *fs;
1446 	struct ext4_extent_header *eh;
1447 	struct buf *bp;
1448 	int error;
1449 
1450 	fs = ip->i_e2fs;
1451 	error = bread(ip->i_devvp, fsbtodb(fs, pblk),
1452 	    fs->e2fs_bsize, NOCRED, &bp);
1453 	if (error) {
1454 		return (NULL);
1455 	}
1456 
1457 	eh = ext4_ext_block_header(bp->b_data);
1458 	if (le16toh(eh->eh_depth) != depth) {
1459 		SDT_PROBE2(ext2fs, , trace, extents, 1,
1460 		    "unexpected eh_depth");
1461 		goto err;
1462 	}
1463 
1464 	error = ext4_ext_check_header(ip, eh);
1465 	if (error)
1466 		goto err;
1467 
1468 	return (bp);
1469 
1470 err:
1471 	brelse(bp);
1472 	return (NULL);
1473 
1474 }
1475 
1476 static int inline
1477 ext4_ext_more_to_rm(struct ext4_extent_path *path)
1478 {
1479 
1480 	KASSERT(path->ep_index != NULL,
1481 	    ("ext4_ext_more_to_rm: bad index from path"));
1482 
1483 	if (path->ep_index < EXT_FIRST_INDEX(path->ep_header))
1484 		return (0);
1485 
1486 	if (le16toh(path->ep_header->eh_ecount) == path->index_count)
1487 		return (0);
1488 
1489 	return (1);
1490 }
1491 
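/*
 * Truncate the extent tree: iteratively descend to each leaf, remove the
 * extents starting at logical block 'length' and prune index nodes that
 * become empty.  When the whole tree is gone, reset the in-inode root
 * header to an empty tree.
 */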
1492 int
1493 ext4_ext_remove_space(struct inode *ip, off_t length, int flags,
1494     struct ucred *cred, struct thread *td)
1495 {
1496 	struct buf *bp;
1497 	struct ext4_extent_header *ehp;
1498 	struct ext4_extent_path *path;
1499 	int depth;
1500 	int i, error;
1501 
1502 	ehp = (struct ext4_extent_header *)ip->i_db;
1503 	depth = ext4_ext_inode_depth(ip);
1504 
1505 	error = ext4_ext_check_header(ip, ehp);
1506 	if (error)
1507 		return (error);
1508 
1509 	path = malloc(sizeof(struct ext4_extent_path) * (depth + 1),
1510 	    M_EXT2EXTENTS, M_WAITOK | M_ZERO);
1511 	path[0].ep_header = ehp;
1512 	path[0].ep_depth = depth;
1513 	i = 0;
1514 	while (error == 0 && i >= 0) {
1515 		if (i == depth) {
1516 			/* This is leaf. */
1517 			error = ext4_ext_rm_leaf(ip, path, length);
1518 			if (error)
1519 				break;
1520 			free(path[i].ep_data, M_EXT2EXTENTS);
1521 			path[i].ep_data = NULL;
1522 			i--;
1523 			continue;
1524 		}
1525 
1526 		/* This is index. */
1527 		if (!path[i].ep_header)
1528 			path[i].ep_header =
1529 			    (struct ext4_extent_header *)path[i].ep_data;
1530 
1531 		if (!path[i].ep_index) {
1532 			/* This level hasn't been touched yet. */
1533 			path[i].ep_index = EXT_LAST_INDEX(path[i].ep_header);
1534 			path[i].index_count =
1535 			    le16toh(path[i].ep_header->eh_ecount) + 1;
1536 		} else {
1537 			/* We have already been here, look at the next index. */
1538 			path[i].ep_index--;
1539 		}
1540 
1541 		if (ext4_ext_more_to_rm(path + i)) {
1542 			memset(path + i + 1, 0, sizeof(*path));
1543 			bp = ext4_read_extent_tree_block(ip,
1544 			    ext4_ext_index_pblock(path[i].ep_index),
1545 			    path[0].ep_depth - (i + 1), 0);
1546 			if (!bp) {
1547 				error = EIO;
1548 				break;
1549 			}
1550 
1551 			ext4_ext_fill_path_bdata(&path[i+1], bp,
1552 			    ext4_ext_index_pblock(path[i].ep_index));
1553 			brelse(bp);
1554 			path[i].index_count =
1555 			    le16toh(path[i].ep_header->eh_ecount);
1556 			i++;
1557 		} else {
1558 			if (path[i].ep_header->eh_ecount == 0 && i > 0) {
1559 				/* Index is empty, remove it. */
1560 				error = ext4_ext_rm_index(ip, path + i);
1561 			}
1562 			free(path[i].ep_data, M_EXT2EXTENTS);
1563 			path[i].ep_data = NULL;
1564 			i--;
1565 		}
1566 	}
1567 
1568 	if (path->ep_header->eh_ecount == 0) {
1569 		/*
1570 		 * Truncate the tree to zero.
1571 		 */
1572 		 ext4_ext_header(ip)->eh_depth = 0;
1573 		 ext4_ext_header(ip)->eh_max = htole16(ext4_ext_space_root(ip));
1574 		 ext4_ext_dirty(ip, path);
1575 	}
1576 
1577 	ext4_ext_drop_refs(path);
1578 	free(path, M_EXT2EXTENTS);
1579 
1580 	return (error);
1581 }
1582