// SPDX-License-Identifier: GPL-2.0

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "messages.h"
#include "ctree.h"
#include "volumes.h"
#include "extent_map.h"
#include "compression.h"
#include "btrfs_inode.h"

static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
			sizeof(struct extent_map), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void __cold extent_map_exit(void)
{
	kmem_cache_destroy(extent_map_cache);
}

/*
 * Initialize the extent tree @tree.  Should be called for each new inode or
 * other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}

/*
 * Allocate a new extent_map structure.  The new structure is returned with a
 * reference count of one and needs to be freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;

	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	RB_CLEAR_NODE(&em->rb_node);
	refcount_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}

/*
 * Drop the reference count on @em by one and free the structure if the
 * reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (refcount_dec_and_test(&em->refs)) {
		WARN_ON(extent_map_in_tree(em));
		WARN_ON(!list_empty(&em->list));
		kmem_cache_free(extent_map_cache, em);
	}
}

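/*
 * Illustrative sketch (not part of this file): the typical lifecycle of an
 * extent map that never enters a tree. The field values are made up:
 *
 *	struct extent_map *em = alloc_extent_map();
 *
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = 0;
 *	em->len = SZ_4K;
 *	em->block_start = EXTENT_MAP_HOLE;
 *	...
 *	free_extent_map(em);
 */
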
/* Do the math around the end of an extent, handling wrapping. */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);
	bool leftmost = true;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start) {
			p = &(*p)->rb_left;
		} else if (em->start >= extent_map_end(entry)) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return -EEXIST;
		}
	}

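	/*
	 * The new range may still overlap an entry that starts after
	 * em->start: scan forward from the last visited node and reject the
	 * insertion if [em->start, end) intersects that entry.
	 */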
	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

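	/*
	 * Same check in the other direction: walk back to the first entry
	 * that starts at or before em->start and make sure the new range
	 * does not intersect it either.
	 */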
	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color_cached(&em->rb_node, root, leftmost);
	return 0;
}

/*
 * Search through the tree for an extent_map with a given offset.  If it can't
 * be found, try to find some neighboring extent maps and return the closest
 * one through @prev_or_next_ret.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_or_next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	ASSERT(prev_or_next_ret);

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	orig_prev = prev;
	while (prev && offset >= extent_map_end(prev_entry)) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
	}

	/*
	 * An extent map ending after @offset was found while scanning
	 * forward, return it, as in this case the caller does not care
	 * about finding the previous one too.
	 */
	if (prev) {
		*prev_or_next_ret = prev;
		return NULL;
	}

	prev = orig_prev;
	prev_entry = rb_entry(prev, struct extent_map, rb_node);
	while (prev && offset < prev_entry->start) {
		prev = rb_prev(prev);
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
	}
	*prev_or_next_ret = prev;

	return NULL;
}

static inline u64 extent_map_block_end(const struct extent_map *em)
{
	if (em->block_start + em->block_len < em->block_start)
		return (u64)-1;
	return em->block_start + em->block_len;
}

static bool can_merge_extent_map(const struct extent_map *em)
{
	if (em->flags & EXTENT_FLAG_PINNED)
		return false;

	/* Don't merge compressed extents, we need to know their actual size. */
	if (extent_map_is_compressed(em))
		return false;

	if (em->flags & EXTENT_FLAG_LOGGING)
		return false;

	/*
	 * We don't want to merge extent maps that are still in the list of
	 * modified extents (not yet written to the log), since the result may
	 * not reflect exactly what is on disk, and that would be bad.
	 */
	if (!list_empty(&em->list))
		return false;

	return true;
}

/* Check to see if two extent_map structs are adjacent and safe to merge. */
static bool mergeable_maps(const struct extent_map *prev, const struct extent_map *next)
{
	if (extent_map_end(prev) != next->start)
		return false;

	if (prev->flags != next->flags)
		return false;

	if (next->block_start < EXTENT_MAP_LAST_BYTE - 1)
		return next->block_start == extent_map_block_end(prev);

	/* HOLES and INLINE extents. */
	return next->block_start == prev->block_start;
}

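/*
 * Worked example for the check above (hypothetical values): if @prev maps
 * file range [0, 64K) to disk bytenr 1M and @next maps [64K, 128K) to disk
 * bytenr 1M + 64K with identical flags, the two are contiguous in both the
 * logical and the physical space, so they are mergeable and try_merge_map()
 * below can combine them into a single map for [0, 128K) at bytenr 1M.
 */
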
static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	/*
	 * We can't modify an extent map that is in the tree and that is being
	 * used by another task, as it can cause that other task to see it in an
	 * inconsistent state during the merging. We always have 1 reference for
	 * the tree and 1 for this task (which is unpinning the extent map or
	 * clearing the logging flag), so anything > 2 means it's being used by
	 * other tasks too.
	 */
	if (refcount_read(&em->refs) > 2)
		return;

	if (!can_merge_extent_map(em))
		return;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);
			em->flags |= EXTENT_FLAG_MERGED;

			rb_erase_cached(&merge->rb_node, &tree->map);
			RB_CLEAR_NODE(&merge->rb_node);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase_cached(&merge->rb_node, &tree->map);
		RB_CLEAR_NODE(&merge->rb_node);
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		em->flags |= EXTENT_FLAG_MERGED;
		free_extent_map(merge);
	}
}

/*
 * Unpin an extent from the cache.
 *
 * @inode:	the inode from which we are unpinning an extent range
 * @start:	logical offset in the file
 * @len:	length of the extent
 * @gen:	generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly.  Set the generation
 * to the generation that actually added the file item to the inode so we know
 * we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *tree = &inode->extent_tree;
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	if (WARN_ON(!em)) {
		btrfs_warn(fs_info,
"no extent map found for inode %llu (root %lld) when unpinning extent range [%llu, %llu), generation %llu",
			   btrfs_ino(inode), btrfs_root_id(inode->root),
			   start, len, gen);
		goto out;
	}

	if (WARN_ON(em->start != start))
		btrfs_warn(fs_info,
"found extent map for inode %llu (root %lld) with unexpected start offset %llu when unpinning extent range [%llu, %llu), generation %llu",
			   btrfs_ino(inode), btrfs_root_id(inode->root),
			   em->start, start, len, gen);

	em->generation = gen;
	em->flags &= ~EXTENT_FLAG_PINNED;
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (em->flags & EXTENT_FLAG_FILLING) {
		prealloc = true;
		em->flags &= ~EXTENT_FLAG_FILLING;
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

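/*
 * Hedged usage sketch: the names below are illustrative, see the ordered
 * extent completion code for the real call site. Once the file extent item
 * for a finished ordered extent has been inserted, the pinned extent map
 * covering it is released with something like:
 *
 *	unpin_extent_cache(inode, ordered->file_offset, ordered->num_bytes,
 *			   trans->transid);
 */
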
void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	lockdep_assert_held_write(&tree->lock);

	em->flags &= ~EXTENT_FLAG_LOGGING;
	if (extent_map_in_tree(em))
		try_merge_map(tree, em);
}

static inline void setup_extent_mapping(struct extent_map_tree *tree,
					struct extent_map *em,
					int modified)
{
	refcount_inc(&em->refs);
	em->mod_start = em->start;
	em->mod_len = em->len;

	ASSERT(list_empty(&em->list));

	if (modified)
		list_add(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
}

/*
 * Add new extent map to the extent tree
 *
 * @tree:	tree to insert new map in
 * @em:		map to insert
 * @modified:	indicate whether the given @em should be added to the list of
 *		modified extents, meaning the extent needs to be logged
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
static int add_extent_mapping(struct extent_map_tree *tree,
			      struct extent_map *em, int modified)
{
	int ret = 0;

	lockdep_assert_held_write(&tree->lock);

	ret = tree_insert(&tree->map, em);
	if (ret)
		goto out;

	setup_extent_mapping(tree, em, modified);
out:
	return ret;
}

static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev_or_next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map.rb_root, start, &prev_or_next);
	if (!rb_node) {
		if (prev_or_next)
			rb_node = prev_or_next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	refcount_inc(&em->refs);
	return em;
}

/*
 * Lookup extent_map that intersects @start + @len range.
 *
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}

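/*
 * Illustrative sketch of a lookup (caller-side, assumed context): callers
 * must hold the tree lock (read or write) around the call and must drop the
 * returned reference when done:
 *
 *	read_lock(&em_tree->lock);
 *	em = lookup_extent_mapping(em_tree, start, len);
 *	read_unlock(&em_tree->lock);
 *	if (em) {
 *		...use em->block_start, em->len, em->flags...
 *		free_extent_map(em);
 *	}
 */
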
/*
 * Find a nearby extent map intersecting @start + @len (not an exact search).
 *
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent map may be returned.
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

/*
 * Remove an extent_map from the extent tree.
 *
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Remove @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	lockdep_assert_held_write(&tree->lock);

	WARN_ON(em->flags & EXTENT_FLAG_PINNED);
	rb_erase_cached(&em->rb_node, &tree->map);
	if (!(em->flags & EXTENT_FLAG_LOGGING))
		list_del_init(&em->list);
	RB_CLEAR_NODE(&em->rb_node);
}

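/*
 * Sketch of the usual removal pattern (assumed caller context), as
 * drop_all_extent_maps_fast() below does: take the write lock, remove the
 * map from the tree and then drop the reference the tree was holding:
 *
 *	write_lock(&tree->lock);
 *	remove_extent_mapping(tree, em);
 *	write_unlock(&tree->lock);
 *	free_extent_map(em);
 */
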
static void replace_extent_mapping(struct extent_map_tree *tree,
				   struct extent_map *cur,
				   struct extent_map *new,
				   int modified)
{
	lockdep_assert_held_write(&tree->lock);

	WARN_ON(cur->flags & EXTENT_FLAG_PINNED);
	ASSERT(extent_map_in_tree(cur));
	if (!(cur->flags & EXTENT_FLAG_LOGGING))
		list_del_init(&cur->list);
	rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
	RB_CLEAR_NODE(&cur->rb_node);

	setup_extent_mapping(tree, new, modified);
}

static struct extent_map *next_extent_map(const struct extent_map *em)
{
	struct rb_node *next;

	next = rb_next(&em->rb_node);
	if (!next)
		return NULL;
	return container_of(next, struct extent_map, rb_node);
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
	struct rb_node *prev;

	prev = rb_prev(&em->rb_node);
	if (!prev)
		return NULL;
	return container_of(prev, struct extent_map, rb_node);
}

/*
 * Helper for btrfs_get_extent().  Given an existing extent in the tree (the
 * one nearest to @map_start) and an extent map we want to insert, deal with
 * the overlap by trimming the new extent to the free gap, then insert the
 * best-fitting result into the tree.
 */
static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
					 struct extent_map *existing,
					 struct extent_map *em,
					 u64 map_start)
{
	struct extent_map *prev;
	struct extent_map *next;
	u64 start;
	u64 end;
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));

	if (existing->start > map_start) {
		next = existing;
		prev = prev_extent_map(next);
	} else {
		prev = existing;
		next = next_extent_map(prev);
	}

	start = prev ? extent_map_end(prev) : em->start;
	start = max_t(u64, start, em->start);
	end = next ? next->start : extent_map_end(em);
	end = min_t(u64, end, extent_map_end(em));
	start_diff = start - em->start;
	em->start = start;
	em->len = end - start;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !extent_map_is_compressed(em)) {
		em->block_start += start_diff;
		em->block_len = em->len;
	}
	return add_extent_mapping(em_tree, em, 0);
}

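/*
 * Worked example for the helper above (hypothetical numbers): @em covers
 * [0, 16K), an existing map already covers [0, 4K) and @map_start is 8K.
 * Then @prev is the existing map, there is no next map, and @em gets
 * trimmed to [4K, 16K), with block_start advanced by the same 4K for a
 * regular, uncompressed extent, before being inserted.
 */
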
/*
 * Add extent mapping into em_tree.
 *
 * @fs_info:  the filesystem
 * @em_tree:  extent tree into which we want to insert the extent mapping
 * @em_in:    extent we are inserting
 * @start:    start of the logical range btrfs_get_extent() is requesting
 * @len:      length of the logical range btrfs_get_extent() is requesting
 *
 * Note that @em_in's range may be different from [start, start+len),
 * but they must overlap.
 *
 * Insert @em_in into @em_tree. In case there is an overlapping range, handle
 * the -EEXIST by either:
 * a) Returning the existing extent in @em_in if @start is within the
 *    existing em.
 * b) Merging the existing extent with @em_in passed in.
 *
 * Return 0 on success, otherwise -EEXIST.
 */
int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
			     struct extent_map_tree *em_tree,
			     struct extent_map **em_in, u64 start, u64 len)
{
	int ret;
	struct extent_map *em = *em_in;

	/*
	 * Tree-checker should have rejected any inline extent with non-zero
	 * file offset. Here just do a sanity check.
	 */
	if (em->block_start == EXTENT_MAP_INLINE)
		ASSERT(em->start == 0);

	ret = add_extent_mapping(em_tree, em, 0);
	/*
	 * It is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
	 * an overlapping map exists in the tree.
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		existing = search_extent_mapping(em_tree, start, len);

		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);

		/*
		 * existing will always be non-NULL, since there must be an
		 * extent map causing the -EEXIST.
		 */
		if (start >= existing->start &&
		    start < extent_map_end(existing)) {
			free_extent_map(em);
			*em_in = existing;
			ret = 0;
		} else {
			u64 orig_start = em->start;
			u64 orig_len = em->len;

			/*
			 * The existing extent map is the one nearest to
			 * the [start, start + len) range that overlaps it.
			 */
			ret = merge_extent_mapping(em_tree, existing,
						   em, start);
			if (ret) {
				free_extent_map(em);
				*em_in = NULL;
				WARN_ONCE(ret,
"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
					  ret, existing->start, existing->len,
					  orig_start, orig_len);
			}
			free_extent_map(existing);
		}
	}

	ASSERT(ret == 0 || ret == -EEXIST);
	return ret;
}

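/*
 * Hedged usage sketch, modeled on btrfs_get_extent(): the caller builds an
 * extent map from the on-disk file extent item and inserts it while holding
 * the tree's write lock, accepting that a racing task may have inserted an
 * overlapping map first:
 *
 *	write_lock(&em_tree->lock);
 *	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
 *	write_unlock(&em_tree->lock);
 *
 * On success, em points either to the map we inserted or to the pre-existing
 * map that caused the -EEXIST.
 */
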
/*
 * Drop all extent maps from a tree in the fastest possible way, rescheduling
 * if needed. This avoids searching the tree, from the root down to the first
 * extent map, before each deletion.
 */
static void drop_all_extent_maps_fast(struct extent_map_tree *tree)
{
	write_lock(&tree->lock);
	while (!RB_EMPTY_ROOT(&tree->map.rb_root)) {
		struct extent_map *em;
		struct rb_node *node;

		node = rb_first_cached(&tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
		remove_extent_mapping(tree, em);
		free_extent_map(em);
		cond_resched_rwlock_write(&tree->lock);
	}
	write_unlock(&tree->lock);
}

/*
 * Drop all extent maps in a given range.
 *
 * @inode:       The target inode.
 * @start:       Start offset of the range.
 * @end:         End offset of the range (inclusive value).
 * @skip_pinned: Indicate if pinned extent maps should be ignored or not.
 *
 * This drops all the extent maps that intersect the given range [@start, @end].
 * Extent maps that partially overlap the range, extending before or beyond it,
 * are split.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
				 bool skip_pinned)
{
	struct extent_map *split;
	struct extent_map *split2;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 len = end - start + 1;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		if (start == 0 && !skip_pinned) {
			drop_all_extent_maps_fast(em_tree);
			return;
		}
		len = (u64)-1;
	} else {
		/* Make end offset exclusive for use in the loop below. */
		end++;
	}

	/*
	 * It's ok if we fail to allocate the extent maps, see the comment near
	 * the bottom of the loop below. We only need two spare extent maps in
	 * the worst case, where the first extent map that intersects our range
	 * starts before the range and the last extent map that intersects our
	 * range ends after our range (and they might be the same extent map),
	 * because we need to split those two extent maps at the boundaries.
	 */
	split = alloc_extent_map();
	split2 = alloc_extent_map();

	write_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);

	while (em) {
		/* extent_map_end() returns exclusive value (last byte + 1). */
		const u64 em_end = extent_map_end(em);
		struct extent_map *next_em = NULL;
		u64 gen;
		unsigned long flags;
		bool modified;
		bool compressed;

		if (em_end < end) {
			next_em = next_extent_map(em);
			if (next_em) {
				if (next_em->start < end)
					refcount_inc(&next_em->refs);
				else
					next_em = NULL;
			}
		}

		if (skip_pinned && (em->flags & EXTENT_FLAG_PINNED)) {
			start = em_end;
			goto next;
		}

		flags = em->flags;
		/*
		 * In case we split the extent map, we want to preserve the
		 * EXTENT_FLAG_LOGGING flag on our extent map, but we don't want
		 * it on the new extent maps.
		 */
		em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
		modified = !list_empty(&em->list);

		/*
		 * The extent map does not cross our target range, so no need to
		 * split it, we can remove it directly.
		 */
		if (em->start >= start && em_end <= end)
			goto remove_em;

		gen = em->generation;
		compressed = extent_map_is_compressed(em);

		if (em->start < start) {
			if (!split) {
				split = split2;
				split2 = NULL;
				if (!split)
					goto remove_em;
			}
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
						em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->flags = flags;
			replace_extent_mapping(em_tree, em, split, modified);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em_end > end) {
			if (!split) {
				split = split2;
				split2 = NULL;
				if (!split)
					goto remove_em;
			}
			split->start = end;
			split->len = em_end - end;
			split->block_start = em->block_start;
			split->flags = flags;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
						    em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->orig_start = em->orig_start;
				} else {
					/*
					 * The split starts at @end, so advance
					 * the disk bytenr by the same delta.
					 * Don't use @start here, as it may
					 * have been moved forward past pinned
					 * extent maps above.
					 */
					const u64 diff = end - em->start;

					split->block_len = split->len;
					split->block_start += diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->orig_block_len = 0;
			}

			if (extent_map_in_tree(em)) {
				replace_extent_mapping(em_tree, em, split,
						       modified);
			} else {
				int ret;

				ret = add_extent_mapping(em_tree, split,
							 modified);
				/* Logic error, shouldn't happen. */
				ASSERT(ret == 0);
				if (WARN_ON(ret != 0) && modified)
					btrfs_set_inode_full_sync(inode);
			}
			free_extent_map(split);
			split = NULL;
		}
remove_em:
		if (extent_map_in_tree(em)) {
			/*
			 * If the extent map is still in the tree it means that
			 * either of the following is true:
			 *
			 * 1) It fits entirely in our range (doesn't end beyond
			 *    it or starts before it);
			 *
			 * 2) It starts before our range and/or ends after our
			 *    range, and we were not able to allocate the extent
			 *    maps for split operations, @split and @split2.
			 *
			 * If we are at case 2) then we just remove the entire
			 * extent map - this is fine since if anyone needs it to
			 * access the subranges outside our range, they will
			 * just load it again from the subvolume tree's file
			 * extent item. However if the extent map was in the
			 * list of modified extents, then we must mark the inode
			 * for a full fsync, otherwise a fast fsync will miss
			 * this extent if it's new and needs to be logged.
			 */
			if ((em->start < start || em_end > end) && modified) {
				ASSERT(!split);
				btrfs_set_inode_full_sync(inode);
			}
			remove_extent_mapping(em_tree, em);
		}

		/*
		 * Once for the tree reference (we replaced or removed the
		 * extent map from the tree).
		 */
		free_extent_map(em);
next:
		/* Once for us (for our lookup reference). */
		free_extent_map(em);

		em = next_em;
	}

	write_unlock(&em_tree->lock);

	free_extent_map(split);
	free_extent_map(split2);
}

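/*
 * Worked example (hypothetical numbers): with a single extent map covering
 * [0, 12K) in the tree, btrfs_drop_extent_map_range(inode, 4K, 8K - 1, false)
 * drops the middle of that map and, allocations permitting, leaves two split
 * maps behind, one for [0, 4K) and one for [8K, 12K).
 */
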
/*
 * Replace a range in the inode's extent map tree with a new extent map.
 *
 * @inode:      The target inode.
 * @new_em:     The new extent map to add to the inode's extent map tree.
 * @modified:   Indicate if the new extent map should be added to the list of
 *              modified extents (for fast fsync tracking).
 *
 * Drops all the extent maps in the inode's extent map tree that intersect the
 * range of the new extent map and adds the new extent map to the tree.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
				   struct extent_map *new_em,
				   bool modified)
{
	const u64 end = new_em->start + new_em->len - 1;
	struct extent_map_tree *tree = &inode->extent_tree;
	int ret;

	ASSERT(!extent_map_in_tree(new_em));

	/*
	 * The caller has locked an appropriate file range in the inode's io
	 * tree, but getting -EEXIST when adding the new extent map can still
	 * happen in case there are extents that partially cover the range, and
	 * this is due to two tasks operating on different parts of the extent.
	 * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from
	 * btrfs_get_extent") for an example and details.
	 */
	do {
		btrfs_drop_extent_map_range(inode, new_em->start, end, false);
		write_lock(&tree->lock);
		ret = add_extent_mapping(tree, new_em, modified);
		write_unlock(&tree->lock);
	} while (ret == -EEXIST);

	return ret;
}

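/*
 * Illustrative sketch (assumed caller context): a caller that has just
 * created the extent map for a newly written extent swaps it into the tree
 * with:
 *
 *	ret = btrfs_replace_extent_map_range(inode, em, true);
 *
 * passing modified == true so that a fast fsync will log the new extent.
 */
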
/*
 * Split off the first @pre bytes from the extent_map at [start, start + len),
 * and set the block_start for it to @new_logical.
 *
 * This function is used when an ordered_extent needs to be split.
 */
int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
		     u64 new_logical)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	struct extent_map *split_pre = NULL;
	struct extent_map *split_mid = NULL;
	int ret = 0;
	unsigned long flags;

	ASSERT(pre != 0);
	ASSERT(pre < len);

	split_pre = alloc_extent_map();
	if (!split_pre)
		return -ENOMEM;
	split_mid = alloc_extent_map();
	if (!split_mid) {
		ret = -ENOMEM;
		goto out_free_pre;
	}

	lock_extent(&inode->io_tree, start, start + len - 1, NULL);
	write_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (!em) {
		ret = -EIO;
		goto out_unlock;
	}

	ASSERT(em->len == len);
	ASSERT(!extent_map_is_compressed(em));
	ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE);
	ASSERT(em->flags & EXTENT_FLAG_PINNED);
	ASSERT(!(em->flags & EXTENT_FLAG_LOGGING));
	ASSERT(!list_empty(&em->list));

	flags = em->flags;
	em->flags &= ~EXTENT_FLAG_PINNED;

	/* First, replace the em with a new extent_map starting from em->start. */
	split_pre->start = em->start;
	split_pre->len = pre;
	split_pre->orig_start = split_pre->start;
	split_pre->block_start = new_logical;
	split_pre->block_len = split_pre->len;
	split_pre->orig_block_len = split_pre->block_len;
	split_pre->ram_bytes = split_pre->len;
	split_pre->flags = flags;
	split_pre->generation = em->generation;

	replace_extent_mapping(em_tree, em, split_pre, 1);

	/*
	 * Now we only have an extent_map at:
	 *     [em->start, em->start + pre)
	 */

	/* Insert the middle extent_map. */
	split_mid->start = em->start + pre;
	split_mid->len = em->len - pre;
	split_mid->orig_start = split_mid->start;
	split_mid->block_start = em->block_start + pre;
	split_mid->block_len = split_mid->len;
	split_mid->orig_block_len = split_mid->block_len;
	split_mid->ram_bytes = split_mid->len;
	split_mid->flags = flags;
	split_mid->generation = em->generation;
	add_extent_mapping(em_tree, split_mid, 1);

	/* Once for us */
	free_extent_map(em);
	/* Once for the tree */
	free_extent_map(em);

out_unlock:
	write_unlock(&em_tree->lock);
	unlock_extent(&inode->io_tree, start, start + len - 1, NULL);
	free_extent_map(split_mid);
out_free_pre:
	free_extent_map(split_pre);
	return ret;
}
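
/*
 * Worked example for split_extent_map() (hypothetical numbers): an ordered
 * extent at file offset 0 with a length of 16K, mapped to disk bytenr 1M,
 * had its first 4K actually written at disk bytenr 2M. Calling
 *
 *	split_extent_map(inode, 0, 16K, 4K, 2M);
 *
 * replaces the pinned extent map with one for [0, 4K) at bytenr 2M and
 * inserts a second one for [4K, 16K) at bytenr 1M + 4K.
 */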
1019