xref: /linux/fs/btrfs/defrag.c (revision 62597edf6340191511bdf9a7f64fa315ddc58805)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include "ctree.h"
8 #include "disk-io.h"
9 #include "transaction.h"
10 #include "locking.h"
11 #include "accessors.h"
12 #include "messages.h"
13 #include "delalloc-space.h"
14 #include "subpage.h"
15 #include "defrag.h"
16 #include "file-item.h"
17 #include "super.h"
18 
19 static struct kmem_cache *btrfs_inode_defrag_cachep;
20 
21 /*
22  * When auto defrag is enabled we queue up these defrag structs to remember
23  * which inodes need defragging passes.
24  */
25 struct inode_defrag {
26 	struct rb_node rb_node;
27 	/* Inode number */
28 	u64 ino;
29 	/*
30 	 * Transid where the defrag was added, we search for extents newer than
31 	 * this.
32 	 */
33 	u64 transid;
34 
35 	/* Root objectid */
36 	u64 root;
37 
38 	/*
39 	 * The extent size threshold for autodefrag.
40 	 *
41 	 * This value is different for compressed/non-compressed extents, thus
42 	 * needs to be passed from higher layer.
43 	 * (aka, inode_should_defrag())
44 	 */
45 	u32 extent_thresh;
46 };
47 
48 static int compare_inode_defrag(const struct inode_defrag *defrag1,
49 				const struct inode_defrag *defrag2)
50 {
51 	if (defrag1->root > defrag2->root)
52 		return 1;
53 	else if (defrag1->root < defrag2->root)
54 		return -1;
55 	else if (defrag1->ino > defrag2->ino)
56 		return 1;
57 	else if (defrag1->ino < defrag2->ino)
58 		return -1;
59 	else
60 		return 0;
61 }
62 
63 /*
64  * Insert a record for an inode into the defrag tree.  The lock must be held
65  * already.
66  *
67  * If you're inserting a record for an older transid than an existing record,
68  * the transid already in the tree is lowered.
69  */
70 static int btrfs_insert_inode_defrag(struct btrfs_inode *inode,
71 				     struct inode_defrag *defrag)
72 {
73 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
74 	struct inode_defrag *entry;
75 	struct rb_node **p;
76 	struct rb_node *parent = NULL;
77 	int ret;
78 
79 	p = &fs_info->defrag_inodes.rb_node;
80 	while (*p) {
81 		parent = *p;
82 		entry = rb_entry(parent, struct inode_defrag, rb_node);
83 
84 		ret = compare_inode_defrag(defrag, entry);
85 		if (ret < 0)
86 			p = &parent->rb_left;
87 		else if (ret > 0)
88 			p = &parent->rb_right;
89 		else {
90 			/*
91 			 * If we're reinserting an entry for an old defrag run,
92 			 * make sure to lower the transid of our existing
93 			 * record.
94 			 */
95 			if (defrag->transid < entry->transid)
96 				entry->transid = defrag->transid;
97 			entry->extent_thresh = min(defrag->extent_thresh,
98 						   entry->extent_thresh);
99 			return -EEXIST;
100 		}
101 	}
102 	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
103 	rb_link_node(&defrag->rb_node, parent, p);
104 	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
105 	return 0;
106 }
107 
108 static inline int need_auto_defrag(struct btrfs_fs_info *fs_info)
109 {
110 	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
111 		return 0;
112 
113 	if (btrfs_fs_closing(fs_info))
114 		return 0;
115 
116 	return 1;
117 }
118 
119 /*
120  * Insert a defrag record for this inode if auto defrag is enabled. No errors
121  * returned as they're not considered fatal.
122  */
123 void btrfs_add_inode_defrag(struct btrfs_inode *inode, u32 extent_thresh)
124 {
125 	struct btrfs_root *root = inode->root;
126 	struct btrfs_fs_info *fs_info = root->fs_info;
127 	struct inode_defrag *defrag;
128 	int ret;
129 
130 	if (!need_auto_defrag(fs_info))
131 		return;
132 
133 	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
134 		return;
135 
136 	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
137 	if (!defrag)
138 		return;
139 
140 	defrag->ino = btrfs_ino(inode);
141 	defrag->transid = btrfs_get_root_last_trans(root);
142 	defrag->root = btrfs_root_id(root);
143 	defrag->extent_thresh = extent_thresh;
144 
145 	spin_lock(&fs_info->defrag_inodes_lock);
146 	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
147 		/*
148 		 * If we set IN_DEFRAG flag and evict the inode from memory,
149 		 * and then re-read this inode, this new inode doesn't have
150 		 * IN_DEFRAG flag. At the case, we may find the existed defrag.
151 		 */
152 		ret = btrfs_insert_inode_defrag(inode, defrag);
153 		if (ret)
154 			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
155 	} else {
156 		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
157 	}
158 	spin_unlock(&fs_info->defrag_inodes_lock);
159 }
160 
161 /*
162  * Pick the defragable inode that we want, if it doesn't exist, we will get the
163  * next one.
164  */
165 static struct inode_defrag *btrfs_pick_defrag_inode(
166 			struct btrfs_fs_info *fs_info, u64 root, u64 ino)
167 {
168 	struct inode_defrag *entry = NULL;
169 	struct inode_defrag tmp;
170 	struct rb_node *p;
171 	struct rb_node *parent = NULL;
172 	int ret;
173 
174 	tmp.ino = ino;
175 	tmp.root = root;
176 
177 	spin_lock(&fs_info->defrag_inodes_lock);
178 	p = fs_info->defrag_inodes.rb_node;
179 	while (p) {
180 		parent = p;
181 		entry = rb_entry(parent, struct inode_defrag, rb_node);
182 
183 		ret = compare_inode_defrag(&tmp, entry);
184 		if (ret < 0)
185 			p = parent->rb_left;
186 		else if (ret > 0)
187 			p = parent->rb_right;
188 		else
189 			goto out;
190 	}
191 
192 	if (parent && compare_inode_defrag(&tmp, entry) > 0) {
193 		parent = rb_next(parent);
194 		if (parent)
195 			entry = rb_entry(parent, struct inode_defrag, rb_node);
196 		else
197 			entry = NULL;
198 	}
199 out:
200 	if (entry)
201 		rb_erase(parent, &fs_info->defrag_inodes);
202 	spin_unlock(&fs_info->defrag_inodes_lock);
203 	return entry;
204 }
205 
206 void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
207 {
208 	struct inode_defrag *defrag, *next;
209 
210 	spin_lock(&fs_info->defrag_inodes_lock);
211 
212 	rbtree_postorder_for_each_entry_safe(defrag, next,
213 					     &fs_info->defrag_inodes, rb_node)
214 		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
215 
216 	spin_unlock(&fs_info->defrag_inodes_lock);
217 }
218 
219 #define BTRFS_DEFRAG_BATCH	1024
220 
221 static int btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
222 				  struct inode_defrag *defrag,
223 				  struct file_ra_state *ra)
224 {
225 	struct btrfs_root *inode_root;
226 	struct inode *inode;
227 	struct btrfs_ioctl_defrag_range_args range;
228 	int ret = 0;
229 	u64 cur = 0;
230 
231 again:
232 	if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
233 		goto cleanup;
234 	if (!need_auto_defrag(fs_info))
235 		goto cleanup;
236 
237 	/* Get the inode */
238 	inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
239 	if (IS_ERR(inode_root)) {
240 		ret = PTR_ERR(inode_root);
241 		goto cleanup;
242 	}
243 
244 	inode = btrfs_iget(defrag->ino, inode_root);
245 	btrfs_put_root(inode_root);
246 	if (IS_ERR(inode)) {
247 		ret = PTR_ERR(inode);
248 		goto cleanup;
249 	}
250 
251 	if (cur >= i_size_read(inode)) {
252 		iput(inode);
253 		goto cleanup;
254 	}
255 
256 	/* Do a chunk of defrag */
257 	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
258 	memset(&range, 0, sizeof(range));
259 	range.len = (u64)-1;
260 	range.start = cur;
261 	range.extent_thresh = defrag->extent_thresh;
262 	file_ra_state_init(ra, inode->i_mapping);
263 
264 	sb_start_write(fs_info->sb);
265 	ret = btrfs_defrag_file(inode, ra, &range, defrag->transid,
266 				       BTRFS_DEFRAG_BATCH);
267 	sb_end_write(fs_info->sb);
268 	iput(inode);
269 
270 	if (ret < 0)
271 		goto cleanup;
272 
273 	cur = max(cur + fs_info->sectorsize, range.start);
274 	goto again;
275 
276 cleanup:
277 	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
278 	return ret;
279 }
280 
281 /*
282  * Run through the list of inodes in the FS that need defragging.
283  */
284 int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
285 {
286 	struct inode_defrag *defrag;
287 	u64 first_ino = 0;
288 	u64 root_objectid = 0;
289 
290 	atomic_inc(&fs_info->defrag_running);
291 	while (1) {
292 		struct file_ra_state ra = { 0 };
293 
294 		/* Pause the auto defragger. */
295 		if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
296 			break;
297 
298 		if (!need_auto_defrag(fs_info))
299 			break;
300 
301 		/* find an inode to defrag */
302 		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid, first_ino);
303 		if (!defrag) {
304 			if (root_objectid || first_ino) {
305 				root_objectid = 0;
306 				first_ino = 0;
307 				continue;
308 			} else {
309 				break;
310 			}
311 		}
312 
313 		first_ino = defrag->ino + 1;
314 		root_objectid = defrag->root;
315 
316 		btrfs_run_defrag_inode(fs_info, defrag, &ra);
317 	}
318 	atomic_dec(&fs_info->defrag_running);
319 
320 	/*
321 	 * During unmount, we use the transaction_wait queue to wait for the
322 	 * defragger to stop.
323 	 */
324 	wake_up(&fs_info->transaction_wait);
325 	return 0;
326 }
327 
328 /*
329  * Check if two blocks addresses are close, used by defrag.
330  */
331 static bool close_blocks(u64 blocknr, u64 other, u32 blocksize)
332 {
333 	if (blocknr < other && other - (blocknr + blocksize) < SZ_32K)
334 		return true;
335 	if (blocknr > other && blocknr - (other + blocksize) < SZ_32K)
336 		return true;
337 	return false;
338 }
339 
340 /*
341  * Go through all the leaves pointed to by a node and reallocate them so that
342  * disk order is close to key order.
343  */
344 static int btrfs_realloc_node(struct btrfs_trans_handle *trans,
345 			      struct btrfs_root *root,
346 			      struct extent_buffer *parent,
347 			      int start_slot, u64 *last_ret,
348 			      struct btrfs_key *progress)
349 {
350 	struct btrfs_fs_info *fs_info = root->fs_info;
351 	const u32 blocksize = fs_info->nodesize;
352 	const int end_slot = btrfs_header_nritems(parent) - 1;
353 	u64 search_start = *last_ret;
354 	u64 last_block = 0;
355 	int ret = 0;
356 	bool progress_passed = false;
357 
358 	/*
359 	 * COWing must happen through a running transaction, which always
360 	 * matches the current fs generation (it's a transaction with a state
361 	 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
362 	 * into error state to prevent the commit of any transaction.
363 	 */
364 	if (unlikely(trans->transaction != fs_info->running_transaction ||
365 		     trans->transid != fs_info->generation)) {
366 		btrfs_abort_transaction(trans, -EUCLEAN);
367 		btrfs_crit(fs_info,
368 "unexpected transaction when attempting to reallocate parent %llu for root %llu, transaction %llu running transaction %llu fs generation %llu",
369 			   parent->start, btrfs_root_id(root), trans->transid,
370 			   fs_info->running_transaction->transid,
371 			   fs_info->generation);
372 		return -EUCLEAN;
373 	}
374 
375 	if (btrfs_header_nritems(parent) <= 1)
376 		return 0;
377 
378 	for (int i = start_slot; i <= end_slot; i++) {
379 		struct extent_buffer *cur;
380 		struct btrfs_disk_key disk_key;
381 		u64 blocknr;
382 		u64 other;
383 		bool close = true;
384 
385 		btrfs_node_key(parent, &disk_key, i);
386 		if (!progress_passed && btrfs_comp_keys(&disk_key, progress) < 0)
387 			continue;
388 
389 		progress_passed = true;
390 		blocknr = btrfs_node_blockptr(parent, i);
391 		if (last_block == 0)
392 			last_block = blocknr;
393 
394 		if (i > 0) {
395 			other = btrfs_node_blockptr(parent, i - 1);
396 			close = close_blocks(blocknr, other, blocksize);
397 		}
398 		if (!close && i < end_slot) {
399 			other = btrfs_node_blockptr(parent, i + 1);
400 			close = close_blocks(blocknr, other, blocksize);
401 		}
402 		if (close) {
403 			last_block = blocknr;
404 			continue;
405 		}
406 
407 		cur = btrfs_read_node_slot(parent, i);
408 		if (IS_ERR(cur))
409 			return PTR_ERR(cur);
410 		if (search_start == 0)
411 			search_start = last_block;
412 
413 		btrfs_tree_lock(cur);
414 		ret = btrfs_force_cow_block(trans, root, cur, parent, i,
415 					    &cur, search_start,
416 					    min(16 * blocksize,
417 						(end_slot - i) * blocksize),
418 					    BTRFS_NESTING_COW);
419 		if (ret) {
420 			btrfs_tree_unlock(cur);
421 			free_extent_buffer(cur);
422 			break;
423 		}
424 		search_start = cur->start;
425 		last_block = cur->start;
426 		*last_ret = search_start;
427 		btrfs_tree_unlock(cur);
428 		free_extent_buffer(cur);
429 	}
430 	return ret;
431 }
432 
433 /*
434  * Defrag all the leaves in a given btree.
435  * Read all the leaves and try to get key order to
436  * better reflect disk order
437  */
438 
439 static int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
440 			       struct btrfs_root *root)
441 {
442 	struct btrfs_path *path = NULL;
443 	struct btrfs_key key;
444 	int ret = 0;
445 	int wret;
446 	int level;
447 	int next_key_ret = 0;
448 	u64 last_ret = 0;
449 
450 	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
451 		goto out;
452 
453 	path = btrfs_alloc_path();
454 	if (!path) {
455 		ret = -ENOMEM;
456 		goto out;
457 	}
458 
459 	level = btrfs_header_level(root->node);
460 
461 	if (level == 0)
462 		goto out;
463 
464 	if (root->defrag_progress.objectid == 0) {
465 		struct extent_buffer *root_node;
466 		u32 nritems;
467 
468 		root_node = btrfs_lock_root_node(root);
469 		nritems = btrfs_header_nritems(root_node);
470 		root->defrag_max.objectid = 0;
471 		/* from above we know this is not a leaf */
472 		btrfs_node_key_to_cpu(root_node, &root->defrag_max,
473 				      nritems - 1);
474 		btrfs_tree_unlock(root_node);
475 		free_extent_buffer(root_node);
476 		memset(&key, 0, sizeof(key));
477 	} else {
478 		memcpy(&key, &root->defrag_progress, sizeof(key));
479 	}
480 
481 	path->keep_locks = 1;
482 
483 	ret = btrfs_search_forward(root, &key, path, BTRFS_OLDEST_GENERATION);
484 	if (ret < 0)
485 		goto out;
486 	if (ret > 0) {
487 		ret = 0;
488 		goto out;
489 	}
490 	btrfs_release_path(path);
491 	/*
492 	 * We don't need a lock on a leaf. btrfs_realloc_node() will lock all
493 	 * leafs from path->nodes[1], so set lowest_level to 1 to avoid later
494 	 * a deadlock (attempting to write lock an already write locked leaf).
495 	 */
496 	path->lowest_level = 1;
497 	wret = btrfs_search_slot(trans, root, &key, path, 0, 1);
498 
499 	if (wret < 0) {
500 		ret = wret;
501 		goto out;
502 	}
503 	if (!path->nodes[1]) {
504 		ret = 0;
505 		goto out;
506 	}
507 	/*
508 	 * The node at level 1 must always be locked when our path has
509 	 * keep_locks set and lowest_level is 1, regardless of the value of
510 	 * path->slots[1].
511 	 */
512 	ASSERT(path->locks[1] != 0);
513 	ret = btrfs_realloc_node(trans, root,
514 				 path->nodes[1], 0,
515 				 &last_ret,
516 				 &root->defrag_progress);
517 	if (ret) {
518 		WARN_ON(ret == -EAGAIN);
519 		goto out;
520 	}
521 	/*
522 	 * Now that we reallocated the node we can find the next key. Note that
523 	 * btrfs_find_next_key() can release our path and do another search
524 	 * without COWing, this is because even with path->keep_locks = 1,
525 	 * btrfs_search_slot() / ctree.c:unlock_up() does not keeps a lock on a
526 	 * node when path->slots[node_level - 1] does not point to the last
527 	 * item or a slot beyond the last item (ctree.c:unlock_up()). Therefore
528 	 * we search for the next key after reallocating our node.
529 	 */
530 	path->slots[1] = btrfs_header_nritems(path->nodes[1]);
531 	next_key_ret = btrfs_find_next_key(root, path, &key, 1,
532 					   BTRFS_OLDEST_GENERATION);
533 	if (next_key_ret == 0) {
534 		memcpy(&root->defrag_progress, &key, sizeof(key));
535 		ret = -EAGAIN;
536 	}
537 out:
538 	btrfs_free_path(path);
539 	if (ret == -EAGAIN) {
540 		if (root->defrag_max.objectid > root->defrag_progress.objectid)
541 			goto done;
542 		if (root->defrag_max.type > root->defrag_progress.type)
543 			goto done;
544 		if (root->defrag_max.offset > root->defrag_progress.offset)
545 			goto done;
546 		ret = 0;
547 	}
548 done:
549 	if (ret != -EAGAIN)
550 		memset(&root->defrag_progress, 0,
551 		       sizeof(root->defrag_progress));
552 
553 	return ret;
554 }
555 
556 /*
557  * Defrag a given btree.  Every leaf in the btree is read and defragmented.
558  */
559 int btrfs_defrag_root(struct btrfs_root *root)
560 {
561 	struct btrfs_fs_info *fs_info = root->fs_info;
562 	int ret;
563 
564 	if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
565 		return 0;
566 
567 	while (1) {
568 		struct btrfs_trans_handle *trans;
569 
570 		trans = btrfs_start_transaction(root, 0);
571 		if (IS_ERR(trans)) {
572 			ret = PTR_ERR(trans);
573 			break;
574 		}
575 
576 		ret = btrfs_defrag_leaves(trans, root);
577 
578 		btrfs_end_transaction(trans);
579 		btrfs_btree_balance_dirty(fs_info);
580 		cond_resched();
581 
582 		if (btrfs_fs_closing(fs_info) || ret != -EAGAIN)
583 			break;
584 
585 		if (btrfs_defrag_cancelled(fs_info)) {
586 			btrfs_debug(fs_info, "defrag_root cancelled");
587 			ret = -EAGAIN;
588 			break;
589 		}
590 	}
591 	clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
592 	return ret;
593 }
594 
595 /*
596  * Defrag specific helper to get an extent map.
597  *
598  * Differences between this and btrfs_get_extent() are:
599  *
600  * - No extent_map will be added to inode->extent_tree
601  *   To reduce memory usage in the long run.
602  *
603  * - Extra optimization to skip file extents older than @newer_than
604  *   By using btrfs_search_forward() we can skip entire file ranges that
605  *   have extents created in past transactions, because btrfs_search_forward()
606  *   will not visit leaves and nodes with a generation smaller than given
607  *   minimal generation threshold (@newer_than).
608  *
609  * Return valid em if we find a file extent matching the requirement.
610  * Return NULL if we can not find a file extent matching the requirement.
611  *
612  * Return ERR_PTR() for error.
613  */
614 static struct extent_map *defrag_get_extent(struct btrfs_inode *inode,
615 					    u64 start, u64 newer_than)
616 {
617 	struct btrfs_root *root = inode->root;
618 	struct btrfs_file_extent_item *fi;
619 	struct btrfs_path path = { 0 };
620 	struct extent_map *em;
621 	struct btrfs_key key;
622 	u64 ino = btrfs_ino(inode);
623 	int ret;
624 
625 	em = alloc_extent_map();
626 	if (!em) {
627 		ret = -ENOMEM;
628 		goto err;
629 	}
630 
631 	key.objectid = ino;
632 	key.type = BTRFS_EXTENT_DATA_KEY;
633 	key.offset = start;
634 
635 	if (newer_than) {
636 		ret = btrfs_search_forward(root, &key, &path, newer_than);
637 		if (ret < 0)
638 			goto err;
639 		/* Can't find anything newer */
640 		if (ret > 0)
641 			goto not_found;
642 	} else {
643 		ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
644 		if (ret < 0)
645 			goto err;
646 	}
647 	if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
648 		/*
649 		 * If btrfs_search_slot() makes path to point beyond nritems,
650 		 * we should not have an empty leaf, as this inode must at
651 		 * least have its INODE_ITEM.
652 		 */
653 		ASSERT(btrfs_header_nritems(path.nodes[0]));
654 		path.slots[0] = btrfs_header_nritems(path.nodes[0]) - 1;
655 	}
656 	btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
657 	/* Perfect match, no need to go one slot back */
658 	if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY &&
659 	    key.offset == start)
660 		goto iterate;
661 
662 	/* We didn't find a perfect match, needs to go one slot back */
663 	if (path.slots[0] > 0) {
664 		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
665 		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
666 			path.slots[0]--;
667 	}
668 
669 iterate:
670 	/* Iterate through the path to find a file extent covering @start */
671 	while (true) {
672 		u64 extent_end;
673 
674 		if (path.slots[0] >= btrfs_header_nritems(path.nodes[0]))
675 			goto next;
676 
677 		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
678 
679 		/*
680 		 * We may go one slot back to INODE_REF/XATTR item, then
681 		 * need to go forward until we reach an EXTENT_DATA.
682 		 * But we should still has the correct ino as key.objectid.
683 		 */
684 		if (WARN_ON(key.objectid < ino) || key.type < BTRFS_EXTENT_DATA_KEY)
685 			goto next;
686 
687 		/* It's beyond our target range, definitely not extent found */
688 		if (key.objectid > ino || key.type > BTRFS_EXTENT_DATA_KEY)
689 			goto not_found;
690 
691 		/*
692 		 *	|	|<- File extent ->|
693 		 *	\- start
694 		 *
695 		 * This means there is a hole between start and key.offset.
696 		 */
697 		if (key.offset > start) {
698 			em->start = start;
699 			em->disk_bytenr = EXTENT_MAP_HOLE;
700 			em->disk_num_bytes = 0;
701 			em->ram_bytes = 0;
702 			em->offset = 0;
703 			em->len = key.offset - start;
704 			break;
705 		}
706 
707 		fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
708 				    struct btrfs_file_extent_item);
709 		extent_end = btrfs_file_extent_end(&path);
710 
711 		/*
712 		 *	|<- file extent ->|	|
713 		 *				\- start
714 		 *
715 		 * We haven't reached start, search next slot.
716 		 */
717 		if (extent_end <= start)
718 			goto next;
719 
720 		/* Now this extent covers @start, convert it to em */
721 		btrfs_extent_item_to_extent_map(inode, &path, fi, em);
722 		break;
723 next:
724 		ret = btrfs_next_item(root, &path);
725 		if (ret < 0)
726 			goto err;
727 		if (ret > 0)
728 			goto not_found;
729 	}
730 	btrfs_release_path(&path);
731 	return em;
732 
733 not_found:
734 	btrfs_release_path(&path);
735 	free_extent_map(em);
736 	return NULL;
737 
738 err:
739 	btrfs_release_path(&path);
740 	free_extent_map(em);
741 	return ERR_PTR(ret);
742 }
743 
744 static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
745 					       u64 newer_than, bool locked)
746 {
747 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
748 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
749 	struct extent_map *em;
750 	const u32 sectorsize = BTRFS_I(inode)->root->fs_info->sectorsize;
751 
752 	/*
753 	 * Hopefully we have this extent in the tree already, try without the
754 	 * full extent lock.
755 	 */
756 	read_lock(&em_tree->lock);
757 	em = lookup_extent_mapping(em_tree, start, sectorsize);
758 	read_unlock(&em_tree->lock);
759 
760 	/*
761 	 * We can get a merged extent, in that case, we need to re-search
762 	 * tree to get the original em for defrag.
763 	 *
764 	 * If @newer_than is 0 or em::generation < newer_than, we can trust
765 	 * this em, as either we don't care about the generation, or the
766 	 * merged extent map will be rejected anyway.
767 	 */
768 	if (em && (em->flags & EXTENT_FLAG_MERGED) &&
769 	    newer_than && em->generation >= newer_than) {
770 		free_extent_map(em);
771 		em = NULL;
772 	}
773 
774 	if (!em) {
775 		struct extent_state *cached = NULL;
776 		u64 end = start + sectorsize - 1;
777 
778 		/* Get the big lock and read metadata off disk. */
779 		if (!locked)
780 			lock_extent(io_tree, start, end, &cached);
781 		em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
782 		if (!locked)
783 			unlock_extent(io_tree, start, end, &cached);
784 
785 		if (IS_ERR(em))
786 			return NULL;
787 	}
788 
789 	return em;
790 }
791 
792 static u32 get_extent_max_capacity(const struct btrfs_fs_info *fs_info,
793 				   const struct extent_map *em)
794 {
795 	if (extent_map_is_compressed(em))
796 		return BTRFS_MAX_COMPRESSED;
797 	return fs_info->max_extent_size;
798 }
799 
800 static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
801 				     u32 extent_thresh, u64 newer_than, bool locked)
802 {
803 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
804 	struct extent_map *next;
805 	bool ret = false;
806 
807 	/* This is the last extent */
808 	if (em->start + em->len >= i_size_read(inode))
809 		return false;
810 
811 	/*
812 	 * Here we need to pass @newer_then when checking the next extent, or
813 	 * we will hit a case we mark current extent for defrag, but the next
814 	 * one will not be a target.
815 	 * This will just cause extra IO without really reducing the fragments.
816 	 */
817 	next = defrag_lookup_extent(inode, em->start + em->len, newer_than, locked);
818 	/* No more em or hole */
819 	if (!next || next->disk_bytenr >= EXTENT_MAP_LAST_BYTE)
820 		goto out;
821 	if (next->flags & EXTENT_FLAG_PREALLOC)
822 		goto out;
823 	/*
824 	 * If the next extent is at its max capacity, defragging current extent
825 	 * makes no sense, as the total number of extents won't change.
826 	 */
827 	if (next->len >= get_extent_max_capacity(fs_info, em))
828 		goto out;
829 	/* Skip older extent */
830 	if (next->generation < newer_than)
831 		goto out;
832 	/* Also check extent size */
833 	if (next->len >= extent_thresh)
834 		goto out;
835 
836 	ret = true;
837 out:
838 	free_extent_map(next);
839 	return ret;
840 }
841 
842 /*
843  * Prepare one page to be defragged.
844  *
845  * This will ensure:
846  *
847  * - Returned page is locked and has been set up properly.
848  * - No ordered extent exists in the page.
849  * - The page is uptodate.
850  *
851  * NOTE: Caller should also wait for page writeback after the cluster is
852  * prepared, here we don't do writeback wait for each page.
853  */
854 static struct folio *defrag_prepare_one_folio(struct btrfs_inode *inode, pgoff_t index)
855 {
856 	struct address_space *mapping = inode->vfs_inode.i_mapping;
857 	gfp_t mask = btrfs_alloc_write_mask(mapping);
858 	u64 page_start = (u64)index << PAGE_SHIFT;
859 	u64 page_end = page_start + PAGE_SIZE - 1;
860 	struct extent_state *cached_state = NULL;
861 	struct folio *folio;
862 	int ret;
863 
864 again:
865 	folio = __filemap_get_folio(mapping, index,
866 				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
867 	if (IS_ERR(folio))
868 		return folio;
869 
870 	/*
871 	 * Since we can defragment files opened read-only, we can encounter
872 	 * transparent huge pages here (see CONFIG_READ_ONLY_THP_FOR_FS). We
873 	 * can't do I/O using huge pages yet, so return an error for now.
874 	 * Filesystem transparent huge pages are typically only used for
875 	 * executables that explicitly enable them, so this isn't very
876 	 * restrictive.
877 	 */
878 	if (folio_test_large(folio)) {
879 		folio_unlock(folio);
880 		folio_put(folio);
881 		return ERR_PTR(-ETXTBSY);
882 	}
883 
884 	ret = set_folio_extent_mapped(folio);
885 	if (ret < 0) {
886 		folio_unlock(folio);
887 		folio_put(folio);
888 		return ERR_PTR(ret);
889 	}
890 
891 	/* Wait for any existing ordered extent in the range */
892 	while (1) {
893 		struct btrfs_ordered_extent *ordered;
894 
895 		lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
896 		ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
897 		unlock_extent(&inode->io_tree, page_start, page_end,
898 			      &cached_state);
899 		if (!ordered)
900 			break;
901 
902 		folio_unlock(folio);
903 		btrfs_start_ordered_extent(ordered);
904 		btrfs_put_ordered_extent(ordered);
905 		folio_lock(folio);
906 		/*
907 		 * We unlocked the folio above, so we need check if it was
908 		 * released or not.
909 		 */
910 		if (folio->mapping != mapping || !folio->private) {
911 			folio_unlock(folio);
912 			folio_put(folio);
913 			goto again;
914 		}
915 	}
916 
917 	/*
918 	 * Now the page range has no ordered extent any more.  Read the page to
919 	 * make it uptodate.
920 	 */
921 	if (!folio_test_uptodate(folio)) {
922 		btrfs_read_folio(NULL, folio);
923 		folio_lock(folio);
924 		if (folio->mapping != mapping || !folio->private) {
925 			folio_unlock(folio);
926 			folio_put(folio);
927 			goto again;
928 		}
929 		if (!folio_test_uptodate(folio)) {
930 			folio_unlock(folio);
931 			folio_put(folio);
932 			return ERR_PTR(-EIO);
933 		}
934 	}
935 	return folio;
936 }
937 
938 struct defrag_target_range {
939 	struct list_head list;
940 	u64 start;
941 	u64 len;
942 };
943 
944 /*
945  * Collect all valid target extents.
946  *
947  * @start:	   file offset to lookup
948  * @len:	   length to lookup
949  * @extent_thresh: file extent size threshold, any extent size >= this value
950  *		   will be ignored
951  * @newer_than:    only defrag extents newer than this value
952  * @do_compress:   whether the defrag is doing compression
953  *		   if true, @extent_thresh will be ignored and all regular
954  *		   file extents meeting @newer_than will be targets.
955  * @locked:	   if the range has already held extent lock
956  * @target_list:   list of targets file extents
957  */
958 static int defrag_collect_targets(struct btrfs_inode *inode,
959 				  u64 start, u64 len, u32 extent_thresh,
960 				  u64 newer_than, bool do_compress,
961 				  bool locked, struct list_head *target_list,
962 				  u64 *last_scanned_ret)
963 {
964 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
965 	bool last_is_target = false;
966 	u64 cur = start;
967 	int ret = 0;
968 
969 	while (cur < start + len) {
970 		struct extent_map *em;
971 		struct defrag_target_range *new;
972 		bool next_mergeable = true;
973 		u64 range_len;
974 
975 		last_is_target = false;
976 		em = defrag_lookup_extent(&inode->vfs_inode, cur, newer_than, locked);
977 		if (!em)
978 			break;
979 
980 		/*
981 		 * If the file extent is an inlined one, we may still want to
982 		 * defrag it (fallthrough) if it will cause a regular extent.
983 		 * This is for users who want to convert inline extents to
984 		 * regular ones through max_inline= mount option.
985 		 */
986 		if (em->disk_bytenr == EXTENT_MAP_INLINE &&
987 		    em->len <= inode->root->fs_info->max_inline)
988 			goto next;
989 
990 		/* Skip holes and preallocated extents. */
991 		if (em->disk_bytenr == EXTENT_MAP_HOLE ||
992 		    (em->flags & EXTENT_FLAG_PREALLOC))
993 			goto next;
994 
995 		/* Skip older extent */
996 		if (em->generation < newer_than)
997 			goto next;
998 
999 		/* This em is under writeback, no need to defrag */
1000 		if (em->generation == (u64)-1)
1001 			goto next;
1002 
1003 		/*
1004 		 * Our start offset might be in the middle of an existing extent
1005 		 * map, so take that into account.
1006 		 */
1007 		range_len = em->len - (cur - em->start);
1008 		/*
1009 		 * If this range of the extent map is already flagged for delalloc,
1010 		 * skip it, because:
1011 		 *
1012 		 * 1) We could deadlock later, when trying to reserve space for
1013 		 *    delalloc, because in case we can't immediately reserve space
1014 		 *    the flusher can start delalloc and wait for the respective
1015 		 *    ordered extents to complete. The deadlock would happen
1016 		 *    because we do the space reservation while holding the range
1017 		 *    locked, and starting writeback, or finishing an ordered
1018 		 *    extent, requires locking the range;
1019 		 *
1020 		 * 2) If there's delalloc there, it means there's dirty pages for
1021 		 *    which writeback has not started yet (we clean the delalloc
1022 		 *    flag when starting writeback and after creating an ordered
1023 		 *    extent). If we mark pages in an adjacent range for defrag,
1024 		 *    then we will have a larger contiguous range for delalloc,
1025 		 *    very likely resulting in a larger extent after writeback is
1026 		 *    triggered (except in a case of free space fragmentation).
1027 		 */
1028 		if (test_range_bit_exists(&inode->io_tree, cur, cur + range_len - 1,
1029 					  EXTENT_DELALLOC))
1030 			goto next;
1031 
1032 		/*
1033 		 * For do_compress case, we want to compress all valid file
1034 		 * extents, thus no @extent_thresh or mergeable check.
1035 		 */
1036 		if (do_compress)
1037 			goto add;
1038 
1039 		/* Skip too large extent */
1040 		if (em->len >= extent_thresh)
1041 			goto next;
1042 
1043 		/*
1044 		 * Skip extents already at its max capacity, this is mostly for
1045 		 * compressed extents, which max cap is only 128K.
1046 		 */
1047 		if (em->len >= get_extent_max_capacity(fs_info, em))
1048 			goto next;
1049 
1050 		/*
1051 		 * Normally there are no more extents after an inline one, thus
1052 		 * @next_mergeable will normally be false and not defragged.
1053 		 * So if an inline extent passed all above checks, just add it
1054 		 * for defrag, and be converted to regular extents.
1055 		 */
1056 		if (em->disk_bytenr == EXTENT_MAP_INLINE)
1057 			goto add;
1058 
1059 		next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
1060 						extent_thresh, newer_than, locked);
1061 		if (!next_mergeable) {
1062 			struct defrag_target_range *last;
1063 
1064 			/* Empty target list, no way to merge with last entry */
1065 			if (list_empty(target_list))
1066 				goto next;
1067 			last = list_entry(target_list->prev,
1068 					  struct defrag_target_range, list);
1069 			/* Not mergeable with last entry */
1070 			if (last->start + last->len != cur)
1071 				goto next;
1072 
1073 			/* Mergeable, fall through to add it to @target_list. */
1074 		}
1075 
1076 add:
1077 		last_is_target = true;
1078 		range_len = min(extent_map_end(em), start + len) - cur;
1079 		/*
1080 		 * This one is a good target, check if it can be merged into
1081 		 * last range of the target list.
1082 		 */
1083 		if (!list_empty(target_list)) {
1084 			struct defrag_target_range *last;
1085 
1086 			last = list_entry(target_list->prev,
1087 					  struct defrag_target_range, list);
1088 			ASSERT(last->start + last->len <= cur);
1089 			if (last->start + last->len == cur) {
1090 				/* Mergeable, enlarge the last entry */
1091 				last->len += range_len;
1092 				goto next;
1093 			}
1094 			/* Fall through to allocate a new entry */
1095 		}
1096 
1097 		/* Allocate new defrag_target_range */
1098 		new = kmalloc(sizeof(*new), GFP_NOFS);
1099 		if (!new) {
1100 			free_extent_map(em);
1101 			ret = -ENOMEM;
1102 			break;
1103 		}
1104 		new->start = cur;
1105 		new->len = range_len;
1106 		list_add_tail(&new->list, target_list);
1107 
1108 next:
1109 		cur = extent_map_end(em);
1110 		free_extent_map(em);
1111 	}
1112 	if (ret < 0) {
1113 		struct defrag_target_range *entry;
1114 		struct defrag_target_range *tmp;
1115 
1116 		list_for_each_entry_safe(entry, tmp, target_list, list) {
1117 			list_del_init(&entry->list);
1118 			kfree(entry);
1119 		}
1120 	}
1121 	if (!ret && last_scanned_ret) {
1122 		/*
1123 		 * If the last extent is not a target, the caller can skip to
1124 		 * the end of that extent.
1125 		 * Otherwise, we can only go the end of the specified range.
1126 		 */
1127 		if (!last_is_target)
1128 			*last_scanned_ret = max(cur, *last_scanned_ret);
1129 		else
1130 			*last_scanned_ret = max(start + len, *last_scanned_ret);
1131 	}
1132 	return ret;
1133 }
1134 
1135 #define CLUSTER_SIZE	(SZ_256K)
1136 static_assert(PAGE_ALIGNED(CLUSTER_SIZE));
1137 
1138 /*
1139  * Defrag one contiguous target range.
1140  *
1141  * @inode:	target inode
1142  * @target:	target range to defrag
1143  * @pages:	locked pages covering the defrag range
1144  * @nr_pages:	number of locked pages
1145  *
1146  * Caller should ensure:
1147  *
1148  * - Pages are prepared
1149  *   Pages should be locked, no ordered extent in the pages range,
1150  *   no writeback.
1151  *
1152  * - Extent bits are locked
1153  */
1154 static int defrag_one_locked_target(struct btrfs_inode *inode,
1155 				    struct defrag_target_range *target,
1156 				    struct folio **folios, int nr_pages,
1157 				    struct extent_state **cached_state)
1158 {
1159 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1160 	struct extent_changeset *data_reserved = NULL;
1161 	const u64 start = target->start;
1162 	const u64 len = target->len;
1163 	unsigned long last_index = (start + len - 1) >> PAGE_SHIFT;
1164 	unsigned long start_index = start >> PAGE_SHIFT;
1165 	unsigned long first_index = folios[0]->index;
1166 	int ret = 0;
1167 	int i;
1168 
1169 	ASSERT(last_index - first_index + 1 <= nr_pages);
1170 
1171 	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, start, len);
1172 	if (ret < 0)
1173 		return ret;
1174 	clear_extent_bit(&inode->io_tree, start, start + len - 1,
1175 			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
1176 			 EXTENT_DEFRAG, cached_state);
1177 	set_extent_bit(&inode->io_tree, start, start + len - 1,
1178 		       EXTENT_DELALLOC | EXTENT_DEFRAG, cached_state);
1179 
1180 	/* Update the page status */
1181 	for (i = start_index - first_index; i <= last_index - first_index; i++) {
1182 		folio_clear_checked(folios[i]);
1183 		btrfs_folio_clamp_set_dirty(fs_info, folios[i], start, len);
1184 	}
1185 	btrfs_delalloc_release_extents(inode, len);
1186 	extent_changeset_free(data_reserved);
1187 
1188 	return ret;
1189 }
1190 
1191 static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
1192 			    u32 extent_thresh, u64 newer_than, bool do_compress,
1193 			    u64 *last_scanned_ret)
1194 {
1195 	struct extent_state *cached_state = NULL;
1196 	struct defrag_target_range *entry;
1197 	struct defrag_target_range *tmp;
1198 	LIST_HEAD(target_list);
1199 	struct folio **folios;
1200 	const u32 sectorsize = inode->root->fs_info->sectorsize;
1201 	u64 last_index = (start + len - 1) >> PAGE_SHIFT;
1202 	u64 start_index = start >> PAGE_SHIFT;
1203 	unsigned int nr_pages = last_index - start_index + 1;
1204 	int ret = 0;
1205 	int i;
1206 
1207 	ASSERT(nr_pages <= CLUSTER_SIZE / PAGE_SIZE);
1208 	ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(len, sectorsize));
1209 
1210 	folios = kcalloc(nr_pages, sizeof(struct folio *), GFP_NOFS);
1211 	if (!folios)
1212 		return -ENOMEM;
1213 
1214 	/* Prepare all pages */
1215 	for (i = 0; i < nr_pages; i++) {
1216 		folios[i] = defrag_prepare_one_folio(inode, start_index + i);
1217 		if (IS_ERR(folios[i])) {
1218 			ret = PTR_ERR(folios[i]);
1219 			nr_pages = i;
1220 			goto free_folios;
1221 		}
1222 	}
1223 	for (i = 0; i < nr_pages; i++)
1224 		folio_wait_writeback(folios[i]);
1225 
1226 	/* Lock the pages range */
1227 	lock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
1228 		    (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
1229 		    &cached_state);
1230 	/*
1231 	 * Now we have a consistent view about the extent map, re-check
1232 	 * which range really needs to be defragged.
1233 	 *
1234 	 * And this time we have extent locked already, pass @locked = true
1235 	 * so that we won't relock the extent range and cause deadlock.
1236 	 */
1237 	ret = defrag_collect_targets(inode, start, len, extent_thresh,
1238 				     newer_than, do_compress, true,
1239 				     &target_list, last_scanned_ret);
1240 	if (ret < 0)
1241 		goto unlock_extent;
1242 
1243 	list_for_each_entry(entry, &target_list, list) {
1244 		ret = defrag_one_locked_target(inode, entry, folios, nr_pages,
1245 					       &cached_state);
1246 		if (ret < 0)
1247 			break;
1248 	}
1249 
1250 	list_for_each_entry_safe(entry, tmp, &target_list, list) {
1251 		list_del_init(&entry->list);
1252 		kfree(entry);
1253 	}
1254 unlock_extent:
1255 	unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
1256 		      (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
1257 		      &cached_state);
1258 free_folios:
1259 	for (i = 0; i < nr_pages; i++) {
1260 		folio_unlock(folios[i]);
1261 		folio_put(folios[i]);
1262 	}
1263 	kfree(folios);
1264 	return ret;
1265 }
1266 
1267 static int defrag_one_cluster(struct btrfs_inode *inode,
1268 			      struct file_ra_state *ra,
1269 			      u64 start, u32 len, u32 extent_thresh,
1270 			      u64 newer_than, bool do_compress,
1271 			      unsigned long *sectors_defragged,
1272 			      unsigned long max_sectors,
1273 			      u64 *last_scanned_ret)
1274 {
1275 	const u32 sectorsize = inode->root->fs_info->sectorsize;
1276 	struct defrag_target_range *entry;
1277 	struct defrag_target_range *tmp;
1278 	LIST_HEAD(target_list);
1279 	int ret;
1280 
1281 	ret = defrag_collect_targets(inode, start, len, extent_thresh,
1282 				     newer_than, do_compress, false,
1283 				     &target_list, NULL);
1284 	if (ret < 0)
1285 		goto out;
1286 
1287 	list_for_each_entry(entry, &target_list, list) {
1288 		u32 range_len = entry->len;
1289 
1290 		/* Reached or beyond the limit */
1291 		if (max_sectors && *sectors_defragged >= max_sectors) {
1292 			ret = 1;
1293 			break;
1294 		}
1295 
1296 		if (max_sectors)
1297 			range_len = min_t(u32, range_len,
1298 				(max_sectors - *sectors_defragged) * sectorsize);
1299 
1300 		/*
1301 		 * If defrag_one_range() has updated last_scanned_ret,
1302 		 * our range may already be invalid (e.g. hole punched).
1303 		 * Skip if our range is before last_scanned_ret, as there is
1304 		 * no need to defrag the range anymore.
1305 		 */
1306 		if (entry->start + range_len <= *last_scanned_ret)
1307 			continue;
1308 
1309 		page_cache_sync_readahead(inode->vfs_inode.i_mapping,
1310 				ra, NULL, entry->start >> PAGE_SHIFT,
1311 				((entry->start + range_len - 1) >> PAGE_SHIFT) -
1312 				(entry->start >> PAGE_SHIFT) + 1);
1313 		/*
1314 		 * Here we may not defrag any range if holes are punched before
1315 		 * we locked the pages.
1316 		 * But that's fine, it only affects the @sectors_defragged
1317 		 * accounting.
1318 		 */
1319 		ret = defrag_one_range(inode, entry->start, range_len,
1320 				       extent_thresh, newer_than, do_compress,
1321 				       last_scanned_ret);
1322 		if (ret < 0)
1323 			break;
1324 		*sectors_defragged += range_len >>
1325 				      inode->root->fs_info->sectorsize_bits;
1326 	}
1327 out:
1328 	list_for_each_entry_safe(entry, tmp, &target_list, list) {
1329 		list_del_init(&entry->list);
1330 		kfree(entry);
1331 	}
1332 	if (ret >= 0)
1333 		*last_scanned_ret = max(*last_scanned_ret, start + len);
1334 	return ret;
1335 }
1336 
1337 /*
1338  * Entry point to file defragmentation.
1339  *
1340  * @inode:	   inode to be defragged
1341  * @ra:		   readahead state
1342  * @range:	   defrag options including range and flags
1343  * @newer_than:	   minimum transid to defrag
1344  * @max_to_defrag: max number of sectors to be defragged, if 0, the whole inode
1345  *		   will be defragged.
1346  *
1347  * Return <0 for error.
1348  * Return >=0 for the number of sectors defragged, and range->start will be updated
1349  * to indicate the file offset where next defrag should be started at.
1350  * (Mostly for autodefrag, which sets @max_to_defrag thus we may exit early without
1351  *  defragging all the range).
1352  */
1353 int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
1354 		      struct btrfs_ioctl_defrag_range_args *range,
1355 		      u64 newer_than, unsigned long max_to_defrag)
1356 {
1357 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
1358 	unsigned long sectors_defragged = 0;
1359 	u64 isize = i_size_read(inode);
1360 	u64 cur;
1361 	u64 last_byte;
1362 	bool do_compress = (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS);
1363 	int compress_type = BTRFS_COMPRESS_ZLIB;
1364 	int ret = 0;
1365 	u32 extent_thresh = range->extent_thresh;
1366 	pgoff_t start_index;
1367 
1368 	ASSERT(ra);
1369 
1370 	if (isize == 0)
1371 		return 0;
1372 
1373 	if (range->start >= isize)
1374 		return -EINVAL;
1375 
1376 	if (do_compress) {
1377 		if (range->compress_type >= BTRFS_NR_COMPRESS_TYPES)
1378 			return -EINVAL;
1379 		if (range->compress_type)
1380 			compress_type = range->compress_type;
1381 	}
1382 
1383 	if (extent_thresh == 0)
1384 		extent_thresh = SZ_256K;
1385 
1386 	if (range->start + range->len > range->start) {
1387 		/* Got a specific range */
1388 		last_byte = min(isize, range->start + range->len);
1389 	} else {
1390 		/* Defrag until file end */
1391 		last_byte = isize;
1392 	}
1393 
1394 	/* Align the range */
1395 	cur = round_down(range->start, fs_info->sectorsize);
1396 	last_byte = round_up(last_byte, fs_info->sectorsize) - 1;
1397 
1398 	/*
1399 	 * Make writeback start from the beginning of the range, so that the
1400 	 * defrag range can be written sequentially.
1401 	 */
1402 	start_index = cur >> PAGE_SHIFT;
1403 	if (start_index < inode->i_mapping->writeback_index)
1404 		inode->i_mapping->writeback_index = start_index;
1405 
1406 	while (cur < last_byte) {
1407 		const unsigned long prev_sectors_defragged = sectors_defragged;
1408 		u64 last_scanned = cur;
1409 		u64 cluster_end;
1410 
1411 		if (btrfs_defrag_cancelled(fs_info)) {
1412 			ret = -EAGAIN;
1413 			break;
1414 		}
1415 
1416 		/* We want the cluster end at page boundary when possible */
1417 		cluster_end = (((cur >> PAGE_SHIFT) +
1418 			       (SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1;
1419 		cluster_end = min(cluster_end, last_byte);
1420 
1421 		btrfs_inode_lock(BTRFS_I(inode), 0);
1422 		if (IS_SWAPFILE(inode)) {
1423 			ret = -ETXTBSY;
1424 			btrfs_inode_unlock(BTRFS_I(inode), 0);
1425 			break;
1426 		}
1427 		if (!(inode->i_sb->s_flags & SB_ACTIVE)) {
1428 			btrfs_inode_unlock(BTRFS_I(inode), 0);
1429 			break;
1430 		}
1431 		if (do_compress)
1432 			BTRFS_I(inode)->defrag_compress = compress_type;
1433 		ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
1434 				cluster_end + 1 - cur, extent_thresh,
1435 				newer_than, do_compress, &sectors_defragged,
1436 				max_to_defrag, &last_scanned);
1437 
1438 		if (sectors_defragged > prev_sectors_defragged)
1439 			balance_dirty_pages_ratelimited(inode->i_mapping);
1440 
1441 		btrfs_inode_unlock(BTRFS_I(inode), 0);
1442 		if (ret < 0)
1443 			break;
1444 		cur = max(cluster_end + 1, last_scanned);
1445 		if (ret > 0) {
1446 			ret = 0;
1447 			break;
1448 		}
1449 		cond_resched();
1450 	}
1451 
1452 	/*
1453 	 * Update range.start for autodefrag, this will indicate where to start
1454 	 * in next run.
1455 	 */
1456 	range->start = cur;
1457 	if (sectors_defragged) {
1458 		/*
1459 		 * We have defragged some sectors, for compression case they
1460 		 * need to be written back immediately.
1461 		 */
1462 		if (range->flags & BTRFS_DEFRAG_RANGE_START_IO) {
1463 			filemap_flush(inode->i_mapping);
1464 			if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1465 				     &BTRFS_I(inode)->runtime_flags))
1466 				filemap_flush(inode->i_mapping);
1467 		}
1468 		if (range->compress_type == BTRFS_COMPRESS_LZO)
1469 			btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
1470 		else if (range->compress_type == BTRFS_COMPRESS_ZSTD)
1471 			btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
1472 		ret = sectors_defragged;
1473 	}
1474 	if (do_compress) {
1475 		btrfs_inode_lock(BTRFS_I(inode), 0);
1476 		BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
1477 		btrfs_inode_unlock(BTRFS_I(inode), 0);
1478 	}
1479 	return ret;
1480 }
1481 
1482 void __cold btrfs_auto_defrag_exit(void)
1483 {
1484 	kmem_cache_destroy(btrfs_inode_defrag_cachep);
1485 }
1486 
1487 int __init btrfs_auto_defrag_init(void)
1488 {
1489 	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
1490 					sizeof(struct inode_defrag), 0, 0, NULL);
1491 	if (!btrfs_inode_defrag_cachep)
1492 		return -ENOMEM;
1493 
1494 	return 0;
1495 }
1496