xref: /linux/fs/btrfs/extent-tree.c (revision 0eb4aaa230d725fa9b1cd758c0f17abca5597af6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/sched/signal.h>
8 #include <linux/pagemap.h>
9 #include <linux/writeback.h>
10 #include <linux/blkdev.h>
11 #include <linux/sort.h>
12 #include <linux/rcupdate.h>
13 #include <linux/kthread.h>
14 #include <linux/slab.h>
15 #include <linux/ratelimit.h>
16 #include <linux/percpu_counter.h>
17 #include <linux/lockdep.h>
18 #include <linux/crc32c.h>
19 #include "ctree.h"
20 #include "extent-tree.h"
21 #include "transaction.h"
22 #include "disk-io.h"
23 #include "print-tree.h"
24 #include "volumes.h"
25 #include "raid56.h"
26 #include "locking.h"
27 #include "free-space-cache.h"
28 #include "free-space-tree.h"
29 #include "qgroup.h"
30 #include "ref-verify.h"
31 #include "space-info.h"
32 #include "block-rsv.h"
33 #include "discard.h"
34 #include "zoned.h"
35 #include "dev-replace.h"
36 #include "fs.h"
37 #include "accessors.h"
38 #include "root-tree.h"
39 #include "file-item.h"
40 #include "orphan.h"
41 #include "tree-checker.h"
42 #include "raid-stripe-tree.h"
43 
44 #undef SCRAMBLE_DELAYED_REFS
45 
46 
47 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
48 			       struct btrfs_delayed_ref_head *href,
49 			       struct btrfs_delayed_ref_node *node,
50 			       struct btrfs_delayed_extent_op *extra_op);
51 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
52 				    struct extent_buffer *leaf,
53 				    struct btrfs_extent_item *ei);
54 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
55 				      u64 parent, u64 root_objectid,
56 				      u64 flags, u64 owner, u64 offset,
57 				      struct btrfs_key *ins, int ref_mod, u64 oref_root);
58 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
59 				     struct btrfs_delayed_ref_node *node,
60 				     struct btrfs_delayed_extent_op *extent_op);
61 static int find_next_key(struct btrfs_path *path, int level,
62 			 struct btrfs_key *key);
63 
64 static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
65 {
66 	return (cache->flags & bits) == bits;
67 }
68 
69 /* simple helper to search for an existing data extent at a given offset */
70 int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
71 {
72 	struct btrfs_root *root = btrfs_extent_root(fs_info, start);
73 	int ret;
74 	struct btrfs_key key;
75 	struct btrfs_path *path;
76 
77 	path = btrfs_alloc_path();
78 	if (!path)
79 		return -ENOMEM;
80 
81 	key.objectid = start;
82 	key.offset = len;
83 	key.type = BTRFS_EXTENT_ITEM_KEY;
84 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
85 	btrfs_free_path(path);
86 	return ret;
87 }
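/*
 * A minimal caller sketch (hypothetical, for illustration only). The helper
 * returns btrfs_search_slot()'s result unchanged, so 0 means the extent item
 * (start, BTRFS_EXTENT_ITEM_KEY, len) exists, > 0 means it does not, and
 * < 0 is an error such as -ENOMEM:
 *
 *	ret = btrfs_lookup_data_extent(fs_info, start, len);
 *	if (ret < 0)
 *		return ret;	(allocation or tree search failure)
 *	if (ret > 0)
 *		return 0;	(no such data extent, nothing to do)
 *	(ret == 0: the extent item exists in the extent tree)
 */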
88 
89 /*
90  * Helper function to look up the reference count and flags of a tree block.
91  *
92  * The delayed ref head node is used to store the sum of all the
93  * reference count modifications queued up in the rbtree. The head
94  * node may also store the extent flags to set. This way you can check
95  * what the reference count and extent flags would be once all of the
96  * delayed refs have been processed.
97  */
98 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
99 			     struct btrfs_fs_info *fs_info, u64 bytenr,
100 			     u64 offset, int metadata, u64 *refs, u64 *flags,
101 			     u64 *owning_root)
102 {
103 	struct btrfs_root *extent_root;
104 	struct btrfs_delayed_ref_head *head;
105 	struct btrfs_delayed_ref_root *delayed_refs;
106 	struct btrfs_path *path;
107 	struct btrfs_key key;
108 	u64 num_refs;
109 	u64 extent_flags;
110 	u64 owner = 0;
111 	int ret;
112 
113 	/*
114 	 * If we don't have skinny metadata, don't bother doing anything
115 	 * different
116 	 */
117 	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
118 		offset = fs_info->nodesize;
119 		metadata = 0;
120 	}
121 
122 	path = btrfs_alloc_path();
123 	if (!path)
124 		return -ENOMEM;
125 
126 search_again:
127 	key.objectid = bytenr;
128 	key.offset = offset;
129 	if (metadata)
130 		key.type = BTRFS_METADATA_ITEM_KEY;
131 	else
132 		key.type = BTRFS_EXTENT_ITEM_KEY;
133 
134 	extent_root = btrfs_extent_root(fs_info, bytenr);
135 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
136 	if (ret < 0)
137 		goto out_free;
138 
139 	if (ret > 0 && key.type == BTRFS_METADATA_ITEM_KEY) {
140 		if (path->slots[0]) {
141 			path->slots[0]--;
142 			btrfs_item_key_to_cpu(path->nodes[0], &key,
143 					      path->slots[0]);
144 			if (key.objectid == bytenr &&
145 			    key.type == BTRFS_EXTENT_ITEM_KEY &&
146 			    key.offset == fs_info->nodesize)
147 				ret = 0;
148 		}
149 	}
150 
151 	if (ret == 0) {
152 		struct extent_buffer *leaf = path->nodes[0];
153 		struct btrfs_extent_item *ei;
154 		const u32 item_size = btrfs_item_size(leaf, path->slots[0]);
155 
156 		if (unlikely(item_size < sizeof(*ei))) {
157 			ret = -EUCLEAN;
158 			btrfs_err(fs_info,
159 			"unexpected extent item size, has %u expect >= %zu",
160 				  item_size, sizeof(*ei));
161 			btrfs_abort_transaction(trans, ret);
162 			goto out_free;
163 		}
164 
165 		ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
166 		num_refs = btrfs_extent_refs(leaf, ei);
167 		if (unlikely(num_refs == 0)) {
168 			ret = -EUCLEAN;
169 			btrfs_err(fs_info,
170 		"unexpected zero reference count for extent item (%llu %u %llu)",
171 				  key.objectid, key.type, key.offset);
172 			btrfs_abort_transaction(trans, ret);
173 			goto out_free;
174 		}
175 		extent_flags = btrfs_extent_flags(leaf, ei);
176 		owner = btrfs_get_extent_owner_root(fs_info, leaf, path->slots[0]);
177 	} else {
178 		num_refs = 0;
179 		extent_flags = 0;
180 		ret = 0;
181 	}
182 
183 	delayed_refs = &trans->transaction->delayed_refs;
184 	spin_lock(&delayed_refs->lock);
185 	head = btrfs_find_delayed_ref_head(fs_info, delayed_refs, bytenr);
186 	if (head) {
187 		if (!mutex_trylock(&head->mutex)) {
188 			refcount_inc(&head->refs);
189 			spin_unlock(&delayed_refs->lock);
190 
191 			btrfs_release_path(path);
192 
193 			/*
194 			 * Mutex was contended, block until it's released and try
195 			 * again
196 			 */
197 			mutex_lock(&head->mutex);
198 			mutex_unlock(&head->mutex);
199 			btrfs_put_delayed_ref_head(head);
200 			goto search_again;
201 		}
202 		spin_lock(&head->lock);
203 		if (head->extent_op && head->extent_op->update_flags)
204 			extent_flags |= head->extent_op->flags_to_set;
205 
206 		num_refs += head->ref_mod;
207 		spin_unlock(&head->lock);
208 		mutex_unlock(&head->mutex);
209 	}
210 	spin_unlock(&delayed_refs->lock);
211 
212 	WARN_ON(num_refs == 0);
213 	if (refs)
214 		*refs = num_refs;
215 	if (flags)
216 		*flags = extent_flags;
217 	if (owning_root)
218 		*owning_root = owner;
219 out_free:
220 	btrfs_free_path(path);
221 	return ret;
222 }
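/*
 * A usage sketch (illustrative, not a caller in this file): looking up the
 * effective reference count and flags of a tree block, i.e. the on-disk
 * values plus any pending delayed ref modifications. For metadata the
 * @offset argument is the block's level (the skinny metadata key offset):
 *
 *	u64 refs, flags;
 *
 *	ret = btrfs_lookup_extent_info(trans, fs_info, eb->start,
 *				       btrfs_header_level(eb), 1,
 *				       &refs, &flags, NULL);
 *	if (ret)
 *		return ret;
 *	if (refs > 1 || (flags & BTRFS_EXTENT_FLAG_FULL_BACKREF))
 *		(block is shared or already uses full back refs)
 *
 * Any of the three out pointers may be NULL when the caller does not need
 * that value; each one is checked before being written.
 */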
223 
224 /*
225  * Back reference rules.  Back refs have three main goals:
226  *
227  * 1) differentiate between all holders of references to an extent so that
228  *    when a reference is dropped we can make sure it was a valid reference
229  *    before freeing the extent.
230  *
231  * 2) Provide enough information to quickly find the holders of an extent
232  *    if we notice a given block is corrupted or bad.
233  *
234  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
235  *    maintenance.  This is actually the same as #2, but with a slightly
236  *    different use case.
237  *
238  * There are two kinds of back refs. Implicit back refs are optimized
239  * for pointers in non-shared tree blocks. For a given pointer in a block,
240  * back refs of this kind provide information about the block's owner tree
241  * and the pointer's key. This information allows us to find the block by
242  * b-tree searching. Full back refs are for pointers in tree blocks not
243  * referenced by their owner trees. The location of the tree block is
244  * recorded in the back refs. Full back refs are actually generic, and can
245  * be used in all cases where implicit back refs are used. The major
246  * shortcoming of full back refs is their overhead. Every time a tree block
247  * gets COWed, we have to update the back ref entries for all pointers in it.
248  *
249  * For a newly allocated tree block, we use implicit back refs for
250  * pointers in it. This means most tree related operations only involve
251  * implicit back refs. For a tree block created in an old transaction, the
252  * only way to drop a reference to it is to COW it. So we can detect the
253  * event that a tree block loses its owner tree's reference and do the
254  * back refs conversion.
255  *
256  * When a tree block is COWed through a tree, there are four cases:
257  *
258  * The reference count of the block is one and the tree is the block's
259  * owner tree. Nothing to do in this case.
260  *
261  * The reference count of the block is one and the tree is not the
262  * block's owner tree. In this case, full back refs are used for pointers
263  * in the block. Remove these full back refs, add implicit back refs for
264  * every pointer in the new block.
265  *
266  * The reference count of the block is greater than one and the tree is
267  * the block's owner tree. In this case, implicit back refs are used for
268  * pointers in the block. Add full back refs for every pointer in the
269  * block, increase lower level extents' reference counts. The original
270  * implicit back refs are inherited by the new block.
271  *
272  * The reference count of the block is greater than one and the tree is
273  * not the block's owner tree. Add implicit back refs for every pointer in
274  * the new block, increase lower level extents' reference counts.
275  *
276  * Back Reference Key composing:
277  *
278  * The key objectid corresponds to the first byte in the extent,
279  * The key type is used to differentiate between types of back refs.
280  * There are different meanings of the key offset for different types
281  * of back refs.
282  *
283  * File extents can be referenced by:
284  *
285  * - multiple snapshots, subvolumes, or different generations in one subvol
286  * - different files inside a single subvolume
287  * - different offsets inside a file (bookend extents in file.c)
288  *
289  * The extent ref structure for the implicit back refs has fields for:
290  *
291  * - Objectid of the subvolume root
292  * - objectid of the file holding the reference
293  * - original offset in the file
294  * - how many bookend extents
295  *
296  * The key offset for the implicit back refs is a hash of the first
297  * three fields.
298  *
299  * The extent ref structure for the full back refs has a field for:
300  *
301  * - number of pointers in the tree leaf
302  *
303  * The key offset for the full back refs is the first byte of
304  * the tree leaf.
305  *
306  * When a file extent is allocated, the implicit back refs are used
307  * and the fields are filled in:
308  *
309  *     (root_key.objectid, inode objectid, offset in file, 1)
310  *
311  * When a file extent is removed during file truncation, we find the
312  * corresponding implicit back refs and check the following fields:
313  *
314  *     (btrfs_header_owner(leaf), inode objectid, offset in file)
315  *
316  * Btree extents can be referenced by:
317  *
318  * - Different subvolumes
319  *
320  * Both the implicit back refs and the full back refs for tree blocks
321  * consist of a key only. The key offset for the implicit back refs is
322  * the objectid of the block's owner tree. The key offset for the full
323  * back refs is the first byte of the parent block.
324  *
325  * When implicit back refs are used, information about the lowest key and
326  * the level of the tree block is required. This information is stored in
327  * the tree block info structure.
328  */
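/*
 * A concrete example of the key composition described above, with made-up
 * numbers: a data extent at bytenr 136708096 referenced by root 257,
 * inode 261, file offset 0 gets the implicit back ref item key
 *
 *	(136708096, BTRFS_EXTENT_DATA_REF_KEY,
 *	 hash_extent_data_ref(257, 261, 0))
 *
 * while the full back ref, if the referencing leaf starts at bytenr
 * 30408704, gets the key
 *
 *	(136708096, BTRFS_SHARED_DATA_REF_KEY, 30408704)
 *
 * This is exactly the key layout that lookup_extent_data_ref() below
 * searches for.
 */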
329 
330 /*
331  * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
332  * is_data == BTRFS_REF_TYPE_DATA, data type is required,
333  * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
334  */
335 int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
336 				     struct btrfs_extent_inline_ref *iref,
337 				     enum btrfs_inline_ref_type is_data)
338 {
339 	struct btrfs_fs_info *fs_info = eb->fs_info;
340 	int type = btrfs_extent_inline_ref_type(eb, iref);
341 	u64 offset = btrfs_extent_inline_ref_offset(eb, iref);
342 
343 	if (type == BTRFS_EXTENT_OWNER_REF_KEY) {
344 		ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
345 		return type;
346 	}
347 
348 	if (type == BTRFS_TREE_BLOCK_REF_KEY ||
349 	    type == BTRFS_SHARED_BLOCK_REF_KEY ||
350 	    type == BTRFS_SHARED_DATA_REF_KEY ||
351 	    type == BTRFS_EXTENT_DATA_REF_KEY) {
352 		if (is_data == BTRFS_REF_TYPE_BLOCK) {
353 			if (type == BTRFS_TREE_BLOCK_REF_KEY)
354 				return type;
355 			if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
356 				ASSERT(fs_info);
357 				/*
358 				 * Every shared ref has a parent tree block,
359 				 * which must be aligned to the sector size.
360 				 */
361 				if (offset && IS_ALIGNED(offset, fs_info->sectorsize))
362 					return type;
363 			}
364 		} else if (is_data == BTRFS_REF_TYPE_DATA) {
365 			if (type == BTRFS_EXTENT_DATA_REF_KEY)
366 				return type;
367 			if (type == BTRFS_SHARED_DATA_REF_KEY) {
368 				ASSERT(fs_info);
369 				/*
370 				 * Every shared ref has a parent tree block,
371 				 * which must be aligned to the sector size.
372 				 */
373 				if (offset &&
374 				    IS_ALIGNED(offset, fs_info->sectorsize))
375 					return type;
376 			}
377 		} else {
378 			ASSERT(is_data == BTRFS_REF_TYPE_ANY);
379 			return type;
380 		}
381 	}
382 
383 	WARN_ON(1);
384 	btrfs_print_leaf(eb);
385 	btrfs_err(fs_info,
386 		  "eb %llu iref 0x%lx invalid extent inline ref type %d",
387 		  eb->start, (unsigned long)iref, type);
388 
389 	return BTRFS_REF_TYPE_INVALID;
390 }
391 
392 u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
393 {
394 	u32 high_crc = ~(u32)0;
395 	u32 low_crc = ~(u32)0;
396 	__le64 lenum;
397 
398 	lenum = cpu_to_le64(root_objectid);
399 	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
400 	lenum = cpu_to_le64(owner);
401 	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
402 	lenum = cpu_to_le64(offset);
403 	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
404 
405 	return ((u64)high_crc << 31) ^ (u64)low_crc;
406 }
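/*
 * Note on the combination above: the root CRC is shifted left by 31 (not
 * 32) bits before being XORed with the owner/offset CRC, so bit 31 of the
 * two halves overlaps:
 *
 *	bits 62..31: crc32c(root_objectid)
 *	bits 31..0:  crc32c(owner, offset), bit 31 XORed with the low bit
 *		     of the root CRC
 *
 * The result only needs to be a stable, reasonably distributed key offset;
 * collisions are handled explicitly by insert_extent_data_ref() below.
 */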
407 
408 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
409 				     struct btrfs_extent_data_ref *ref)
410 {
411 	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
412 				    btrfs_extent_data_ref_objectid(leaf, ref),
413 				    btrfs_extent_data_ref_offset(leaf, ref));
414 }
415 
416 static int match_extent_data_ref(struct extent_buffer *leaf,
417 				 struct btrfs_extent_data_ref *ref,
418 				 u64 root_objectid, u64 owner, u64 offset)
419 {
420 	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
421 	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
422 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
423 		return 0;
424 	return 1;
425 }
426 
427 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
428 					   struct btrfs_path *path,
429 					   u64 bytenr, u64 parent,
430 					   u64 root_objectid,
431 					   u64 owner, u64 offset)
432 {
433 	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
434 	struct btrfs_key key;
435 	struct btrfs_extent_data_ref *ref;
436 	struct extent_buffer *leaf;
437 	u32 nritems;
438 	int recow;
439 	int ret;
440 
441 	key.objectid = bytenr;
442 	if (parent) {
443 		key.type = BTRFS_SHARED_DATA_REF_KEY;
444 		key.offset = parent;
445 	} else {
446 		key.type = BTRFS_EXTENT_DATA_REF_KEY;
447 		key.offset = hash_extent_data_ref(root_objectid,
448 						  owner, offset);
449 	}
450 again:
451 	recow = 0;
452 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
453 	if (ret < 0)
454 		return ret;
455 
456 	if (parent) {
457 		if (ret)
458 			return -ENOENT;
459 		return 0;
460 	}
461 
462 	ret = -ENOENT;
463 	leaf = path->nodes[0];
464 	nritems = btrfs_header_nritems(leaf);
465 	while (1) {
466 		if (path->slots[0] >= nritems) {
467 			ret = btrfs_next_leaf(root, path);
468 			if (ret) {
469 				if (ret > 0)
470 					return -ENOENT;
471 				return ret;
472 			}
473 
474 			leaf = path->nodes[0];
475 			nritems = btrfs_header_nritems(leaf);
476 			recow = 1;
477 		}
478 
479 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
480 		if (key.objectid != bytenr ||
481 		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
482 			goto fail;
483 
484 		ref = btrfs_item_ptr(leaf, path->slots[0],
485 				     struct btrfs_extent_data_ref);
486 
487 		if (match_extent_data_ref(leaf, ref, root_objectid,
488 					  owner, offset)) {
489 			if (recow) {
490 				btrfs_release_path(path);
491 				goto again;
492 			}
493 			ret = 0;
494 			break;
495 		}
496 		path->slots[0]++;
497 	}
498 fail:
499 	return ret;
500 }
501 
502 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
503 					   struct btrfs_path *path,
504 					   struct btrfs_delayed_ref_node *node,
505 					   u64 bytenr)
506 {
507 	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
508 	struct btrfs_key key;
509 	struct extent_buffer *leaf;
510 	u64 owner = btrfs_delayed_ref_owner(node);
511 	u64 offset = btrfs_delayed_ref_offset(node);
512 	u32 size;
513 	u32 num_refs;
514 	int ret;
515 
516 	key.objectid = bytenr;
517 	if (node->parent) {
518 		key.type = BTRFS_SHARED_DATA_REF_KEY;
519 		key.offset = node->parent;
520 		size = sizeof(struct btrfs_shared_data_ref);
521 	} else {
522 		key.type = BTRFS_EXTENT_DATA_REF_KEY;
523 		key.offset = hash_extent_data_ref(node->ref_root, owner, offset);
524 		size = sizeof(struct btrfs_extent_data_ref);
525 	}
526 
527 	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
528 	if (ret && ret != -EEXIST)
529 		goto fail;
530 
531 	leaf = path->nodes[0];
532 	if (node->parent) {
533 		struct btrfs_shared_data_ref *ref;
534 		ref = btrfs_item_ptr(leaf, path->slots[0],
535 				     struct btrfs_shared_data_ref);
536 		if (ret == 0) {
537 			btrfs_set_shared_data_ref_count(leaf, ref, node->ref_mod);
538 		} else {
539 			num_refs = btrfs_shared_data_ref_count(leaf, ref);
540 			num_refs += node->ref_mod;
541 			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
542 		}
543 	} else {
544 		struct btrfs_extent_data_ref *ref;
545 		while (ret == -EEXIST) {
546 			ref = btrfs_item_ptr(leaf, path->slots[0],
547 					     struct btrfs_extent_data_ref);
548 			if (match_extent_data_ref(leaf, ref, node->ref_root,
549 						  owner, offset))
550 				break;
551 			btrfs_release_path(path);
552 			key.offset++;
553 			ret = btrfs_insert_empty_item(trans, root, path, &key,
554 						      size);
555 			if (ret && ret != -EEXIST)
556 				goto fail;
557 
558 			leaf = path->nodes[0];
559 		}
560 		ref = btrfs_item_ptr(leaf, path->slots[0],
561 				     struct btrfs_extent_data_ref);
562 		if (ret == 0) {
563 			btrfs_set_extent_data_ref_root(leaf, ref, node->ref_root);
564 			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
565 			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
566 			btrfs_set_extent_data_ref_count(leaf, ref, node->ref_mod);
567 		} else {
568 			num_refs = btrfs_extent_data_ref_count(leaf, ref);
569 			num_refs += node->ref_mod;
570 			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
571 		}
572 	}
573 	ret = 0;
574 fail:
575 	btrfs_release_path(path);
576 	return ret;
577 }
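/*
 * A worked example of the -EEXIST loop above (values hypothetical): suppose
 * hash_extent_data_ref(root, owner, offset) is 0x1000, but that key offset
 * is already occupied by a ref for a different (root, owner, offset) triple
 * hashing to the same value. The loop releases the path, bumps key.offset
 * to 0x1001 and retries, so colliding refs for the same extent end up as
 * consecutive keyed items. lookup_extent_data_ref() above mirrors this by
 * scanning forward while the objectid and type still match rather than
 * trusting a single hash slot.
 */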
578 
579 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
580 					   struct btrfs_root *root,
581 					   struct btrfs_path *path,
582 					   int refs_to_drop)
583 {
584 	struct btrfs_key key;
585 	struct btrfs_extent_data_ref *ref1 = NULL;
586 	struct btrfs_shared_data_ref *ref2 = NULL;
587 	struct extent_buffer *leaf;
588 	u32 num_refs = 0;
589 	int ret = 0;
590 
591 	leaf = path->nodes[0];
592 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
593 
594 	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
595 		ref1 = btrfs_item_ptr(leaf, path->slots[0],
596 				      struct btrfs_extent_data_ref);
597 		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
598 	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
599 		ref2 = btrfs_item_ptr(leaf, path->slots[0],
600 				      struct btrfs_shared_data_ref);
601 		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
602 	} else {
603 		btrfs_err(trans->fs_info,
604 			  "unrecognized backref key (%llu %u %llu)",
605 			  key.objectid, key.type, key.offset);
606 		btrfs_abort_transaction(trans, -EUCLEAN);
607 		return -EUCLEAN;
608 	}
609 
610 	BUG_ON(num_refs < refs_to_drop);
611 	num_refs -= refs_to_drop;
612 
613 	if (num_refs == 0) {
614 		ret = btrfs_del_item(trans, root, path);
615 	} else {
616 		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
617 			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
618 		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
619 			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
620 	}
621 	return ret;
622 }
623 
624 static noinline u32 extent_data_ref_count(struct btrfs_path *path,
625 					  struct btrfs_extent_inline_ref *iref)
626 {
627 	struct btrfs_key key;
628 	struct extent_buffer *leaf;
629 	struct btrfs_extent_data_ref *ref1;
630 	struct btrfs_shared_data_ref *ref2;
631 	u32 num_refs = 0;
632 	int type;
633 
634 	leaf = path->nodes[0];
635 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
636 
637 	if (iref) {
638 		/*
639 		 * If type is invalid, we should have bailed out earlier than
640 		 * this call.
641 		 */
642 		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
643 		ASSERT(type != BTRFS_REF_TYPE_INVALID);
644 		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
645 			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
646 			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
647 		} else {
648 			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
649 			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
650 		}
651 	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
652 		ref1 = btrfs_item_ptr(leaf, path->slots[0],
653 				      struct btrfs_extent_data_ref);
654 		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
655 	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
656 		ref2 = btrfs_item_ptr(leaf, path->slots[0],
657 				      struct btrfs_shared_data_ref);
658 		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
659 	} else {
660 		WARN_ON(1);
661 	}
662 	return num_refs;
663 }
664 
665 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
666 					  struct btrfs_path *path,
667 					  u64 bytenr, u64 parent,
668 					  u64 root_objectid)
669 {
670 	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
671 	struct btrfs_key key;
672 	int ret;
673 
674 	key.objectid = bytenr;
675 	if (parent) {
676 		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
677 		key.offset = parent;
678 	} else {
679 		key.type = BTRFS_TREE_BLOCK_REF_KEY;
680 		key.offset = root_objectid;
681 	}
682 
683 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
684 	if (ret > 0)
685 		ret = -ENOENT;
686 	return ret;
687 }
688 
689 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
690 					  struct btrfs_path *path,
691 					  struct btrfs_delayed_ref_node *node,
692 					  u64 bytenr)
693 {
694 	struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
695 	struct btrfs_key key;
696 	int ret;
697 
698 	key.objectid = bytenr;
699 	if (node->parent) {
700 		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
701 		key.offset = node->parent;
702 	} else {
703 		key.type = BTRFS_TREE_BLOCK_REF_KEY;
704 		key.offset = node->ref_root;
705 	}
706 
707 	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
708 	btrfs_release_path(path);
709 	return ret;
710 }
711 
712 static inline int extent_ref_type(u64 parent, u64 owner)
713 {
714 	int type;
715 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
716 		if (parent > 0)
717 			type = BTRFS_SHARED_BLOCK_REF_KEY;
718 		else
719 			type = BTRFS_TREE_BLOCK_REF_KEY;
720 	} else {
721 		if (parent > 0)
722 			type = BTRFS_SHARED_DATA_REF_KEY;
723 		else
724 			type = BTRFS_EXTENT_DATA_REF_KEY;
725 	}
726 	return type;
727 }
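/*
 * The mapping above, spelled out. An owner below BTRFS_FIRST_FREE_OBJECTID
 * is a tree root objectid (a tree block), anything else is a data extent:
 *
 *	tree block, parent == 0  ->  BTRFS_TREE_BLOCK_REF_KEY   (implicit)
 *	tree block, parent  > 0  ->  BTRFS_SHARED_BLOCK_REF_KEY (full)
 *	data,       parent == 0  ->  BTRFS_EXTENT_DATA_REF_KEY  (implicit)
 *	data,       parent  > 0  ->  BTRFS_SHARED_DATA_REF_KEY  (full)
 */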
728 
729 static int find_next_key(struct btrfs_path *path, int level,
730 			 struct btrfs_key *key)
731 
732 {
733 	for (; level < BTRFS_MAX_LEVEL; level++) {
734 		if (!path->nodes[level])
735 			break;
736 		if (path->slots[level] + 1 >=
737 		    btrfs_header_nritems(path->nodes[level]))
738 			continue;
739 		if (level == 0)
740 			btrfs_item_key_to_cpu(path->nodes[level], key,
741 					      path->slots[level] + 1);
742 		else
743 			btrfs_node_key_to_cpu(path->nodes[level], key,
744 					      path->slots[level] + 1);
745 		return 0;
746 	}
747 	return 1;
748 }
749 
750 /*
751  * Look for an inline back ref. If the back ref is found, *ref_ret is set
752  * to the address of the inline back ref, and 0 is returned.
753  *
754  * If the back ref isn't found, *ref_ret is set to the address where it
755  * should be inserted, and -ENOENT is returned.
756  *
757  * If insert is true and there are too many inline back refs, the path
758  * points to the extent item, and -EAGAIN is returned.
759  *
760  * NOTE: inline back refs are ordered in the same way that back ref
761  *	 items in the tree are ordered.
762  */
763 static noinline_for_stack
764 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
765 				 struct btrfs_path *path,
766 				 struct btrfs_extent_inline_ref **ref_ret,
767 				 u64 bytenr, u64 num_bytes,
768 				 u64 parent, u64 root_objectid,
769 				 u64 owner, u64 offset, int insert)
770 {
771 	struct btrfs_fs_info *fs_info = trans->fs_info;
772 	struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr);
773 	struct btrfs_key key;
774 	struct extent_buffer *leaf;
775 	struct btrfs_extent_item *ei;
776 	struct btrfs_extent_inline_ref *iref;
777 	u64 flags;
778 	u64 item_size;
779 	unsigned long ptr;
780 	unsigned long end;
781 	int extra_size;
782 	int type;
783 	int want;
784 	int ret;
785 	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
786 	int needed;
787 
788 	key.objectid = bytenr;
789 	key.type = BTRFS_EXTENT_ITEM_KEY;
790 	key.offset = num_bytes;
791 
792 	want = extent_ref_type(parent, owner);
793 	if (insert) {
794 		extra_size = btrfs_extent_inline_ref_size(want);
795 		path->search_for_extension = 1;
796 	} else
797 		extra_size = -1;
798 
799 	/*
800 	 * Owner is our level, so we can just add one to get the level for the
801 	 * block we are interested in.
802 	 */
803 	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
804 		key.type = BTRFS_METADATA_ITEM_KEY;
805 		key.offset = owner;
806 	}
807 
808 again:
809 	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
810 	if (ret < 0)
811 		goto out;
812 
813 	/*
814 	 * We may be a newly converted file system which still has the old fat
815 	 * extent entries for metadata, so try and see if we have one of those.
816 	 */
817 	if (ret > 0 && skinny_metadata) {
818 		skinny_metadata = false;
819 		if (path->slots[0]) {
820 			path->slots[0]--;
821 			btrfs_item_key_to_cpu(path->nodes[0], &key,
822 					      path->slots[0]);
823 			if (key.objectid == bytenr &&
824 			    key.type == BTRFS_EXTENT_ITEM_KEY &&
825 			    key.offset == num_bytes)
826 				ret = 0;
827 		}
828 		if (ret) {
829 			key.objectid = bytenr;
830 			key.type = BTRFS_EXTENT_ITEM_KEY;
831 			key.offset = num_bytes;
832 			btrfs_release_path(path);
833 			goto again;
834 		}
835 	}
836 
837 	if (ret && !insert) {
838 		ret = -ENOENT;
839 		goto out;
840 	} else if (WARN_ON(ret)) {
841 		btrfs_print_leaf(path->nodes[0]);
842 		btrfs_err(fs_info,
843 "extent item not found for insert, bytenr %llu num_bytes %llu parent %llu root_objectid %llu owner %llu offset %llu",
844 			  bytenr, num_bytes, parent, root_objectid, owner,
845 			  offset);
846 		ret = -EUCLEAN;
847 		goto out;
848 	}
849 
850 	leaf = path->nodes[0];
851 	item_size = btrfs_item_size(leaf, path->slots[0]);
852 	if (unlikely(item_size < sizeof(*ei))) {
853 		ret = -EUCLEAN;
854 		btrfs_err(fs_info,
855 			  "unexpected extent item size, has %llu expect >= %zu",
856 			  item_size, sizeof(*ei));
857 		btrfs_abort_transaction(trans, ret);
858 		goto out;
859 	}
860 
861 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
862 	flags = btrfs_extent_flags(leaf, ei);
863 
864 	ptr = (unsigned long)(ei + 1);
865 	end = (unsigned long)ei + item_size;
866 
867 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
868 		ptr += sizeof(struct btrfs_tree_block_info);
869 		BUG_ON(ptr > end);
870 	}
871 
872 	if (owner >= BTRFS_FIRST_FREE_OBJECTID)
873 		needed = BTRFS_REF_TYPE_DATA;
874 	else
875 		needed = BTRFS_REF_TYPE_BLOCK;
876 
877 	ret = -ENOENT;
878 	while (ptr < end) {
879 		iref = (struct btrfs_extent_inline_ref *)ptr;
880 		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
881 		if (type == BTRFS_EXTENT_OWNER_REF_KEY) {
882 			ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
883 			ptr += btrfs_extent_inline_ref_size(type);
884 			continue;
885 		}
886 		if (type == BTRFS_REF_TYPE_INVALID) {
887 			ret = -EUCLEAN;
888 			goto out;
889 		}
890 
891 		if (want < type)
892 			break;
893 		if (want > type) {
894 			ptr += btrfs_extent_inline_ref_size(type);
895 			continue;
896 		}
897 
898 		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
899 			struct btrfs_extent_data_ref *dref;
900 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
901 			if (match_extent_data_ref(leaf, dref, root_objectid,
902 						  owner, offset)) {
903 				ret = 0;
904 				break;
905 			}
906 			if (hash_extent_data_ref_item(leaf, dref) <
907 			    hash_extent_data_ref(root_objectid, owner, offset))
908 				break;
909 		} else {
910 			u64 ref_offset;
911 			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
912 			if (parent > 0) {
913 				if (parent == ref_offset) {
914 					ret = 0;
915 					break;
916 				}
917 				if (ref_offset < parent)
918 					break;
919 			} else {
920 				if (root_objectid == ref_offset) {
921 					ret = 0;
922 					break;
923 				}
924 				if (ref_offset < root_objectid)
925 					break;
926 			}
927 		}
928 		ptr += btrfs_extent_inline_ref_size(type);
929 	}
930 
931 	if (unlikely(ptr > end)) {
932 		ret = -EUCLEAN;
933 		btrfs_print_leaf(path->nodes[0]);
934 		btrfs_crit(fs_info,
935 "overrun extent record at slot %d while looking for inline extent for root %llu owner %llu offset %llu parent %llu",
936 			   path->slots[0], root_objectid, owner, offset, parent);
937 		goto out;
938 	}
939 
940 	if (ret == -ENOENT && insert) {
941 		if (item_size + extra_size >=
942 		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
943 			ret = -EAGAIN;
944 			goto out;
945 		}
946 
947 		if (path->slots[0] + 1 < btrfs_header_nritems(path->nodes[0])) {
948 			struct btrfs_key tmp_key;
949 
950 			btrfs_item_key_to_cpu(path->nodes[0], &tmp_key, path->slots[0] + 1);
951 			if (tmp_key.objectid == bytenr &&
952 			    tmp_key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
953 				ret = -EAGAIN;
954 				goto out;
955 			}
956 			goto out_no_entry;
957 		}
958 
959 		if (!path->keep_locks) {
960 			btrfs_release_path(path);
961 			path->keep_locks = 1;
962 			goto again;
963 		}
964 
965 		/*
966 		 * To add a new inline back ref, we have to make sure
967 		 * there is no corresponding back ref item.
968 		 * For simplicity, we just do not add a new inline back
969 		 * ref if there is any kind of item for this block.
970 		 */
971 		if (find_next_key(path, 0, &key) == 0 &&
972 		    key.objectid == bytenr &&
973 		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
974 			ret = -EAGAIN;
975 			goto out;
976 		}
977 	}
978 out_no_entry:
979 	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
980 out:
981 	if (path->keep_locks) {
982 		path->keep_locks = 0;
983 		btrfs_unlock_up_safe(path, 1);
984 	}
985 	if (insert)
986 		path->search_for_extension = 0;
987 	return ret;
988 }
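/*
 * A condensed sketch of the caller contract; insert_inline_extent_backref()
 * and __btrfs_inc_extent_ref() below follow it exactly:
 *
 *	ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
 *					   num_bytes, parent, root_objectid,
 *					   owner, offset, 1);
 *	if (ret == 0)
 *		(iref points at the existing inline ref: update it)
 *	else if (ret == -ENOENT)
 *		(iref points at the insert position inside the item)
 *	else if (ret == -EAGAIN)
 *		(no room inline: fall back to a keyed backref item)
 */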
989 
990 /*
991  * helper to add a new inline back ref
992  */
993 static noinline_for_stack
994 void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
995 				 struct btrfs_path *path,
996 				 struct btrfs_extent_inline_ref *iref,
997 				 u64 parent, u64 root_objectid,
998 				 u64 owner, u64 offset, int refs_to_add,
999 				 struct btrfs_delayed_extent_op *extent_op)
1000 {
1001 	struct extent_buffer *leaf;
1002 	struct btrfs_extent_item *ei;
1003 	unsigned long ptr;
1004 	unsigned long end;
1005 	unsigned long item_offset;
1006 	u64 refs;
1007 	int size;
1008 	int type;
1009 
1010 	leaf = path->nodes[0];
1011 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1012 	item_offset = (unsigned long)iref - (unsigned long)ei;
1013 
1014 	type = extent_ref_type(parent, owner);
1015 	size = btrfs_extent_inline_ref_size(type);
1016 
1017 	btrfs_extend_item(trans, path, size);
1018 
1019 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1020 	refs = btrfs_extent_refs(leaf, ei);
1021 	refs += refs_to_add;
1022 	btrfs_set_extent_refs(leaf, ei, refs);
1023 	if (extent_op)
1024 		__run_delayed_extent_op(extent_op, leaf, ei);
1025 
1026 	ptr = (unsigned long)ei + item_offset;
1027 	end = (unsigned long)ei + btrfs_item_size(leaf, path->slots[0]);
1028 	if (ptr < end - size)
1029 		memmove_extent_buffer(leaf, ptr + size, ptr,
1030 				      end - size - ptr);
1031 
1032 	iref = (struct btrfs_extent_inline_ref *)ptr;
1033 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
1034 	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1035 		struct btrfs_extent_data_ref *dref;
1036 		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1037 		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1038 		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1039 		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1040 		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1041 	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1042 		struct btrfs_shared_data_ref *sref;
1043 		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1044 		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1045 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1046 	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1047 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1048 	} else {
1049 		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1050 	}
1051 }
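/*
 * A sketch of the insertion done above: the item is first grown by "size"
 * bytes, then everything from the insert position to the old end of the
 * item is shifted right to open a hole for the new inline ref:
 *
 *	before:  [extent_item][ref A][ref B][ref C]
 *	                             ^ ptr (insert position)
 *	after:   [extent_item][ref A][ new ][ref B][ref C]
 *
 * update_inline_extent_backref() performs the inverse memmove when an
 * inline ref is dropped and the item is truncated.
 */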
1052 
1053 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1054 				 struct btrfs_path *path,
1055 				 struct btrfs_extent_inline_ref **ref_ret,
1056 				 u64 bytenr, u64 num_bytes, u64 parent,
1057 				 u64 root_objectid, u64 owner, u64 offset)
1058 {
1059 	int ret;
1060 
1061 	ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr,
1062 					   num_bytes, parent, root_objectid,
1063 					   owner, offset, 0);
1064 	if (ret != -ENOENT)
1065 		return ret;
1066 
1067 	btrfs_release_path(path);
1068 	*ref_ret = NULL;
1069 
1070 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1071 		ret = lookup_tree_block_ref(trans, path, bytenr, parent,
1072 					    root_objectid);
1073 	} else {
1074 		ret = lookup_extent_data_ref(trans, path, bytenr, parent,
1075 					     root_objectid, owner, offset);
1076 	}
1077 	return ret;
1078 }
1079 
1080 /*
1081  * helper to update/remove an inline back ref
1082  */
1083 static noinline_for_stack int update_inline_extent_backref(
1084 				  struct btrfs_trans_handle *trans,
1085 				  struct btrfs_path *path,
1086 				  struct btrfs_extent_inline_ref *iref,
1087 				  int refs_to_mod,
1088 				  struct btrfs_delayed_extent_op *extent_op)
1089 {
1090 	struct extent_buffer *leaf = path->nodes[0];
1091 	struct btrfs_fs_info *fs_info = leaf->fs_info;
1092 	struct btrfs_extent_item *ei;
1093 	struct btrfs_extent_data_ref *dref = NULL;
1094 	struct btrfs_shared_data_ref *sref = NULL;
1095 	unsigned long ptr;
1096 	unsigned long end;
1097 	u32 item_size;
1098 	int size;
1099 	int type;
1100 	u64 refs;
1101 
1102 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1103 	refs = btrfs_extent_refs(leaf, ei);
1104 	if (unlikely(refs_to_mod < 0 && refs + refs_to_mod <= 0)) {
1105 		struct btrfs_key key;
1106 		u32 extent_size;
1107 
1108 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1109 		if (key.type == BTRFS_METADATA_ITEM_KEY)
1110 			extent_size = fs_info->nodesize;
1111 		else
1112 			extent_size = key.offset;
1113 		btrfs_print_leaf(leaf);
1114 		btrfs_err(fs_info,
1115 	"invalid refs_to_mod for extent %llu num_bytes %u, has %d expect >= -%llu",
1116 			  key.objectid, extent_size, refs_to_mod, refs);
1117 		return -EUCLEAN;
1118 	}
1119 	refs += refs_to_mod;
1120 	btrfs_set_extent_refs(leaf, ei, refs);
1121 	if (extent_op)
1122 		__run_delayed_extent_op(extent_op, leaf, ei);
1123 
1124 	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
1125 	/*
1126 	 * Function btrfs_get_extent_inline_ref_type() has already printed
1127 	 * error messages.
1128 	 */
1129 	if (unlikely(type == BTRFS_REF_TYPE_INVALID))
1130 		return -EUCLEAN;
1131 
1132 	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1133 		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1134 		refs = btrfs_extent_data_ref_count(leaf, dref);
1135 	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1136 		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1137 		refs = btrfs_shared_data_ref_count(leaf, sref);
1138 	} else {
1139 		refs = 1;
1140 		/*
1141 		 * For a tree block we can only drop one ref, and tree
1142 		 * blocks should not have refs > 1.
1143 		 *
1144 		 * Furthermore, if we're inserting a new inline backref, we
1145 		 * won't reach this path either; that is handled by
1146 		 * setup_inline_extent_backref().
1147 		 */
1148 		if (unlikely(refs_to_mod != -1)) {
1149 			struct btrfs_key key;
1150 
1151 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1152 
1153 			btrfs_print_leaf(leaf);
1154 			btrfs_err(fs_info,
1155 			"invalid refs_to_mod for tree block %llu, has %d expect -1",
1156 				  key.objectid, refs_to_mod);
1157 			return -EUCLEAN;
1158 		}
1159 	}
1160 
1161 	if (unlikely(refs_to_mod < 0 && refs < -refs_to_mod)) {
1162 		struct btrfs_key key;
1163 		u32 extent_size;
1164 
1165 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1166 		if (key.type == BTRFS_METADATA_ITEM_KEY)
1167 			extent_size = fs_info->nodesize;
1168 		else
1169 			extent_size = key.offset;
1170 		btrfs_print_leaf(leaf);
1171 		btrfs_err(fs_info,
1172 "invalid refs_to_mod for backref entry, iref %lu extent %llu num_bytes %u, has %d expect >= -%llu",
1173 			  (unsigned long)iref, key.objectid, extent_size,
1174 			  refs_to_mod, refs);
1175 		return -EUCLEAN;
1176 	}
1177 	refs += refs_to_mod;
1178 
1179 	if (refs > 0) {
1180 		if (type == BTRFS_EXTENT_DATA_REF_KEY)
1181 			btrfs_set_extent_data_ref_count(leaf, dref, refs);
1182 		else
1183 			btrfs_set_shared_data_ref_count(leaf, sref, refs);
1184 	} else {
1185 		size =  btrfs_extent_inline_ref_size(type);
1186 		item_size = btrfs_item_size(leaf, path->slots[0]);
1187 		ptr = (unsigned long)iref;
1188 		end = (unsigned long)ei + item_size;
1189 		if (ptr + size < end)
1190 			memmove_extent_buffer(leaf, ptr, ptr + size,
1191 					      end - ptr - size);
1192 		item_size -= size;
1193 		btrfs_truncate_item(trans, path, item_size, 1);
1194 	}
1195 	return 0;
1196 }
1197 
1198 static noinline_for_stack
1199 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1200 				 struct btrfs_path *path,
1201 				 u64 bytenr, u64 num_bytes, u64 parent,
1202 				 u64 root_objectid, u64 owner,
1203 				 u64 offset, int refs_to_add,
1204 				 struct btrfs_delayed_extent_op *extent_op)
1205 {
1206 	struct btrfs_extent_inline_ref *iref;
1207 	int ret;
1208 
1209 	ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
1210 					   num_bytes, parent, root_objectid,
1211 					   owner, offset, 1);
1212 	if (ret == 0) {
1213 		/*
1214 		 * We're adding refs to a tree block we already own; this
1215 		 * should not happen at all.
1216 		 */
1217 		if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1218 			btrfs_print_leaf(path->nodes[0]);
1219 			btrfs_crit(trans->fs_info,
1220 "adding refs to an existing tree ref, bytenr %llu num_bytes %llu root_objectid %llu slot %u",
1221 				   bytenr, num_bytes, root_objectid, path->slots[0]);
1222 			return -EUCLEAN;
1223 		}
1224 		ret = update_inline_extent_backref(trans, path, iref,
1225 						   refs_to_add, extent_op);
1226 	} else if (ret == -ENOENT) {
1227 		setup_inline_extent_backref(trans, path, iref, parent,
1228 					    root_objectid, owner, offset,
1229 					    refs_to_add, extent_op);
1230 		ret = 0;
1231 	}
1232 	return ret;
1233 }
1234 
1235 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1236 				 struct btrfs_root *root,
1237 				 struct btrfs_path *path,
1238 				 struct btrfs_extent_inline_ref *iref,
1239 				 int refs_to_drop, int is_data)
1240 {
1241 	int ret = 0;
1242 
1243 	BUG_ON(!is_data && refs_to_drop != 1);
1244 	if (iref)
1245 		ret = update_inline_extent_backref(trans, path, iref,
1246 						   -refs_to_drop, NULL);
1247 	else if (is_data)
1248 		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1249 	else
1250 		ret = btrfs_del_item(trans, root, path);
1251 	return ret;
1252 }
1253 
1254 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1255 			       u64 *discarded_bytes)
1256 {
1257 	int j, ret = 0;
1258 	u64 bytes_left, end;
1259 	u64 aligned_start = ALIGN(start, SECTOR_SIZE);
1260 
1261 	/* Adjust the range to be aligned to 512B sectors if necessary. */
1262 	if (start != aligned_start) {
1263 		len -= aligned_start - start;
1264 		len = round_down(len, SECTOR_SIZE);
1265 		start = aligned_start;
1266 	}
1267 
1268 	*discarded_bytes = 0;
1269 
1270 	if (!len)
1271 		return 0;
1272 
1273 	end = start + len;
1274 	bytes_left = len;
1275 
1276 	/* Skip any superblocks on this device. */
1277 	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1278 		u64 sb_start = btrfs_sb_offset(j);
1279 		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1280 		u64 size = sb_start - start;
1281 
1282 		if (!in_range(sb_start, start, bytes_left) &&
1283 		    !in_range(sb_end, start, bytes_left) &&
1284 		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1285 			continue;
1286 
1287 		/*
1288 		 * Superblock spans beginning of range.  Adjust start and
1289 		 * try again.
1290 		 */
1291 		if (sb_start <= start) {
1292 			start += sb_end - start;
1293 			if (start > end) {
1294 				bytes_left = 0;
1295 				break;
1296 			}
1297 			bytes_left = end - start;
1298 			continue;
1299 		}
1300 
1301 		if (size) {
1302 			ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
1303 						   size >> SECTOR_SHIFT,
1304 						   GFP_NOFS);
1305 			if (!ret)
1306 				*discarded_bytes += size;
1307 			else if (ret != -EOPNOTSUPP)
1308 				return ret;
1309 		}
1310 
1311 		start = sb_end;
1312 		if (start > end) {
1313 			bytes_left = 0;
1314 			break;
1315 		}
1316 		bytes_left = end - start;
1317 	}
1318 
1319 	while (bytes_left) {
1320 		u64 bytes_to_discard = min(BTRFS_MAX_DISCARD_CHUNK_SIZE, bytes_left);
1321 
1322 		ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
1323 					   bytes_to_discard >> SECTOR_SHIFT,
1324 					   GFP_NOFS);
1325 
1326 		if (ret) {
1327 			if (ret != -EOPNOTSUPP)
1328 				break;
1329 			continue;
1330 		}
1331 
1332 		start += bytes_to_discard;
1333 		bytes_left -= bytes_to_discard;
1334 		*discarded_bytes += bytes_to_discard;
1335 
1336 		if (btrfs_trim_interrupted()) {
1337 			ret = -ERESTARTSYS;
1338 			break;
1339 		}
1340 	}
1341 
1342 	return ret;
1343 }
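/*
 * A worked example of the alignment fixup at the top of the function
 * (numbers hypothetical): for start == 1000 and len == 4096,
 *
 *	aligned_start = ALIGN(1000, 512)      = 1024
 *	len          -= 1024 - 1000           = 4072
 *	len           = round_down(4072, 512) = 3584
 *
 * so discards cover [1024, 4608), entirely inside the original range; the
 * unaligned head and tail are simply left undiscarded.
 */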
1344 
1345 static int do_discard_extent(struct btrfs_discard_stripe *stripe, u64 *bytes)
1346 {
1347 	struct btrfs_device *dev = stripe->dev;
1348 	struct btrfs_fs_info *fs_info = dev->fs_info;
1349 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
1350 	u64 phys = stripe->physical;
1351 	u64 len = stripe->length;
1352 	u64 discarded = 0;
1353 	int ret = 0;
1354 
1355 	/* Zone reset on a zoned filesystem */
1356 	if (btrfs_can_zone_reset(dev, phys, len)) {
1357 		u64 src_disc;
1358 
1359 		ret = btrfs_reset_device_zone(dev, phys, len, &discarded);
1360 		if (ret)
1361 			goto out;
1362 
1363 		if (!btrfs_dev_replace_is_ongoing(dev_replace) ||
1364 		    dev != dev_replace->srcdev)
1365 			goto out;
1366 
1367 		src_disc = discarded;
1368 
1369 		/* Send to replace target as well */
1370 		ret = btrfs_reset_device_zone(dev_replace->tgtdev, phys, len,
1371 					      &discarded);
1372 		discarded += src_disc;
1373 	} else if (bdev_max_discard_sectors(stripe->dev->bdev)) {
1374 		ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded);
1375 	} else {
1376 		ret = 0;
1377 		*bytes = 0;
1378 	}
1379 
1380 out:
1381 	*bytes = discarded;
1382 	return ret;
1383 }
1384 
1385 int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
1386 			 u64 num_bytes, u64 *actual_bytes)
1387 {
1388 	int ret = 0;
1389 	u64 discarded_bytes = 0;
1390 	u64 end = bytenr + num_bytes;
1391 	u64 cur = bytenr;
1392 
1393 	/*
1394 	 * Avoid races with device replace and make sure the devices in the
1395 	 * stripes don't go away while we are discarding.
1396 	 */
1397 	btrfs_bio_counter_inc_blocked(fs_info);
1398 	while (cur < end) {
1399 		struct btrfs_discard_stripe *stripes;
1400 		unsigned int num_stripes;
1401 		int i;
1402 
1403 		num_bytes = end - cur;
1404 		stripes = btrfs_map_discard(fs_info, cur, &num_bytes, &num_stripes);
1405 		if (IS_ERR(stripes)) {
1406 			ret = PTR_ERR(stripes);
1407 			if (ret == -EOPNOTSUPP)
1408 				ret = 0;
1409 			break;
1410 		}
1411 
1412 		for (i = 0; i < num_stripes; i++) {
1413 			struct btrfs_discard_stripe *stripe = stripes + i;
1414 			u64 bytes;
1415 
1416 			if (!stripe->dev->bdev) {
1417 				ASSERT(btrfs_test_opt(fs_info, DEGRADED));
1418 				continue;
1419 			}
1420 
1421 			if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
1422 					&stripe->dev->dev_state))
1423 				continue;
1424 
1425 			ret = do_discard_extent(stripe, &bytes);
1426 			if (ret) {
1427 				/*
1428 				 * Keep going if discard is not supported by the
1429 				 * device.
1430 				 */
1431 				if (ret != -EOPNOTSUPP)
1432 					break;
1433 				ret = 0;
1434 			} else {
1435 				discarded_bytes += bytes;
1436 			}
1437 		}
1438 		kfree(stripes);
1439 		if (ret)
1440 			break;
1441 		cur += num_bytes;
1442 	}
1443 	btrfs_bio_counter_dec(fs_info);
1444 	if (actual_bytes)
1445 		*actual_bytes = discarded_bytes;
1446 	return ret;
1447 }
1448 
1449 /* Can return -ENOMEM */
1450 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1451 			 struct btrfs_ref *generic_ref)
1452 {
1453 	struct btrfs_fs_info *fs_info = trans->fs_info;
1454 	int ret;
1455 
1456 	ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
1457 	       generic_ref->action);
1458 	BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
1459 	       generic_ref->ref_root == BTRFS_TREE_LOG_OBJECTID);
1460 
1461 	if (generic_ref->type == BTRFS_REF_METADATA)
1462 		ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
1463 	else
1464 		ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0);
1465 
1466 	btrfs_ref_tree_mod(fs_info, generic_ref);
1467 
1468 	return ret;
1469 }
1470 
1471 /*
1472  * Insert a backreference for a given extent.
1473  *
1474  * The counterpart is in __btrfs_free_extent(), with examples and more details
1475  * on how it works.
1476  *
1477  * @trans:	    Handle of transaction
1478  *
1479  * @node:	    The delayed ref node used to get the bytenr/length for the
1480  *		    extent whose references are incremented.
1481  *
1482  * @extent_op:      Pointer to a structure holding information necessary for
1483  *                  updating a tree block's flags
1484  *
1485  */
1486 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1487 				  struct btrfs_delayed_ref_node *node,
1488 				  struct btrfs_delayed_extent_op *extent_op)
1489 {
1490 	struct btrfs_path *path;
1491 	struct extent_buffer *leaf;
1492 	struct btrfs_extent_item *item;
1493 	struct btrfs_key key;
1494 	u64 bytenr = node->bytenr;
1495 	u64 num_bytes = node->num_bytes;
1496 	u64 owner = btrfs_delayed_ref_owner(node);
1497 	u64 offset = btrfs_delayed_ref_offset(node);
1498 	u64 refs;
1499 	int refs_to_add = node->ref_mod;
1500 	int ret;
1501 
1502 	path = btrfs_alloc_path();
1503 	if (!path)
1504 		return -ENOMEM;
1505 
1506 	/* This will set up the path even if it fails to insert the back ref. */
1507 	ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
1508 					   node->parent, node->ref_root, owner,
1509 					   offset, refs_to_add, extent_op);
1510 	if ((ret < 0 && ret != -EAGAIN) || !ret)
1511 		goto out;
1512 
1513 	/*
1514 	 * Ok, we had -EAGAIN, which means we didn't have space to insert an
1515 	 * inline extent ref, so just update the reference count and add a
1516 	 * normal backref.
1517 	 */
1518 	leaf = path->nodes[0];
1519 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1520 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1521 	refs = btrfs_extent_refs(leaf, item);
1522 	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1523 	if (extent_op)
1524 		__run_delayed_extent_op(extent_op, leaf, item);
1525 
1526 	btrfs_release_path(path);
1527 
1528 	/* now insert the actual backref */
1529 	if (owner < BTRFS_FIRST_FREE_OBJECTID)
1530 		ret = insert_tree_block_ref(trans, path, node, bytenr);
1531 	else
1532 		ret = insert_extent_data_ref(trans, path, node, bytenr);
1533 
1534 	if (ret)
1535 		btrfs_abort_transaction(trans, ret);
1536 out:
1537 	btrfs_free_path(path);
1538 	return ret;
1539 }
1540 
1541 static void free_head_ref_squota_rsv(struct btrfs_fs_info *fs_info,
1542 				     struct btrfs_delayed_ref_head *href)
1543 {
1544 	u64 root = href->owning_root;
1545 
1546 	/*
1547 	 * Don't check must_insert_reserved, as this is called from contexts
1548 	 * where it has already been unset.
1549 	 */
1550 	if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE ||
1551 	    !href->is_data || !is_fstree(root))
1552 		return;
1553 
1554 	btrfs_qgroup_free_refroot(fs_info, root, href->reserved_bytes,
1555 				  BTRFS_QGROUP_RSV_DATA);
1556 }
1557 
1558 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1559 				struct btrfs_delayed_ref_head *href,
1560 				struct btrfs_delayed_ref_node *node,
1561 				struct btrfs_delayed_extent_op *extent_op,
1562 				bool insert_reserved)
1563 {
1564 	int ret = 0;
1565 	u64 parent = 0;
1566 	u64 flags = 0;
1567 
1568 	trace_run_delayed_data_ref(trans->fs_info, node);
1569 
1570 	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1571 		parent = node->parent;
1572 
1573 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1574 		struct btrfs_key key;
1575 		struct btrfs_squota_delta delta = {
1576 			.root = href->owning_root,
1577 			.num_bytes = node->num_bytes,
1578 			.is_data = true,
1579 			.is_inc	= true,
1580 			.generation = trans->transid,
1581 		};
1582 		u64 owner = btrfs_delayed_ref_owner(node);
1583 		u64 offset = btrfs_delayed_ref_offset(node);
1584 
1585 		if (extent_op)
1586 			flags |= extent_op->flags_to_set;
1587 
1588 		key.objectid = node->bytenr;
1589 		key.type = BTRFS_EXTENT_ITEM_KEY;
1590 		key.offset = node->num_bytes;
1591 
1592 		ret = alloc_reserved_file_extent(trans, parent, node->ref_root,
1593 						 flags, owner, offset, &key,
1594 						 node->ref_mod,
1595 						 href->owning_root);
1596 		free_head_ref_squota_rsv(trans->fs_info, href);
1597 		if (!ret)
1598 			ret = btrfs_record_squota_delta(trans->fs_info, &delta);
1599 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
1600 		ret = __btrfs_inc_extent_ref(trans, node, extent_op);
1601 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
1602 		ret = __btrfs_free_extent(trans, href, node, extent_op);
1603 	} else {
1604 		BUG();
1605 	}
1606 	return ret;
1607 }
1608 
1609 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1610 				    struct extent_buffer *leaf,
1611 				    struct btrfs_extent_item *ei)
1612 {
1613 	u64 flags = btrfs_extent_flags(leaf, ei);
1614 	if (extent_op->update_flags) {
1615 		flags |= extent_op->flags_to_set;
1616 		btrfs_set_extent_flags(leaf, ei, flags);
1617 	}
1618 
1619 	if (extent_op->update_key) {
1620 		struct btrfs_tree_block_info *bi;
1621 		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1622 		bi = (struct btrfs_tree_block_info *)(ei + 1);
1623 		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1624 	}
1625 }
1626 
1627 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1628 				 struct btrfs_delayed_ref_head *head,
1629 				 struct btrfs_delayed_extent_op *extent_op)
1630 {
1631 	struct btrfs_fs_info *fs_info = trans->fs_info;
1632 	struct btrfs_root *root;
1633 	struct btrfs_key key;
1634 	struct btrfs_path *path;
1635 	struct btrfs_extent_item *ei;
1636 	struct extent_buffer *leaf;
1637 	u32 item_size;
1638 	int ret;
1639 	int metadata = 1;
1640 
1641 	if (TRANS_ABORTED(trans))
1642 		return 0;
1643 
1644 	if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1645 		metadata = 0;
1646 
1647 	path = btrfs_alloc_path();
1648 	if (!path)
1649 		return -ENOMEM;
1650 
1651 	key.objectid = head->bytenr;
1652 
1653 	if (metadata) {
1654 		key.type = BTRFS_METADATA_ITEM_KEY;
1655 		key.offset = head->level;
1656 	} else {
1657 		key.type = BTRFS_EXTENT_ITEM_KEY;
1658 		key.offset = head->num_bytes;
1659 	}
1660 
1661 	root = btrfs_extent_root(fs_info, key.objectid);
1662 again:
1663 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1664 	if (ret < 0) {
1665 		goto out;
1666 	} else if (ret > 0) {
1667 		if (metadata) {
1668 			if (path->slots[0] > 0) {
1669 				path->slots[0]--;
1670 				btrfs_item_key_to_cpu(path->nodes[0], &key,
1671 						      path->slots[0]);
1672 				if (key.objectid == head->bytenr &&
1673 				    key.type == BTRFS_EXTENT_ITEM_KEY &&
1674 				    key.offset == head->num_bytes)
1675 					ret = 0;
1676 			}
1677 			if (ret > 0) {
1678 				btrfs_release_path(path);
1679 				metadata = 0;
1680 
1681 				key.objectid = head->bytenr;
1682 				key.offset = head->num_bytes;
1683 				key.type = BTRFS_EXTENT_ITEM_KEY;
1684 				goto again;
1685 			}
1686 		} else {
1687 			ret = -EUCLEAN;
1688 			btrfs_err(fs_info,
1689 		  "missing extent item for extent %llu num_bytes %llu level %d",
1690 				  head->bytenr, head->num_bytes, head->level);
1691 			goto out;
1692 		}
1693 	}
1694 
1695 	leaf = path->nodes[0];
1696 	item_size = btrfs_item_size(leaf, path->slots[0]);
1697 
1698 	if (unlikely(item_size < sizeof(*ei))) {
1699 		ret = -EUCLEAN;
1700 		btrfs_err(fs_info,
1701 			  "unexpected extent item size, has %u expect >= %zu",
1702 			  item_size, sizeof(*ei));
1703 		btrfs_abort_transaction(trans, ret);
1704 		goto out;
1705 	}
1706 
1707 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1708 	__run_delayed_extent_op(extent_op, leaf, ei);
1709 out:
1710 	btrfs_free_path(path);
1711 	return ret;
1712 }
1713 
1714 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1715 				struct btrfs_delayed_ref_head *href,
1716 				struct btrfs_delayed_ref_node *node,
1717 				struct btrfs_delayed_extent_op *extent_op,
1718 				bool insert_reserved)
1719 {
1720 	int ret = 0;
1721 	struct btrfs_fs_info *fs_info = trans->fs_info;
1722 	u64 parent = 0;
1723 	u64 ref_root = 0;
1724 
1725 	trace_run_delayed_tree_ref(trans->fs_info, node);
1726 
1727 	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1728 		parent = node->parent;
1729 	ref_root = node->ref_root;
1730 
1731 	if (unlikely(node->ref_mod != 1)) {
1732 		btrfs_err(trans->fs_info,
1733 	"btree block %llu has %d references rather than 1: action %d ref_root %llu parent %llu",
1734 			  node->bytenr, node->ref_mod, node->action, ref_root,
1735 			  parent);
1736 		return -EUCLEAN;
1737 	}
1738 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1739 		struct btrfs_squota_delta delta = {
1740 			.root = href->owning_root,
1741 			.num_bytes = fs_info->nodesize,
1742 			.is_data = false,
1743 			.is_inc = true,
1744 			.generation = trans->transid,
1745 		};
1746 
1747 		ret = alloc_reserved_tree_block(trans, node, extent_op);
1748 		if (!ret)
1749 			btrfs_record_squota_delta(fs_info, &delta);
1750 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
1751 		ret = __btrfs_inc_extent_ref(trans, node, extent_op);
1752 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
1753 		ret = __btrfs_free_extent(trans, href, node, extent_op);
1754 	} else {
1755 		BUG();
1756 	}
1757 	return ret;
1758 }
1759 
1760 /* helper function to actually process a single delayed ref entry */
1761 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
1762 			       struct btrfs_delayed_ref_head *href,
1763 			       struct btrfs_delayed_ref_node *node,
1764 			       struct btrfs_delayed_extent_op *extent_op,
1765 			       bool insert_reserved)
1766 {
1767 	int ret = 0;
1768 
1769 	if (TRANS_ABORTED(trans)) {
1770 		if (insert_reserved) {
1771 			btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
1772 			free_head_ref_squota_rsv(trans->fs_info, href);
1773 		}
1774 		return 0;
1775 	}
1776 
1777 	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
1778 	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1779 		ret = run_delayed_tree_ref(trans, href, node, extent_op,
1780 					   insert_reserved);
1781 	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
1782 		 node->type == BTRFS_SHARED_DATA_REF_KEY)
1783 		ret = run_delayed_data_ref(trans, href, node, extent_op,
1784 					   insert_reserved);
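	/*
	 * Owner refs are written together with the extent item itself (by
	 * alloc_reserved_file_extent()), so there is nothing left to run
	 * for them here.
	 */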
1785 	else if (node->type == BTRFS_EXTENT_OWNER_REF_KEY)
1786 		ret = 0;
1787 	else
1788 		BUG();
1789 	if (ret && insert_reserved)
1790 		btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
1791 	if (ret < 0)
1792 		btrfs_err(trans->fs_info,
1793 "failed to run delayed ref for logical %llu num_bytes %llu type %u action %u ref_mod %d: %d",
1794 			  node->bytenr, node->num_bytes, node->type,
1795 			  node->action, node->ref_mod, ret);
1796 	return ret;
1797 }
1798 
1799 static struct btrfs_delayed_extent_op *cleanup_extent_op(
1800 				struct btrfs_delayed_ref_head *head)
1801 {
1802 	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
1803 
1804 	if (!extent_op)
1805 		return NULL;
1806 
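	/*
	 * must_insert_reserved still being set here means no ref ever
	 * inserted this extent into the extent tree, so there is no extent
	 * item for the op to update; drop the op.
	 */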
1807 	if (head->must_insert_reserved) {
1808 		head->extent_op = NULL;
1809 		btrfs_free_delayed_extent_op(extent_op);
1810 		return NULL;
1811 	}
1812 	return extent_op;
1813 }
1814 
1815 static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
1816 				     struct btrfs_delayed_ref_head *head)
1817 {
1818 	struct btrfs_delayed_extent_op *extent_op;
1819 	int ret;
1820 
1821 	extent_op = cleanup_extent_op(head);
1822 	if (!extent_op)
1823 		return 0;
1824 	head->extent_op = NULL;
1825 	spin_unlock(&head->lock);
1826 	ret = run_delayed_extent_op(trans, head, extent_op);
1827 	btrfs_free_delayed_extent_op(extent_op);
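	/* Return 1 rather than 0 so the caller knows head->lock was dropped. */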
1828 	return ret ? ret : 1;
1829 }
1830 
1831 u64 btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
1832 				  struct btrfs_delayed_ref_root *delayed_refs,
1833 				  struct btrfs_delayed_ref_head *head)
1834 {
1835 	u64 ret = 0;
1836 
1837 	/*
1838 	 * We had csum deletions accounted for in our delayed refs rsv, so we
1839 	 * need to drop the csum leaves for this update from our delayed_refs_rsv.
1840 	 */
1841 	if (head->total_ref_mod < 0 && head->is_data) {
1842 		int nr_csums;
1843 
1844 		spin_lock(&delayed_refs->lock);
1845 		delayed_refs->pending_csums -= head->num_bytes;
1846 		spin_unlock(&delayed_refs->lock);
1847 		nr_csums = btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
1848 
1849 		btrfs_delayed_refs_rsv_release(fs_info, 0, nr_csums);
1850 
1851 		ret = btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
1852 	}
1853 	/* must_insert_reserved can be set only if we didn't run the head ref. */
1854 	if (head->must_insert_reserved)
1855 		free_head_ref_squota_rsv(fs_info, head);
1856 
1857 	return ret;
1858 }
1859 
1860 static int cleanup_ref_head(struct btrfs_trans_handle *trans,
1861 			    struct btrfs_delayed_ref_head *head,
1862 			    u64 *bytes_released)
1863 {
1864 
1865 	struct btrfs_fs_info *fs_info = trans->fs_info;
1866 	struct btrfs_delayed_ref_root *delayed_refs;
1867 	int ret;
1868 
1869 	delayed_refs = &trans->transaction->delayed_refs;
1870 
1871 	ret = run_and_cleanup_extent_op(trans, head);
1872 	if (ret < 0) {
1873 		btrfs_unselect_ref_head(delayed_refs, head);
1874 		btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
1875 		return ret;
1876 	} else if (ret) {
1877 		return ret;
1878 	}
1879 
1880 	/*
1881 	 * Need to drop our head ref lock and re-acquire the delayed ref lock
1882 	 * and then re-check to make sure nobody got added.
1883 	 * and then re-check to make sure no new refs got added.
1884 	spin_unlock(&head->lock);
1885 	spin_lock(&delayed_refs->lock);
1886 	spin_lock(&head->lock);
1887 	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
1888 		spin_unlock(&head->lock);
1889 		spin_unlock(&delayed_refs->lock);
1890 		return 1;
1891 	}
1892 	btrfs_delete_ref_head(fs_info, delayed_refs, head);
1893 	spin_unlock(&head->lock);
1894 	spin_unlock(&delayed_refs->lock);
1895 
1896 	if (head->must_insert_reserved) {
1897 		btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1);
1898 		if (head->is_data) {
1899 			struct btrfs_root *csum_root;
1900 
1901 			csum_root = btrfs_csum_root(fs_info, head->bytenr);
1902 			ret = btrfs_del_csums(trans, csum_root, head->bytenr,
1903 					      head->num_bytes);
1904 		}
1905 	}
1906 
1907 	*bytes_released += btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
1908 
1909 	trace_run_delayed_ref_head(fs_info, head, 0);
1910 	btrfs_delayed_ref_unlock(head);
1911 	btrfs_put_delayed_ref_head(head);
1912 	return ret;
1913 }
1914 
1915 static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
1916 					   struct btrfs_delayed_ref_head *locked_ref,
1917 					   u64 *bytes_released)
1918 {
1919 	struct btrfs_fs_info *fs_info = trans->fs_info;
1920 	struct btrfs_delayed_ref_root *delayed_refs;
1921 	struct btrfs_delayed_extent_op *extent_op;
1922 	struct btrfs_delayed_ref_node *ref;
1923 	bool must_insert_reserved;
1924 	int ret;
1925 
1926 	delayed_refs = &trans->transaction->delayed_refs;
1927 
1928 	lockdep_assert_held(&locked_ref->mutex);
1929 	lockdep_assert_held(&locked_ref->lock);
1930 
1931 	while ((ref = btrfs_select_delayed_ref(locked_ref))) {
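		/*
		 * A delayed ref with a seq that is still tracked in the tree
		 * mod log cannot be processed yet; back off and let the
		 * caller retry this head later.
		 */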
1932 		if (ref->seq &&
1933 		    btrfs_check_delayed_seq(fs_info, ref->seq)) {
1934 			spin_unlock(&locked_ref->lock);
1935 			btrfs_unselect_ref_head(delayed_refs, locked_ref);
1936 			return -EAGAIN;
1937 		}
1938 
1939 		rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
1940 		RB_CLEAR_NODE(&ref->ref_node);
1941 		if (!list_empty(&ref->add_list))
1942 			list_del(&ref->add_list);
1943 		/*
1944 		 * When we run the delayed ref, also correct the ref_mod on
1945 		 * the head.
1946 		 */
1947 		switch (ref->action) {
1948 		case BTRFS_ADD_DELAYED_REF:
1949 		case BTRFS_ADD_DELAYED_EXTENT:
1950 			locked_ref->ref_mod -= ref->ref_mod;
1951 			break;
1952 		case BTRFS_DROP_DELAYED_REF:
1953 			locked_ref->ref_mod += ref->ref_mod;
1954 			break;
1955 		default:
1956 			WARN_ON(1);
1957 		}
1958 
1959 		/*
1960 		 * Record the must_insert_reserved flag before we drop the
1961 		 * spin lock.
1962 		 */
1963 		must_insert_reserved = locked_ref->must_insert_reserved;
1964 		/*
1965 		 * Unsetting this on the head ref relinquishes ownership of
1966 		 * the rsv_bytes, so it is critical that every possible code
1967 		 * path from here forward frees all reserves including qgroup
1968 		 * reserve.
1969 		 */
1970 		locked_ref->must_insert_reserved = false;
1971 
1972 		extent_op = locked_ref->extent_op;
1973 		locked_ref->extent_op = NULL;
1974 		spin_unlock(&locked_ref->lock);
1975 
1976 		ret = run_one_delayed_ref(trans, locked_ref, ref, extent_op,
1977 					  must_insert_reserved);
1978 		btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
1979 		*bytes_released += btrfs_calc_delayed_ref_bytes(fs_info, 1);
1980 
1981 		btrfs_free_delayed_extent_op(extent_op);
1982 		if (ret) {
1983 			btrfs_unselect_ref_head(delayed_refs, locked_ref);
1984 			btrfs_put_delayed_ref(ref);
1985 			return ret;
1986 		}
1987 
1988 		btrfs_put_delayed_ref(ref);
1989 		cond_resched();
1990 
1991 		spin_lock(&locked_ref->lock);
1992 		btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
1993 	}
1994 
1995 	return 0;
1996 }
1997 
1998 /*
1999  * Returns 0 on success or if called with an already aborted transaction.
2000  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2001  */
2002 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2003 					     u64 min_bytes)
2004 {
2005 	struct btrfs_fs_info *fs_info = trans->fs_info;
2006 	struct btrfs_delayed_ref_root *delayed_refs;
2007 	struct btrfs_delayed_ref_head *locked_ref = NULL;
2008 	int ret;
2009 	unsigned long count = 0;
2010 	unsigned long max_count = 0;
2011 	u64 bytes_processed = 0;
2012 
2013 	delayed_refs = &trans->transaction->delayed_refs;
2014 	if (min_bytes == 0) {
2015 		max_count = delayed_refs->num_heads_ready;
2016 		min_bytes = U64_MAX;
2017 	}
2018 
2019 	do {
2020 		if (!locked_ref) {
2021 			locked_ref = btrfs_select_ref_head(fs_info, delayed_refs);
2022 			if (IS_ERR_OR_NULL(locked_ref)) {
2023 				if (PTR_ERR(locked_ref) == -EAGAIN) {
2024 					continue;
2025 				} else {
2026 					break;
2027 				}
2028 			}
2029 			count++;
2030 		}
2031 		/*
2032 		 * We need to try to merge add/drops of the same ref since we
2033 		 * can run into issues with relocate dropping the implicit ref
2034 		 * and then it being added back again before the drop can
2035 		 * finish.  If we merged anything we need to re-loop so we can
2036 		 * get a good ref.
2037 		 * Or we can get node references of the same type that weren't
2038 		 * merged when created due to bumps in the tree mod seq, and
2039 		 * we need to merge them to prevent adding an inline extent
2040 		 * backref before dropping it (triggering a BUG_ON at
2041 		 * insert_inline_extent_backref()).
2042 		 */
2043 		spin_lock(&locked_ref->lock);
2044 		btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
2045 
2046 		ret = btrfs_run_delayed_refs_for_head(trans, locked_ref, &bytes_processed);
2047 		if (ret < 0 && ret != -EAGAIN) {
2048 			/*
2049 			 * Error: btrfs_run_delayed_refs_for_head already
2050 			 * unlocked everything, so just bail out.
2051 			 */
2052 			return ret;
2053 		} else if (!ret) {
2054 			/*
2055 			 * Success, perform the usual cleanup of a processed
2056 			 * head
2057 			 */
2058 			ret = cleanup_ref_head(trans, locked_ref, &bytes_processed);
2059 			if (ret > 0) {
2060 				/* We dropped our lock, we need to loop. */
2061 				ret = 0;
2062 				continue;
2063 			} else if (ret) {
2064 				return ret;
2065 			}
2066 		}
2067 
2068 		/*
2069 		 * Either we succeeded above or btrfs_run_delayed_refs_for_head
2070 		 * returned -EAGAIN; either way we need to select another head.
2071 		 */
2072 
2073 		locked_ref = NULL;
2074 		cond_resched();
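		/*
		 * Keep looping while we still owe bytes (byte budget mode),
		 * still owe heads (run-all-existing mode), or still hold a
		 * selected head that needs to be finished.
		 */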
2075 	} while ((min_bytes != U64_MAX && bytes_processed < min_bytes) ||
2076 		 (max_count > 0 && count < max_count) ||
2077 		 locked_ref);
2078 
2079 	return 0;
2080 }
2081 
2082 #ifdef SCRAMBLE_DELAYED_REFS
2083 /*
2084  * Normally delayed refs get processed in ascending bytenr order. This
2085  * correlates in most cases to the order added. To expose dependencies on this
2086  * order, we start processing the tree in the middle instead of at the beginning.
2087  */
2088 static u64 find_middle(struct rb_root *root)
2089 {
2090 	struct rb_node *n = root->rb_node;
2091 	struct btrfs_delayed_ref_node *entry;
2092 	int alt = 1;
2093 	u64 middle;
2094 	u64 first = 0, last = 0;
2095 
2096 	n = rb_first(root);
2097 	if (n) {
2098 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2099 		first = entry->bytenr;
2100 	}
2101 	n = rb_last(root);
2102 	if (n) {
2103 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2104 		last = entry->bytenr;
2105 	}
2106 	n = root->rb_node;
2107 
2108 	while (n) {
2109 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2110 		WARN_ON(!entry->in_tree);
2111 
2112 		middle = entry->bytenr;
2113 
2114 		if (alt)
2115 			n = n->rb_left;
2116 		else
2117 			n = n->rb_right;
2118 
2119 		alt = 1 - alt;
2120 	}
2121 	return middle;
2122 }
2123 #endif
2124 
2125 /*
2126  * Start processing the delayed reference count updates and extent insertions
2127  * we have queued up so far.
2128  *
2129  * @trans:	Transaction handle.
2130  * @min_bytes:	How many bytes of delayed references to process. After this
2131  *		many bytes we stop processing delayed references if there are
2132  *		any more. If 0, run all delayed references that exist at the
2133  *		time of the call, but not new ones added while running them.
2134  *		Use (u64)-1 (U64_MAX) to run all existing delayed references
2135  *		plus any new ones that are added.
2136  *
2137  * Returns 0 on success or if called with an aborted transaction
2138  * Returns <0 on error and aborts the transaction
2139  */
2140 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, u64 min_bytes)
2141 {
2142 	struct btrfs_fs_info *fs_info = trans->fs_info;
2143 	struct btrfs_delayed_ref_root *delayed_refs;
2144 	int ret;
2145 
2146 	/* We'll clean this up in btrfs_cleanup_transaction */
2147 	if (TRANS_ABORTED(trans))
2148 		return 0;
2149 
2150 	if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
2151 		return 0;
2152 
2153 	delayed_refs = &trans->transaction->delayed_refs;
2154 again:
2155 #ifdef SCRAMBLE_DELAYED_REFS
2156 	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2157 #endif
2158 	ret = __btrfs_run_delayed_refs(trans, min_bytes);
2159 	if (ret < 0) {
2160 		btrfs_abort_transaction(trans, ret);
2161 		return ret;
2162 	}
2163 
2164 	if (min_bytes == U64_MAX) {
2165 		btrfs_create_pending_block_groups(trans);
2166 
2167 		spin_lock(&delayed_refs->lock);
2168 		if (xa_empty(&delayed_refs->head_refs)) {
2169 			spin_unlock(&delayed_refs->lock);
2170 			return 0;
2171 		}
2172 		spin_unlock(&delayed_refs->lock);
2173 
2174 		cond_resched();
2175 		goto again;
2176 	}
2177 
2178 	return 0;
2179 }
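
/*
 * Illustrative sketch, not part of the original file: the min_bytes modes
 * documented above, wrapped in a hypothetical helper. Assumes @trans is a
 * live transaction handle obtained elsewhere.
 */
static inline int example_flush_delayed_refs(struct btrfs_trans_handle *trans,
					     bool full_drain)
{
	/*
	 * 0 runs only the delayed refs that exist right now; U64_MAX keeps
	 * going until the queue is fully drained, including refs queued up
	 * while we were running (which is what a transaction commit wants).
	 */
	return btrfs_run_delayed_refs(trans, full_drain ? U64_MAX : 0);
}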
2180 
2181 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2182 				struct extent_buffer *eb, u64 flags)
2183 {
2184 	struct btrfs_delayed_extent_op *extent_op;
2185 	int ret;
2186 
2187 	extent_op = btrfs_alloc_delayed_extent_op();
2188 	if (!extent_op)
2189 		return -ENOMEM;
2190 
2191 	extent_op->flags_to_set = flags;
2192 	extent_op->update_flags = true;
2193 	extent_op->update_key = false;
2194 
2195 	ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len,
2196 					  btrfs_header_level(eb), extent_op);
2197 	if (ret)
2198 		btrfs_free_delayed_extent_op(extent_op);
2199 	return ret;
2200 }
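
/*
 * Illustrative sketch, not part of the original file: queueing a delayed
 * flag update on a tree block's extent item. The flag choice mirrors how
 * shared tree blocks get marked with full backrefs; 'eb' is assumed to be
 * an extent buffer the caller holds a reference to, and the update itself
 * is applied later by run_delayed_extent_op() above.
 */
static inline int example_mark_full_backref(struct btrfs_trans_handle *trans,
					    struct extent_buffer *eb)
{
	return btrfs_set_disk_extent_flags(trans, eb,
					   BTRFS_BLOCK_FLAG_FULL_BACKREF);
}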
2201 
2202 static noinline int check_delayed_ref(struct btrfs_inode *inode,
2203 				      struct btrfs_path *path,
2204 				      u64 offset, u64 bytenr)
2205 {
2206 	struct btrfs_root *root = inode->root;
2207 	struct btrfs_delayed_ref_head *head;
2208 	struct btrfs_delayed_ref_node *ref;
2209 	struct btrfs_delayed_ref_root *delayed_refs;
2210 	struct btrfs_transaction *cur_trans;
2211 	struct rb_node *node;
2212 	int ret = 0;
2213 
2214 	spin_lock(&root->fs_info->trans_lock);
2215 	cur_trans = root->fs_info->running_transaction;
2216 	if (cur_trans)
2217 		refcount_inc(&cur_trans->use_count);
2218 	spin_unlock(&root->fs_info->trans_lock);
2219 	if (!cur_trans)
2220 		return 0;
2221 
2222 	delayed_refs = &cur_trans->delayed_refs;
2223 	spin_lock(&delayed_refs->lock);
2224 	head = btrfs_find_delayed_ref_head(root->fs_info, delayed_refs, bytenr);
2225 	if (!head) {
2226 		spin_unlock(&delayed_refs->lock);
2227 		btrfs_put_transaction(cur_trans);
2228 		return 0;
2229 	}
2230 
2231 	if (!mutex_trylock(&head->mutex)) {
2232 		if (path->nowait) {
2233 			spin_unlock(&delayed_refs->lock);
2234 			btrfs_put_transaction(cur_trans);
2235 			return -EAGAIN;
2236 		}
2237 
2238 		refcount_inc(&head->refs);
2239 		spin_unlock(&delayed_refs->lock);
2240 
2241 		btrfs_release_path(path);
2242 
2243 		/*
2244 		 * Mutex was contended; block until it's released and let the
2245 		 * caller try again.
2246 		 */
2247 		mutex_lock(&head->mutex);
2248 		mutex_unlock(&head->mutex);
2249 		btrfs_put_delayed_ref_head(head);
2250 		btrfs_put_transaction(cur_trans);
2251 		return -EAGAIN;
2252 	}
2253 	spin_unlock(&delayed_refs->lock);
2254 
2255 	spin_lock(&head->lock);
2256 	/*
2257 	 * XXX: We should replace this with a proper search function in the
2258 	 * future.
2259 	 */
2260 	for (node = rb_first_cached(&head->ref_tree); node;
2261 	     node = rb_next(node)) {
2262 		u64 ref_owner;
2263 		u64 ref_offset;
2264 
2265 		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
2266 		/* If it's a shared ref we know a cross reference exists */
2267 		if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2268 			ret = 1;
2269 			break;
2270 		}
2271 
2272 		ref_owner = btrfs_delayed_ref_owner(ref);
2273 		ref_offset = btrfs_delayed_ref_offset(ref);
2274 
2275 		/*
2276 		 * If our ref doesn't match the one we're currently looking at
2277 		 * then we have a cross reference.
2278 		 */
2279 		if (ref->ref_root != btrfs_root_id(root) ||
2280 		    ref_owner != btrfs_ino(inode) || ref_offset != offset) {
2281 			ret = 1;
2282 			break;
2283 		}
2284 	}
2285 	spin_unlock(&head->lock);
2286 	mutex_unlock(&head->mutex);
2287 	btrfs_put_transaction(cur_trans);
2288 	return ret;
2289 }
2290 
2291 /*
2292  * Check if there are references for a data extent other than the one belonging
2293  * to the given inode and offset.
2294  *
2295  * @inode:     The only inode we expect to find associated with the data extent.
2296  * @path:      A path to use for searching the extent tree.
2297  * @offset:    The only offset we expect to find associated with the data extent.
2298  * @bytenr:    The logical address of the data extent.
2299  *
2300  * When the extent has no references other than the one we expect to find,
2301  * we always return a value of 0 with the path having a locked
2302  * leaf that contains the extent's extent item - this is necessary to ensure
2303  * we don't race with a task running delayed references, and our caller must
2304  * have such a path when calling check_delayed_ref() - it must lock a delayed
2305  * ref head while holding the leaf locked. In case the extent item is not found
2306  * in the extent tree, we return -ENOENT with the path having the leaf (locked)
2307  * where the extent item should be, in order to prevent races with another task
2308  * running delayed references, so that we don't miss any reference when calling
2309  * check_delayed_ref().
2310  *
2311  * Note: this may return false positives, and this is because we want to be
2312  *       quick here as we're called in write paths (when flushing delalloc and
2313  *       in the direct IO write path). For example we can have an extent with
2314  *       a single reference but that reference is not inlined, or we may have
2315  *       many references in the extent tree but we also have delayed references
2316  *       that cancel all the references except the one for our inode and offset,
2317  *       but such checks would be expensive and complex due to all the
2318  *       locking needed to avoid races between the checks and flushing delayed refs,
2319  *       plus non-inline references may be located on leaves other than the one
2320  *       that contains the extent item in the extent tree. The important thing
2321  *       here is to not return false negatives and that the false positives are
2322  *       not very common.
2323  *
2324  * Returns: 0 if there are no cross references and with the path having a locked
2325  *          leaf from the extent tree that contains the extent's extent item.
2326  *
2327  *          1 if there are cross references (false positives can happen).
2328  *
2329  *          < 0 in case of an error. In case of -ENOENT the leaf in the extent
2330  *          tree where the extent item should be located is read locked and
2331  *          accessible in the given path.
2332  */
2333 static noinline int check_committed_ref(struct btrfs_inode *inode,
2334 					struct btrfs_path *path,
2335 					u64 offset, u64 bytenr)
2336 {
2337 	struct btrfs_root *root = inode->root;
2338 	struct btrfs_fs_info *fs_info = root->fs_info;
2339 	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
2340 	struct extent_buffer *leaf;
2341 	struct btrfs_extent_data_ref *ref;
2342 	struct btrfs_extent_inline_ref *iref;
2343 	struct btrfs_extent_item *ei;
2344 	struct btrfs_key key;
2345 	u32 item_size;
2346 	u32 expected_size;
2347 	int type;
2348 	int ret;
2349 
2350 	key.objectid = bytenr;
2351 	key.offset = (u64)-1;
2352 	key.type = BTRFS_EXTENT_ITEM_KEY;
2353 
2354 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2355 	if (ret < 0)
2356 		return ret;
2357 	if (ret == 0) {
2358 		/*
2359 		 * Key with offset -1 found: an extent item with such an offset
2360 		 * would have to exist, but that is outside the valid range.
2361 		 */
2362 		return -EUCLEAN;
2363 	}
2364 
2365 	if (path->slots[0] == 0)
2366 		return -ENOENT;
2367 
2368 	path->slots[0]--;
2369 	leaf = path->nodes[0];
2370 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2371 
2372 	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2373 		return -ENOENT;
2374 
2375 	item_size = btrfs_item_size(leaf, path->slots[0]);
2376 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2377 	expected_size = sizeof(*ei) + btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY);
2378 
2379 	/* No inline refs; we need to bail before checking for owner ref. */
2380 	if (item_size == sizeof(*ei))
2381 		return 1;
2382 
2383 	/* Check for an owner ref; skip over it to the real inline refs. */
2384 	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2385 	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
2386 	if (btrfs_fs_incompat(fs_info, SIMPLE_QUOTA) && type == BTRFS_EXTENT_OWNER_REF_KEY) {
2387 		expected_size += btrfs_extent_inline_ref_size(BTRFS_EXTENT_OWNER_REF_KEY);
2388 		iref = (struct btrfs_extent_inline_ref *)(iref + 1);
2389 		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
2390 	}
2391 
2392 	/* If extent item has more than 1 inline ref then it's shared */
2393 	if (item_size != expected_size)
2394 		return 1;
2395 
2396 	/* If this extent has SHARED_DATA_REF then it's shared */
2397 	if (type != BTRFS_EXTENT_DATA_REF_KEY)
2398 		return 1;
2399 
2400 	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2401 	if (btrfs_extent_refs(leaf, ei) !=
2402 	    btrfs_extent_data_ref_count(leaf, ref) ||
2403 	    btrfs_extent_data_ref_root(leaf, ref) != btrfs_root_id(root) ||
2404 	    btrfs_extent_data_ref_objectid(leaf, ref) != btrfs_ino(inode) ||
2405 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
2406 		return 1;
2407 
2408 	return 0;
2409 }
2410 
2411 int btrfs_cross_ref_exist(struct btrfs_inode *inode, u64 offset,
2412 			  u64 bytenr, struct btrfs_path *path)
2413 {
2414 	int ret;
2415 
2416 	do {
2417 		ret = check_committed_ref(inode, path, offset, bytenr);
2418 		if (ret && ret != -ENOENT)
2419 			goto out;
2420 
2421 		/*
2422 		 * The path must have a locked leaf from the extent tree where
2423 		 * the extent item for our extent is located, in case it exists,
2424 		 * or where it should be located in case it doesn't exist yet
2425 		 * because it's new and its delayed ref was not yet flushed.
2426 		 * We need to lock the delayed ref head at check_delayed_ref(),
2427 		 * if one exists, while holding the leaf locked so that we don't
2428 		 * race with delayed ref flushing, miss references and
2429 		 * incorrectly report that the extent is not shared.
2430 		 */
2431 		if (IS_ENABLED(CONFIG_BTRFS_ASSERT)) {
2432 			struct extent_buffer *leaf = path->nodes[0];
2433 
2434 			ASSERT(leaf != NULL);
2435 			btrfs_assert_tree_read_locked(leaf);
2436 
2437 			if (ret != -ENOENT) {
2438 				struct btrfs_key key;
2439 
2440 				btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2441 				ASSERT(key.objectid == bytenr);
2442 				ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY);
2443 			}
2444 		}
2445 
2446 		ret = check_delayed_ref(inode, path, offset, bytenr);
2447 	} while (ret == -EAGAIN && !path->nowait);
2448 
2449 out:
2450 	btrfs_release_path(path);
2451 	if (btrfs_is_data_reloc_root(inode->root))
2452 		WARN_ON(ret > 0);
2453 	return ret;
2454 }
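
/*
 * Illustrative sketch, not part of the original file: how a write path might
 * consume btrfs_cross_ref_exist() when deciding between an in-place (NOCOW)
 * write and a COW write. The helper is hypothetical and the real kernel
 * decision involves more checks than this.
 */
static inline bool example_extent_is_exclusive(struct btrfs_inode *inode,
					       u64 offset, u64 bytenr,
					       struct btrfs_path *path)
{
	int ret = btrfs_cross_ref_exist(inode, offset, bytenr, path);

	/*
	 * 0 means we are provably the only reference holder; 1 (which may be
	 * a false positive) or any error means taking the safe COW path.
	 */
	return ret == 0;
}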
2455 
2456 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2457 			   struct btrfs_root *root,
2458 			   struct extent_buffer *buf,
2459 			   int full_backref, int inc)
2460 {
2461 	struct btrfs_fs_info *fs_info = root->fs_info;
2462 	u64 parent;
2463 	u64 ref_root;
2464 	u32 nritems;
2465 	struct btrfs_key key;
2466 	struct btrfs_file_extent_item *fi;
2467 	bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC);
2468 	int i;
2469 	int action;
2470 	int level;
2471 	int ret = 0;
2472 
2473 	if (btrfs_is_testing(fs_info))
2474 		return 0;
2475 
2476 	ref_root = btrfs_header_owner(buf);
2477 	nritems = btrfs_header_nritems(buf);
2478 	level = btrfs_header_level(buf);
2479 
2480 	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0)
2481 		return 0;
2482 
2483 	if (full_backref)
2484 		parent = buf->start;
2485 	else
2486 		parent = 0;
2487 	if (inc)
2488 		action = BTRFS_ADD_DELAYED_REF;
2489 	else
2490 		action = BTRFS_DROP_DELAYED_REF;
2491 
2492 	for (i = 0; i < nritems; i++) {
2493 		struct btrfs_ref ref = {
2494 			.action = action,
2495 			.parent = parent,
2496 			.ref_root = ref_root,
2497 		};
2498 
2499 		if (level == 0) {
2500 			btrfs_item_key_to_cpu(buf, &key, i);
2501 			if (key.type != BTRFS_EXTENT_DATA_KEY)
2502 				continue;
2503 			fi = btrfs_item_ptr(buf, i,
2504 					    struct btrfs_file_extent_item);
2505 			if (btrfs_file_extent_type(buf, fi) ==
2506 			    BTRFS_FILE_EXTENT_INLINE)
2507 				continue;
2508 			ref.bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2509 			if (ref.bytenr == 0)
2510 				continue;
2511 
2512 			ref.num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2513 			ref.owning_root = ref_root;
2514 
2515 			key.offset -= btrfs_file_extent_offset(buf, fi);
2516 			btrfs_init_data_ref(&ref, key.objectid, key.offset,
2517 					    btrfs_root_id(root), for_reloc);
2518 			if (inc)
2519 				ret = btrfs_inc_extent_ref(trans, &ref);
2520 			else
2521 				ret = btrfs_free_extent(trans, &ref);
2522 			if (ret)
2523 				goto fail;
2524 		} else {
2525 			/* We don't know the owning_root, leave as 0. */
2526 			ref.bytenr = btrfs_node_blockptr(buf, i);
2527 			ref.num_bytes = fs_info->nodesize;
2528 
2529 			btrfs_init_tree_ref(&ref, level - 1,
2530 					    btrfs_root_id(root), for_reloc);
2531 			if (inc)
2532 				ret = btrfs_inc_extent_ref(trans, &ref);
2533 			else
2534 				ret = btrfs_free_extent(trans, &ref);
2535 			if (ret)
2536 				goto fail;
2537 		}
2538 	}
2539 	return 0;
2540 fail:
2541 	return ret;
2542 }
2543 
2544 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2545 		  struct extent_buffer *buf, int full_backref)
2546 {
2547 	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2548 }
2549 
2550 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2551 		  struct extent_buffer *buf, int full_backref)
2552 {
2553 	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2554 }
2555 
2556 static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
2557 {
2558 	struct btrfs_fs_info *fs_info = root->fs_info;
2559 	u64 flags;
2560 	u64 ret;
2561 
2562 	if (data)
2563 		flags = BTRFS_BLOCK_GROUP_DATA;
2564 	else if (root == fs_info->chunk_root)
2565 		flags = BTRFS_BLOCK_GROUP_SYSTEM;
2566 	else
2567 		flags = BTRFS_BLOCK_GROUP_METADATA;
2568 
2569 	ret = btrfs_get_alloc_profile(fs_info, flags);
2570 	return ret;
2571 }
2572 
2573 static u64 first_logical_byte(struct btrfs_fs_info *fs_info)
2574 {
2575 	struct rb_node *leftmost;
2576 	u64 bytenr = 0;
2577 
2578 	read_lock(&fs_info->block_group_cache_lock);
2579 	/* Get the block group with the lowest logical start address. */
2580 	leftmost = rb_first_cached(&fs_info->block_group_cache_tree);
2581 	if (leftmost) {
2582 		struct btrfs_block_group *bg;
2583 
2584 		bg = rb_entry(leftmost, struct btrfs_block_group, cache_node);
2585 		bytenr = bg->start;
2586 	}
2587 	read_unlock(&fs_info->block_group_cache_lock);
2588 
2589 	return bytenr;
2590 }
2591 
2592 static int pin_down_extent(struct btrfs_trans_handle *trans,
2593 			   struct btrfs_block_group *cache,
2594 			   u64 bytenr, u64 num_bytes, int reserved)
2595 {
2596 	spin_lock(&cache->space_info->lock);
2597 	spin_lock(&cache->lock);
2598 	cache->pinned += num_bytes;
2599 	btrfs_space_info_update_bytes_pinned(cache->space_info, num_bytes);
2600 	if (reserved) {
2601 		cache->reserved -= num_bytes;
2602 		cache->space_info->bytes_reserved -= num_bytes;
2603 	}
2604 	spin_unlock(&cache->lock);
2605 	spin_unlock(&cache->space_info->lock);
2606 
2607 	set_extent_bit(&trans->transaction->pinned_extents, bytenr,
2608 		       bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
2609 	return 0;
2610 }
2611 
2612 int btrfs_pin_extent(struct btrfs_trans_handle *trans,
2613 		     u64 bytenr, u64 num_bytes, int reserved)
2614 {
2615 	struct btrfs_block_group *cache;
2616 
2617 	cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
2618 	BUG_ON(!cache); /* Logic error */
2619 
2620 	pin_down_extent(trans, cache, bytenr, num_bytes, reserved);
2621 
2622 	btrfs_put_block_group(cache);
2623 	return 0;
2624 }
2625 
2626 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
2627 				    const struct extent_buffer *eb)
2628 {
2629 	struct btrfs_block_group *cache;
2630 	int ret;
2631 
2632 	cache = btrfs_lookup_block_group(trans->fs_info, eb->start);
2633 	if (!cache)
2634 		return -EINVAL;
2635 
2636 	/*
2637 	 * Fully cache the free space first so that our pin removes the free space
2638 	 * from the cache.
2639 	 */
2640 	ret = btrfs_cache_block_group(cache, true);
2641 	if (ret)
2642 		goto out;
2643 
2644 	pin_down_extent(trans, cache, eb->start, eb->len, 0);
2645 
2646 	/* remove us from the free space cache (if we're there at all) */
2647 	ret = btrfs_remove_free_space(cache, eb->start, eb->len);
2648 out:
2649 	btrfs_put_block_group(cache);
2650 	return ret;
2651 }
2652 
2653 static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
2654 				   u64 start, u64 num_bytes)
2655 {
2656 	int ret;
2657 	struct btrfs_block_group *block_group;
2658 
2659 	block_group = btrfs_lookup_block_group(fs_info, start);
2660 	if (!block_group)
2661 		return -EINVAL;
2662 
2663 	ret = btrfs_cache_block_group(block_group, true);
2664 	if (ret)
2665 		goto out;
2666 
2667 	ret = btrfs_remove_free_space(block_group, start, num_bytes);
2668 out:
2669 	btrfs_put_block_group(block_group);
2670 	return ret;
2671 }
2672 
2673 int btrfs_exclude_logged_extents(struct extent_buffer *eb)
2674 {
2675 	struct btrfs_fs_info *fs_info = eb->fs_info;
2676 	struct btrfs_file_extent_item *item;
2677 	struct btrfs_key key;
2678 	int found_type;
2679 	int i;
2680 	int ret = 0;
2681 
2682 	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS))
2683 		return 0;
2684 
2685 	for (i = 0; i < btrfs_header_nritems(eb); i++) {
2686 		btrfs_item_key_to_cpu(eb, &key, i);
2687 		if (key.type != BTRFS_EXTENT_DATA_KEY)
2688 			continue;
2689 		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
2690 		found_type = btrfs_file_extent_type(eb, item);
2691 		if (found_type == BTRFS_FILE_EXTENT_INLINE)
2692 			continue;
2693 		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
2694 			continue;
2695 		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
2696 		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
2697 		ret = __exclude_logged_extent(fs_info, key.objectid, key.offset);
2698 		if (ret)
2699 			break;
2700 	}
2701 
2702 	return ret;
2703 }
2704 
2705 static void
2706 btrfs_inc_block_group_reservations(struct btrfs_block_group *bg)
2707 {
2708 	atomic_inc(&bg->reservations);
2709 }
2710 
2711 /*
2712  * Returns the free cluster for the given space info and sets empty_cluster to
2713  * what it should be based on the mount options.
2714  */
2715 static struct btrfs_free_cluster *
2716 fetch_cluster_info(struct btrfs_fs_info *fs_info,
2717 		   struct btrfs_space_info *space_info, u64 *empty_cluster)
2718 {
2719 	struct btrfs_free_cluster *ret = NULL;
2720 
2721 	*empty_cluster = 0;
2722 	if (btrfs_mixed_space_info(space_info))
2723 		return ret;
2724 
2725 	if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
2726 		ret = &fs_info->meta_alloc_cluster;
2727 		if (btrfs_test_opt(fs_info, SSD))
2728 			*empty_cluster = SZ_2M;
2729 		else
2730 			*empty_cluster = SZ_64K;
2731 	} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) &&
2732 		   btrfs_test_opt(fs_info, SSD_SPREAD)) {
2733 		*empty_cluster = SZ_2M;
2734 		ret = &fs_info->data_alloc_cluster;
2735 	}
2736 
2737 	return ret;
2738 }
2739 
2740 static int unpin_extent_range(struct btrfs_fs_info *fs_info,
2741 			      u64 start, u64 end,
2742 			      const bool return_free_space)
2743 {
2744 	struct btrfs_block_group *cache = NULL;
2745 	struct btrfs_space_info *space_info;
2746 	struct btrfs_free_cluster *cluster = NULL;
2747 	u64 total_unpinned = 0;
2748 	u64 empty_cluster = 0;
2749 	bool readonly;
2750 	int ret = 0;
2751 
2752 	while (start <= end) {
2753 		u64 len;
2754 
2755 		readonly = false;
2756 		if (!cache ||
2757 		    start >= cache->start + cache->length) {
2758 			if (cache)
2759 				btrfs_put_block_group(cache);
2760 			total_unpinned = 0;
2761 			cache = btrfs_lookup_block_group(fs_info, start);
2762 			if (cache == NULL) {
2763 				/* Logic error, something removed the block group. */
2764 				ret = -EUCLEAN;
2765 				goto out;
2766 			}
2767 
2768 			cluster = fetch_cluster_info(fs_info,
2769 						     cache->space_info,
2770 						     &empty_cluster);
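			/*
			 * Demand twice the normal cluster size worth of
			 * unpinned space before clearing the fragmented flag
			 * further below.
			 */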
2771 			empty_cluster <<= 1;
2772 		}
2773 
2774 		len = cache->start + cache->length - start;
2775 		len = min(len, end + 1 - start);
2776 
2777 		if (return_free_space)
2778 			btrfs_add_free_space(cache, start, len);
2779 
2780 		start += len;
2781 		total_unpinned += len;
2782 		space_info = cache->space_info;
2783 
2784 		/*
2785 		 * If this space cluster has been marked as fragmented and we've
2786 		 * unpinned enough in this block group to potentially allow a
2787 		 * cluster to be created inside of it, go ahead and clear the
2788 		 * fragmented flag.
2789 		 */
2790 		if (cluster && cluster->fragmented &&
2791 		    total_unpinned > empty_cluster) {
2792 			spin_lock(&cluster->lock);
2793 			cluster->fragmented = 0;
2794 			spin_unlock(&cluster->lock);
2795 		}
2796 
2797 		spin_lock(&space_info->lock);
2798 		spin_lock(&cache->lock);
2799 		cache->pinned -= len;
2800 		btrfs_space_info_update_bytes_pinned(space_info, -len);
2801 		space_info->max_extent_size = 0;
2802 		if (cache->ro) {
2803 			space_info->bytes_readonly += len;
2804 			readonly = true;
2805 		} else if (btrfs_is_zoned(fs_info)) {
2806 			/* The space needs a zone reset before reuse in a zoned block group */
2807 			btrfs_space_info_update_bytes_zone_unusable(space_info, len);
2808 			readonly = true;
2809 		}
2810 		spin_unlock(&cache->lock);
2811 		if (!readonly && return_free_space)
2812 			btrfs_return_free_space(space_info, len);
2813 		spin_unlock(&space_info->lock);
2814 	}
2815 
2816 	if (cache)
2817 		btrfs_put_block_group(cache);
2818 out:
2819 	return ret;
2820 }
2821 
2822 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
2823 {
2824 	struct btrfs_fs_info *fs_info = trans->fs_info;
2825 	struct btrfs_block_group *block_group, *tmp;
2826 	struct list_head *deleted_bgs;
2827 	struct extent_io_tree *unpin;
2828 	u64 start;
2829 	u64 end;
2830 	int ret;
2831 
2832 	unpin = &trans->transaction->pinned_extents;
2833 
2834 	while (!TRANS_ABORTED(trans)) {
2835 		struct extent_state *cached_state = NULL;
2836 
2837 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
2838 		if (!find_first_extent_bit(unpin, 0, &start, &end,
2839 					   EXTENT_DIRTY, &cached_state)) {
2840 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
2841 			break;
2842 		}
2843 
2844 		if (btrfs_test_opt(fs_info, DISCARD_SYNC))
2845 			ret = btrfs_discard_extent(fs_info, start,
2846 						   end + 1 - start, NULL);
2847 
2848 		clear_extent_dirty(unpin, start, end, &cached_state);
2849 		ret = unpin_extent_range(fs_info, start, end, true);
2850 		BUG_ON(ret);
2851 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
2852 		free_extent_state(cached_state);
2853 		cond_resched();
2854 	}
2855 
2856 	if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) {
2857 		btrfs_discard_calc_delay(&fs_info->discard_ctl);
2858 		btrfs_discard_schedule_work(&fs_info->discard_ctl, true);
2859 	}
2860 
2861 	/*
2862 	 * Transaction is finished.  We don't need the lock anymore.  We
2863 	 * do need to clean up the block groups in case of a transaction
2864 	 * abort.
2865 	 */
2866 	deleted_bgs = &trans->transaction->deleted_bgs;
2867 	list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
2868 		u64 trimmed = 0;
2869 
2870 		ret = -EROFS;
2871 		if (!TRANS_ABORTED(trans))
2872 			ret = btrfs_discard_extent(fs_info,
2873 						   block_group->start,
2874 						   block_group->length,
2875 						   &trimmed);
2876 
2877 		list_del_init(&block_group->bg_list);
2878 		btrfs_unfreeze_block_group(block_group);
2879 		btrfs_put_block_group(block_group);
2880 
2881 		if (ret) {
2882 			const char *errstr = btrfs_decode_error(ret);
2883 			btrfs_warn(fs_info,
2884 			   "discard failed while removing blockgroup: errno=%d %s",
2885 				   ret, errstr);
2886 		}
2887 	}
2888 
2889 	return 0;
2890 }
2891 
2892 /*
2893  * Parse an extent item's inline extents looking for a simple quotas owner ref.
2894  *
2895  * @fs_info:	the btrfs_fs_info for this mount
2896  * @leaf:	a leaf in the extent tree containing the extent item
2897  * @slot:	the slot in the leaf where the extent item is found
2898  *
2899  * Returns the objectid of the root that originally allocated the extent item
2900  * if the inline owner ref is expected and present, otherwise 0.
2901  *
2902  * If an extent item has an owner ref item, it will be the first inline ref
2903  * item. Therefore the logic is to check whether there are any inline ref
2904  * items, then check the type of the first one.
2905  */
2906 u64 btrfs_get_extent_owner_root(struct btrfs_fs_info *fs_info,
2907 				struct extent_buffer *leaf, int slot)
2908 {
2909 	struct btrfs_extent_item *ei;
2910 	struct btrfs_extent_inline_ref *iref;
2911 	struct btrfs_extent_owner_ref *oref;
2912 	unsigned long ptr;
2913 	unsigned long end;
2914 	int type;
2915 
2916 	if (!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA))
2917 		return 0;
2918 
2919 	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
2920 	ptr = (unsigned long)(ei + 1);
2921 	end = (unsigned long)ei + btrfs_item_size(leaf, slot);
2922 
2923 	/* No inline ref items of any kind, can't check type. */
2924 	if (ptr == end)
2925 		return 0;
2926 
2927 	iref = (struct btrfs_extent_inline_ref *)ptr;
2928 	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
2929 
2930 	/* We found an owner ref, get the root out of it. */
2931 	if (type == BTRFS_EXTENT_OWNER_REF_KEY) {
2932 		oref = (struct btrfs_extent_owner_ref *)(&iref->offset);
2933 		return btrfs_extent_owner_ref_root_id(leaf, oref);
2934 	}
2935 
2936 	/* We have inline refs, but not an owner ref. */
2937 	return 0;
2938 }
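
/*
 * Illustrative layout sketch, not part of the original file, with made-up
 * numbers: a data extent written under simple quotas. The owner ref sits
 * first among the inline refs, which is the property the check above
 * relies on:
 *
 *	item 0 key (13631488 EXTENT_ITEM 1048576)
 *		refs 1 gen 6 flags DATA
 *		extent owner root 256
 *		extent data backref root FS_TREE objectid 257 offset 0 count 1
 */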
2939 
2940 static int do_free_extent_accounting(struct btrfs_trans_handle *trans,
2941 				     u64 bytenr, struct btrfs_squota_delta *delta)
2942 {
2943 	int ret;
2944 	u64 num_bytes = delta->num_bytes;
2945 
2946 	if (delta->is_data) {
2947 		struct btrfs_root *csum_root;
2948 
2949 		csum_root = btrfs_csum_root(trans->fs_info, bytenr);
2950 		ret = btrfs_del_csums(trans, csum_root, bytenr, num_bytes);
2951 		if (ret) {
2952 			btrfs_abort_transaction(trans, ret);
2953 			return ret;
2954 		}
2955 
2956 		ret = btrfs_delete_raid_extent(trans, bytenr, num_bytes);
2957 		if (ret) {
2958 			btrfs_abort_transaction(trans, ret);
2959 			return ret;
2960 		}
2961 	}
2962 
2963 	ret = btrfs_record_squota_delta(trans->fs_info, delta);
2964 	if (ret) {
2965 		btrfs_abort_transaction(trans, ret);
2966 		return ret;
2967 	}
2968 
2969 	ret = add_to_free_space_tree(trans, bytenr, num_bytes);
2970 	if (ret) {
2971 		btrfs_abort_transaction(trans, ret);
2972 		return ret;
2973 	}
2974 
2975 	ret = btrfs_update_block_group(trans, bytenr, num_bytes, false);
2976 	if (ret)
2977 		btrfs_abort_transaction(trans, ret);
2978 
2979 	return ret;
2980 }
2981 
2982 #define abort_and_dump(trans, path, fmt, args...)	\
2983 ({							\
2984 	btrfs_abort_transaction(trans, -EUCLEAN);	\
2985 	btrfs_print_leaf(path->nodes[0]);		\
2986 	btrfs_crit(trans->fs_info, fmt, ##args);	\
2987 })
2988 
2989 /*
2990  * Drop one or more refs of @node.
2991  *
2992  * 1. Locate the extent refs.
2993  *    It's either inline in EXTENT/METADATA_ITEM or in keyed SHARED_* item.
2994  *    Locate it, then reduce the refs number or remove the ref line completely.
2995  *
2996  * 2. Update the refs count in EXTENT/METADATA_ITEM
2997  *
2998  * Inline backref case:
2999  *
3000  * in extent tree we have:
3001  *
3002  * 	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82
3003  *		refs 2 gen 6 flags DATA
3004  *		extent data backref root FS_TREE objectid 258 offset 0 count 1
3005  *		extent data backref root FS_TREE objectid 257 offset 0 count 1
3006  *
3007  * This function gets called with:
3008  *
3009  *    node->bytenr = 13631488
3010  *    node->num_bytes = 1048576
3011  *    root_objectid = FS_TREE
3012  *    owner_objectid = 257
3013  *    owner_offset = 0
3014  *    refs_to_drop = 1
3015  *
3016  * Then we should get something like:
3017  *
3018  * 	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82
3019  *		refs 1 gen 6 flags DATA
3020  *		extent data backref root FS_TREE objectid 258 offset 0 count 1
3021  *
3022  * Keyed backref case:
3023  *
3024  * in extent tree we have:
3025  *
3026  *	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24
3027  *		refs 754 gen 6 flags DATA
3028  *	[...]
3029  *	item 2 key (13631488 EXTENT_DATA_REF <HASH>) itemoff 3915 itemsize 28
3030  *		extent data backref root FS_TREE objectid 866 offset 0 count 1
3031  *
3032  * This function gets called with:
3033  *
3034  *    node->bytenr = 13631488
3035  *    node->num_bytes = 1048576
3036  *    root_objectid = FS_TREE
3037  *    owner_objectid = 866
3038  *    owner_offset = 0
3039  *    refs_to_drop = 1
3040  *
3041  * Then we should get something like:
3042  *
3043  *	item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24
3044  *		refs 753 gen 6 flags DATA
3045  *
3046  * And that (13631488 EXTENT_DATA_REF <HASH>) gets removed.
3047  */
3048 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
3049 			       struct btrfs_delayed_ref_head *href,
3050 			       struct btrfs_delayed_ref_node *node,
3051 			       struct btrfs_delayed_extent_op *extent_op)
3052 {
3053 	struct btrfs_fs_info *info = trans->fs_info;
3054 	struct btrfs_key key;
3055 	struct btrfs_path *path;
3056 	struct btrfs_root *extent_root;
3057 	struct extent_buffer *leaf;
3058 	struct btrfs_extent_item *ei;
3059 	struct btrfs_extent_inline_ref *iref;
3060 	int ret;
3061 	int is_data;
3062 	int extent_slot = 0;
3063 	int found_extent = 0;
3064 	int num_to_del = 1;
3065 	int refs_to_drop = node->ref_mod;
3066 	u32 item_size;
3067 	u64 refs;
3068 	u64 bytenr = node->bytenr;
3069 	u64 num_bytes = node->num_bytes;
3070 	u64 owner_objectid = btrfs_delayed_ref_owner(node);
3071 	u64 owner_offset = btrfs_delayed_ref_offset(node);
3072 	bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);
3073 	u64 delayed_ref_root = href->owning_root;
3074 
3075 	extent_root = btrfs_extent_root(info, bytenr);
3076 	ASSERT(extent_root);
3077 
3078 	path = btrfs_alloc_path();
3079 	if (!path)
3080 		return -ENOMEM;
3081 
3082 	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
3083 
3084 	if (!is_data && refs_to_drop != 1) {
3085 		btrfs_crit(info,
3086 "invalid refs_to_drop, dropping more than 1 refs for tree block %llu refs_to_drop %u",
3087 			   node->bytenr, refs_to_drop);
3088 		ret = -EINVAL;
3089 		btrfs_abort_transaction(trans, ret);
3090 		goto out;
3091 	}
3092 
3093 	if (is_data)
3094 		skinny_metadata = false;
3095 
3096 	ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes,
3097 				    node->parent, node->ref_root, owner_objectid,
3098 				    owner_offset);
3099 	if (ret == 0) {
3100 		/*
3101 		 * Either the inline backref or the SHARED_DATA_REF/
3102 		 * SHARED_BLOCK_REF was found.
3103 		 *
3104 		 * Here is a quick path to locate the EXTENT/METADATA_ITEM.
3105 		 * It's possible the EXTENT/METADATA_ITEM is near the current slot.
3106 		 */
3107 		extent_slot = path->slots[0];
3108 		while (extent_slot >= 0) {
3109 			btrfs_item_key_to_cpu(path->nodes[0], &key,
3110 					      extent_slot);
3111 			if (key.objectid != bytenr)
3112 				break;
3113 			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3114 			    key.offset == num_bytes) {
3115 				found_extent = 1;
3116 				break;
3117 			}
3118 			if (key.type == BTRFS_METADATA_ITEM_KEY &&
3119 			    key.offset == owner_objectid) {
3120 				found_extent = 1;
3121 				break;
3122 			}
3123 
3124 			/* Quick path didn't find the EXTENT/METADATA_ITEM */
3125 			if (path->slots[0] - extent_slot > 5)
3126 				break;
3127 			extent_slot--;
3128 		}
3129 
3130 		if (!found_extent) {
3131 			if (iref) {
3132 				abort_and_dump(trans, path,
3133 "invalid iref slot %u, no EXTENT/METADATA_ITEM found but has inline extent ref",
3134 					   path->slots[0]);
3135 				ret = -EUCLEAN;
3136 				goto out;
3137 			}
3138 			/* Must be SHARED_* item, remove the backref first */
3139 			ret = remove_extent_backref(trans, extent_root, path,
3140 						    NULL, refs_to_drop, is_data);
3141 			if (ret) {
3142 				btrfs_abort_transaction(trans, ret);
3143 				goto out;
3144 			}
3145 			btrfs_release_path(path);
3146 
3147 			/* Slow path to locate EXTENT/METADATA_ITEM */
3148 			key.objectid = bytenr;
3149 			key.type = BTRFS_EXTENT_ITEM_KEY;
3150 			key.offset = num_bytes;
3151 
3152 			if (!is_data && skinny_metadata) {
3153 				key.type = BTRFS_METADATA_ITEM_KEY;
3154 				key.offset = owner_objectid;
3155 			}
3156 
3157 			ret = btrfs_search_slot(trans, extent_root,
3158 						&key, path, -1, 1);
3159 			if (ret > 0 && skinny_metadata && path->slots[0]) {
3160 				/*
3161 				 * Couldn't find our skinny metadata item,
3162 				 * see if we have ye olde extent item.
3163 				 */
3164 				path->slots[0]--;
3165 				btrfs_item_key_to_cpu(path->nodes[0], &key,
3166 						      path->slots[0]);
3167 				if (key.objectid == bytenr &&
3168 				    key.type == BTRFS_EXTENT_ITEM_KEY &&
3169 				    key.offset == num_bytes)
3170 					ret = 0;
3171 			}
3172 
3173 			if (ret > 0 && skinny_metadata) {
3174 				skinny_metadata = false;
3175 				key.objectid = bytenr;
3176 				key.type = BTRFS_EXTENT_ITEM_KEY;
3177 				key.offset = num_bytes;
3178 				btrfs_release_path(path);
3179 				ret = btrfs_search_slot(trans, extent_root,
3180 							&key, path, -1, 1);
3181 			}
3182 
3183 			if (ret) {
3184 				if (ret > 0)
3185 					btrfs_print_leaf(path->nodes[0]);
3186 				btrfs_err(info,
3187 			"umm, got %d back from search, was looking for %llu, slot %d",
3188 					  ret, bytenr, path->slots[0]);
3189 			}
3190 			if (ret < 0) {
3191 				btrfs_abort_transaction(trans, ret);
3192 				goto out;
3193 			}
3194 			extent_slot = path->slots[0];
3195 		}
3196 	} else if (WARN_ON(ret == -ENOENT)) {
3197 		abort_and_dump(trans, path,
3198 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu slot %d",
3199 			       bytenr, node->parent, node->ref_root, owner_objectid,
3200 			       owner_offset, path->slots[0]);
3201 		goto out;
3202 	} else {
3203 		btrfs_abort_transaction(trans, ret);
3204 		goto out;
3205 	}
3206 
3207 	leaf = path->nodes[0];
3208 	item_size = btrfs_item_size(leaf, extent_slot);
3209 	if (unlikely(item_size < sizeof(*ei))) {
3210 		ret = -EUCLEAN;
3211 		btrfs_err(trans->fs_info,
3212 			  "unexpected extent item size, has %u expect >= %zu",
3213 			  item_size, sizeof(*ei));
3214 		btrfs_abort_transaction(trans, ret);
3215 		goto out;
3216 	}
3217 	ei = btrfs_item_ptr(leaf, extent_slot,
3218 			    struct btrfs_extent_item);
3219 	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
3220 	    key.type == BTRFS_EXTENT_ITEM_KEY) {
3221 		struct btrfs_tree_block_info *bi;
3222 
3223 		if (item_size < sizeof(*ei) + sizeof(*bi)) {
3224 			abort_and_dump(trans, path,
3225 "invalid extent item size for key (%llu, %u, %llu) slot %u owner %llu, has %u expect >= %zu",
3226 				       key.objectid, key.type, key.offset,
3227 				       path->slots[0], owner_objectid, item_size,
3228 				       sizeof(*ei) + sizeof(*bi));
3229 			ret = -EUCLEAN;
3230 			goto out;
3231 		}
3232 		bi = (struct btrfs_tree_block_info *)(ei + 1);
3233 		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
3234 	}
3235 
3236 	refs = btrfs_extent_refs(leaf, ei);
3237 	if (refs < refs_to_drop) {
3238 		abort_and_dump(trans, path,
3239 		"trying to drop %d refs but we only have %llu for bytenr %llu slot %u",
3240 			       refs_to_drop, refs, bytenr, path->slots[0]);
3241 		ret = -EUCLEAN;
3242 		goto out;
3243 	}
3244 	refs -= refs_to_drop;
3245 
3246 	if (refs > 0) {
3247 		if (extent_op)
3248 			__run_delayed_extent_op(extent_op, leaf, ei);
3249 		/*
3250 		 * In the case of an inline back ref, the reference count
3251 		 * will be updated by remove_extent_backref().
3252 		 */
3253 		if (iref) {
3254 			if (!found_extent) {
3255 				abort_and_dump(trans, path,
3256 "invalid iref, got inlined extent ref but no EXTENT/METADATA_ITEM found, slot %u",
3257 					       path->slots[0]);
3258 				ret = -EUCLEAN;
3259 				goto out;
3260 			}
3261 		} else {
3262 			btrfs_set_extent_refs(leaf, ei, refs);
3263 		}
3264 		if (found_extent) {
3265 			ret = remove_extent_backref(trans, extent_root, path,
3266 						    iref, refs_to_drop, is_data);
3267 			if (ret) {
3268 				btrfs_abort_transaction(trans, ret);
3269 				goto out;
3270 			}
3271 		}
3272 	} else {
3273 		struct btrfs_squota_delta delta = {
3274 			.root = delayed_ref_root,
3275 			.num_bytes = num_bytes,
3276 			.is_data = is_data,
3277 			.is_inc = false,
3278 			.generation = btrfs_extent_generation(leaf, ei),
3279 		};
3280 
3281 		/* In this branch refs == 1 */
3282 		if (found_extent) {
3283 			if (is_data && refs_to_drop !=
3284 			    extent_data_ref_count(path, iref)) {
3285 				abort_and_dump(trans, path,
3286 		"invalid refs_to_drop, current refs %u refs_to_drop %u slot %u",
3287 					       extent_data_ref_count(path, iref),
3288 					       refs_to_drop, path->slots[0]);
3289 				ret = -EUCLEAN;
3290 				goto out;
3291 			}
3292 			if (iref) {
3293 				if (path->slots[0] != extent_slot) {
3294 					abort_and_dump(trans, path,
3295 "invalid iref, extent item key (%llu %u %llu) slot %u doesn't have wanted iref",
3296 						       key.objectid, key.type,
3297 						       key.offset, path->slots[0]);
3298 					ret = -EUCLEAN;
3299 					goto out;
3300 				}
3301 			} else {
3302 				/*
3303 				 * No inline ref; we must be at a SHARED_* item,
3304 				 * and since it's a single ref, it must be:
3305 				 * |	extent_slot	  ||extent_slot + 1|
3306 				 * [ EXTENT/METADATA_ITEM ][ SHARED_* ITEM ]
3307 				 */
3308 				if (path->slots[0] != extent_slot + 1) {
3309 					abort_and_dump(trans, path,
3310 	"invalid SHARED_* item slot %u, previous item is not EXTENT/METADATA_ITEM",
3311 						       path->slots[0]);
3312 					ret = -EUCLEAN;
3313 					goto out;
3314 				}
3315 				path->slots[0] = extent_slot;
3316 				num_to_del = 2;
3317 			}
3318 		}
3319 		/*
3320 		 * We can't infer the data owner from the delayed ref, so we need
3321 		 * to try to get it from the owning ref item.
3322 		 *
3323 		 * If it is not present, then that extent was not written under
3324 		 * simple quotas mode, so we don't need to account for its deletion.
3325 		 */
3326 		if (is_data)
3327 			delta.root = btrfs_get_extent_owner_root(trans->fs_info,
3328 								 leaf, extent_slot);
3329 
3330 		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
3331 				      num_to_del);
3332 		if (ret) {
3333 			btrfs_abort_transaction(trans, ret);
3334 			goto out;
3335 		}
3336 		btrfs_release_path(path);
3337 
3338 		ret = do_free_extent_accounting(trans, bytenr, &delta);
3339 	}
3340 	btrfs_release_path(path);
3341 
3342 out:
3343 	btrfs_free_path(path);
3344 	return ret;
3345 }
3346 
3347 /*
3348  * when we free a block, it is possible (and likely) that we free the last
3349  * delayed ref for that extent as well.  This searches the delayed ref tree for
3350  * a given extent, and if there are no other delayed refs to be processed, it
3351  * removes it from the tree.
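 *
 * Returns 1 if the head was removed and the caller must still free the
 * reserved space (head->must_insert_reserved was set), 0 otherwise.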
3352  */
3353 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
3354 				      u64 bytenr)
3355 {
3356 	struct btrfs_fs_info *fs_info = trans->fs_info;
3357 	struct btrfs_delayed_ref_head *head;
3358 	struct btrfs_delayed_ref_root *delayed_refs;
3359 	int ret = 0;
3360 
3361 	delayed_refs = &trans->transaction->delayed_refs;
3362 	spin_lock(&delayed_refs->lock);
3363 	head = btrfs_find_delayed_ref_head(fs_info, delayed_refs, bytenr);
3364 	if (!head)
3365 		goto out_delayed_unlock;
3366 
3367 	spin_lock(&head->lock);
3368 	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
3369 		goto out;
3370 
3371 	if (cleanup_extent_op(head) != NULL)
3372 		goto out;
3373 
3374 	/*
3375 	 * waiting for the lock here would deadlock.  If someone else has it
3376 	 * locked, they are already in the process of dropping it anyway.
3377 	 */
3378 	if (!mutex_trylock(&head->mutex))
3379 		goto out;
3380 
3381 	btrfs_delete_ref_head(fs_info, delayed_refs, head);
3382 	head->processing = false;
3383 
3384 	spin_unlock(&head->lock);
3385 	spin_unlock(&delayed_refs->lock);
3386 
3387 	BUG_ON(head->extent_op);
3388 	if (head->must_insert_reserved)
3389 		ret = 1;
3390 
3391 	btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
3392 	mutex_unlock(&head->mutex);
3393 	btrfs_put_delayed_ref_head(head);
3394 	return ret;
3395 out:
3396 	spin_unlock(&head->lock);
3397 
3398 out_delayed_unlock:
3399 	spin_unlock(&delayed_refs->lock);
3400 	return 0;
3401 }
3402 
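/*
 * Free a tree block.
 *
 * Unless the block belongs to a log tree, queue a delayed ref to drop it.
 * When @last_ref is set and the block was allocated in the current
 * transaction, also try to release it right away: if it was never written
 * (and no tree mod log user or zoned-mode restriction prevents reuse) it
 * goes straight back to free space, otherwise it is pinned until the
 * transaction commits.
 */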
3403 int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
3404 			  u64 root_id,
3405 			  struct extent_buffer *buf,
3406 			  u64 parent, int last_ref)
3407 {
3408 	struct btrfs_fs_info *fs_info = trans->fs_info;
3409 	struct btrfs_block_group *bg;
3410 	int ret;
3411 
3412 	if (root_id != BTRFS_TREE_LOG_OBJECTID) {
3413 		struct btrfs_ref generic_ref = {
3414 			.action = BTRFS_DROP_DELAYED_REF,
3415 			.bytenr = buf->start,
3416 			.num_bytes = buf->len,
3417 			.parent = parent,
3418 			.owning_root = btrfs_header_owner(buf),
3419 			.ref_root = root_id,
3420 		};
3421 
3422 		/*
3423 		 * Assert that the extent buffer is not cleared due to
3424 		 * EXTENT_BUFFER_ZONED_ZEROOUT. Please refer to
3425 		 * btrfs_clear_buffer_dirty() and btree_csum_one_bio() for
3426 		 * details.
3427 		 */
3428 		ASSERT(btrfs_header_bytenr(buf) != 0);
3429 
3430 		btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf), 0, false);
3431 		btrfs_ref_tree_mod(fs_info, &generic_ref);
3432 		ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
3433 		if (ret < 0)
3434 			return ret;
3435 	}
3436 
3437 	if (!last_ref)
3438 		return 0;
3439 
3440 	if (btrfs_header_generation(buf) != trans->transid)
3441 		goto out;
3442 
3443 	if (root_id != BTRFS_TREE_LOG_OBJECTID) {
3444 		ret = check_ref_cleanup(trans, buf->start);
3445 		if (!ret)
3446 			goto out;
3447 	}
3448 
3449 	bg = btrfs_lookup_block_group(fs_info, buf->start);
3450 
3451 	if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
3452 		pin_down_extent(trans, bg, buf->start, buf->len, 1);
3453 		btrfs_put_block_group(bg);
3454 		goto out;
3455 	}
3456 
3457 	/*
3458 	 * If there are tree mod log users we may have recorded mod log
3459 	 * operations for this node.  If we re-allocate this node we
3460 	 * could replay operations on this node that happened when it
3461 	 * existed in a completely different root.  For example if it
3462 	 * was part of root A, then was reallocated to root B, and we
3463 	 * are doing a btrfs_old_search_slot(root b), we could replay
3464 	 * operations that happened when the block was part of root A,
3465 	 * giving us an inconsistent view of the btree.
3466 	 *
3467 	 * We are safe from races here because at this point no other
3468 	 * node or root points to this extent buffer, so if after this
3469 	 * check a new tree mod log user joins we will not have an
3470 	 * existing log of operations on this node that we have to
3471 	 * contend with.
3472 	 */
3473 
3474 	if (test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)
3475 		     || btrfs_is_zoned(fs_info)) {
3476 		pin_down_extent(trans, bg, buf->start, buf->len, 1);
3477 		btrfs_put_block_group(bg);
3478 		goto out;
3479 	}
3480 
3481 	WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
3482 
3483 	btrfs_add_free_space(bg, buf->start, buf->len);
3484 	btrfs_free_reserved_bytes(bg, buf->len, 0);
3485 	btrfs_put_block_group(bg);
3486 	trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
3487 
3488 out:
3489 
3490 	/*
3491 	 * Deleting the buffer, clear the corrupt flag since it doesn't
3492 	 * matter anymore.
3493 	 */
3494 	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
3495 	return 0;
3496 }
3497 
3498 /* Can return -ENOMEM */
3499 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
3500 {
3501 	struct btrfs_fs_info *fs_info = trans->fs_info;
3502 	int ret;
3503 
3504 	if (btrfs_is_testing(fs_info))
3505 		return 0;
3506 
3507 	/*
3508 	 * tree log blocks never actually go into the extent allocation
3509 	 * tree, just update pinning info and exit early.
3510 	 */
3511 	if (ref->ref_root == BTRFS_TREE_LOG_OBJECTID) {
3512 		btrfs_pin_extent(trans, ref->bytenr, ref->num_bytes, 1);
3513 		ret = 0;
3514 	} else if (ref->type == BTRFS_REF_METADATA) {
3515 		ret = btrfs_add_delayed_tree_ref(trans, ref, NULL);
3516 	} else {
3517 		ret = btrfs_add_delayed_data_ref(trans, ref, 0);
3518 	}
3519 
3520 	if (ref->ref_root != BTRFS_TREE_LOG_OBJECTID)
3521 		btrfs_ref_tree_mod(fs_info, ref);
3522 
3523 	return ret;
3524 }
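
/*
 * Illustrative sketch only (bytenr, num_bytes, root_id, inode_objectid and
 * file_offset are placeholders): dropping one reference to a data extent
 * mirrors how the generic_ref is built in btrfs_free_tree_block() above:
 *
 *	struct btrfs_ref ref = {
 *		.action = BTRFS_DROP_DELAYED_REF,
 *		.bytenr = bytenr,
 *		.num_bytes = num_bytes,
 *		.owning_root = root_id,
 *		.ref_root = root_id,
 *	};
 *
 *	btrfs_init_data_ref(&ref, inode_objectid, file_offset, 0, false);
 *	ret = btrfs_free_extent(trans, &ref);
 */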
3525 
3526 enum btrfs_loop_type {
3527 	/*
3528 	 * Start caching block groups but do not wait for progress or for them
3529 	 * to be done.
3530 	 */
3531 	LOOP_CACHING_NOWAIT,
3532 
3533 	/*
3534 	 * If the block group isn't cached, wait until its free_space is at
3535 	 * least the amount of space we're looking for.
3536 	 */
3537 	LOOP_CACHING_WAIT,
3538 
3539 	/*
3540 	 * Allow allocations to happen from block groups that do not yet have a
3541 	 * size classification.
3542 	 */
3543 	LOOP_UNSET_SIZE_CLASS,
3544 
3545 	/*
3546 	 * Allocate a chunk and then retry the allocation.
3547 	 */
3548 	LOOP_ALLOC_CHUNK,
3549 
3550 	/*
3551 	 * Ignore the size class restrictions for this allocation.
3552 	 */
3553 	LOOP_WRONG_SIZE_CLASS,
3554 
3555 	/*
3556 	 * Ignore the empty size, only try to allocate the number of bytes
3557 	 * needed for this allocation.
3558 	 */
3559 	LOOP_NO_EMPTY_SIZE,
3560 };
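
/*
 * Illustrative progression: when every stage comes up empty, a full search
 * advances through the stages in declaration order,
 *
 *	LOOP_CACHING_NOWAIT -> LOOP_CACHING_WAIT -> LOOP_UNSET_SIZE_CLASS ->
 *	LOOP_ALLOC_CHUNK -> LOOP_WRONG_SIZE_CLASS -> LOOP_NO_EMPTY_SIZE
 *
 * before find_free_extent_update_loop() finally gives up with -ENOSPC.
 */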
3561 
3562 static inline void
3563 btrfs_lock_block_group(struct btrfs_block_group *cache,
3564 		       int delalloc)
3565 {
3566 	if (delalloc)
3567 		down_read(&cache->data_rwsem);
3568 }
3569 
3570 static inline void btrfs_grab_block_group(struct btrfs_block_group *cache,
3571 		       int delalloc)
3572 {
3573 	btrfs_get_block_group(cache);
3574 	if (delalloc)
3575 		down_read(&cache->data_rwsem);
3576 }
3577 
3578 static struct btrfs_block_group *btrfs_lock_cluster(
3579 		   struct btrfs_block_group *block_group,
3580 		   struct btrfs_free_cluster *cluster,
3581 		   int delalloc)
3582 	__acquires(&cluster->refill_lock)
3583 {
3584 	struct btrfs_block_group *used_bg = NULL;
3585 
3586 	spin_lock(&cluster->refill_lock);
3587 	while (1) {
3588 		used_bg = cluster->block_group;
3589 		if (!used_bg)
3590 			return NULL;
3591 
3592 		if (used_bg == block_group)
3593 			return used_bg;
3594 
3595 		btrfs_get_block_group(used_bg);
3596 
3597 		if (!delalloc)
3598 			return used_bg;
3599 
3600 		if (down_read_trylock(&used_bg->data_rwsem))
3601 			return used_bg;
3602 
3603 		spin_unlock(&cluster->refill_lock);
3604 
3605 		/* We should only have one level of nesting. */
3606 		down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
3607 
3608 		spin_lock(&cluster->refill_lock);
3609 		if (used_bg == cluster->block_group)
3610 			return used_bg;
3611 
3612 		up_read(&used_bg->data_rwsem);
3613 		btrfs_put_block_group(used_bg);
3614 	}
3615 }
3616 
3617 static inline void
3618 btrfs_release_block_group(struct btrfs_block_group *cache,
3619 			 int delalloc)
3620 {
3621 	if (delalloc)
3622 		up_read(&cache->data_rwsem);
3623 	btrfs_put_block_group(cache);
3624 }
3625 
3626 /*
3627  * Helper function for find_free_extent().
3628  *
3629  * Return -ENOENT to inform the caller that we need to fall back to unclustered mode.
3630  * Return >0 to inform the caller that we found nothing.
3631  * Return 0 when we have found a location and set ffe_ctl->found_offset.
3632  */
3633 static int find_free_extent_clustered(struct btrfs_block_group *bg,
3634 				      struct find_free_extent_ctl *ffe_ctl,
3635 				      struct btrfs_block_group **cluster_bg_ret)
3636 {
3637 	struct btrfs_block_group *cluster_bg;
3638 	struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
3639 	u64 aligned_cluster;
3640 	u64 offset;
3641 	int ret;
3642 
3643 	cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc);
3644 	if (!cluster_bg)
3645 		goto refill_cluster;
3646 	if (cluster_bg != bg && (cluster_bg->ro ||
3647 	    !block_group_bits(cluster_bg, ffe_ctl->flags)))
3648 		goto release_cluster;
3649 
3650 	offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
3651 			ffe_ctl->num_bytes, cluster_bg->start,
3652 			&ffe_ctl->max_extent_size);
3653 	if (offset) {
3654 		/* We have a block, we're done */
3655 		spin_unlock(&last_ptr->refill_lock);
3656 		trace_btrfs_reserve_extent_cluster(cluster_bg, ffe_ctl);
3657 		*cluster_bg_ret = cluster_bg;
3658 		ffe_ctl->found_offset = offset;
3659 		return 0;
3660 	}
3661 	WARN_ON(last_ptr->block_group != cluster_bg);
3662 
3663 release_cluster:
3664 	/*
3665 	 * If we are on LOOP_NO_EMPTY_SIZE, we can't set up a new cluster, so
3666 	 * let's just skip it and let the allocator find whatever block it can
3667 	 * find. If we reach this point, we will have tried the cluster
3668 	 * allocator plenty of times and not have found anything, so we are
3669 	 * likely way too fragmented for the clustering stuff to find anything.
3670 	 *
3671 	 * However, if the cluster is taken from the current block group,
3672 	 * release the cluster first, so that we stand a better chance of
3673 	 * succeeding in the unclustered allocation.
3674 	 */
3675 	if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) {
3676 		spin_unlock(&last_ptr->refill_lock);
3677 		btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
3678 		return -ENOENT;
3679 	}
3680 
3681 	/* This cluster didn't work out, free it and start over */
3682 	btrfs_return_cluster_to_free_space(NULL, last_ptr);
3683 
3684 	if (cluster_bg != bg)
3685 		btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
3686 
3687 refill_cluster:
3688 	if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) {
3689 		spin_unlock(&last_ptr->refill_lock);
3690 		return -ENOENT;
3691 	}
3692 
3693 	aligned_cluster = max_t(u64,
3694 			ffe_ctl->empty_cluster + ffe_ctl->empty_size,
3695 			bg->full_stripe_len);
3696 	ret = btrfs_find_space_cluster(bg, last_ptr, ffe_ctl->search_start,
3697 			ffe_ctl->num_bytes, aligned_cluster);
3698 	if (ret == 0) {
3699 		/* Now pull our allocation out of this cluster */
3700 		offset = btrfs_alloc_from_cluster(bg, last_ptr,
3701 				ffe_ctl->num_bytes, ffe_ctl->search_start,
3702 				&ffe_ctl->max_extent_size);
3703 		if (offset) {
3704 			/* We found one, proceed */
3705 			spin_unlock(&last_ptr->refill_lock);
3706 			ffe_ctl->found_offset = offset;
3707 			trace_btrfs_reserve_extent_cluster(bg, ffe_ctl);
3708 			return 0;
3709 		}
3710 	}
3711 	/*
3712 	 * At this point we either didn't find a cluster or we weren't able to
3713 	 * allocate a block from our cluster.  Free the cluster we've been
3714 	 * trying to use, and go to the next block group.
3715 	 */
3716 	btrfs_return_cluster_to_free_space(NULL, last_ptr);
3717 	spin_unlock(&last_ptr->refill_lock);
3718 	return 1;
3719 }
3720 
3721 /*
3722  * Return >0 to inform the caller that we found nothing.
3723  * Return 0 when we have found a free extent and set ffe_ctl->found_offset.
3724  */
3725 static int find_free_extent_unclustered(struct btrfs_block_group *bg,
3726 					struct find_free_extent_ctl *ffe_ctl)
3727 {
3728 	struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
3729 	u64 offset;
3730 
3731 	/*
3732 	 * We are doing an unclustered allocation, set the fragmented flag so
3733 	 * we don't bother trying to set up a cluster again until we get more
3734 	 * space.
3735 	 */
3736 	if (unlikely(last_ptr)) {
3737 		spin_lock(&last_ptr->lock);
3738 		last_ptr->fragmented = 1;
3739 		spin_unlock(&last_ptr->lock);
3740 	}
3741 	if (ffe_ctl->cached) {
3742 		struct btrfs_free_space_ctl *free_space_ctl;
3743 
3744 		free_space_ctl = bg->free_space_ctl;
3745 		spin_lock(&free_space_ctl->tree_lock);
3746 		if (free_space_ctl->free_space <
3747 		    ffe_ctl->num_bytes + ffe_ctl->empty_cluster +
3748 		    ffe_ctl->empty_size) {
3749 			ffe_ctl->total_free_space = max_t(u64,
3750 					ffe_ctl->total_free_space,
3751 					free_space_ctl->free_space);
3752 			spin_unlock(&free_space_ctl->tree_lock);
3753 			return 1;
3754 		}
3755 		spin_unlock(&free_space_ctl->tree_lock);
3756 	}
3757 
3758 	offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start,
3759 			ffe_ctl->num_bytes, ffe_ctl->empty_size,
3760 			&ffe_ctl->max_extent_size);
3761 	if (!offset)
3762 		return 1;
3763 	ffe_ctl->found_offset = offset;
3764 	return 0;
3765 }
3766 
3767 static int do_allocation_clustered(struct btrfs_block_group *block_group,
3768 				   struct find_free_extent_ctl *ffe_ctl,
3769 				   struct btrfs_block_group **bg_ret)
3770 {
3771 	int ret;
3772 
3773 	/* We want to try and use the cluster allocator, so let's look there */
3774 	if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) {
3775 		ret = find_free_extent_clustered(block_group, ffe_ctl, bg_ret);
3776 		if (ret >= 0)
3777 			return ret;
3778 		/* ret == -ENOENT case falls through */
3779 	}
3780 
3781 	return find_free_extent_unclustered(block_group, ffe_ctl);
3782 }
3783 
3784 /*
3785  * Tree-log block group locking
3786  * ============================
3787  *
3788  * fs_info::treelog_bg_lock protects the fs_info::treelog_bg which
3789  * indicates the starting address of a block group, which is reserved only
3790  * for tree-log metadata.
3791  *
3792  * Lock nesting
3793  * ============
3794  *
3795  * space_info::lock
3796  *   block_group::lock
3797  *     fs_info::treelog_bg_lock
3798  */
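
/*
 * A minimal sketch of that nesting, as done in do_allocation_zoned():
 *
 *	spin_lock(&space_info->lock);
 *	spin_lock(&block_group->lock);
 *	spin_lock(&fs_info->treelog_bg_lock);
 *	...
 *	spin_unlock(&fs_info->treelog_bg_lock);
 *	spin_unlock(&block_group->lock);
 *	spin_unlock(&space_info->lock);
 */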
3799 
3800 /*
3801  * Simple allocator for sequential-only block groups. It allows only sequential
3802  * allocation. No need to play with trees. This function also reserves the
3803  * bytes as in btrfs_add_reserved_bytes.
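 *
 * Returns 0 when ffe_ctl->found_offset is set and the bytes have been
 * reserved, or 1 when this block group cannot be used and the caller
 * should move on to the next one.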
3804  */
3805 static int do_allocation_zoned(struct btrfs_block_group *block_group,
3806 			       struct find_free_extent_ctl *ffe_ctl,
3807 			       struct btrfs_block_group **bg_ret)
3808 {
3809 	struct btrfs_fs_info *fs_info = block_group->fs_info;
3810 	struct btrfs_space_info *space_info = block_group->space_info;
3811 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3812 	u64 start = block_group->start;
3813 	u64 num_bytes = ffe_ctl->num_bytes;
3814 	u64 avail;
3815 	u64 bytenr = block_group->start;
3816 	u64 log_bytenr;
3817 	u64 data_reloc_bytenr;
3818 	int ret = 0;
3819 	bool skip = false;
3820 
3821 	ASSERT(btrfs_is_zoned(block_group->fs_info));
3822 
3823 	/*
3824 	 * Do not allow non-tree-log blocks in the dedicated tree-log block
3825 	 * group, and vice versa.
3826 	 */
3827 	spin_lock(&fs_info->treelog_bg_lock);
3828 	log_bytenr = fs_info->treelog_bg;
3829 	if (log_bytenr && ((ffe_ctl->for_treelog && bytenr != log_bytenr) ||
3830 			   (!ffe_ctl->for_treelog && bytenr == log_bytenr)))
3831 		skip = true;
3832 	spin_unlock(&fs_info->treelog_bg_lock);
3833 	if (skip)
3834 		return 1;
3835 
3836 	/*
3837 	 * Do not allow non-relocation blocks in the dedicated relocation block
3838 	 * group, and vice versa.
3839 	 */
3840 	spin_lock(&fs_info->relocation_bg_lock);
3841 	data_reloc_bytenr = fs_info->data_reloc_bg;
3842 	if (data_reloc_bytenr &&
3843 	    ((ffe_ctl->for_data_reloc && bytenr != data_reloc_bytenr) ||
3844 	     (!ffe_ctl->for_data_reloc && bytenr == data_reloc_bytenr)))
3845 		skip = true;
3846 	spin_unlock(&fs_info->relocation_bg_lock);
3847 	if (skip)
3848 		return 1;
3849 
3850 	/* Check RO and no space case before trying to activate it */
3851 	spin_lock(&block_group->lock);
3852 	if (block_group->ro || btrfs_zoned_bg_is_full(block_group)) {
3853 		ret = 1;
3854 		/*
3855 		 * May need to clear fs_info->{treelog,data_reloc}_bg.
3856 		 * Return the error after taking the locks.
3857 		 */
3858 	}
3859 	spin_unlock(&block_group->lock);
3860 
3861 	/* Metadata block group is activated at write time. */
3862 	if (!ret && (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
3863 	    !btrfs_zone_activate(block_group)) {
3864 		ret = 1;
3865 		/*
3866 		 * May need to clear fs_info->{treelog,data_reloc}_bg.
3867 		 * Return the error after taking the locks.
3868 		 */
3869 	}
3870 
3871 	spin_lock(&space_info->lock);
3872 	spin_lock(&block_group->lock);
3873 	spin_lock(&fs_info->treelog_bg_lock);
3874 	spin_lock(&fs_info->relocation_bg_lock);
3875 
3876 	if (ret)
3877 		goto out;
3878 
3879 	ASSERT(!ffe_ctl->for_treelog ||
3880 	       block_group->start == fs_info->treelog_bg ||
3881 	       fs_info->treelog_bg == 0);
3882 	ASSERT(!ffe_ctl->for_data_reloc ||
3883 	       block_group->start == fs_info->data_reloc_bg ||
3884 	       fs_info->data_reloc_bg == 0);
3885 
3886 	if (block_group->ro ||
3887 	    (!ffe_ctl->for_data_reloc &&
3888 	     test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))) {
3889 		ret = 1;
3890 		goto out;
3891 	}
3892 
3893 	/*
3894 	 * Do not allow currently using block group to be tree-log dedicated
3895 	 * block group.
3896 	 */
3897 	if (ffe_ctl->for_treelog && !fs_info->treelog_bg &&
3898 	    (block_group->used || block_group->reserved)) {
3899 		ret = 1;
3900 		goto out;
3901 	}
3902 
3903 	/*
3904 	 * Do not allow currently used block group to be the data relocation
3905 	 * dedicated block group.
3906 	 */
3907 	if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg &&
3908 	    (block_group->used || block_group->reserved)) {
3909 		ret = 1;
3910 		goto out;
3911 	}
3912 
3913 	WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity);
3914 	avail = block_group->zone_capacity - block_group->alloc_offset;
3915 	if (avail < num_bytes) {
3916 		if (ffe_ctl->max_extent_size < avail) {
3917 			/*
3918 			 * With the sequential allocator, free space is
3919 			 * always contiguous.
3920 			 */
3921 			ffe_ctl->max_extent_size = avail;
3922 			ffe_ctl->total_free_space = avail;
3923 		}
3924 		ret = 1;
3925 		goto out;
3926 	}
3927 
3928 	if (ffe_ctl->for_treelog && !fs_info->treelog_bg)
3929 		fs_info->treelog_bg = block_group->start;
3930 
3931 	if (ffe_ctl->for_data_reloc) {
3932 		if (!fs_info->data_reloc_bg)
3933 			fs_info->data_reloc_bg = block_group->start;
3934 		/*
3935 		 * Do not allow allocations from this block group, unless it is
3936 		 * for data relocation. Compared to increasing the ->ro, setting
3937 		 * the ->zoned_data_reloc_ongoing flag still allows nocow
3938 		 * writers to come in. See btrfs_inc_nocow_writers().
3939 		 *
3940 		 * We need to disable allocations here to avoid allocating a
3941 		 * regular (non-relocation) data extent. With a mix of relocation
3942 		 * extents and regular extents, we could dispatch WRITE commands
3943 		 * (for relocation extents) and ZONE APPEND commands (for
3944 		 * regular extents) at the same time to the same zone, which
3945 		 * would easily break the write pointer.
3946 		 *
3947 		 * Also, this flag prevents this block group from being zone finished.
3948 		 */
3949 		set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags);
3950 	}
3951 
3952 	ffe_ctl->found_offset = start + block_group->alloc_offset;
3953 	block_group->alloc_offset += num_bytes;
3954 	spin_lock(&ctl->tree_lock);
3955 	ctl->free_space -= num_bytes;
3956 	spin_unlock(&ctl->tree_lock);
3957 
3958 	/*
3959 	 * We do not check if found_offset is aligned to stripesize. The
3960 	 * address is rewritten anyway when using zone append writes.
3961 	 */
3962 
3963 	ffe_ctl->search_start = ffe_ctl->found_offset;
3964 
3965 out:
3966 	if (ret && ffe_ctl->for_treelog)
3967 		fs_info->treelog_bg = 0;
3968 	if (ret && ffe_ctl->for_data_reloc)
3969 		fs_info->data_reloc_bg = 0;
3970 	spin_unlock(&fs_info->relocation_bg_lock);
3971 	spin_unlock(&fs_info->treelog_bg_lock);
3972 	spin_unlock(&block_group->lock);
3973 	spin_unlock(&space_info->lock);
3974 	return ret;
3975 }
3976 
3977 static int do_allocation(struct btrfs_block_group *block_group,
3978 			 struct find_free_extent_ctl *ffe_ctl,
3979 			 struct btrfs_block_group **bg_ret)
3980 {
3981 	switch (ffe_ctl->policy) {
3982 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
3983 		return do_allocation_clustered(block_group, ffe_ctl, bg_ret);
3984 	case BTRFS_EXTENT_ALLOC_ZONED:
3985 		return do_allocation_zoned(block_group, ffe_ctl, bg_ret);
3986 	default:
3987 		BUG();
3988 	}
3989 }
3990 
3991 static void release_block_group(struct btrfs_block_group *block_group,
3992 				struct find_free_extent_ctl *ffe_ctl,
3993 				int delalloc)
3994 {
3995 	switch (ffe_ctl->policy) {
3996 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
3997 		ffe_ctl->retry_uncached = false;
3998 		break;
3999 	case BTRFS_EXTENT_ALLOC_ZONED:
4000 		/* Nothing to do */
4001 		break;
4002 	default:
4003 		BUG();
4004 	}
4005 
4006 	BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) !=
4007 	       ffe_ctl->index);
4008 	btrfs_release_block_group(block_group, delalloc);
4009 }
4010 
4011 static void found_extent_clustered(struct find_free_extent_ctl *ffe_ctl,
4012 				   struct btrfs_key *ins)
4013 {
4014 	struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
4015 
4016 	if (!ffe_ctl->use_cluster && last_ptr) {
4017 		spin_lock(&last_ptr->lock);
4018 		last_ptr->window_start = ins->objectid;
4019 		spin_unlock(&last_ptr->lock);
4020 	}
4021 }
4022 
4023 static void found_extent(struct find_free_extent_ctl *ffe_ctl,
4024 			 struct btrfs_key *ins)
4025 {
4026 	switch (ffe_ctl->policy) {
4027 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
4028 		found_extent_clustered(ffe_ctl, ins);
4029 		break;
4030 	case BTRFS_EXTENT_ALLOC_ZONED:
4031 		/* Nothing to do */
4032 		break;
4033 	default:
4034 		BUG();
4035 	}
4036 }
4037 
4038 static int can_allocate_chunk_zoned(struct btrfs_fs_info *fs_info,
4039 				    struct find_free_extent_ctl *ffe_ctl)
4040 {
4041 	/* An active zone is not a requirement for METADATA block groups. */
4042 	if (!(ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA))
4043 		return 0;
4044 
4045 	/* If we can activate new zone, just allocate a chunk and use it */
4046 	if (btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags))
4047 		return 0;
4048 
4049 	/*
4050 	 * We already reached the max active zones. Try to finish one block
4051 	 * group to make room for a new block group. This is only possible
4052 	 * for a data block group because btrfs_zone_finish() may need to wait
4053 	 * for a running transaction which can cause a deadlock for metadata
4054 	 * allocation.
4055 	 */
4056 	if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
4057 		int ret = btrfs_zone_finish_one_bg(fs_info);
4058 
4059 		if (ret == 1)
4060 			return 0;
4061 		else if (ret < 0)
4062 			return ret;
4063 	}
4064 
4065 	/*
4066 	 * If we have enough free space left in an already active block group
4067 	 * and we can't activate any other zone now, do not allow allocating a
4068 	 * new chunk and let find_free_extent() retry with a smaller size.
4069 	 */
4070 	if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size)
4071 		return -ENOSPC;
4072 
4073 	/*
4074 	 * Not even min_alloc_size is left in any block group. Since we cannot
4075 	 * activate a new block group, allocating one may not help. Tell the
4076 	 * caller to try again and hope it makes progress by writing out some
4077 	 * parts of the region. That is only possible for data block groups,
4078 	 * where a part of the region can be written.
4079 	 */
4080 	if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA)
4081 		return -EAGAIN;
4082 
4083 	/*
4084 	 * We cannot activate a new block group and there is not enough space
4085 	 * left in any block group. Allocating a new block group may not help,
4086 	 * but there is nothing else to do anyway, so let's go with it.
4087 	 */
4088 	return 0;
4089 }
4090 
4091 static int can_allocate_chunk(struct btrfs_fs_info *fs_info,
4092 			      struct find_free_extent_ctl *ffe_ctl)
4093 {
4094 	switch (ffe_ctl->policy) {
4095 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
4096 		return 0;
4097 	case BTRFS_EXTENT_ALLOC_ZONED:
4098 		return can_allocate_chunk_zoned(fs_info, ffe_ctl);
4099 	default:
4100 		BUG();
4101 	}
4102 }
4103 
4104 /*
4105  * Return >0 means caller needs to re-search for free extent
4106  * Return 0 means we have the needed free extent.
4107  * Return <0 means we failed to locate any free extent.
4108  */
4109 static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
4110 					struct btrfs_key *ins,
4111 					struct find_free_extent_ctl *ffe_ctl,
4112 					bool full_search)
4113 {
4114 	struct btrfs_root *root = fs_info->chunk_root;
4115 	int ret;
4116 
4117 	if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) &&
4118 	    ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg)
4119 		ffe_ctl->orig_have_caching_bg = true;
4120 
4121 	if (ins->objectid) {
4122 		found_extent(ffe_ctl, ins);
4123 		return 0;
4124 	}
4125 
4126 	if (ffe_ctl->loop >= LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg)
4127 		return 1;
4128 
4129 	ffe_ctl->index++;
4130 	if (ffe_ctl->index < BTRFS_NR_RAID_TYPES)
4131 		return 1;
4132 
4133 	/* See the comments for btrfs_loop_type for an explanation of the phases. */
4134 	if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) {
4135 		ffe_ctl->index = 0;
4136 		/*
4137 		 * We want to skip the LOOP_CACHING_WAIT step if we don't have
4138 		 * any uncached bgs and we've already done a full search through
4139 		 * all the block groups.
4140 		 */
4141 		if (ffe_ctl->loop == LOOP_CACHING_NOWAIT &&
4142 		    (!ffe_ctl->orig_have_caching_bg && full_search))
4143 			ffe_ctl->loop++;
4144 		ffe_ctl->loop++;
4145 
4146 		if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) {
4147 			struct btrfs_trans_handle *trans;
4148 			int exist = 0;
4149 
4150 			/* Check if allocation policy allows to create a new chunk */
4151 			ret = can_allocate_chunk(fs_info, ffe_ctl);
4152 			if (ret)
4153 				return ret;
4154 
4155 			trans = current->journal_info;
4156 			if (trans)
4157 				exist = 1;
4158 			else
4159 				trans = btrfs_join_transaction(root);
4160 
4161 			if (IS_ERR(trans)) {
4162 				ret = PTR_ERR(trans);
4163 				return ret;
4164 			}
4165 
4166 			ret = btrfs_chunk_alloc(trans, ffe_ctl->flags,
4167 						CHUNK_ALLOC_FORCE_FOR_EXTENT);
4168 
4169 			/* Do not bail out on ENOSPC since we can do more. */
4170 			if (ret == -ENOSPC) {
4171 				ret = 0;
4172 				ffe_ctl->loop++;
4173 			} else if (ret < 0)
4175 				btrfs_abort_transaction(trans, ret);
4176 			else
4177 				ret = 0;
4178 			if (!exist)
4179 				btrfs_end_transaction(trans);
4180 			if (ret)
4181 				return ret;
4182 		}
4183 
4184 		if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) {
4185 			if (ffe_ctl->policy != BTRFS_EXTENT_ALLOC_CLUSTERED)
4186 				return -ENOSPC;
4187 
4188 			/*
4189 			 * Don't loop again if we already have no empty_size and
4190 			 * no empty_cluster.
4191 			 */
4192 			if (ffe_ctl->empty_size == 0 &&
4193 			    ffe_ctl->empty_cluster == 0)
4194 				return -ENOSPC;
4195 			ffe_ctl->empty_size = 0;
4196 			ffe_ctl->empty_cluster = 0;
4197 		}
4198 		return 1;
4199 	}
4200 	return -ENOSPC;
4201 }
4202 
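/*
 * Decide whether @bg is acceptable for this allocation's size class.
 * Summarizing the checks below:
 *
 *	zoned policy or bg without size classes	-> always acceptable
 *	loop >= LOOP_WRONG_SIZE_CLASS		-> always acceptable
 *	loop >= LOOP_UNSET_SIZE_CLASS		-> also accept unclassified bgs
 *	otherwise				-> require an exact match
 */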
4203 static bool find_free_extent_check_size_class(struct find_free_extent_ctl *ffe_ctl,
4204 					      struct btrfs_block_group *bg)
4205 {
4206 	if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED)
4207 		return true;
4208 	if (!btrfs_block_group_should_use_size_class(bg))
4209 		return true;
4210 	if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS)
4211 		return true;
4212 	if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS &&
4213 	    bg->size_class == BTRFS_BG_SZ_NONE)
4214 		return true;
4215 	return ffe_ctl->size_class == bg->size_class;
4216 }
4217 
4218 static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
4219 					struct find_free_extent_ctl *ffe_ctl,
4220 					struct btrfs_space_info *space_info,
4221 					struct btrfs_key *ins)
4222 {
4223 	/*
4224 	 * If our free space is heavily fragmented we may not be able to make
4225 	 * big contiguous allocations, so instead of doing the expensive search
4226 	 * for free space, simply return ENOSPC with our max_extent_size so we
4227 	 * can go ahead and search for a more manageable chunk.
4228 	 *
4229 	 * If our max_extent_size is large enough for our allocation simply
4230 	 * disable clustering since we will likely not be able to find enough
4231 	 * space to create a cluster and induce latency trying.
4232 	 */
4233 	if (space_info->max_extent_size) {
4234 		spin_lock(&space_info->lock);
4235 		if (space_info->max_extent_size &&
4236 		    ffe_ctl->num_bytes > space_info->max_extent_size) {
4237 			ins->offset = space_info->max_extent_size;
4238 			spin_unlock(&space_info->lock);
4239 			return -ENOSPC;
4240 		} else if (space_info->max_extent_size) {
4241 			ffe_ctl->use_cluster = false;
4242 		}
4243 		spin_unlock(&space_info->lock);
4244 	}
4245 
4246 	ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info,
4247 					       &ffe_ctl->empty_cluster);
4248 	if (ffe_ctl->last_ptr) {
4249 		struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
4250 
4251 		spin_lock(&last_ptr->lock);
4252 		if (last_ptr->block_group)
4253 			ffe_ctl->hint_byte = last_ptr->window_start;
4254 		if (last_ptr->fragmented) {
4255 			/*
4256 			 * We still set window_start so we can keep track of the
4257 			 * last place we found an allocation to try and save
4258 			 * some time.
4259 			 */
4260 			ffe_ctl->hint_byte = last_ptr->window_start;
4261 			ffe_ctl->use_cluster = false;
4262 		}
4263 		spin_unlock(&last_ptr->lock);
4264 	}
4265 
4266 	return 0;
4267 }
4268 
4269 static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
4270 				    struct find_free_extent_ctl *ffe_ctl)
4271 {
4272 	if (ffe_ctl->for_treelog) {
4273 		spin_lock(&fs_info->treelog_bg_lock);
4274 		if (fs_info->treelog_bg)
4275 			ffe_ctl->hint_byte = fs_info->treelog_bg;
4276 		spin_unlock(&fs_info->treelog_bg_lock);
4277 	} else if (ffe_ctl->for_data_reloc) {
4278 		spin_lock(&fs_info->relocation_bg_lock);
4279 		if (fs_info->data_reloc_bg)
4280 			ffe_ctl->hint_byte = fs_info->data_reloc_bg;
4281 		spin_unlock(&fs_info->relocation_bg_lock);
4282 	} else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
4283 		struct btrfs_block_group *block_group;
4284 
4285 		spin_lock(&fs_info->zone_active_bgs_lock);
4286 		list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
4287 			/*
4288 			 * Going without a lock is OK here because avail is
4289 			 * monotonically decreasing, and this is just a hint.
4290 			 */
4291 			u64 avail = block_group->zone_capacity - block_group->alloc_offset;
4292 
4293 			if (block_group_bits(block_group, ffe_ctl->flags) &&
4294 			    avail >= ffe_ctl->num_bytes) {
4295 				ffe_ctl->hint_byte = block_group->start;
4296 				break;
4297 			}
4298 		}
4299 		spin_unlock(&fs_info->zone_active_bgs_lock);
4300 	}
4301 
4302 	return 0;
4303 }
4304 
4305 static int prepare_allocation(struct btrfs_fs_info *fs_info,
4306 			      struct find_free_extent_ctl *ffe_ctl,
4307 			      struct btrfs_space_info *space_info,
4308 			      struct btrfs_key *ins)
4309 {
4310 	switch (ffe_ctl->policy) {
4311 	case BTRFS_EXTENT_ALLOC_CLUSTERED:
4312 		return prepare_allocation_clustered(fs_info, ffe_ctl,
4313 						    space_info, ins);
4314 	case BTRFS_EXTENT_ALLOC_ZONED:
4315 		return prepare_allocation_zoned(fs_info, ffe_ctl);
4316 	default:
4317 		BUG();
4318 	}
4319 }
4320 
4321 /*
4322  * walks the btree of allocated extents and finds a hole of a given size.
4323  * The key ins is changed to record the hole:
4324  * ins->objectid == start position
4325  * ins->flags = BTRFS_EXTENT_ITEM_KEY
4326  * ins->offset == the size of the hole.
4327  * Any available blocks before search_start are skipped.
4328  *
4329  * If there is no suitable free space, we record the size of the largest
4330  * free space extent found during the search.
4331  *
4332  * The overall logic and call chain:
4333  *
4334  * find_free_extent()
4335  * |- Iterate through all block groups
4336  * |  |- Get a valid block group
4337  * |  |- Try to do clustered allocation in that block group
4338  * |  |- Try to do unclustered allocation in that block group
4339  * |  |- Check if the result is valid
4340  * |  |  |- If valid, then exit
4341  * |  |- Jump to next block group
4342  * |
4343  * |- Push harder to find free extents
4344  *    |- If not found, re-iterate all block groups
4345  */
4346 static noinline int find_free_extent(struct btrfs_root *root,
4347 				     struct btrfs_key *ins,
4348 				     struct find_free_extent_ctl *ffe_ctl)
4349 {
4350 	struct btrfs_fs_info *fs_info = root->fs_info;
4351 	int ret = 0;
4352 	int cache_block_group_error = 0;
4353 	struct btrfs_block_group *block_group = NULL;
4354 	struct btrfs_space_info *space_info;
4355 	bool full_search = false;
4356 
4357 	WARN_ON(ffe_ctl->num_bytes < fs_info->sectorsize);
4358 
4359 	ffe_ctl->search_start = 0;
4360 	/* For clustered allocation */
4361 	ffe_ctl->empty_cluster = 0;
4362 	ffe_ctl->last_ptr = NULL;
4363 	ffe_ctl->use_cluster = true;
4364 	ffe_ctl->have_caching_bg = false;
4365 	ffe_ctl->orig_have_caching_bg = false;
4366 	ffe_ctl->index = btrfs_bg_flags_to_raid_index(ffe_ctl->flags);
4367 	ffe_ctl->loop = 0;
4368 	ffe_ctl->retry_uncached = false;
4369 	ffe_ctl->cached = 0;
4370 	ffe_ctl->max_extent_size = 0;
4371 	ffe_ctl->total_free_space = 0;
4372 	ffe_ctl->found_offset = 0;
4373 	ffe_ctl->policy = BTRFS_EXTENT_ALLOC_CLUSTERED;
4374 	ffe_ctl->size_class = btrfs_calc_block_group_size_class(ffe_ctl->num_bytes);
4375 
4376 	if (btrfs_is_zoned(fs_info))
4377 		ffe_ctl->policy = BTRFS_EXTENT_ALLOC_ZONED;
4378 
4379 	ins->type = BTRFS_EXTENT_ITEM_KEY;
4380 	ins->objectid = 0;
4381 	ins->offset = 0;
4382 
4383 	trace_find_free_extent(root, ffe_ctl);
4384 
4385 	space_info = btrfs_find_space_info(fs_info, ffe_ctl->flags);
4386 	if (!space_info) {
4387 		btrfs_err(fs_info, "No space info for %llu", ffe_ctl->flags);
4388 		return -ENOSPC;
4389 	}
4390 
4391 	ret = prepare_allocation(fs_info, ffe_ctl, space_info, ins);
4392 	if (ret < 0)
4393 		return ret;
4394 
4395 	ffe_ctl->search_start = max(ffe_ctl->search_start,
4396 				    first_logical_byte(fs_info));
4397 	ffe_ctl->search_start = max(ffe_ctl->search_start, ffe_ctl->hint_byte);
4398 	if (ffe_ctl->search_start == ffe_ctl->hint_byte) {
4399 		block_group = btrfs_lookup_block_group(fs_info,
4400 						       ffe_ctl->search_start);
4401 		/*
4402 		 * we don't want to use the block group if it doesn't match our
4403 		 * allocation bits, or if it's not cached.
4404 		 *
4405 		 * However if we are re-searching with an ideal block group
4406 		 * picked out then we don't care that the block group is cached.
4407 		 */
4408 		if (block_group && block_group_bits(block_group, ffe_ctl->flags) &&
4409 		    block_group->cached != BTRFS_CACHE_NO) {
4410 			down_read(&space_info->groups_sem);
4411 			if (list_empty(&block_group->list) ||
4412 			    block_group->ro) {
4413 				/*
4414 				 * someone is removing this block group,
4415 				 * we can't jump into the have_block_group
4416 				 * target because our list pointers are not
4417 				 * valid
4418 				 */
4419 				btrfs_put_block_group(block_group);
4420 				up_read(&space_info->groups_sem);
4421 			} else {
4422 				ffe_ctl->index = btrfs_bg_flags_to_raid_index(
4423 							block_group->flags);
4424 				btrfs_lock_block_group(block_group,
4425 						       ffe_ctl->delalloc);
4426 				ffe_ctl->hinted = true;
4427 				goto have_block_group;
4428 			}
4429 		} else if (block_group) {
4430 			btrfs_put_block_group(block_group);
4431 		}
4432 	}
4433 search:
4434 	trace_find_free_extent_search_loop(root, ffe_ctl);
4435 	ffe_ctl->have_caching_bg = false;
4436 	if (ffe_ctl->index == btrfs_bg_flags_to_raid_index(ffe_ctl->flags) ||
4437 	    ffe_ctl->index == 0)
4438 		full_search = true;
4439 	down_read(&space_info->groups_sem);
4440 	list_for_each_entry(block_group,
4441 			    &space_info->block_groups[ffe_ctl->index], list) {
4442 		struct btrfs_block_group *bg_ret;
4443 
4444 		ffe_ctl->hinted = false;
4445 		/* If the block group is read-only, we can skip it entirely. */
4446 		if (unlikely(block_group->ro)) {
4447 			if (ffe_ctl->for_treelog)
4448 				btrfs_clear_treelog_bg(block_group);
4449 			if (ffe_ctl->for_data_reloc)
4450 				btrfs_clear_data_reloc_bg(block_group);
4451 			continue;
4452 		}
4453 
4454 		btrfs_grab_block_group(block_group, ffe_ctl->delalloc);
4455 		ffe_ctl->search_start = block_group->start;
4456 
4457 		/*
4458 		 * this can happen if we end up cycling through all the
4459 		 * raid types, but we want to make sure we only allocate
4460 		 * for the proper type.
4461 		 */
4462 		if (!block_group_bits(block_group, ffe_ctl->flags)) {
4463 			u64 extra = BTRFS_BLOCK_GROUP_DUP |
4464 				BTRFS_BLOCK_GROUP_RAID1_MASK |
4465 				BTRFS_BLOCK_GROUP_RAID56_MASK |
4466 				BTRFS_BLOCK_GROUP_RAID10;
4467 
4468 			/*
4469 			 * if they asked for extra copies and this block group
4470 			 * doesn't provide them, bail.  This does allow us to
4471 			 * fill raid0 from raid1.
4472 			 */
4473 			if ((ffe_ctl->flags & extra) && !(block_group->flags & extra))
4474 				goto loop;
4475 
4476 			/*
4477 			 * This block group has different flags than we want.
4478 			 * It's possible that we have MIXED_GROUP flag but no
4479 			 * block group is mixed.  Just skip such block group.
4480 			 */
4481 			btrfs_release_block_group(block_group, ffe_ctl->delalloc);
4482 			continue;
4483 		}
4484 
4485 have_block_group:
4486 		trace_find_free_extent_have_block_group(root, ffe_ctl, block_group);
4487 		ffe_ctl->cached = btrfs_block_group_done(block_group);
4488 		if (unlikely(!ffe_ctl->cached)) {
4489 			ffe_ctl->have_caching_bg = true;
4490 			ret = btrfs_cache_block_group(block_group, false);
4491 
4492 			/*
4493 			 * If we get ENOMEM here or something else we want to
4494 			 * try other block groups, because it may not be fatal.
4495 			 * However if we can't find anything else we need to
4496 			 * save our return here so that we return the actual
4497 			 * error that caused problems, not ENOSPC.
4498 			 */
4499 			if (ret < 0) {
4500 				if (!cache_block_group_error)
4501 					cache_block_group_error = ret;
4502 				ret = 0;
4503 				goto loop;
4504 			}
4505 			ret = 0;
4506 		}
4507 
4508 		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) {
4509 			if (!cache_block_group_error)
4510 				cache_block_group_error = -EIO;
4511 			goto loop;
4512 		}
4513 
4514 		if (!find_free_extent_check_size_class(ffe_ctl, block_group))
4515 			goto loop;
4516 
4517 		bg_ret = NULL;
4518 		ret = do_allocation(block_group, ffe_ctl, &bg_ret);
4519 		if (ret > 0)
4520 			goto loop;
4521 
4522 		if (bg_ret && bg_ret != block_group) {
4523 			btrfs_release_block_group(block_group, ffe_ctl->delalloc);
4524 			block_group = bg_ret;
4525 		}
4526 
4527 		/* Checks */
4528 		ffe_ctl->search_start = round_up(ffe_ctl->found_offset,
4529 						 fs_info->stripesize);
4530 
4531 		/* move on to the next group */
4532 		if (ffe_ctl->search_start + ffe_ctl->num_bytes >
4533 		    block_group->start + block_group->length) {
4534 			btrfs_add_free_space_unused(block_group,
4535 					    ffe_ctl->found_offset,
4536 					    ffe_ctl->num_bytes);
4537 			goto loop;
4538 		}
4539 
4540 		if (ffe_ctl->found_offset < ffe_ctl->search_start)
4541 			btrfs_add_free_space_unused(block_group,
4542 					ffe_ctl->found_offset,
4543 					ffe_ctl->search_start - ffe_ctl->found_offset);
4544 
4545 		ret = btrfs_add_reserved_bytes(block_group, ffe_ctl->ram_bytes,
4546 					       ffe_ctl->num_bytes,
4547 					       ffe_ctl->delalloc,
4548 					       ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS);
4549 		if (ret == -EAGAIN) {
4550 			btrfs_add_free_space_unused(block_group,
4551 					ffe_ctl->found_offset,
4552 					ffe_ctl->num_bytes);
4553 			goto loop;
4554 		}
4555 		btrfs_inc_block_group_reservations(block_group);
4556 
4557 		/* we are all good, let's return */
4558 		ins->objectid = ffe_ctl->search_start;
4559 		ins->offset = ffe_ctl->num_bytes;
4560 
4561 		trace_btrfs_reserve_extent(block_group, ffe_ctl);
4562 		btrfs_release_block_group(block_group, ffe_ctl->delalloc);
4563 		break;
4564 loop:
4565 		if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
4566 		    !ffe_ctl->retry_uncached) {
4567 			ffe_ctl->retry_uncached = true;
4568 			btrfs_wait_block_group_cache_progress(block_group,
4569 						ffe_ctl->num_bytes +
4570 						ffe_ctl->empty_cluster +
4571 						ffe_ctl->empty_size);
4572 			goto have_block_group;
4573 		}
4574 		release_block_group(block_group, ffe_ctl, ffe_ctl->delalloc);
4575 		cond_resched();
4576 	}
4577 	up_read(&space_info->groups_sem);
4578 
4579 	ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, full_search);
4580 	if (ret > 0)
4581 		goto search;
4582 
4583 	if (ret == -ENOSPC && !cache_block_group_error) {
4584 		/*
4585 		 * Use ffe_ctl->total_free_space as fallback if we can't find
4586 		 * any contiguous hole.
4587 		 */
4588 		if (!ffe_ctl->max_extent_size)
4589 			ffe_ctl->max_extent_size = ffe_ctl->total_free_space;
4590 		spin_lock(&space_info->lock);
4591 		space_info->max_extent_size = ffe_ctl->max_extent_size;
4592 		spin_unlock(&space_info->lock);
4593 		ins->offset = ffe_ctl->max_extent_size;
4594 	} else if (ret == -ENOSPC) {
4595 		ret = cache_block_group_error;
4596 	}
4597 	return ret;
4598 }
4599 
4600 /*
4601  * Entry point to the extent allocator. Tries to find a hole that is at least
4602  * as big as @num_bytes.
4603  *
4604  * @root           -	The root that will contain this extent
4605  *
4606  * @ram_bytes      -	The amount of space in ram that @num_bytes take. This
4607  *			is used for accounting purposes. This value differs
4608  *			from @num_bytes only in the case of compressed extents.
4609  *
4610  * @num_bytes      -	Number of bytes to allocate on-disk.
4611  *
4612  * @min_alloc_size -	Indicates the minimum amount of space that the
4613  *			allocator should try to satisfy. In some cases
4614  *			@num_bytes may be larger than what is required and if
4615  *			the filesystem is fragmented then allocation fails.
4616  *			However, the presence of @min_alloc_size gives a
4617  *			chance to try and satisfy the smaller allocation.
4618  *
4619  * @empty_size     -	A hint that you plan on doing more COW. This is the
4620  *			size in bytes of free space the allocator should try to
4621  *			find next to the block it returns.  This is just a hint and
4622  *			may be ignored by the allocator.
4623  *
4624  * @hint_byte      -	Hint to the allocator to start searching above the byte
4625  *			address passed. It might be ignored.
4626  *
4627  * @ins            -	This key is modified to record the found hole. It will
4628  *			have the following values:
4629  *			ins->objectid == start position
4630  *			ins->flags = BTRFS_EXTENT_ITEM_KEY
4631  *			ins->offset == the size of the hole.
4632  *
4633  * @is_data        -	Boolean flag indicating whether an extent is
4634  *			allocated for data (true) or metadata (false)
4635  *
4636  * @delalloc       -	Boolean flag indicating whether this allocation is for
4637  *			delalloc or not. If 'true' data_rwsem of block groups
4638  *			is going to be acquired.
4639  *
4640  *
4641  * Returns 0 when an allocation succeeded or < 0 when an error occurred. In
4642  * case -ENOSPC is returned then @ins->offset will contain the size of the
4643  * largest available hole the allocator managed to find.
4644  */
4645 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
4646 			 u64 num_bytes, u64 min_alloc_size,
4647 			 u64 empty_size, u64 hint_byte,
4648 			 struct btrfs_key *ins, int is_data, int delalloc)
4649 {
4650 	struct btrfs_fs_info *fs_info = root->fs_info;
4651 	struct find_free_extent_ctl ffe_ctl = {};
4652 	bool final_tried = num_bytes == min_alloc_size;
4653 	u64 flags;
4654 	int ret;
4655 	bool for_treelog = (btrfs_root_id(root) == BTRFS_TREE_LOG_OBJECTID);
4656 	bool for_data_reloc = (btrfs_is_data_reloc_root(root) && is_data);
4657 
4658 	flags = get_alloc_profile_by_root(root, is_data);
4659 again:
4660 	WARN_ON(num_bytes < fs_info->sectorsize);
4661 
4662 	ffe_ctl.ram_bytes = ram_bytes;
4663 	ffe_ctl.num_bytes = num_bytes;
4664 	ffe_ctl.min_alloc_size = min_alloc_size;
4665 	ffe_ctl.empty_size = empty_size;
4666 	ffe_ctl.flags = flags;
4667 	ffe_ctl.delalloc = delalloc;
4668 	ffe_ctl.hint_byte = hint_byte;
4669 	ffe_ctl.for_treelog = for_treelog;
4670 	ffe_ctl.for_data_reloc = for_data_reloc;
4671 
4672 	ret = find_free_extent(root, ins, &ffe_ctl);
4673 	if (!ret && !is_data) {
4674 		btrfs_dec_block_group_reservations(fs_info, ins->objectid);
4675 	} else if (ret == -ENOSPC) {
4676 		if (!final_tried && ins->offset) {
4677 			num_bytes = min(num_bytes >> 1, ins->offset);
4678 			num_bytes = round_down(num_bytes,
4679 					       fs_info->sectorsize);
4680 			num_bytes = max(num_bytes, min_alloc_size);
4681 			ram_bytes = num_bytes;
4682 			if (num_bytes == min_alloc_size)
4683 				final_tried = true;
4684 			goto again;
4685 		} else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
4686 			struct btrfs_space_info *sinfo;
4687 
4688 			sinfo = btrfs_find_space_info(fs_info, flags);
4689 			btrfs_err(fs_info,
4690 	"allocation failed flags %llu, wanted %llu tree-log %d, relocation: %d",
4691 				  flags, num_bytes, for_treelog, for_data_reloc);
4692 			if (sinfo)
4693 				btrfs_dump_space_info(fs_info, sinfo,
4694 						      num_bytes, 1);
4695 		}
4696 	}
4697 
4698 	return ret;
4699 }
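
/*
 * Illustrative sketch only ('len' is a placeholder): a data write reserving
 * up to len bytes but willing to accept as little as one sector could do:
 *
 *	struct btrfs_key ins;
 *	int ret;
 *
 *	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
 *				   0, 0, &ins, 1, 1);
 *	if (!ret) {
 *		u64 start = ins.objectid;
 *		u64 num_bytes = ins.offset;
 *		...
 *	}
 *
 * On -ENOSPC the function has already retried internally with halved sizes
 * down to min_alloc_size, and ins.offset holds the largest hole it saw.
 */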
4700 
4701 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
4702 			       u64 start, u64 len, int delalloc)
4703 {
4704 	struct btrfs_block_group *cache;
4705 
4706 	cache = btrfs_lookup_block_group(fs_info, start);
4707 	if (!cache) {
4708 		btrfs_err(fs_info, "Unable to find block group for %llu",
4709 			  start);
4710 		return -ENOSPC;
4711 	}
4712 
4713 	btrfs_add_free_space(cache, start, len);
4714 	btrfs_free_reserved_bytes(cache, len, delalloc);
4715 	trace_btrfs_reserved_extent_free(fs_info, start, len);
4716 
4717 	btrfs_put_block_group(cache);
4718 	return 0;
4719 }
4720 
4721 int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans,
4722 			      const struct extent_buffer *eb)
4723 {
4724 	struct btrfs_block_group *cache;
4725 	int ret = 0;
4726 
4727 	cache = btrfs_lookup_block_group(trans->fs_info, eb->start);
4728 	if (!cache) {
4729 		btrfs_err(trans->fs_info, "unable to find block group for %llu",
4730 			  eb->start);
4731 		return -ENOSPC;
4732 	}
4733 
4734 	ret = pin_down_extent(trans, cache, eb->start, eb->len, 1);
4735 	btrfs_put_block_group(cache);
4736 	return ret;
4737 }
4738 
4739 static int alloc_reserved_extent(struct btrfs_trans_handle *trans, u64 bytenr,
4740 				 u64 num_bytes)
4741 {
4742 	struct btrfs_fs_info *fs_info = trans->fs_info;
4743 	int ret;
4744 
4745 	ret = remove_from_free_space_tree(trans, bytenr, num_bytes);
4746 	if (ret)
4747 		return ret;
4748 
4749 	ret = btrfs_update_block_group(trans, bytenr, num_bytes, true);
4750 	if (ret) {
4751 		ASSERT(!ret);
4752 		btrfs_err(fs_info, "update block group failed for %llu %llu",
4753 			  bytenr, num_bytes);
4754 		return ret;
4755 	}
4756 
4757 	trace_btrfs_reserved_extent_alloc(fs_info, bytenr, num_bytes);
4758 	return 0;
4759 }
4760 
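/*
 * Resulting extent item layouts (illustrative), keyed by
 * (ins->objectid, BTRFS_EXTENT_ITEM_KEY, ins->offset):
 *
 *	shared (parent > 0):	[ extent_item | (owner ref) | SHARED_DATA_REF ]
 *	non-shared:		[ extent_item | (owner ref) | EXTENT_DATA_REF ]
 *
 * The optional inline owner ref is only written when simple quotas are
 * enabled.
 */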
4761 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4762 				      u64 parent, u64 root_objectid,
4763 				      u64 flags, u64 owner, u64 offset,
4764 				      struct btrfs_key *ins, int ref_mod, u64 oref_root)
4765 {
4766 	struct btrfs_fs_info *fs_info = trans->fs_info;
4767 	struct btrfs_root *extent_root;
4768 	int ret;
4769 	struct btrfs_extent_item *extent_item;
4770 	struct btrfs_extent_owner_ref *oref;
4771 	struct btrfs_extent_inline_ref *iref;
4772 	struct btrfs_path *path;
4773 	struct extent_buffer *leaf;
4774 	int type;
4775 	u32 size;
4776 	const bool simple_quota = (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE);
4777 
4778 	if (parent > 0)
4779 		type = BTRFS_SHARED_DATA_REF_KEY;
4780 	else
4781 		type = BTRFS_EXTENT_DATA_REF_KEY;
4782 
4783 	size = sizeof(*extent_item);
4784 	if (simple_quota)
4785 		size += btrfs_extent_inline_ref_size(BTRFS_EXTENT_OWNER_REF_KEY);
4786 	size += btrfs_extent_inline_ref_size(type);
4787 
4788 	path = btrfs_alloc_path();
4789 	if (!path)
4790 		return -ENOMEM;
4791 
4792 	extent_root = btrfs_extent_root(fs_info, ins->objectid);
4793 	ret = btrfs_insert_empty_item(trans, extent_root, path, ins, size);
4794 	if (ret) {
4795 		btrfs_free_path(path);
4796 		return ret;
4797 	}
4798 
4799 	leaf = path->nodes[0];
4800 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
4801 				     struct btrfs_extent_item);
4802 	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
4803 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4804 	btrfs_set_extent_flags(leaf, extent_item,
4805 			       flags | BTRFS_EXTENT_FLAG_DATA);
4806 
4807 	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4808 	if (simple_quota) {
4809 		btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_EXTENT_OWNER_REF_KEY);
4810 		oref = (struct btrfs_extent_owner_ref *)(&iref->offset);
4811 		btrfs_set_extent_owner_ref_root_id(leaf, oref, oref_root);
4812 		iref = (struct btrfs_extent_inline_ref *)(oref + 1);
4813 	}
4814 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
4815 
4816 	if (parent > 0) {
4817 		struct btrfs_shared_data_ref *ref;
4818 		ref = (struct btrfs_shared_data_ref *)(iref + 1);
4819 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4820 		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
4821 	} else {
4822 		struct btrfs_extent_data_ref *ref;
4823 		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
4824 		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
4825 		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
4826 		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
4827 		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
4828 	}
4829 
4830 	btrfs_free_path(path);
4831 
4832 	return alloc_reserved_extent(trans, ins->objectid, ins->offset);
4833 }
4834 
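/*
 * Resulting item layouts (illustrative):
 *
 *	skinny:		key (bytenr, METADATA_ITEM, level)
 *			[ extent_item | inline tree/shared block ref ]
 *	classic:	key (bytenr, EXTENT_ITEM, num_bytes)
 *			[ extent_item | tree_block_info | inline ref ]
 */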
4835 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
4836 				     struct btrfs_delayed_ref_node *node,
4837 				     struct btrfs_delayed_extent_op *extent_op)
4838 {
4839 	struct btrfs_fs_info *fs_info = trans->fs_info;
4840 	struct btrfs_root *extent_root;
4841 	int ret;
4842 	struct btrfs_extent_item *extent_item;
4843 	struct btrfs_key extent_key;
4844 	struct btrfs_tree_block_info *block_info;
4845 	struct btrfs_extent_inline_ref *iref;
4846 	struct btrfs_path *path;
4847 	struct extent_buffer *leaf;
4848 	u32 size = sizeof(*extent_item) + sizeof(*iref);
4849 	const u64 flags = (extent_op ? extent_op->flags_to_set : 0);
4850 	/* The owner of a tree block is the level. */
4851 	int level = btrfs_delayed_ref_owner(node);
4852 	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
4853 
4854 	extent_key.objectid = node->bytenr;
4855 	if (skinny_metadata) {
4856 		/* The owner of a tree block is the level. */
4857 		extent_key.offset = level;
4858 		extent_key.type = BTRFS_METADATA_ITEM_KEY;
4859 	} else {
4860 		extent_key.offset = node->num_bytes;
4861 		extent_key.type = BTRFS_EXTENT_ITEM_KEY;
4862 		size += sizeof(*block_info);
4863 	}
4864 
4865 	path = btrfs_alloc_path();
4866 	if (!path)
4867 		return -ENOMEM;
4868 
4869 	extent_root = btrfs_extent_root(fs_info, extent_key.objectid);
4870 	ret = btrfs_insert_empty_item(trans, extent_root, path, &extent_key,
4871 				      size);
4872 	if (ret) {
4873 		btrfs_free_path(path);
4874 		return ret;
4875 	}
4876 
4877 	leaf = path->nodes[0];
4878 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
4879 				     struct btrfs_extent_item);
4880 	btrfs_set_extent_refs(leaf, extent_item, 1);
4881 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4882 	btrfs_set_extent_flags(leaf, extent_item,
4883 			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
4884 
4885 	if (skinny_metadata) {
4886 		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4887 	} else {
4888 		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
4889 		btrfs_set_tree_block_key(leaf, block_info, &extent_op->key);
4890 		btrfs_set_tree_block_level(leaf, block_info, level);
4891 		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
4892 	}
4893 
4894 	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
4895 		btrfs_set_extent_inline_ref_type(leaf, iref,
4896 						 BTRFS_SHARED_BLOCK_REF_KEY);
4897 		btrfs_set_extent_inline_ref_offset(leaf, iref, node->parent);
4898 	} else {
4899 		btrfs_set_extent_inline_ref_type(leaf, iref,
4900 						 BTRFS_TREE_BLOCK_REF_KEY);
4901 		btrfs_set_extent_inline_ref_offset(leaf, iref, node->ref_root);
4902 	}
4903 
4904 	btrfs_free_path(path);
4905 
4906 	return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize);
4907 }
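
/*
 * Illustrative sketch of the two item shapes alloc_reserved_tree_block()
 * can produce, depending on the SKINNY_METADATA incompat flag:
 *
 *   skinny:     key = (bytenr, METADATA_ITEM, level)
 *               [ btrfs_extent_item | inline ref ]
 *   non-skinny: key = (bytenr, EXTENT_ITEM, nodesize)
 *               [ btrfs_extent_item | btrfs_tree_block_info | inline ref ]
 *
 * The inline ref is a SHARED_BLOCK_REF (offset = parent bytenr) for
 * full-backref blocks, or a TREE_BLOCK_REF (offset = root id) otherwise.
 */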
4908 
btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle * trans,struct btrfs_root * root,u64 owner,u64 offset,u64 ram_bytes,struct btrfs_key * ins)4909 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4910 				     struct btrfs_root *root, u64 owner,
4911 				     u64 offset, u64 ram_bytes,
4912 				     struct btrfs_key *ins)
4913 {
4914 	struct btrfs_ref generic_ref = {
4915 		.action = BTRFS_ADD_DELAYED_EXTENT,
4916 		.bytenr = ins->objectid,
4917 		.num_bytes = ins->offset,
4918 		.owning_root = btrfs_root_id(root),
4919 		.ref_root = btrfs_root_id(root),
4920 	};
4921 
4922 	ASSERT(generic_ref.ref_root != BTRFS_TREE_LOG_OBJECTID);
4923 
4924 	if (btrfs_is_data_reloc_root(root) && is_fstree(root->relocation_src_root))
4925 		generic_ref.owning_root = root->relocation_src_root;
4926 
4927 	btrfs_init_data_ref(&generic_ref, owner, offset, 0, false);
4928 	btrfs_ref_tree_mod(root->fs_info, &generic_ref);
4929 
4930 	return btrfs_add_delayed_data_ref(trans, &generic_ref, ram_bytes);
4931 }
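
/*
 * Note that btrfs_alloc_reserved_file_extent() inserts nothing into the
 * extent tree itself: the extent is queued as a BTRFS_ADD_DELAYED_EXTENT
 * ref, and alloc_reserved_file_extent() above only runs later, when the
 * delayed refs are processed (e.g. at transaction commit).
 */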
4932 
4933 /*
4934  * This is used by the tree logging recovery code.  It records that
4935  * an extent has been allocated and makes sure to clear the free
4936  * space cache bits as well.
4937  */
btrfs_alloc_logged_file_extent(struct btrfs_trans_handle * trans,u64 root_objectid,u64 owner,u64 offset,struct btrfs_key * ins)4938 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4939 				   u64 root_objectid, u64 owner, u64 offset,
4940 				   struct btrfs_key *ins)
4941 {
4942 	struct btrfs_fs_info *fs_info = trans->fs_info;
4943 	int ret;
4944 	struct btrfs_block_group *block_group;
4945 	struct btrfs_space_info *space_info;
4946 	struct btrfs_squota_delta delta = {
4947 		.root = root_objectid,
4948 		.num_bytes = ins->offset,
4949 		.generation = trans->transid,
4950 		.is_data = true,
4951 		.is_inc = true,
4952 	};
4953 
4954 	/*
4955 	 * Mixed block groups have their extents excluded before processing the
4956 	 * log, so we only need to do the exclude dance if this fs isn't mixed.
4957 	 */
4958 	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
4959 		ret = __exclude_logged_extent(fs_info, ins->objectid,
4960 					      ins->offset);
4961 		if (ret)
4962 			return ret;
4963 	}
4964 
4965 	block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
4966 	if (!block_group)
4967 		return -EINVAL;
4968 
4969 	space_info = block_group->space_info;
4970 	spin_lock(&space_info->lock);
4971 	spin_lock(&block_group->lock);
4972 	space_info->bytes_reserved += ins->offset;
4973 	block_group->reserved += ins->offset;
4974 	spin_unlock(&block_group->lock);
4975 	spin_unlock(&space_info->lock);
4976 
4977 	ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
4978 					 offset, ins, 1, root_objectid);
4979 	if (ret)
4980 		btrfs_pin_extent(trans, ins->objectid, ins->offset, 1);
4981 	ret = btrfs_record_squota_delta(fs_info, &delta);
4982 	btrfs_put_block_group(block_group);
4983 	return ret;
4984 }
4985 
4986 #ifdef CONFIG_BTRFS_DEBUG
4987 /*
4988  * Extra safety check in case the extent tree is corrupted and extent allocator
4989  * chooses to use a tree block which is already used and locked.
4990  */
check_eb_lock_owner(const struct extent_buffer * eb)4991 static bool check_eb_lock_owner(const struct extent_buffer *eb)
4992 {
4993 	if (eb->lock_owner == current->pid) {
4994 		btrfs_err_rl(eb->fs_info,
4995 "tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
4996 			     eb->start, btrfs_header_owner(eb), current->pid);
4997 		return true;
4998 	}
4999 	return false;
5000 }
5001 #else
check_eb_lock_owner(struct extent_buffer * eb)5002 static bool check_eb_lock_owner(struct extent_buffer *eb)
5003 {
5004 	return false;
5005 }
5006 #endif
5007 
5008 static struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle * trans,struct btrfs_root * root,u64 bytenr,int level,u64 owner,enum btrfs_lock_nesting nest)5009 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5010 		      u64 bytenr, int level, u64 owner,
5011 		      enum btrfs_lock_nesting nest)
5012 {
5013 	struct btrfs_fs_info *fs_info = root->fs_info;
5014 	struct extent_buffer *buf;
5015 	u64 lockdep_owner = owner;
5016 
5017 	buf = btrfs_find_create_tree_block(fs_info, bytenr, owner, level);
5018 	if (IS_ERR(buf))
5019 		return buf;
5020 
5021 	if (check_eb_lock_owner(buf)) {
5022 		free_extent_buffer(buf);
5023 		return ERR_PTR(-EUCLEAN);
5024 	}
5025 
5026 	/*
5027 	 * The reloc trees are just snapshots, so we need them to appear to be
5028 	 * just like any other fs tree WRT lockdep.
5029 	 *
5030 	 * The exception however is in replace_path() in relocation, where we
5031 	 * hold the lock on the original fs root and then search for the reloc
5032 	 * root.  At that point we need to make sure any reloc root buffers are
5033 	 * set to the BTRFS_TREE_RELOC_OBJECTID lockdep class in order to make
5034 	 * lockdep happy.
5035 	 */
5036 	if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID &&
5037 	    !test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
5038 		lockdep_owner = BTRFS_FS_TREE_OBJECTID;
5039 
5040 	/* btrfs_clear_buffer_dirty() accesses generation field. */
5041 	btrfs_set_header_generation(buf, trans->transid);
5042 
5043 	/*
5044 	 * This needs to stay, because we could allocate a freed block from an
5045 	 * old tree into a new tree, so we need to make sure this new block is
5046 	 * set to the appropriate level and owner.
5047 	 */
5048 	btrfs_set_buffer_lockdep_class(lockdep_owner, buf, level);
5049 
5050 	btrfs_tree_lock_nested(buf, nest);
5051 	btrfs_clear_buffer_dirty(trans, buf);
5052 	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
5053 	clear_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &buf->bflags);
5054 
5055 	set_extent_buffer_uptodate(buf);
5056 
5057 	memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header));
5058 	btrfs_set_header_level(buf, level);
5059 	btrfs_set_header_bytenr(buf, buf->start);
5060 	btrfs_set_header_generation(buf, trans->transid);
5061 	btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV);
5062 	btrfs_set_header_owner(buf, owner);
5063 	write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid);
5064 	write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid);
5065 	if (btrfs_root_id(root) == BTRFS_TREE_LOG_OBJECTID) {
5066 		buf->log_index = root->log_transid % 2;
5067 		/*
5068 		 * We allow two log transactions at a time, so use different
5069 		 * EXTENT bits to differentiate the dirty pages of each.
5070 		 */
5071 		if (buf->log_index == 0)
5072 			set_extent_bit(&root->dirty_log_pages, buf->start,
5073 				       buf->start + buf->len - 1,
5074 				       EXTENT_DIRTY, NULL);
5075 		else
5076 			set_extent_bit(&root->dirty_log_pages, buf->start,
5077 				       buf->start + buf->len - 1,
5078 				       EXTENT_NEW, NULL);
5079 	} else {
5080 		buf->log_index = -1;
5081 		set_extent_bit(&trans->transaction->dirty_pages, buf->start,
5082 			       buf->start + buf->len - 1, EXTENT_DIRTY, NULL);
5083 	}
5084 	/* this returns a buffer locked for blocking */
5085 	return buf;
5086 }
5087 
5088 /*
5089  * Finds a free extent and does all the dirty work required for allocation;
5090  * returns the tree buffer or an ERR_PTR on error.
5091  */
btrfs_alloc_tree_block(struct btrfs_trans_handle * trans,struct btrfs_root * root,u64 parent,u64 root_objectid,const struct btrfs_disk_key * key,int level,u64 hint,u64 empty_size,u64 reloc_src_root,enum btrfs_lock_nesting nest)5092 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
5093 					     struct btrfs_root *root,
5094 					     u64 parent, u64 root_objectid,
5095 					     const struct btrfs_disk_key *key,
5096 					     int level, u64 hint,
5097 					     u64 empty_size,
5098 					     u64 reloc_src_root,
5099 					     enum btrfs_lock_nesting nest)
5100 {
5101 	struct btrfs_fs_info *fs_info = root->fs_info;
5102 	struct btrfs_key ins;
5103 	struct btrfs_block_rsv *block_rsv;
5104 	struct extent_buffer *buf;
5105 	u64 flags = 0;
5106 	int ret;
5107 	u32 blocksize = fs_info->nodesize;
5108 	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
5109 	u64 owning_root;
5110 
5111 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5112 	if (btrfs_is_testing(fs_info)) {
5113 		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
5114 					    level, root_objectid, nest);
5115 		if (!IS_ERR(buf))
5116 			root->alloc_bytenr += blocksize;
5117 		return buf;
5118 	}
5119 #endif
5120 
5121 	block_rsv = btrfs_use_block_rsv(trans, root, blocksize);
5122 	if (IS_ERR(block_rsv))
5123 		return ERR_CAST(block_rsv);
5124 
5125 	ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
5126 				   empty_size, hint, &ins, 0, 0);
5127 	if (ret)
5128 		goto out_unuse;
5129 
5130 	buf = btrfs_init_new_buffer(trans, root, ins.objectid, level,
5131 				    root_objectid, nest);
5132 	if (IS_ERR(buf)) {
5133 		ret = PTR_ERR(buf);
5134 		goto out_free_reserved;
5135 	}
5136 	owning_root = btrfs_header_owner(buf);
5137 
5138 	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
5139 		if (parent == 0)
5140 			parent = ins.objectid;
5141 		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5142 		owning_root = reloc_src_root;
5143 	} else
5144 		BUG_ON(parent > 0);
5145 
5146 	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
5147 		struct btrfs_delayed_extent_op *extent_op;
5148 		struct btrfs_ref generic_ref = {
5149 			.action = BTRFS_ADD_DELAYED_EXTENT,
5150 			.bytenr = ins.objectid,
5151 			.num_bytes = ins.offset,
5152 			.parent = parent,
5153 			.owning_root = owning_root,
5154 			.ref_root = root_objectid,
5155 		};
5156 
5157 		if (!skinny_metadata || flags != 0) {
5158 			extent_op = btrfs_alloc_delayed_extent_op();
5159 			if (!extent_op) {
5160 				ret = -ENOMEM;
5161 				goto out_free_buf;
5162 			}
5163 			if (key)
5164 				memcpy(&extent_op->key, key, sizeof(extent_op->key));
5165 			else
5166 				memset(&extent_op->key, 0, sizeof(extent_op->key));
5167 			extent_op->flags_to_set = flags;
5168 			extent_op->update_key = !skinny_metadata;
5169 			extent_op->update_flags = (flags != 0);
5170 		} else {
5171 			extent_op = NULL;
5172 		}
5173 
5174 		btrfs_init_tree_ref(&generic_ref, level, btrfs_root_id(root), false);
5175 		btrfs_ref_tree_mod(fs_info, &generic_ref);
5176 		ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op);
5177 		if (ret) {
5178 			btrfs_free_delayed_extent_op(extent_op);
5179 			goto out_free_buf;
5180 		}
5181 	}
5182 	return buf;
5183 
5184 out_free_buf:
5185 	btrfs_tree_unlock(buf);
5186 	free_extent_buffer(buf);
5187 out_free_reserved:
5188 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
5189 out_unuse:
5190 	btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize);
5191 	return ERR_PTR(ret);
5192 }
5193 
5194 struct walk_control {
5195 	u64 refs[BTRFS_MAX_LEVEL];
5196 	u64 flags[BTRFS_MAX_LEVEL];
5197 	struct btrfs_key update_progress;
5198 	struct btrfs_key drop_progress;
5199 	int drop_level;
5200 	int stage;
5201 	int level;
5202 	int shared_level;
5203 	int update_ref;
5204 	int keep_locks;
5205 	int reada_slot;
5206 	int reada_count;
5207 	int restarted;
5208 	/* Indicate that extent info needs to be looked up when walking the tree. */
5209 	int lookup_info;
5210 };
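
/*
 * Field notes, derived from the users below: refs[] and flags[] cache
 * btrfs_lookup_extent_info() results per level; drop_progress/drop_level
 * checkpoint how far the drop has gone so an interrupted drop can resume;
 * update_progress records how far reference updates got so restarted runs
 * can skip finished subtrees; shared_level is the level where a shared
 * block forced the switch to UPDATE_BACKREF; keep_locks keeps every level
 * locked while walking (used by btrfs_drop_subtree()); restarted flags a
 * resumed drop that must re-verify refs via check_ref_exists().
 */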
5211 
5212 /*
5213  * This is our normal stage.  We are traversing blocks the current snapshot owns
5214  * and we are dropping any of our references to any children we are able to, and
5215  * then freeing the block once we've processed all of the children.
5216  */
5217 #define DROP_REFERENCE	1
5218 
5219 /*
5220  * We enter this stage when we have to walk into a child block (meaning we can't
5221  * simply drop our reference to it from our current parent node) and there is
5222  * more than one reference on it.  If we are the owner of any of the children
5223  * blocks from the current parent node then we have to do the FULL_BACKREF dance
5224  * on them in order to drop our normal ref and add the shared ref.
5225  */
5226 #define UPDATE_BACKREF	2
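
/*
 * Worked example of the stage machine: while in DROP_REFERENCE at wc->level
 * we find that a child node we must walk into has refs > 1 because it is
 * shared with another snapshot.  do_walk_down() switches wc->stage to
 * UPDATE_BACKREF and records wc->shared_level.  walk_down_proc() then
 * converts each block we own in that subtree to FULL_BACKREF
 * (btrfs_inc_ref() to add shared refs, btrfs_dec_ref() to drop the keyed
 * refs, then set the flag), and once walk_up_proc() climbs back to
 * wc->shared_level it flips the stage back to DROP_REFERENCE and resumes
 * dropping references.
 */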
5227 
5228 /*
5229  * Decide if we need to walk down into this node to adjust the references.
5230  *
5231  * @root:	the root we are currently deleting
5232  * @wc:		the walk control for this deletion
5233  * @eb:		the parent eb that we're currently visiting
5234  * @refs:	the number of refs for wc->level - 1
5235  * @flags:	the flags for wc->level - 1
5236  * @slot:	the slot in the eb that we're currently checking
5237  *
5238  * This is meant to be called when we're evaluating if a node we point to at
5239  * wc->level should be read and walked into, or if we can simply delete our
5240  * reference to it.  We return true if we should walk into the node, false if we
5241  * can skip it.
5242  *
5243  * We have assertions in here to make sure this is called correctly.  We assume
5244  * that sanity checking on the blocks read to this point has been done, so any
5245  * corrupted file systems must have been caught before calling this function.
5246  */
visit_node_for_delete(struct btrfs_root * root,struct walk_control * wc,struct extent_buffer * eb,u64 flags,int slot)5247 static bool visit_node_for_delete(struct btrfs_root *root, struct walk_control *wc,
5248 				  struct extent_buffer *eb, u64 flags, int slot)
5249 {
5250 	struct btrfs_key key;
5251 	u64 generation;
5252 	int level = wc->level;
5253 
5254 	ASSERT(level > 0);
5255 	ASSERT(wc->refs[level - 1] > 0);
5256 
5257 	/*
5258 	 * In the UPDATE_BACKREF stage we only want to skip a block if it already
5259 	 * has FULL_BACKREF set; otherwise we need to read it.
5260 	 */
5261 	if (wc->stage == UPDATE_BACKREF) {
5262 		if (level == 1 && flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5263 			return false;
5264 		return true;
5265 	}
5266 
5267 	/*
5268 	 * We're the last ref on this block, we must walk into it and process
5269 	 * any refs it's pointing at.
5270 	 */
5271 	if (wc->refs[level - 1] == 1)
5272 		return true;
5273 
5274 	/*
5275 	 * If we're already FULL_BACKREF then we know we can just drop our
5276 	 * current reference.
5277 	 */
5278 	if (level == 1 && flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5279 		return false;
5280 
5281 	/*
5282 	 * This block is older than our creation generation, so we can drop our
5283 	 * reference to it.
5284 	 */
5285 	generation = btrfs_node_ptr_generation(eb, slot);
5286 	if (!wc->update_ref || generation <= btrfs_root_origin_generation(root))
5287 		return false;
5288 
5289 	/*
5290 	 * This block was processed by a previous snapshot deletion run, so we
5291 	 * can skip it.
5292 	 */
5293 	btrfs_node_key_to_cpu(eb, &key, slot);
5294 	if (btrfs_comp_cpu_keys(&key, &wc->update_progress) < 0)
5295 		return false;
5296 
5297 	/* All other cases we need to wander into the node. */
5298 	return true;
5299 }
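
/*
 * Summary of visit_node_for_delete(), restating the checks above in
 * evaluation order:
 *
 *   UPDATE_BACKREF:     walk in unless the child is a leaf (level == 1)
 *                       that already has FULL_BACKREF set.
 *   refs == 1:          must walk in, ours is the last reference.
 *   FULL_BACKREF leaf:  shared via the parent only, just drop our ref.
 *   old generation:     block predates the snapshot (or update_ref is
 *                       unset), safe to drop.
 *   key < update_progress: handled by an earlier, interrupted run.
 *   otherwise:          walk in.
 */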
5300 
reada_walk_down(struct btrfs_trans_handle * trans,struct btrfs_root * root,struct walk_control * wc,struct btrfs_path * path)5301 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
5302 				     struct btrfs_root *root,
5303 				     struct walk_control *wc,
5304 				     struct btrfs_path *path)
5305 {
5306 	struct btrfs_fs_info *fs_info = root->fs_info;
5307 	u64 bytenr;
5308 	u64 generation;
5309 	u64 refs;
5310 	u64 flags;
5311 	u32 nritems;
5312 	struct extent_buffer *eb;
5313 	int ret;
5314 	int slot;
5315 	int nread = 0;
5316 
5317 	if (path->slots[wc->level] < wc->reada_slot) {
5318 		wc->reada_count = wc->reada_count * 2 / 3;
5319 		wc->reada_count = max(wc->reada_count, 2);
5320 	} else {
5321 		wc->reada_count = wc->reada_count * 3 / 2;
5322 		wc->reada_count = min_t(int, wc->reada_count,
5323 					BTRFS_NODEPTRS_PER_BLOCK(fs_info));
5324 	}
5325 
5326 	eb = path->nodes[wc->level];
5327 	nritems = btrfs_header_nritems(eb);
5328 
5329 	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5330 		if (nread >= wc->reada_count)
5331 			break;
5332 
5333 		cond_resched();
5334 		bytenr = btrfs_node_blockptr(eb, slot);
5335 		generation = btrfs_node_ptr_generation(eb, slot);
5336 
5337 		if (slot == path->slots[wc->level])
5338 			goto reada;
5339 
5340 		if (wc->stage == UPDATE_BACKREF &&
5341 		    generation <= btrfs_root_origin_generation(root))
5342 			continue;
5343 
5344 		/* We don't lock the tree block, it's OK to be racy here */
5345 		ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
5346 					       wc->level - 1, 1, &refs,
5347 					       &flags, NULL);
5348 		/* We don't care about errors in readahead. */
5349 		if (ret < 0)
5350 			continue;
5351 
5352 		/*
5353 		 * This could be racy; it's conceivable that we raced and ended
5354 		 * up with a bogus refs count.  If that's the case just skip it;
5355 		 * if we are actually corrupt we will notice when we look up
5356 		 * everything again with our locks held.
5357 		 */
5358 		if (refs == 0)
5359 			continue;
5360 
5361 		/* If we don't need to visit this node don't reada. */
5362 		if (!visit_node_for_delete(root, wc, eb, flags, slot))
5363 			continue;
5364 reada:
5365 		btrfs_readahead_node_child(eb, slot);
5366 		nread++;
5367 	}
5368 	wc->reada_slot = slot;
5369 }
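
/*
 * Example of the readahead window adaptation above: starting from
 * wc->reada_count == 32, each call where the walk has already consumed the
 * previous readahead position grows the window by 3/2 (32 -> 48 -> 72 ...)
 * up to BTRFS_NODEPTRS_PER_BLOCK(), while a call made while readahead is
 * still ahead of the walk shrinks it by 2/3 (32 -> 21 -> 14 ...) down to a
 * floor of 2.
 */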
5370 
5371 /*
5372  * helper to process tree block while walking down the tree.
5373  *
5374  * when wc->stage == UPDATE_BACKREF, this function updates
5375  * back refs for pointers in the block.
5376  *
5377  * NOTE: return value 1 means we should stop walking down.
5378  */
walk_down_proc(struct btrfs_trans_handle * trans,struct btrfs_root * root,struct btrfs_path * path,struct walk_control * wc)5379 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5380 				   struct btrfs_root *root,
5381 				   struct btrfs_path *path,
5382 				   struct walk_control *wc)
5383 {
5384 	struct btrfs_fs_info *fs_info = root->fs_info;
5385 	int level = wc->level;
5386 	struct extent_buffer *eb = path->nodes[level];
5387 	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5388 	int ret;
5389 
5390 	if (wc->stage == UPDATE_BACKREF && btrfs_header_owner(eb) != btrfs_root_id(root))
5391 		return 1;
5392 
5393 	/*
5394 	 * When the reference count of a tree block is 1, it won't increase
5395 	 * again.  Once the full backref flag is set, we never clear it.
5396 	 */
5397 	if (wc->lookup_info &&
5398 	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5399 	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
5400 		ASSERT(path->locks[level]);
5401 		ret = btrfs_lookup_extent_info(trans, fs_info,
5402 					       eb->start, level, 1,
5403 					       &wc->refs[level],
5404 					       &wc->flags[level],
5405 					       NULL);
5406 		if (ret)
5407 			return ret;
5408 		if (unlikely(wc->refs[level] == 0)) {
5409 			btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
5410 				  eb->start);
5411 			return -EUCLEAN;
5412 		}
5413 	}
5414 
5415 	if (wc->stage == DROP_REFERENCE) {
5416 		if (wc->refs[level] > 1)
5417 			return 1;
5418 
5419 		if (path->locks[level] && !wc->keep_locks) {
5420 			btrfs_tree_unlock_rw(eb, path->locks[level]);
5421 			path->locks[level] = 0;
5422 		}
5423 		return 0;
5424 	}
5425 
5426 	/* wc->stage == UPDATE_BACKREF */
5427 	if (!(wc->flags[level] & flag)) {
5428 		ASSERT(path->locks[level]);
5429 		ret = btrfs_inc_ref(trans, root, eb, 1);
5430 		if (ret) {
5431 			btrfs_abort_transaction(trans, ret);
5432 			return ret;
5433 		}
5434 		ret = btrfs_dec_ref(trans, root, eb, 0);
5435 		if (ret) {
5436 			btrfs_abort_transaction(trans, ret);
5437 			return ret;
5438 		}
5439 		ret = btrfs_set_disk_extent_flags(trans, eb, flag);
5440 		if (ret) {
5441 			btrfs_abort_transaction(trans, ret);
5442 			return ret;
5443 		}
5444 		wc->flags[level] |= flag;
5445 	}
5446 
5447 	/*
5448 	 * The block is shared by multiple trees, so it's not good to
5449 	 * keep the tree lock held.
5450 	 */
5451 	if (path->locks[level] && level > 0) {
5452 		btrfs_tree_unlock_rw(eb, path->locks[level]);
5453 		path->locks[level] = 0;
5454 	}
5455 	return 0;
5456 }
5457 
5458 /*
5459  * This is used to verify a ref exists for this root to deal with a bug where we
5460  * would have a drop_progress key that hadn't been updated properly.
5461  */
check_ref_exists(struct btrfs_trans_handle * trans,struct btrfs_root * root,u64 bytenr,u64 parent,int level)5462 static int check_ref_exists(struct btrfs_trans_handle *trans,
5463 			    struct btrfs_root *root, u64 bytenr, u64 parent,
5464 			    int level)
5465 {
5466 	struct btrfs_delayed_ref_root *delayed_refs;
5467 	struct btrfs_delayed_ref_head *head;
5468 	struct btrfs_path *path;
5469 	struct btrfs_extent_inline_ref *iref;
5470 	int ret;
5471 	bool exists = false;
5472 
5473 	path = btrfs_alloc_path();
5474 	if (!path)
5475 		return -ENOMEM;
5476 again:
5477 	ret = lookup_extent_backref(trans, path, &iref, bytenr,
5478 				    root->fs_info->nodesize, parent,
5479 				    btrfs_root_id(root), level, 0);
5480 	if (ret != -ENOENT) {
5481 		/*
5482 		 * If we got 0 then we found our reference, so return 1.
5483 		 * Otherwise ret is an error other than -ENOENT, so return it.
5484 		 */
5485 		btrfs_free_path(path);
5486 		return (ret < 0) ? ret : 1;
5487 	}
5488 
5489 	/*
5490 	 * We could have a delayed ref with this reference, so look it up while
5491 	 * we're holding the path open to make sure we don't race with the
5492 	 * delayed ref running.
5493 	 */
5494 	delayed_refs = &trans->transaction->delayed_refs;
5495 	spin_lock(&delayed_refs->lock);
5496 	head = btrfs_find_delayed_ref_head(root->fs_info, delayed_refs, bytenr);
5497 	if (!head)
5498 		goto out;
5499 	if (!mutex_trylock(&head->mutex)) {
5500 		/*
5501 		 * We're contended, which means that the delayed ref is running.
5502 		 * Get a reference and wait for the ref head to be complete,
5503 		 * then try again.
5504 		 */
5505 		refcount_inc(&head->refs);
5506 		spin_unlock(&delayed_refs->lock);
5507 
5508 		btrfs_release_path(path);
5509 
5510 		mutex_lock(&head->mutex);
5511 		mutex_unlock(&head->mutex);
5512 		btrfs_put_delayed_ref_head(head);
5513 		goto again;
5514 	}
5515 
5516 	exists = btrfs_find_delayed_tree_ref(head, root->root_key.objectid, parent);
5517 	mutex_unlock(&head->mutex);
5518 out:
5519 	spin_unlock(&delayed_refs->lock);
5520 	btrfs_free_path(path);
5521 	return exists ? 1 : 0;
5522 }
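
/*
 * A note on the lock dance above: mutex_trylock() on the ref head fails
 * only while that delayed ref is being run.  Taking a head reference,
 * dropping the path and the delayed_refs spinlock, then doing a blocking
 * mutex_lock()/mutex_unlock() pair simply waits for that run to finish
 * before retrying the whole lookup from scratch.
 */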
5523 
5524 /*
5525  * We may not have an uptodate block, so if we are going to walk down into this
5526  * block we need to drop the lock, read it off the disk, re-lock it and
5527  * return to continue dropping the snapshot.
5528  */
check_next_block_uptodate(struct btrfs_trans_handle * trans,struct btrfs_root * root,struct btrfs_path * path,struct walk_control * wc,struct extent_buffer * next)5529 static int check_next_block_uptodate(struct btrfs_trans_handle *trans,
5530 				     struct btrfs_root *root,
5531 				     struct btrfs_path *path,
5532 				     struct walk_control *wc,
5533 				     struct extent_buffer *next)
5534 {
5535 	struct btrfs_tree_parent_check check = { 0 };
5536 	u64 generation;
5537 	int level = wc->level;
5538 	int ret;
5539 
5540 	btrfs_assert_tree_write_locked(next);
5541 
5542 	generation = btrfs_node_ptr_generation(path->nodes[level], path->slots[level]);
5543 
5544 	if (btrfs_buffer_uptodate(next, generation, 0))
5545 		return 0;
5546 
5547 	check.level = level - 1;
5548 	check.transid = generation;
5549 	check.owner_root = btrfs_root_id(root);
5550 	check.has_first_key = true;
5551 	btrfs_node_key_to_cpu(path->nodes[level], &check.first_key, path->slots[level]);
5552 
5553 	btrfs_tree_unlock(next);
5554 	if (level == 1)
5555 		reada_walk_down(trans, root, wc, path);
5556 	ret = btrfs_read_extent_buffer(next, &check);
5557 	if (ret) {
5558 		free_extent_buffer(next);
5559 		return ret;
5560 	}
5561 	btrfs_tree_lock(next);
5562 	wc->lookup_info = 1;
5563 	return 0;
5564 }
5565 
5566 /*
5567  * If we determine that we don't have to visit wc->level - 1 then we need to
5568  * determine if we can drop our reference.
5569  *
5570  * If we are UPDATE_BACKREF then we will not, we need to update our backrefs.
5571  *
5572  * If we are DROP_REFERENCE this will figure out if we need to drop our current
5573  * reference, skipping it if we dropped it from a previous incomplete drop, or
5574  * dropping it if we still have a reference to it.
5575  */
maybe_drop_reference(struct btrfs_trans_handle * trans,struct btrfs_root * root,struct btrfs_path * path,struct walk_control * wc,struct extent_buffer * next,u64 owner_root)5576 static int maybe_drop_reference(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5577 				struct btrfs_path *path, struct walk_control *wc,
5578 				struct extent_buffer *next, u64 owner_root)
5579 {
5580 	struct btrfs_ref ref = {
5581 		.action = BTRFS_DROP_DELAYED_REF,
5582 		.bytenr = next->start,
5583 		.num_bytes = root->fs_info->nodesize,
5584 		.owning_root = owner_root,
5585 		.ref_root = btrfs_root_id(root),
5586 	};
5587 	int level = wc->level;
5588 	int ret;
5589 
5590 	/* We are UPDATE_BACKREF, we're not dropping anything. */
5591 	if (wc->stage == UPDATE_BACKREF)
5592 		return 0;
5593 
5594 	if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5595 		ref.parent = path->nodes[level]->start;
5596 	} else {
5597 		ASSERT(btrfs_root_id(root) == btrfs_header_owner(path->nodes[level]));
5598 		if (btrfs_root_id(root) != btrfs_header_owner(path->nodes[level])) {
5599 			btrfs_err(root->fs_info, "mismatched block owner");
5600 			return -EIO;
5601 		}
5602 	}
5603 
5604 	/*
5605 	 * If we had a drop_progress we need to verify the refs are set as
5606 	 * expected.  If we find our ref then we know that from here on out
5607 	 * everything should be correct, and we can clear the
5608 	 * ->restarted flag.
5609 	 */
5610 	if (wc->restarted) {
5611 		ret = check_ref_exists(trans, root, next->start, ref.parent,
5612 				       level - 1);
5613 		if (ret <= 0)
5614 			return ret;
5615 		ret = 0;
5616 		wc->restarted = 0;
5617 	}
5618 
5619 	/*
5620 	 * The reloc tree doesn't contribute to qgroup numbers, and we have
5621 	 * already accounted for them at merge time (replace_path), so we can
5622 	 * skip the expensive subtree trace here.
5623 	 */
5624 	if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
5625 	    wc->refs[level - 1] > 1) {
5626 		u64 generation = btrfs_node_ptr_generation(path->nodes[level],
5627 							   path->slots[level]);
5628 
5629 		ret = btrfs_qgroup_trace_subtree(trans, next, generation, level - 1);
5630 		if (ret) {
5631 			btrfs_err_rl(root->fs_info,
5632 "error %d accounting shared subtree, quota is out of sync, rescan required",
5633 				     ret);
5634 		}
5635 	}
5636 
5637 	/*
5638 	 * We need to update the next key in our walk control so we can update
5639 	 * the drop_progress key accordingly.  We don't care if find_next_key
5640 	 * doesn't find a key because that means we're at the end and are going
5641 	 * to clean up now.
5642 	 */
5643 	wc->drop_level = level;
5644 	find_next_key(path, level, &wc->drop_progress);
5645 
5646 	btrfs_init_tree_ref(&ref, level - 1, 0, false);
5647 	return btrfs_free_extent(trans, &ref);
5648 }
5649 
5650 /*
5651  * helper to process tree block pointer.
5652  *
5653  * when wc->stage == DROP_REFERENCE, this function checks
5654  * reference count of the block pointed to. if the block
5655  * is shared and we need update back refs for the subtree
5656  * rooted at the block, this function changes wc->stage to
5657  * UPDATE_BACKREF. if the block is shared and there is no
5658  * need to update back, this function drops the reference
5659  * to the block.
5660  *
5661  * NOTE: return value 1 means we should stop walking down.
5662  */
do_walk_down(struct btrfs_trans_handle * trans,struct btrfs_root * root,struct btrfs_path * path,struct walk_control * wc)5663 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
5664 				 struct btrfs_root *root,
5665 				 struct btrfs_path *path,
5666 				 struct walk_control *wc)
5667 {
5668 	struct btrfs_fs_info *fs_info = root->fs_info;
5669 	u64 bytenr;
5670 	u64 generation;
5671 	u64 owner_root = 0;
5672 	struct extent_buffer *next;
5673 	int level = wc->level;
5674 	int ret = 0;
5675 
5676 	generation = btrfs_node_ptr_generation(path->nodes[level],
5677 					       path->slots[level]);
5678 	/*
5679 	 * if the lower level block was created before the snapshot
5680 	 * was created, we know there is no need to update back refs
5681 	 * for the subtree
5682 	 */
5683 	if (wc->stage == UPDATE_BACKREF &&
5684 	    generation <= btrfs_root_origin_generation(root)) {
5685 		wc->lookup_info = 1;
5686 		return 1;
5687 	}
5688 
5689 	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
5690 
5691 	next = btrfs_find_create_tree_block(fs_info, bytenr, btrfs_root_id(root),
5692 					    level - 1);
5693 	if (IS_ERR(next))
5694 		return PTR_ERR(next);
5695 
5696 	btrfs_tree_lock(next);
5697 
5698 	ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
5699 				       &wc->refs[level - 1],
5700 				       &wc->flags[level - 1],
5701 				       &owner_root);
5702 	if (ret < 0)
5703 		goto out_unlock;
5704 
5705 	if (unlikely(wc->refs[level - 1] == 0)) {
5706 		btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
5707 			  bytenr);
5708 		ret = -EUCLEAN;
5709 		goto out_unlock;
5710 	}
5711 	wc->lookup_info = 0;
5712 
5713 	/* If we don't have to walk into this node skip it. */
5714 	if (!visit_node_for_delete(root, wc, path->nodes[level],
5715 				   wc->flags[level - 1], path->slots[level]))
5716 		goto skip;
5717 
5718 	/*
5719 	 * We have to walk down into this node, and if we're currently at the
5720 	 * DROP_REFERENCE stage and this block is shared then we need to switch
5721 	 * to the UPDATE_BACKREF stage in order to convert to FULL_BACKREF.
5722 	 */
5723 	if (wc->stage == DROP_REFERENCE && wc->refs[level - 1] > 1) {
5724 		wc->stage = UPDATE_BACKREF;
5725 		wc->shared_level = level - 1;
5726 	}
5727 
5728 	ret = check_next_block_uptodate(trans, root, path, wc, next);
5729 	if (ret)
5730 		return ret;
5731 
5732 	level--;
5733 	ASSERT(level == btrfs_header_level(next));
5734 	if (level != btrfs_header_level(next)) {
5735 		btrfs_err(root->fs_info, "mismatched level");
5736 		ret = -EIO;
5737 		goto out_unlock;
5738 	}
5739 	path->nodes[level] = next;
5740 	path->slots[level] = 0;
5741 	path->locks[level] = BTRFS_WRITE_LOCK;
5742 	wc->level = level;
5743 	if (wc->level == 1)
5744 		wc->reada_slot = 0;
5745 	return 0;
5746 skip:
5747 	ret = maybe_drop_reference(trans, root, path, wc, next, owner_root);
5748 	if (ret)
5749 		goto out_unlock;
5750 	wc->refs[level - 1] = 0;
5751 	wc->flags[level - 1] = 0;
5752 	wc->lookup_info = 1;
5753 	ret = 1;
5754 
5755 out_unlock:
5756 	btrfs_tree_unlock(next);
5757 	free_extent_buffer(next);
5758 
5759 	return ret;
5760 }
5761 
5762 /*
5763  * helper to process tree block while walking up the tree.
5764  *
5765  * when wc->stage == DROP_REFERENCE, this function drops
5766  * reference count on the block.
5767  *
5768  * when wc->stage == UPDATE_BACKREF, this function changes
5769  * wc->stage back to DROP_REFERENCE if we changed wc->stage
5770  * to UPDATE_BACKREF previously while processing the block.
5771  *
5772  * NOTE: return value 1 means we should stop walking up.
5773  */
walk_up_proc(struct btrfs_trans_handle * trans,struct btrfs_root * root,struct btrfs_path * path,struct walk_control * wc)5774 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
5775 				 struct btrfs_root *root,
5776 				 struct btrfs_path *path,
5777 				 struct walk_control *wc)
5778 {
5779 	struct btrfs_fs_info *fs_info = root->fs_info;
5780 	int ret = 0;
5781 	int level = wc->level;
5782 	struct extent_buffer *eb = path->nodes[level];
5783 	u64 parent = 0;
5784 
5785 	if (wc->stage == UPDATE_BACKREF) {
5786 		ASSERT(wc->shared_level >= level);
5787 		if (level < wc->shared_level)
5788 			goto out;
5789 
5790 		ret = find_next_key(path, level + 1, &wc->update_progress);
5791 		if (ret > 0)
5792 			wc->update_ref = 0;
5793 
5794 		wc->stage = DROP_REFERENCE;
5795 		wc->shared_level = -1;
5796 		path->slots[level] = 0;
5797 
5798 		/*
5799 		 * check reference count again if the block isn't locked.
5800 		 * we should start walking down the tree again if reference
5801 		 * count is one.
5802 		 */
5803 		if (!path->locks[level]) {
5804 			ASSERT(level > 0);
5805 			btrfs_tree_lock(eb);
5806 			path->locks[level] = BTRFS_WRITE_LOCK;
5807 
5808 			ret = btrfs_lookup_extent_info(trans, fs_info,
5809 						       eb->start, level, 1,
5810 						       &wc->refs[level],
5811 						       &wc->flags[level],
5812 						       NULL);
5813 			if (ret < 0) {
5814 				btrfs_tree_unlock_rw(eb, path->locks[level]);
5815 				path->locks[level] = 0;
5816 				return ret;
5817 			}
5818 			if (unlikely(wc->refs[level] == 0)) {
5819 				btrfs_tree_unlock_rw(eb, path->locks[level]);
5820 				btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
5821 					  eb->start);
5822 				return -EUCLEAN;
5823 			}
5824 			if (wc->refs[level] == 1) {
5825 				btrfs_tree_unlock_rw(eb, path->locks[level]);
5826 				path->locks[level] = 0;
5827 				return 1;
5828 			}
5829 		}
5830 	}
5831 
5832 	/* wc->stage == DROP_REFERENCE */
5833 	ASSERT(path->locks[level] || wc->refs[level] == 1);
5834 
5835 	if (wc->refs[level] == 1) {
5836 		if (level == 0) {
5837 			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5838 				ret = btrfs_dec_ref(trans, root, eb, 1);
5839 			else
5840 				ret = btrfs_dec_ref(trans, root, eb, 0);
5841 			if (ret) {
5842 				btrfs_abort_transaction(trans, ret);
5843 				return ret;
5844 			}
5845 			if (is_fstree(btrfs_root_id(root))) {
5846 				ret = btrfs_qgroup_trace_leaf_items(trans, eb);
5847 				if (ret) {
5848 					btrfs_err_rl(fs_info,
5849 	"error %d accounting leaf items, quota is out of sync, rescan required",
5850 					     ret);
5851 				}
5852 			}
5853 		}
5854 		/* Make the block-locked assertion in btrfs_clear_buffer_dirty() happy. */
5855 		if (!path->locks[level]) {
5856 			btrfs_tree_lock(eb);
5857 			path->locks[level] = BTRFS_WRITE_LOCK;
5858 		}
5859 		btrfs_clear_buffer_dirty(trans, eb);
5860 	}
5861 
5862 	if (eb == root->node) {
5863 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5864 			parent = eb->start;
5865 		else if (btrfs_root_id(root) != btrfs_header_owner(eb))
5866 			goto owner_mismatch;
5867 	} else {
5868 		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5869 			parent = path->nodes[level + 1]->start;
5870 		else if (btrfs_root_id(root) !=
5871 			 btrfs_header_owner(path->nodes[level + 1]))
5872 			goto owner_mismatch;
5873 	}
5874 
5875 	ret = btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent,
5876 				    wc->refs[level] == 1);
5877 	if (ret < 0)
5878 		btrfs_abort_transaction(trans, ret);
5879 out:
5880 	wc->refs[level] = 0;
5881 	wc->flags[level] = 0;
5882 	return ret;
5883 
5884 owner_mismatch:
5885 	btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
5886 		     btrfs_header_owner(eb), btrfs_root_id(root));
5887 	return -EUCLEAN;
5888 }
5889 
5890 /*
5891  * walk_down_tree consists of two steps.
5892  *
5893  * walk_down_proc().  Look up the reference count and reference of our current
5894  * wc->level.  At this point path->nodes[wc->level] should be populated and
5895  * uptodate, and in most cases should already be locked.  If we are in
5896  * DROP_REFERENCE and our refcount is > 1 then we've entered a shared node and
5897  * we can walk back up the tree.  If we are UPDATE_BACKREF we have to set
5898  * FULL_BACKREF on this node if it's not already set, and then do the
5899  * FULL_BACKREF conversion dance, which is to drop the root reference and add
5900  * the shared reference to all of this node's children.
5901  *
5902  * do_walk_down().  This is where we actually start iterating on the children of
5903  * our current path->nodes[wc->level].  For DROP_REFERENCE that means dropping
5904  * our reference to the children that return false from visit_node_for_delete(),
5905  * which has various conditions where we know we can just drop our reference
5906  * without visiting the node.  For UPDATE_BACKREF we will skip any children that
5907  * visit_node_for_delete() returns false for, only walking down when necessary.
5908  * The bulk of the work for UPDATE_BACKREF occurs in the walk_up_tree() part of
5909  * snapshot deletion.
5910  */
walk_down_tree(struct btrfs_trans_handle * trans,struct btrfs_root * root,struct btrfs_path * path,struct walk_control * wc)5911 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
5912 				   struct btrfs_root *root,
5913 				   struct btrfs_path *path,
5914 				   struct walk_control *wc)
5915 {
5916 	int level = wc->level;
5917 	int ret = 0;
5918 
5919 	wc->lookup_info = 1;
5920 	while (level >= 0) {
5921 		ret = walk_down_proc(trans, root, path, wc);
5922 		if (ret)
5923 			break;
5924 
5925 		if (level == 0)
5926 			break;
5927 
5928 		if (path->slots[level] >=
5929 		    btrfs_header_nritems(path->nodes[level]))
5930 			break;
5931 
5932 		ret = do_walk_down(trans, root, path, wc);
5933 		if (ret > 0) {
5934 			path->slots[level]++;
5935 			continue;
5936 		} else if (ret < 0)
5937 			break;
5938 		level = wc->level;
5939 	}
5940 	return (ret == 1) ? 0 : ret;
5941 }
5942 
5943 /*
5944  * walk_up_tree() is responsible for making sure we visit every slot on our
5945  * current node, and if we're at the end of that node then we call
5946  * walk_up_proc() on our current node which will do one of a few things based on
5947  * our stage.
5948  *
5949  * UPDATE_BACKREF.  If wc->level is currently less than our wc->shared_level
5950  * then we need to walk back up the tree, and then go back down into the
5951  * other slots via walk_down_tree to update any other children from our original
5952  * wc->shared_level.  Once we're at or above our wc->shared_level we can switch
5953  * back to DROP_REFERENCE, look up the current node's refs and flags, and carry on.
5954  *
5955  * DROP_REFERENCE. If our refs == 1 then we're going to free this tree block.
5956  * If we're level 0 then we need to btrfs_dec_ref() on all of the data extents
5957  * in our current leaf.  After that we call btrfs_free_tree_block() on the
5958  * current node and walk up to the next node to walk down the next slot.
5959  */
walk_up_tree(struct btrfs_trans_handle * trans,struct btrfs_root * root,struct btrfs_path * path,struct walk_control * wc,int max_level)5960 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
5961 				 struct btrfs_root *root,
5962 				 struct btrfs_path *path,
5963 				 struct walk_control *wc, int max_level)
5964 {
5965 	int level = wc->level;
5966 	int ret;
5967 
5968 	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
5969 	while (level < max_level && path->nodes[level]) {
5970 		wc->level = level;
5971 		if (path->slots[level] + 1 <
5972 		    btrfs_header_nritems(path->nodes[level])) {
5973 			path->slots[level]++;
5974 			return 0;
5975 		} else {
5976 			ret = walk_up_proc(trans, root, path, wc);
5977 			if (ret > 0)
5978 				return 0;
5979 			if (ret < 0)
5980 				return ret;
5981 
5982 			if (path->locks[level]) {
5983 				btrfs_tree_unlock_rw(path->nodes[level],
5984 						     path->locks[level]);
5985 				path->locks[level] = 0;
5986 			}
5987 			free_extent_buffer(path->nodes[level]);
5988 			path->nodes[level] = NULL;
5989 			level++;
5990 		}
5991 	}
5992 	return 1;
5993 }
5994 
5995 /*
5996  * Drop a subvolume tree.
5997  *
5998  * This function traverses the tree, freeing any blocks that are only
5999  * referenced by the tree.
6000  *
6001  * When a shared tree block is found, this function decreases its
6002  * reference count by one.  If update_ref is true, this function
6003  * also makes sure backrefs for the shared block and all lower level
6004  * blocks are properly updated.
6005  *
6006  * If called with for_reloc == 0, may exit early with -EAGAIN
6007  */
btrfs_drop_snapshot(struct btrfs_root * root,int update_ref,int for_reloc)6008 int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
6009 {
6010 	const bool is_reloc_root = (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID);
6011 	struct btrfs_fs_info *fs_info = root->fs_info;
6012 	struct btrfs_path *path;
6013 	struct btrfs_trans_handle *trans;
6014 	struct btrfs_root *tree_root = fs_info->tree_root;
6015 	struct btrfs_root_item *root_item = &root->root_item;
6016 	struct walk_control *wc;
6017 	struct btrfs_key key;
6018 	const u64 rootid = btrfs_root_id(root);
6019 	int ret = 0;
6020 	int level;
6021 	bool root_dropped = false;
6022 	bool unfinished_drop = false;
6023 
6024 	btrfs_debug(fs_info, "Drop subvolume %llu", btrfs_root_id(root));
6025 
6026 	path = btrfs_alloc_path();
6027 	if (!path) {
6028 		ret = -ENOMEM;
6029 		goto out;
6030 	}
6031 
6032 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
6033 	if (!wc) {
6034 		btrfs_free_path(path);
6035 		ret = -ENOMEM;
6036 		goto out;
6037 	}
6038 
6039 	/*
6040 	 * Use join to avoid potential EINTR from transaction start. See
6041 	 * wait_reserve_ticket and the whole reservation callchain.
6042 	 */
6043 	if (for_reloc)
6044 		trans = btrfs_join_transaction(tree_root);
6045 	else
6046 		trans = btrfs_start_transaction(tree_root, 0);
6047 	if (IS_ERR(trans)) {
6048 		ret = PTR_ERR(trans);
6049 		goto out_free;
6050 	}
6051 
6052 	ret = btrfs_run_delayed_items(trans);
6053 	if (ret)
6054 		goto out_end_trans;
6055 
6056 	/*
6057 	 * This will help us catch people modifying the fs tree while we're
6058 	 * dropping it.  It is unsafe to mess with the fs tree while it's being
6059 	 * dropped as we unlock the root node and parent nodes as we walk down
6060 	 * the tree, assuming nothing will change.  If something does change
6061 	 * then we'll have stale information and drop references to blocks we've
6062 	 * already dropped.
6063 	 */
6064 	set_bit(BTRFS_ROOT_DELETING, &root->state);
6065 	unfinished_drop = test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state);
6066 
6067 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6068 		level = btrfs_header_level(root->node);
6069 		path->nodes[level] = btrfs_lock_root_node(root);
6070 		path->slots[level] = 0;
6071 		path->locks[level] = BTRFS_WRITE_LOCK;
6072 		memset(&wc->update_progress, 0,
6073 		       sizeof(wc->update_progress));
6074 	} else {
6075 		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6076 		memcpy(&wc->update_progress, &key,
6077 		       sizeof(wc->update_progress));
6078 
6079 		level = btrfs_root_drop_level(root_item);
6080 		BUG_ON(level == 0);
6081 		path->lowest_level = level;
6082 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6083 		path->lowest_level = 0;
6084 		if (ret < 0)
6085 			goto out_end_trans;
6086 
6087 		WARN_ON(ret > 0);
6088 		ret = 0;
6089 
6090 		/*
6091 		 * Unlock our path; this is safe because only this
6092 		 * function is allowed to delete this snapshot.
6093 		 */
6094 		btrfs_unlock_up_safe(path, 0);
6095 
6096 		level = btrfs_header_level(root->node);
6097 		while (1) {
6098 			btrfs_tree_lock(path->nodes[level]);
6099 			path->locks[level] = BTRFS_WRITE_LOCK;
6100 
6101 			/*
6102 			 * btrfs_lookup_extent_info() returns 0 for success,
6103 			 * or < 0 for error.
6104 			 */
6105 			ret = btrfs_lookup_extent_info(trans, fs_info,
6106 						path->nodes[level]->start,
6107 						level, 1, &wc->refs[level],
6108 						&wc->flags[level], NULL);
6109 			if (ret < 0)
6110 				goto out_end_trans;
6111 
6112 			BUG_ON(wc->refs[level] == 0);
6113 
6114 			if (level == btrfs_root_drop_level(root_item))
6115 				break;
6116 
6117 			btrfs_tree_unlock(path->nodes[level]);
6118 			path->locks[level] = 0;
6119 			WARN_ON(wc->refs[level] != 1);
6120 			level--;
6121 		}
6122 	}
6123 
6124 	wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state);
6125 	wc->level = level;
6126 	wc->shared_level = -1;
6127 	wc->stage = DROP_REFERENCE;
6128 	wc->update_ref = update_ref;
6129 	wc->keep_locks = 0;
6130 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
6131 
6132 	while (1) {
6134 		ret = walk_down_tree(trans, root, path, wc);
6135 		if (ret < 0) {
6136 			btrfs_abort_transaction(trans, ret);
6137 			break;
6138 		}
6139 
6140 		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6141 		if (ret < 0) {
6142 			btrfs_abort_transaction(trans, ret);
6143 			break;
6144 		}
6145 
6146 		if (ret > 0) {
6147 			BUG_ON(wc->stage != DROP_REFERENCE);
6148 			ret = 0;
6149 			break;
6150 		}
6151 
6152 		if (wc->stage == DROP_REFERENCE) {
6153 			wc->drop_level = wc->level;
6154 			btrfs_node_key_to_cpu(path->nodes[wc->drop_level],
6155 					      &wc->drop_progress,
6156 					      path->slots[wc->drop_level]);
6157 		}
6158 		btrfs_cpu_key_to_disk(&root_item->drop_progress,
6159 				      &wc->drop_progress);
6160 		btrfs_set_root_drop_level(root_item, wc->drop_level);
6161 
6162 		BUG_ON(wc->level == 0);
6163 		if (btrfs_should_end_transaction(trans) ||
6164 		    (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
6165 			ret = btrfs_update_root(trans, tree_root,
6166 						&root->root_key,
6167 						root_item);
6168 			if (ret) {
6169 				btrfs_abort_transaction(trans, ret);
6170 				goto out_end_trans;
6171 			}
6172 
6173 			if (!is_reloc_root)
6174 				btrfs_set_last_root_drop_gen(fs_info, trans->transid);
6175 
6176 			btrfs_end_transaction_throttle(trans);
6177 			if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
6178 				btrfs_debug(fs_info,
6179 					    "drop snapshot early exit");
6180 				ret = -EAGAIN;
6181 				goto out_free;
6182 			}
6183 
6184 			/*
6185 			 * Use join to avoid potential EINTR from transaction
6186 			 * start. See wait_reserve_ticket and the whole
6187 			 * reservation callchain.
6188 			 */
6189 			if (for_reloc)
6190 				trans = btrfs_join_transaction(tree_root);
6191 			else
6192 				trans = btrfs_start_transaction(tree_root, 0);
6193 			if (IS_ERR(trans)) {
6194 				ret = PTR_ERR(trans);
6195 				goto out_free;
6196 			}
6197 		}
6198 	}
6199 	btrfs_release_path(path);
6200 	if (ret)
6201 		goto out_end_trans;
6202 
6203 	ret = btrfs_del_root(trans, &root->root_key);
6204 	if (ret) {
6205 		btrfs_abort_transaction(trans, ret);
6206 		goto out_end_trans;
6207 	}
6208 
6209 	if (!is_reloc_root) {
6210 		ret = btrfs_find_root(tree_root, &root->root_key, path,
6211 				      NULL, NULL);
6212 		if (ret < 0) {
6213 			btrfs_abort_transaction(trans, ret);
6214 			goto out_end_trans;
6215 		} else if (ret > 0) {
6216 			ret = 0;
6217 			/*
6218 			 * If we fail to delete the orphan item this time
6219 			 * around, it'll get picked up the next time.
6220 			 *
6221 			 * The most common failure here is just -ENOENT.
6222 			 */
6223 			btrfs_del_orphan_item(trans, tree_root, btrfs_root_id(root));
6224 		}
6225 	}
6226 
6227 	/*
6228 	 * This subvolume is going to be completely dropped, and won't be
6229 	 * recorded as a dirty root, so its pertrans meta rsv will not be freed
6230 	 * at commit transaction time.  Free it here manually.
6231 	 */
6232 	btrfs_qgroup_convert_reserved_meta(root, INT_MAX);
6233 	btrfs_qgroup_free_meta_all_pertrans(root);
6234 
6235 	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state))
6236 		btrfs_add_dropped_root(trans, root);
6237 	else
6238 		btrfs_put_root(root);
6239 	root_dropped = true;
6240 out_end_trans:
6241 	if (!is_reloc_root)
6242 		btrfs_set_last_root_drop_gen(fs_info, trans->transid);
6243 
6244 	btrfs_end_transaction_throttle(trans);
6245 out_free:
6246 	kfree(wc);
6247 	btrfs_free_path(path);
6248 out:
6249 	if (!ret && root_dropped) {
6250 		ret = btrfs_qgroup_cleanup_dropped_subvolume(fs_info, rootid);
6251 		if (ret < 0)
6252 			btrfs_warn_rl(fs_info,
6253 				      "failed to cleanup qgroup 0/%llu: %d",
6254 				      rootid, ret);
6255 		ret = 0;
6256 	}
6257 	/*
6258 	 * If we were an unfinished drop root, check whether any others are
6259 	 * still pending, and if not, clear the state and wake up any waiters.
6260 	 */
6261 	if (!ret && unfinished_drop)
6262 		btrfs_maybe_wake_unfinished_drop(fs_info);
6263 
6264 	/*
6265 	 * So if we need to stop dropping the snapshot for whatever reason we
6266 	 * need to make sure to add it back to the dead root list so that we
6267 	 * keep trying to do the work later.  This also cleans up roots we
6268 	 * don't have in the radix (like when we recover after a power fail
6269 	 * or unmount) so we don't leak memory.
6270 	 */
6271 	if (!for_reloc && !root_dropped)
6272 		btrfs_add_dead_root(root);
6273 	return ret;
6274 }
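
/*
 * For reference, the callers live outside this file: the cleaner thread
 * typically invokes this as btrfs_drop_snapshot(root, 0, 0) and tolerates
 * the -EAGAIN early exit, while relocation passes for_reloc == 1 so the
 * drop cannot bail out before the merged reloc root is fully gone.
 */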
6275 
6276 /*
6277  * drop subtree rooted at tree block 'node'.
6278  *
6279  * NOTE: this function will unlock and release tree block 'node'
6280  * only used by relocation code
6281  */
btrfs_drop_subtree(struct btrfs_trans_handle * trans,struct btrfs_root * root,struct extent_buffer * node,struct extent_buffer * parent)6282 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6283 			struct btrfs_root *root,
6284 			struct extent_buffer *node,
6285 			struct extent_buffer *parent)
6286 {
6287 	struct btrfs_fs_info *fs_info = root->fs_info;
6288 	struct btrfs_path *path;
6289 	struct walk_control *wc;
6290 	int level;
6291 	int parent_level;
6292 	int ret = 0;
6293 
6294 	BUG_ON(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID);
6295 
6296 	path = btrfs_alloc_path();
6297 	if (!path)
6298 		return -ENOMEM;
6299 
6300 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
6301 	if (!wc) {
6302 		btrfs_free_path(path);
6303 		return -ENOMEM;
6304 	}
6305 
6306 	btrfs_assert_tree_write_locked(parent);
6307 	parent_level = btrfs_header_level(parent);
6308 	atomic_inc(&parent->refs);
6309 	path->nodes[parent_level] = parent;
6310 	path->slots[parent_level] = btrfs_header_nritems(parent);
6311 
6312 	btrfs_assert_tree_write_locked(node);
6313 	level = btrfs_header_level(node);
6314 	path->nodes[level] = node;
6315 	path->slots[level] = 0;
6316 	path->locks[level] = BTRFS_WRITE_LOCK;
6317 
6318 	wc->refs[parent_level] = 1;
6319 	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6320 	wc->level = level;
6321 	wc->shared_level = -1;
6322 	wc->stage = DROP_REFERENCE;
6323 	wc->update_ref = 0;
6324 	wc->keep_locks = 1;
6325 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
6326 
6327 	while (1) {
6328 		ret = walk_down_tree(trans, root, path, wc);
6329 		if (ret < 0)
6330 			break;
6331 
6332 		ret = walk_up_tree(trans, root, path, wc, parent_level);
6333 		if (ret) {
6334 			if (ret > 0)
6335 				ret = 0;
6336 			break;
6337 		}
6338 	}
6339 
6340 	kfree(wc);
6341 	btrfs_free_path(path);
6342 	return ret;
6343 }
6344 
6345 /*
6346  * Unpin the extent range in an error context and don't add the space back.
6347  * Errors are not propagated further.
6348  */
btrfs_error_unpin_extent_range(struct btrfs_fs_info * fs_info,u64 start,u64 end)6349 void btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end)
6350 {
6351 	unpin_extent_range(fs_info, start, end, false);
6352 }
6353 
6354 /*
6355  * It used to be that old block groups would be left around forever.
6356  * Iterating over them would be enough to trim unused space.  Since we
6357  * now automatically remove them, we also need to iterate over unallocated
6358  * space.
6359  *
6360  * We don't want a transaction for this since the discard may take a
6361  * substantial amount of time.  We don't require that a transaction be
6362  * running, but we do need to take a running transaction into account
6363  * to ensure that we're not discarding chunks that were released or
6364  * allocated in the current transaction.
6365  *
6366  * Holding the chunks lock will prevent other threads from allocating
6367  * or releasing chunks, but it won't prevent a running transaction
6368  * from committing and releasing the memory that the pending chunks
6369  * list head uses.  For that, we need to take a reference to the
6370  * transaction and hold the commit root sem.  We only need to hold
6371  * it while performing the free space search since we have already
6372  * held back allocations.
6373  */
btrfs_trim_free_extents(struct btrfs_device * device,u64 * trimmed)6374 static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
6375 {
6376 	u64 start = BTRFS_DEVICE_RANGE_RESERVED, len = 0, end = 0;
6377 	int ret;
6378 
6379 	*trimmed = 0;
6380 
6381 	/* Discard not supported = nothing to do. */
6382 	if (!bdev_max_discard_sectors(device->bdev))
6383 		return 0;
6384 
6385 	/* Not writable = nothing to do. */
6386 	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
6387 		return 0;
6388 
6389 	/* No free space = nothing to do. */
6390 	if (device->total_bytes <= device->bytes_used)
6391 		return 0;
6392 
6393 	ret = 0;
6394 
6395 	while (1) {
6396 		struct btrfs_fs_info *fs_info = device->fs_info;
6397 		u64 bytes;
6398 
6399 		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
6400 		if (ret)
6401 			break;
6402 
6403 		find_first_clear_extent_bit(&device->alloc_state, start,
6404 					    &start, &end,
6405 					    CHUNK_TRIMMED | CHUNK_ALLOCATED);
6406 
6407 		/* Check if there are any CHUNK_* bits left */
6408 		if (start > device->total_bytes) {
6409 			WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
6410 			btrfs_warn_in_rcu(fs_info,
6411 "ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu",
6412 					  start, end - start + 1,
6413 					  btrfs_dev_name(device),
6414 					  device->total_bytes);
6415 			mutex_unlock(&fs_info->chunk_mutex);
6416 			ret = 0;
6417 			break;
6418 		}
6419 
6420 		/* Ensure we skip the reserved space on each device. */
6421 		start = max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);
6422 
6423 		/*
6424 		 * If find_first_clear_extent_bit() finds a range that spans the
6425 		 * end of the device it will set end to -1; in this case it's up
6426 		 * to the caller to trim the value to the size of the device.
6427 		 */
6428 		end = min(end, device->total_bytes - 1);
6429 
6430 		len = end - start + 1;
6431 
6432 		/* We didn't find any extents */
6433 		if (!len) {
6434 			mutex_unlock(&fs_info->chunk_mutex);
6435 			ret = 0;
6436 			break;
6437 		}
6438 
6439 		ret = btrfs_issue_discard(device->bdev, start, len,
6440 					  &bytes);
6441 		if (!ret)
6442 			set_extent_bit(&device->alloc_state, start,
6443 				       start + bytes - 1, CHUNK_TRIMMED, NULL);
6444 		mutex_unlock(&fs_info->chunk_mutex);
6445 
6446 		if (ret)
6447 			break;
6448 
6449 		start += len;
6450 		*trimmed += bytes;
6451 
6452 		if (btrfs_trim_interrupted()) {
6453 			ret = -ERESTARTSYS;
6454 			break;
6455 		}
6456 
6457 		cond_resched();
6458 	}
6459 
6460 	return ret;
6461 }
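
/*
 * Worked example for the loop above: on a device with CHUNK_ALLOCATED set
 * for [1M, 5M) and [9M, 12M), the first iteration discards [5M, 9M), the
 * second discards [12M, total_bytes), and each range is then marked
 * CHUNK_TRIMMED so a repeated FITRIM skips work that is already done.
 */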
6462 
6463 /*
6464  * Trim the whole filesystem by:
6465  * 1) trimming the free space in each block group
6466  * 2) trimming the unallocated space on each device
6467  *
6468  * This will also continue trimming even if a block group or device encounters
6469  * an error.  The return value will be the last error, or 0 if nothing bad
6470  * happens.
6471  */
btrfs_trim_fs(struct btrfs_fs_info * fs_info,struct fstrim_range * range)6472 int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
6473 {
6474 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6475 	struct btrfs_block_group *cache = NULL;
6476 	struct btrfs_device *device;
6477 	u64 group_trimmed;
6478 	u64 range_end = U64_MAX;
6479 	u64 start;
6480 	u64 end;
6481 	u64 trimmed = 0;
6482 	u64 bg_failed = 0;
6483 	u64 dev_failed = 0;
6484 	int bg_ret = 0;
6485 	int dev_ret = 0;
6486 	int ret = 0;
6487 
6488 	if (range->start == U64_MAX)
6489 		return -EINVAL;
6490 
6491 	/*
6492 	 * Check range overflow if range->len is set.
6493 	 * The default range->len is U64_MAX.
6494 	 */
6495 	if (range->len != U64_MAX &&
6496 	    check_add_overflow(range->start, range->len, &range_end))
6497 		return -EINVAL;
6498 
6499 	cache = btrfs_lookup_first_block_group(fs_info, range->start);
6500 	for (; cache; cache = btrfs_next_block_group(cache)) {
6501 		if (cache->start >= range_end) {
6502 			btrfs_put_block_group(cache);
6503 			break;
6504 		}
6505 
6506 		start = max(range->start, cache->start);
6507 		end = min(range_end, cache->start + cache->length);
6508 
6509 		if (end - start >= range->minlen) {
6510 			if (!btrfs_block_group_done(cache)) {
6511 				ret = btrfs_cache_block_group(cache, true);
6512 				if (ret) {
6513 					bg_failed++;
6514 					bg_ret = ret;
6515 					continue;
6516 				}
6517 			}
6518 			ret = btrfs_trim_block_group(cache,
6519 						     &group_trimmed,
6520 						     start,
6521 						     end,
6522 						     range->minlen);
6523 
6524 			trimmed += group_trimmed;
6525 			if (ret) {
6526 				bg_failed++;
6527 				bg_ret = ret;
6528 				continue;
6529 			}
6530 		}
6531 	}
6532 
6533 	if (bg_failed)
6534 		btrfs_warn(fs_info,
6535 			"failed to trim %llu block group(s), last error %d",
6536 			bg_failed, bg_ret);
6537 
6538 	mutex_lock(&fs_devices->device_list_mutex);
6539 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
6540 		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
6541 			continue;
6542 
6543 		ret = btrfs_trim_free_extents(device, &group_trimmed);
6544 
6545 		trimmed += group_trimmed;
6546 		if (ret) {
6547 			dev_failed++;
6548 			dev_ret = ret;
6549 			break;
6550 		}
6551 	}
6552 	mutex_unlock(&fs_devices->device_list_mutex);
6553 
6554 	if (dev_failed)
6555 		btrfs_warn(fs_info,
6556 			"failed to trim %llu device(s), last error %d",
6557 			dev_failed, dev_ret);
6558 	range->len = trimmed;
6559 	if (bg_ret)
6560 		return bg_ret;
6561 	return dev_ret;
6562 }
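
/*
 * Illustrative userspace counterpart (not part of this file): btrfs_trim_fs()
 * is reached via the FITRIM ioctl.  A minimal caller, assuming a btrfs
 * filesystem mounted at /mnt:
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>		// struct fstrim_range, FITRIM
 *
 *	int main(void)
 *	{
 *		struct fstrim_range range = {
 *			.start = 0,
 *			.len = (__u64)-1,	// trim the whole filesystem
 *			.minlen = 0,		// raised to the discard granularity
 *		};
 *		int fd = open("/mnt", O_RDONLY);
 *
 *		if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
 *			perror("FITRIM");
 *			return 1;
 *		}
 *		// On success range.len holds the bytes trimmed, mirroring
 *		// the "range->len = trimmed" assignment above.
 *		printf("trimmed %llu bytes\n", (unsigned long long)range.len);
 *		return 0;
 *	}
 */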
6563