/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_CTREE_H
#define BTRFS_CTREE_H

#include <linux/cleanup.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <linux/refcount.h>
#include <uapi/linux/btrfs_tree.h>
#include "locking.h"
#include "accessors.h"

struct extent_buffer;
struct btrfs_block_rsv;
struct btrfs_trans_handle;
struct btrfs_block_group;

/* Read ahead values for struct btrfs_path.reada */
enum {
	READA_NONE,
	READA_BACK,
	READA_FORWARD,
	/*
	 * Similar to READA_FORWARD but unlike it:
	 *
	 * 1) It will trigger readahead even for leaves that are not close to
	 *    each other on disk;
	 * 2) It also triggers readahead for nodes;
	 * 3) During a search, even when a node or leaf is already in memory, it
	 *    will still trigger readahead for other nodes and leaves that follow
	 *    it.
	 *
	 * This is meant to be used only when we know we are iterating over the
	 * entire tree or a very large part of it.
	 */
	READA_FORWARD_ALWAYS,
};

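/*
 * Example (an illustrative sketch, not part of this header): a scan over a
 * large part of a tree that opts in to aggressive readahead.  The path
 * helpers used below are declared later in this file; the key setup and
 * error handling are assumed.
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *
 *	if (!path)
 *		return -ENOMEM;
 *	path->reada = READA_FORWARD_ALWAYS;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	...
 *	btrfs_free_path(path);
 */
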
/*
 * btrfs_paths remember the path taken from the root down to the leaf.
 * Level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point
 * to any other levels that are present.
 *
 * The slots array records the index of the item or block pointer
 * used while walking the tree.
 */
struct btrfs_path {
	struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
	int slots[BTRFS_MAX_LEVEL];
	/* if there is real range locking, this locks field will change */
	u8 locks[BTRFS_MAX_LEVEL];
	u8 reada;
	u8 lowest_level;

	/*
	 * Set by btrfs_split_item(); tells search_slot to keep all locks
	 * and to force calls to keep space in the nodes.
	 */
	bool search_for_split:1;
	/* Keep some upper locks as we walk down. */
	bool keep_locks:1;
	bool skip_locking:1;
	bool search_commit_root:1;
	bool need_commit_sem:1;
	bool skip_release_on_error:1;
	/*
	 * Indicates that the new item (btrfs_search_slot) is extending an
	 * already existing item and ins_len contains only the data size and
	 * not the item header (i.e. sizeof(struct btrfs_item) is not
	 * included).
	 */
	bool search_for_extension:1;
	/* Stop search if any locks need to be taken (for read) */
	bool nowait:1;
};

#define BTRFS_PATH_AUTO_FREE(path_name)					\
	struct btrfs_path *path_name __free(btrfs_free_path) = NULL

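/*
 * Example (a sketch; the key setup and the rest of the caller are assumed):
 * a path declared with the macro above is freed automatically when it goes
 * out of scope, so no explicit btrfs_free_path() call is needed on any
 * return path.
 *
 *	BTRFS_PATH_AUTO_FREE(path);
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		return ret;
 *	...
 */
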
/*
 * The state of a btrfs root
 */
enum {
	/*
	 * btrfs_record_root_in_trans() is a multi-step process, and it can
	 * race with the balancing code.  But the race is very small, and it
	 * only happens the first time the root is added to each transaction.
	 * So IN_TRANS_SETUP is used to tell us when more checks are required.
	 */
	BTRFS_ROOT_IN_TRANS_SETUP,

	/*
	 * Set if tree blocks of this root can be shared by other roots.
	 * Only subvolume trees and their reloc trees have this bit set.
	 * Conflicts with TRACK_DIRTY bit.
	 *
	 * This affects two things:
	 *
	 * - How balance works
	 *   For shareable roots, we need to use the reloc tree and do path
	 *   replacement for balance, and need various pre/post hooks for
	 *   snapshot creation to handle them.
	 *
	 *   While for non-shareable trees, we simply do a tree search with
	 *   COW.
	 *
	 * - How dirty roots are tracked
	 *   For shareable roots, btrfs_record_root_in_trans() is needed to
	 *   track them, while non-subvolume roots have the TRACK_DIRTY bit
	 *   and don't need to be tracked manually.
	 */
	BTRFS_ROOT_SHAREABLE,
	BTRFS_ROOT_TRACK_DIRTY,
	BTRFS_ROOT_IN_RADIX,
	BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
	BTRFS_ROOT_DEFRAG_RUNNING,
	BTRFS_ROOT_FORCE_COW,
	BTRFS_ROOT_MULTI_LOG_TASKS,
	BTRFS_ROOT_DIRTY,
	BTRFS_ROOT_DELETING,

	/*
	 * The reloc tree is an orphan, only kept here for the qgroup delayed
	 * subtree scan.
	 *
	 * Set for the subvolume tree owning the reloc tree.
	 */
	BTRFS_ROOT_DEAD_RELOC_TREE,
	/* Mark dead root stored on device whose cleanup needs to be resumed */
	BTRFS_ROOT_DEAD_TREE,
	/* The root has a log tree. Used for subvolume roots and the tree root. */
	BTRFS_ROOT_HAS_LOG_TREE,
	/* Qgroup flushing is in progress */
	BTRFS_ROOT_QGROUP_FLUSHING,
	/* We started the orphan cleanup for this root. */
	BTRFS_ROOT_ORPHAN_CLEANUP,
	/* This root has a drop operation that was started previously. */
	BTRFS_ROOT_UNFINISHED_DROP,
	/* This reloc root needs to have its buffers lockdep class reset. */
	BTRFS_ROOT_RESET_LOCKDEP_CLASS,
};

/*
 * Record swapped tree blocks of a subvolume tree for the delayed subtree
 * trace code. For details, check the comment in fs/btrfs/qgroup.c.
 */
struct btrfs_qgroup_swapped_blocks {
	spinlock_t lock;
	/* RB_EMPTY_ROOT() of above blocks[] */
	bool swapped;
	struct rb_root blocks[BTRFS_MAX_LEVEL];
};

/*
 * In-memory representation of a tree. The extent_root is used for all
 * allocations and for the extent tree itself.
 */
struct btrfs_root {
	struct rb_node rb_node;

	struct extent_buffer *node;

	struct extent_buffer *commit_root;
	struct btrfs_root *log_root;
	struct btrfs_root *reloc_root;

	unsigned long state;
	struct btrfs_root_item root_item;
	struct btrfs_key root_key;
	struct btrfs_fs_info *fs_info;
	struct extent_io_tree dirty_log_pages;

	struct mutex objectid_mutex;

	spinlock_t accounting_lock;
	struct btrfs_block_rsv *block_rsv;

	struct mutex log_mutex;
	wait_queue_head_t log_writer_wait;
	wait_queue_head_t log_commit_wait[2];
	struct list_head log_ctxs[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_writers;
	atomic_t log_commit[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_batch;
	/*
	 * Protected by the 'log_mutex' lock but can be read without holding
	 * that lock to avoid unnecessary lock contention, in which case it
	 * should be read using btrfs_get_root_log_transid() except if it's a
	 * log tree in which case it can be directly accessed. Updates to this
	 * field should always use btrfs_set_root_log_transid(), except for log
	 * trees where the field can be updated directly.
	 */
	int log_transid;
	/* Updated no matter whether the commit succeeds or not. */
	int log_transid_committed;
	/*
	 * Only updated when the commit succeeds. Use
	 * btrfs_get_root_last_log_commit() and btrfs_set_root_last_log_commit()
	 * to access this field.
	 */
	int last_log_commit;
	pid_t log_start_pid;

	u64 last_trans;

	u64 free_objectid;

	struct btrfs_key defrag_progress;
	struct btrfs_key defrag_max;

	/* The dirty list is only used by non-shareable roots */
	struct list_head dirty_list;

	struct list_head root_list;

	/* Xarray that keeps track of in-memory inodes. */
	struct xarray inodes;

	/* Xarray that keeps track of delayed nodes of every inode. */
	struct xarray delayed_nodes;
	/*
	 * Right now this just gets used so that a root has its own devid
	 * for stat. It may be used for more later.
	 */
	dev_t anon_dev;

	spinlock_t root_item_lock;
	refcount_t refs;

	struct mutex delalloc_mutex;
	spinlock_t delalloc_lock;
	/*
	 * All of the inodes that have delalloc bytes. It is possible for
	 * this list to be empty even when there are still dirty data=ordered
	 * extents waiting to finish IO.
	 */
	struct list_head delalloc_inodes;
	struct list_head delalloc_root;
	u64 nr_delalloc_inodes;

	struct mutex ordered_extent_mutex;
	/*
	 * This is used by the balancing code to wait for all the pending
	 * ordered extents.
	 */
	spinlock_t ordered_extent_lock;

	/*
	 * All of the data=ordered extents pending writeback. These can span
	 * multiple transactions and basically include every dirty data page
	 * that isn't from nodatacow.
	 */
	struct list_head ordered_extents;
	struct list_head ordered_root;
	u64 nr_ordered_extents;

	/*
	 * Not empty if this subvolume root has gone through tree block swap
	 * (relocation)
	 *
	 * Will be used by reloc_control::dirty_subvol_roots.
	 */
	struct list_head reloc_dirty_list;

	/*
	 * Number of currently running SEND ioctls to prevent manipulation
	 * of the read-only status via SUBVOL_SETFLAGS
	 */
	int send_in_progress;
	/*
	 * Number of currently running deduplication operations that have a
	 * destination inode belonging to this root. Protected by
	 * root_item_lock.
	 */
	int dedupe_in_progress;
	/* For exclusion of snapshot creation and nocow writes */
	struct btrfs_drew_lock snapshot_lock;

	atomic_t snapshot_force_cow;

	/* For qgroup metadata reserved space */
	spinlock_t qgroup_meta_rsv_lock;
	u64 qgroup_meta_rsv_pertrans;
	u64 qgroup_meta_rsv_prealloc;
	wait_queue_head_t qgroup_flush_wait;

	/* Number of active swapfiles */
	atomic_t nr_swapfiles;

	/* Record pairs of swapped blocks for qgroup */
	struct btrfs_qgroup_swapped_blocks swapped_blocks;

	/* Used only by log trees, when logging csum items */
	struct extent_io_tree log_csum_range;

	/* Used in simple quotas, track root during relocation. */
	u64 relocation_src_root;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	u64 alloc_bytenr;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

static inline bool btrfs_root_readonly(const struct btrfs_root *root)
{
	/* Byte-swap the constant at compile time, root_item::flags is LE */
	return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
}

static inline bool btrfs_root_dead(const struct btrfs_root *root)
{
	/* Byte-swap the constant at compile time, root_item::flags is LE */
	return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
}

static inline u64 btrfs_root_id(const struct btrfs_root *root)
{
	return root->root_key.objectid;
}

static inline int btrfs_get_root_log_transid(const struct btrfs_root *root)
{
	return READ_ONCE(root->log_transid);
}

static inline void btrfs_set_root_log_transid(struct btrfs_root *root, int log_transid)
{
	WRITE_ONCE(root->log_transid, log_transid);
}

static inline int btrfs_get_root_last_log_commit(const struct btrfs_root *root)
{
	return READ_ONCE(root->last_log_commit);
}

static inline void btrfs_set_root_last_log_commit(struct btrfs_root *root, int commit_id)
{
	WRITE_ONCE(root->last_log_commit, commit_id);
}

static inline u64 btrfs_get_root_last_trans(const struct btrfs_root *root)
{
	return READ_ONCE(root->last_trans);
}

static inline void btrfs_set_root_last_trans(struct btrfs_root *root, u64 transid)
{
	WRITE_ONCE(root->last_trans, transid);
}

/*
 * Return the generation this root started with.
 *
 * Every normal root is created with root->root_key.offset set to its
 * originating generation.  If it is a snapshot it is the generation when
 * the snapshot was created.
 *
 * However for TREE_RELOC roots root_key.offset is the objectid of the owning
 * tree root.  Thankfully we copy the root item of the owning tree root, which
 * has its last_snapshot set to what we would have root_key.offset set to, so
 * return that if this is a TREE_RELOC root.
 */
static inline u64 btrfs_root_origin_generation(const struct btrfs_root *root)
{
	if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
		return btrfs_root_last_snapshot(&root->root_item);
	return root->root_key.offset;
}

/*
 * Structure that conveys information about an extent that is going to replace
 * all the extents in a file range.
 */
struct btrfs_replace_extent_info {
	u64 disk_offset;
	u64 disk_len;
	u64 data_offset;
	u64 data_len;
	u64 file_offset;
	/* Pointer to a file extent item of type regular or prealloc. */
	char *extent_buf;
	/*
	 * Set to true when attempting to replace a file range with a new extent
	 * described by this structure, set to false when attempting to clone an
	 * existing extent into a file range.
	 */
	bool is_new_extent;
	/* Indicate if we should update the inode's mtime and ctime. */
	bool update_times;
	/* Meaningful only if is_new_extent is true. */
	int qgroup_reserved;
	/*
	 * Meaningful only if is_new_extent is true.
	 * Used to track how many extent items we have already inserted in a
	 * subvolume tree that refer to the extent described by this structure,
	 * so that we know when to create a new delayed ref or update an existing
	 * one.
	 */
	int insertions;
};

/* Arguments for btrfs_drop_extents() */
struct btrfs_drop_extents_args {
	/* Input parameters */

	/*
	 * If NULL, btrfs_drop_extents() will allocate and free its own path.
	 * If 'replace_extent' is true, this must not be NULL. Also the path
	 * is always released except if 'replace_extent' is true and
	 * btrfs_drop_extents() sets 'extent_inserted' to true, in which case
	 * the path is kept locked.
	 */
	struct btrfs_path *path;
	/* Start offset of the range to drop extents from */
	u64 start;
	/* End (exclusive, last byte + 1) of the range to drop extents from */
	u64 end;
	/* If true drop all the extent maps in the range */
	bool drop_cache;
	/*
	 * If true it means we want to insert a new extent after dropping all
	 * the extents in the range. If this is true, the 'extent_item_size'
	 * parameter must be set as well and the 'extent_inserted' field will
	 * be set to true by btrfs_drop_extents() if it could insert the new
	 * extent.
	 * Note: when this is set to true the path must not be NULL.
	 */
	bool replace_extent;
	/*
	 * Used if 'replace_extent' is true. Size of the file extent item to
	 * insert after dropping all existing extents in the range
	 */
	u32 extent_item_size;

	/* Output parameters */

	/*
	 * Set to the minimum between the input parameter 'end' and the end
	 * (exclusive, last byte + 1) of the last dropped extent. This is always
	 * set even if btrfs_drop_extents() returns an error.
	 */
	u64 drop_end;
	/*
	 * The number of allocated bytes found in the range. This can be smaller
	 * than the range's length when there are holes in the range.
	 */
	u64 bytes_found;
	/*
	 * Only set if 'replace_extent' is true. Set to true if we were able
	 * to insert a replacement extent after dropping all extents in the
	 * range, otherwise set to false by btrfs_drop_extents().
	 * Also, if btrfs_drop_extents() has set this to true it means it
	 * returned with the path locked, otherwise if it has set this to
	 * false it has returned with the path released.
	 */
	bool extent_inserted;
};
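
/*
 * Example (a sketch; btrfs_drop_extents() itself is declared elsewhere, in
 * fs/btrfs/file.h, and the transaction and inode setup are assumed): drop
 * every extent in the first 1MiB of a file without inserting a replacement
 * extent.
 *
 *	struct btrfs_drop_extents_args drop_args = { 0 };
 *
 *	drop_args.start = 0;
 *	drop_args.end = SZ_1M;
 *	drop_args.drop_cache = true;
 *	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
 *	if (ret)
 *		return ret;
 *
 * On success, drop_args.bytes_found holds the number of allocated bytes
 * that were found in the range.
 */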

struct btrfs_file_private {
	void *filldir_buf;
	u64 last_index;
	struct extent_state *llseek_cached_state;
	/* Task that allocated this structure. */
	struct task_struct *owner_task;
};

static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
{
	return info->nodesize - sizeof(struct btrfs_header);
}

static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
{
	return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
}

static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_fs_info *info)
{
	return BTRFS_LEAF_DATA_SIZE(info) / sizeof(struct btrfs_key_ptr);
}

static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
{
	return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item);
}
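
/*
 * Worked example for the helpers above (the sizes are those of the packed
 * on-disk structures: struct btrfs_header is 101 bytes, struct btrfs_item
 * is 25 bytes and struct btrfs_key_ptr is 33 bytes).  With the default
 * 16K nodesize:
 *
 *	BTRFS_LEAF_DATA_SIZE	 = 16384 - 101 = 16283 bytes
 *	BTRFS_MAX_ITEM_SIZE	 = 16283 - 25  = 16258 bytes
 *	BTRFS_NODEPTRS_PER_BLOCK = 16283 / 33  = 493 pointers
 */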

int __init btrfs_ctree_init(void);
void __cold btrfs_ctree_exit(void);

int btrfs_bin_search(const struct extent_buffer *eb, int first_slot,
		     const struct btrfs_key *key, int *slot);

int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);

#ifdef __LITTLE_ENDIAN

/*
 * Compare two keys, on little-endian the disk order is the same as the CPU
 * order and we can avoid the conversion.
 */
static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk_key,
				  const struct btrfs_key *k2)
{
	const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;

	return btrfs_comp_cpu_keys(k1, k2);
}

#else

/* Compare two keys in a memcmp fashion. */
static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk,
				  const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

#endif
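
/*
 * Example (illustrative; the extent buffer, slot and target key are
 * assumed): read a key from a node with the btrfs_node_key() accessor and
 * compare it against a CPU-order key.
 *
 *	struct btrfs_disk_key disk_key;
 *
 *	btrfs_node_key(eb, &disk_key, slot);
 *	ret = btrfs_comp_keys(&disk_key, &target_key);
 *
 * A negative ret means the disk key sorts before @target_key, 0 means they
 * are equal and a positive value means it sorts after.
 */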

int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type);
int btrfs_previous_extent_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid);
void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
			     const struct btrfs_path *path,
			     const struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int lowest_level,
			u64 min_trans);
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans);
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot);

int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret,
		    enum btrfs_lock_nesting nest);
int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct extent_buffer *buf,
			  struct extent_buffer *parent, int parent_slot,
			  struct extent_buffer **cow_ret,
			  u64 search_start, u64 empty_size,
			  enum btrfs_lock_nesting nest);
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid);
bool btrfs_block_can_be_shared(const struct btrfs_trans_handle *trans,
			       const struct btrfs_root *root,
			       const struct extent_buffer *buf);
int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct btrfs_path *path, int level, int slot);
void btrfs_extend_item(struct btrfs_trans_handle *trans,
		       const struct btrfs_path *path, u32 data_size);
void btrfs_truncate_item(struct btrfs_trans_handle *trans,
			 const struct btrfs_path *path, u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     const struct btrfs_key *new_key,
		     unsigned long split_offset);
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 const struct btrfs_key *new_key);
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key);
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, struct btrfs_path *p,
		      int ins_len, int cow);
int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq);
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       const struct btrfs_key *key,
			       struct btrfs_path *p, int find_higher,
			       int return_any);
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
DEFINE_FREE(btrfs_free_path, struct btrfs_path *, btrfs_free_path(_T))

int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int slot, int nr);
static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path)
{
	return btrfs_del_items(trans, root, path, path->slots[0], 1);
}

/*
 * Describes a batch of items to insert in a btree. This is used by
 * btrfs_insert_empty_items().
 */
struct btrfs_item_batch {
	/*
	 * Pointer to an array containing the keys of the items to insert (in
	 * sorted order).
	 */
	const struct btrfs_key *keys;
	/* Pointer to an array containing the data size for each item to insert. */
	const u32 *data_sizes;
	/*
	 * The sum of data sizes for all items. The caller can compute this while
	 * setting up the data_sizes array, so it ends up being more efficient
	 * than having btrfs_insert_empty_items() or setup_item_for_insert()
	 * doing it, as it would avoid an extra loop over a potentially large
	 * array, and in the case of setup_item_for_insert(), we would be doing
	 * it while holding a write lock on a leaf and often on upper level nodes
	 * too, unnecessarily increasing the size of a critical section.
	 */
	u32 total_data_size;
	/* Size of the keys and data_sizes arrays (number of items in the batch). */
	int nr;
};

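/*
 * Example (a sketch; the transaction, path, keys and item sizes are
 * assumed): insert two empty items in one batch, so the leaf is locked and
 * any tree rebalancing happens only once.  keys[] must be filled in sorted
 * order.
 *
 *	struct btrfs_item_batch batch;
 *	struct btrfs_key keys[2];
 *	u32 sizes[2] = { first_size, second_size };
 *
 *	batch.keys = keys;
 *	batch.data_sizes = sizes;
 *	batch.total_data_size = first_size + second_size;
 *	batch.nr = 2;
 *	ret = btrfs_insert_empty_items(trans, root, path, &batch);
 */
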
void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 const struct btrfs_key *key,
				 u32 data_size);
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, void *data, u32 data_size);
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     const struct btrfs_item_batch *batch);

static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  const struct btrfs_key *key,
					  u32 data_size)
{
	struct btrfs_item_batch batch;

	batch.keys = key;
	batch.data_sizes = &data_size;
	batch.total_data_size = data_size;
	batch.nr = 1;

	return btrfs_insert_empty_items(trans, root, path, &batch);
}

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq);

int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
			   struct btrfs_path *path);

int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
			      struct btrfs_path *path);

/*
 * Search in @root for a given @key, and store the slot found in @found_key.
 *
 * @root:	The root node of the tree.
 * @key:	The key we are looking for.
 * @found_key:	Will hold the found item.
 * @path:	Holds the current slot/leaf.
 * @iter_ret:	Contains the value returned from btrfs_search_slot or
 *		btrfs_get_next_valid_item, whichever was executed last.
 *
 * The @iter_ret is an output variable that will contain the return value of
 * btrfs_search_slot, if it encountered an error, or the value returned from
 * btrfs_get_next_valid_item otherwise. That return value can be 0, if a valid
 * slot was found, 1 if there were no more leaves, and <0 if there was an error.
 *
 * It's recommended to use a separate variable for iter_ret and then use it to
 * set the function return value so there's no confusion of the 0/1/errno
 * values stemming from btrfs_search_slot.
 */
#define btrfs_for_each_slot(root, key, found_key, path, iter_ret)		\
	for (iter_ret = btrfs_search_slot(NULL, (root), (key), (path), 0, 0);	\
		(iter_ret) >= 0 &&						\
		(iter_ret = btrfs_get_next_valid_item((root), (found_key), (path))) == 0; \
		(path)->slots[0]++						\
	)
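
/*
 * Example (a sketch; process_item() is a placeholder for the caller's own
 * per-item work): iterate over every item of a root starting from the
 * smallest key.
 *
 *	struct btrfs_key key = { 0 };
 *	struct btrfs_key found_key;
 *	int iter_ret = 0;
 *	int ret;
 *
 *	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret)
 *		process_item(path, &found_key);
 *
 *	ret = (iter_ret < 0) ? iter_ret : 0;
 */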

int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq);

/*
 * Search the tree again to find a leaf with greater keys.
 *
 * Returns 0 if it found something or 1 if there are no greater leaves.
 * Returns < 0 on error.
 */
static inline int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}

static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
{
	return btrfs_next_old_item(root, p, 0);
}
int btrfs_leaf_free_space(const struct extent_buffer *leaf);

static inline bool btrfs_is_fstree(u64 rootid)
{
	if (rootid == BTRFS_FS_TREE_OBJECTID)
		return true;

	if ((s64)rootid < (s64)BTRFS_FIRST_FREE_OBJECTID)
		return false;

	if (btrfs_qgroup_level(rootid) != 0)
		return false;

	return true;
}
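
/*
 * For reference (values from the on-disk format headers):
 * BTRFS_FS_TREE_OBJECTID is 5 and BTRFS_FIRST_FREE_OBJECTID is 256, so the
 * default subvolume (5) and all user subvolumes (>= 256, with a qgroup
 * level of 0) are fs trees, while internal trees such as the extent tree
 * (2) are not.
 */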

static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
{
	return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;
}

#endif