/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_CTREE_H
#define BTRFS_CTREE_H

#include <linux/cleanup.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <linux/refcount.h>
#include <uapi/linux/btrfs_tree.h>
#include "locking.h"
#include "fs.h"
#include "accessors.h"
#include "extent-io-tree.h"

struct extent_buffer;
struct btrfs_block_rsv;
struct btrfs_trans_handle;
struct btrfs_block_group;

/* Read ahead values for struct btrfs_path.reada */
enum {
	READA_NONE,
	READA_BACK,
	READA_FORWARD,
	/*
	 * Similar to READA_FORWARD but unlike it:
	 *
	 * 1) It will trigger readahead even for leaves that are not close to
	 *    each other on disk;
	 * 2) It also triggers readahead for nodes;
	 * 3) During a search, even when a node or leaf is already in memory, it
	 *    will still trigger readahead for other nodes and leaves that follow
	 *    it.
	 *
	 * This is meant to be used only when we know we are iterating over the
	 * entire tree or a very large part of it.
	 */
	READA_FORWARD_ALWAYS,
};
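
/*
 * Usage sketch (illustrative, not part of the original header): callers that
 * scan a whole tree can opt into the aggressive readahead mode before
 * searching:
 *
 *	path->reada = READA_FORWARD_ALWAYS;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 */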

/*
 * btrfs_paths remember the path taken from the root down to the leaf.
 * Level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL - 1] will point
 * to any other levels that are present.
 *
 * The slots array records the index of the item or block pointer
 * used while walking the tree.
 */
struct btrfs_path {
	struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
	int slots[BTRFS_MAX_LEVEL];
	/* if there is real range locking, this locks field will change */
	u8 locks[BTRFS_MAX_LEVEL];
	u8 reada;
	/* keep some upper locks as we walk down */
	u8 lowest_level;

	/*
	 * set by btrfs_split_item, tells search_slot to keep all locks
	 * and to force calls to keep space in the nodes
	 */
	unsigned int search_for_split:1;
	unsigned int keep_locks:1;
	unsigned int skip_locking:1;
	unsigned int search_commit_root:1;
	unsigned int need_commit_sem:1;
	unsigned int skip_release_on_error:1;
	/*
	 * Indicates that the new item (btrfs_search_slot) is extending an
	 * already existing item and ins_len contains only the data size and
	 * not the item header (i.e. sizeof(struct btrfs_item) is not included).
	 */
	unsigned int search_for_extension:1;
	/* Stop search if any locks need to be taken (for read) */
	unsigned int nowait:1;
};

#define BTRFS_PATH_AUTO_FREE(path_name)					\
	struct btrfs_path *path_name __free(btrfs_free_path) = NULL
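
/*
 * Usage sketch (illustrative; demo_lookup() is a hypothetical helper): the
 * path is freed automatically when it goes out of scope, via the __free()
 * cleanup attribute and the DEFINE_FREE(btrfs_free_path, ...) declaration
 * later in this header, so no explicit btrfs_free_path() call is needed:
 *
 *	static int demo_lookup(struct btrfs_root *root, const struct btrfs_key *key)
 *	{
 *		BTRFS_PATH_AUTO_FREE(path);
 *
 *		path = btrfs_alloc_path();
 *		if (!path)
 *			return -ENOMEM;
 *		return btrfs_search_slot(NULL, root, key, path, 0, 0);
 *	}
 */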

/*
 * The state of a btrfs root
 */
enum {
	/*
	 * btrfs_record_root_in_trans is a multi-step process, and it can race
	 * with the balancing code. But the race window is very small, and it
	 * only happens the first time the root is added to each transaction.
	 * So IN_TRANS_SETUP is used to tell us when more checks are required.
	 */
	BTRFS_ROOT_IN_TRANS_SETUP,

	/*
	 * Set if tree blocks of this root can be shared by other roots.
	 * Only subvolume trees and their reloc trees have this bit set.
	 * Conflicts with the TRACK_DIRTY bit.
	 *
	 * This affects two things:
	 *
	 * - How balance works
	 *   For shareable roots, we need to use the reloc tree and do path
	 *   replacement for balance, and need various pre/post hooks for
	 *   snapshot creation to handle them.
	 *
	 *   For non-shareable trees, we simply do a tree search with COW.
	 *
	 * - How dirty roots are tracked
	 *   For shareable roots, btrfs_record_root_in_trans() is needed to
	 *   track them, while non-subvolume roots have the TRACK_DIRTY bit
	 *   and don't need to be tracked manually.
	 */
	BTRFS_ROOT_SHAREABLE,
	BTRFS_ROOT_TRACK_DIRTY,
	BTRFS_ROOT_IN_RADIX,
	BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
	BTRFS_ROOT_DEFRAG_RUNNING,
	BTRFS_ROOT_FORCE_COW,
	BTRFS_ROOT_MULTI_LOG_TASKS,
	BTRFS_ROOT_DIRTY,
	BTRFS_ROOT_DELETING,

	/*
	 * Reloc tree is orphan, only kept here for qgroup delayed subtree scan
	 *
	 * Set for the subvolume tree owning the reloc tree.
	 */
	BTRFS_ROOT_DEAD_RELOC_TREE,
	/* Mark dead root stored on device whose cleanup needs to be resumed */
	BTRFS_ROOT_DEAD_TREE,
	/* The root has a log tree. Used for subvolume roots and the tree root. */
	BTRFS_ROOT_HAS_LOG_TREE,
	/* Qgroup flushing is in progress */
	BTRFS_ROOT_QGROUP_FLUSHING,
	/* We started the orphan cleanup for this root. */
	BTRFS_ROOT_ORPHAN_CLEANUP,
	/* This root has a drop operation that was started previously. */
	BTRFS_ROOT_UNFINISHED_DROP,
	/* This reloc root needs to have its buffers lockdep class reset. */
	BTRFS_ROOT_RESET_LOCKDEP_CLASS,
};
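
/*
 * These flags live in btrfs_root::state (an unsigned long) and are managed
 * with the regular bit operation helpers, e.g. (illustrative sketch):
 *
 *	set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
 *	if (test_bit(BTRFS_ROOT_DELETING, &root->state))
 *		...
 */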

/*
 * Record swapped tree blocks of a subvolume tree for delayed subtree trace
 * code. For details, check the comment in fs/btrfs/qgroup.c.
 */
struct btrfs_qgroup_swapped_blocks {
	spinlock_t lock;
	/* RB_EMPTY_ROOT() of above blocks[] */
	bool swapped;
	struct rb_root blocks[BTRFS_MAX_LEVEL];
};

/*
 * In-memory representation of a tree. The extent_root is used for all
 * allocations and for the extent tree.
 */
struct btrfs_root {
	struct rb_node rb_node;

	struct extent_buffer *node;

	struct extent_buffer *commit_root;
	struct btrfs_root *log_root;
	struct btrfs_root *reloc_root;

	unsigned long state;
	struct btrfs_root_item root_item;
	struct btrfs_key root_key;
	struct btrfs_fs_info *fs_info;
	struct extent_io_tree dirty_log_pages;

	struct mutex objectid_mutex;

	spinlock_t accounting_lock;
	struct btrfs_block_rsv *block_rsv;

	struct mutex log_mutex;
	wait_queue_head_t log_writer_wait;
	wait_queue_head_t log_commit_wait[2];
	struct list_head log_ctxs[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_writers;
	atomic_t log_commit[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_batch;
	/*
	 * Protected by the 'log_mutex' lock but can be read without holding
	 * that lock to avoid unnecessary lock contention, in which case it
	 * should be read using btrfs_get_root_log_transid() except if it's a
	 * log tree in which case it can be directly accessed. Updates to this
	 * field should always use btrfs_set_root_log_transid(), except for log
	 * trees where the field can be updated directly.
	 */
	int log_transid;
	/* Updated no matter whether the commit succeeds or not. */
	int log_transid_committed;
	/*
	 * Only updated when the commit succeeds. Use
	 * btrfs_get_root_last_log_commit() and btrfs_set_root_last_log_commit()
	 * to access this field.
	 */
	int last_log_commit;
	pid_t log_start_pid;

	u64 last_trans;

	u64 free_objectid;

	struct btrfs_key defrag_progress;
	struct btrfs_key defrag_max;

	/* The dirty list is only used by non-shareable roots */
	struct list_head dirty_list;

	struct list_head root_list;

	/*
	 * Xarray that keeps track of in-memory inodes, protected by the lock
	 * @inode_lock.
	 */
	struct xarray inodes;

	/*
	 * Xarray that keeps track of delayed nodes of every inode, protected
	 * by @inode_lock.
	 */
	struct xarray delayed_nodes;
	/*
	 * Right now this just gets used so that a root has its own devid
	 * for stat. It may be used for more later.
	 */
	dev_t anon_dev;

	spinlock_t root_item_lock;
	refcount_t refs;

	struct mutex delalloc_mutex;
	spinlock_t delalloc_lock;
	/*
	 * All of the inodes that have delalloc bytes. It is possible for
	 * this list to be empty even when there are still dirty data=ordered
	 * extents waiting to finish IO.
	 */
	struct list_head delalloc_inodes;
	struct list_head delalloc_root;
	u64 nr_delalloc_inodes;

	struct mutex ordered_extent_mutex;
	/*
	 * this is used by the balancing code to wait for all the pending
	 * ordered extents
	 */
	spinlock_t ordered_extent_lock;

	/*
	 * All of the data=ordered extents pending writeback. These can span
	 * multiple transactions and basically include every dirty data page
	 * that isn't from nodatacow.
	 */
	struct list_head ordered_extents;
	struct list_head ordered_root;
	u64 nr_ordered_extents;

	/*
	 * Not empty if this subvolume root has gone through tree block swap
	 * (relocation)
	 *
	 * Will be used by reloc_control::dirty_subvol_roots.
	 */
	struct list_head reloc_dirty_list;

	/*
	 * Number of currently running SEND ioctls to prevent manipulation
	 * of the read-only status via SUBVOL_SETFLAGS.
	 */
	int send_in_progress;
	/*
	 * Number of currently running deduplication operations that have a
	 * destination inode belonging to this root. Protected by the lock
	 * root_item_lock.
	 */
	int dedupe_in_progress;
	/* For exclusion of snapshot creation and nocow writes */
	struct btrfs_drew_lock snapshot_lock;

	atomic_t snapshot_force_cow;

	/* For qgroup metadata reserved space */
	spinlock_t qgroup_meta_rsv_lock;
	u64 qgroup_meta_rsv_pertrans;
	u64 qgroup_meta_rsv_prealloc;
	wait_queue_head_t qgroup_flush_wait;

	/* Number of active swapfiles */
	atomic_t nr_swapfiles;

	/* Record pairs of swapped blocks for qgroup */
	struct btrfs_qgroup_swapped_blocks swapped_blocks;

	/* Used only by log trees, when logging csum items */
	struct extent_io_tree log_csum_range;

	/* Used in simple quotas, track root during relocation. */
	u64 relocation_src_root;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	u64 alloc_bytenr;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

static inline bool btrfs_root_readonly(const struct btrfs_root *root)
{
	/* Byte-swap the constant at compile time, root_item::flags is LE */
	return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
}

static inline bool btrfs_root_dead(const struct btrfs_root *root)
{
	/* Byte-swap the constant at compile time, root_item::flags is LE */
	return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
}

static inline u64 btrfs_root_id(const struct btrfs_root *root)
{
	return root->root_key.objectid;
}

static inline int btrfs_get_root_log_transid(const struct btrfs_root *root)
{
	return READ_ONCE(root->log_transid);
}

static inline void btrfs_set_root_log_transid(struct btrfs_root *root, int log_transid)
{
	WRITE_ONCE(root->log_transid, log_transid);
}

static inline int btrfs_get_root_last_log_commit(const struct btrfs_root *root)
{
	return READ_ONCE(root->last_log_commit);
}

static inline void btrfs_set_root_last_log_commit(struct btrfs_root *root, int commit_id)
{
	WRITE_ONCE(root->last_log_commit, commit_id);
}

static inline u64 btrfs_get_root_last_trans(const struct btrfs_root *root)
{
	return READ_ONCE(root->last_trans);
}

static inline void btrfs_set_root_last_trans(struct btrfs_root *root, u64 transid)
{
	WRITE_ONCE(root->last_trans, transid);
}

/*
 * Structure that conveys information about an extent that is going to replace
 * all the extents in a file range.
 */
struct btrfs_replace_extent_info {
	u64 disk_offset;
	u64 disk_len;
	u64 data_offset;
	u64 data_len;
	u64 file_offset;
	/* Pointer to a file extent item of type regular or prealloc. */
	char *extent_buf;
	/*
	 * Set to true when attempting to replace a file range with a new extent
	 * described by this structure, set to false when attempting to clone an
	 * existing extent into a file range.
	 */
	bool is_new_extent;
	/* Indicate if we should update the inode's mtime and ctime. */
	bool update_times;
	/* Meaningful only if is_new_extent is true. */
	int qgroup_reserved;
	/*
	 * Meaningful only if is_new_extent is true.
	 * Used to track how many extent items we have already inserted in a
	 * subvolume tree that refer to the extent described by this structure,
	 * so that we know when to create a new delayed ref or update an existing
	 * one.
	 */
	int insertions;
};

/* Arguments for btrfs_drop_extents() */
struct btrfs_drop_extents_args {
	/* Input parameters */

	/*
	 * If NULL, btrfs_drop_extents() will allocate and free its own path.
	 * If 'replace_extent' is true, this must not be NULL. Also the path
	 * is always released except if 'replace_extent' is true and
	 * btrfs_drop_extents() sets 'extent_inserted' to true, in which case
	 * the path is kept locked.
	 */
	struct btrfs_path *path;
	/* Start offset of the range to drop extents from */
	u64 start;
	/* End (exclusive, last byte + 1) of the range to drop extents from */
	u64 end;
	/* If true drop all the extent maps in the range */
	bool drop_cache;
	/*
	 * If true it means we want to insert a new extent after dropping all
	 * the extents in the range. If this is true, the 'extent_item_size'
	 * parameter must be set as well and the 'extent_inserted' field will
	 * be set to true by btrfs_drop_extents() if it could insert the new
	 * extent.
	 * Note: when this is set to true the path must not be NULL.
	 */
	bool replace_extent;
	/*
	 * Used if 'replace_extent' is true. Size of the file extent item to
	 * insert after dropping all existing extents in the range
	 */
	u32 extent_item_size;

	/* Output parameters */

	/*
	 * Set to the minimum between the input parameter 'end' and the end
	 * (exclusive, last byte + 1) of the last dropped extent. This is always
	 * set even if btrfs_drop_extents() returns an error.
	 */
	u64 drop_end;
	/*
	 * The number of allocated bytes found in the range. This can be smaller
	 * than the range's length when there are holes in the range.
	 */
	u64 bytes_found;
	/*
	 * Only set if 'replace_extent' is true. Set to true if we were able
	 * to insert a replacement extent after dropping all extents in the
	 * range, otherwise set to false by btrfs_drop_extents().
	 * Also, if btrfs_drop_extents() has set this to true it means it
	 * returned with the path locked, otherwise if it has set this to
	 * false it has returned with the path released.
	 */
	bool extent_inserted;
};
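
/*
 * Caller-side sketch (illustrative; btrfs_drop_extents() itself is declared
 * elsewhere, file.h in current trees): zero-initialize the struct, fill in
 * the input fields and read the outputs afterwards:
 *
 *	struct btrfs_drop_extents_args drop_args = { 0 };
 *
 *	drop_args.start = start;
 *	drop_args.end = start + len;
 *	drop_args.drop_cache = true;
 *	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
 *	if (!ret)
 *		bytes = drop_args.bytes_found;
 */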

struct btrfs_file_private {
	void *filldir_buf;
	u64 last_index;
	struct extent_state *llseek_cached_state;
	/* Task that allocated this structure. */
	struct task_struct *owner_task;
};

static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
{
	return info->nodesize - sizeof(struct btrfs_header);
}

static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
{
	return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
}

static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_fs_info *info)
{
	return BTRFS_LEAF_DATA_SIZE(info) / sizeof(struct btrfs_key_ptr);
}

static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
{
	return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item);
}
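
/*
 * Worked example (illustrative, assuming the default 16 KiB nodesize and the
 * on-disk struct sizes: btrfs_header is 101 bytes, btrfs_item is 25 bytes,
 * btrfs_key_ptr is 33 bytes):
 *
 *	BTRFS_LEAF_DATA_SIZE     = 16384 - 101 = 16283 bytes
 *	BTRFS_MAX_ITEM_SIZE      = 16283 - 25  = 16258 bytes
 *	BTRFS_NODEPTRS_PER_BLOCK = 16283 / 33  = 493 key pointers
 */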

#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
				((bytes) >> (fs_info)->sectorsize_bits)

static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
{
	return mapping_gfp_constraint(mapping, ~__GFP_FS);
}

void btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end);
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes);
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);

/* ctree.c */
int __init btrfs_ctree_init(void);
void __cold btrfs_ctree_exit(void);

int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
		     const struct btrfs_key *key, int *slot);

int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);

#ifdef __LITTLE_ENDIAN

/*
 * Compare two keys, on little-endian the disk order is the same as the CPU
 * order and we can avoid the conversion.
 */
static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk_key,
				  const struct btrfs_key *k2)
{
	const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;

	return btrfs_comp_cpu_keys(k1, k2);
}

#else

/* Compare two keys in a memcmp fashion. */
static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk,
				  const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

#endif

int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type);
int btrfs_previous_extent_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid);
void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
			     const struct btrfs_path *path,
			     const struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int lowest_level,
			u64 min_trans);
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans);
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot);

int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret,
		    enum btrfs_lock_nesting nest);
int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct extent_buffer *buf,
			  struct extent_buffer *parent, int parent_slot,
			  struct extent_buffer **cow_ret,
			  u64 search_start, u64 empty_size,
			  enum btrfs_lock_nesting nest);
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid);
bool btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_buffer *buf);
int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct btrfs_path *path, int level, int slot);
void btrfs_extend_item(struct btrfs_trans_handle *trans,
		       const struct btrfs_path *path, u32 data_size);
void btrfs_truncate_item(struct btrfs_trans_handle *trans,
			 const struct btrfs_path *path, u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     const struct btrfs_key *new_key,
		     unsigned long split_offset);
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 const struct btrfs_key *new_key);
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key);
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, struct btrfs_path *p,
		      int ins_len, int cow);
int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq);
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       const struct btrfs_key *key,
			       struct btrfs_path *p, int find_higher,
			       int return_any);
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
DEFINE_FREE(btrfs_free_path, struct btrfs_path *, btrfs_free_path(_T))

int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int slot, int nr);
static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path)
{
	return btrfs_del_items(trans, root, path, path->slots[0], 1);
}
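
/*
 * Deletion sketch (illustrative): search with ins_len == -1 and cow == 1 so
 * the leaf is COWed for the removal, then delete the slot the search
 * positioned us at:
 *
 *	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 *	if (ret == 0)
 *		ret = btrfs_del_item(trans, root, path);
 *	btrfs_release_path(path);
 */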

/*
 * Describes a batch of items to insert in a btree. This is used by
 * btrfs_insert_empty_items().
 */
struct btrfs_item_batch {
	/*
	 * Pointer to an array containing the keys of the items to insert (in
	 * sorted order).
	 */
	const struct btrfs_key *keys;
	/* Pointer to an array containing the data size for each item to insert. */
	const u32 *data_sizes;
	/*
	 * The sum of data sizes for all items. The caller can compute this while
	 * setting up the data_sizes array, so it ends up being more efficient
	 * than having btrfs_insert_empty_items() or setup_item_for_insert()
	 * doing it, as it would avoid an extra loop over a potentially large
	 * array, and in the case of setup_item_for_insert(), we would be doing
	 * it while holding a write lock on a leaf and often on upper level nodes
	 * too, unnecessarily increasing the size of a critical section.
	 */
	u32 total_data_size;
	/* Size of the keys and data_sizes arrays (number of items in the batch). */
	int nr;
};

void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 const struct btrfs_key *key,
				 u32 data_size);
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, void *data, u32 data_size);
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     const struct btrfs_item_batch *batch);

static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  const struct btrfs_key *key,
					  u32 data_size)
{
	struct btrfs_item_batch batch;

	batch.keys = key;
	batch.data_sizes = &data_size;
	batch.total_data_size = data_size;
	batch.nr = 1;

	return btrfs_insert_empty_items(trans, root, path, &batch);
}
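
/*
 * Typical single-item insertion sketch (illustrative, error handling
 * trimmed): reserve space in the leaf with btrfs_insert_empty_item() and
 * then copy the item body into place:
 *
 *	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(item));
 *	if (ret)
 *		return ret;
 *	write_extent_buffer(path->nodes[0], &item,
 *			    btrfs_item_ptr_offset(path->nodes[0], path->slots[0]),
 *			    sizeof(item));
 *	btrfs_release_path(path);
 */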

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq);

int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
			   struct btrfs_path *path);

int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
			      struct btrfs_path *path);

/*
 * Search in @root for a given @key, and store the slot found in @found_key.
 *
 * @root:	The root node of the tree.
 * @key:	The key we are looking for.
 * @found_key:	Will hold the found item.
 * @path:	Holds the current slot/leaf.
 * @iter_ret:	Contains the value returned from btrfs_search_slot or
 *		btrfs_get_next_valid_item, whichever was executed last.
 *
 * The @iter_ret is an output variable that will contain the return value of
 * btrfs_search_slot, if it encountered an error, or the value returned from
 * btrfs_get_next_valid_item otherwise. That return value can be 0, if a valid
 * slot was found, 1 if there were no more leaves, and <0 if there was an error.
 *
 * It's recommended to use a separate variable for iter_ret and then use it to
 * set the function return value so there's no confusion of the 0/1/errno
 * values stemming from btrfs_search_slot.
 */
#define btrfs_for_each_slot(root, key, found_key, path, iter_ret)		\
	for (iter_ret = btrfs_search_slot(NULL, (root), (key), (path), 0, 0);	\
		(iter_ret) >= 0 &&						\
		(iter_ret = btrfs_get_next_valid_item((root), (found_key), (path))) == 0; \
		(path)->slots[0]++						\
	)
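
/*
 * Usage sketch (illustrative): iterate all items starting at @key, keeping
 * the iterator's return value in a separate variable as recommended above:
 *
 *	struct btrfs_key key = { 0 };
 *	struct btrfs_key found_key;
 *	int iter_ret = 0;
 *	int ret = 0;
 *
 *	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
 *		... process the item at path->slots[0] ...
 *	}
 *	if (iter_ret < 0)
 *		ret = iter_ret;	(1 means no more leaves, < 0 an error)
 */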

int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq);

/*
 * Search the tree again to find a leaf with greater keys.
 *
 * Returns 0 if it found something or 1 if there are no greater leaves.
 * Returns < 0 on error.
 */
static inline int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}

static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
{
	return btrfs_next_old_item(root, p, 0);
}
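
/*
 * Classic manual iteration sketch (illustrative): advance item by item and
 * hop to the next leaf when the current one is exhausted:
 *
 *	while (1) {
 *		struct extent_buffer *leaf = path->nodes[0];
 *
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret != 0)
 *				break;	(1 = no more leaves, < 0 = error)
 *			continue;
 *		}
 *		... process the item at path->slots[0] ...
 *		path->slots[0]++;
 *	}
 */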
int btrfs_leaf_free_space(const struct extent_buffer *leaf);

static inline int is_fstree(u64 rootid)
{
	if (rootid == BTRFS_FS_TREE_OBJECTID ||
	    ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID &&
	      !btrfs_qgroup_level(rootid)))
		return 1;
	return 0;
}
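
/*
 * For example (illustrative): is_fstree(BTRFS_FS_TREE_OBJECTID) and
 * is_fstree(BTRFS_FIRST_FREE_OBJECTID) are true, while tree roots such as
 * BTRFS_EXTENT_TREE_OBJECTID are not considered fs trees.
 */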

static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
{
	return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;
}

u16 btrfs_csum_type_size(u16 type);
int btrfs_super_csum_size(const struct btrfs_super_block *s);
const char *btrfs_super_csum_name(u16 csum_type);
const char *btrfs_super_csum_driver(u16 csum_type);
size_t __attribute_const__ btrfs_get_num_csums(void);

/*
 * We use page status Private2 to indicate there is an ordered extent with
 * unfinished IO.
 *
 * Rename the Private2 accessors to Ordered, to improve readability.
 */
#define PageOrdered(page)		PagePrivate2(page)
#define SetPageOrdered(page)		SetPagePrivate2(page)
#define ClearPageOrdered(page)		ClearPagePrivate2(page)
#define folio_test_ordered(folio)	folio_test_private_2(folio)
#define folio_set_ordered(folio)	folio_set_private_2(folio)
#define folio_clear_ordered(folio)	folio_clear_private_2(folio)

#endif