1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 */
5
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/rbtree.h>
9 #include <linux/mm.h>
10 #include <linux/error-injection.h>
11 #include "messages.h"
12 #include "ctree.h"
13 #include "disk-io.h"
14 #include "transaction.h"
15 #include "print-tree.h"
16 #include "locking.h"
17 #include "volumes.h"
18 #include "qgroup.h"
19 #include "tree-mod-log.h"
20 #include "tree-checker.h"
21 #include "fs.h"
22 #include "accessors.h"
23 #include "extent-tree.h"
24 #include "relocation.h"
25 #include "file-item.h"
26
27 static struct kmem_cache *btrfs_path_cachep;
28
29 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
30 *root, struct btrfs_path *path, int level);
31 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
32 const struct btrfs_key *ins_key, struct btrfs_path *path,
33 int data_size, int extend);
34 static int push_node_left(struct btrfs_trans_handle *trans,
35 struct extent_buffer *dst,
36 struct extent_buffer *src, int empty);
37 static int balance_node_right(struct btrfs_trans_handle *trans,
38 struct extent_buffer *dst_buf,
39 struct extent_buffer *src_buf);
/*
 * The leaf data grows from end-to-front in the node. This returns the address
 * of the start of the last item, which is the end of the leaf data stack.
 */
static unsigned int leaf_data_end(const struct extent_buffer *leaf)
45 {
46 u32 nr = btrfs_header_nritems(leaf);
47
48 if (nr == 0)
49 return BTRFS_LEAF_DATA_SIZE(leaf->fs_info);
50 return btrfs_item_offset(leaf, nr - 1);
51 }
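
/*
 * A rough sketch of the leaf layout this relies on: the btrfs_item headers
 * grow forward from the leaf header while the item data grows backward from
 * the end of the block, so the unused space sits in the middle:
 *
 *   [leaf header][item 0][item 1] ... free space ... [data 1][data 0]
 *
 * btrfs_item_offset(leaf, nr - 1) is therefore the lowest data offset in use,
 * i.e. the point where the data "stack" currently ends.
 */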
52
/*
 * Move data in a @leaf (using memmove, safe for overlapping ranges).
 *
 * @leaf: leaf that we're doing a memmove on
 * @dst_offset: item data offset we're moving to
 * @src_offset: item data offset we're moving from
 * @len: length of the data we're moving
 *
 * Wrapper around memmove_extent_buffer() that takes into account the header on
 * the leaf. The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf. This
 * handles that math to simplify the callers.
 */
static inline void memmove_leaf_data(const struct extent_buffer *leaf,
67 unsigned long dst_offset,
68 unsigned long src_offset,
69 unsigned long len)
70 {
71 memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) + dst_offset,
72 btrfs_item_nr_offset(leaf, 0) + src_offset, len);
73 }
74
/*
 * Copy item data from @src into @dst at the given @offset.
 *
 * @dst: destination leaf that we're copying into
 * @src: source leaf that we're copying from
 * @dst_offset: item data offset we're copying to
 * @src_offset: item data offset we're copying from
 * @len: length of the data we're copying
 *
 * Wrapper around copy_extent_buffer() that takes into account the header on
 * the leaf. The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf. This
 * handles that math to simplify the callers.
 */
static inline void copy_leaf_data(const struct extent_buffer *dst,
90 const struct extent_buffer *src,
91 unsigned long dst_offset,
92 unsigned long src_offset, unsigned long len)
93 {
94 copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, 0) + dst_offset,
95 btrfs_item_nr_offset(src, 0) + src_offset, len);
96 }
97
/*
 * Move items in a @leaf (using memmove).
 *
 * @leaf: leaf that we're doing a memmove on
 * @dst_item: the item nr we're copying into
 * @src_item: the item nr we're copying from
 * @nr_items: the number of items to copy
 *
 * Wrapper around memmove_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void memmove_leaf_items(const struct extent_buffer *leaf,
110 int dst_item, int src_item, int nr_items)
111 {
112 memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, dst_item),
113 btrfs_item_nr_offset(leaf, src_item),
114 nr_items * sizeof(struct btrfs_item));
115 }
116
117 /*
118 * Copy items from @src into @dst at the given @offset.
119 *
120 * @dst: destination leaf for the items
121 * @src: source leaf for the items
122 * @dst_item: the item nr we're copying into
123 * @src_item: the item nr we're copying from
124 * @nr_items: the number of items to copy
125 *
126 * Wrapper around copy_extent_buffer() that does the math to get the
127 * appropriate offsets into the leaf from the item numbers.
128 */
static inline void copy_leaf_items(const struct extent_buffer *dst,
130 const struct extent_buffer *src,
131 int dst_item, int src_item, int nr_items)
132 {
133 copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, dst_item),
134 btrfs_item_nr_offset(src, src_item),
135 nr_items * sizeof(struct btrfs_item));
136 }
137
struct btrfs_path *btrfs_alloc_path(void)
139 {
140 might_sleep();
141
142 return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
143 }
144
145 /* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
147 {
148 if (!p)
149 return;
150 btrfs_release_path(p);
151 kmem_cache_free(btrfs_path_cachep, p);
152 }
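
/*
 * A minimal usage sketch for the path helpers above (hypothetical caller,
 * variable names invented for illustration): allocate a path, search, look at
 * the slot the search landed on, then free the path. btrfs_free_path()
 * releases any held locks and extent buffer references via
 * btrfs_release_path().
 *
 *     struct btrfs_path *path;
 *     struct btrfs_key key = { .objectid = ino,
 *                              .type = BTRFS_INODE_ITEM_KEY,
 *                              .offset = 0 };
 *     int ret;
 *
 *     path = btrfs_alloc_path();
 *     if (!path)
 *             return -ENOMEM;
 *     ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *     if (ret == 0)
 *             btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 *     btrfs_free_path(path);
 */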
153
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
161 {
162 int i;
163
164 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
165 p->slots[i] = 0;
166 if (!p->nodes[i])
167 continue;
168 if (p->locks[i]) {
169 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
170 p->locks[i] = 0;
171 }
172 free_extent_buffer(p->nodes[i]);
173 p->nodes[i] = NULL;
174 }
175 }
176
177 /*
178 * safely gets a reference on the root node of a tree. A lock
179 * is not taken, so a concurrent writer may put a different node
180 * at the root of the tree. See btrfs_lock_root_node for the
181 * looping required.
182 *
183 * The extent buffer returned by this has a reference taken, so
184 * it won't disappear. It may stop being the root of the tree
185 * at any time because there are no locks held.
186 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
188 {
189 struct extent_buffer *eb;
190
191 while (1) {
192 rcu_read_lock();
193 eb = rcu_dereference(root->node);
194
195 /*
196 * RCU really hurts here, we could free up the root node because
197 * it was COWed but we may not get the new root node yet so do
198 * the inc_not_zero dance and if it doesn't work then
199 * synchronize_rcu and try again.
200 */
201 if (atomic_inc_not_zero(&eb->refs)) {
202 rcu_read_unlock();
203 break;
204 }
205 rcu_read_unlock();
206 synchronize_rcu();
207 }
208 return eb;
209 }
210
/*
 * Cowonly roots (not-shareable trees, everything that is not a subvolume or
 * reloc root) just get put onto a simple dirty list. The transaction walks
 * this list to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
217 {
218 struct btrfs_fs_info *fs_info = root->fs_info;
219
220 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
221 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
222 return;
223
224 spin_lock(&fs_info->trans_lock);
225 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
226 /* Want the extent tree to be the last on the list */
227 if (btrfs_root_id(root) == BTRFS_EXTENT_TREE_OBJECTID)
228 list_move_tail(&root->dirty_list,
229 &fs_info->dirty_cowonly_roots);
230 else
231 list_move(&root->dirty_list,
232 &fs_info->dirty_cowonly_roots);
233 }
234 spin_unlock(&fs_info->trans_lock);
235 }
236
237 /*
238 * used by snapshot creation to make a copy of a root for a tree with
239 * a given objectid. The buffer with the new root node is returned in
240 * cow_ret, and this func returns zero on success or a negative error code.
241 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
243 struct btrfs_root *root,
244 struct extent_buffer *buf,
245 struct extent_buffer **cow_ret, u64 new_root_objectid)
246 {
247 struct btrfs_fs_info *fs_info = root->fs_info;
248 struct extent_buffer *cow;
249 int ret = 0;
250 int level;
251 struct btrfs_disk_key disk_key;
252 u64 reloc_src_root = 0;
253
254 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
255 trans->transid != fs_info->running_transaction->transid);
256 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
257 trans->transid != btrfs_get_root_last_trans(root));
258
259 level = btrfs_header_level(buf);
260 if (level == 0)
261 btrfs_item_key(buf, &disk_key, 0);
262 else
263 btrfs_node_key(buf, &disk_key, 0);
264
265 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
266 reloc_src_root = btrfs_header_owner(buf);
267 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
268 &disk_key, level, buf->start, 0,
269 reloc_src_root, BTRFS_NESTING_NEW_ROOT);
270 if (IS_ERR(cow))
271 return PTR_ERR(cow);
272
273 copy_extent_buffer_full(cow, buf);
274 btrfs_set_header_bytenr(cow, cow->start);
275 btrfs_set_header_generation(cow, trans->transid);
276 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
277 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
278 BTRFS_HEADER_FLAG_RELOC);
279 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
280 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
281 else
282 btrfs_set_header_owner(cow, new_root_objectid);
283
284 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
285
286 WARN_ON(btrfs_header_generation(buf) > trans->transid);
287 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
288 ret = btrfs_inc_ref(trans, root, cow, 1);
289 else
290 ret = btrfs_inc_ref(trans, root, cow, 0);
291 if (ret) {
292 btrfs_tree_unlock(cow);
293 free_extent_buffer(cow);
294 btrfs_abort_transaction(trans, ret);
295 return ret;
296 }
297
298 btrfs_mark_buffer_dirty(trans, cow);
299 *cow_ret = cow;
300 return 0;
301 }
302
303 /*
304 * check if the tree block can be shared by multiple trees
305 */
bool btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
307 struct btrfs_root *root,
308 struct extent_buffer *buf)
309 {
310 const u64 buf_gen = btrfs_header_generation(buf);
311
312 /*
313 * Tree blocks not in shareable trees and tree roots are never shared.
314 * If a block was allocated after the last snapshot and the block was
315 * not allocated by tree relocation, we know the block is not shared.
316 */
317
318 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
319 return false;
320
321 if (buf == root->node)
322 return false;
323
324 if (buf_gen > btrfs_root_last_snapshot(&root->root_item) &&
325 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))
326 return false;
327
328 if (buf != root->commit_root)
329 return true;
330
331 /*
332 * An extent buffer that used to be the commit root may still be shared
333 * because the tree height may have increased and it became a child of a
334 * higher level root. This can happen when snapshotting a subvolume
335 * created in the current transaction.
336 */
337 if (buf_gen == trans->transid)
338 return true;
339
340 return false;
341 }
342
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
344 struct btrfs_root *root,
345 struct extent_buffer *buf,
346 struct extent_buffer *cow,
347 int *last_ref)
348 {
349 struct btrfs_fs_info *fs_info = root->fs_info;
350 u64 refs;
351 u64 owner;
352 u64 flags;
353 int ret;
354
355 /*
356 * Backrefs update rules:
357 *
358 * Always use full backrefs for extent pointers in tree block
359 * allocated by tree relocation.
360 *
361 * If a shared tree block is no longer referenced by its owner
362 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
363 * use full backrefs for extent pointers in tree block.
364 *
 * If a tree block is being relocated
 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
 * use full backrefs for extent pointers in tree block.
 * The reason for this is that some operations (such as drop tree)
 * are only allowed for blocks that use full backrefs.
370 */
371
372 if (btrfs_block_can_be_shared(trans, root, buf)) {
373 ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
374 btrfs_header_level(buf), 1,
375 &refs, &flags, NULL);
376 if (ret)
377 return ret;
378 if (unlikely(refs == 0)) {
379 btrfs_crit(fs_info,
380 "found 0 references for tree block at bytenr %llu level %d root %llu",
381 buf->start, btrfs_header_level(buf),
382 btrfs_root_id(root));
383 ret = -EUCLEAN;
384 btrfs_abort_transaction(trans, ret);
385 return ret;
386 }
387 } else {
388 refs = 1;
389 if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
390 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
391 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
392 else
393 flags = 0;
394 }
395
396 owner = btrfs_header_owner(buf);
397 if (unlikely(owner == BTRFS_TREE_RELOC_OBJECTID &&
398 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))) {
399 btrfs_crit(fs_info,
400 "found tree block at bytenr %llu level %d root %llu refs %llu flags %llx without full backref flag set",
401 buf->start, btrfs_header_level(buf),
402 btrfs_root_id(root), refs, flags);
403 ret = -EUCLEAN;
404 btrfs_abort_transaction(trans, ret);
405 return ret;
406 }
407
408 if (refs > 1) {
409 if ((owner == btrfs_root_id(root) ||
410 btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) &&
411 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
412 ret = btrfs_inc_ref(trans, root, buf, 1);
413 if (ret)
414 return ret;
415
416 if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
417 ret = btrfs_dec_ref(trans, root, buf, 0);
418 if (ret)
419 return ret;
420 ret = btrfs_inc_ref(trans, root, cow, 1);
421 if (ret)
422 return ret;
423 }
424 ret = btrfs_set_disk_extent_flags(trans, buf,
425 BTRFS_BLOCK_FLAG_FULL_BACKREF);
426 if (ret)
427 return ret;
428 } else {
429
430 if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
431 ret = btrfs_inc_ref(trans, root, cow, 1);
432 else
433 ret = btrfs_inc_ref(trans, root, cow, 0);
434 if (ret)
435 return ret;
436 }
437 } else {
438 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
439 if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
440 ret = btrfs_inc_ref(trans, root, cow, 1);
441 else
442 ret = btrfs_inc_ref(trans, root, cow, 0);
443 if (ret)
444 return ret;
445 ret = btrfs_dec_ref(trans, root, buf, 1);
446 if (ret)
447 return ret;
448 }
449 btrfs_clear_buffer_dirty(trans, buf);
450 *last_ref = 1;
451 }
452 return 0;
453 }
454
455 /*
456 * does the dirty work in cow of a single block. The parent block (if
457 * supplied) is updated to point to the new cow copy. The new buffer is marked
458 * dirty and returned locked. If you modify the block it needs to be marked
459 * dirty again.
460 *
461 * search_start -- an allocation hint for the new block
462 *
463 * empty_size -- a hint that you plan on doing more cow. This is the size in
464 * bytes the allocator should try to find free next to the block it returns.
465 * This is just a hint and may be ignored by the allocator.
466 */
int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
468 struct btrfs_root *root,
469 struct extent_buffer *buf,
470 struct extent_buffer *parent, int parent_slot,
471 struct extent_buffer **cow_ret,
472 u64 search_start, u64 empty_size,
473 enum btrfs_lock_nesting nest)
474 {
475 struct btrfs_fs_info *fs_info = root->fs_info;
476 struct btrfs_disk_key disk_key;
477 struct extent_buffer *cow;
478 int level, ret;
479 int last_ref = 0;
480 int unlock_orig = 0;
481 u64 parent_start = 0;
482 u64 reloc_src_root = 0;
483
484 if (*cow_ret == buf)
485 unlock_orig = 1;
486
487 btrfs_assert_tree_write_locked(buf);
488
489 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
490 trans->transid != fs_info->running_transaction->transid);
491 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
492 trans->transid != btrfs_get_root_last_trans(root));
493
494 level = btrfs_header_level(buf);
495
496 if (level == 0)
497 btrfs_item_key(buf, &disk_key, 0);
498 else
499 btrfs_node_key(buf, &disk_key, 0);
500
501 if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
502 if (parent)
503 parent_start = parent->start;
504 reloc_src_root = btrfs_header_owner(buf);
505 }
506 cow = btrfs_alloc_tree_block(trans, root, parent_start,
507 btrfs_root_id(root), &disk_key, level,
508 search_start, empty_size, reloc_src_root, nest);
509 if (IS_ERR(cow))
510 return PTR_ERR(cow);
511
512 /* cow is set to blocking by btrfs_init_new_buffer */
513
514 copy_extent_buffer_full(cow, buf);
515 btrfs_set_header_bytenr(cow, cow->start);
516 btrfs_set_header_generation(cow, trans->transid);
517 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
518 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
519 BTRFS_HEADER_FLAG_RELOC);
520 if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
521 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
522 else
523 btrfs_set_header_owner(cow, btrfs_root_id(root));
524
525 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
526
527 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
528 if (ret) {
529 btrfs_abort_transaction(trans, ret);
530 goto error_unlock_cow;
531 }
532
533 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
534 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
535 if (ret) {
536 btrfs_abort_transaction(trans, ret);
537 goto error_unlock_cow;
538 }
539 }
540
541 if (buf == root->node) {
542 WARN_ON(parent && parent != buf);
543 if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
544 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
545 parent_start = buf->start;
546
547 ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
548 if (ret < 0) {
549 btrfs_abort_transaction(trans, ret);
550 goto error_unlock_cow;
551 }
552 atomic_inc(&cow->refs);
553 rcu_assign_pointer(root->node, cow);
554
555 ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
556 parent_start, last_ref);
557 free_extent_buffer(buf);
558 add_root_to_dirty_list(root);
559 if (ret < 0) {
560 btrfs_abort_transaction(trans, ret);
561 goto error_unlock_cow;
562 }
563 } else {
564 WARN_ON(trans->transid != btrfs_header_generation(parent));
565 ret = btrfs_tree_mod_log_insert_key(parent, parent_slot,
566 BTRFS_MOD_LOG_KEY_REPLACE);
567 if (ret) {
568 btrfs_abort_transaction(trans, ret);
569 goto error_unlock_cow;
570 }
571 btrfs_set_node_blockptr(parent, parent_slot,
572 cow->start);
573 btrfs_set_node_ptr_generation(parent, parent_slot,
574 trans->transid);
575 btrfs_mark_buffer_dirty(trans, parent);
576 if (last_ref) {
577 ret = btrfs_tree_mod_log_free_eb(buf);
578 if (ret) {
579 btrfs_abort_transaction(trans, ret);
580 goto error_unlock_cow;
581 }
582 }
583 ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
584 parent_start, last_ref);
585 if (ret < 0) {
586 btrfs_abort_transaction(trans, ret);
587 goto error_unlock_cow;
588 }
589 }
590
591 trace_btrfs_cow_block(root, buf, cow);
592 if (unlock_orig)
593 btrfs_tree_unlock(buf);
594 free_extent_buffer_stale(buf);
595 btrfs_mark_buffer_dirty(trans, cow);
596 *cow_ret = cow;
597 return 0;
598
599 error_unlock_cow:
600 btrfs_tree_unlock(cow);
601 free_extent_buffer(cow);
602 return ret;
603 }
604
static inline int should_cow_block(struct btrfs_trans_handle *trans,
606 struct btrfs_root *root,
607 struct extent_buffer *buf)
608 {
609 if (btrfs_is_testing(root->fs_info))
610 return 0;
611
612 /* Ensure we can see the FORCE_COW bit */
613 smp_mb__before_atomic();
614
615 /*
616 * We do not need to cow a block if
617 * 1) this block is not created or changed in this transaction;
618 * 2) this block does not belong to TREE_RELOC tree;
619 * 3) the root is not forced COW.
620 *
621 * What is forced COW:
622 * when we create snapshot during committing the transaction,
623 * after we've finished copying src root, we must COW the shared
624 * block to ensure the metadata consistency.
625 */
626 if (btrfs_header_generation(buf) == trans->transid &&
627 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
628 !(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
629 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
630 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
631 return 0;
632 return 1;
633 }
634
635 /*
636 * COWs a single block, see btrfs_force_cow_block() for the real work.
637 * This version of it has extra checks so that a block isn't COWed more than
638 * once per transaction, as long as it hasn't been written yet
639 */
int btrfs_cow_block(struct btrfs_trans_handle *trans,
641 struct btrfs_root *root, struct extent_buffer *buf,
642 struct extent_buffer *parent, int parent_slot,
643 struct extent_buffer **cow_ret,
644 enum btrfs_lock_nesting nest)
645 {
646 struct btrfs_fs_info *fs_info = root->fs_info;
647 u64 search_start;
648
649 if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
650 btrfs_abort_transaction(trans, -EUCLEAN);
651 btrfs_crit(fs_info,
652 "attempt to COW block %llu on root %llu that is being deleted",
653 buf->start, btrfs_root_id(root));
654 return -EUCLEAN;
655 }
656
657 /*
658 * COWing must happen through a running transaction, which always
659 * matches the current fs generation (it's a transaction with a state
660 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
661 * into error state to prevent the commit of any transaction.
662 */
663 if (unlikely(trans->transaction != fs_info->running_transaction ||
664 trans->transid != fs_info->generation)) {
665 btrfs_abort_transaction(trans, -EUCLEAN);
666 btrfs_crit(fs_info,
667 "unexpected transaction when attempting to COW block %llu on root %llu, transaction %llu running transaction %llu fs generation %llu",
668 buf->start, btrfs_root_id(root), trans->transid,
669 fs_info->running_transaction->transid,
670 fs_info->generation);
671 return -EUCLEAN;
672 }
673
674 if (!should_cow_block(trans, root, buf)) {
675 *cow_ret = buf;
676 return 0;
677 }
678
679 search_start = round_down(buf->start, SZ_1G);
680
681 /*
682 * Before CoWing this block for later modification, check if it's
683 * the subtree root and do the delayed subtree trace if needed.
684 *
 * Also we don't care about the error, as it's handled internally.
686 */
687 btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
688 return btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
689 cow_ret, search_start, 0, nest);
690 }
691 ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
692
693 /*
694 * same as comp_keys only with two btrfs_key's
695 */
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
697 {
698 if (k1->objectid > k2->objectid)
699 return 1;
700 if (k1->objectid < k2->objectid)
701 return -1;
702 if (k1->type > k2->type)
703 return 1;
704 if (k1->type < k2->type)
705 return -1;
706 if (k1->offset > k2->offset)
707 return 1;
708 if (k1->offset < k2->offset)
709 return -1;
710 return 0;
711 }
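
/*
 * Keys compare by objectid first, then type, then offset. For example
 * (illustrative values only):
 *
 *     { 256, BTRFS_INODE_ITEM_KEY, 0 }   <  { 256, BTRFS_DIR_ITEM_KEY, 123 }
 *     { 256, BTRFS_DIR_ITEM_KEY, 123 }   <  { 257, BTRFS_INODE_ITEM_KEY, 0 }
 *
 * since BTRFS_INODE_ITEM_KEY sorts before BTRFS_DIR_ITEM_KEY, and a smaller
 * objectid wins regardless of type and offset.
 */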
712
713 /*
714 * Search for a key in the given extent_buffer.
715 *
716 * The lower boundary for the search is specified by the slot number @first_slot.
717 * Use a value of 0 to search over the whole extent buffer. Works for both
718 * leaves and nodes.
719 *
720 * The slot in the extent buffer is returned via @slot. If the key exists in the
721 * extent buffer, then @slot will point to the slot where the key is, otherwise
722 * it points to the slot where you would insert the key.
723 *
724 * Slot may point to the total number of items (i.e. one position beyond the last
725 * key) if the key is bigger than the last key in the extent buffer.
726 */
int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
728 const struct btrfs_key *key, int *slot)
729 {
730 unsigned long p;
731 int item_size;
732 /*
733 * Use unsigned types for the low and high slots, so that we get a more
734 * efficient division in the search loop below.
735 */
736 u32 low = first_slot;
737 u32 high = btrfs_header_nritems(eb);
738 int ret;
739 const int key_size = sizeof(struct btrfs_disk_key);
740
741 if (unlikely(low > high)) {
742 btrfs_err(eb->fs_info,
743 "%s: low (%u) > high (%u) eb %llu owner %llu level %d",
744 __func__, low, high, eb->start,
745 btrfs_header_owner(eb), btrfs_header_level(eb));
746 return -EINVAL;
747 }
748
749 if (btrfs_header_level(eb) == 0) {
750 p = offsetof(struct btrfs_leaf, items);
751 item_size = sizeof(struct btrfs_item);
752 } else {
753 p = offsetof(struct btrfs_node, ptrs);
754 item_size = sizeof(struct btrfs_key_ptr);
755 }
756
757 while (low < high) {
758 const int unit_size = eb->folio_size;
759 unsigned long oil;
760 unsigned long offset;
761 struct btrfs_disk_key *tmp;
762 struct btrfs_disk_key unaligned;
763 int mid;
764
765 mid = (low + high) / 2;
766 offset = p + mid * item_size;
767 oil = get_eb_offset_in_folio(eb, offset);
768
769 if (oil + key_size <= unit_size) {
770 const unsigned long idx = get_eb_folio_index(eb, offset);
771 char *kaddr = folio_address(eb->folios[idx]);
772
773 oil = get_eb_offset_in_folio(eb, offset);
774 tmp = (struct btrfs_disk_key *)(kaddr + oil);
775 } else {
776 read_extent_buffer(eb, &unaligned, offset, key_size);
777 tmp = &unaligned;
778 }
779
780 ret = btrfs_comp_keys(tmp, key);
781
782 if (ret < 0)
783 low = mid + 1;
784 else if (ret > 0)
785 high = mid;
786 else {
787 *slot = mid;
788 return 0;
789 }
790 }
791 *slot = low;
792 return 1;
793 }
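
/*
 * A short sketch of how callers typically consume the result (hypothetical
 * snippet, for illustration only):
 *
 *     int slot;
 *     int ret = btrfs_bin_search(eb, 0, &key, &slot);
 *
 *     ret < 0:  error (invalid slot range)
 *     ret == 0: @key was found and @slot is its position
 *     ret == 1: @key is not present and @slot is where it would be inserted,
 *               which may be equal to btrfs_header_nritems(eb)
 */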
794
static void root_add_used_bytes(struct btrfs_root *root)
796 {
797 spin_lock(&root->accounting_lock);
798 btrfs_set_root_used(&root->root_item,
799 btrfs_root_used(&root->root_item) + root->fs_info->nodesize);
800 spin_unlock(&root->accounting_lock);
801 }
802
static void root_sub_used_bytes(struct btrfs_root *root)
804 {
805 spin_lock(&root->accounting_lock);
806 btrfs_set_root_used(&root->root_item,
807 btrfs_root_used(&root->root_item) - root->fs_info->nodesize);
808 spin_unlock(&root->accounting_lock);
809 }
810
/* given a node and slot number, this reads the block it points to. The
 * extent buffer is returned with a reference taken (but unlocked).
 */
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
815 int slot)
816 {
817 int level = btrfs_header_level(parent);
818 struct btrfs_tree_parent_check check = { 0 };
819 struct extent_buffer *eb;
820
821 if (slot < 0 || slot >= btrfs_header_nritems(parent))
822 return ERR_PTR(-ENOENT);
823
824 ASSERT(level);
825
826 check.level = level - 1;
827 check.transid = btrfs_node_ptr_generation(parent, slot);
828 check.owner_root = btrfs_header_owner(parent);
829 check.has_first_key = true;
830 btrfs_node_key_to_cpu(parent, &check.first_key, slot);
831
832 eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
833 &check);
834 if (IS_ERR(eb))
835 return eb;
836 if (!extent_buffer_uptodate(eb)) {
837 free_extent_buffer(eb);
838 return ERR_PTR(-EIO);
839 }
840
841 return eb;
842 }
843
844 /*
845 * node level balancing, used to make sure nodes are in proper order for
846 * item deletion. We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
848 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
850 struct btrfs_root *root,
851 struct btrfs_path *path, int level)
852 {
853 struct btrfs_fs_info *fs_info = root->fs_info;
854 struct extent_buffer *right = NULL;
855 struct extent_buffer *mid;
856 struct extent_buffer *left = NULL;
857 struct extent_buffer *parent = NULL;
858 int ret = 0;
859 int wret;
860 int pslot;
861 int orig_slot = path->slots[level];
862 u64 orig_ptr;
863
864 ASSERT(level > 0);
865
866 mid = path->nodes[level];
867
868 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
869 WARN_ON(btrfs_header_generation(mid) != trans->transid);
870
871 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
872
873 if (level < BTRFS_MAX_LEVEL - 1) {
874 parent = path->nodes[level + 1];
875 pslot = path->slots[level + 1];
876 }
877
878 /*
879 * deal with the case where there is only one pointer in the root
880 * by promoting the node below to a root
881 */
882 if (!parent) {
883 struct extent_buffer *child;
884
885 if (btrfs_header_nritems(mid) != 1)
886 return 0;
887
888 /* promote the child to a root */
889 child = btrfs_read_node_slot(mid, 0);
890 if (IS_ERR(child)) {
891 ret = PTR_ERR(child);
892 goto out;
893 }
894
895 btrfs_tree_lock(child);
896 ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
897 BTRFS_NESTING_COW);
898 if (ret) {
899 btrfs_tree_unlock(child);
900 free_extent_buffer(child);
901 goto out;
902 }
903
904 ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
905 if (ret < 0) {
906 btrfs_tree_unlock(child);
907 free_extent_buffer(child);
908 btrfs_abort_transaction(trans, ret);
909 goto out;
910 }
911 rcu_assign_pointer(root->node, child);
912
913 add_root_to_dirty_list(root);
914 btrfs_tree_unlock(child);
915
916 path->locks[level] = 0;
917 path->nodes[level] = NULL;
918 btrfs_clear_buffer_dirty(trans, mid);
919 btrfs_tree_unlock(mid);
920 /* once for the path */
921 free_extent_buffer(mid);
922
923 root_sub_used_bytes(root);
924 ret = btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
925 /* once for the root ptr */
926 free_extent_buffer_stale(mid);
927 if (ret < 0) {
928 btrfs_abort_transaction(trans, ret);
929 goto out;
930 }
931 return 0;
932 }
933 if (btrfs_header_nritems(mid) >
934 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
935 return 0;
936
937 if (pslot) {
938 left = btrfs_read_node_slot(parent, pslot - 1);
939 if (IS_ERR(left)) {
940 ret = PTR_ERR(left);
941 left = NULL;
942 goto out;
943 }
944
945 btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);
946 wret = btrfs_cow_block(trans, root, left,
947 parent, pslot - 1, &left,
948 BTRFS_NESTING_LEFT_COW);
949 if (wret) {
950 ret = wret;
951 goto out;
952 }
953 }
954
955 if (pslot + 1 < btrfs_header_nritems(parent)) {
956 right = btrfs_read_node_slot(parent, pslot + 1);
957 if (IS_ERR(right)) {
958 ret = PTR_ERR(right);
959 right = NULL;
960 goto out;
961 }
962
963 btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT);
964 wret = btrfs_cow_block(trans, root, right,
965 parent, pslot + 1, &right,
966 BTRFS_NESTING_RIGHT_COW);
967 if (wret) {
968 ret = wret;
969 goto out;
970 }
971 }
972
973 /* first, try to make some room in the middle buffer */
974 if (left) {
975 orig_slot += btrfs_header_nritems(left);
976 wret = push_node_left(trans, left, mid, 1);
977 if (wret < 0)
978 ret = wret;
979 }
980
981 /*
982 * then try to empty the right most buffer into the middle
983 */
984 if (right) {
985 wret = push_node_left(trans, mid, right, 1);
986 if (wret < 0 && wret != -ENOSPC)
987 ret = wret;
988 if (btrfs_header_nritems(right) == 0) {
989 btrfs_clear_buffer_dirty(trans, right);
990 btrfs_tree_unlock(right);
991 ret = btrfs_del_ptr(trans, root, path, level + 1, pslot + 1);
992 if (ret < 0) {
993 free_extent_buffer_stale(right);
994 right = NULL;
995 goto out;
996 }
997 root_sub_used_bytes(root);
998 ret = btrfs_free_tree_block(trans, btrfs_root_id(root),
999 right, 0, 1);
1000 free_extent_buffer_stale(right);
1001 right = NULL;
1002 if (ret < 0) {
1003 btrfs_abort_transaction(trans, ret);
1004 goto out;
1005 }
1006 } else {
1007 struct btrfs_disk_key right_key;
1008 btrfs_node_key(right, &right_key, 0);
1009 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
1010 BTRFS_MOD_LOG_KEY_REPLACE);
1011 if (ret < 0) {
1012 btrfs_abort_transaction(trans, ret);
1013 goto out;
1014 }
1015 btrfs_set_node_key(parent, &right_key, pslot + 1);
1016 btrfs_mark_buffer_dirty(trans, parent);
1017 }
1018 }
1019 if (btrfs_header_nritems(mid) == 1) {
1020 /*
1021 * we're not allowed to leave a node with one item in the
1022 * tree during a delete. A deletion from lower in the tree
1023 * could try to delete the only pointer in this node.
1024 * So, pull some keys from the left.
1025 * There has to be a left pointer at this point because
1026 * otherwise we would have pulled some pointers from the
1027 * right
1028 */
1029 if (unlikely(!left)) {
1030 btrfs_crit(fs_info,
1031 "missing left child when middle child only has 1 item, parent bytenr %llu level %d mid bytenr %llu root %llu",
1032 parent->start, btrfs_header_level(parent),
1033 mid->start, btrfs_root_id(root));
1034 ret = -EUCLEAN;
1035 btrfs_abort_transaction(trans, ret);
1036 goto out;
1037 }
1038 wret = balance_node_right(trans, mid, left);
1039 if (wret < 0) {
1040 ret = wret;
1041 goto out;
1042 }
1043 if (wret == 1) {
1044 wret = push_node_left(trans, left, mid, 1);
1045 if (wret < 0)
1046 ret = wret;
1047 }
1048 BUG_ON(wret == 1);
1049 }
1050 if (btrfs_header_nritems(mid) == 0) {
1051 btrfs_clear_buffer_dirty(trans, mid);
1052 btrfs_tree_unlock(mid);
1053 ret = btrfs_del_ptr(trans, root, path, level + 1, pslot);
1054 if (ret < 0) {
1055 free_extent_buffer_stale(mid);
1056 mid = NULL;
1057 goto out;
1058 }
1059 root_sub_used_bytes(root);
1060 ret = btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
1061 free_extent_buffer_stale(mid);
1062 mid = NULL;
1063 if (ret < 0) {
1064 btrfs_abort_transaction(trans, ret);
1065 goto out;
1066 }
1067 } else {
1068 /* update the parent key to reflect our changes */
1069 struct btrfs_disk_key mid_key;
1070 btrfs_node_key(mid, &mid_key, 0);
1071 ret = btrfs_tree_mod_log_insert_key(parent, pslot,
1072 BTRFS_MOD_LOG_KEY_REPLACE);
1073 if (ret < 0) {
1074 btrfs_abort_transaction(trans, ret);
1075 goto out;
1076 }
1077 btrfs_set_node_key(parent, &mid_key, pslot);
1078 btrfs_mark_buffer_dirty(trans, parent);
1079 }
1080
1081 /* update the path */
1082 if (left) {
1083 if (btrfs_header_nritems(left) > orig_slot) {
1084 atomic_inc(&left->refs);
1085 /* left was locked after cow */
1086 path->nodes[level] = left;
1087 path->slots[level + 1] -= 1;
1088 path->slots[level] = orig_slot;
1089 if (mid) {
1090 btrfs_tree_unlock(mid);
1091 free_extent_buffer(mid);
1092 }
1093 } else {
1094 orig_slot -= btrfs_header_nritems(left);
1095 path->slots[level] = orig_slot;
1096 }
1097 }
1098 /* double check we haven't messed things up */
1099 if (orig_ptr !=
1100 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1101 BUG();
1102 out:
1103 if (right) {
1104 btrfs_tree_unlock(right);
1105 free_extent_buffer(right);
1106 }
1107 if (left) {
1108 if (path->nodes[level] != left)
1109 btrfs_tree_unlock(left);
1110 free_extent_buffer(left);
1111 }
1112 return ret;
1113 }
1114
1115 /* Node balancing for insertion. Here we only split or push nodes around
1116 * when they are completely full. This is also done top down, so we
1117 * have to be pessimistic.
1118 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1120 struct btrfs_root *root,
1121 struct btrfs_path *path, int level)
1122 {
1123 struct btrfs_fs_info *fs_info = root->fs_info;
1124 struct extent_buffer *right = NULL;
1125 struct extent_buffer *mid;
1126 struct extent_buffer *left = NULL;
1127 struct extent_buffer *parent = NULL;
1128 int ret = 0;
1129 int wret;
1130 int pslot;
1131 int orig_slot = path->slots[level];
1132
1133 if (level == 0)
1134 return 1;
1135
1136 mid = path->nodes[level];
1137 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1138
1139 if (level < BTRFS_MAX_LEVEL - 1) {
1140 parent = path->nodes[level + 1];
1141 pslot = path->slots[level + 1];
1142 }
1143
1144 if (!parent)
1145 return 1;
1146
1147 /* first, try to make some room in the middle buffer */
1148 if (pslot) {
1149 u32 left_nr;
1150
1151 left = btrfs_read_node_slot(parent, pslot - 1);
1152 if (IS_ERR(left))
1153 return PTR_ERR(left);
1154
1155 btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);
1156
1157 left_nr = btrfs_header_nritems(left);
1158 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
1159 wret = 1;
1160 } else {
1161 ret = btrfs_cow_block(trans, root, left, parent,
1162 pslot - 1, &left,
1163 BTRFS_NESTING_LEFT_COW);
1164 if (ret)
1165 wret = 1;
1166 else {
1167 wret = push_node_left(trans, left, mid, 0);
1168 }
1169 }
1170 if (wret < 0)
1171 ret = wret;
1172 if (wret == 0) {
1173 struct btrfs_disk_key disk_key;
1174 orig_slot += left_nr;
1175 btrfs_node_key(mid, &disk_key, 0);
1176 ret = btrfs_tree_mod_log_insert_key(parent, pslot,
1177 BTRFS_MOD_LOG_KEY_REPLACE);
1178 if (ret < 0) {
1179 btrfs_tree_unlock(left);
1180 free_extent_buffer(left);
1181 btrfs_abort_transaction(trans, ret);
1182 return ret;
1183 }
1184 btrfs_set_node_key(parent, &disk_key, pslot);
1185 btrfs_mark_buffer_dirty(trans, parent);
1186 if (btrfs_header_nritems(left) > orig_slot) {
1187 path->nodes[level] = left;
1188 path->slots[level + 1] -= 1;
1189 path->slots[level] = orig_slot;
1190 btrfs_tree_unlock(mid);
1191 free_extent_buffer(mid);
1192 } else {
1193 orig_slot -=
1194 btrfs_header_nritems(left);
1195 path->slots[level] = orig_slot;
1196 btrfs_tree_unlock(left);
1197 free_extent_buffer(left);
1198 }
1199 return 0;
1200 }
1201 btrfs_tree_unlock(left);
1202 free_extent_buffer(left);
1203 }
1204
1205 /*
1206 * then try to empty the right most buffer into the middle
1207 */
1208 if (pslot + 1 < btrfs_header_nritems(parent)) {
1209 u32 right_nr;
1210
1211 right = btrfs_read_node_slot(parent, pslot + 1);
1212 if (IS_ERR(right))
1213 return PTR_ERR(right);
1214
1215 btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT);
1216
1217 right_nr = btrfs_header_nritems(right);
1218 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
1219 wret = 1;
1220 } else {
1221 ret = btrfs_cow_block(trans, root, right,
1222 parent, pslot + 1,
1223 &right, BTRFS_NESTING_RIGHT_COW);
1224 if (ret)
1225 wret = 1;
1226 else {
1227 wret = balance_node_right(trans, right, mid);
1228 }
1229 }
1230 if (wret < 0)
1231 ret = wret;
1232 if (wret == 0) {
1233 struct btrfs_disk_key disk_key;
1234
1235 btrfs_node_key(right, &disk_key, 0);
1236 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
1237 BTRFS_MOD_LOG_KEY_REPLACE);
1238 if (ret < 0) {
1239 btrfs_tree_unlock(right);
1240 free_extent_buffer(right);
1241 btrfs_abort_transaction(trans, ret);
1242 return ret;
1243 }
1244 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1245 btrfs_mark_buffer_dirty(trans, parent);
1246
1247 if (btrfs_header_nritems(mid) <= orig_slot) {
1248 path->nodes[level] = right;
1249 path->slots[level + 1] += 1;
1250 path->slots[level] = orig_slot -
1251 btrfs_header_nritems(mid);
1252 btrfs_tree_unlock(mid);
1253 free_extent_buffer(mid);
1254 } else {
1255 btrfs_tree_unlock(right);
1256 free_extent_buffer(right);
1257 }
1258 return 0;
1259 }
1260 btrfs_tree_unlock(right);
1261 free_extent_buffer(right);
1262 }
1263 return 1;
1264 }
1265
1266 /*
1267 * readahead one full node of leaves, finding things that are close
1268 * to the block in 'slot', and triggering ra on them.
1269 */
static void reada_for_search(struct btrfs_fs_info *fs_info,
1271 struct btrfs_path *path,
1272 int level, int slot, u64 objectid)
1273 {
1274 struct extent_buffer *node;
1275 struct btrfs_disk_key disk_key;
1276 u32 nritems;
1277 u64 search;
1278 u64 target;
1279 u64 nread = 0;
1280 u64 nread_max;
1281 u32 nr;
1282 u32 blocksize;
1283 u32 nscan = 0;
1284
1285 if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
1286 return;
1287
1288 if (!path->nodes[level])
1289 return;
1290
1291 node = path->nodes[level];
1292
1293 /*
1294 * Since the time between visiting leaves is much shorter than the time
1295 * between visiting nodes, limit read ahead of nodes to 1, to avoid too
1296 * much IO at once (possibly random).
1297 */
1298 if (path->reada == READA_FORWARD_ALWAYS) {
1299 if (level > 1)
1300 nread_max = node->fs_info->nodesize;
1301 else
1302 nread_max = SZ_128K;
1303 } else {
1304 nread_max = SZ_64K;
1305 }
1306
1307 search = btrfs_node_blockptr(node, slot);
1308 blocksize = fs_info->nodesize;
1309 if (path->reada != READA_FORWARD_ALWAYS) {
1310 struct extent_buffer *eb;
1311
1312 eb = find_extent_buffer(fs_info, search);
1313 if (eb) {
1314 free_extent_buffer(eb);
1315 return;
1316 }
1317 }
1318
1319 target = search;
1320
1321 nritems = btrfs_header_nritems(node);
1322 nr = slot;
1323
1324 while (1) {
1325 if (path->reada == READA_BACK) {
1326 if (nr == 0)
1327 break;
1328 nr--;
1329 } else if (path->reada == READA_FORWARD ||
1330 path->reada == READA_FORWARD_ALWAYS) {
1331 nr++;
1332 if (nr >= nritems)
1333 break;
1334 }
1335 if (path->reada == READA_BACK && objectid) {
1336 btrfs_node_key(node, &disk_key, nr);
1337 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1338 break;
1339 }
1340 search = btrfs_node_blockptr(node, nr);
1341 if (path->reada == READA_FORWARD_ALWAYS ||
1342 (search <= target && target - search <= 65536) ||
1343 (search > target && search - target <= 65536)) {
1344 btrfs_readahead_node_child(node, nr);
1345 nread += blocksize;
1346 }
1347 nscan++;
1348 if (nread > nread_max || nscan > 32)
1349 break;
1350 }
1351 }
1352
static noinline void reada_for_balance(struct btrfs_path *path, int level)
1354 {
1355 struct extent_buffer *parent;
1356 int slot;
1357 int nritems;
1358
1359 parent = path->nodes[level + 1];
1360 if (!parent)
1361 return;
1362
1363 nritems = btrfs_header_nritems(parent);
1364 slot = path->slots[level + 1];
1365
1366 if (slot > 0)
1367 btrfs_readahead_node_child(parent, slot - 1);
1368 if (slot + 1 < nritems)
1369 btrfs_readahead_node_child(parent, slot + 1);
1370 }
1371
1372
1373 /*
1374 * when we walk down the tree, it is usually safe to unlock the higher layers
1375 * in the tree. The exceptions are when our path goes through slot 0, because
1376 * operations on the tree might require changing key pointers higher up in the
1377 * tree.
1378 *
1379 * callers might also have set path->keep_locks, which tells this code to keep
1380 * the lock if the path points to the last slot in the block. This is part of
1381 * walking through the tree, and selecting the next slot in the higher block.
1382 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. So
1384 * if lowest_unlock is 1, level 0 won't be unlocked
1385 */
static noinline void unlock_up(struct btrfs_path *path, int level,
1387 int lowest_unlock, int min_write_lock_level,
1388 int *write_lock_level)
1389 {
1390 int i;
1391 int skip_level = level;
1392 bool check_skip = true;
1393
1394 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1395 if (!path->nodes[i])
1396 break;
1397 if (!path->locks[i])
1398 break;
1399
1400 if (check_skip) {
1401 if (path->slots[i] == 0) {
1402 skip_level = i + 1;
1403 continue;
1404 }
1405
1406 if (path->keep_locks) {
1407 u32 nritems;
1408
1409 nritems = btrfs_header_nritems(path->nodes[i]);
1410 if (nritems < 1 || path->slots[i] >= nritems - 1) {
1411 skip_level = i + 1;
1412 continue;
1413 }
1414 }
1415 }
1416
1417 if (i >= lowest_unlock && i > skip_level) {
1418 check_skip = false;
1419 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
1420 path->locks[i] = 0;
1421 if (write_lock_level &&
1422 i > min_write_lock_level &&
1423 i <= *write_lock_level) {
1424 *write_lock_level = i - 1;
1425 }
1426 }
1427 }
1428 }
1429
1430 /*
1431 * Helper function for btrfs_search_slot() and other functions that do a search
1432 * on a btree. The goal is to find a tree block in the cache (the radix tree at
1433 * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read
1434 * its pages from disk.
1435 *
1436 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the
1437 * whole btree search, starting again from the current root node.
1438 */
1439 static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
1441 struct extent_buffer **eb_ret, int slot,
1442 const struct btrfs_key *key)
1443 {
1444 struct btrfs_fs_info *fs_info = root->fs_info;
1445 struct btrfs_tree_parent_check check = { 0 };
1446 u64 blocknr;
1447 struct extent_buffer *tmp = NULL;
1448 int ret = 0;
1449 int parent_level;
1450 int err;
1451 bool read_tmp = false;
1452 bool tmp_locked = false;
1453 bool path_released = false;
1454
1455 blocknr = btrfs_node_blockptr(*eb_ret, slot);
1456 parent_level = btrfs_header_level(*eb_ret);
1457 btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot);
1458 check.has_first_key = true;
1459 check.level = parent_level - 1;
1460 check.transid = btrfs_node_ptr_generation(*eb_ret, slot);
1461 check.owner_root = btrfs_root_id(root);
1462
1463 /*
1464 * If we need to read an extent buffer from disk and we are holding locks
1465 * on upper level nodes, we unlock all the upper nodes before reading the
1466 * extent buffer, and then return -EAGAIN to the caller as it needs to
1467 * restart the search. We don't release the lock on the current level
1468 * because we need to walk this node to figure out which blocks to read.
1469 */
1470 tmp = find_extent_buffer(fs_info, blocknr);
1471 if (tmp) {
1472 if (p->reada == READA_FORWARD_ALWAYS)
1473 reada_for_search(fs_info, p, parent_level, slot, key->objectid);
1474
1475 /* first we do an atomic uptodate check */
1476 if (btrfs_buffer_uptodate(tmp, check.transid, 1) > 0) {
1477 /*
1478 * Do extra check for first_key, eb can be stale due to
1479 * being cached, read from scrub, or have multiple
1480 * parents (shared tree blocks).
1481 */
1482 if (btrfs_verify_level_key(tmp, &check)) {
1483 ret = -EUCLEAN;
1484 goto out;
1485 }
1486 *eb_ret = tmp;
1487 tmp = NULL;
1488 ret = 0;
1489 goto out;
1490 }
1491
1492 if (p->nowait) {
1493 ret = -EAGAIN;
1494 goto out;
1495 }
1496
1497 if (!p->skip_locking) {
1498 btrfs_unlock_up_safe(p, parent_level + 1);
1499 btrfs_maybe_reset_lockdep_class(root, tmp);
1500 tmp_locked = true;
1501 btrfs_tree_read_lock(tmp);
1502 btrfs_release_path(p);
1503 ret = -EAGAIN;
1504 path_released = true;
1505 }
1506
1507 /* Now we're allowed to do a blocking uptodate check. */
1508 err = btrfs_read_extent_buffer(tmp, &check);
1509 if (err) {
1510 ret = err;
1511 goto out;
1512 }
1513
1514 if (ret == 0) {
1515 ASSERT(!tmp_locked);
1516 *eb_ret = tmp;
1517 tmp = NULL;
1518 }
1519 goto out;
1520 } else if (p->nowait) {
1521 ret = -EAGAIN;
1522 goto out;
1523 }
1524
1525 if (!p->skip_locking) {
1526 btrfs_unlock_up_safe(p, parent_level + 1);
1527 ret = -EAGAIN;
1528 }
1529
1530 if (p->reada != READA_NONE)
1531 reada_for_search(fs_info, p, parent_level, slot, key->objectid);
1532
1533 tmp = btrfs_find_create_tree_block(fs_info, blocknr, check.owner_root, check.level);
1534 if (IS_ERR(tmp)) {
1535 ret = PTR_ERR(tmp);
1536 tmp = NULL;
1537 goto out;
1538 }
1539 read_tmp = true;
1540
1541 if (!p->skip_locking) {
1542 ASSERT(ret == -EAGAIN);
1543 btrfs_maybe_reset_lockdep_class(root, tmp);
1544 tmp_locked = true;
1545 btrfs_tree_read_lock(tmp);
1546 btrfs_release_path(p);
1547 path_released = true;
1548 }
1549
1550 /* Now we're allowed to do a blocking uptodate check. */
1551 err = btrfs_read_extent_buffer(tmp, &check);
1552 if (err) {
1553 ret = err;
1554 goto out;
1555 }
1556
1557 /*
1558 * If the read above didn't mark this buffer up to date,
1559 * it will never end up being up to date. Set ret to EIO now
1560 * and give up so that our caller doesn't loop forever
1561 * on our EAGAINs.
1562 */
1563 if (!extent_buffer_uptodate(tmp)) {
1564 ret = -EIO;
1565 goto out;
1566 }
1567
1568 if (ret == 0) {
1569 ASSERT(!tmp_locked);
1570 *eb_ret = tmp;
1571 tmp = NULL;
1572 }
1573 out:
1574 if (tmp) {
1575 if (tmp_locked)
1576 btrfs_tree_read_unlock(tmp);
1577 if (read_tmp && ret && ret != -EAGAIN)
1578 free_extent_buffer_stale(tmp);
1579 else
1580 free_extent_buffer(tmp);
1581 }
1582 if (ret && !path_released)
1583 btrfs_release_path(p);
1584
1585 return ret;
1586 }
1587
1588 /*
1589 * helper function for btrfs_search_slot. This does all of the checks
1590 * for node-level blocks and does any balancing required based on
1591 * the ins_len.
1592 *
1593 * If no extra work was required, zero is returned. If we had to
1594 * drop the path, -EAGAIN is returned and btrfs_search_slot must
1595 * start over
1596 */
1597 static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
1599 struct btrfs_root *root, struct btrfs_path *p,
1600 struct extent_buffer *b, int level, int ins_len,
1601 int *write_lock_level)
1602 {
1603 struct btrfs_fs_info *fs_info = root->fs_info;
1604 int ret = 0;
1605
1606 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
1607 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
1608
1609 if (*write_lock_level < level + 1) {
1610 *write_lock_level = level + 1;
1611 btrfs_release_path(p);
1612 return -EAGAIN;
1613 }
1614
1615 reada_for_balance(p, level);
1616 ret = split_node(trans, root, p, level);
1617
1618 b = p->nodes[level];
1619 } else if (ins_len < 0 && btrfs_header_nritems(b) <
1620 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
1621
1622 if (*write_lock_level < level + 1) {
1623 *write_lock_level = level + 1;
1624 btrfs_release_path(p);
1625 return -EAGAIN;
1626 }
1627
1628 reada_for_balance(p, level);
1629 ret = balance_level(trans, root, p, level);
1630 if (ret)
1631 return ret;
1632
1633 b = p->nodes[level];
1634 if (!b) {
1635 btrfs_release_path(p);
1636 return -EAGAIN;
1637 }
1638 BUG_ON(btrfs_header_nritems(b) == 1);
1639 }
1640 return ret;
1641 }
1642
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
1644 u64 iobjectid, u64 ioff, u8 key_type,
1645 struct btrfs_key *found_key)
1646 {
1647 int ret;
1648 struct btrfs_key key;
1649 struct extent_buffer *eb;
1650
1651 ASSERT(path);
1652 ASSERT(found_key);
1653
1654 key.type = key_type;
1655 key.objectid = iobjectid;
1656 key.offset = ioff;
1657
1658 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1659 if (ret < 0)
1660 return ret;
1661
1662 eb = path->nodes[0];
1663 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
1664 ret = btrfs_next_leaf(fs_root, path);
1665 if (ret)
1666 return ret;
1667 eb = path->nodes[0];
1668 }
1669
1670 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
1671 if (found_key->type != key.type ||
1672 found_key->objectid != key.objectid)
1673 return 1;
1674
1675 return 0;
1676 }
1677
static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
1679 struct btrfs_path *p,
1680 int write_lock_level)
1681 {
1682 struct extent_buffer *b;
1683 int root_lock = 0;
1684 int level = 0;
1685
1686 if (p->search_commit_root) {
1687 b = root->commit_root;
1688 atomic_inc(&b->refs);
1689 level = btrfs_header_level(b);
1690 /*
1691 * Ensure that all callers have set skip_locking when
1692 * p->search_commit_root = 1.
1693 */
1694 ASSERT(p->skip_locking == 1);
1695
1696 goto out;
1697 }
1698
1699 if (p->skip_locking) {
1700 b = btrfs_root_node(root);
1701 level = btrfs_header_level(b);
1702 goto out;
1703 }
1704
1705 /* We try very hard to do read locks on the root */
1706 root_lock = BTRFS_READ_LOCK;
1707
1708 /*
1709 * If the level is set to maximum, we can skip trying to get the read
1710 * lock.
1711 */
1712 if (write_lock_level < BTRFS_MAX_LEVEL) {
1713 /*
1714 * We don't know the level of the root node until we actually
1715 * have it read locked
1716 */
1717 if (p->nowait) {
1718 b = btrfs_try_read_lock_root_node(root);
1719 if (IS_ERR(b))
1720 return b;
1721 } else {
1722 b = btrfs_read_lock_root_node(root);
1723 }
1724 level = btrfs_header_level(b);
1725 if (level > write_lock_level)
1726 goto out;
1727
1728 /* Whoops, must trade for write lock */
1729 btrfs_tree_read_unlock(b);
1730 free_extent_buffer(b);
1731 }
1732
1733 b = btrfs_lock_root_node(root);
1734 root_lock = BTRFS_WRITE_LOCK;
1735
1736 /* The level might have changed, check again */
1737 level = btrfs_header_level(b);
1738
1739 out:
1740 /*
1741 * The root may have failed to write out at some point, and thus is no
1742 * longer valid, return an error in this case.
1743 */
1744 if (!extent_buffer_uptodate(b)) {
1745 if (root_lock)
1746 btrfs_tree_unlock_rw(b, root_lock);
1747 free_extent_buffer(b);
1748 return ERR_PTR(-EIO);
1749 }
1750
1751 p->nodes[level] = b;
1752 if (!p->skip_locking)
1753 p->locks[level] = root_lock;
1754 /*
1755 * Callers are responsible for dropping b's references.
1756 */
1757 return b;
1758 }
1759
1760 /*
1761 * Replace the extent buffer at the lowest level of the path with a cloned
1762 * version. The purpose is to be able to use it safely, after releasing the
1763 * commit root semaphore, even if relocation is happening in parallel, the
1764 * transaction used for relocation is committed and the extent buffer is
1765 * reallocated in the next transaction.
1766 *
1767 * This is used in a context where the caller does not prevent transaction
1768 * commits from happening, either by holding a transaction handle or holding
1769 * some lock, while it's doing searches through a commit root.
1770 * At the moment it's only used for send operations.
1771 */
static int finish_need_commit_sem_search(struct btrfs_path *path)
1773 {
1774 const int i = path->lowest_level;
1775 const int slot = path->slots[i];
1776 struct extent_buffer *lowest = path->nodes[i];
1777 struct extent_buffer *clone;
1778
1779 ASSERT(path->need_commit_sem);
1780
1781 if (!lowest)
1782 return 0;
1783
1784 lockdep_assert_held_read(&lowest->fs_info->commit_root_sem);
1785
1786 clone = btrfs_clone_extent_buffer(lowest);
1787 if (!clone)
1788 return -ENOMEM;
1789
1790 btrfs_release_path(path);
1791 path->nodes[i] = clone;
1792 path->slots[i] = slot;
1793
1794 return 0;
1795 }
1796
static inline int search_for_key_slot(struct extent_buffer *eb,
1798 int search_low_slot,
1799 const struct btrfs_key *key,
1800 int prev_cmp,
1801 int *slot)
1802 {
1803 /*
1804 * If a previous call to btrfs_bin_search() on a parent node returned an
1805 * exact match (prev_cmp == 0), we can safely assume the target key will
1806 * always be at slot 0 on lower levels, since each key pointer
1807 * (struct btrfs_key_ptr) refers to the lowest key accessible from the
1808 * subtree it points to. Thus we can skip searching lower levels.
1809 */
1810 if (prev_cmp == 0) {
1811 *slot = 0;
1812 return 0;
1813 }
1814
1815 return btrfs_bin_search(eb, search_low_slot, key, slot);
1816 }
1817
1818 static int search_leaf(struct btrfs_trans_handle *trans,
1819 struct btrfs_root *root,
1820 const struct btrfs_key *key,
1821 struct btrfs_path *path,
1822 int ins_len,
1823 int prev_cmp)
1824 {
1825 struct extent_buffer *leaf = path->nodes[0];
1826 int leaf_free_space = -1;
1827 int search_low_slot = 0;
1828 int ret;
1829 bool do_bin_search = true;
1830
1831 /*
1832 * If we are doing an insertion, the leaf has enough free space and the
1833 * destination slot for the key is not slot 0, then we can unlock our
1834 * write lock on the parent, and any other upper nodes, before doing the
1835 * binary search on the leaf (with search_for_key_slot()), allowing other
1836 * tasks to lock the parent and any other upper nodes.
1837 */
1838 if (ins_len > 0) {
1839 /*
1840 * Cache the leaf free space, since we will need it later and it
1841 * will not change until then.
1842 */
1843 leaf_free_space = btrfs_leaf_free_space(leaf);
1844
1845 /*
1846 * !path->locks[1] means we have a single node tree, the leaf is
1847 * the root of the tree.
1848 */
1849 if (path->locks[1] && leaf_free_space >= ins_len) {
1850 struct btrfs_disk_key first_key;
1851
1852 ASSERT(btrfs_header_nritems(leaf) > 0);
1853 btrfs_item_key(leaf, &first_key, 0);
1854
1855 /*
1856 * Doing the extra comparison with the first key is cheap,
1857 * taking into account that the first key is very likely
1858 * already in a cache line because it immediately follows
1859 * the extent buffer's header and we have recently accessed
1860 * the header's level field.
1861 */
1862 ret = btrfs_comp_keys(&first_key, key);
1863 if (ret < 0) {
1864 /*
1865 * The first key is smaller than the key we want
1866 * to insert, so we are safe to unlock all upper
1867 * nodes and we have to do the binary search.
1868 *
1869 * We do use btrfs_unlock_up_safe() and not
1870 * unlock_up() because the latter does not unlock
1871 * nodes with a slot of 0 - we can safely unlock
1872 * any node even if its slot is 0 since in this
1873 * case the key does not end up at slot 0 of the
1874 * leaf and there's no need to split the leaf.
1875 */
1876 btrfs_unlock_up_safe(path, 1);
1877 search_low_slot = 1;
1878 } else {
1879 /*
1880 * The first key is >= the key we want to
1881 * insert, so we can skip the binary search as
1882 * the target key will be at slot 0.
1883 *
1884 * We can not unlock upper nodes when the key is
1885 * less than the first key, because we will need
1886 * to update the key at slot 0 of the parent node
1887 * and possibly of other upper nodes too.
1888 * If the key matches the first key, then we can
1889 * unlock all the upper nodes, using
1890 * btrfs_unlock_up_safe() instead of unlock_up()
1891 * as stated above.
1892 */
1893 if (ret == 0)
1894 btrfs_unlock_up_safe(path, 1);
1895 /*
1896 * ret is already 0 or 1, matching the result of
1897 * a btrfs_bin_search() call, so there is no need
1898 * to adjust it.
1899 */
1900 do_bin_search = false;
1901 path->slots[0] = 0;
1902 }
1903 }
1904 }
1905
1906 if (do_bin_search) {
1907 ret = search_for_key_slot(leaf, search_low_slot, key,
1908 prev_cmp, &path->slots[0]);
1909 if (ret < 0)
1910 return ret;
1911 }
1912
1913 if (ins_len > 0) {
1914 /*
1915 * Item key already exists. In this case, if we are allowed to
1916 * insert the item (for example, in dir_item case, item key
1917 * collision is allowed), it will be merged with the original
1918 * item. Only the item size grows, no new btrfs item will be
1919 * added. If search_for_extension is not set, ins_len already
1920 * accounts for the size of struct btrfs_item, deduct it here so
1921 * the leaf space check will be correct.
1922 */
1923 if (ret == 0 && !path->search_for_extension) {
1924 ASSERT(ins_len >= sizeof(struct btrfs_item));
1925 ins_len -= sizeof(struct btrfs_item);
1926 }
1927
1928 ASSERT(leaf_free_space >= 0);
1929
1930 if (leaf_free_space < ins_len) {
1931 int err;
1932
1933 err = split_leaf(trans, root, key, path, ins_len,
1934 (ret == 0));
1935 ASSERT(err <= 0);
1936 if (WARN_ON(err > 0))
1937 err = -EUCLEAN;
1938 if (err)
1939 ret = err;
1940 }
1941 }
1942
1943 return ret;
1944 }
1945
1946 /*
1947 * Look for a key in a tree and perform necessary modifications to preserve
1948 * tree invariants.
1949 *
1950 * @trans: Handle of transaction, used when modifying the tree
1951 * @p: Holds all btree nodes along the search path
1952 * @root: The root node of the tree
1953 * @key: The key we are looking for
1954 * @ins_len: Indicates purpose of search:
1955 * >0 for inserts, it's the size of the item inserted (*)
1956 * <0 for deletions
1957 * 0 for plain searches, not modifying the tree
1958 *
1959 * (*) If size of item inserted doesn't include
1960 * sizeof(struct btrfs_item), then p->search_for_extension must
1961 * be set.
1962 * @cow: whether CoW operations should be performed. Must always be 1
1963 * when modifying the tree.
1964 *
1965 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
1966 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
1967 *
1968 * If @key is found, 0 is returned and you can find the item in the leaf level
1969 * of the path (level 0)
1970 *
1971 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
1972 * points to the slot where it should be inserted
1973 *
1974 * If an error is encountered while searching the tree a negative error number
1975 * is returned
1976 */
1977 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1978 const struct btrfs_key *key, struct btrfs_path *p,
1979 int ins_len, int cow)
1980 {
1981 struct btrfs_fs_info *fs_info;
1982 struct extent_buffer *b;
1983 int slot;
1984 int ret;
1985 int err;
1986 int level;
1987 int lowest_unlock = 1;
1988 /* everything at write_lock_level or lower must be write locked */
1989 int write_lock_level = 0;
1990 u8 lowest_level = 0;
1991 int min_write_lock_level;
1992 int prev_cmp;
1993
1994 if (!root)
1995 return -EINVAL;
1996
1997 fs_info = root->fs_info;
1998 might_sleep();
1999
2000 lowest_level = p->lowest_level;
2001 WARN_ON(lowest_level && ins_len > 0);
2002 WARN_ON(p->nodes[0] != NULL);
2003 BUG_ON(!cow && ins_len);
2004
2005 /*
2006 * For now only allow nowait for read only operations. There's no
2007 * strict reason why we can't, we just only need it for reads so it's
2008 * only implemented for reads.
2009 */
2010 ASSERT(!p->nowait || !cow);
2011
2012 if (ins_len < 0) {
2013 lowest_unlock = 2;
2014
2015 /* when we are removing items, we might have to go up to level
2016 * two as we update tree pointers. Make sure we keep write
2017 * locks on those levels as well.
2018 */
2019 write_lock_level = 2;
2020 } else if (ins_len > 0) {
2021 /*
2022 * for inserting items, make sure we have a write lock on
2023 * level 1 so we can update keys
2024 */
2025 write_lock_level = 1;
2026 }
2027
2028 if (!cow)
2029 write_lock_level = -1;
2030
2031 if (cow && (p->keep_locks || p->lowest_level))
2032 write_lock_level = BTRFS_MAX_LEVEL;
2033
2034 min_write_lock_level = write_lock_level;
2035
2036 if (p->need_commit_sem) {
2037 ASSERT(p->search_commit_root);
2038 if (p->nowait) {
2039 if (!down_read_trylock(&fs_info->commit_root_sem))
2040 return -EAGAIN;
2041 } else {
2042 down_read(&fs_info->commit_root_sem);
2043 }
2044 }
2045
2046 again:
2047 prev_cmp = -1;
2048 b = btrfs_search_slot_get_root(root, p, write_lock_level);
2049 if (IS_ERR(b)) {
2050 ret = PTR_ERR(b);
2051 goto done;
2052 }
2053
2054 while (b) {
2055 int dec = 0;
2056
2057 level = btrfs_header_level(b);
2058
2059 if (cow) {
2060 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2061
2062 /*
2063 * if we don't really need to cow this block
2064 * then we don't want to set the path blocking,
2065 * so we test it here
2066 */
2067 if (!should_cow_block(trans, root, b))
2068 goto cow_done;
2069
2070 /*
2071 * must have write locks on this node and the
2072 * parent
2073 */
2074 if (level > write_lock_level ||
2075 (level + 1 > write_lock_level &&
2076 level + 1 < BTRFS_MAX_LEVEL &&
2077 p->nodes[level + 1])) {
2078 write_lock_level = level + 1;
2079 btrfs_release_path(p);
2080 goto again;
2081 }
2082
2083 if (last_level)
2084 err = btrfs_cow_block(trans, root, b, NULL, 0,
2085 &b,
2086 BTRFS_NESTING_COW);
2087 else
2088 err = btrfs_cow_block(trans, root, b,
2089 p->nodes[level + 1],
2090 p->slots[level + 1], &b,
2091 BTRFS_NESTING_COW);
2092 if (err) {
2093 ret = err;
2094 goto done;
2095 }
2096 }
2097 cow_done:
2098 p->nodes[level] = b;
2099
2100 /*
2101 * we have a lock on b and as long as we aren't changing
2102 * the tree, there is no way for the items in b to change.
2103 * It is safe to drop the lock on our parent before we
2104 * go through the expensive btree search on b.
2105 *
2106 * If we're inserting or deleting (ins_len != 0), then we might
2107 * be changing slot zero, which may require changing the parent.
2108 * So, we can't drop the lock until after we know which slot
2109 * we're operating on.
2110 */
2111 if (!ins_len && !p->keep_locks) {
2112 int u = level + 1;
2113
2114 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2115 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2116 p->locks[u] = 0;
2117 }
2118 }
2119
2120 if (level == 0) {
2121 if (ins_len > 0)
2122 ASSERT(write_lock_level >= 1);
2123
2124 ret = search_leaf(trans, root, key, p, ins_len, prev_cmp);
2125 if (!p->search_for_split)
2126 unlock_up(p, level, lowest_unlock,
2127 min_write_lock_level, NULL);
2128 goto done;
2129 }
2130
2131 ret = search_for_key_slot(b, 0, key, prev_cmp, &slot);
2132 if (ret < 0)
2133 goto done;
2134 prev_cmp = ret;
2135
2136 if (ret && slot > 0) {
2137 dec = 1;
2138 slot--;
2139 }
2140 p->slots[level] = slot;
2141 err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
2142 &write_lock_level);
2143 if (err == -EAGAIN)
2144 goto again;
2145 if (err) {
2146 ret = err;
2147 goto done;
2148 }
2149 b = p->nodes[level];
2150 slot = p->slots[level];
2151
2152 /*
2153 * Slot 0 is special, if we change the key we have to update
2154 * the parent pointer which means we must have a write lock on
2155 * the parent
2156 */
2157 if (slot == 0 && ins_len && write_lock_level < level + 1) {
2158 write_lock_level = level + 1;
2159 btrfs_release_path(p);
2160 goto again;
2161 }
2162
2163 unlock_up(p, level, lowest_unlock, min_write_lock_level,
2164 &write_lock_level);
2165
2166 if (level == lowest_level) {
2167 if (dec)
2168 p->slots[level]++;
2169 goto done;
2170 }
2171
2172 err = read_block_for_search(root, p, &b, slot, key);
2173 if (err == -EAGAIN && !p->nowait)
2174 goto again;
2175 if (err) {
2176 ret = err;
2177 goto done;
2178 }
2179
2180 if (!p->skip_locking) {
2181 level = btrfs_header_level(b);
2182
2183 btrfs_maybe_reset_lockdep_class(root, b);
2184
2185 if (level <= write_lock_level) {
2186 btrfs_tree_lock(b);
2187 p->locks[level] = BTRFS_WRITE_LOCK;
2188 } else {
2189 if (p->nowait) {
2190 if (!btrfs_try_tree_read_lock(b)) {
2191 free_extent_buffer(b);
2192 ret = -EAGAIN;
2193 goto done;
2194 }
2195 } else {
2196 btrfs_tree_read_lock(b);
2197 }
2198 p->locks[level] = BTRFS_READ_LOCK;
2199 }
2200 p->nodes[level] = b;
2201 }
2202 }
2203 ret = 1;
2204 done:
2205 if (ret < 0 && !p->skip_release_on_error)
2206 btrfs_release_path(p);
2207
2208 if (p->need_commit_sem) {
2209 int ret2;
2210
2211 ret2 = finish_need_commit_sem_search(p);
2212 up_read(&fs_info->commit_root_sem);
2213 if (ret2)
2214 ret = ret2;
2215 }
2216
2217 return ret;
2218 }
2219 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO);
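
/*
 * Illustrative sketch of a typical read-only lookup (hypothetical caller;
 * "ino" and "root" are assumed to be provided).  A NULL transaction handle
 * with ins_len == 0 and cow == 0 keeps the search from modifying the tree:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key = { .objectid = ino,
 *				 .type = BTRFS_INODE_ITEM_KEY,
 *				 .offset = 0 };
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 *	else if (ret > 0)
 *		ret = -ENOENT;
 *	btrfs_free_path(path);
 *	return ret;
 */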
2220
2221 /*
2222 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2223 * current state of the tree together with the operations recorded in the tree
2224 * modification log to search for the key in a previous version of this tree, as
2225 * denoted by the time_seq parameter.
2226 *
2227 * Naturally, there is no support for insert, delete or cow operations.
2228 *
2229 * The resulting path and return value will be set up as if we called
2230 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2231 */
2232 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2233 struct btrfs_path *p, u64 time_seq)
2234 {
2235 struct btrfs_fs_info *fs_info = root->fs_info;
2236 struct extent_buffer *b;
2237 int slot;
2238 int ret;
2239 int err;
2240 int level;
2241 int lowest_unlock = 1;
2242 u8 lowest_level = 0;
2243
2244 lowest_level = p->lowest_level;
2245 WARN_ON(p->nodes[0] != NULL);
2246 ASSERT(!p->nowait);
2247
2248 if (p->search_commit_root) {
2249 BUG_ON(time_seq);
2250 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2251 }
2252
2253 again:
2254 b = btrfs_get_old_root(root, time_seq);
2255 if (!b) {
2256 ret = -EIO;
2257 goto done;
2258 }
2259 level = btrfs_header_level(b);
2260 p->locks[level] = BTRFS_READ_LOCK;
2261
2262 while (b) {
2263 int dec = 0;
2264
2265 level = btrfs_header_level(b);
2266 p->nodes[level] = b;
2267
2268 /*
2269 * we have a lock on b and as long as we aren't changing
2270 * the tree, there is no way for the items in b to change.
2271 * It is safe to drop the lock on our parent before we
2272 * go through the expensive btree search on b.
2273 */
2274 btrfs_unlock_up_safe(p, level + 1);
2275
2276 ret = btrfs_bin_search(b, 0, key, &slot);
2277 if (ret < 0)
2278 goto done;
2279
2280 if (level == 0) {
2281 p->slots[level] = slot;
2282 unlock_up(p, level, lowest_unlock, 0, NULL);
2283 goto done;
2284 }
2285
2286 if (ret && slot > 0) {
2287 dec = 1;
2288 slot--;
2289 }
2290 p->slots[level] = slot;
2291 unlock_up(p, level, lowest_unlock, 0, NULL);
2292
2293 if (level == lowest_level) {
2294 if (dec)
2295 p->slots[level]++;
2296 goto done;
2297 }
2298
2299 err = read_block_for_search(root, p, &b, slot, key);
2300 if (err == -EAGAIN && !p->nowait)
2301 goto again;
2302 if (err) {
2303 ret = err;
2304 goto done;
2305 }
2306
2307 level = btrfs_header_level(b);
2308 btrfs_tree_read_lock(b);
2309 b = btrfs_tree_mod_log_rewind(fs_info, b, time_seq);
2310 if (!b) {
2311 ret = -ENOMEM;
2312 goto done;
2313 }
2314 p->locks[level] = BTRFS_READ_LOCK;
2315 p->nodes[level] = b;
2316 }
2317 ret = 1;
2318 done:
2319 if (ret < 0)
2320 btrfs_release_path(p);
2321
2322 return ret;
2323 }
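
/*
 * Illustrative sketch (hypothetical caller): replaying a search against an
 * older version of the tree.  The sequence number would typically be pinned
 * beforehand with btrfs_get_tree_mod_seq() so the relevant tree mod log
 * entries stay around while the search runs:
 *
 *	struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	ret = btrfs_search_old_slot(root, &key, path, elem.seq);
 *	...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */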
2324
2325 /*
2326 * Search the tree again to find a leaf with smaller keys.
2327 * Returns 0 if it found something.
2328 * Returns 1 if there are no smaller keys.
2329 * Returns < 0 on error.
2330 *
2331 * This may release the path, and so you may lose any locks held at the
2332 * time you call it.
2333 */
2334 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
2335 {
2336 struct btrfs_key key;
2337 struct btrfs_key orig_key;
2338 struct btrfs_disk_key found_key;
2339 int ret;
2340
2341 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
2342 orig_key = key;
2343
2344 if (key.offset > 0) {
2345 key.offset--;
2346 } else if (key.type > 0) {
2347 key.type--;
2348 key.offset = (u64)-1;
2349 } else if (key.objectid > 0) {
2350 key.objectid--;
2351 key.type = (u8)-1;
2352 key.offset = (u64)-1;
2353 } else {
2354 return 1;
2355 }
2356
2357 btrfs_release_path(path);
2358 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2359 if (ret <= 0)
2360 return ret;
2361
2362 /*
2363 * Previous key not found. Even if we were at slot 0 of the leaf we had
2364 * before releasing the path and calling btrfs_search_slot(), we now may
2365 * be in a slot pointing to the same original key - this can happen if
2366 * after we released the path, one or more items were moved from a
2367 * sibling leaf into the front of the leaf we had due to an insertion
2368 * (see push_leaf_right()).
2369 * If we hit this case and our slot is > 0, just decrement the slot
2370 * so that the caller does not process the same key again, which may or
2371 * may not break the caller, depending on its logic.
2372 */
2373 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
2374 btrfs_item_key(path->nodes[0], &found_key, path->slots[0]);
2375 ret = btrfs_comp_keys(&found_key, &orig_key);
2376 if (ret == 0) {
2377 if (path->slots[0] > 0) {
2378 path->slots[0]--;
2379 return 0;
2380 }
2381 /*
2382 * At slot 0, same key as before, it means orig_key is
2383 * the lowest, leftmost, key in the tree. We're done.
2384 */
2385 return 1;
2386 }
2387 }
2388
2389 btrfs_item_key(path->nodes[0], &found_key, 0);
2390 ret = btrfs_comp_keys(&found_key, &key);
2391 /*
2392 * We might have had an item with the previous key in the tree right
2393 * before we released our path. And after we released our path, that
2394 * item might have been pushed to the first slot (0) of the leaf we
2395 * were holding due to a tree balance. Alternatively, an item with the
2396 * previous key can exist as the only element of a leaf (big fat item).
2397 * Therefore account for these 2 cases, so that our callers (like
2398 * btrfs_previous_item) don't miss an existing item with a key matching
2399 * the previous key we computed above.
2400 */
2401 if (ret <= 0)
2402 return 0;
2403 return 1;
2404 }
2405
2406 /*
2407 * helper to use instead of search slot if no exact match is needed but
2408 * instead the next or previous item should be returned.
2409 * When find_higher is true, the next higher item is returned, the next lower
2410 * otherwise.
2411 * When return_any and find_higher are both true, and no higher item is found,
2412 * return the next lower instead.
2413 * When return_any is true and find_higher is false, and no lower item is found,
2414 * return the next higher instead.
2415 * It returns 0 if any item is found, 1 if none is found (tree empty), and
2416 * < 0 on error
2417 */
2418 int btrfs_search_slot_for_read(struct btrfs_root *root,
2419 const struct btrfs_key *key,
2420 struct btrfs_path *p, int find_higher,
2421 int return_any)
2422 {
2423 int ret;
2424 struct extent_buffer *leaf;
2425
2426 again:
2427 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2428 if (ret <= 0)
2429 return ret;
2430 /*
2431 * a return value of 1 means the path is at the position where the
2432 * item should be inserted. Normally this is the next bigger item,
2433 * but in case the previous item is the last in a leaf, path points
2434 * to the first free slot in the previous leaf, i.e. at an invalid
2435 * item.
2436 */
2437 leaf = p->nodes[0];
2438
2439 if (find_higher) {
2440 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2441 ret = btrfs_next_leaf(root, p);
2442 if (ret <= 0)
2443 return ret;
2444 if (!return_any)
2445 return 1;
2446 /*
2447 * no higher item found, return the next
2448 * lower instead
2449 */
2450 return_any = 0;
2451 find_higher = 0;
2452 btrfs_release_path(p);
2453 goto again;
2454 }
2455 } else {
2456 if (p->slots[0] == 0) {
2457 ret = btrfs_prev_leaf(root, p);
2458 if (ret < 0)
2459 return ret;
2460 if (!ret) {
2461 leaf = p->nodes[0];
2462 if (p->slots[0] == btrfs_header_nritems(leaf))
2463 p->slots[0]--;
2464 return 0;
2465 }
2466 if (!return_any)
2467 return 1;
2468 /*
2469 * no lower item found, return the next
2470 * higher instead
2471 */
2472 return_any = 0;
2473 find_higher = 1;
2474 btrfs_release_path(p);
2475 goto again;
2476 } else {
2477 --p->slots[0];
2478 }
2479 }
2480 return 0;
2481 }
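
/*
 * Illustrative sketch (hypothetical caller): find the item closest to the
 * given key, preferring the next higher one and falling back to the next
 * lower one if nothing higher exists (find_higher = 1, return_any = 1):
 *
 *	key.objectid = objectid;
 *	key.type = BTRFS_EXTENT_DATA_KEY;
 *	key.offset = start;
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *				      path->slots[0]);
 *	else if (ret > 0)
 *		...		(the tree is empty)
 */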
2482
2483 /*
2484 * Execute search and call btrfs_previous_item to traverse backwards if the item
2485 * was not found.
2486 *
2487 * Return 0 if found, 1 if not found and < 0 if error.
2488 */
2489 int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
2490 struct btrfs_path *path)
2491 {
2492 int ret;
2493
2494 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
2495 if (ret > 0)
2496 ret = btrfs_previous_item(root, path, key->objectid, key->type);
2497
2498 if (ret == 0)
2499 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]);
2500
2501 return ret;
2502 }
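
/*
 * Illustrative sketch (hypothetical caller): locate the last chunk item at
 * or before a given logical offset.  On success the key is updated to the
 * key of the item that was actually found:
 *
 *	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
 *	key.type = BTRFS_CHUNK_ITEM_KEY;
 *	key.offset = logical;
 *	ret = btrfs_search_backwards(chunk_root, &key, path);
 *	if (ret == 0)
 *		...		(key.offset is now <= logical)
 */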
2503
2504 /*
2505 * Search for a valid slot for the given path.
2506 *
2507 * @root: The root node of the tree.
2508 * @key: Will contain a valid item if found.
2509 * @path: The starting point to validate the slot.
2510 *
2511 * Return: 0 if the item is valid
2512 * 1 if not found
2513 * <0 if error.
2514 */
2515 int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
2516 struct btrfs_path *path)
2517 {
2518 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2519 int ret;
2520
2521 ret = btrfs_next_leaf(root, path);
2522 if (ret)
2523 return ret;
2524 }
2525
2526 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]);
2527 return 0;
2528 }
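
/*
 * Illustrative sketch (hypothetical caller): the usual forward iteration
 * pattern, letting btrfs_get_next_valid_item() hop to the next leaf whenever
 * the current slot runs past the end of the current leaf:
 *
 *	while (1) {
 *		ret = btrfs_get_next_valid_item(root, &key, path);
 *		if (ret)
 *			break;
 *		...		(process path->nodes[0], slot path->slots[0])
 *		path->slots[0]++;
 *	}
 */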
2529
2530 /*
2531 * adjust the pointers going up the tree, starting at level
2532 * making sure the right key of each node points to 'key'.
2533 * This is used after shifting pointers to the left, so it stops
2534 * fixing up pointers when a given leaf/node is not in slot 0 of the
2535 * higher levels
2536 *
2537 */
2538 static void fixup_low_keys(struct btrfs_trans_handle *trans,
2539 const struct btrfs_path *path,
2540 const struct btrfs_disk_key *key, int level)
2541 {
2542 int i;
2543 struct extent_buffer *t;
2544 int ret;
2545
2546 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2547 int tslot = path->slots[i];
2548
2549 if (!path->nodes[i])
2550 break;
2551 t = path->nodes[i];
2552 ret = btrfs_tree_mod_log_insert_key(t, tslot,
2553 BTRFS_MOD_LOG_KEY_REPLACE);
2554 BUG_ON(ret < 0);
2555 btrfs_set_node_key(t, key, tslot);
2556 btrfs_mark_buffer_dirty(trans, path->nodes[i]);
2557 if (tslot != 0)
2558 break;
2559 }
2560 }
2561
2562 /*
2563 * update item key.
2564 *
2565 * This function isn't completely safe. It's the caller's responsibility
2566 * to ensure that the new key won't break the key order.
2567 */
2568 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
2569 const struct btrfs_path *path,
2570 const struct btrfs_key *new_key)
2571 {
2572 struct btrfs_fs_info *fs_info = trans->fs_info;
2573 struct btrfs_disk_key disk_key;
2574 struct extent_buffer *eb;
2575 int slot;
2576
2577 eb = path->nodes[0];
2578 slot = path->slots[0];
2579 if (slot > 0) {
2580 btrfs_item_key(eb, &disk_key, slot - 1);
2581 if (unlikely(btrfs_comp_keys(&disk_key, new_key) >= 0)) {
2582 btrfs_print_leaf(eb);
2583 btrfs_crit(fs_info,
2584 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2585 slot, btrfs_disk_key_objectid(&disk_key),
2586 btrfs_disk_key_type(&disk_key),
2587 btrfs_disk_key_offset(&disk_key),
2588 new_key->objectid, new_key->type,
2589 new_key->offset);
2590 BUG();
2591 }
2592 }
2593 if (slot < btrfs_header_nritems(eb) - 1) {
2594 btrfs_item_key(eb, &disk_key, slot + 1);
2595 if (unlikely(btrfs_comp_keys(&disk_key, new_key) <= 0)) {
2596 btrfs_print_leaf(eb);
2597 btrfs_crit(fs_info,
2598 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2599 slot, btrfs_disk_key_objectid(&disk_key),
2600 btrfs_disk_key_type(&disk_key),
2601 btrfs_disk_key_offset(&disk_key),
2602 new_key->objectid, new_key->type,
2603 new_key->offset);
2604 BUG();
2605 }
2606 }
2607
2608 btrfs_cpu_key_to_disk(&disk_key, new_key);
2609 btrfs_set_item_key(eb, &disk_key, slot);
2610 btrfs_mark_buffer_dirty(trans, eb);
2611 if (slot == 0)
2612 fixup_low_keys(trans, path, &disk_key, 1);
2613 }
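
/*
 * Illustrative sketch (hypothetical caller): trimming the front of an item
 * by moving its key offset forward instead of deleting and re-inserting it.
 * The new key must still sort between the neighbouring keys, otherwise the
 * checks above fire:
 *
 *	btrfs_item_key_to_cpu(leaf, &new_key, path->slots[0]);
 *	new_key.offset += bytes_trimmed;
 *	btrfs_set_item_key_safe(trans, path, &new_key);
 */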
2614
2615 /*
2616 * Check key order of two sibling extent buffers.
2617 *
2618 * Return true if something is wrong.
2619 * Return false if everything is fine.
2620 *
2621 * Tree-checker only works inside one tree block, thus the following
2622 * corruption can not be detected by tree-checker:
2623 *
2624 * Leaf @left | Leaf @right
2625 * --------------------------------------------------------------
2626 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 |
2627 *
2628 * Key f6 in leaf @left itself is valid, but not valid when the next
2629 * key in leaf @right is 7.
2630 * This can only be checked at tree block merge time.
2631 * And since tree checker has ensured all key order in each tree block
2632 * is correct, we only need to bother the last key of @left and the first
2633 * key of @right.
2634 */
2635 static bool check_sibling_keys(const struct extent_buffer *left,
2636 const struct extent_buffer *right)
2637 {
2638 struct btrfs_key left_last;
2639 struct btrfs_key right_first;
2640 int level = btrfs_header_level(left);
2641 int nr_left = btrfs_header_nritems(left);
2642 int nr_right = btrfs_header_nritems(right);
2643
2644 /* No key to check in one of the tree blocks */
2645 if (!nr_left || !nr_right)
2646 return false;
2647
2648 if (level) {
2649 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1);
2650 btrfs_node_key_to_cpu(right, &right_first, 0);
2651 } else {
2652 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1);
2653 btrfs_item_key_to_cpu(right, &right_first, 0);
2654 }
2655
2656 if (unlikely(btrfs_comp_cpu_keys(&left_last, &right_first) >= 0)) {
2657 btrfs_crit(left->fs_info, "left extent buffer:");
2658 btrfs_print_tree(left, false);
2659 btrfs_crit(left->fs_info, "right extent buffer:");
2660 btrfs_print_tree(right, false);
2661 btrfs_crit(left->fs_info,
2662 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)",
2663 left_last.objectid, left_last.type,
2664 left_last.offset, right_first.objectid,
2665 right_first.type, right_first.offset);
2666 return true;
2667 }
2668 return false;
2669 }
2670
2671 /*
2672 * try to push data from one node into the next node left in the
2673 * tree.
2674 *
2675 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2676 * error, and > 0 if there was no room in the left hand block.
2677 */
2678 static int push_node_left(struct btrfs_trans_handle *trans,
2679 struct extent_buffer *dst,
2680 struct extent_buffer *src, int empty)
2681 {
2682 struct btrfs_fs_info *fs_info = trans->fs_info;
2683 int push_items = 0;
2684 int src_nritems;
2685 int dst_nritems;
2686 int ret = 0;
2687
2688 src_nritems = btrfs_header_nritems(src);
2689 dst_nritems = btrfs_header_nritems(dst);
2690 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2691 WARN_ON(btrfs_header_generation(src) != trans->transid);
2692 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2693
2694 if (!empty && src_nritems <= 8)
2695 return 1;
2696
2697 if (push_items <= 0)
2698 return 1;
2699
2700 if (empty) {
2701 push_items = min(src_nritems, push_items);
2702 if (push_items < src_nritems) {
2703 /* leave at least 8 pointers in the node if
2704 * we aren't going to empty it
2705 */
2706 if (src_nritems - push_items < 8) {
2707 if (push_items <= 8)
2708 return 1;
2709 push_items -= 8;
2710 }
2711 }
2712 } else
2713 push_items = min(src_nritems - 8, push_items);
2714
2715 /* dst is the left eb, src is the middle eb */
2716 if (check_sibling_keys(dst, src)) {
2717 ret = -EUCLEAN;
2718 btrfs_abort_transaction(trans, ret);
2719 return ret;
2720 }
2721 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
2722 if (ret) {
2723 btrfs_abort_transaction(trans, ret);
2724 return ret;
2725 }
2726 copy_extent_buffer(dst, src,
2727 btrfs_node_key_ptr_offset(dst, dst_nritems),
2728 btrfs_node_key_ptr_offset(src, 0),
2729 push_items * sizeof(struct btrfs_key_ptr));
2730
2731 if (push_items < src_nritems) {
2732 /*
2733 * btrfs_tree_mod_log_eb_copy handles logging the move, so we
2734 * don't need to do an explicit tree mod log operation for it.
2735 */
2736 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(src, 0),
2737 btrfs_node_key_ptr_offset(src, push_items),
2738 (src_nritems - push_items) *
2739 sizeof(struct btrfs_key_ptr));
2740 }
2741 btrfs_set_header_nritems(src, src_nritems - push_items);
2742 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2743 btrfs_mark_buffer_dirty(trans, src);
2744 btrfs_mark_buffer_dirty(trans, dst);
2745
2746 return ret;
2747 }
2748
2749 /*
2750 * try to push data from one node into the next node right in the
2751 * tree.
2752 *
2753 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2754 * error, and > 0 if there was no room in the right hand block.
2755 *
2756 * this will only push up to 1/2 the contents of the left node over
2757 */
2758 static int balance_node_right(struct btrfs_trans_handle *trans,
2759 struct extent_buffer *dst,
2760 struct extent_buffer *src)
2761 {
2762 struct btrfs_fs_info *fs_info = trans->fs_info;
2763 int push_items = 0;
2764 int max_push;
2765 int src_nritems;
2766 int dst_nritems;
2767 int ret = 0;
2768
2769 WARN_ON(btrfs_header_generation(src) != trans->transid);
2770 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2771
2772 src_nritems = btrfs_header_nritems(src);
2773 dst_nritems = btrfs_header_nritems(dst);
2774 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2775 if (push_items <= 0)
2776 return 1;
2777
2778 if (src_nritems < 4)
2779 return 1;
2780
2781 max_push = src_nritems / 2 + 1;
2782 /* don't try to empty the node */
2783 if (max_push >= src_nritems)
2784 return 1;
2785
2786 if (max_push < push_items)
2787 push_items = max_push;
2788
2789 /* dst is the right eb, src is the middle eb */
2790 if (check_sibling_keys(src, dst)) {
2791 ret = -EUCLEAN;
2792 btrfs_abort_transaction(trans, ret);
2793 return ret;
2794 }
2795
2796 /*
2797 * btrfs_tree_mod_log_eb_copy handles logging the move, so we don't
2798 * need to do an explicit tree mod log operation for it.
2799 */
2800 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(dst, push_items),
2801 btrfs_node_key_ptr_offset(dst, 0),
2802 (dst_nritems) *
2803 sizeof(struct btrfs_key_ptr));
2804
2805 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
2806 push_items);
2807 if (ret) {
2808 btrfs_abort_transaction(trans, ret);
2809 return ret;
2810 }
2811 copy_extent_buffer(dst, src,
2812 btrfs_node_key_ptr_offset(dst, 0),
2813 btrfs_node_key_ptr_offset(src, src_nritems - push_items),
2814 push_items * sizeof(struct btrfs_key_ptr));
2815
2816 btrfs_set_header_nritems(src, src_nritems - push_items);
2817 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2818
2819 btrfs_mark_buffer_dirty(trans, src);
2820 btrfs_mark_buffer_dirty(trans, dst);
2821
2822 return ret;
2823 }
2824
2825 /*
2826 * helper function to insert a new root level in the tree.
2827 * A new node is allocated, and a single item is inserted to
2828 * point to the existing root
2829 *
2830 * returns zero on success or < 0 on failure.
2831 */
2832 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2833 struct btrfs_root *root,
2834 struct btrfs_path *path, int level)
2835 {
2836 u64 lower_gen;
2837 struct extent_buffer *lower;
2838 struct extent_buffer *c;
2839 struct extent_buffer *old;
2840 struct btrfs_disk_key lower_key;
2841 int ret;
2842
2843 BUG_ON(path->nodes[level]);
2844 BUG_ON(path->nodes[level-1] != root->node);
2845
2846 lower = path->nodes[level-1];
2847 if (level == 1)
2848 btrfs_item_key(lower, &lower_key, 0);
2849 else
2850 btrfs_node_key(lower, &lower_key, 0);
2851
2852 c = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root),
2853 &lower_key, level, root->node->start, 0,
2854 0, BTRFS_NESTING_NEW_ROOT);
2855 if (IS_ERR(c))
2856 return PTR_ERR(c);
2857
2858 root_add_used_bytes(root);
2859
2860 btrfs_set_header_nritems(c, 1);
2861 btrfs_set_node_key(c, &lower_key, 0);
2862 btrfs_set_node_blockptr(c, 0, lower->start);
2863 lower_gen = btrfs_header_generation(lower);
2864 WARN_ON(lower_gen != trans->transid);
2865
2866 btrfs_set_node_ptr_generation(c, 0, lower_gen);
2867
2868 btrfs_mark_buffer_dirty(trans, c);
2869
2870 old = root->node;
2871 ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
2872 if (ret < 0) {
2873 int ret2;
2874
2875 ret2 = btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1);
2876 if (ret2 < 0)
2877 btrfs_abort_transaction(trans, ret2);
2878 btrfs_tree_unlock(c);
2879 free_extent_buffer(c);
2880 return ret;
2881 }
2882 rcu_assign_pointer(root->node, c);
2883
2884 /* the super has an extra ref to root->node */
2885 free_extent_buffer(old);
2886
2887 add_root_to_dirty_list(root);
2888 atomic_inc(&c->refs);
2889 path->nodes[level] = c;
2890 path->locks[level] = BTRFS_WRITE_LOCK;
2891 path->slots[level] = 0;
2892 return 0;
2893 }
2894
2895 /*
2896 * worker function to insert a single pointer in a node.
2897 * the node should have enough room for the pointer already
2898 *
2899 * slot and level indicate where you want the key to go, and
2900 * bytenr is the block the key points to.
2901 */
2902 static int insert_ptr(struct btrfs_trans_handle *trans,
2903 const struct btrfs_path *path,
2904 const struct btrfs_disk_key *key, u64 bytenr,
2905 int slot, int level)
2906 {
2907 struct extent_buffer *lower;
2908 int nritems;
2909 int ret;
2910
2911 BUG_ON(!path->nodes[level]);
2912 btrfs_assert_tree_write_locked(path->nodes[level]);
2913 lower = path->nodes[level];
2914 nritems = btrfs_header_nritems(lower);
2915 BUG_ON(slot > nritems);
2916 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
2917 if (slot != nritems) {
2918 if (level) {
2919 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1,
2920 slot, nritems - slot);
2921 if (ret < 0) {
2922 btrfs_abort_transaction(trans, ret);
2923 return ret;
2924 }
2925 }
2926 memmove_extent_buffer(lower,
2927 btrfs_node_key_ptr_offset(lower, slot + 1),
2928 btrfs_node_key_ptr_offset(lower, slot),
2929 (nritems - slot) * sizeof(struct btrfs_key_ptr));
2930 }
2931 if (level) {
2932 ret = btrfs_tree_mod_log_insert_key(lower, slot,
2933 BTRFS_MOD_LOG_KEY_ADD);
2934 if (ret < 0) {
2935 btrfs_abort_transaction(trans, ret);
2936 return ret;
2937 }
2938 }
2939 btrfs_set_node_key(lower, key, slot);
2940 btrfs_set_node_blockptr(lower, slot, bytenr);
2941 WARN_ON(trans->transid == 0);
2942 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2943 btrfs_set_header_nritems(lower, nritems + 1);
2944 btrfs_mark_buffer_dirty(trans, lower);
2945
2946 return 0;
2947 }
2948
2949 /*
2950 * split the node at the specified level in path in two.
2951 * The path is corrected to point to the appropriate node after the split
2952 *
2953 * Before splitting this tries to make some room in the node by pushing
2954 * left and right, if either one works, it returns right away.
2955 *
2956 * returns 0 on success and < 0 on failure
2957 */
2958 static noinline int split_node(struct btrfs_trans_handle *trans,
2959 struct btrfs_root *root,
2960 struct btrfs_path *path, int level)
2961 {
2962 struct btrfs_fs_info *fs_info = root->fs_info;
2963 struct extent_buffer *c;
2964 struct extent_buffer *split;
2965 struct btrfs_disk_key disk_key;
2966 int mid;
2967 int ret;
2968 u32 c_nritems;
2969
2970 c = path->nodes[level];
2971 WARN_ON(btrfs_header_generation(c) != trans->transid);
2972 if (c == root->node) {
2973 /*
2974 * trying to split the root, let's make a new one
2975 *
2976 * tree mod log: We don't log the removal of the old root in
2977 * insert_new_root, because that root buffer will be kept as a
2978 * normal node. We are going to log removal of half of the
2979 * elements below with btrfs_tree_mod_log_eb_copy(). We're
2980 * holding a tree lock on the buffer, which is why we cannot
2981 * race with other tree_mod_log users.
2982 */
2983 ret = insert_new_root(trans, root, path, level + 1);
2984 if (ret)
2985 return ret;
2986 } else {
2987 ret = push_nodes_for_insert(trans, root, path, level);
2988 c = path->nodes[level];
2989 if (!ret && btrfs_header_nritems(c) <
2990 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
2991 return 0;
2992 if (ret < 0)
2993 return ret;
2994 }
2995
2996 c_nritems = btrfs_header_nritems(c);
2997 mid = (c_nritems + 1) / 2;
2998 btrfs_node_key(c, &disk_key, mid);
2999
3000 split = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root),
3001 &disk_key, level, c->start, 0,
3002 0, BTRFS_NESTING_SPLIT);
3003 if (IS_ERR(split))
3004 return PTR_ERR(split);
3005
3006 root_add_used_bytes(root);
3007 ASSERT(btrfs_header_level(c) == level);
3008
3009 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
3010 if (ret) {
3011 btrfs_tree_unlock(split);
3012 free_extent_buffer(split);
3013 btrfs_abort_transaction(trans, ret);
3014 return ret;
3015 }
3016 copy_extent_buffer(split, c,
3017 btrfs_node_key_ptr_offset(split, 0),
3018 btrfs_node_key_ptr_offset(c, mid),
3019 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3020 btrfs_set_header_nritems(split, c_nritems - mid);
3021 btrfs_set_header_nritems(c, mid);
3022
3023 btrfs_mark_buffer_dirty(trans, c);
3024 btrfs_mark_buffer_dirty(trans, split);
3025
3026 ret = insert_ptr(trans, path, &disk_key, split->start,
3027 path->slots[level + 1] + 1, level + 1);
3028 if (ret < 0) {
3029 btrfs_tree_unlock(split);
3030 free_extent_buffer(split);
3031 return ret;
3032 }
3033
3034 if (path->slots[level] >= mid) {
3035 path->slots[level] -= mid;
3036 btrfs_tree_unlock(c);
3037 free_extent_buffer(c);
3038 path->nodes[level] = split;
3039 path->slots[level + 1] += 1;
3040 } else {
3041 btrfs_tree_unlock(split);
3042 free_extent_buffer(split);
3043 }
3044 return 0;
3045 }
3046
3047 /*
3048 * how many bytes are required to store the items in a leaf. start
3049 * and nr indicate which items in the leaf to check. This totals up the
3050 * space used both by the item structs and the item data
3051 */
3052 static int leaf_space_used(const struct extent_buffer *l, int start, int nr)
3053 {
3054 int data_len;
3055 int nritems = btrfs_header_nritems(l);
3056 int end = min(nritems, start + nr) - 1;
3057
3058 if (!nr)
3059 return 0;
3060 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start);
3061 data_len = data_len - btrfs_item_offset(l, end);
3062 data_len += sizeof(struct btrfs_item) * nr;
3063 WARN_ON(data_len < 0);
3064 return data_len;
3065 }
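
/*
 * Worked example with assumed sizes: for a leaf whose first two items carry
 * 100 and 50 bytes of data, leaf_space_used(leaf, 0, 2) adds up
 *
 *	data:	 (offset(0) + size(0)) - offset(1) = 100 + 50 = 150 bytes
 *	structs: 2 * sizeof(struct btrfs_item)	   = 2 * 25   =  50 bytes
 *	total:						200 bytes
 *
 * Item data is laid out back to front, so offset(0) sits 100 bytes below the
 * end of the leaf data area and offset(1) another 50 bytes below that.
 */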
3066
3067 /*
3068 * The space between the end of the leaf items and
3069 * the start of the leaf data. IOW, how much room
3070 * the leaf has left for both items and data
3071 */
3072 int btrfs_leaf_free_space(const struct extent_buffer *leaf)
3073 {
3074 struct btrfs_fs_info *fs_info = leaf->fs_info;
3075 int nritems = btrfs_header_nritems(leaf);
3076 int ret;
3077
3078 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3079 if (ret < 0) {
3080 btrfs_crit(fs_info,
3081 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3082 ret,
3083 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3084 leaf_space_used(leaf, 0, nritems), nritems);
3085 }
3086 return ret;
3087 }
3088
3089 /*
3090 * min slot controls the lowest index we're willing to push to the
3091 * right. We'll push up to and including min_slot, but no lower
3092 */
3093 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3094 struct btrfs_path *path,
3095 int data_size, int empty,
3096 struct extent_buffer *right,
3097 int free_space, u32 left_nritems,
3098 u32 min_slot)
3099 {
3100 struct btrfs_fs_info *fs_info = right->fs_info;
3101 struct extent_buffer *left = path->nodes[0];
3102 struct extent_buffer *upper = path->nodes[1];
3103 struct btrfs_map_token token;
3104 struct btrfs_disk_key disk_key;
3105 int slot;
3106 u32 i;
3107 int push_space = 0;
3108 int push_items = 0;
3109 u32 nr;
3110 u32 right_nritems;
3111 u32 data_end;
3112 u32 this_item_size;
3113
3114 if (empty)
3115 nr = 0;
3116 else
3117 nr = max_t(u32, 1, min_slot);
3118
3119 if (path->slots[0] >= left_nritems)
3120 push_space += data_size;
3121
3122 slot = path->slots[1];
3123 i = left_nritems - 1;
3124 while (i >= nr) {
3125 if (!empty && push_items > 0) {
3126 if (path->slots[0] > i)
3127 break;
3128 if (path->slots[0] == i) {
3129 int space = btrfs_leaf_free_space(left);
3130
3131 if (space + push_space * 2 > free_space)
3132 break;
3133 }
3134 }
3135
3136 if (path->slots[0] == i)
3137 push_space += data_size;
3138
3139 this_item_size = btrfs_item_size(left, i);
3140 if (this_item_size + sizeof(struct btrfs_item) +
3141 push_space > free_space)
3142 break;
3143
3144 push_items++;
3145 push_space += this_item_size + sizeof(struct btrfs_item);
3146 if (i == 0)
3147 break;
3148 i--;
3149 }
3150
3151 if (push_items == 0)
3152 goto out_unlock;
3153
3154 WARN_ON(!empty && push_items == left_nritems);
3155
3156 /* push left to right */
3157 right_nritems = btrfs_header_nritems(right);
3158
3159 push_space = btrfs_item_data_end(left, left_nritems - push_items);
3160 push_space -= leaf_data_end(left);
3161
3162 /* make room in the right data area */
3163 data_end = leaf_data_end(right);
3164 memmove_leaf_data(right, data_end - push_space, data_end,
3165 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3166
3167 /* copy from the left data area */
3168 copy_leaf_data(right, left, BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3169 leaf_data_end(left), push_space);
3170
3171 memmove_leaf_items(right, push_items, 0, right_nritems);
3172
3173 /* copy the items from left to right */
3174 copy_leaf_items(right, left, 0, left_nritems - push_items, push_items);
3175
3176 /* update the item pointers */
3177 btrfs_init_map_token(&token, right);
3178 right_nritems += push_items;
3179 btrfs_set_header_nritems(right, right_nritems);
3180 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3181 for (i = 0; i < right_nritems; i++) {
3182 push_space -= btrfs_token_item_size(&token, i);
3183 btrfs_set_token_item_offset(&token, i, push_space);
3184 }
3185
3186 left_nritems -= push_items;
3187 btrfs_set_header_nritems(left, left_nritems);
3188
3189 if (left_nritems)
3190 btrfs_mark_buffer_dirty(trans, left);
3191 else
3192 btrfs_clear_buffer_dirty(trans, left);
3193
3194 btrfs_mark_buffer_dirty(trans, right);
3195
3196 btrfs_item_key(right, &disk_key, 0);
3197 btrfs_set_node_key(upper, &disk_key, slot + 1);
3198 btrfs_mark_buffer_dirty(trans, upper);
3199
3200 /* then fixup the leaf pointer in the path */
3201 if (path->slots[0] >= left_nritems) {
3202 path->slots[0] -= left_nritems;
3203 if (btrfs_header_nritems(path->nodes[0]) == 0)
3204 btrfs_clear_buffer_dirty(trans, path->nodes[0]);
3205 btrfs_tree_unlock(path->nodes[0]);
3206 free_extent_buffer(path->nodes[0]);
3207 path->nodes[0] = right;
3208 path->slots[1] += 1;
3209 } else {
3210 btrfs_tree_unlock(right);
3211 free_extent_buffer(right);
3212 }
3213 return 0;
3214
3215 out_unlock:
3216 btrfs_tree_unlock(right);
3217 free_extent_buffer(right);
3218 return 1;
3219 }
3220
3221 /*
3222 * push some data in the path leaf to the right, trying to free up at
3223 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3224 *
3225 * returns 1 if the push failed because the other node didn't have enough
3226 * room, 0 if everything worked out and < 0 if there were major errors.
3227 *
3228 * this will push starting from min_slot to the end of the leaf. It won't
3229 * push any slot lower than min_slot
3230 */
3231 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3232 *root, struct btrfs_path *path,
3233 int min_data_size, int data_size,
3234 int empty, u32 min_slot)
3235 {
3236 struct extent_buffer *left = path->nodes[0];
3237 struct extent_buffer *right;
3238 struct extent_buffer *upper;
3239 int slot;
3240 int free_space;
3241 u32 left_nritems;
3242 int ret;
3243
3244 if (!path->nodes[1])
3245 return 1;
3246
3247 slot = path->slots[1];
3248 upper = path->nodes[1];
3249 if (slot >= btrfs_header_nritems(upper) - 1)
3250 return 1;
3251
3252 btrfs_assert_tree_write_locked(path->nodes[1]);
3253
3254 right = btrfs_read_node_slot(upper, slot + 1);
3255 if (IS_ERR(right))
3256 return PTR_ERR(right);
3257
3258 btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT);
3259
3260 free_space = btrfs_leaf_free_space(right);
3261 if (free_space < data_size)
3262 goto out_unlock;
3263
3264 ret = btrfs_cow_block(trans, root, right, upper,
3265 slot + 1, &right, BTRFS_NESTING_RIGHT_COW);
3266 if (ret)
3267 goto out_unlock;
3268
3269 left_nritems = btrfs_header_nritems(left);
3270 if (left_nritems == 0)
3271 goto out_unlock;
3272
3273 if (check_sibling_keys(left, right)) {
3274 ret = -EUCLEAN;
3275 btrfs_abort_transaction(trans, ret);
3276 btrfs_tree_unlock(right);
3277 free_extent_buffer(right);
3278 return ret;
3279 }
3280 if (path->slots[0] == left_nritems && !empty) {
3281 /* Key greater than all keys in the leaf, right neighbor has
3282 * enough room for it and we're not emptying our leaf to delete
3283 * it, therefore use right neighbor to insert the new item and
3284 * no need to touch/dirty our left leaf. */
3285 btrfs_tree_unlock(left);
3286 free_extent_buffer(left);
3287 path->nodes[0] = right;
3288 path->slots[0] = 0;
3289 path->slots[1]++;
3290 return 0;
3291 }
3292
3293 return __push_leaf_right(trans, path, min_data_size, empty, right,
3294 free_space, left_nritems, min_slot);
3295 out_unlock:
3296 btrfs_tree_unlock(right);
3297 free_extent_buffer(right);
3298 return 1;
3299 }
3300
3301 /*
3302 * push some data in the path leaf to the left, trying to free up at
3303 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3304 *
3305 * max_slot can put a limit on how far into the leaf we'll push items. The
3306 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3307 * items
3308 */
3309 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3310 struct btrfs_path *path, int data_size,
3311 int empty, struct extent_buffer *left,
3312 int free_space, u32 right_nritems,
3313 u32 max_slot)
3314 {
3315 struct btrfs_fs_info *fs_info = left->fs_info;
3316 struct btrfs_disk_key disk_key;
3317 struct extent_buffer *right = path->nodes[0];
3318 int i;
3319 int push_space = 0;
3320 int push_items = 0;
3321 u32 old_left_nritems;
3322 u32 nr;
3323 int ret = 0;
3324 u32 this_item_size;
3325 u32 old_left_item_size;
3326 struct btrfs_map_token token;
3327
3328 if (empty)
3329 nr = min(right_nritems, max_slot);
3330 else
3331 nr = min(right_nritems - 1, max_slot);
3332
3333 for (i = 0; i < nr; i++) {
3334 if (!empty && push_items > 0) {
3335 if (path->slots[0] < i)
3336 break;
3337 if (path->slots[0] == i) {
3338 int space = btrfs_leaf_free_space(right);
3339
3340 if (space + push_space * 2 > free_space)
3341 break;
3342 }
3343 }
3344
3345 if (path->slots[0] == i)
3346 push_space += data_size;
3347
3348 this_item_size = btrfs_item_size(right, i);
3349 if (this_item_size + sizeof(struct btrfs_item) + push_space >
3350 free_space)
3351 break;
3352
3353 push_items++;
3354 push_space += this_item_size + sizeof(struct btrfs_item);
3355 }
3356
3357 if (push_items == 0) {
3358 ret = 1;
3359 goto out;
3360 }
3361 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3362
3363 /* push data from right to left */
3364 copy_leaf_items(left, right, btrfs_header_nritems(left), 0, push_items);
3365
3366 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3367 btrfs_item_offset(right, push_items - 1);
3368
3369 copy_leaf_data(left, right, leaf_data_end(left) - push_space,
3370 btrfs_item_offset(right, push_items - 1), push_space);
3371 old_left_nritems = btrfs_header_nritems(left);
3372 BUG_ON(old_left_nritems <= 0);
3373
3374 btrfs_init_map_token(&token, left);
3375 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1);
3376 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3377 u32 ioff;
3378
3379 ioff = btrfs_token_item_offset(&token, i);
3380 btrfs_set_token_item_offset(&token, i,
3381 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size));
3382 }
3383 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3384
3385 /* fixup right node */
3386 if (push_items > right_nritems)
3387 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3388 right_nritems);
3389
3390 if (push_items < right_nritems) {
3391 push_space = btrfs_item_offset(right, push_items - 1) -
3392 leaf_data_end(right);
3393 memmove_leaf_data(right,
3394 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3395 leaf_data_end(right), push_space);
3396
3397 memmove_leaf_items(right, 0, push_items,
3398 btrfs_header_nritems(right) - push_items);
3399 }
3400
3401 btrfs_init_map_token(&token, right);
3402 right_nritems -= push_items;
3403 btrfs_set_header_nritems(right, right_nritems);
3404 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3405 for (i = 0; i < right_nritems; i++) {
3406 push_space = push_space - btrfs_token_item_size(&token, i);
3407 btrfs_set_token_item_offset(&token, i, push_space);
3408 }
3409
3410 btrfs_mark_buffer_dirty(trans, left);
3411 if (right_nritems)
3412 btrfs_mark_buffer_dirty(trans, right);
3413 else
3414 btrfs_clear_buffer_dirty(trans, right);
3415
3416 btrfs_item_key(right, &disk_key, 0);
3417 fixup_low_keys(trans, path, &disk_key, 1);
3418
3419 /* then fixup the leaf pointer in the path */
3420 if (path->slots[0] < push_items) {
3421 path->slots[0] += old_left_nritems;
3422 btrfs_tree_unlock(path->nodes[0]);
3423 free_extent_buffer(path->nodes[0]);
3424 path->nodes[0] = left;
3425 path->slots[1] -= 1;
3426 } else {
3427 btrfs_tree_unlock(left);
3428 free_extent_buffer(left);
3429 path->slots[0] -= push_items;
3430 }
3431 BUG_ON(path->slots[0] < 0);
3432 return ret;
3433 out:
3434 btrfs_tree_unlock(left);
3435 free_extent_buffer(left);
3436 return ret;
3437 }
3438
3439 /*
3440 * push some data in the path leaf to the left, trying to free up at
3441 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3442 *
3443 * max_slot can put a limit on how far into the leaf we'll push items. The
3444 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3445 * items
3446 */
3447 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3448 *root, struct btrfs_path *path, int min_data_size,
3449 int data_size, int empty, u32 max_slot)
3450 {
3451 struct extent_buffer *right = path->nodes[0];
3452 struct extent_buffer *left;
3453 int slot;
3454 int free_space;
3455 u32 right_nritems;
3456 int ret = 0;
3457
3458 slot = path->slots[1];
3459 if (slot == 0)
3460 return 1;
3461 if (!path->nodes[1])
3462 return 1;
3463
3464 right_nritems = btrfs_header_nritems(right);
3465 if (right_nritems == 0)
3466 return 1;
3467
3468 btrfs_assert_tree_write_locked(path->nodes[1]);
3469
3470 left = btrfs_read_node_slot(path->nodes[1], slot - 1);
3471 if (IS_ERR(left))
3472 return PTR_ERR(left);
3473
3474 btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);
3475
3476 free_space = btrfs_leaf_free_space(left);
3477 if (free_space < data_size) {
3478 ret = 1;
3479 goto out;
3480 }
3481
3482 ret = btrfs_cow_block(trans, root, left,
3483 path->nodes[1], slot - 1, &left,
3484 BTRFS_NESTING_LEFT_COW);
3485 if (ret) {
3486 /* we hit -ENOSPC, but it isn't fatal here */
3487 if (ret == -ENOSPC)
3488 ret = 1;
3489 goto out;
3490 }
3491
3492 if (check_sibling_keys(left, right)) {
3493 ret = -EUCLEAN;
3494 btrfs_abort_transaction(trans, ret);
3495 goto out;
3496 }
3497 return __push_leaf_left(trans, path, min_data_size, empty, left,
3498 free_space, right_nritems, max_slot);
3499 out:
3500 btrfs_tree_unlock(left);
3501 free_extent_buffer(left);
3502 return ret;
3503 }
3504
3505 /*
3506 * split the path's leaf in two, making sure there is at least data_size
3507 * available for the resulting leaf level of the path.
3508 */
3509 static noinline int copy_for_split(struct btrfs_trans_handle *trans,
3510 struct btrfs_path *path,
3511 struct extent_buffer *l,
3512 struct extent_buffer *right,
3513 int slot, int mid, int nritems)
3514 {
3515 struct btrfs_fs_info *fs_info = trans->fs_info;
3516 int data_copy_size;
3517 int rt_data_off;
3518 int i;
3519 int ret;
3520 struct btrfs_disk_key disk_key;
3521 struct btrfs_map_token token;
3522
3523 nritems = nritems - mid;
3524 btrfs_set_header_nritems(right, nritems);
3525 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l);
3526
3527 copy_leaf_items(right, l, 0, mid, nritems);
3528
3529 copy_leaf_data(right, l, BTRFS_LEAF_DATA_SIZE(fs_info) - data_copy_size,
3530 leaf_data_end(l), data_copy_size);
3531
3532 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid);
3533
3534 btrfs_init_map_token(&token, right);
3535 for (i = 0; i < nritems; i++) {
3536 u32 ioff;
3537
3538 ioff = btrfs_token_item_offset(&token, i);
3539 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off);
3540 }
3541
3542 btrfs_set_header_nritems(l, mid);
3543 btrfs_item_key(right, &disk_key, 0);
3544 ret = insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
3545 if (ret < 0)
3546 return ret;
3547
3548 btrfs_mark_buffer_dirty(trans, right);
3549 btrfs_mark_buffer_dirty(trans, l);
3550 BUG_ON(path->slots[0] != slot);
3551
3552 if (mid <= slot) {
3553 btrfs_tree_unlock(path->nodes[0]);
3554 free_extent_buffer(path->nodes[0]);
3555 path->nodes[0] = right;
3556 path->slots[0] -= mid;
3557 path->slots[1] += 1;
3558 } else {
3559 btrfs_tree_unlock(right);
3560 free_extent_buffer(right);
3561 }
3562
3563 BUG_ON(path->slots[0] < 0);
3564
3565 return 0;
3566 }
3567
3568 /*
3569 * double splits happen when we need to insert a big item in the middle
3570 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3571 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3572 * A B C
3573 *
3574 * We avoid this by trying to push the items on either side of our target
3575 * into the adjacent leaves. If all goes well we can avoid the double split
3576 * completely.
3577 */
3578 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3579 struct btrfs_root *root,
3580 struct btrfs_path *path,
3581 int data_size)
3582 {
3583 int ret;
3584 int progress = 0;
3585 int slot;
3586 u32 nritems;
3587 int space_needed = data_size;
3588
3589 slot = path->slots[0];
3590 if (slot < btrfs_header_nritems(path->nodes[0]))
3591 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3592
3593 /*
3594 * try to push all the items after our slot into the
3595 * right leaf
3596 */
3597 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
3598 if (ret < 0)
3599 return ret;
3600
3601 if (ret == 0)
3602 progress++;
3603
3604 nritems = btrfs_header_nritems(path->nodes[0]);
3605 /*
3606 * our goal is to get our slot at the start or end of a leaf. If
3607 * we've done so we're done
3608 */
3609 if (path->slots[0] == 0 || path->slots[0] == nritems)
3610 return 0;
3611
3612 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3613 return 0;
3614
3615 /* try to push all the items before our slot into the left leaf */
3616 slot = path->slots[0];
3617 space_needed = data_size;
3618 if (slot > 0)
3619 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3620 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
3621 if (ret < 0)
3622 return ret;
3623
3624 if (ret == 0)
3625 progress++;
3626
3627 if (progress)
3628 return 0;
3629 return 1;
3630 }
3631
3632 /*
3633 * split the path's leaf in two, making sure there is at least data_size
3634 * of free space available in the leaf the path ends up pointing to.
3635 *
3636 * returns 0 if all went well and < 0 on failure.
3637 */
3638 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3639 struct btrfs_root *root,
3640 const struct btrfs_key *ins_key,
3641 struct btrfs_path *path, int data_size,
3642 int extend)
3643 {
3644 struct btrfs_disk_key disk_key;
3645 struct extent_buffer *l;
3646 u32 nritems;
3647 int mid;
3648 int slot;
3649 struct extent_buffer *right;
3650 struct btrfs_fs_info *fs_info = root->fs_info;
3651 int ret = 0;
3652 int wret;
3653 int split;
3654 int num_doubles = 0;
3655 int tried_avoid_double = 0;
3656
3657 l = path->nodes[0];
3658 slot = path->slots[0];
3659 if (extend && data_size + btrfs_item_size(l, slot) +
3660 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
3661 return -EOVERFLOW;
3662
3663 /* first try to make some room by pushing left and right */
3664 if (data_size && path->nodes[1]) {
3665 int space_needed = data_size;
3666
3667 if (slot < btrfs_header_nritems(l))
3668 space_needed -= btrfs_leaf_free_space(l);
3669
3670 wret = push_leaf_right(trans, root, path, space_needed,
3671 space_needed, 0, 0);
3672 if (wret < 0)
3673 return wret;
3674 if (wret) {
3675 space_needed = data_size;
3676 if (slot > 0)
3677 space_needed -= btrfs_leaf_free_space(l);
3678 wret = push_leaf_left(trans, root, path, space_needed,
3679 space_needed, 0, (u32)-1);
3680 if (wret < 0)
3681 return wret;
3682 }
3683 l = path->nodes[0];
3684
3685 /* did the pushes work? */
3686 if (btrfs_leaf_free_space(l) >= data_size)
3687 return 0;
3688 }
3689
3690 if (!path->nodes[1]) {
3691 ret = insert_new_root(trans, root, path, 1);
3692 if (ret)
3693 return ret;
3694 }
3695 again:
3696 split = 1;
3697 l = path->nodes[0];
3698 slot = path->slots[0];
3699 nritems = btrfs_header_nritems(l);
3700 mid = (nritems + 1) / 2;
3701
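/*
 * Decide how to split.  'split' ends up as:
 * 0 - don't move any existing items, just link a new empty leaf next to
 *     this one and insert the new item there;
 * 1 - a single split at 'mid' is enough;
 * 2 - even after splitting at the target slot there is not enough room,
 *     so a second (double) split will be needed.
 */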
3702 if (mid <= slot) {
3703 if (nritems == 1 ||
3704 leaf_space_used(l, mid, nritems - mid) + data_size >
3705 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3706 if (slot >= nritems) {
3707 split = 0;
3708 } else {
3709 mid = slot;
3710 if (mid != nritems &&
3711 leaf_space_used(l, mid, nritems - mid) +
3712 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3713 if (data_size && !tried_avoid_double)
3714 goto push_for_double;
3715 split = 2;
3716 }
3717 }
3718 }
3719 } else {
3720 if (leaf_space_used(l, 0, mid) + data_size >
3721 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3722 if (!extend && data_size && slot == 0) {
3723 split = 0;
3724 } else if ((extend || !data_size) && slot == 0) {
3725 mid = 1;
3726 } else {
3727 mid = slot;
3728 if (mid != nritems &&
3729 leaf_space_used(l, mid, nritems - mid) +
3730 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3731 if (data_size && !tried_avoid_double)
3732 goto push_for_double;
3733 split = 2;
3734 }
3735 }
3736 }
3737 }
3738
3739 if (split == 0)
3740 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3741 else
3742 btrfs_item_key(l, &disk_key, mid);
3743
3744 /*
3745 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double
3746 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES
3747 * subclasses, which is 8 at the time of this patch, and we've maxed it
3748 * out. In the future we could add a
3749 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
3750 * use BTRFS_NESTING_NEW_ROOT.
3751 */
3752 right = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root),
3753 &disk_key, 0, l->start, 0, 0,
3754 num_doubles ? BTRFS_NESTING_NEW_ROOT :
3755 BTRFS_NESTING_SPLIT);
3756 if (IS_ERR(right))
3757 return PTR_ERR(right);
3758
3759 root_add_used_bytes(root);
3760
3761 if (split == 0) {
3762 if (mid <= slot) {
3763 btrfs_set_header_nritems(right, 0);
3764 ret = insert_ptr(trans, path, &disk_key,
3765 right->start, path->slots[1] + 1, 1);
3766 if (ret < 0) {
3767 btrfs_tree_unlock(right);
3768 free_extent_buffer(right);
3769 return ret;
3770 }
3771 btrfs_tree_unlock(path->nodes[0]);
3772 free_extent_buffer(path->nodes[0]);
3773 path->nodes[0] = right;
3774 path->slots[0] = 0;
3775 path->slots[1] += 1;
3776 } else {
3777 btrfs_set_header_nritems(right, 0);
3778 ret = insert_ptr(trans, path, &disk_key,
3779 right->start, path->slots[1], 1);
3780 if (ret < 0) {
3781 btrfs_tree_unlock(right);
3782 free_extent_buffer(right);
3783 return ret;
3784 }
3785 btrfs_tree_unlock(path->nodes[0]);
3786 free_extent_buffer(path->nodes[0]);
3787 path->nodes[0] = right;
3788 path->slots[0] = 0;
3789 if (path->slots[1] == 0)
3790 fixup_low_keys(trans, path, &disk_key, 1);
3791 }
3792 /*
3793 * We created a new empty leaf 'right' for the new item; the caller will
3794 * copy the item's content into it and call btrfs_mark_buffer_dirty() on
3795 * it then, which is why we don't dirty 'right' here.
3796 */
3797 return ret;
3798 }
3799
3800 ret = copy_for_split(trans, path, l, right, slot, mid, nritems);
3801 if (ret < 0) {
3802 btrfs_tree_unlock(right);
3803 free_extent_buffer(right);
3804 return ret;
3805 }
3806
3807 if (split == 2) {
3808 BUG_ON(num_doubles != 0);
3809 num_doubles++;
3810 goto again;
3811 }
3812
3813 return 0;
3814
3815 push_for_double:
3816 push_for_double_split(trans, root, path, data_size);
3817 tried_avoid_double = 1;
3818 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3819 return 0;
3820 goto again;
3821 }
3822
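/*
 * Prepare the leaf at path->slots[0] so that the item there can grow by
 * ins_len bytes.  If the leaf already has enough free space this is a no-op;
 * otherwise the path is re-searched with keep_locks set and the leaf is
 * split.  Returns 0 on success, -EAGAIN if the item changed underneath us and
 * the caller should retry, or another negative errno on failure.
 */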
3823 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3824 struct btrfs_root *root,
3825 struct btrfs_path *path, int ins_len)
3826 {
3827 struct btrfs_key key;
3828 struct extent_buffer *leaf;
3829 struct btrfs_file_extent_item *fi;
3830 u64 extent_len = 0;
3831 u32 item_size;
3832 int ret;
3833
3834 leaf = path->nodes[0];
3835 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3836
3837 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3838 key.type != BTRFS_RAID_STRIPE_KEY &&
3839 key.type != BTRFS_EXTENT_CSUM_KEY);
3840
3841 if (btrfs_leaf_free_space(leaf) >= ins_len)
3842 return 0;
3843
3844 item_size = btrfs_item_size(leaf, path->slots[0]);
3845 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3846 fi = btrfs_item_ptr(leaf, path->slots[0],
3847 struct btrfs_file_extent_item);
3848 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3849 }
3850 btrfs_release_path(path);
3851
3852 path->keep_locks = 1;
3853 path->search_for_split = 1;
3854 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3855 path->search_for_split = 0;
3856 if (ret > 0)
3857 ret = -EAGAIN;
3858 if (ret < 0)
3859 goto err;
3860
3861 ret = -EAGAIN;
3862 leaf = path->nodes[0];
3863 /* if our item isn't there, return now */
3864 if (item_size != btrfs_item_size(leaf, path->slots[0]))
3865 goto err;
3866
3867 /* the leaf has changed, it now has room. return now */
3868 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len)
3869 goto err;
3870
3871 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3872 fi = btrfs_item_ptr(leaf, path->slots[0],
3873 struct btrfs_file_extent_item);
3874 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3875 goto err;
3876 }
3877
3878 ret = split_leaf(trans, root, &key, path, ins_len, 1);
3879 if (ret)
3880 goto err;
3881
3882 path->keep_locks = 0;
3883 btrfs_unlock_up_safe(path, 1);
3884 return 0;
3885 err:
3886 path->keep_locks = 0;
3887 return ret;
3888 }
3889
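/*
 * Split the item at path->slots[0] in two at split_offset bytes into the
 * item data.  The second half is inserted as a new item with 'new_key' in
 * the slot right after the original one; both halves stay in the same leaf.
 * The caller must have made room for one extra struct btrfs_item first
 * (see setup_leaf_for_split()).
 */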
3890 static noinline int split_item(struct btrfs_trans_handle *trans,
3891 struct btrfs_path *path,
3892 const struct btrfs_key *new_key,
3893 unsigned long split_offset)
3894 {
3895 struct extent_buffer *leaf;
3896 int orig_slot, slot;
3897 char *buf;
3898 u32 nritems;
3899 u32 item_size;
3900 u32 orig_offset;
3901 struct btrfs_disk_key disk_key;
3902
3903 leaf = path->nodes[0];
3904 /*
3905 * Shouldn't happen because the caller must have previously called
3906 * setup_leaf_for_split() to make room for the new item in the leaf.
3907 */
3908 if (WARN_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item)))
3909 return -ENOSPC;
3910
3911 orig_slot = path->slots[0];
3912 orig_offset = btrfs_item_offset(leaf, path->slots[0]);
3913 item_size = btrfs_item_size(leaf, path->slots[0]);
3914
3915 buf = kmalloc(item_size, GFP_NOFS);
3916 if (!buf)
3917 return -ENOMEM;
3918
3919 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3920 path->slots[0]), item_size);
3921
3922 slot = path->slots[0] + 1;
3923 nritems = btrfs_header_nritems(leaf);
3924 if (slot != nritems) {
3925 /* shift the items */
3926 memmove_leaf_items(leaf, slot + 1, slot, nritems - slot);
3927 }
3928
3929 btrfs_cpu_key_to_disk(&disk_key, new_key);
3930 btrfs_set_item_key(leaf, &disk_key, slot);
3931
3932 btrfs_set_item_offset(leaf, slot, orig_offset);
3933 btrfs_set_item_size(leaf, slot, item_size - split_offset);
3934
3935 btrfs_set_item_offset(leaf, orig_slot,
3936 orig_offset + item_size - split_offset);
3937 btrfs_set_item_size(leaf, orig_slot, split_offset);
3938
3939 btrfs_set_header_nritems(leaf, nritems + 1);
3940
3941 /* write the data for the start of the original item */
3942 write_extent_buffer(leaf, buf,
3943 btrfs_item_ptr_offset(leaf, path->slots[0]),
3944 split_offset);
3945
3946 /* write the data for the new item */
3947 write_extent_buffer(leaf, buf + split_offset,
3948 btrfs_item_ptr_offset(leaf, slot),
3949 item_size - split_offset);
3950 btrfs_mark_buffer_dirty(trans, leaf);
3951
3952 BUG_ON(btrfs_leaf_free_space(leaf) < 0);
3953 kfree(buf);
3954 return 0;
3955 }
3956
3957 /*
3958 * This function splits a single item into two items,
3959 * giving 'new_key' to the new item and splitting the
3960 * old one at split_offset (from the start of the item).
3961 *
3962 * The path may be released by this operation. After
3963 * the split, the path is pointing to the old item. The
3964 * new item is going to be in the same node as the old one.
3965 *
3966 * Note: the item being split must be small enough to live alone in
3967 * a tree block, with room for one extra struct btrfs_item.
3968 *
3969 * This allows us to split the item in place, keeping a lock on the
3970 * leaf the entire time.
3971 */
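/*
 * Illustrative use (not from the original source): a caller that wants to
 * split an existing item at 'split_offset' would typically search to it
 * first and then call btrfs_split_item(), e.g.:
 *
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 *	if (ret == 0)
 *		ret = btrfs_split_item(trans, root, path, &new_key, split_offset);
 */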
3972 int btrfs_split_item(struct btrfs_trans_handle *trans,
3973 struct btrfs_root *root,
3974 struct btrfs_path *path,
3975 const struct btrfs_key *new_key,
3976 unsigned long split_offset)
3977 {
3978 int ret;
3979 ret = setup_leaf_for_split(trans, root, path,
3980 sizeof(struct btrfs_item));
3981 if (ret)
3982 return ret;
3983
3984 ret = split_item(trans, path, new_key, split_offset);
3985 return ret;
3986 }
3987
3988 /*
3989 * make the item pointed to by the path smaller. new_size indicates
3990 * how small to make it, and from_end tells us if we just chop bytes
3991 * off the end of the item or if we shift the item to chop bytes off
3992 * the front.
3993 */
3994 void btrfs_truncate_item(struct btrfs_trans_handle *trans,
3995 const struct btrfs_path *path, u32 new_size, int from_end)
3996 {
3997 int slot;
3998 struct extent_buffer *leaf;
3999 u32 nritems;
4000 unsigned int data_end;
4001 unsigned int old_data_start;
4002 unsigned int old_size;
4003 unsigned int size_diff;
4004 int i;
4005 struct btrfs_map_token token;
4006
4007 leaf = path->nodes[0];
4008 slot = path->slots[0];
4009
4010 old_size = btrfs_item_size(leaf, slot);
4011 if (old_size == new_size)
4012 return;
4013
4014 nritems = btrfs_header_nritems(leaf);
4015 data_end = leaf_data_end(leaf);
4016
4017 old_data_start = btrfs_item_offset(leaf, slot);
4018
4019 size_diff = old_size - new_size;
4020
4021 BUG_ON(slot < 0);
4022 BUG_ON(slot >= nritems);
4023
4024 /*
4025 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4026 */
4027 /* first correct the data pointers */
4028 btrfs_init_map_token(&token, leaf);
4029 for (i = slot; i < nritems; i++) {
4030 u32 ioff;
4031
4032 ioff = btrfs_token_item_offset(&token, i);
4033 btrfs_set_token_item_offset(&token, i, ioff + size_diff);
4034 }
4035
4036 /* shift the data */
4037 if (from_end) {
4038 memmove_leaf_data(leaf, data_end + size_diff, data_end,
4039 old_data_start + new_size - data_end);
4040 } else {
4041 struct btrfs_disk_key disk_key;
4042 u64 offset;
4043
4044 btrfs_item_key(leaf, &disk_key, slot);
4045
4046 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4047 unsigned long ptr;
4048 struct btrfs_file_extent_item *fi;
4049
4050 fi = btrfs_item_ptr(leaf, slot,
4051 struct btrfs_file_extent_item);
4052 fi = (struct btrfs_file_extent_item *)(
4053 (unsigned long)fi - size_diff);
4054
4055 if (btrfs_file_extent_type(leaf, fi) ==
4056 BTRFS_FILE_EXTENT_INLINE) {
4057 ptr = btrfs_item_ptr_offset(leaf, slot);
4058 memmove_extent_buffer(leaf, ptr,
4059 (unsigned long)fi,
4060 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4061 }
4062 }
4063
4064 memmove_leaf_data(leaf, data_end + size_diff, data_end,
4065 old_data_start - data_end);
4066
4067 offset = btrfs_disk_key_offset(&disk_key);
4068 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4069 btrfs_set_item_key(leaf, &disk_key, slot);
4070 if (slot == 0)
4071 fixup_low_keys(trans, path, &disk_key, 1);
4072 }
4073
4074 btrfs_set_item_size(leaf, slot, new_size);
4075 btrfs_mark_buffer_dirty(trans, leaf);
4076
4077 if (btrfs_leaf_free_space(leaf) < 0) {
4078 btrfs_print_leaf(leaf);
4079 BUG();
4080 }
4081 }
4082
4083 /*
4084 * make the item pointed to by the path bigger, data_size is the added size.
4085 */
4086 void btrfs_extend_item(struct btrfs_trans_handle *trans,
4087 const struct btrfs_path *path, u32 data_size)
4088 {
4089 int slot;
4090 struct extent_buffer *leaf;
4091 u32 nritems;
4092 unsigned int data_end;
4093 unsigned int old_data;
4094 unsigned int old_size;
4095 int i;
4096 struct btrfs_map_token token;
4097
4098 leaf = path->nodes[0];
4099
4100 nritems = btrfs_header_nritems(leaf);
4101 data_end = leaf_data_end(leaf);
4102
4103 if (btrfs_leaf_free_space(leaf) < data_size) {
4104 btrfs_print_leaf(leaf);
4105 BUG();
4106 }
4107 slot = path->slots[0];
4108 old_data = btrfs_item_data_end(leaf, slot);
4109
4110 BUG_ON(slot < 0);
4111 if (slot >= nritems) {
4112 btrfs_print_leaf(leaf);
4113 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
4114 slot, nritems);
4115 BUG();
4116 }
4117
4118 /*
4119 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4120 */
4121 /* first correct the data pointers */
4122 btrfs_init_map_token(&token, leaf);
4123 for (i = slot; i < nritems; i++) {
4124 u32 ioff;
4125
4126 ioff = btrfs_token_item_offset(&token, i);
4127 btrfs_set_token_item_offset(&token, i, ioff - data_size);
4128 }
4129
4130 /* shift the data */
4131 memmove_leaf_data(leaf, data_end - data_size, data_end,
4132 old_data - data_end);
4133
4134 data_end = old_data;
4135 old_size = btrfs_item_size(leaf, slot);
4136 btrfs_set_item_size(leaf, slot, old_size + data_size);
4137 btrfs_mark_buffer_dirty(trans, leaf);
4138
4139 if (btrfs_leaf_free_space(leaf) < 0) {
4140 btrfs_print_leaf(leaf);
4141 BUG();
4142 }
4143 }
4144
4145 /*
4146 * Make space in the node before inserting one or more items.
4147 *
4148 * @trans: transaction handle
4149 * @root: root we are inserting items to
4150 * @path: points to the leaf/slot where we are going to insert new items
4151 * @batch: information about the batch of items to insert
4152 *
4153 * Main purpose is to save stack depth by doing the bulk of the work in a
4154 * function that doesn't call btrfs_search_slot
4155 */
4156 static void setup_items_for_insert(struct btrfs_trans_handle *trans,
4157 struct btrfs_root *root, struct btrfs_path *path,
4158 const struct btrfs_item_batch *batch)
4159 {
4160 struct btrfs_fs_info *fs_info = root->fs_info;
4161 int i;
4162 u32 nritems;
4163 unsigned int data_end;
4164 struct btrfs_disk_key disk_key;
4165 struct extent_buffer *leaf;
4166 int slot;
4167 struct btrfs_map_token token;
4168 u32 total_size;
4169
4170 /*
4171 * Before anything else, update keys in the parent and other ancestors
4172 * if needed, then release the write locks on them, so that other tasks
4173 * can use them while we modify the leaf.
4174 */
4175 if (path->slots[0] == 0) {
4176 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
4177 fixup_low_keys(trans, path, &disk_key, 1);
4178 }
4179 btrfs_unlock_up_safe(path, 1);
4180
4181 leaf = path->nodes[0];
4182 slot = path->slots[0];
4183
4184 nritems = btrfs_header_nritems(leaf);
4185 data_end = leaf_data_end(leaf);
4186 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
4187
4188 if (btrfs_leaf_free_space(leaf) < total_size) {
4189 btrfs_print_leaf(leaf);
4190 btrfs_crit(fs_info, "not enough freespace need %u have %d",
4191 total_size, btrfs_leaf_free_space(leaf));
4192 BUG();
4193 }
4194
4195 btrfs_init_map_token(&token, leaf);
4196 if (slot != nritems) {
4197 unsigned int old_data = btrfs_item_data_end(leaf, slot);
4198
4199 if (old_data < data_end) {
4200 btrfs_print_leaf(leaf);
4201 btrfs_crit(fs_info,
4202 "item at slot %d with data offset %u beyond data end of leaf %u",
4203 slot, old_data, data_end);
4204 BUG();
4205 }
4206 /*
4207 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4208 */
4209 /* first correct the data pointers */
4210 for (i = slot; i < nritems; i++) {
4211 u32 ioff;
4212
4213 ioff = btrfs_token_item_offset(&token, i);
4214 btrfs_set_token_item_offset(&token, i,
4215 ioff - batch->total_data_size);
4216 }
4217 /* shift the items */
4218 memmove_leaf_items(leaf, slot + batch->nr, slot, nritems - slot);
4219
4220 /* shift the data */
4221 memmove_leaf_data(leaf, data_end - batch->total_data_size,
4222 data_end, old_data - data_end);
4223 data_end = old_data;
4224 }
4225
4226 /* setup the item for the new data */
4227 for (i = 0; i < batch->nr; i++) {
4228 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]);
4229 btrfs_set_item_key(leaf, &disk_key, slot + i);
4230 data_end -= batch->data_sizes[i];
4231 btrfs_set_token_item_offset(&token, slot + i, data_end);
4232 btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]);
4233 }
4234
4235 btrfs_set_header_nritems(leaf, nritems + batch->nr);
4236 btrfs_mark_buffer_dirty(trans, leaf);
4237
4238 if (btrfs_leaf_free_space(leaf) < 0) {
4239 btrfs_print_leaf(leaf);
4240 BUG();
4241 }
4242 }
4243
4244 /*
4245 * Insert a new item into a leaf.
4246 *
4247 * @trans: Transaction handle.
4248 * @root: The root of the btree.
4249 * @path: A path pointing to the target leaf and slot.
4250 * @key: The key of the new item.
4251 * @data_size: The size of the data associated with the new key.
4252 */
4253 void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
4254 struct btrfs_root *root,
4255 struct btrfs_path *path,
4256 const struct btrfs_key *key,
4257 u32 data_size)
4258 {
4259 struct btrfs_item_batch batch;
4260
4261 batch.keys = key;
4262 batch.data_sizes = &data_size;
4263 batch.total_data_size = data_size;
4264 batch.nr = 1;
4265
4266 setup_items_for_insert(trans, root, path, &batch);
4267 }
4268
4269 /*
4270 * Given a key and some data, insert items into the tree.
4271 * This does all the path init required, making room in the tree if needed.
4272 *
4273 * Returns: 0 on success
4274 * -EEXIST if the first key already exists
4275 * < 0 on other errors
4276 */
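/*
 * Illustrative use (not from the original source): to insert several items
 * at once, a caller fills a struct btrfs_item_batch and then writes the item
 * data itself, e.g.:
 *
 *	struct btrfs_item_batch batch;
 *
 *	batch.keys = keys;               (array of batch.nr keys)
 *	batch.data_sizes = sizes;        (data size of each item)
 *	batch.total_data_size = total;   (sum of all entries in sizes)
 *	batch.nr = nr_items;
 *	ret = btrfs_insert_empty_items(trans, root, path, &batch);
 *
 * On success the empty items exist in the leaf and the caller fills in their
 * data, for example with write_extent_buffer().
 */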
4277 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4278 struct btrfs_root *root,
4279 struct btrfs_path *path,
4280 const struct btrfs_item_batch *batch)
4281 {
4282 int ret = 0;
4283 int slot;
4284 u32 total_size;
4285
4286 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
4287 ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1);
4288 if (ret == 0)
4289 return -EEXIST;
4290 if (ret < 0)
4291 return ret;
4292
4293 slot = path->slots[0];
4294 BUG_ON(slot < 0);
4295
4296 setup_items_for_insert(trans, root, path, batch);
4297 return 0;
4298 }
4299
4300 /*
4301 * Given a key and some data, insert an item into the tree.
4302 * This does all the path init required, making room in the tree if needed.
4303 */
4304 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4305 const struct btrfs_key *cpu_key, void *data,
4306 u32 data_size)
4307 {
4308 int ret = 0;
4309 struct btrfs_path *path;
4310 struct extent_buffer *leaf;
4311 unsigned long ptr;
4312
4313 path = btrfs_alloc_path();
4314 if (!path)
4315 return -ENOMEM;
4316 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4317 if (!ret) {
4318 leaf = path->nodes[0];
4319 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4320 write_extent_buffer(leaf, data, ptr, data_size);
4321 btrfs_mark_buffer_dirty(trans, leaf);
4322 }
4323 btrfs_free_path(path);
4324 return ret;
4325 }
4326
4327 /*
4328 * This function duplicates an item, giving 'new_key' to the new item.
4329 * It guarantees both items live in the same tree leaf and the new item is
4330 * contiguous with the original item.
4331 *
4332 * This allows us to split a file extent in place, keeping a lock on the leaf
4333 * the entire time.
4334 */
4335 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4336 struct btrfs_root *root,
4337 struct btrfs_path *path,
4338 const struct btrfs_key *new_key)
4339 {
4340 struct extent_buffer *leaf;
4341 int ret;
4342 u32 item_size;
4343
4344 leaf = path->nodes[0];
4345 item_size = btrfs_item_size(leaf, path->slots[0]);
4346 ret = setup_leaf_for_split(trans, root, path,
4347 item_size + sizeof(struct btrfs_item));
4348 if (ret)
4349 return ret;
4350
4351 path->slots[0]++;
4352 btrfs_setup_item_for_insert(trans, root, path, new_key, item_size);
4353 leaf = path->nodes[0];
4354 memcpy_extent_buffer(leaf,
4355 btrfs_item_ptr_offset(leaf, path->slots[0]),
4356 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4357 item_size);
4358 return 0;
4359 }
4360
4361 /*
4362 * delete the pointer from a given node.
4363 *
4364 * the tree should have been previously balanced so the deletion does not
4365 * empty a node.
4366 *
4367 * This is exported for use inside btrfs-progs, don't un-export it.
4368 */
4369 int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4370 struct btrfs_path *path, int level, int slot)
4371 {
4372 struct extent_buffer *parent = path->nodes[level];
4373 u32 nritems;
4374 int ret;
4375
4376 nritems = btrfs_header_nritems(parent);
4377 if (slot != nritems - 1) {
4378 if (level) {
4379 ret = btrfs_tree_mod_log_insert_move(parent, slot,
4380 slot + 1, nritems - slot - 1);
4381 if (ret < 0) {
4382 btrfs_abort_transaction(trans, ret);
4383 return ret;
4384 }
4385 }
4386 memmove_extent_buffer(parent,
4387 btrfs_node_key_ptr_offset(parent, slot),
4388 btrfs_node_key_ptr_offset(parent, slot + 1),
4389 sizeof(struct btrfs_key_ptr) *
4390 (nritems - slot - 1));
4391 } else if (level) {
4392 ret = btrfs_tree_mod_log_insert_key(parent, slot,
4393 BTRFS_MOD_LOG_KEY_REMOVE);
4394 if (ret < 0) {
4395 btrfs_abort_transaction(trans, ret);
4396 return ret;
4397 }
4398 }
4399
4400 nritems--;
4401 btrfs_set_header_nritems(parent, nritems);
4402 if (nritems == 0 && parent == root->node) {
4403 BUG_ON(btrfs_header_level(root->node) != 1);
4404 /* just turn the root into a leaf and break */
4405 btrfs_set_header_level(root->node, 0);
4406 } else if (slot == 0) {
4407 struct btrfs_disk_key disk_key;
4408
4409 btrfs_node_key(parent, &disk_key, 0);
4410 fixup_low_keys(trans, path, &disk_key, level + 1);
4411 }
4412 btrfs_mark_buffer_dirty(trans, parent);
4413 return 0;
4414 }
4415
4416 /*
4417 * a helper function to delete the leaf pointed to by path->slots[1] and
4418 * path->nodes[1].
4419 *
4420 * This deletes the pointer in path->nodes[1] and frees the leaf
4421 * block extent. zero is returned if it all worked out, < 0 otherwise.
4422 *
4423 * The path must have already been setup for deleting the leaf, including
4424 * all the proper balancing. path->nodes[1] must be locked.
4425 */
4426 static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
4427 struct btrfs_root *root,
4428 struct btrfs_path *path,
4429 struct extent_buffer *leaf)
4430 {
4431 int ret;
4432
4433 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4434 ret = btrfs_del_ptr(trans, root, path, 1, path->slots[1]);
4435 if (ret < 0)
4436 return ret;
4437
4438 /*
4439 * btrfs_free_extent is expensive, we want to make sure we
4440 * aren't holding any locks when we call it
4441 */
4442 btrfs_unlock_up_safe(path, 0);
4443
4444 root_sub_used_bytes(root);
4445
4446 atomic_inc(&leaf->refs);
4447 ret = btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
4448 free_extent_buffer_stale(leaf);
4449 if (ret < 0)
4450 btrfs_abort_transaction(trans, ret);
4451
4452 return ret;
4453 }
4454 /*
4455 * delete the item at the leaf level in path. If that empties
4456 * the leaf, remove it from the tree
4457 */
4458 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4459 struct btrfs_path *path, int slot, int nr)
4460 {
4461 struct btrfs_fs_info *fs_info = root->fs_info;
4462 struct extent_buffer *leaf;
4463 int ret = 0;
4464 int wret;
4465 u32 nritems;
4466
4467 leaf = path->nodes[0];
4468 nritems = btrfs_header_nritems(leaf);
4469
4470 if (slot + nr != nritems) {
4471 const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1);
4472 const int data_end = leaf_data_end(leaf);
4473 struct btrfs_map_token token;
4474 u32 dsize = 0;
4475 int i;
4476
4477 for (i = 0; i < nr; i++)
4478 dsize += btrfs_item_size(leaf, slot + i);
4479
4480 memmove_leaf_data(leaf, data_end + dsize, data_end,
4481 last_off - data_end);
4482
4483 btrfs_init_map_token(&token, leaf);
4484 for (i = slot + nr; i < nritems; i++) {
4485 u32 ioff;
4486
4487 ioff = btrfs_token_item_offset(&token, i);
4488 btrfs_set_token_item_offset(&token, i, ioff + dsize);
4489 }
4490
4491 memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr);
4492 }
4493 btrfs_set_header_nritems(leaf, nritems - nr);
4494 nritems -= nr;
4495
4496 /* delete the leaf if we've emptied it */
4497 if (nritems == 0) {
4498 if (leaf == root->node) {
4499 btrfs_set_header_level(leaf, 0);
4500 } else {
4501 btrfs_clear_buffer_dirty(trans, leaf);
4502 ret = btrfs_del_leaf(trans, root, path, leaf);
4503 if (ret < 0)
4504 return ret;
4505 }
4506 } else {
4507 int used = leaf_space_used(leaf, 0, nritems);
4508 if (slot == 0) {
4509 struct btrfs_disk_key disk_key;
4510
4511 btrfs_item_key(leaf, &disk_key, 0);
4512 fixup_low_keys(trans, path, &disk_key, 1);
4513 }
4514
4515 /*
4516 * Try to delete the leaf if it is mostly empty. We do this by
4517 * trying to move all its items into its left and right neighbours.
4518 * If we can't move all the items, then we don't delete it - it's
4519 * not ideal, but future insertions might fill the leaf with more
4520 * items, or items from other leaves might be moved later into our
4521 * leaf due to deletions on those leaves.
4522 */
4523 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
4524 u32 min_push_space;
4525
4526 /* push_leaf_left fixes the path.
4527 * make sure the path still points to our leaf
4528 * for possible call to btrfs_del_ptr below
4529 */
4530 slot = path->slots[1];
4531 atomic_inc(&leaf->refs);
4532 /*
4533 * We want to be able to at least push one item to the
4534 * left neighbour leaf, and that's the first item.
4535 */
4536 min_push_space = sizeof(struct btrfs_item) +
4537 btrfs_item_size(leaf, 0);
4538 wret = push_leaf_left(trans, root, path, 0,
4539 min_push_space, 1, (u32)-1);
4540 if (wret < 0 && wret != -ENOSPC)
4541 ret = wret;
4542
4543 if (path->nodes[0] == leaf &&
4544 btrfs_header_nritems(leaf)) {
4545 /*
4546 * If we were not able to push all items from our
4547 * leaf to its left neighbour, then attempt to
4548 * either push all the remaining items to the
4549 * right neighbour or none. There's no advantage
4550 * in pushing only some items, instead of all, as
4551 * it's pointless to end up with a leaf having
4552 * too few items while the neighbours can be full
4553 * or nearly full.
4554 */
4555 nritems = btrfs_header_nritems(leaf);
4556 min_push_space = leaf_space_used(leaf, 0, nritems);
4557 wret = push_leaf_right(trans, root, path, 0,
4558 min_push_space, 1, 0);
4559 if (wret < 0 && wret != -ENOSPC)
4560 ret = wret;
4561 }
4562
4563 if (btrfs_header_nritems(leaf) == 0) {
4564 path->slots[1] = slot;
4565 ret = btrfs_del_leaf(trans, root, path, leaf);
4566 if (ret < 0)
4567 return ret;
4568 free_extent_buffer(leaf);
4569 ret = 0;
4570 } else {
4571 /* if we're still in the path, make sure
4572 * we're dirty. Otherwise, one of the
4573 * push_leaf functions must have already
4574 * dirtied this buffer
4575 */
4576 if (path->nodes[0] == leaf)
4577 btrfs_mark_buffer_dirty(trans, leaf);
4578 free_extent_buffer(leaf);
4579 }
4580 } else {
4581 btrfs_mark_buffer_dirty(trans, leaf);
4582 }
4583 }
4584 return ret;
4585 }
4586
4587 /*
4588 * A helper function to walk down the tree starting at min_key, and looking
4589 * for nodes or leaves that have a minimum transaction id.
4590 * This is used by the btree defrag code, and tree logging
4591 *
4592 * This does not cow, but it does stuff the starting key it finds back
4593 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4594 * key and get a writable path.
4595 *
4596 * This honors path->lowest_level to prevent descent past a given level
4597 * of the tree.
4598 *
4599 * min_trans indicates the oldest transaction that you are interested
4600 * in walking through. Any nodes or leaves older than min_trans are
4601 * skipped over (without reading them).
4602 *
4603 * returns zero if something useful was found, < 0 on error and 1 if there
4604 * was nothing in the tree that matched the search criteria.
4605 */
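/*
 * Illustrative use (not from the original source): callers typically loop,
 * advancing min_key past the returned key between iterations, e.g.:
 *
 *	while (btrfs_search_forward(root, &min_key, path, min_trans) == 0) {
 *		(process the buffer at path->lowest_level)
 *		btrfs_release_path(path);
 *		(advance min_key past the key that was just returned)
 *	}
 */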
4606 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4607 struct btrfs_path *path,
4608 u64 min_trans)
4609 {
4610 struct extent_buffer *cur;
4611 struct btrfs_key found_key;
4612 int slot;
4613 int sret;
4614 u32 nritems;
4615 int level;
4616 int ret = 1;
4617 int keep_locks = path->keep_locks;
4618
4619 ASSERT(!path->nowait);
4620 path->keep_locks = 1;
4621 again:
4622 cur = btrfs_read_lock_root_node(root);
4623 level = btrfs_header_level(cur);
4624 WARN_ON(path->nodes[level]);
4625 path->nodes[level] = cur;
4626 path->locks[level] = BTRFS_READ_LOCK;
4627
4628 if (btrfs_header_generation(cur) < min_trans) {
4629 ret = 1;
4630 goto out;
4631 }
4632 while (1) {
4633 nritems = btrfs_header_nritems(cur);
4634 level = btrfs_header_level(cur);
4635 sret = btrfs_bin_search(cur, 0, min_key, &slot);
4636 if (sret < 0) {
4637 ret = sret;
4638 goto out;
4639 }
4640
4641 /* at the lowest level, we're done, setup the path and exit */
4642 if (level == path->lowest_level) {
4643 if (slot >= nritems)
4644 goto find_next_key;
4645 ret = 0;
4646 path->slots[level] = slot;
4647 btrfs_item_key_to_cpu(cur, &found_key, slot);
4648 goto out;
4649 }
4650 if (sret && slot > 0)
4651 slot--;
4652 /*
4653 * check this node pointer against the min_trans parameters.
4654 * If it is too old, skip to the next one.
4655 */
4656 while (slot < nritems) {
4657 u64 gen;
4658
4659 gen = btrfs_node_ptr_generation(cur, slot);
4660 if (gen < min_trans) {
4661 slot++;
4662 continue;
4663 }
4664 break;
4665 }
4666 find_next_key:
4667 /*
4668 * we didn't find a candidate key in this node, walk forward
4669 * and find another one
4670 */
4671 if (slot >= nritems) {
4672 path->slots[level] = slot;
4673 sret = btrfs_find_next_key(root, path, min_key, level,
4674 min_trans);
4675 if (sret == 0) {
4676 btrfs_release_path(path);
4677 goto again;
4678 } else {
4679 goto out;
4680 }
4681 }
4682 /* save our key for returning back */
4683 btrfs_node_key_to_cpu(cur, &found_key, slot);
4684 path->slots[level] = slot;
4685 if (level == path->lowest_level) {
4686 ret = 0;
4687 goto out;
4688 }
4689 cur = btrfs_read_node_slot(cur, slot);
4690 if (IS_ERR(cur)) {
4691 ret = PTR_ERR(cur);
4692 goto out;
4693 }
4694
4695 btrfs_tree_read_lock(cur);
4696
4697 path->locks[level - 1] = BTRFS_READ_LOCK;
4698 path->nodes[level - 1] = cur;
4699 unlock_up(path, level, 1, 0, NULL);
4700 }
4701 out:
4702 path->keep_locks = keep_locks;
4703 if (ret == 0) {
4704 btrfs_unlock_up_safe(path, path->lowest_level + 1);
4705 memcpy(min_key, &found_key, sizeof(found_key));
4706 }
4707 return ret;
4708 }
4709
4710 /*
4711 * this is similar to btrfs_next_leaf, but does not try to preserve
4712 * and fixup the path. It looks for and returns the next key in the
4713 * tree based on the current path and the min_trans parameters.
4714 *
4715 * 0 is returned if another key is found, < 0 if there are any errors
4716 * and 1 is returned if there are no higher keys in the tree
4717 *
4718 * path->keep_locks should be set to 1 on the search made before
4719 * calling this function.
4720 */
4721 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4722 struct btrfs_key *key, int level, u64 min_trans)
4723 {
4724 int slot;
4725 struct extent_buffer *c;
4726
4727 WARN_ON(!path->keep_locks && !path->skip_locking);
4728 while (level < BTRFS_MAX_LEVEL) {
4729 if (!path->nodes[level])
4730 return 1;
4731
4732 slot = path->slots[level] + 1;
4733 c = path->nodes[level];
4734 next:
4735 if (slot >= btrfs_header_nritems(c)) {
4736 int ret;
4737 int orig_lowest;
4738 struct btrfs_key cur_key;
4739 if (level + 1 >= BTRFS_MAX_LEVEL ||
4740 !path->nodes[level + 1])
4741 return 1;
4742
4743 if (path->locks[level + 1] || path->skip_locking) {
4744 level++;
4745 continue;
4746 }
4747
4748 slot = btrfs_header_nritems(c) - 1;
4749 if (level == 0)
4750 btrfs_item_key_to_cpu(c, &cur_key, slot);
4751 else
4752 btrfs_node_key_to_cpu(c, &cur_key, slot);
4753
4754 orig_lowest = path->lowest_level;
4755 btrfs_release_path(path);
4756 path->lowest_level = level;
4757 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4758 0, 0);
4759 path->lowest_level = orig_lowest;
4760 if (ret < 0)
4761 return ret;
4762
4763 c = path->nodes[level];
4764 slot = path->slots[level];
4765 if (ret == 0)
4766 slot++;
4767 goto next;
4768 }
4769
4770 if (level == 0)
4771 btrfs_item_key_to_cpu(c, key, slot);
4772 else {
4773 u64 gen = btrfs_node_ptr_generation(c, slot);
4774
4775 if (gen < min_trans) {
4776 slot++;
4777 goto next;
4778 }
4779 btrfs_node_key_to_cpu(c, key, slot);
4780 }
4781 return 0;
4782 }
4783 return 1;
4784 }
4785
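/*
 * Walk to the next leaf of the tree, re-searching from the last key of the
 * current leaf because all locks were dropped.  When time_seq is non-zero the
 * search goes through the tree mod log, so an old version of the tree is
 * walked.  Returns 0 when the path points at the next item to process, 1 if
 * there is nothing after the current leaf, and < 0 on error.
 */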
4786 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
4787 u64 time_seq)
4788 {
4789 int slot;
4790 int level;
4791 struct extent_buffer *c;
4792 struct extent_buffer *next;
4793 struct btrfs_fs_info *fs_info = root->fs_info;
4794 struct btrfs_key key;
4795 bool need_commit_sem = false;
4796 u32 nritems;
4797 int ret;
4798 int i;
4799
4800 /*
4801 * The nowait semantics are used only for write paths, where we don't
4802 * use the tree mod log and sequence numbers.
4803 */
4804 if (time_seq)
4805 ASSERT(!path->nowait);
4806
4807 nritems = btrfs_header_nritems(path->nodes[0]);
4808 if (nritems == 0)
4809 return 1;
4810
4811 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4812 again:
4813 level = 1;
4814 next = NULL;
4815 btrfs_release_path(path);
4816
4817 path->keep_locks = 1;
4818
4819 if (time_seq) {
4820 ret = btrfs_search_old_slot(root, &key, path, time_seq);
4821 } else {
4822 if (path->need_commit_sem) {
4823 path->need_commit_sem = 0;
4824 need_commit_sem = true;
4825 if (path->nowait) {
4826 if (!down_read_trylock(&fs_info->commit_root_sem)) {
4827 ret = -EAGAIN;
4828 goto done;
4829 }
4830 } else {
4831 down_read(&fs_info->commit_root_sem);
4832 }
4833 }
4834 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4835 }
4836 path->keep_locks = 0;
4837
4838 if (ret < 0)
4839 goto done;
4840
4841 nritems = btrfs_header_nritems(path->nodes[0]);
4842 /*
4843 * by releasing the path above we dropped all our locks. A balance
4844 * could have added more items next to the key that used to be
4845 * at the very end of the block. So, check again here and
4846 * advance the path if there are now more items available.
4847 */
4848 if (nritems > 0 && path->slots[0] < nritems - 1) {
4849 if (ret == 0)
4850 path->slots[0]++;
4851 ret = 0;
4852 goto done;
4853 }
4854 /*
4855 * So the above check misses one case:
4856 * - after releasing the path above, someone has removed the item that
4857 * used to be at the very end of the block, and balance between leafs
4858 * gets another one with bigger key.offset to replace it.
4859 *
4860 * This one should be returned as well, or we can get leaf corruption
4861 * later (esp. in __btrfs_drop_extents()).
4862 *
4863 * And a bit more explanation about this check,
4864 * with ret > 0, the key isn't found, the path points to the slot
4865 * where it should be inserted, so the path->slots[0] item must be the
4866 * bigger one.
4867 */
4868 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
4869 ret = 0;
4870 goto done;
4871 }
4872
4873 while (level < BTRFS_MAX_LEVEL) {
4874 if (!path->nodes[level]) {
4875 ret = 1;
4876 goto done;
4877 }
4878
4879 slot = path->slots[level] + 1;
4880 c = path->nodes[level];
4881 if (slot >= btrfs_header_nritems(c)) {
4882 level++;
4883 if (level == BTRFS_MAX_LEVEL) {
4884 ret = 1;
4885 goto done;
4886 }
4887 continue;
4888 }
4889
4891 /*
4892 * Our current level is where we're going to start from, and to
4893 * make sure lockdep doesn't complain we need to drop our locks
4894 * and nodes from 0 to our current level.
4895 */
4896 for (i = 0; i < level; i++) {
4897 if (path->locks[level]) {
4898 btrfs_tree_read_unlock(path->nodes[i]);
4899 path->locks[i] = 0;
4900 }
4901 free_extent_buffer(path->nodes[i]);
4902 path->nodes[i] = NULL;
4903 }
4904
4905 next = c;
4906 ret = read_block_for_search(root, path, &next, slot, &key);
4907 if (ret == -EAGAIN && !path->nowait)
4908 goto again;
4909
4910 if (ret < 0) {
4911 btrfs_release_path(path);
4912 goto done;
4913 }
4914
4915 if (!path->skip_locking) {
4916 ret = btrfs_try_tree_read_lock(next);
4917 if (!ret && path->nowait) {
4918 ret = -EAGAIN;
4919 goto done;
4920 }
4921 if (!ret && time_seq) {
4922 /*
4923 * If we don't get the lock, we may be racing
4924 * with push_leaf_left, holding that lock while
4925 * itself waiting for the leaf we've currently
4926 * locked. To solve this situation, we give up
4927 * on our lock and cycle.
4928 */
4929 free_extent_buffer(next);
4930 btrfs_release_path(path);
4931 cond_resched();
4932 goto again;
4933 }
4934 if (!ret)
4935 btrfs_tree_read_lock(next);
4936 }
4937 break;
4938 }
4939 path->slots[level] = slot;
4940 while (1) {
4941 level--;
4942 path->nodes[level] = next;
4943 path->slots[level] = 0;
4944 if (!path->skip_locking)
4945 path->locks[level] = BTRFS_READ_LOCK;
4946 if (!level)
4947 break;
4948
4949 ret = read_block_for_search(root, path, &next, 0, &key);
4950 if (ret == -EAGAIN && !path->nowait)
4951 goto again;
4952
4953 if (ret < 0) {
4954 btrfs_release_path(path);
4955 goto done;
4956 }
4957
4958 if (!path->skip_locking) {
4959 if (path->nowait) {
4960 if (!btrfs_try_tree_read_lock(next)) {
4961 ret = -EAGAIN;
4962 goto done;
4963 }
4964 } else {
4965 btrfs_tree_read_lock(next);
4966 }
4967 }
4968 }
4969 ret = 0;
4970 done:
4971 unlock_up(path, 0, 1, 0, NULL);
4972 if (need_commit_sem) {
4973 int ret2;
4974
4975 path->need_commit_sem = 1;
4976 ret2 = finish_need_commit_sem_search(path);
4977 up_read(&fs_info->commit_root_sem);
4978 if (ret2)
4979 ret = ret2;
4980 }
4981
4982 return ret;
4983 }
4984
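/*
 * Advance the path to the next item, moving to the next leaf (possibly an
 * old version of it, when time_seq is non-zero) if the current leaf is
 * exhausted.  Returns 0 on success, 1 if there is no next item, < 0 on error.
 */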
4985 int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq)
4986 {
4987 path->slots[0]++;
4988 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
4989 return btrfs_next_old_leaf(root, path, time_seq);
4990 return 0;
4991 }
4992
4993 /*
4994 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4995 * searching until it gets past min_objectid or finds an item of 'type'
4996 *
4997 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4998 */
4999 int btrfs_previous_item(struct btrfs_root *root,
5000 struct btrfs_path *path, u64 min_objectid,
5001 int type)
5002 {
5003 struct btrfs_key found_key;
5004 struct extent_buffer *leaf;
5005 u32 nritems;
5006 int ret;
5007
5008 while (1) {
5009 if (path->slots[0] == 0) {
5010 ret = btrfs_prev_leaf(root, path);
5011 if (ret != 0)
5012 return ret;
5013 } else {
5014 path->slots[0]--;
5015 }
5016 leaf = path->nodes[0];
5017 nritems = btrfs_header_nritems(leaf);
5018 if (nritems == 0)
5019 return 1;
5020 if (path->slots[0] == nritems)
5021 path->slots[0]--;
5022
5023 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5024 if (found_key.objectid < min_objectid)
5025 break;
5026 if (found_key.type == type)
5027 return 0;
5028 if (found_key.objectid == min_objectid &&
5029 found_key.type < type)
5030 break;
5031 }
5032 return 1;
5033 }
5034
5035 /*
5036 * search in extent tree to find a previous Metadata/Data extent item with
5037 * min objectid.
5038 *
5039 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5040 */
5041 int btrfs_previous_extent_item(struct btrfs_root *root,
5042 struct btrfs_path *path, u64 min_objectid)
5043 {
5044 struct btrfs_key found_key;
5045 struct extent_buffer *leaf;
5046 u32 nritems;
5047 int ret;
5048
5049 while (1) {
5050 if (path->slots[0] == 0) {
5051 ret = btrfs_prev_leaf(root, path);
5052 if (ret != 0)
5053 return ret;
5054 } else {
5055 path->slots[0]--;
5056 }
5057 leaf = path->nodes[0];
5058 nritems = btrfs_header_nritems(leaf);
5059 if (nritems == 0)
5060 return 1;
5061 if (path->slots[0] == nritems)
5062 path->slots[0]--;
5063
5064 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5065 if (found_key.objectid < min_objectid)
5066 break;
5067 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5068 found_key.type == BTRFS_METADATA_ITEM_KEY)
5069 return 0;
5070 if (found_key.objectid == min_objectid &&
5071 found_key.type < BTRFS_EXTENT_ITEM_KEY)
5072 break;
5073 }
5074 return 1;
5075 }
5076
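/*
 * Create the slab cache used for struct btrfs_path allocations; called once
 * from module init, and torn down again by btrfs_ctree_exit().
 */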
5077 int __init btrfs_ctree_init(void)
5078 {
5079 btrfs_path_cachep = KMEM_CACHE(btrfs_path, 0);
5080 if (!btrfs_path_cachep)
5081 return -ENOMEM;
5082 return 0;
5083 }
5084
5085 void __cold btrfs_ctree_exit(void)
5086 {
5087 kmem_cache_destroy(btrfs_path_cachep);
5088 }
5089