/* xref: /linux/fs/btrfs/delayed-inode.c (revision 0eb4aaa230d725fa9b1cd758c0f17abca5597af6) */
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2011 Fujitsu.  All rights reserved.
4  * Written by Miao Xie <miaox@cn.fujitsu.com>
5  */
6 
7 #include <linux/slab.h>
8 #include <linux/iversion.h>
9 #include "ctree.h"
10 #include "fs.h"
11 #include "messages.h"
12 #include "misc.h"
13 #include "delayed-inode.h"
14 #include "disk-io.h"
15 #include "transaction.h"
16 #include "qgroup.h"
17 #include "locking.h"
18 #include "inode-item.h"
19 #include "space-info.h"
20 #include "accessors.h"
21 #include "file-item.h"
22 
23 #define BTRFS_DELAYED_WRITEBACK		512
24 #define BTRFS_DELAYED_BACKGROUND	128
25 #define BTRFS_DELAYED_BATCH		16
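
/*
 * Editor's note (not in the original source): these tunables drive the
 * throttling logic below.  finish_one_item() wakes waiters once the backlog
 * drops under BTRFS_DELAYED_BACKGROUND or every BTRFS_DELAYED_BATCH
 * completed items, and btrfs_balance_delayed_items() kicks background
 * flushing at BTRFS_DELAYED_BACKGROUND items but makes callers wait
 * synchronously once BTRFS_DELAYED_WRITEBACK items accumulate.
 */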
26 
27 static struct kmem_cache *delayed_node_cache;
28 
29 int __init btrfs_delayed_inode_init(void)
30 {
31 	delayed_node_cache = KMEM_CACHE(btrfs_delayed_node, 0);
32 	if (!delayed_node_cache)
33 		return -ENOMEM;
34 	return 0;
35 }
36 
37 void __cold btrfs_delayed_inode_exit(void)
38 {
39 	kmem_cache_destroy(delayed_node_cache);
40 }
41 
42 void btrfs_init_delayed_root(struct btrfs_delayed_root *delayed_root)
43 {
44 	atomic_set(&delayed_root->items, 0);
45 	atomic_set(&delayed_root->items_seq, 0);
46 	delayed_root->nodes = 0;
47 	spin_lock_init(&delayed_root->lock);
48 	init_waitqueue_head(&delayed_root->wait);
49 	INIT_LIST_HEAD(&delayed_root->node_list);
50 	INIT_LIST_HEAD(&delayed_root->prepare_list);
51 }
52 
53 static inline void btrfs_init_delayed_node(
54 				struct btrfs_delayed_node *delayed_node,
55 				struct btrfs_root *root, u64 inode_id)
56 {
57 	delayed_node->root = root;
58 	delayed_node->inode_id = inode_id;
59 	refcount_set(&delayed_node->refs, 0);
60 	delayed_node->ins_root = RB_ROOT_CACHED;
61 	delayed_node->del_root = RB_ROOT_CACHED;
62 	mutex_init(&delayed_node->mutex);
63 	INIT_LIST_HEAD(&delayed_node->n_list);
64 	INIT_LIST_HEAD(&delayed_node->p_list);
65 }
66 
67 static struct btrfs_delayed_node *btrfs_get_delayed_node(
68 		struct btrfs_inode *btrfs_inode)
69 {
70 	struct btrfs_root *root = btrfs_inode->root;
71 	u64 ino = btrfs_ino(btrfs_inode);
72 	struct btrfs_delayed_node *node;
73 
74 	node = READ_ONCE(btrfs_inode->delayed_node);
75 	if (node) {
76 		refcount_inc(&node->refs);
77 		return node;
78 	}
79 
80 	xa_lock(&root->delayed_nodes);
81 	node = xa_load(&root->delayed_nodes, ino);
82 
83 	if (node) {
84 		if (btrfs_inode->delayed_node) {
85 			refcount_inc(&node->refs);	/* can be accessed */
86 			BUG_ON(btrfs_inode->delayed_node != node);
87 			xa_unlock(&root->delayed_nodes);
88 			return node;
89 		}
90 
91 		/*
92 		 * It's possible that we're racing into the middle of removing
93 		 * this node from the xarray.  In this case, the refcount
94 		 * was zero and it should never go back to one.  Just return
95 		 * NULL like it was never in the xarray at all; our release
96 		 * function is in the process of removing it.
97 		 *
98 		 * Some implementations of refcount_inc refuse to bump the
99 		 * refcount once it has hit zero.  If we don't do this dance
100 		 * here, refcount_inc() may decide to just WARN_ONCE() instead
101 		 * of actually bumping the refcount.
102 		 *
103 		 * If this node is properly in the xarray, we want to bump the
104 		 * refcount twice, once for the inode and once for this get
105 		 * operation.
106 		 */
107 		if (refcount_inc_not_zero(&node->refs)) {
108 			refcount_inc(&node->refs);
109 			btrfs_inode->delayed_node = node;
110 		} else {
111 			node = NULL;
112 		}
113 
114 		xa_unlock(&root->delayed_nodes);
115 		return node;
116 	}
117 	xa_unlock(&root->delayed_nodes);
118 
119 	return NULL;
120 }
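
/*
 * Editor's sketch (not part of the source): the lookup above relies on the
 * "increment unless zero" pattern.  A minimal userspace analogue with C11
 * atomics, for a hypothetical object cache, could look like this:
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	// Try to take a reference; fail if the object is already dying.
 *	static bool ref_inc_not_zero(atomic_int *refs)
 *	{
 *		int old = atomic_load(refs);
 *
 *		while (old != 0) {
 *			// CAS so a refcount that hit zero is never resurrected.
 *			if (atomic_compare_exchange_weak(refs, &old, old + 1))
 *				return true;
 *		}
 *		return false;
 *	}
 *
 * A failed ref_inc_not_zero() corresponds to racing with
 * __btrfs_release_delayed_node() erasing the node from the xarray, and the
 * lookup must then behave as if the node was never found.
 */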
121 
122 /* Will return either the node or PTR_ERR(-ENOMEM) */
123 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
124 		struct btrfs_inode *btrfs_inode)
125 {
126 	struct btrfs_delayed_node *node;
127 	struct btrfs_root *root = btrfs_inode->root;
128 	u64 ino = btrfs_ino(btrfs_inode);
129 	int ret;
130 	void *ptr;
131 
132 again:
133 	node = btrfs_get_delayed_node(btrfs_inode);
134 	if (node)
135 		return node;
136 
137 	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
138 	if (!node)
139 		return ERR_PTR(-ENOMEM);
140 	btrfs_init_delayed_node(node, root, ino);
141 
142 	/* Cached in the inode and can be accessed. */
143 	refcount_set(&node->refs, 2);
144 
145 	/* Allocate and reserve the slot; a reserved entry still reads back as NULL from xa_load(). */
146 	ret = xa_reserve(&root->delayed_nodes, ino, GFP_NOFS);
147 	if (ret == -ENOMEM) {
148 		kmem_cache_free(delayed_node_cache, node);
149 		return ERR_PTR(-ENOMEM);
150 	}
151 	xa_lock(&root->delayed_nodes);
152 	ptr = xa_load(&root->delayed_nodes, ino);
153 	if (ptr) {
154 		/* Somebody inserted it, go back and read it. */
155 		xa_unlock(&root->delayed_nodes);
156 		kmem_cache_free(delayed_node_cache, node);
157 		node = NULL;
158 		goto again;
159 	}
160 	ptr = __xa_store(&root->delayed_nodes, ino, node, GFP_ATOMIC);
161 	ASSERT(xa_err(ptr) != -EINVAL);
162 	ASSERT(xa_err(ptr) != -ENOMEM);
163 	ASSERT(ptr == NULL);
164 	btrfs_inode->delayed_node = node;
165 	xa_unlock(&root->delayed_nodes);
166 
167 	return node;
168 }
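
/*
 * Editor's note (not in the original source): because the slot was
 * preallocated with xa_reserve() above, the __xa_store() under the lock
 * cannot fail with -ENOMEM: no allocation is needed to store into a
 * reserved slot, which is what the ASSERTs after the store encode.
 */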
169 
170 /*
171  * Call it when holding delayed_node->mutex
172  *
173  * If mod = 1, add this node into the prepared list.
174  */
175 static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
176 				     struct btrfs_delayed_node *node,
177 				     int mod)
178 {
179 	spin_lock(&root->lock);
180 	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
181 		if (!list_empty(&node->p_list))
182 			list_move_tail(&node->p_list, &root->prepare_list);
183 		else if (mod)
184 			list_add_tail(&node->p_list, &root->prepare_list);
185 	} else {
186 		list_add_tail(&node->n_list, &root->node_list);
187 		list_add_tail(&node->p_list, &root->prepare_list);
188 		refcount_inc(&node->refs);	/* inserted into list */
189 		root->nodes++;
190 		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
191 	}
192 	spin_unlock(&root->lock);
193 }
194 
195 /* Call it when holding delayed_node->mutex */
196 static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
197 				       struct btrfs_delayed_node *node)
198 {
199 	spin_lock(&root->lock);
200 	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
201 		root->nodes--;
202 		refcount_dec(&node->refs);	/* not in the list */
203 		list_del_init(&node->n_list);
204 		if (!list_empty(&node->p_list))
205 			list_del_init(&node->p_list);
206 		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
207 	}
208 	spin_unlock(&root->lock);
209 }
210 
211 static struct btrfs_delayed_node *btrfs_first_delayed_node(
212 			struct btrfs_delayed_root *delayed_root)
213 {
214 	struct list_head *p;
215 	struct btrfs_delayed_node *node = NULL;
216 
217 	spin_lock(&delayed_root->lock);
218 	if (list_empty(&delayed_root->node_list))
219 		goto out;
220 
221 	p = delayed_root->node_list.next;
222 	node = list_entry(p, struct btrfs_delayed_node, n_list);
223 	refcount_inc(&node->refs);
224 out:
225 	spin_unlock(&delayed_root->lock);
226 
227 	return node;
228 }
229 
230 static struct btrfs_delayed_node *btrfs_next_delayed_node(
231 						struct btrfs_delayed_node *node)
232 {
233 	struct btrfs_delayed_root *delayed_root;
234 	struct list_head *p;
235 	struct btrfs_delayed_node *next = NULL;
236 
237 	delayed_root = node->root->fs_info->delayed_root;
238 	spin_lock(&delayed_root->lock);
239 	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
240 		/* not in the list */
241 		if (list_empty(&delayed_root->node_list))
242 			goto out;
243 		p = delayed_root->node_list.next;
244 	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
245 		goto out;
246 	else
247 		p = node->n_list.next;
248 
249 	next = list_entry(p, struct btrfs_delayed_node, n_list);
250 	refcount_inc(&next->refs);
251 out:
252 	spin_unlock(&delayed_root->lock);
253 
254 	return next;
255 }
256 
257 static void __btrfs_release_delayed_node(
258 				struct btrfs_delayed_node *delayed_node,
259 				int mod)
260 {
261 	struct btrfs_delayed_root *delayed_root;
262 
263 	if (!delayed_node)
264 		return;
265 
266 	delayed_root = delayed_node->root->fs_info->delayed_root;
267 
268 	mutex_lock(&delayed_node->mutex);
269 	if (delayed_node->count)
270 		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
271 	else
272 		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
273 	mutex_unlock(&delayed_node->mutex);
274 
275 	if (refcount_dec_and_test(&delayed_node->refs)) {
276 		struct btrfs_root *root = delayed_node->root;
277 
278 		xa_erase(&root->delayed_nodes, delayed_node->inode_id);
279 		/*
280 		 * Once our refcount goes to zero, nobody is allowed to bump it
281 		 * back up.  We can delete it now.
282 		 */
283 		ASSERT(refcount_read(&delayed_node->refs) == 0);
284 		kmem_cache_free(delayed_node_cache, delayed_node);
285 	}
286 }
287 
288 static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
289 {
290 	__btrfs_release_delayed_node(node, 0);
291 }
292 
293 static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
294 					struct btrfs_delayed_root *delayed_root)
295 {
296 	struct list_head *p;
297 	struct btrfs_delayed_node *node = NULL;
298 
299 	spin_lock(&delayed_root->lock);
300 	if (list_empty(&delayed_root->prepare_list))
301 		goto out;
302 
303 	p = delayed_root->prepare_list.next;
304 	list_del_init(p);
305 	node = list_entry(p, struct btrfs_delayed_node, p_list);
306 	refcount_inc(&node->refs);
307 out:
308 	spin_unlock(&delayed_root->lock);
309 
310 	return node;
311 }
312 
313 static inline void btrfs_release_prepared_delayed_node(
314 					struct btrfs_delayed_node *node)
315 {
316 	__btrfs_release_delayed_node(node, 1);
317 }
318 
319 static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
320 					   struct btrfs_delayed_node *node,
321 					   enum btrfs_delayed_item_type type)
322 {
323 	struct btrfs_delayed_item *item;
324 
325 	item = kmalloc(struct_size(item, data, data_len), GFP_NOFS);
326 	if (item) {
327 		item->data_len = data_len;
328 		item->type = type;
329 		item->bytes_reserved = 0;
330 		item->delayed_node = node;
331 		RB_CLEAR_NODE(&item->rb_node);
332 		INIT_LIST_HEAD(&item->log_list);
333 		item->logged = false;
334 		refcount_set(&item->refs, 1);
335 	}
336 	return item;
337 }
338 
339 /*
340  * Look up the delayed item by key.
341  *
342  * @root:	  the rbtree root to search (insertion or deletion tree)
343  * @index:	  the dir index value to lookup (offset of a dir index key)
344  *
345  * Note: returns the delayed item if found, otherwise returns NULL; the
346  * function does not return neighbouring (prev/next) items.
347  */
348 static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
349 				struct rb_root *root,
350 				u64 index)
351 {
352 	struct rb_node *node = root->rb_node;
353 	struct btrfs_delayed_item *delayed_item = NULL;
354 
355 	while (node) {
356 		delayed_item = rb_entry(node, struct btrfs_delayed_item,
357 					rb_node);
358 		if (delayed_item->index < index)
359 			node = node->rb_right;
360 		else if (delayed_item->index > index)
361 			node = node->rb_left;
362 		else
363 			return delayed_item;
364 	}
365 
366 	return NULL;
367 }
368 
369 static int btrfs_delayed_item_cmp(const struct rb_node *new,
370 				  const struct rb_node *exist)
371 {
372 	const struct btrfs_delayed_item *new_item =
373 		rb_entry(new, struct btrfs_delayed_item, rb_node);
374 	const struct btrfs_delayed_item *exist_item =
375 		rb_entry(exist, struct btrfs_delayed_item, rb_node);
376 
377 	if (new_item->index < exist_item->index)
378 		return -1;
379 	if (new_item->index > exist_item->index)
380 		return 1;
381 	return 0;
382 }
383 
384 static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
385 				    struct btrfs_delayed_item *ins)
386 {
387 	struct rb_root_cached *root;
388 	struct rb_node *exist;
389 
390 	if (ins->type == BTRFS_DELAYED_INSERTION_ITEM)
391 		root = &delayed_node->ins_root;
392 	else
393 		root = &delayed_node->del_root;
394 
395 	exist = rb_find_add_cached(&ins->rb_node, root, btrfs_delayed_item_cmp);
396 	if (exist)
397 		return -EEXIST;
398 
399 	if (ins->type == BTRFS_DELAYED_INSERTION_ITEM &&
400 	    ins->index >= delayed_node->index_cnt)
401 		delayed_node->index_cnt = ins->index + 1;
402 
403 	delayed_node->count++;
404 	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
405 	return 0;
406 }
407 
408 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
409 {
410 	int seq = atomic_inc_return(&delayed_root->items_seq);
411 
412 	/* atomic_dec_return implies a barrier */
413 	if ((atomic_dec_return(&delayed_root->items) <
414 	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
415 		cond_wake_up_nomb(&delayed_root->wait);
416 }
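
/*
 * Editor's note (not in the original source): waking on every completed
 * item would thunder the waiters in btrfs_balance_delayed_items(); the
 * seq % BTRFS_DELAYED_BATCH check above batches wakeups so sleepers are
 * only prodded once per BTRFS_DELAYED_BATCH completions, or as soon as
 * the backlog falls below BTRFS_DELAYED_BACKGROUND.
 */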
417 
418 static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
419 {
420 	struct btrfs_delayed_node *delayed_node = delayed_item->delayed_node;
421 	struct rb_root_cached *root;
422 	struct btrfs_delayed_root *delayed_root;
423 
424 	/* Not inserted, ignore it. */
425 	if (RB_EMPTY_NODE(&delayed_item->rb_node))
426 		return;
427 
428 	/* If it's in a rbtree, then we need to have delayed node locked. */
429 	lockdep_assert_held(&delayed_node->mutex);
430 
431 	delayed_root = delayed_node->root->fs_info->delayed_root;
432 
433 	if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
434 		root = &delayed_node->ins_root;
435 	else
436 		root = &delayed_node->del_root;
437 
438 	rb_erase_cached(&delayed_item->rb_node, root);
439 	RB_CLEAR_NODE(&delayed_item->rb_node);
440 	delayed_node->count--;
441 
442 	finish_one_item(delayed_root);
443 }
444 
445 static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
446 {
447 	if (item) {
448 		__btrfs_remove_delayed_item(item);
449 		if (refcount_dec_and_test(&item->refs))
450 			kfree(item);
451 	}
452 }
453 
454 static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
455 					struct btrfs_delayed_node *delayed_node)
456 {
457 	struct rb_node *p;
458 	struct btrfs_delayed_item *item = NULL;
459 
460 	p = rb_first_cached(&delayed_node->ins_root);
461 	if (p)
462 		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
463 
464 	return item;
465 }
466 
467 static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
468 					struct btrfs_delayed_node *delayed_node)
469 {
470 	struct rb_node *p;
471 	struct btrfs_delayed_item *item = NULL;
472 
473 	p = rb_first_cached(&delayed_node->del_root);
474 	if (p)
475 		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
476 
477 	return item;
478 }
479 
480 static struct btrfs_delayed_item *__btrfs_next_delayed_item(
481 						struct btrfs_delayed_item *item)
482 {
483 	struct rb_node *p;
484 	struct btrfs_delayed_item *next = NULL;
485 
486 	p = rb_next(&item->rb_node);
487 	if (p)
488 		next = rb_entry(p, struct btrfs_delayed_item, rb_node);
489 
490 	return next;
491 }
492 
493 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
494 					       struct btrfs_delayed_item *item)
495 {
496 	struct btrfs_block_rsv *src_rsv;
497 	struct btrfs_block_rsv *dst_rsv;
498 	struct btrfs_fs_info *fs_info = trans->fs_info;
499 	u64 num_bytes;
500 	int ret;
501 
502 	if (!trans->bytes_reserved)
503 		return 0;
504 
505 	src_rsv = trans->block_rsv;
506 	dst_rsv = &fs_info->delayed_block_rsv;
507 
508 	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
509 
510 	/*
511 	 * Here we migrate the space rsv from the transaction rsv, since we
512 	 * already reserved space when starting the transaction.  So there is
513 	 * no need to reserve qgroup space here.
514 	 */
515 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
516 	if (!ret) {
517 		trace_btrfs_space_reservation(fs_info, "delayed_item",
518 					      item->delayed_node->inode_id,
519 					      num_bytes, 1);
520 		/*
521 		 * For insertions we track reserved metadata space by accounting
522 		 * for the number of leaves that will be used, based on the delayed
523 		 * node's curr_index_batch_size and index_item_leaves fields.
524 		 */
525 		if (item->type == BTRFS_DELAYED_DELETION_ITEM)
526 			item->bytes_reserved = num_bytes;
527 	}
528 
529 	return ret;
530 }
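
/*
 * Editor's note (not in the original source): only deletion items record
 * bytes_reserved on the item itself.  Insertion items are accounted per
 * leaf instead, via the delayed node's curr_index_batch_size and
 * index_item_leaves fields (see btrfs_insert_delayed_dir_index() below),
 * and their space is released with btrfs_delayed_item_release_leaves().
 */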
531 
532 static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
533 						struct btrfs_delayed_item *item)
534 {
535 	struct btrfs_block_rsv *rsv;
536 	struct btrfs_fs_info *fs_info = root->fs_info;
537 
538 	if (!item->bytes_reserved)
539 		return;
540 
541 	rsv = &fs_info->delayed_block_rsv;
542 	/*
543 	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
544 	 * to release/reserve qgroup space.
545 	 */
546 	trace_btrfs_space_reservation(fs_info, "delayed_item",
547 				      item->delayed_node->inode_id,
548 				      item->bytes_reserved, 0);
549 	btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
550 }
551 
552 static void btrfs_delayed_item_release_leaves(struct btrfs_delayed_node *node,
553 					      unsigned int num_leaves)
554 {
555 	struct btrfs_fs_info *fs_info = node->root->fs_info;
556 	const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, num_leaves);
557 
558 	/* There are no space reservations during log replay, bail out. */
559 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
560 		return;
561 
562 	trace_btrfs_space_reservation(fs_info, "delayed_item", node->inode_id,
563 				      bytes, 0);
564 	btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv, bytes, NULL);
565 }
566 
567 static int btrfs_delayed_inode_reserve_metadata(
568 					struct btrfs_trans_handle *trans,
569 					struct btrfs_root *root,
570 					struct btrfs_delayed_node *node)
571 {
572 	struct btrfs_fs_info *fs_info = root->fs_info;
573 	struct btrfs_block_rsv *src_rsv;
574 	struct btrfs_block_rsv *dst_rsv;
575 	u64 num_bytes;
576 	int ret;
577 
578 	src_rsv = trans->block_rsv;
579 	dst_rsv = &fs_info->delayed_block_rsv;
580 
581 	num_bytes = btrfs_calc_metadata_size(fs_info, 1);
582 
583 	/*
584 	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction,
585 	 * which, for speed, does not reserve space.  This is a problem since we
586 	 * still need to reserve space for this update, so try to reserve the
587 	 * space.
588 	 *
589 	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
590 	 * we always reserve enough to update the inode item.
591 	 */
592 	if (!src_rsv || (!trans->bytes_reserved &&
593 			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
594 		ret = btrfs_qgroup_reserve_meta(root, num_bytes,
595 					  BTRFS_QGROUP_RSV_META_PREALLOC, true);
596 		if (ret < 0)
597 			return ret;
598 		ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes,
599 					  BTRFS_RESERVE_NO_FLUSH);
600 		/* NO_FLUSH can only fail with -ENOSPC */
601 		ASSERT(ret == 0 || ret == -ENOSPC);
602 		if (ret)
603 			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
604 	} else {
605 		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
606 	}
607 
608 	if (!ret) {
609 		trace_btrfs_space_reservation(fs_info, "delayed_inode",
610 					      node->inode_id, num_bytes, 1);
611 		node->bytes_reserved = num_bytes;
612 	}
613 
614 	return ret;
615 }
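
/*
 * Editor's note (not in the original source): the qgroup reservation is
 * only needed in the first branch above because that path creates a
 * brand-new reservation; the migrate path merely moves bytes that were
 * already reserved (and qgroup-accounted) when the transaction started.
 */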
616 
617 static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
618 						struct btrfs_delayed_node *node,
619 						bool qgroup_free)
620 {
621 	struct btrfs_block_rsv *rsv;
622 
623 	if (!node->bytes_reserved)
624 		return;
625 
626 	rsv = &fs_info->delayed_block_rsv;
627 	trace_btrfs_space_reservation(fs_info, "delayed_inode",
628 				      node->inode_id, node->bytes_reserved, 0);
629 	btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
630 	if (qgroup_free)
631 		btrfs_qgroup_free_meta_prealloc(node->root,
632 				node->bytes_reserved);
633 	else
634 		btrfs_qgroup_convert_reserved_meta(node->root,
635 				node->bytes_reserved);
636 	node->bytes_reserved = 0;
637 }
638 
639 /*
640  * Insert a single delayed item or a batch of delayed items, as many as possible
641  * that fit in a leaf. The delayed items (dir index keys) are sorted by their key
642  * in the rbtree, and if there's a gap between two consecutive dir index items,
643  * then it means at some point we had delayed dir indexes to add but they got
644  * removed (by btrfs_delete_delayed_dir_index()) before we attempted to flush them
645  * into the subvolume tree. Dir index keys also have their offsets coming from a
646  * monotonically increasing counter, so we can't get new keys with an offset that
647  * fits within a gap between delayed dir index items.
648  */
649 static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
650 				     struct btrfs_root *root,
651 				     struct btrfs_path *path,
652 				     struct btrfs_delayed_item *first_item)
653 {
654 	struct btrfs_fs_info *fs_info = root->fs_info;
655 	struct btrfs_delayed_node *node = first_item->delayed_node;
656 	LIST_HEAD(item_list);
657 	struct btrfs_delayed_item *curr;
658 	struct btrfs_delayed_item *next;
659 	const int max_size = BTRFS_LEAF_DATA_SIZE(fs_info);
660 	struct btrfs_item_batch batch;
661 	struct btrfs_key first_key;
662 	const u32 first_data_size = first_item->data_len;
663 	int total_size;
664 	char *ins_data = NULL;
665 	int ret;
666 	bool continuous_keys_only = false;
667 
668 	lockdep_assert_held(&node->mutex);
669 
670 	/*
671 	 * During normal operation the delayed index offset is continuously
672 	 * increasing, so we can batch insert all items as there will not be any
673 	 * overlapping keys in the tree.
674 	 *
675 	 * The exception to this is log replay, where we may have interleaved
676 	 * offsets in the tree, so our batch needs to be continuous keys only in
677 	 * order to ensure we do not end up with out of order items in our leaf.
678 	 */
679 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
680 		continuous_keys_only = true;
681 
682 	/*
683 	 * For delayed items to insert, we track reserved metadata bytes based
684 	 * on the number of leaves that we will use.
685 	 * See btrfs_insert_delayed_dir_index() and
686 	 * btrfs_delayed_item_reserve_metadata().
687 	 */
688 	ASSERT(first_item->bytes_reserved == 0);
689 
690 	list_add_tail(&first_item->tree_list, &item_list);
691 	batch.total_data_size = first_data_size;
692 	batch.nr = 1;
693 	total_size = first_data_size + sizeof(struct btrfs_item);
694 	curr = first_item;
695 
696 	while (true) {
697 		int next_size;
698 
699 		next = __btrfs_next_delayed_item(curr);
700 		if (!next)
701 			break;
702 
703 		/*
704 		 * We cannot allow gaps in the key space if we're doing log
705 		 * replay.
706 		 */
707 		if (continuous_keys_only && (next->index != curr->index + 1))
708 			break;
709 
710 		ASSERT(next->bytes_reserved == 0);
711 
712 		next_size = next->data_len + sizeof(struct btrfs_item);
713 		if (total_size + next_size > max_size)
714 			break;
715 
716 		list_add_tail(&next->tree_list, &item_list);
717 		batch.nr++;
718 		total_size += next_size;
719 		batch.total_data_size += next->data_len;
720 		curr = next;
721 	}
722 
723 	if (batch.nr == 1) {
724 		first_key.objectid = node->inode_id;
725 		first_key.type = BTRFS_DIR_INDEX_KEY;
726 		first_key.offset = first_item->index;
727 		batch.keys = &first_key;
728 		batch.data_sizes = &first_data_size;
729 	} else {
730 		struct btrfs_key *ins_keys;
731 		u32 *ins_sizes;
732 		int i = 0;
733 
734 		ins_data = kmalloc(batch.nr * sizeof(u32) +
735 				   batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
736 		if (!ins_data) {
737 			ret = -ENOMEM;
738 			goto out;
739 		}
740 		ins_sizes = (u32 *)ins_data;
741 		ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
742 		batch.keys = ins_keys;
743 		batch.data_sizes = ins_sizes;
744 		list_for_each_entry(curr, &item_list, tree_list) {
745 			ins_keys[i].objectid = node->inode_id;
746 			ins_keys[i].type = BTRFS_DIR_INDEX_KEY;
747 			ins_keys[i].offset = curr->index;
748 			ins_sizes[i] = curr->data_len;
749 			i++;
750 		}
751 	}
752 
753 	ret = btrfs_insert_empty_items(trans, root, path, &batch);
754 	if (ret)
755 		goto out;
756 
757 	list_for_each_entry(curr, &item_list, tree_list) {
758 		char *data_ptr;
759 
760 		data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
761 		write_extent_buffer(path->nodes[0], &curr->data,
762 				    (unsigned long)data_ptr, curr->data_len);
763 		path->slots[0]++;
764 	}
765 
766 	/*
767 	 * Now release our path before releasing the delayed items and their
768 	 * metadata reservations, so that we don't block other tasks for more
769 	 * time than needed.
770 	 */
771 	btrfs_release_path(path);
772 
773 	ASSERT(node->index_item_leaves > 0);
774 
775 	/*
776 	 * For normal operations we will batch an entire leaf's worth of delayed
777 	 * items, so if there are more items to process we can decrement
778 	 * index_item_leaves by 1 as we inserted 1 leaf's worth of items.
779 	 *
780 	 * However for log replay we may not have inserted an entire leaf's
781 	 * worth of items, we may have not had continuous items, so decrementing
782 	 * here would mess up the index_item_leaves accounting.  For this case
783 	 * only clean up the accounting when there are no items left.
784 	 */
785 	if (next && !continuous_keys_only) {
786 		/*
787 		 * We inserted one batch of items into a leaf and there are more
788 		 * items to flush in a future batch, so now release one unit of
789 		 * metadata space from the delayed block reserve, corresponding
790 		 * to the leaf we just flushed to.
791 		 */
792 		btrfs_delayed_item_release_leaves(node, 1);
793 		node->index_item_leaves--;
794 	} else if (!next) {
795 		/*
796 		 * There are no more items to insert. We can have a number of
797 		 * reserved leaves > 1 here - this happens when many dir index
798 		 * items are added and then removed before they are flushed (file
799 		 * names with a very short life that never span a transaction). So
800 		 * release all remaining leaves.
801 		 */
802 		btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
803 		node->index_item_leaves = 0;
804 	}
805 
806 	list_for_each_entry_safe(curr, next, &item_list, tree_list) {
807 		list_del(&curr->tree_list);
808 		btrfs_release_delayed_item(curr);
809 	}
810 out:
811 	kfree(ins_data);
812 	return ret;
813 }
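
/*
 * Editor's sketch (not part of the source): the batching loop above can
 * be modelled as packing items into the leaf's data area, where each item
 * costs its payload plus an item header:
 *
 *	total_size = first->data_len + sizeof(struct btrfs_item);
 *	for each following delayed item i, in index order:
 *		next_size = i->data_len + sizeof(struct btrfs_item);
 *		if (total_size + next_size > BTRFS_LEAF_DATA_SIZE(fs_info))
 *			break;		// leave i for the next batch
 *		total_size += next_size;
 *
 * Everything accepted is then inserted with a single
 * btrfs_insert_empty_items() call: one tree search and one set of leaf
 * locks per batch rather than per item.
 */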
814 
815 static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
816 				      struct btrfs_path *path,
817 				      struct btrfs_root *root,
818 				      struct btrfs_delayed_node *node)
819 {
820 	int ret = 0;
821 
822 	while (ret == 0) {
823 		struct btrfs_delayed_item *curr;
824 
825 		mutex_lock(&node->mutex);
826 		curr = __btrfs_first_delayed_insertion_item(node);
827 		if (!curr) {
828 			mutex_unlock(&node->mutex);
829 			break;
830 		}
831 		ret = btrfs_insert_delayed_item(trans, root, path, curr);
832 		mutex_unlock(&node->mutex);
833 	}
834 
835 	return ret;
836 }
837 
838 static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
839 				    struct btrfs_root *root,
840 				    struct btrfs_path *path,
841 				    struct btrfs_delayed_item *item)
842 {
843 	const u64 ino = item->delayed_node->inode_id;
844 	struct btrfs_fs_info *fs_info = root->fs_info;
845 	struct btrfs_delayed_item *curr, *next;
846 	struct extent_buffer *leaf = path->nodes[0];
847 	LIST_HEAD(batch_list);
848 	int nitems, slot, last_slot;
849 	int ret;
850 	u64 total_reserved_size = item->bytes_reserved;
851 
852 	ASSERT(leaf != NULL);
853 
854 	slot = path->slots[0];
855 	last_slot = btrfs_header_nritems(leaf) - 1;
856 	/*
857 	 * Our caller always gives us a path pointing to an existing item, so
858 	 * this cannot happen.
859 	 */
860 	ASSERT(slot <= last_slot);
861 	if (WARN_ON(slot > last_slot))
862 		return -ENOENT;
863 
864 	nitems = 1;
865 	curr = item;
866 	list_add_tail(&curr->tree_list, &batch_list);
867 
868 	/*
869 	 * Keep checking if the next delayed item matches the next item in the
870 	 * leaf - if so, we can add it to the batch of items to delete from the
871 	 * leaf.
872 	 */
873 	while (slot < last_slot) {
874 		struct btrfs_key key;
875 
876 		next = __btrfs_next_delayed_item(curr);
877 		if (!next)
878 			break;
879 
880 		slot++;
881 		btrfs_item_key_to_cpu(leaf, &key, slot);
882 		if (key.objectid != ino ||
883 		    key.type != BTRFS_DIR_INDEX_KEY ||
884 		    key.offset != next->index)
885 			break;
886 		nitems++;
887 		curr = next;
888 		list_add_tail(&curr->tree_list, &batch_list);
889 		total_reserved_size += curr->bytes_reserved;
890 	}
891 
892 	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
893 	if (ret)
894 		return ret;
895 
896 	/* In case of BTRFS_FS_LOG_RECOVERING, items won't have reserved space. */
897 	if (total_reserved_size > 0) {
898 		/*
899 		 * Check btrfs_delayed_item_reserve_metadata() to see why we
900 		 * don't need to release/reserve qgroup space.
901 		 */
902 		trace_btrfs_space_reservation(fs_info, "delayed_item", ino,
903 					      total_reserved_size, 0);
904 		btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv,
905 					total_reserved_size, NULL);
906 	}
907 
908 	list_for_each_entry_safe(curr, next, &batch_list, tree_list) {
909 		list_del(&curr->tree_list);
910 		btrfs_release_delayed_item(curr);
911 	}
912 
913 	return 0;
914 }
915 
916 static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
917 				      struct btrfs_path *path,
918 				      struct btrfs_root *root,
919 				      struct btrfs_delayed_node *node)
920 {
921 	struct btrfs_key key;
922 	int ret = 0;
923 
924 	key.objectid = node->inode_id;
925 	key.type = BTRFS_DIR_INDEX_KEY;
926 
927 	while (ret == 0) {
928 		struct btrfs_delayed_item *item;
929 
930 		mutex_lock(&node->mutex);
931 		item = __btrfs_first_delayed_deletion_item(node);
932 		if (!item) {
933 			mutex_unlock(&node->mutex);
934 			break;
935 		}
936 
937 		key.offset = item->index;
938 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
939 		if (ret > 0) {
940 			/*
941 			 * There's no matching item in the leaf. This means we
942 			 * have already deleted this item in a past run of the
943 			 * delayed items. We ignore errors when running delayed
944 			 * items from an async context, through a work queue job
945 			 * running btrfs_async_run_delayed_root(), and don't
946 			 * release delayed items that failed to complete. This
947 			 * is because we will retry later, and at transaction
948 			 * commit time we always run delayed items and will
949 			 * then deal with errors if they fail to run again.
950 			 *
951 			 * So just release delayed items for which we can't find
952 			 * an item in the tree, and move to the next item.
953 			 */
954 			btrfs_release_path(path);
955 			btrfs_release_delayed_item(item);
956 			ret = 0;
957 		} else if (ret == 0) {
958 			ret = btrfs_batch_delete_items(trans, root, path, item);
959 			btrfs_release_path(path);
960 		}
961 
962 		/*
963 		 * We unlock and relock on each iteration, this is to prevent
964 		 * blocking other tasks for too long while we are being run from
965 		 * the async context (work queue job). Those tasks are typically
966 		 * running system calls like creat/mkdir/rename/unlink/etc which
967 		 * need to add delayed items to this delayed node.
968 		 */
969 		mutex_unlock(&node->mutex);
970 	}
971 
972 	return ret;
973 }
974 
975 static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
976 {
977 	struct btrfs_delayed_root *delayed_root;
978 
979 	if (delayed_node &&
980 	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
981 		ASSERT(delayed_node->root);
982 		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
983 		delayed_node->count--;
984 
985 		delayed_root = delayed_node->root->fs_info->delayed_root;
986 		finish_one_item(delayed_root);
987 	}
988 }
989 
990 static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
991 {
992 
993 	if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
994 		struct btrfs_delayed_root *delayed_root;
995 
996 		ASSERT(delayed_node->root);
997 		delayed_node->count--;
998 
999 		delayed_root = delayed_node->root->fs_info->delayed_root;
1000 		finish_one_item(delayed_root);
1001 	}
1002 }
1003 
1004 static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1005 					struct btrfs_root *root,
1006 					struct btrfs_path *path,
1007 					struct btrfs_delayed_node *node)
1008 {
1009 	struct btrfs_fs_info *fs_info = root->fs_info;
1010 	struct btrfs_key key;
1011 	struct btrfs_inode_item *inode_item;
1012 	struct extent_buffer *leaf;
1013 	int mod;
1014 	int ret;
1015 
1016 	key.objectid = node->inode_id;
1017 	key.type = BTRFS_INODE_ITEM_KEY;
1018 	key.offset = 0;
1019 
1020 	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1021 		mod = -1;
1022 	else
1023 		mod = 1;
1024 
1025 	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1026 	if (ret > 0)
1027 		ret = -ENOENT;
1028 	if (ret < 0)
1029 		goto out;
1030 
1031 	leaf = path->nodes[0];
1032 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
1033 				    struct btrfs_inode_item);
1034 	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1035 			    sizeof(struct btrfs_inode_item));
1036 
1037 	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1038 		goto out;
1039 
1040 	/*
1041 	 * Now we're going to delete the INODE_REF/EXTREF, which should be the
1042 	 * only ref left.  Check if the next item is an INODE_REF/EXTREF.
1043 	 *
1044 	 * But if we're the last item already, release and search for the last
1045 	 * INODE_REF/EXTREF.
1046 	 */
1047 	if (path->slots[0] + 1 >= btrfs_header_nritems(leaf)) {
1048 		key.objectid = node->inode_id;
1049 		key.type = BTRFS_INODE_EXTREF_KEY;
1050 		key.offset = (u64)-1;
1051 
1052 		btrfs_release_path(path);
1053 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1054 		if (ret < 0)
1055 			goto err_out;
1056 		ASSERT(ret > 0);
1057 		ASSERT(path->slots[0] > 0);
1058 		ret = 0;
1059 		path->slots[0]--;
1060 		leaf = path->nodes[0];
1061 	} else {
1062 		path->slots[0]++;
1063 	}
1064 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1065 	if (key.objectid != node->inode_id)
1066 		goto out;
1067 	if (key.type != BTRFS_INODE_REF_KEY &&
1068 	    key.type != BTRFS_INODE_EXTREF_KEY)
1069 		goto out;
1070 
1071 	/*
1072 	 * Delayed iref deletion is for the inode who has only one link,
1073 	 * so there is only one iref. The case that several irefs are
1074 	 * in the same item doesn't exist.
1075 	 */
1076 	ret = btrfs_del_item(trans, root, path);
1077 out:
1078 	btrfs_release_delayed_iref(node);
1079 	btrfs_release_path(path);
1080 err_out:
1081 	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
1082 	btrfs_release_delayed_inode(node);
1083 
1084 	/*
1085 	 * If we fail to update the delayed inode we need to abort the
1086 	 * transaction, because we could leave the inode with the improper
1087 	 * counts behind.
1088 	 */
1089 	if (ret && ret != -ENOENT)
1090 		btrfs_abort_transaction(trans, ret);
1091 
1092 	return ret;
1093 }
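
/*
 * Editor's note (not in the original source): the slot arithmetic above
 * relies on fs tree key ordering.  For a given inode number,
 * (ino, BTRFS_INODE_ITEM_KEY, 0) sorts before any
 * (ino, BTRFS_INODE_REF_KEY, ...) or (ino, BTRFS_INODE_EXTREF_KEY, ...)
 * keys, so searching for (ino, BTRFS_INODE_EXTREF_KEY, (u64)-1) and
 * stepping back one slot lands on the inode's last REF/EXTREF item, if
 * any exists.
 */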
1094 
1095 static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1096 					     struct btrfs_root *root,
1097 					     struct btrfs_path *path,
1098 					     struct btrfs_delayed_node *node)
1099 {
1100 	int ret;
1101 
1102 	mutex_lock(&node->mutex);
1103 	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1104 		mutex_unlock(&node->mutex);
1105 		return 0;
1106 	}
1107 
1108 	ret = __btrfs_update_delayed_inode(trans, root, path, node);
1109 	mutex_unlock(&node->mutex);
1110 	return ret;
1111 }
1112 
1113 static inline int
1114 __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1115 				   struct btrfs_path *path,
1116 				   struct btrfs_delayed_node *node)
1117 {
1118 	int ret;
1119 
1120 	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1121 	if (ret)
1122 		return ret;
1123 
1124 	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1125 	if (ret)
1126 		return ret;
1127 
1128 	ret = btrfs_record_root_in_trans(trans, node->root);
1129 	if (ret)
1130 		return ret;
1131 	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1132 	return ret;
1133 }
1134 
1135 /*
1136  * Called when committing the transaction.
1137  * Returns 0 on success.
1138  * Returns < 0 on error and returns with an aborted transaction with any
1139  * outstanding delayed items cleaned up.
1140  */
1141 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1142 {
1143 	struct btrfs_fs_info *fs_info = trans->fs_info;
1144 	struct btrfs_delayed_root *delayed_root;
1145 	struct btrfs_delayed_node *curr_node, *prev_node;
1146 	struct btrfs_path *path;
1147 	struct btrfs_block_rsv *block_rsv;
1148 	int ret = 0;
1149 	bool count = (nr > 0);
1150 
1151 	if (TRANS_ABORTED(trans))
1152 		return -EIO;
1153 
1154 	path = btrfs_alloc_path();
1155 	if (!path)
1156 		return -ENOMEM;
1157 
1158 	block_rsv = trans->block_rsv;
1159 	trans->block_rsv = &fs_info->delayed_block_rsv;
1160 
1161 	delayed_root = fs_info->delayed_root;
1162 
1163 	curr_node = btrfs_first_delayed_node(delayed_root);
1164 	while (curr_node && (!count || nr--)) {
1165 		ret = __btrfs_commit_inode_delayed_items(trans, path,
1166 							 curr_node);
1167 		if (ret) {
1168 			btrfs_abort_transaction(trans, ret);
1169 			break;
1170 		}
1171 
1172 		prev_node = curr_node;
1173 		curr_node = btrfs_next_delayed_node(curr_node);
1174 		/*
1175 		 * See the comment below about releasing path before releasing
1176 		 * node. If the commit of delayed items was successful the path
1177 		 * should always be released, but in case of an error, it may
1178 		 * point to locked extent buffers (a leaf at the very least).
1179 		 */
1180 		ASSERT(path->nodes[0] == NULL);
1181 		btrfs_release_delayed_node(prev_node);
1182 	}
1183 
1184 	/*
1185 	 * Release the path to avoid a potential deadlock and lockdep splat when
1186 	 * releasing the delayed node, as that requires taking the delayed node's
1187 	 * mutex. If another task starts running delayed items before we take
1188 	 * the mutex, it will first lock the mutex and then it may try to lock
1189 	 * the same btree path (leaf).
1190 	 */
1191 	btrfs_free_path(path);
1192 
1193 	if (curr_node)
1194 		btrfs_release_delayed_node(curr_node);
1195 	trans->block_rsv = block_rsv;
1196 
1197 	return ret;
1198 }
1199 
1200 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1201 {
1202 	return __btrfs_run_delayed_items(trans, -1);
1203 }
1204 
1205 int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1206 {
1207 	return __btrfs_run_delayed_items(trans, nr);
1208 }
1209 
1210 int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1211 				     struct btrfs_inode *inode)
1212 {
1213 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1214 	struct btrfs_path *path;
1215 	struct btrfs_block_rsv *block_rsv;
1216 	int ret;
1217 
1218 	if (!delayed_node)
1219 		return 0;
1220 
1221 	mutex_lock(&delayed_node->mutex);
1222 	if (!delayed_node->count) {
1223 		mutex_unlock(&delayed_node->mutex);
1224 		btrfs_release_delayed_node(delayed_node);
1225 		return 0;
1226 	}
1227 	mutex_unlock(&delayed_node->mutex);
1228 
1229 	path = btrfs_alloc_path();
1230 	if (!path) {
1231 		btrfs_release_delayed_node(delayed_node);
1232 		return -ENOMEM;
1233 	}
1234 
1235 	block_rsv = trans->block_rsv;
1236 	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1237 
1238 	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1239 
1240 	btrfs_release_delayed_node(delayed_node);
1241 	btrfs_free_path(path);
1242 	trans->block_rsv = block_rsv;
1243 
1244 	return ret;
1245 }
1246 
1247 int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1248 {
1249 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1250 	struct btrfs_trans_handle *trans;
1251 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1252 	struct btrfs_path *path;
1253 	struct btrfs_block_rsv *block_rsv;
1254 	int ret;
1255 
1256 	if (!delayed_node)
1257 		return 0;
1258 
1259 	mutex_lock(&delayed_node->mutex);
1260 	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1261 		mutex_unlock(&delayed_node->mutex);
1262 		btrfs_release_delayed_node(delayed_node);
1263 		return 0;
1264 	}
1265 	mutex_unlock(&delayed_node->mutex);
1266 
1267 	trans = btrfs_join_transaction(delayed_node->root);
1268 	if (IS_ERR(trans)) {
1269 		ret = PTR_ERR(trans);
1270 		goto out;
1271 	}
1272 
1273 	path = btrfs_alloc_path();
1274 	if (!path) {
1275 		ret = -ENOMEM;
1276 		goto trans_out;
1277 	}
1278 
1279 	block_rsv = trans->block_rsv;
1280 	trans->block_rsv = &fs_info->delayed_block_rsv;
1281 
1282 	mutex_lock(&delayed_node->mutex);
1283 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1284 		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1285 						   path, delayed_node);
1286 	else
1287 		ret = 0;
1288 	mutex_unlock(&delayed_node->mutex);
1289 
1290 	btrfs_free_path(path);
1291 	trans->block_rsv = block_rsv;
1292 trans_out:
1293 	btrfs_end_transaction(trans);
1294 	btrfs_btree_balance_dirty(fs_info);
1295 out:
1296 	btrfs_release_delayed_node(delayed_node);
1297 
1298 	return ret;
1299 }
1300 
1301 void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1302 {
1303 	struct btrfs_delayed_node *delayed_node;
1304 
1305 	delayed_node = READ_ONCE(inode->delayed_node);
1306 	if (!delayed_node)
1307 		return;
1308 
1309 	inode->delayed_node = NULL;
1310 	btrfs_release_delayed_node(delayed_node);
1311 }
1312 
1313 struct btrfs_async_delayed_work {
1314 	struct btrfs_delayed_root *delayed_root;
1315 	int nr;
1316 	struct btrfs_work work;
1317 };
1318 
1319 static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1320 {
1321 	struct btrfs_async_delayed_work *async_work;
1322 	struct btrfs_delayed_root *delayed_root;
1323 	struct btrfs_trans_handle *trans;
1324 	struct btrfs_path *path;
1325 	struct btrfs_delayed_node *delayed_node = NULL;
1326 	struct btrfs_root *root;
1327 	struct btrfs_block_rsv *block_rsv;
1328 	int total_done = 0;
1329 
1330 	async_work = container_of(work, struct btrfs_async_delayed_work, work);
1331 	delayed_root = async_work->delayed_root;
1332 
1333 	path = btrfs_alloc_path();
1334 	if (!path)
1335 		goto out;
1336 
1337 	do {
1338 		if (atomic_read(&delayed_root->items) <
1339 		    BTRFS_DELAYED_BACKGROUND / 2)
1340 			break;
1341 
1342 		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1343 		if (!delayed_node)
1344 			break;
1345 
1346 		root = delayed_node->root;
1347 
1348 		trans = btrfs_join_transaction(root);
1349 		if (IS_ERR(trans)) {
1350 			btrfs_release_path(path);
1351 			btrfs_release_prepared_delayed_node(delayed_node);
1352 			total_done++;
1353 			continue;
1354 		}
1355 
1356 		block_rsv = trans->block_rsv;
1357 		trans->block_rsv = &root->fs_info->delayed_block_rsv;
1358 
1359 		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1360 
1361 		trans->block_rsv = block_rsv;
1362 		btrfs_end_transaction(trans);
1363 		btrfs_btree_balance_dirty_nodelay(root->fs_info);
1364 
1365 		btrfs_release_path(path);
1366 		btrfs_release_prepared_delayed_node(delayed_node);
1367 		total_done++;
1368 
1369 	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1370 		 || total_done < async_work->nr);
1371 
1372 	btrfs_free_path(path);
1373 out:
1374 	wake_up(&delayed_root->wait);
1375 	kfree(async_work);
1376 }
1377 
1378 
1379 static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1380 				     struct btrfs_fs_info *fs_info, int nr)
1381 {
1382 	struct btrfs_async_delayed_work *async_work;
1383 
1384 	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1385 	if (!async_work)
1386 		return -ENOMEM;
1387 
1388 	async_work->delayed_root = delayed_root;
1389 	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL);
1390 	async_work->nr = nr;
1391 
1392 	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1393 	return 0;
1394 }
1395 
1396 void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1397 {
1398 	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1399 }
1400 
1401 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1402 {
1403 	int val = atomic_read(&delayed_root->items_seq);
1404 
1405 	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1406 		return 1;
1407 
1408 	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1409 		return 1;
1410 
1411 	return 0;
1412 }
1413 
1414 void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1415 {
1416 	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1417 
1418 	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1419 		btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1420 		return;
1421 
1422 	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1423 		int seq;
1424 		int ret;
1425 
1426 		seq = atomic_read(&delayed_root->items_seq);
1427 
1428 		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1429 		if (ret)
1430 			return;
1431 
1432 		wait_event_interruptible(delayed_root->wait,
1433 					 could_end_wait(delayed_root, seq));
1434 		return;
1435 	}
1436 
1437 	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1438 }
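
/*
 * Editor's sketch (not part of the source): the producer-side throttling
 * above behaves roughly as follows:
 *
 *	items = atomic_read(&delayed_root->items);
 *	if (items < BTRFS_DELAYED_BACKGROUND)		// < 128
 *		return;					// no pressure yet
 *	if (items >= BTRFS_DELAYED_WRITEBACK) {		// >= 512
 *		kick async flushing of everything (nr == 0);
 *		sleep until could_end_wait(): the backlog fell below
 *		BTRFS_DELAYED_BACKGROUND, or BTRFS_DELAYED_BATCH items
 *		completed since items_seq was sampled;
 *	} else {
 *		kick async flushing of BTRFS_DELAYED_BATCH items;
 *	}
 *
 * This keeps syscall paths (create/unlink/rename/...) from building an
 * unbounded backlog of delayed items while only blocking them under
 * heavy pressure.
 */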
1439 
1440 static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
1441 {
1442 	struct btrfs_fs_info *fs_info = trans->fs_info;
1443 	const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
1444 
1445 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1446 		return;
1447 
1448 	/*
1449 	 * Adding the new dir index item does not require touching another
1450 	 * leaf, so we can release 1 unit of metadata that was previously
1451 	 * reserved when starting the transaction. This applies only to
1452 	 * the case where we had a transaction start and excludes the
1453 	 * transaction join case (when replaying log trees).
1454 	 */
1455 	trace_btrfs_space_reservation(fs_info, "transaction",
1456 				      trans->transid, bytes, 0);
1457 	btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
1458 	ASSERT(trans->bytes_reserved >= bytes);
1459 	trans->bytes_reserved -= bytes;
1460 }
1461 
1462 /* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */
1463 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1464 				   const char *name, int name_len,
1465 				   struct btrfs_inode *dir,
1466 				   const struct btrfs_disk_key *disk_key, u8 flags,
1467 				   u64 index)
1468 {
1469 	struct btrfs_fs_info *fs_info = trans->fs_info;
1470 	const unsigned int leaf_data_size = BTRFS_LEAF_DATA_SIZE(fs_info);
1471 	struct btrfs_delayed_node *delayed_node;
1472 	struct btrfs_delayed_item *delayed_item;
1473 	struct btrfs_dir_item *dir_item;
1474 	bool reserve_leaf_space;
1475 	u32 data_len;
1476 	int ret;
1477 
1478 	delayed_node = btrfs_get_or_create_delayed_node(dir);
1479 	if (IS_ERR(delayed_node))
1480 		return PTR_ERR(delayed_node);
1481 
1482 	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len,
1483 						delayed_node,
1484 						BTRFS_DELAYED_INSERTION_ITEM);
1485 	if (!delayed_item) {
1486 		ret = -ENOMEM;
1487 		goto release_node;
1488 	}
1489 
1490 	delayed_item->index = index;
1491 
1492 	dir_item = (struct btrfs_dir_item *)delayed_item->data;
1493 	dir_item->location = *disk_key;
1494 	btrfs_set_stack_dir_transid(dir_item, trans->transid);
1495 	btrfs_set_stack_dir_data_len(dir_item, 0);
1496 	btrfs_set_stack_dir_name_len(dir_item, name_len);
1497 	btrfs_set_stack_dir_flags(dir_item, flags);
1498 	memcpy((char *)(dir_item + 1), name, name_len);
1499 
1500 	data_len = delayed_item->data_len + sizeof(struct btrfs_item);
1501 
1502 	mutex_lock(&delayed_node->mutex);
1503 
1504 	/*
1505 	 * First attempt to insert the delayed item. This is to make the error
1506 	 * handling path simpler in case we fail (-EEXIST). There's no risk of
1507 	 * any other task coming in and running the delayed item before we do
1508 	 * the metadata space reservation below, because we are holding the
1509 	 * delayed node's mutex and that mutex must also be locked before the
1510 	 * node's delayed items can be run.
1511 	 */
1512 	ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
1513 	if (unlikely(ret)) {
1514 		btrfs_err(trans->fs_info,
1515 "error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
1516 			  name_len, name, index, btrfs_root_id(delayed_node->root),
1517 			  delayed_node->inode_id, dir->index_cnt,
1518 			  delayed_node->index_cnt, ret);
1519 		btrfs_release_delayed_item(delayed_item);
1520 		btrfs_release_dir_index_item_space(trans);
1521 		mutex_unlock(&delayed_node->mutex);
1522 		goto release_node;
1523 	}
1524 
1525 	if (delayed_node->index_item_leaves == 0 ||
1526 	    delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
1527 		delayed_node->curr_index_batch_size = data_len;
1528 		reserve_leaf_space = true;
1529 	} else {
1530 		delayed_node->curr_index_batch_size += data_len;
1531 		reserve_leaf_space = false;
1532 	}
1533 
1534 	if (reserve_leaf_space) {
1535 		ret = btrfs_delayed_item_reserve_metadata(trans, delayed_item);
1536 		/*
1537 		 * Space was reserved for a dir index item insertion when we
1538 		 * started the transaction, so getting a failure here should be
1539 		 * impossible.
1540 		 */
1541 		if (WARN_ON(ret)) {
1542 			btrfs_release_delayed_item(delayed_item);
1543 			mutex_unlock(&delayed_node->mutex);
1544 			goto release_node;
1545 		}
1546 
1547 		delayed_node->index_item_leaves++;
1548 	} else {
1549 		btrfs_release_dir_index_item_space(trans);
1550 	}
1551 	mutex_unlock(&delayed_node->mutex);
1552 
1553 release_node:
1554 	btrfs_release_delayed_node(delayed_node);
1555 	return ret;
1556 }
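
/*
 * Editor's sketch (not part of the source): the leaf-based accounting
 * above can be read as "reserve metadata one leaf at a time".  With
 * hypothetical sizes (leaf_data_size = 16384, every dir index item
 * occupying data_len + sizeof(struct btrfs_item) = 100 bytes):
 *
 *	item 1:   batch 0 -> 100, no leaf yet   -> reserve leaf #1
 *	item 2:   batch 100 -> 200              -> release the per-item unit
 *	                                           from the transaction rsv
 *	...
 *	item 164: batch would reach 16400 > 16384 -> reserve leaf #2,
 *	                                             batch restarts at 100
 *
 * So index_item_leaves counts whole reserved leaves, and
 * curr_index_batch_size tracks how full the "current" leaf is; the
 * deletion path (btrfs_delete_delayed_insertion_item() below) can only
 * undo items from the current batch when a single leaf is reserved.
 */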
1557 
1558 static int btrfs_delete_delayed_insertion_item(struct btrfs_delayed_node *node,
1559 					       u64 index)
1560 {
1561 	struct btrfs_delayed_item *item;
1562 
1563 	mutex_lock(&node->mutex);
1564 	item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, index);
1565 	if (!item) {
1566 		mutex_unlock(&node->mutex);
1567 		return 1;
1568 	}
1569 
1570 	/*
1571 	 * For delayed items to insert, we track reserved metadata bytes based
1572 	 * on the number of leaves that we will use.
1573 	 * See btrfs_insert_delayed_dir_index() and
1574 	 * btrfs_delayed_item_reserve_metadata().
1575 	 */
1576 	ASSERT(item->bytes_reserved == 0);
1577 	ASSERT(node->index_item_leaves > 0);
1578 
1579 	/*
1580 	 * If there's only one leaf reserved, we can decrement this item from the
1581 	 * current batch, otherwise we can not because we don't know which leaf
1582 	 * it belongs to. With the current limit on delayed items, we rarely
1583 	 * accumulate enough dir index items to fill more than one leaf (even
1584 	 * when using a leaf size of 4K).
1585 	 */
1586 	if (node->index_item_leaves == 1) {
1587 		const u32 data_len = item->data_len + sizeof(struct btrfs_item);
1588 
1589 		ASSERT(node->curr_index_batch_size >= data_len);
1590 		node->curr_index_batch_size -= data_len;
1591 	}
1592 
1593 	btrfs_release_delayed_item(item);
1594 
1595 	/* If we now have no more dir index items, we can release all leaves. */
1596 	if (RB_EMPTY_ROOT(&node->ins_root.rb_root)) {
1597 		btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
1598 		node->index_item_leaves = 0;
1599 	}
1600 
1601 	mutex_unlock(&node->mutex);
1602 	return 0;
1603 }
1604 
1605 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1606 				   struct btrfs_inode *dir, u64 index)
1607 {
1608 	struct btrfs_delayed_node *node;
1609 	struct btrfs_delayed_item *item;
1610 	int ret;
1611 
1612 	node = btrfs_get_or_create_delayed_node(dir);
1613 	if (IS_ERR(node))
1614 		return PTR_ERR(node);
1615 
1616 	ret = btrfs_delete_delayed_insertion_item(node, index);
1617 	if (!ret)
1618 		goto end;
1619 
1620 	item = btrfs_alloc_delayed_item(0, node, BTRFS_DELAYED_DELETION_ITEM);
1621 	if (!item) {
1622 		ret = -ENOMEM;
1623 		goto end;
1624 	}
1625 
1626 	item->index = index;
1627 
1628 	ret = btrfs_delayed_item_reserve_metadata(trans, item);
1629 	/*
1630 	 * We reserved enough space when we started the transaction, so a
1631 	 * metadata reservation failure here should be impossible.
1632 	 */
1633 	if (ret < 0) {
1634 		btrfs_err(trans->fs_info,
1635 "metadata reservation failed for delayed dir item deltiona, should have been reserved");
1636 		btrfs_release_delayed_item(item);
1637 		goto end;
1638 	}
1639 
1640 	mutex_lock(&node->mutex);
1641 	ret = __btrfs_add_delayed_item(node, item);
1642 	if (unlikely(ret)) {
1643 		btrfs_err(trans->fs_info,
1644 			  "failed to add delayed dir index item (index: %llu) to the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1645 			  index, btrfs_root_id(node->root),
1646 			  node->inode_id, ret);
1647 		btrfs_delayed_item_release_metadata(dir->root, item);
1648 		btrfs_release_delayed_item(item);
1649 	}
1650 	mutex_unlock(&node->mutex);
1651 end:
1652 	btrfs_release_delayed_node(node);
1653 	return ret;
1654 }
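
/*
 * Minimal caller sketch (modeled on the unlink path in inode.c, details
 * vary by kernel version): the removal of the dir index item itself is
 * deferred, and errors are fatal for the transaction.
 *
 *   ret = btrfs_delete_delayed_dir_index(trans, dir, index);
 *   if (ret)
 *           btrfs_abort_transaction(trans, ret);
 */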
1655 
1656 int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1657 {
1658 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1659 
1660 	if (!delayed_node)
1661 		return -ENOENT;
1662 
1663 	/*
1664 	 * Since we hold the i_mutex of this directory, no new directory index
1665 	 * can be added to the delayed node and index_cnt can't be updated
1666 	 * concurrently, so we don't need to lock the delayed node.
1667 	 */
1668 	if (!delayed_node->index_cnt) {
1669 		btrfs_release_delayed_node(delayed_node);
1670 		return -EINVAL;
1671 	}
1672 
1673 	inode->index_cnt = delayed_node->index_cnt;
1674 	btrfs_release_delayed_node(delayed_node);
1675 	return 0;
1676 }
1677 
1678 bool btrfs_readdir_get_delayed_items(struct btrfs_inode *inode,
1679 				     u64 last_index,
1680 				     struct list_head *ins_list,
1681 				     struct list_head *del_list)
1682 {
1683 	struct btrfs_delayed_node *delayed_node;
1684 	struct btrfs_delayed_item *item;
1685 
1686 	delayed_node = btrfs_get_delayed_node(inode);
1687 	if (!delayed_node)
1688 		return false;
1689 
1690 	/*
1691 	 * We can only do one readdir with delayed items at a time because of
1692 	 * item->readdir_list.
1693 	 */
1694 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
1695 	btrfs_inode_lock(inode, 0);
1696 
1697 	mutex_lock(&delayed_node->mutex);
1698 	item = __btrfs_first_delayed_insertion_item(delayed_node);
1699 	while (item && item->index <= last_index) {
1700 		refcount_inc(&item->refs);
1701 		list_add_tail(&item->readdir_list, ins_list);
1702 		item = __btrfs_next_delayed_item(item);
1703 	}
1704 
1705 	item = __btrfs_first_delayed_deletion_item(delayed_node);
1706 	while (item && item->index <= last_index) {
1707 		refcount_inc(&item->refs);
1708 		list_add_tail(&item->readdir_list, del_list);
1709 		item = __btrfs_next_delayed_item(item);
1710 	}
1711 	mutex_unlock(&delayed_node->mutex);
1712 	/*
1713 	 * This delayed node is still cached in the btrfs inode, so refs
1714 	 * must be > 1 now, and we don't need to check whether it is about
1715 	 * to be freed.
1716 	 *
1717 	 * Besides that, this function is used for readdir, and we don't
1718 	 * insert/delete delayed items during that period, so we don't need
1719 	 * to requeue or dequeue this delayed node either.
1720 	 */
1721 	refcount_dec(&delayed_node->refs);
1722 
1723 	return true;
1724 }
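
/*
 * Note the lock juggling above: readdir enters with the inode lock held
 * shared, but item->readdir_list allows only one readdir with delayed
 * items at a time, so the lock is upgraded to exclusive here and
 * btrfs_readdir_put_delayed_items() later downgrades it again, since the
 * VFS will do up_read(). Caller sketch (modeled on btrfs_real_readdir()
 * in inode.c):
 *
 *   LIST_HEAD(ins_list);
 *   LIST_HEAD(del_list);
 *   bool put;
 *
 *   put = btrfs_readdir_get_delayed_items(inode, last_index,
 *                                         &ins_list, &del_list);
 *   // ... walk the btree and emit entries ...
 *   if (put)
 *           btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
 */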
1725 
1726 void btrfs_readdir_put_delayed_items(struct btrfs_inode *inode,
1727 				     struct list_head *ins_list,
1728 				     struct list_head *del_list)
1729 {
1730 	struct btrfs_delayed_item *curr, *next;
1731 
1732 	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1733 		list_del(&curr->readdir_list);
1734 		if (refcount_dec_and_test(&curr->refs))
1735 			kfree(curr);
1736 	}
1737 
1738 	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1739 		list_del(&curr->readdir_list);
1740 		if (refcount_dec_and_test(&curr->refs))
1741 			kfree(curr);
1742 	}
1743 
1744 	/*
1745 	 * The VFS is going to do up_read(), so we need to downgrade back to a
1746 	 * read lock.
1747 	 */
1748 	downgrade_write(&inode->vfs_inode.i_rwsem);
1749 }
1750 
1751 int btrfs_should_delete_dir_index(const struct list_head *del_list,
1752 				  u64 index)
1753 {
1754 	struct btrfs_delayed_item *curr;
1755 	int ret = 0;
1756 
1757 	list_for_each_entry(curr, del_list, readdir_list) {
1758 		if (curr->index > index)
1759 			break;
1760 		if (curr->index == index) {
1761 			ret = 1;
1762 			break;
1763 		}
1764 	}
1765 	return ret;
1766 }
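
/*
 * Typical use while walking the on-disk dir index items during readdir
 * (sketch; found_key comes from the btree walk): entries with a pending
 * delayed deletion are skipped instead of being emitted.
 *
 *   if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
 *           continue;
 */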
1767 
1768 /*
1769  * Read dir info stored in the delayed tree.
1770  */
1771 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1772 				    const struct list_head *ins_list)
1773 {
1774 	struct btrfs_dir_item *di;
1775 	struct btrfs_delayed_item *curr, *next;
1776 	struct btrfs_key location;
1777 	char *name;
1778 	int name_len;
1779 	int over = 0;
1780 	unsigned char d_type;
1781 
1782 	/*
1783 	 * The data of the delayed items can't change, so we don't need to
1784 	 * lock them. And since we hold the i_mutex of the directory, nobody
1785 	 * can delete any directory index now.
1786 	 */
1787 	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1788 		list_del(&curr->readdir_list);
1789 
1790 		if (curr->index < ctx->pos) {
1791 			if (refcount_dec_and_test(&curr->refs))
1792 				kfree(curr);
1793 			continue;
1794 		}
1795 
1796 		ctx->pos = curr->index;
1797 
1798 		di = (struct btrfs_dir_item *)curr->data;
1799 		name = (char *)(di + 1);
1800 		name_len = btrfs_stack_dir_name_len(di);
1801 
1802 		d_type = fs_ftype_to_dtype(btrfs_dir_flags_to_ftype(di->type));
1803 		btrfs_disk_key_to_cpu(&location, &di->location);
1804 
1805 		over = !dir_emit(ctx, name, name_len,
1806 			       location.objectid, d_type);
1807 
1808 		if (refcount_dec_and_test(&curr->refs))
1809 			kfree(curr);
1810 
1811 		if (over)
1812 			return 1;
1813 		ctx->pos++;
1814 	}
1815 	return 0;
1816 }
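
/*
 * Worked example of the ctx->pos handling above (hypothetical indexes):
 * with delayed entries at indexes 5 and 9 and ctx->pos == 7 on entry,
 * index 5 is skipped and freed, ctx->pos becomes 9, that entry is
 * emitted, and ctx->pos is left at 10 so the next getdents() call
 * resumes after it. The lists are index-ordered, so this mirrors the
 * btree walk exactly.
 */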
1817 
1818 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1819 				  struct btrfs_inode_item *inode_item,
1820 				  struct inode *inode)
1821 {
1822 	u64 flags;
1823 
1824 	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1825 	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1826 	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1827 	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1828 	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1829 	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1830 	btrfs_set_stack_inode_generation(inode_item,
1831 					 BTRFS_I(inode)->generation);
1832 	btrfs_set_stack_inode_sequence(inode_item,
1833 				       inode_peek_iversion(inode));
1834 	btrfs_set_stack_inode_transid(inode_item, trans->transid);
1835 	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1836 	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
1837 					  BTRFS_I(inode)->ro_flags);
1838 	btrfs_set_stack_inode_flags(inode_item, flags);
1839 	btrfs_set_stack_inode_block_group(inode_item, 0);
1840 
1841 	btrfs_set_stack_timespec_sec(&inode_item->atime,
1842 				     inode_get_atime_sec(inode));
1843 	btrfs_set_stack_timespec_nsec(&inode_item->atime,
1844 				      inode_get_atime_nsec(inode));
1845 
1846 	btrfs_set_stack_timespec_sec(&inode_item->mtime,
1847 				     inode_get_mtime_sec(inode));
1848 	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1849 				      inode_get_mtime_nsec(inode));
1850 
1851 	btrfs_set_stack_timespec_sec(&inode_item->ctime,
1852 				     inode_get_ctime_sec(inode));
1853 	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1854 				      inode_get_ctime_nsec(inode));
1855 
1856 	btrfs_set_stack_timespec_sec(&inode_item->otime, BTRFS_I(inode)->i_otime_sec);
1857 	btrfs_set_stack_timespec_nsec(&inode_item->otime, BTRFS_I(inode)->i_otime_nsec);
1858 }
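
/*
 * The btrfs_set_stack_*() helpers above write into a struct
 * btrfs_inode_item that lives in ordinary memory (here the copy embedded
 * in the delayed node), unlike the btrfs_set_inode_*() accessors, which
 * write into an extent buffer. A minimal sketch of the difference:
 *
 *   struct btrfs_inode_item item;
 *
 *   btrfs_set_stack_inode_size(&item, 4096);     // plain memory write
 *   btrfs_set_inode_size(leaf, item_ptr, 4096);  // write into the pages
 *                                                // of an extent_buffer
 */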
1859 
1860 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1861 {
1862 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1863 	struct btrfs_delayed_node *delayed_node;
1864 	struct btrfs_inode_item *inode_item;
1865 
1866 	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1867 	if (!delayed_node)
1868 		return -ENOENT;
1869 
1870 	mutex_lock(&delayed_node->mutex);
1871 	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1872 		mutex_unlock(&delayed_node->mutex);
1873 		btrfs_release_delayed_node(delayed_node);
1874 		return -ENOENT;
1875 	}
1876 
1877 	inode_item = &delayed_node->inode_item;
1878 
1879 	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1880 	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1881 	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1882 	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1883 			round_up(i_size_read(inode), fs_info->sectorsize));
1884 	inode->i_mode = btrfs_stack_inode_mode(inode_item);
1885 	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1886 	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1887 	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1888 	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1889 
1890 	inode_set_iversion_queried(inode,
1891 				   btrfs_stack_inode_sequence(inode_item));
1892 	inode->i_rdev = 0;
1893 	*rdev = btrfs_stack_inode_rdev(inode_item);
1894 	btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
1895 				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
1896 
1897 	inode_set_atime(inode, btrfs_stack_timespec_sec(&inode_item->atime),
1898 			btrfs_stack_timespec_nsec(&inode_item->atime));
1899 
1900 	inode_set_mtime(inode, btrfs_stack_timespec_sec(&inode_item->mtime),
1901 			btrfs_stack_timespec_nsec(&inode_item->mtime));
1902 
1903 	inode_set_ctime(inode, btrfs_stack_timespec_sec(&inode_item->ctime),
1904 			btrfs_stack_timespec_nsec(&inode_item->ctime));
1905 
1906 	BTRFS_I(inode)->i_otime_sec = btrfs_stack_timespec_sec(&inode_item->otime);
1907 	BTRFS_I(inode)->i_otime_nsec = btrfs_stack_timespec_nsec(&inode_item->otime);
1908 
1909 	inode->i_generation = BTRFS_I(inode)->generation;
1910 	if (S_ISDIR(inode->i_mode))
1911 		BTRFS_I(inode)->index_cnt = (u64)-1;
1912 
1913 	mutex_unlock(&delayed_node->mutex);
1914 	btrfs_release_delayed_node(delayed_node);
1915 	return 0;
1916 }
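
/*
 * Caller sketch (modeled on btrfs_read_locked_inode() in inode.c): a
 * pending delayed update is newer than the on-disk inode item, so it is
 * preferred when reading an inode back in.
 *
 *   ret = btrfs_fill_inode(inode, &rdev);
 *   if (!ret)
 *           filled = true;  // skip copying from the on-disk item
 */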
1917 
1918 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1919 			       struct btrfs_inode *inode)
1920 {
1921 	struct btrfs_root *root = inode->root;
1922 	struct btrfs_delayed_node *delayed_node;
1923 	int ret = 0;
1924 
1925 	delayed_node = btrfs_get_or_create_delayed_node(inode);
1926 	if (IS_ERR(delayed_node))
1927 		return PTR_ERR(delayed_node);
1928 
1929 	mutex_lock(&delayed_node->mutex);
1930 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1931 		fill_stack_inode_item(trans, &delayed_node->inode_item,
1932 				      &inode->vfs_inode);
1933 		goto release_node;
1934 	}
1935 
1936 	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
1937 	if (ret)
1938 		goto release_node;
1939 
1940 	fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
1941 	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1942 	delayed_node->count++;
1943 	atomic_inc(&root->fs_info->delayed_root->items);
1944 release_node:
1945 	mutex_unlock(&delayed_node->mutex);
1946 	btrfs_release_delayed_node(delayed_node);
1947 	return ret;
1948 }
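
/*
 * The INODE_DIRTY fast path above is the point of this function: once the
 * first update of a transaction has reserved space and dirtied the node,
 * further updates only refresh the cached item, so many in-memory inode
 * updates collapse into one btree modification at commit time. A hedged
 * caller sketch (the fallback helper is hypothetical; see
 * btrfs_update_inode() in inode.c for the real policy):
 *
 *   ret = btrfs_delayed_update_inode(trans, inode);
 *   if (ret)  // e.g. -ENOSPC from the metadata reservation
 *           ret = update_inode_item_in_btree(trans, inode);
 */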
1949 
1950 int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1951 {
1952 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1953 	struct btrfs_delayed_node *delayed_node;
1954 
1955 	/*
1956 	 * We don't do delayed inode updates during log recovery because it
1957 	 * leads to enospc problems. This means we also can't do delayed
1958 	 * inode refs.
1959 	 */
1960 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1961 		return -EAGAIN;
1962 
1963 	delayed_node = btrfs_get_or_create_delayed_node(inode);
1964 	if (IS_ERR(delayed_node))
1965 		return PTR_ERR(delayed_node);
1966 
1967 	/*
1968 	 * We don't reserve space for inode ref deletion because:
1969 	 * - We ONLY do async inode ref deletion for inodes that have only
1970 	 *   one link (i_nlink == 1), which means there is only one inode ref.
1971 	 *   In most cases the inode ref and the inode item are in the same
1972 	 *   leaf, and we deal with them at the same time. Since we are sure
1973 	 *   we will reserve space for the inode item, it is unnecessary to
1974 	 *   reserve space for the inode ref deletion.
1975 	 * - If the inode ref and the inode item are not in the same leaf, we
1976 	 *   still don't need to worry about enospc, because we reserve much
1977 	 *   more space for the inode update than it needs.
1978 	 * - At worst, we can steal some space from the global reservation,
1979 	 *   but that is very rare.
1980 	 */
1981 	mutex_lock(&delayed_node->mutex);
1982 	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1983 		goto release_node;
1984 
1985 	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1986 	delayed_node->count++;
1987 	atomic_inc(&fs_info->delayed_root->items);
1988 release_node:
1989 	mutex_unlock(&delayed_node->mutex);
1990 	btrfs_release_delayed_node(delayed_node);
1991 	return 0;
1992 }
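
/*
 * Caller sketch (modeled on the unlink path; exact names vary by
 * version): -EAGAIN is not an error here, it just tells the caller to
 * delete the inode ref synchronously instead.
 *
 *   if (inode->dir_index) {
 *           ret = btrfs_delayed_delete_inode_ref(inode);
 *           if (!ret)
 *                   goto skip_backref;  // deletion was deferred
 *   }
 *   ret = btrfs_del_inode_ref(...);     // synchronous fallback
 */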
1993 
1994 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1995 {
1996 	struct btrfs_root *root = delayed_node->root;
1997 	struct btrfs_fs_info *fs_info = root->fs_info;
1998 	struct btrfs_delayed_item *curr_item, *prev_item;
1999 
2000 	mutex_lock(&delayed_node->mutex);
2001 	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
2002 	while (curr_item) {
2003 		prev_item = curr_item;
2004 		curr_item = __btrfs_next_delayed_item(prev_item);
2005 		btrfs_release_delayed_item(prev_item);
2006 	}
2007 
2008 	if (delayed_node->index_item_leaves > 0) {
2009 		btrfs_delayed_item_release_leaves(delayed_node,
2010 					  delayed_node->index_item_leaves);
2011 		delayed_node->index_item_leaves = 0;
2012 	}
2013 
2014 	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
2015 	while (curr_item) {
2016 		btrfs_delayed_item_release_metadata(root, curr_item);
2017 		prev_item = curr_item;
2018 		curr_item = __btrfs_next_delayed_item(prev_item);
2019 		btrfs_release_delayed_item(prev_item);
2020 	}
2021 
2022 	btrfs_release_delayed_iref(delayed_node);
2023 
2024 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
2025 		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
2026 		btrfs_release_delayed_inode(delayed_node);
2027 	}
2028 	mutex_unlock(&delayed_node->mutex);
2029 }
2030 
2031 void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
2032 {
2033 	struct btrfs_delayed_node *delayed_node;
2034 
2035 	delayed_node = btrfs_get_delayed_node(inode);
2036 	if (!delayed_node)
2037 		return;
2038 
2039 	__btrfs_kill_delayed_node(delayed_node);
2040 	btrfs_release_delayed_node(delayed_node);
2041 }
2042 
2043 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
2044 {
2045 	unsigned long index = 0;
2046 	struct btrfs_delayed_node *delayed_nodes[8];
2047 
2048 	while (1) {
2049 		struct btrfs_delayed_node *node;
2050 		int count;
2051 
2052 		xa_lock(&root->delayed_nodes);
2053 		if (xa_empty(&root->delayed_nodes)) {
2054 			xa_unlock(&root->delayed_nodes);
2055 			return;
2056 		}
2057 
2058 		count = 0;
2059 		xa_for_each_start(&root->delayed_nodes, index, node, index) {
2060 			/*
2061 			 * Don't increase refs in case the node is dead and
2062 			 * about to be removed from the xarray in the loop below.
2063 			 */
2064 			if (refcount_inc_not_zero(&node->refs)) {
2065 				delayed_nodes[count] = node;
2066 				count++;
2067 			}
2068 			if (count >= ARRAY_SIZE(delayed_nodes))
2069 				break;
2070 		}
2071 		xa_unlock(&root->delayed_nodes);
2072 		index++;
2073 
2074 		for (int i = 0; i < count; i++) {
2075 			__btrfs_kill_delayed_node(delayed_nodes[i]);
2076 			btrfs_release_delayed_node(delayed_nodes[i]);
2077 		}
2078 	}
2079 }
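
/*
 * The loop above is the usual pattern for draining an xarray that can
 * shrink concurrently: collect a small batch under xa_lock, taking
 * references only via refcount_inc_not_zero() so nodes already on their
 * way out are skipped, then drop the lock before the heavy work.
 * xa_for_each_start() leaves 'index' at the last entry visited, so the
 * index++ resumes the next batch right after it. With the batch size of
 * 8 above, killing e.g. 20 delayed nodes takes three passes (8 + 8 + 4).
 */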
2080 
2081 void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
2082 {
2083 	struct btrfs_delayed_node *curr_node, *prev_node;
2084 
2085 	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
2086 	while (curr_node) {
2087 		__btrfs_kill_delayed_node(curr_node);
2088 
2089 		prev_node = curr_node;
2090 		curr_node = btrfs_next_delayed_node(curr_node);
2091 		btrfs_release_delayed_node(prev_node);
2092 	}
2093 }
2094 
2095 void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
2096 				 struct list_head *ins_list,
2097 				 struct list_head *del_list)
2098 {
2099 	struct btrfs_delayed_node *node;
2100 	struct btrfs_delayed_item *item;
2101 
2102 	node = btrfs_get_delayed_node(inode);
2103 	if (!node)
2104 		return;
2105 
2106 	mutex_lock(&node->mutex);
2107 	item = __btrfs_first_delayed_insertion_item(node);
2108 	while (item) {
2109 		/*
2110 		 * It's possible that the item is already in a log list. This
2111 		 * can happen in case two tasks are trying to log the same
2112 		 * directory. For example if we have tasks A and task B:
2113 		 *
2114 		 * Task A collected the delayed items into a log list while
2115 		 * under the inode's log_mutex (at btrfs_log_inode()), but it
2116 		 * only releases the items after logging the inodes they point
2117 		 * to (if they are new inodes), which happens after unlocking
2118 		 * the log mutex;
2119 		 *
2120 		 * Task B enters btrfs_log_inode() and acquires the log_mutex
2121 		 * of the same directory inode, before task A releases the
2122 		 * delayed items. This can happen for example because, when
2123 		 * logging some inode, we may need to trigger logging of its
2124 		 * parent directory, so logging two files that have the same
2125 		 * parent directory can lead to this.
2126 		 *
2127 		 * If this happens, just ignore delayed items already in a log
2128 		 * list. All the tasks logging the directory are under a log
2129 		 * transaction and whichever finishes first can not sync the log
2130 		 * transaction and whichever finishes first cannot sync the log
2131 		 */
2132 		if (!item->logged && list_empty(&item->log_list)) {
2133 			refcount_inc(&item->refs);
2134 			list_add_tail(&item->log_list, ins_list);
2135 		}
2136 		item = __btrfs_next_delayed_item(item);
2137 	}
2138 
2139 	item = __btrfs_first_delayed_deletion_item(node);
2140 	while (item) {
2141 		/* It may be non-empty, for the same reason mentioned above. */
2142 		if (!item->logged && list_empty(&item->log_list)) {
2143 			refcount_inc(&item->refs);
2144 			list_add_tail(&item->log_list, del_list);
2145 		}
2146 		item = __btrfs_next_delayed_item(item);
2147 	}
2148 	mutex_unlock(&node->mutex);
2149 
2150 	/*
2151 	 * We are called during inode logging, which means the inode is in use
2152 	 * and cannot be evicted before we finish logging the inode. So we never
2153 	 * have the last reference on the delayed inode.
2154 	 * Also, we don't use btrfs_release_delayed_node() because that would
2155 	 * requeue the delayed inode (change its order in the list of prepared
2156 	 * nodes) and we don't want to make such a change because we don't
2157 	 * create or delete delayed items.
2158 	 */
2159 	ASSERT(refcount_read(&node->refs) > 1);
2160 	refcount_dec(&node->refs);
2161 }
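
/*
 * A concrete two-task timeline for the comment above (hypothetical):
 *
 *   task A: btrfs_log_get_delayed_items() -> item X on A's log list
 *   task A: unlocks the dir's log_mutex, still holds a ref on X
 *   task B: btrfs_log_get_delayed_items() -> sees X with a non-empty
 *           log_list and skips it instead of corrupting A's list
 *   task A: btrfs_log_put_delayed_items() -> sets X->logged, drops ref
 *
 * Once item->logged is set, later passes in the same log transaction
 * skip the item entirely.
 */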
2162 
2163 void btrfs_log_put_delayed_items(struct btrfs_inode *inode,
2164 				 struct list_head *ins_list,
2165 				 struct list_head *del_list)
2166 {
2167 	struct btrfs_delayed_node *node;
2168 	struct btrfs_delayed_item *item;
2169 	struct btrfs_delayed_item *next;
2170 
2171 	node = btrfs_get_delayed_node(inode);
2172 	if (!node)
2173 		return;
2174 
2175 	mutex_lock(&node->mutex);
2176 
2177 	list_for_each_entry_safe(item, next, ins_list, log_list) {
2178 		item->logged = true;
2179 		list_del_init(&item->log_list);
2180 		if (refcount_dec_and_test(&item->refs))
2181 			kfree(item);
2182 	}
2183 
2184 	list_for_each_entry_safe(item, next, del_list, log_list) {
2185 		item->logged = true;
2186 		list_del_init(&item->log_list);
2187 		if (refcount_dec_and_test(&item->refs))
2188 			kfree(item);
2189 	}
2190 
2191 	mutex_unlock(&node->mutex);
2192 
2193 	/*
2194 	 * We are called during inode logging, which means the inode is in use
2195 	 * and cannot be evicted before we finish logging the inode. So we never
2196 	 * have the last reference on the delayed inode.
2197 	 * Also, we don't use btrfs_release_delayed_node() because that would
2198 	 * requeue the delayed inode (change its order in the list of prepared
2199 	 * nodes) and we don't want to make such a change because we don't
2200 	 * create or delete delayed items.
2201 	 */
2202 	ASSERT(refcount_read(&node->refs) > 1);
2203 	refcount_dec(&node->refs);
2204 }
2205