xref: /linux/fs/btrfs/delayed-inode.c (revision 95db3b255fde4e830e5f8cc011eb404023f669d4)
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

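/*
 * Two delayed dir index items are "continuous" when they belong to the same
 * directory (same objectid and key type) and their index offsets are
 * adjacent; such items can be processed in one batch.
 */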
static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
							struct btrfs_root *root)
{
	return root->fs_info->delayed_root;
}

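/*
 * Look up the delayed node cached in the btrfs inode, falling back to the
 * per-root radix tree. A node found only in the tree is cached in the inode
 * for later lookups. Every successful return takes a reference on the node.
 */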
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		/* can be accessed and cached in the inode */
		atomic_add(2, &node->refs);
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	atomic_add(2, &node->refs);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it while holding delayed_node->mutex.
 *
 * If mod = 1, also add this node to the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it while holding delayed_node->mutex. */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

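/*
 * Drop a reference on the delayed node. Before that, requeue the node if it
 * still has pending items, otherwise dequeue it. When the last reference is
 * dropped, recheck the refcount under root->inode_lock so we don't race with
 * a concurrent lookup before deleting the node from the radix tree and
 * freeing it.
 */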
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		bool free = false;
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			free = true;
		}
		spin_unlock(&root->inode_lock);
		if (free)
			kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

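/*
 * Allocate a delayed item. The item's payload (e.g. a stack dir item plus
 * its name) is stored inline, directly after the struct, which is why
 * data_len extra bytes are allocated.
 */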
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root:	the rb-root of the delayed items (insertion or deletion tree)
 * @key:	the key to look up
 * @prev:	used to store the prev item if the right item isn't found
 * @next:	used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
	return item;
}

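/*
 * Insert a delayed item into the insertion or deletion rb-tree of the
 * delayed node, ordered by key. For dir index insertions, also bump the
 * node's index_cnt past the new index so future indexes don't collide.
 */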
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

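/*
 * Account one processed delayed item and wake up the waiters in
 * btrfs_balance_delayed_items() either when the number of pending items
 * drops below the background threshold or after every BTRFS_DELAYED_BATCH
 * completions.
 */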
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

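/*
 * Reserve metadata space for one tree operation by migrating it from the
 * transaction's block reservation into the global delayed_block_rsv, so the
 * item can be flushed later, outside of this transaction's context.
 */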
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}

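/*
 * Reserve metadata space for a delayed inode update. Depending on where the
 * caller's reservation came from, either reserve fresh space, migrate space
 * from the caller's block rsv, or, as a last resort, steal from the global
 * reservation.
 */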
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're already accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(root->fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	} else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &BTRFS_I(inode)->runtime_flags)) {
			spin_unlock(&BTRFS_I(inode)->lock);
			release = true;
			goto migrate;
		}
		spin_unlock(&BTRFS_I(inode)->lock);

		/*
		 * Ok, we didn't have space pre-reserved.  This shouldn't happen
		 * too often but it can happen if we do delalloc to an existing
		 * inode which gets dirtied because of the time update, and then
		 * isn't touched again until after the transaction commits and
		 * then we try to write out the data.  First try to be nice and
		 * reserve something strictly for us.  If not, be a pain and try
		 * to steal from the delalloc block rsv.
		 */
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			goto out;

		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
		if (!ret)
			goto out;

		if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
			btrfs_debug(root->fs_info,
				    "block rsv migrate returned %d", ret);
			WARN_ON(1);
		}
		/*
		 * Ok, this is a problem, let's just steal from the global rsv
		 * since this really shouldn't happen that often.
		 */
		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
					      dst_rsv, num_bytes);
		goto out;
	}

migrate:
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);

out:
	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This is to simplify people who don't normally have things
	 * migrated from their block rsv.  If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced the size we'd end up with a negative size.  But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
	 * but we could in fact do this reserve/migrate dance several times
	 * between the time we did the original reservation and we'd clean it
	 * up.  So to take care of this, release the space for the meta
	 * reservation here.  I think it may be time for a documentation page on
	 * how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(root, src_rsv, num_bytes);
	}

	return ret;
}

701 
702 static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
703 						struct btrfs_delayed_node *node)
704 {
705 	struct btrfs_block_rsv *rsv;
706 
707 	if (!node->bytes_reserved)
708 		return;
709 
710 	rsv = &root->fs_info->delayed_block_rsv;
711 	trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
712 				      node->inode_id, node->bytes_reserved, 0);
713 	btrfs_block_rsv_release(root, rsv,
714 				node->bytes_reserved);
715 	node->bytes_reserved = 0;
716 }
717 
/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory space, but it might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper handles simple insertions that don't need to extend the item
 * with new data, such as directory name index insertion and inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * We insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

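/*
 * Delete in one batch the leaf items that match a run of continuous delayed
 * deletion items, starting at the slot the caller's path points to.
 */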
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

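/*
 * Copy the in-memory inode item of the delayed node into the corresponding
 * leaf item of the fs tree. If the delayed node also carries an inode ref
 * deletion, delete the single INODE_REF/INODE_EXTREF item as well, ideally
 * from the same leaf.
 */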
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for an inode that has only one link, so
	 * there is only one iref. The case where several irefs are in the
	 * same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error; the transaction is aborted and any outstanding
 * delayed items are cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, root, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	return __btrfs_run_delayed_items(trans, root, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, int nr)
{
	return __btrfs_run_delayed_items(trans, root, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans, delayed_node->root);
	btrfs_btree_balance_dirty(delayed_node->root);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

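/*
 * Worker function: repeatedly take a node off the prepared list and commit
 * its delayed items in a joined transaction, until the requested number of
 * nodes is done (nr == 0 means no limit) or the pending-item count falls
 * below half the background threshold.
 */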
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty_nodelay(root);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if (async_work->nr == 0 || total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	delayed_root = btrfs_get_delayed_root(root);
	WARN_ON(btrfs_first_delayed_node(delayed_root));
}

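/*
 * A waiter may stop waiting once a full batch of items has been processed
 * since it sampled items_seq, or once the number of pending items has
 * dropped below the background threshold.
 */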
static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_fs_info *fs_info = root->fs_info;

	delayed_root = btrfs_get_delayed_root(root);

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * we have reserved enough space when we started a new transaction,
	 * so failing to reserve metadata here is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(root->fs_info, "err add delayed dir index item(name: %.*s) "
				"into the insertion tree of the delayed node "
				"(root id: %llu, inode id: %llu, errno: %d)",
				name_len, name, delayed_node->root->objectid,
				delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
	/*
	 * we have reserved enough space when we started a new transaction,
	 * so failing to reserve metadata here is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(root->fs_info, "err add delayed dir index item(index: %llu) "
				"into the deletion tree of the delayed node "
				"(root id: %llu, inode id: %llu, errno: %d)",
				index, node->root->objectid, node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we hold the i_mutex of this directory, no new directory index
	 * can be added to the delayed node and index_cnt cannot be updated
	 * concurrently, so we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check whether it is going to be
	 * freed or not.
	 *
	 * Besides that, this function is used to read the dir, and we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);

	return true;
}

1655 
1656 void btrfs_readdir_put_delayed_items(struct inode *inode,
1657 				     struct list_head *ins_list,
1658 				     struct list_head *del_list)
1659 {
1660 	struct btrfs_delayed_item *curr, *next;
1661 
1662 	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1663 		list_del(&curr->readdir_list);
1664 		if (atomic_dec_and_test(&curr->refs))
1665 			kfree(curr);
1666 	}
1667 
1668 	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1669 		list_del(&curr->readdir_list);
1670 		if (atomic_dec_and_test(&curr->refs))
1671 			kfree(curr);
1672 	}
1673 
1674 	/*
1675 	 * The VFS is going to do up_read(), so we need to downgrade back to a
1676 	 * read lock.
1677 	 */
1678 	downgrade_write(&inode->i_rwsem);
1679 }
1680 
int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
		else
			continue;
	}
	return 0;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list, bool *emitted)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * The data of a delayed item never changes, so we needn't lock it.
	 * And since we hold the i_mutex of the directory, nobody can delete
	 * any directory index now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
			       location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		*emitted = true;
	}
	return 0;
}

1761 
1762 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1763 				  struct btrfs_inode_item *inode_item,
1764 				  struct inode *inode)
1765 {
1766 	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1767 	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1768 	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1769 	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1770 	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1771 	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1772 	btrfs_set_stack_inode_generation(inode_item,
1773 					 BTRFS_I(inode)->generation);
1774 	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
1775 	btrfs_set_stack_inode_transid(inode_item, trans->transid);
1776 	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1777 	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1778 	btrfs_set_stack_inode_block_group(inode_item, 0);
1779 
1780 	btrfs_set_stack_timespec_sec(&inode_item->atime,
1781 				     inode->i_atime.tv_sec);
1782 	btrfs_set_stack_timespec_nsec(&inode_item->atime,
1783 				      inode->i_atime.tv_nsec);
1784 
1785 	btrfs_set_stack_timespec_sec(&inode_item->mtime,
1786 				     inode->i_mtime.tv_sec);
1787 	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1788 				      inode->i_mtime.tv_nsec);
1789 
1790 	btrfs_set_stack_timespec_sec(&inode_item->ctime,
1791 				     inode->i_ctime.tv_sec);
1792 	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1793 				      inode->i_ctime.tv_nsec);
1794 
1795 	btrfs_set_stack_timespec_sec(&inode_item->otime,
1796 				     BTRFS_I(inode)->i_otime.tv_sec);
1797 	btrfs_set_stack_timespec_nsec(&inode_item->otime,
1798 				     BTRFS_I(inode)->i_otime.tv_nsec);
1799 }
1800 
1801 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1802 {
1803 	struct btrfs_delayed_node *delayed_node;
1804 	struct btrfs_inode_item *inode_item;
1805 
1806 	delayed_node = btrfs_get_delayed_node(inode);
1807 	if (!delayed_node)
1808 		return -ENOENT;
1809 
1810 	mutex_lock(&delayed_node->mutex);
1811 	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1812 		mutex_unlock(&delayed_node->mutex);
1813 		btrfs_release_delayed_node(delayed_node);
1814 		return -ENOENT;
1815 	}
1816 
1817 	inode_item = &delayed_node->inode_item;
1818 
1819 	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1820 	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1821 	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
1822 	inode->i_mode = btrfs_stack_inode_mode(inode_item);
1823 	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1824 	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1825 	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1826         BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1827 
1828 	inode->i_version = btrfs_stack_inode_sequence(inode_item);
1829 	inode->i_rdev = 0;
1830 	*rdev = btrfs_stack_inode_rdev(inode_item);
1831 	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1832 
1833 	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1834 	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1835 
1836 	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1837 	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1838 
1839 	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1840 	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1841 
1842 	BTRFS_I(inode)->i_otime.tv_sec =
1843 		btrfs_stack_timespec_sec(&inode_item->otime);
1844 	BTRFS_I(inode)->i_otime.tv_nsec =
1845 		btrfs_stack_timespec_nsec(&inode_item->otime);
1846 
1847 	inode->i_generation = BTRFS_I(inode)->generation;
1848 	BTRFS_I(inode)->index_cnt = (u64)-1;
1849 
1850 	mutex_unlock(&delayed_node->mutex);
1851 	btrfs_release_delayed_node(delayed_node);
1852 	return 0;
1853 }
1854 
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

int btrfs_delayed_delete_inode_ref(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	/*
	 * we don't do delayed inode updates during log recovery because it
	 * leads to enospc problems.  This means we also can't do
	 * delayed inode refs
	 */
	if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for the inode that has only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   And in most cases, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about the enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At worst, we can steal some space from the global reservation.
	 *   It is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&BTRFS_I(inode)->root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

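/*
 * Throw away everything the delayed node holds: release the metadata
 * reservations and drop all pending insertion/deletion items, the delayed
 * iref and the delayed inode update without applying them. Used on error
 * handling and unmount paths.
 */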
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(root, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

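/*
 * Walk the whole radix tree of delayed nodes in gang-lookup batches,
 * grabbing a reference on each node before killing and releasing it.
 */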
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

2001 
2002 void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
2003 {
2004 	struct btrfs_delayed_root *delayed_root;
2005 	struct btrfs_delayed_node *curr_node, *prev_node;
2006 
2007 	delayed_root = btrfs_get_delayed_root(root);
2008 
2009 	curr_node = btrfs_first_delayed_node(delayed_root);
2010 	while (curr_node) {
2011 		__btrfs_kill_delayed_node(curr_node);
2012 
2013 		prev_node = curr_node;
2014 		curr_node = btrfs_next_delayed_node(curr_node);
2015 		btrfs_release_delayed_node(prev_node);
2016 	}
2017 }
2018 
2019