/*
 * fs/btrfs/delayed-inode.c
 *
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"

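/*
 * Once the number of pending delayed items passes
 * BTRFS_DELAYED_BACKGROUND, btrfs_balance_delayed_items() kicks the
 * async workers; once it reaches BTRFS_DELAYED_WRITEBACK, callers are
 * additionally throttled until the count drops back below the
 * background threshold.
 */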
#define BTRFS_DELAYED_WRITEBACK		400
#define BTRFS_DELAYED_BACKGROUND	100

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	if (delayed_node_cache)
		kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->count = 0;
	delayed_node->in_list = 0;
	delayed_node->inode_dirty = 0;
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	delayed_node->index_cnt = 0;
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
	delayed_node->bytes_reserved = 0;
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
							struct btrfs_root *root)
{
	return root->fs_info->delayed_root;
}

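/*
 * Look up the delayed node cached in an inode and take a reference on
 * it.  The lockless fast path relies on the reference that the
 * btrfs_inode cache itself holds on the node while the pointer is set.
 */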
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		atomic_inc(&node->refs);	/* can be accessed */
		atomic_inc(&node->refs);	/* cached in the inode */
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

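/*
 * Like btrfs_get_delayed_node(), but allocate a new node and insert it
 * into the root's radix tree if the inode has none yet.  A new node
 * starts with two references: one for the btrfs_inode cache and one
 * for the caller.  If somebody beats us to the radix-tree insert we
 * get -EEXIST, free our copy and retry the lookup.
 */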
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	atomic_inc(&node->refs);	/* cached in the btrfs inode */
	atomic_inc(&node->refs);	/* can be accessed */

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		kmem_cache_free(delayed_node_cache, node);
		spin_unlock(&root->inode_lock);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call this with delayed_node->mutex held.
 *
 * If mod = 1, also add the node to the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		node->in_list = 1;
	}
	spin_unlock(&root->lock);
}

/* Call this with delayed_node->mutex held. */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		node->in_list = 0;
	}
	spin_unlock(&root->lock);
}

struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!node->in_list) {	/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

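/*
 * Drop a reference on a delayed node.  A node that still carries items
 * is (re)queued on the node list first.  When the last reference goes
 * away, recheck the count under root->inode_lock before deleting the
 * node from the radix tree, because btrfs_get_delayed_node() may have
 * found it through the tree and taken a new reference meanwhile.
 */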
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			kmem_cache_free(delayed_node_cache, delayed_node);
		}
		spin_unlock(&root->inode_lock);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up a delayed item by key
 * @root:	the root of the rb-tree to search
 * @key:	the key to look up
 * @prev:	used to store the previous item if the right item isn't found
 * @next:	used to store the next item if the right item isn't found
 *
 * Note: if the right item isn't found, *prev and *next are set to the
 * neighbouring items instead.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
	return item;
}

struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, NULL);
	return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}

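/*
 * Link a delayed item into the insertion or deletion rb-tree of its
 * delayed node, keyed by its btrfs key.  For queued dir index
 * insertions, index_cnt is also pushed past the new offset so that
 * future index numbers never collide with a pending item.
 */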
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;
	atomic_dec(&delayed_root->items);
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
						   u64 root_id)
{
	struct btrfs_key root_key;

	if (root->objectid == root_id)
		return root;

	root_key.objectid = root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret)
		item->bytes_reserved = num_bytes;

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	int release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	/*
	 * btrfs_dirty_inode updates the inode under btrfs_join_transaction,
	 * which doesn't reserve space, for speed.  That is a problem because
	 * we still need to reserve space for this update, so try to reserve
	 * the space here.
	 *
	 * If src_rsv is the delalloc_block_rsv we just let it steal from
	 * there, since that space is already accounted for.
	 */
	if (!trans->bytes_reserved &&
	    src_rsv != &root->fs_info->delalloc_block_rsv) {
		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
		/*
		 * Since we're inside a transaction, reserve_metadata_bytes
		 * could try to commit it and would then return EAGAIN to make
		 * us back off the transaction we hold, so return ENOSPC
		 * instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret)
			node->bytes_reserved = num_bytes;
		return ret;
	} else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (BTRFS_I(inode)->delalloc_meta_reserved) {
			BTRFS_I(inode)->delalloc_meta_reserved = 0;
			spin_unlock(&BTRFS_I(inode)->lock);
			release = true;
			goto migrate;
		}
		spin_unlock(&BTRFS_I(inode)->lock);

		/*
		 * Ok, we didn't have space pre-reserved.  This shouldn't
		 * happen too often, but it can happen if we do delalloc to an
		 * existing inode which gets dirtied because of the time
		 * update, and then isn't touched again until after the
		 * transaction commits and we try to write out the data.
		 * First try to reserve something strictly for us; failing
		 * that, try to steal from the delalloc block rsv.
		 */
		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
		if (!ret)
			goto out;

		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
		if (!ret)
			goto out;

		/*
		 * Ok, this is a problem: just steal from the global rsv,
		 * since this really shouldn't happen that often.
		 */
		WARN_ON(1);
		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
					      dst_rsv, num_bytes);
		goto out;
	}

migrate:
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);

out:
	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This keeps things simple for callers that don't
	 * normally have anything migrated out of their block rsv: when they
	 * release their reservation, the size is decreased as well, so if
	 * migrate also reduced the size we'd end up with a negative size.
	 * But for the delalloc_meta_reserved case we only know to drop one
	 * reservation, while we could in fact have done this reserve/migrate
	 * dance several times between the original reservation and its
	 * cleanup.  To take care of that, release the space for the meta
	 * reservation here.  It is probably time for a documentation page on
	 * how block rsvs work.
	 */
	if (!ret)
		node->bytes_reserved = num_bytes;

	if (release)
		btrfs_block_rsv_release(root, src_rsv, num_bytes);

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper inserts a batch of contiguous items into the same leaf,
 * limited by the leaf's free space.
 */
static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of contiguous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * We need to allocate some memory, which may sleep, so set all the
	 * locked nodes in the path to blocking locks first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	ret = setup_items_for_insert(trans, root, path, keys, data_size,
				     total_data_size, total_size, nitems);
	if (ret)
		goto error;

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper handles simple insertions whose payload needs no item
 * extension, such as directory name index insertion and inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	item = btrfs_item_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * Insert one item first; then, if contiguous items follow, try to insert
 * them into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the contiguous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(trans, root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

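/*
 * Delete a run of contiguous dir index items from the leaf that @path
 * points into, as long as they match pending delayed deletion items.
 */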
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item this delayed item points to, so the
		 * delayed item is stale; just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr)
			goto do_again;
		else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

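/*
 * Forget the pending inode update carried by a delayed node: clear the
 * dirty flag, drop the count it contributed and wake anybody throttled
 * on the global delayed-item count.
 */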
static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node && delayed_node->inode_dirty) {
		BUG_ON(!delayed_node->root);
		delayed_node->inode_dirty = 0;
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		atomic_dec(&delayed_root->items);
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND &&
		    waitqueue_active(&delayed_root->wait))
			wake_up(&delayed_root->wait);
	}
}

static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

	mutex_lock(&node->mutex);
	if (!node->inode_dirty) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	key.objectid = node->inode_id;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;
	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
	if (ret > 0) {
		btrfs_release_path(path);
		mutex_unlock(&node->mutex);
		return -ENOENT;
	} else if (ret < 0) {
		mutex_unlock(&node->mutex);
		return ret;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);
	mutex_unlock(&node->mutex);

	return 0;
}

/* Called when committing the transaction. */
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node) {
		root = curr_node->root;
		ret = btrfs_insert_delayed_items(trans, path, root,
						 curr_node);
		if (!ret)
			ret = btrfs_delete_delayed_items(trans, path, root,
							 curr_node);
		if (!ret)
			ret = btrfs_update_delayed_inode(trans, root, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
	return ret;
}

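/*
 * Flush the pending insertions, deletions and the inode update of a
 * single delayed node within the current transaction.
 */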
static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
					      struct btrfs_delayed_node *node)
{
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &node->root->fs_info->delayed_block_rsv;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (!ret)
		ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (!ret)
		ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	btrfs_free_path(path);

	trans->block_rsv = block_rsv;
	return ret;
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

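/*
 * Work item for the async delayed-node workers: carries one prepared
 * delayed node to be flushed from a btrfs worker thread.
 */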
struct btrfs_async_delayed_node {
	struct btrfs_root *root;
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
{
	struct btrfs_async_delayed_node *async_node;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	unsigned long nr = 0;
	int need_requeue = 0;
	int ret;

	async_node = container_of(work, struct btrfs_async_delayed_node, work);

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	path->leave_spinning = 1;

	delayed_node = async_node->delayed_node;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto free_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
	if (!ret)
		ret = btrfs_delete_delayed_items(trans, path, root,
						 delayed_node);

	if (!ret)
		btrfs_update_delayed_inode(trans, root, path, delayed_node);

	/*
	 * New delayed items may have been inserted meanwhile, so we may need
	 * to requeue the work.  Besides that, we must dequeue empty delayed
	 * nodes to avoid a race between the delayed-item balancing code and
	 * the worker.  The race looks like this:
	 * 	Task1				Worker thread
	 * 					count == 0, needn't requeue
	 * 					  also needn't insert the
	 * 					  delayed node into prepare
	 * 					  list again.
	 * 	add lots of delayed items
	 * 	queue the delayed node
	 * 	  already in the list,
	 * 	  and not in the prepare
	 * 	  list, it means the delayed
	 * 	  node is being dealt with
	 * 	  by the worker.
	 * 	do delayed items balance
	 * 	  the delayed node is being
	 * 	  dealt with by the worker
	 * 	  now, just wait.
	 * 	  				the worker goes idle.
	 * Task1 will sleep until the transaction is committed.
	 */
	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		need_requeue = 1;
	else
		btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
					   delayed_node);
	mutex_unlock(&delayed_node->mutex);

	nr = trans->blocks_used;

	trans->block_rsv = block_rsv;
	btrfs_end_transaction_dmeta(trans, root);
	__btrfs_btree_balance_dirty(root, nr);
free_path:
	btrfs_free_path(path);
out:
	if (need_requeue)
		btrfs_requeue_work(&async_node->work);
	else {
		btrfs_release_prepared_delayed_node(delayed_node);
		kfree(async_node);
	}
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_root *root, int all)
{
	struct btrfs_async_delayed_node *async_node;
	struct btrfs_delayed_node *curr;
	int count = 0;

again:
	curr = btrfs_first_prepared_delayed_node(delayed_root);
	if (!curr)
		return 0;

	async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
	if (!async_node) {
		btrfs_release_prepared_delayed_node(curr);
		return -ENOMEM;
	}

	async_node->root = root;
	async_node->delayed_node = curr;

	async_node->work.func = btrfs_async_run_delayed_node_done;
	async_node->work.flags = 0;

	btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
	count++;

	if (all || count < 4)
		goto again;

	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	delayed_root = btrfs_get_delayed_root(root);
	WARN_ON(btrfs_first_delayed_node(delayed_root));
}

void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;

	delayed_root = btrfs_get_delayed_root(root);

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int ret;
		ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
		if (ret)
			return;

		wait_event_interruptible_timeout(
				delayed_root->wait,
				(atomic_read(&delayed_root->items) <
				 BTRFS_DELAYED_BACKGROUND),
				HZ);
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, root, 0);
}

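/*
 * Queue a new dir index item instead of inserting it into the btree
 * right away; the delayed machinery writes it back later.  A caller
 * (sketch, names as used elsewhere in btrfs) would do something like:
 *
 *	ret = btrfs_insert_delayed_dir_index(trans, root, name, name_len,
 *					     dir, &disk_key, type, index);
 *
 * with @dir held under i_mutex, inside a transaction that has already
 * reserved metadata space for the item.
 */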
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * we reserved enough space when we started the transaction, so
	 * reserving metadata here must not fail
	 */
	BUG_ON(ret);

	delayed_item->key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	dir_item->transid = cpu_to_le64(trans->transid);
	dir_item->data_len = 0;
	dir_item->name_len = cpu_to_le16(name_len);
	dir_item->type = type;
	memcpy((char *)(dir_item + 1), name, name_len);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		printk(KERN_ERR "error adding delayed dir index item "
				"(name: %s) into the insertion tree of the "
				"delayed node (root id: %llu, inode id: %llu, "
				"errno: %d)\n",
				name,
				(unsigned long long)delayed_node->root->objectid,
				(unsigned long long)delayed_node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

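/*
 * If the dir index being deleted is still pending in the insertion
 * tree, the insert and the delete cancel out: drop the queued
 * insertion and its reservation instead of queueing a deletion item.
 * Returns 1 when no pending insertion was found.
 */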
static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
	/*
	 * we reserved enough space when we started the transaction, so
	 * reserving metadata here must not fail
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		printk(KERN_ERR "error adding delayed dir index item "
				"(index: %llu) into the deletion tree of the "
				"delayed node (root id: %llu, inode id: %llu, "
				"errno: %d)\n",
				(unsigned long long)index,
				(unsigned long long)node->root->objectid,
				(unsigned long long)node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * We hold i_mutex of this directory, so no new directory index can be
	 * added to the delayed node and index_cnt cannot be updated under us.
	 * Hence there is no need to lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs must
	 * be > 1 here, and we needn't check whether it is about to be freed.
	 *
	 * Besides that, this function is used to read the dir: we neither
	 * insert nor delete delayed items during that period, so we also
	 * needn't requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);
}

void btrfs_put_delayed_items(struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
	}
	return 0;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
				    filldir_t filldir,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * The data of a delayed item never changes, so we needn't lock the
	 * items.  And since we hold i_mutex of the directory, nobody can
	 * delete any directory index right now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < filp->f_pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		filp->f_pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = le16_to_cpu(di->name_len);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = filldir(dirent, name, name_len, curr->key.offset,
			       location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
	}
	return 0;
}

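/*
 * Stack accessors for the btrfs_inode_item copy kept in ordinary CPU
 * memory inside the delayed node, as opposed to the regular accessors
 * that operate on an extent buffer.
 */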
BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
			 generation, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
			 sequence, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
			 transid, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
			 nbytes, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
			 block_group, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);

BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);

static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
	btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
				      inode->i_ctime.tv_nsec);
}

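/*
 * Fill a VFS inode from the inode item cached in its delayed node.
 * Returns -ENOENT when no delayed update is pending, in which case the
 * caller must read the inode item from the btree instead.
 */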
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->inode_dirty) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	inode->i_uid = btrfs_stack_inode_uid(inode_item);
	inode->i_gid = btrfs_stack_inode_gid(inode_item);
	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->inode_dirty) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	delayed_node->inode_dirty = 1;
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

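/*
 * Throw away everything a delayed node still carries: queued
 * insertions, queued deletions and any pending inode update, releasing
 * their metadata reservations.  Used when an inode (or a whole root)
 * is being destroyed.
 */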
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (delayed_node->inode_dirty) {
		btrfs_delayed_inode_release_metadata(root, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}