xref: /linux/fs/btrfs/ordered-data.c (revision 0dd9ac63ce26ec87b080ca9c3e6efed33c23ace6)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/slab.h>
20 #include <linux/blkdev.h>
21 #include <linux/writeback.h>
22 #include <linux/pagevec.h>
23 #include "ctree.h"
24 #include "transaction.h"
25 #include "btrfs_inode.h"
26 #include "extent_io.h"
27 
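/*
 * returns the first byte past the end of this ordered extent, clamped to
 * (u64)-1 so callers never see a wrapped value if file_offset + len
 * overflows
 */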
28 static u64 entry_end(struct btrfs_ordered_extent *entry)
29 {
30 	if (entry->file_offset + entry->len < entry->file_offset)
31 		return (u64)-1;
32 	return entry->file_offset + entry->len;
33 }
34 
35 /* returns NULL if the insertion worked, or returns the node already in the
36  * tree whose range covers file_offset
37  */
38 static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
39 				   struct rb_node *node)
40 {
41 	struct rb_node **p = &root->rb_node;
42 	struct rb_node *parent = NULL;
43 	struct btrfs_ordered_extent *entry;
44 
45 	while (*p) {
46 		parent = *p;
47 		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
48 
49 		if (file_offset < entry->file_offset)
50 			p = &(*p)->rb_left;
51 		else if (file_offset >= entry_end(entry))
52 			p = &(*p)->rb_right;
53 		else
54 			return parent;
55 	}
56 
57 	rb_link_node(node, parent, p);
58 	rb_insert_color(node, root);
59 	return NULL;
60 }
61 
62 /*
63  * look for a given offset in the tree, and if it can't be found return,
64  * via prev_ret, the entry with the closest lesser offset
65  */
66 static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
67 				     struct rb_node **prev_ret)
68 {
69 	struct rb_node *n = root->rb_node;
70 	struct rb_node *prev = NULL;
71 	struct rb_node *test;
72 	struct btrfs_ordered_extent *entry;
73 	struct btrfs_ordered_extent *prev_entry = NULL;
74 
75 	while (n) {
76 		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
77 		prev = n;
78 		prev_entry = entry;
79 
80 		if (file_offset < entry->file_offset)
81 			n = n->rb_left;
82 		else if (file_offset >= entry_end(entry))
83 			n = n->rb_right;
84 		else
85 			return n;
86 	}
87 	if (!prev_ret)
88 		return NULL;
89 
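	/*
	 * no entry contains file_offset; adjust prev so that *prev_ret
	 * points at the closest entry that ends at or before file_offset
	 * (or the first node in the tree if every entry ends beyond it)
	 */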
90 	while (prev && file_offset >= entry_end(prev_entry)) {
91 		test = rb_next(prev);
92 		if (!test)
93 			break;
94 		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
95 				      rb_node);
96 		if (file_offset < entry_end(prev_entry))
97 			break;
98 
99 		prev = test;
100 	}
101 	if (prev)
102 		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
103 				      rb_node);
104 	while (prev && file_offset < entry_end(prev_entry)) {
105 		test = rb_prev(prev);
106 		if (!test)
107 			break;
108 		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
109 				      rb_node);
110 		prev = test;
111 	}
112 	*prev_ret = prev;
113 	return NULL;
114 }
115 
116 /*
117  * helper to check if a given offset is inside a given entry
118  */
119 static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
120 {
121 	if (file_offset < entry->file_offset ||
122 	    entry->file_offset + entry->len <= file_offset)
123 		return 0;
124 	return 1;
125 }
126 
127 static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
128 			  u64 len)
129 {
130 	if (file_offset + len <= entry->file_offset ||
131 	    entry->file_offset + entry->len <= file_offset)
132 		return 0;
133 	return 1;
134 }
135 
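/*
 * both helpers above treat an entry as the half-open byte range
 * [file_offset, file_offset + len): for example, an entry covering bytes
 * 0-4095 has len 4096, so offset 4096 is not inside it and a range
 * starting at 4096 does not overlap it.
 */
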
136 /*
137  * find the first ordered struct that contains this offset, otherwise
138  * the closest one at a lesser offset
139  */
140 static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
141 					  u64 file_offset)
142 {
143 	struct rb_root *root = &tree->tree;
144 	struct rb_node *prev;
145 	struct rb_node *ret;
146 	struct btrfs_ordered_extent *entry;
147 
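
	/* check the cached last-accessed entry before searching the tree */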
148 	if (tree->last) {
149 		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
150 				 rb_node);
151 		if (offset_in_entry(entry, file_offset))
152 			return tree->last;
153 	}
154 	ret = __tree_search(root, file_offset, &prev);
155 	if (!ret)
156 		ret = prev;
157 	if (ret)
158 		tree->last = ret;
159 	return ret;
160 }
161 
162 /* allocate and add a new ordered_extent into the per-inode tree.
163  * file_offset is the logical offset in the file
164  *
165  * start is the disk block number of an extent already reserved in the
166  * extent allocation tree
167  *
168  * len is the length of the extent
169  *
170  * The tree is given a single reference on the ordered extent that was
171  * inserted.
172  */
173 static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
174 				      u64 start, u64 len, u64 disk_len,
175 				      int type, int dio)
176 {
177 	struct btrfs_ordered_inode_tree *tree;
178 	struct rb_node *node;
179 	struct btrfs_ordered_extent *entry;
180 
181 	tree = &BTRFS_I(inode)->ordered_tree;
182 	entry = kzalloc(sizeof(*entry), GFP_NOFS);
183 	if (!entry)
184 		return -ENOMEM;
185 
186 	entry->file_offset = file_offset;
187 	entry->start = start;
188 	entry->len = len;
189 	entry->disk_len = disk_len;
190 	entry->bytes_left = len;
191 	entry->inode = inode;
192 	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
193 		set_bit(type, &entry->flags);
194 
195 	if (dio)
196 		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
197 
198 	/* one ref for the tree */
199 	atomic_set(&entry->refs, 1);
200 	init_waitqueue_head(&entry->wait);
201 	INIT_LIST_HEAD(&entry->list);
202 	INIT_LIST_HEAD(&entry->root_extent_list);
203 
204 	spin_lock(&tree->lock);
205 	node = tree_insert(&tree->tree, file_offset,
206 			   &entry->rb_node);
207 	BUG_ON(node);
208 	spin_unlock(&tree->lock);
209 
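	/*
	 * also put the new extent on the per-fs list so that
	 * btrfs_wait_ordered_extents() can find it
	 */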
210 	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
211 	list_add_tail(&entry->root_extent_list,
212 		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
213 	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
214 
215 	BUG_ON(node);
216 	return 0;
217 }
218 
219 int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
220 			     u64 start, u64 len, u64 disk_len, int type)
221 {
222 	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
223 					  disk_len, type, 0);
224 }
225 
226 int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
227 				 u64 start, u64 len, u64 disk_len, int type)
228 {
229 	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
230 					  disk_len, type, 1);
231 }
232 
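/*
 * Illustrative sketch only (no caller lives in this file): once space for a
 * write has been reserved on disk, the write path would pin it with an
 * ordered extent, roughly:
 *
 *	ret = btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
 *				       num_bytes, num_bytes,
 *				       BTRFS_ORDERED_PREALLOC);
 *	if (ret)
 *		goto out_free_reserve;
 *
 * 'disk_bytenr', 'num_bytes', the error label and the choice of type flag
 * are assumptions for the sketch; only the function signature above comes
 * from this file.
 */
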
233 /*
234  * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
235  * when an ordered extent is finished.  If the list covers more than one
236  * ordered extent, it is split across multiple ordered extents.
237  */
238 int btrfs_add_ordered_sum(struct inode *inode,
239 			  struct btrfs_ordered_extent *entry,
240 			  struct btrfs_ordered_sum *sum)
241 {
242 	struct btrfs_ordered_inode_tree *tree;
243 
244 	tree = &BTRFS_I(inode)->ordered_tree;
245 	spin_lock(&tree->lock);
246 	list_add_tail(&sum->list, &entry->list);
247 	spin_unlock(&tree->lock);
248 	return 0;
249 }
250 
251 /*
252  * this is used to account for finished IO across a given range
253  * of the file.  The IO should not span ordered extents.  If
254  * a given ordered_extent is completely done, 1 is returned, otherwise
255  * 0.
256  *
257  * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
258  * to make sure this function only returns 1 once for a given ordered extent.
259  */
260 int btrfs_dec_test_ordered_pending(struct inode *inode,
261 				   struct btrfs_ordered_extent **cached,
262 				   u64 file_offset, u64 io_size)
263 {
264 	struct btrfs_ordered_inode_tree *tree;
265 	struct rb_node *node;
266 	struct btrfs_ordered_extent *entry = NULL;
267 	int ret;
268 
269 	tree = &BTRFS_I(inode)->ordered_tree;
270 	spin_lock(&tree->lock);
271 	node = tree_search(tree, file_offset);
272 	if (!node) {
273 		ret = 1;
274 		goto out;
275 	}
276 
277 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
278 	if (!offset_in_entry(entry, file_offset)) {
279 		ret = 1;
280 		goto out;
281 	}
282 
283 	if (io_size > entry->bytes_left) {
284 		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
285 		       (unsigned long long)entry->bytes_left,
286 		       (unsigned long long)io_size);
287 	}
288 	entry->bytes_left -= io_size;
289 	if (entry->bytes_left == 0)
290 		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
291 	else
292 		ret = 1;
293 out:
294 	if (!ret && cached && entry) {
295 		*cached = entry;
296 		atomic_inc(&entry->refs);
297 	}
298 	spin_unlock(&tree->lock);
299 	return ret == 0;
300 }
301 
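/*
 * Rough sketch of the intended calling pattern (the surrounding completion
 * code is hypothetical; only the two functions from this file are real).
 * As each chunk of a write completes, the completion path decrements the
 * ordered extent and finishes it only once the whole range is accounted
 * for:
 *
 *	struct btrfs_ordered_extent *ordered = NULL;
 *
 *	if (btrfs_dec_test_ordered_pending(inode, &ordered, start, len)) {
 *		finish_the_write(inode, ordered);	(hypothetical helper)
 *		btrfs_put_ordered_extent(ordered);
 *	}
 *
 * Note that when it returns 1 the function has taken an extra reference on
 * the cached entry, which is why the caller drops it afterwards.
 */
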
302 /*
303  * used to drop a reference on an ordered extent.  This will free
304  * the extent if the last reference is dropped
305  */
306 int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
307 {
308 	struct list_head *cur;
309 	struct btrfs_ordered_sum *sum;
310 
311 	if (atomic_dec_and_test(&entry->refs)) {
312 		while (!list_empty(&entry->list)) {
313 			cur = entry->list.next;
314 			sum = list_entry(cur, struct btrfs_ordered_sum, list);
315 			list_del(&sum->list);
316 			kfree(sum);
317 		}
318 		kfree(entry);
319 	}
320 	return 0;
321 }
322 
323 /*
324  * remove an ordered extent from the tree.  No references are dropped
325  * and the caller must wake up any waiters on entry->wait.  The tree lock
326  * must be held while calling this function.
327  */
328 static int __btrfs_remove_ordered_extent(struct inode *inode,
329 				struct btrfs_ordered_extent *entry)
330 {
331 	struct btrfs_ordered_inode_tree *tree;
332 	struct btrfs_root *root = BTRFS_I(inode)->root;
333 	struct rb_node *node;
334 
335 	tree = &BTRFS_I(inode)->ordered_tree;
336 	node = &entry->rb_node;
337 	rb_erase(node, &tree->tree);
338 	tree->last = NULL;
339 	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
340 
341 	spin_lock(&root->fs_info->ordered_extent_lock);
342 	list_del_init(&entry->root_extent_list);
343 
344 	/*
345 	 * if there are no more ordered extents for this inode and
346 	 * no dirty pages, we can safely remove the inode from the
347 	 * list of ordered operations
348 	 */
349 	if (RB_EMPTY_ROOT(&tree->tree) &&
350 	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
351 		list_del_init(&BTRFS_I(inode)->ordered_operations);
352 	}
353 	spin_unlock(&root->fs_info->ordered_extent_lock);
354 
355 	return 0;
356 }
357 
358 /*
359  * remove an ordered extent from the tree.  No references are dropped
360  * but any waiters are woken.
361  */
362 int btrfs_remove_ordered_extent(struct inode *inode,
363 				struct btrfs_ordered_extent *entry)
364 {
365 	struct btrfs_ordered_inode_tree *tree;
366 	int ret;
367 
368 	tree = &BTRFS_I(inode)->ordered_tree;
369 	spin_lock(&tree->lock);
370 	ret = __btrfs_remove_ordered_extent(inode, entry);
371 	spin_unlock(&tree->lock);
372 	wake_up(&entry->wait);
373 
374 	return ret;
375 }
376 
377 /*
378  * wait for all the ordered extents in a root.  This is done when balancing
379  * space between drives.
380  */
381 int btrfs_wait_ordered_extents(struct btrfs_root *root,
382 			       int nocow_only, int delay_iput)
383 {
384 	struct list_head splice;
385 	struct list_head *cur;
386 	struct btrfs_ordered_extent *ordered;
387 	struct inode *inode;
388 
389 	INIT_LIST_HEAD(&splice);
390 
391 	spin_lock(&root->fs_info->ordered_extent_lock);
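	/*
	 * splice the whole list onto a private list so the lock can be
	 * dropped while waiting on each extent; anything we skip below is
	 * moved back onto fs_info->ordered_extents
	 */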
392 	list_splice_init(&root->fs_info->ordered_extents, &splice);
393 	while (!list_empty(&splice)) {
394 		cur = splice.next;
395 		ordered = list_entry(cur, struct btrfs_ordered_extent,
396 				     root_extent_list);
397 		if (nocow_only &&
398 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
399 		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
400 			list_move(&ordered->root_extent_list,
401 				  &root->fs_info->ordered_extents);
402 			cond_resched_lock(&root->fs_info->ordered_extent_lock);
403 			continue;
404 		}
405 
406 		list_del_init(&ordered->root_extent_list);
407 		atomic_inc(&ordered->refs);
408 
409 		/*
410 		 * the inode may be getting freed (in sys_unlink path).
411 		 */
412 		inode = igrab(ordered->inode);
413 
414 		spin_unlock(&root->fs_info->ordered_extent_lock);
415 
416 		if (inode) {
417 			btrfs_start_ordered_extent(inode, ordered, 1);
418 			btrfs_put_ordered_extent(ordered);
419 			if (delay_iput)
420 				btrfs_add_delayed_iput(inode);
421 			else
422 				iput(inode);
423 		} else {
424 			btrfs_put_ordered_extent(ordered);
425 		}
426 
427 		spin_lock(&root->fs_info->ordered_extent_lock);
428 	}
429 	spin_unlock(&root->fs_info->ordered_extent_lock);
430 	return 0;
431 }
432 
433 /*
434  * this is used during transaction commit to write all the inodes
435  * added to the ordered operation list.  These files must be fully on
436  * disk before the transaction commits.
437  *
438  * we have two modes here, one is to just start the IO via filemap_flush
439  * and the other is to wait for all the IO.  When we wait, we have an
440  * extra check to make sure the ordered operation list really is empty
441  * before we return
442  */
443 int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
444 {
445 	struct btrfs_inode *btrfs_inode;
446 	struct inode *inode;
447 	struct list_head splice;
448 
449 	INIT_LIST_HEAD(&splice);
450 
451 	mutex_lock(&root->fs_info->ordered_operations_mutex);
452 	spin_lock(&root->fs_info->ordered_extent_lock);
453 again:
454 	list_splice_init(&root->fs_info->ordered_operations, &splice);
455 
456 	while (!list_empty(&splice)) {
457 		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
458 				   ordered_operations);
459 
460 		inode = &btrfs_inode->vfs_inode;
461 
462 		list_del_init(&btrfs_inode->ordered_operations);
463 
464 		/*
465 		 * the inode may be getting freed (in sys_unlink path).
466 		 */
467 		inode = igrab(inode);
468 
469 		if (!wait && inode) {
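		/*
		 * when only flushing, keep the inode on the list so a later
		 * pass that does wait can still find it
		 */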
470 			list_add_tail(&BTRFS_I(inode)->ordered_operations,
471 			      &root->fs_info->ordered_operations);
472 		}
473 		spin_unlock(&root->fs_info->ordered_extent_lock);
474 
475 		if (inode) {
476 			if (wait)
477 				btrfs_wait_ordered_range(inode, 0, (u64)-1);
478 			else
479 				filemap_flush(inode->i_mapping);
480 			btrfs_add_delayed_iput(inode);
481 		}
482 
483 		cond_resched();
484 		spin_lock(&root->fs_info->ordered_extent_lock);
485 	}
486 	if (wait && !list_empty(&root->fs_info->ordered_operations))
487 		goto again;
488 
489 	spin_unlock(&root->fs_info->ordered_extent_lock);
490 	mutex_unlock(&root->fs_info->ordered_operations_mutex);
491 
492 	return 0;
493 }
494 
495 /*
496  * Used to start IO or wait for a given ordered extent to finish.
497  *
498  * If wait is one, this effectively waits on page writeback for all the pages
499  * in the extent, and it waits on the io completion code to insert
500  * metadata into the btree corresponding to the extent
501  */
502 void btrfs_start_ordered_extent(struct inode *inode,
503 				       struct btrfs_ordered_extent *entry,
504 				       int wait)
505 {
506 	u64 start = entry->file_offset;
507 	u64 end = start + entry->len - 1;
508 
509 	/*
510 	 * pages in the range can be dirty, clean or writeback.  We
511 	 * start IO on any dirty ones so the wait doesn't stall waiting
512 	 * for pdflush to find them
513 	 */
514 	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
515 		filemap_fdatawrite_range(inode->i_mapping, start, end);
516 	if (wait) {
517 		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
518 						 &entry->flags));
519 	}
520 }
521 
522 /*
523  * Used to wait on ordered extents across a large range of bytes.
524  */
525 int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
526 {
527 	u64 end;
528 	u64 orig_end;
529 	u64 wait_end;
530 	struct btrfs_ordered_extent *ordered;
531 	int found;
532 
533 	if (start + len < start) {
534 		orig_end = INT_LIMIT(loff_t);
535 	} else {
536 		orig_end = start + len - 1;
537 		if (orig_end > INT_LIMIT(loff_t))
538 			orig_end = INT_LIMIT(loff_t);
539 	}
540 	wait_end = orig_end;
541 again:
542 	/* start IO across the range first to instantiate any delalloc
543 	 * extents
544 	 */
545 	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
546 
547 	/* The compression code will leave pages locked but return from
548 	 * writepage without setting writeback on them.  Starting again
549 	 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
550 	 */
551 	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
552 
553 	filemap_fdatawait_range(inode->i_mapping, start, orig_end);
554 
555 	end = orig_end;
556 	found = 0;
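	/*
	 * walk the range backwards: find the last ordered extent starting at
	 * or before 'end', wait for it, then continue just before its start
	 * until we run out of extents or reach the start of the range
	 */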
557 	while (1) {
558 		ordered = btrfs_lookup_first_ordered_extent(inode, end);
559 		if (!ordered)
560 			break;
561 		if (ordered->file_offset > orig_end) {
562 			btrfs_put_ordered_extent(ordered);
563 			break;
564 		}
565 		if (ordered->file_offset + ordered->len < start) {
566 			btrfs_put_ordered_extent(ordered);
567 			break;
568 		}
569 		found++;
570 		btrfs_start_ordered_extent(inode, ordered, 1);
571 		end = ordered->file_offset;
572 		btrfs_put_ordered_extent(ordered);
573 		if (end == 0 || end == start)
574 			break;
575 		end--;
576 	}
577 	if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
578 			   EXTENT_DELALLOC, 0, NULL)) {
579 		schedule_timeout(1);
580 		goto again;
581 	}
582 	return 0;
583 }
584 
585 /*
586  * find an ordered extent corresponding to file_offset.  return NULL if
587  * nothing is found, otherwise take a reference on the extent and return it
588  */
589 struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
590 							 u64 file_offset)
591 {
592 	struct btrfs_ordered_inode_tree *tree;
593 	struct rb_node *node;
594 	struct btrfs_ordered_extent *entry = NULL;
595 
596 	tree = &BTRFS_I(inode)->ordered_tree;
597 	spin_lock(&tree->lock);
598 	node = tree_search(tree, file_offset);
599 	if (!node)
600 		goto out;
601 
602 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
603 	if (!offset_in_entry(entry, file_offset))
604 		entry = NULL;
605 	if (entry)
606 		atomic_inc(&entry->refs);
607 out:
608 	spin_unlock(&tree->lock);
609 	return entry;
610 }
611 
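/*
 * Sketch of how the lookup above is typically paired with the start/put
 * helpers, e.g. from a page writeback or invalidation path; 'page_start'
 * and the surrounding context are assumptions, the three calls themselves
 * are defined in this file:
 *
 *	struct btrfs_ordered_extent *ordered;
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, page_start);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(inode, ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */
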
612 /* Since the DIO code tries to lock a wide area we need to look for any ordered
613  * extents that exist in the range, rather than just the start of the range.
614  */
615 struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
616 							u64 file_offset,
617 							u64 len)
618 {
619 	struct btrfs_ordered_inode_tree *tree;
620 	struct rb_node *node;
621 	struct btrfs_ordered_extent *entry = NULL;
622 
623 	tree = &BTRFS_I(inode)->ordered_tree;
624 	spin_lock(&tree->lock);
625 	node = tree_search(tree, file_offset);
626 	if (!node) {
627 		node = tree_search(tree, file_offset + len);
628 		if (!node)
629 			goto out;
630 	}
631 
632 	while (1) {
633 		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
634 		if (range_overlaps(entry, file_offset, len))
635 			break;
636 
637 		if (entry->file_offset >= file_offset + len) {
638 			entry = NULL;
639 			break;
640 		}
641 		entry = NULL;
642 		node = rb_next(node);
643 		if (!node)
644 			break;
645 	}
646 out:
647 	if (entry)
648 		atomic_inc(&entry->refs);
649 	spin_unlock(&tree->lock);
650 	return entry;
651 }
652 
653 /*
654  * lookup and return any extent before 'file_offset'.  NULL is returned
655  * if none is found
656  */
657 struct btrfs_ordered_extent *
658 btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
659 {
660 	struct btrfs_ordered_inode_tree *tree;
661 	struct rb_node *node;
662 	struct btrfs_ordered_extent *entry = NULL;
663 
664 	tree = &BTRFS_I(inode)->ordered_tree;
665 	spin_lock(&tree->lock);
666 	node = tree_search(tree, file_offset);
667 	if (!node)
668 		goto out;
669 
670 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
671 	atomic_inc(&entry->refs);
672 out:
673 	spin_unlock(&tree->lock);
674 	return entry;
675 }
676 
677 /*
678  * After an extent is done, call this to conditionally update the on disk
679  * i_size.  i_size is updated to cover any fully written part of the file.
680  */
681 int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
682 				struct btrfs_ordered_extent *ordered)
683 {
684 	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
685 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
686 	u64 disk_i_size;
687 	u64 new_i_size;
688 	u64 i_size_test;
689 	u64 i_size = i_size_read(inode);
690 	struct rb_node *node;
691 	struct rb_node *prev = NULL;
692 	struct btrfs_ordered_extent *test;
693 	int ret = 1;
694 
695 	if (ordered)
696 		offset = entry_end(ordered);
697 	else
698 		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
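	/*
	 * 'offset' is the candidate end for the new disk_i_size: either the
	 * end of the ordered extent that just finished, or the caller's
	 * offset rounded up to a sector boundary; the checks below decide
	 * whether it is safe to use
	 */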
699 
700 	spin_lock(&tree->lock);
701 	disk_i_size = BTRFS_I(inode)->disk_i_size;
702 
703 	/* truncate file */
704 	if (disk_i_size > i_size) {
705 		BTRFS_I(inode)->disk_i_size = i_size;
706 		ret = 0;
707 		goto out;
708 	}
709 
710 	/*
711 	 * if the disk i_size is already at the inode->i_size, or
712 	 * this ordered extent is inside the disk i_size, we're done
713 	 */
714 	if (disk_i_size == i_size || offset <= disk_i_size) {
715 		goto out;
716 	}
717 
718 	/*
719 	 * we can't update the disk_i_size if there are delalloc bytes
720 	 * between disk_i_size and this ordered extent
721 	 */
722 	if (test_range_bit(io_tree, disk_i_size, offset - 1,
723 			   EXTENT_DELALLOC, 0, NULL)) {
724 		goto out;
725 	}
726 	/*
727 	 * walk backward from this ordered extent to disk_i_size.
728 	 * if we find an ordered extent then we can't update disk i_size
729 	 * yet
730 	 */
731 	if (ordered) {
732 		node = rb_prev(&ordered->rb_node);
733 	} else {
734 		prev = tree_search(tree, offset);
735 		/*
736 		 * we insert file extents without involving an ordered struct,
737 		 * so there should be no ordered struct covering this offset
738 		 */
739 		if (prev) {
740 			test = rb_entry(prev, struct btrfs_ordered_extent,
741 					rb_node);
742 			BUG_ON(offset_in_entry(test, offset));
743 		}
744 		node = prev;
745 	}
746 	while (node) {
747 		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
748 		if (test->file_offset + test->len <= disk_i_size)
749 			break;
750 		if (test->file_offset >= i_size)
751 			break;
752 		if (test->file_offset >= disk_i_size)
753 			goto out;
754 		node = rb_prev(node);
755 	}
756 	new_i_size = min_t(u64, offset, i_size);
757 
758 	/*
759 	 * at this point, we know we can safely update i_size to at least
760 	 * the offset from this ordered extent.  But, we need to
761 	 * walk forward and see if ios from higher up in the file have
762 	 * finished.
763 	 */
764 	if (ordered) {
765 		node = rb_next(&ordered->rb_node);
766 	} else {
767 		if (prev)
768 			node = rb_next(prev);
769 		else
770 			node = rb_first(&tree->tree);
771 	}
772 	i_size_test = 0;
773 	if (node) {
774 		/*
775 		 * check whether there is an area where IO might have finished
776 		 * between our ordered extent and the next one
777 		 */
778 		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
779 		if (test->file_offset > offset)
780 			i_size_test = test->file_offset;
781 	} else {
782 		i_size_test = i_size;
783 	}
784 
785 	/*
786 	 * i_size_test is the end of a region after this ordered
787 	 * extent where there are no ordered extents.  As long as there
788 	 * are no delalloc bytes in this area, it is safe to update
789 	 * disk_i_size to the end of the region.
790 	 */
791 	if (i_size_test > offset &&
792 	    !test_range_bit(io_tree, offset, i_size_test - 1,
793 			    EXTENT_DELALLOC, 0, NULL)) {
794 		new_i_size = min_t(u64, i_size_test, i_size);
795 	}
796 	BTRFS_I(inode)->disk_i_size = new_i_size;
797 	ret = 0;
798 out:
799 	/*
800 	 * we need to remove the ordered extent with the tree lock held
801 	 * so that other people calling this function don't find our fully
802 	 * processed ordered entry and skip updating the i_size
803 	 */
804 	if (ordered)
805 		__btrfs_remove_ordered_extent(inode, ordered);
806 	spin_unlock(&tree->lock);
807 	if (ordered)
808 		wake_up(&ordered->wait);
809 	return ret;
810 }
811 
812 /*
813  * search the ordered extents for one corresponding to 'offset' and
814  * try to find a checksum.  This is used because we allow pages to
815  * be reclaimed before their checksum is actually put into the btree
816  */
817 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
818 			   u32 *sum)
819 {
820 	struct btrfs_ordered_sum *ordered_sum;
821 	struct btrfs_sector_sum *sector_sums;
822 	struct btrfs_ordered_extent *ordered;
823 	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
824 	unsigned long num_sectors;
825 	unsigned long i;
826 	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
827 	int ret = 1;
828 
829 	ordered = btrfs_lookup_ordered_extent(inode, offset);
830 	if (!ordered)
831 		return 1;
832 
833 	spin_lock(&tree->lock);
834 	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
835 		if (disk_bytenr >= ordered_sum->bytenr) {
836 			num_sectors = ordered_sum->len / sectorsize;
837 			sector_sums = ordered_sum->sums;
838 			for (i = 0; i < num_sectors; i++) {
839 				if (sector_sums[i].bytenr == disk_bytenr) {
840 					*sum = sector_sums[i].sum;
841 					ret = 0;
842 					goto out;
843 				}
844 			}
845 		}
846 	}
847 out:
848 	spin_unlock(&tree->lock);
849 	btrfs_put_ordered_extent(ordered);
850 	return ret;
851 }
852 
853 
854 /*
855  * add a given inode to the list of inodes that must be fully on
856  * disk before a transaction commit finishes.
857  *
858  * This basically gives us the ext3 style data=ordered mode, and it is mostly
859  * used to make sure renamed files are fully on disk.
860  *
861  * It is a noop if the inode is already fully on disk.
862  *
863  * If trans is not null, we'll do a friendly check for a transaction that
864  * is already flushing things and force the IO down ourselves.
865  */
866 int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
867 				struct btrfs_root *root,
868 				struct inode *inode)
869 {
870 	u64 last_mod;
871 
872 	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);
873 
874 	/*
875 	 * if this file hasn't been changed since the last transaction
876 	 * commit, we can safely return without doing anything
877 	 */
878 	if (last_mod < root->fs_info->last_trans_committed)
879 		return 0;
880 
881 	/*
882 	 * the transaction is already committing.  Just start the IO and
883 	 * don't bother with all of this list nonsense
884 	 */
885 	if (trans && root->fs_info->running_transaction->blocked) {
886 		btrfs_wait_ordered_range(inode, 0, (u64)-1);
887 		return 0;
888 	}
889 
890 	spin_lock(&root->fs_info->ordered_extent_lock);
891 	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
892 		list_add_tail(&BTRFS_I(inode)->ordered_operations,
893 			      &root->fs_info->ordered_operations);
894 	}
895 	spin_unlock(&root->fs_info->ordered_extent_lock);
896 
897 	return 0;
898 }
899