xref: /linux/fs/btrfs/file.c (revision b889fcf63cb62e7fdb7816565e28f44dbe4a76a5)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/fs.h>
20 #include <linux/pagemap.h>
21 #include <linux/highmem.h>
22 #include <linux/time.h>
23 #include <linux/init.h>
24 #include <linux/string.h>
25 #include <linux/backing-dev.h>
26 #include <linux/mpage.h>
27 #include <linux/falloc.h>
28 #include <linux/swap.h>
29 #include <linux/writeback.h>
30 #include <linux/statfs.h>
31 #include <linux/compat.h>
32 #include <linux/slab.h>
33 #include "ctree.h"
34 #include "disk-io.h"
35 #include "transaction.h"
36 #include "btrfs_inode.h"
37 #include "ioctl.h"
38 #include "print-tree.h"
39 #include "tree-log.h"
40 #include "locking.h"
41 #include "compat.h"
42 #include "volumes.h"
43 
44 static struct kmem_cache *btrfs_inode_defrag_cachep;
45 /*
46  * When auto defrag is enabled we queue up these defrag
47  * structs to remember which inodes need defragging
48  * passes.
49  */
50 struct inode_defrag {
51 	struct rb_node rb_node;
52 	/* objectid */
53 	u64 ino;
54 	/*
55 	 * transid where the defrag was added, we search for
56 	 * extents newer than this
57 	 */
58 	u64 transid;
59 
60 	/* root objectid */
61 	u64 root;
62 
63 	/* last offset we were able to defrag */
64 	u64 last_offset;
65 
66 	/* if we've wrapped around back to zero once already */
67 	int cycled;
68 };
69 
70 static int __compare_inode_defrag(struct inode_defrag *defrag1,
71 				  struct inode_defrag *defrag2)
72 {
73 	if (defrag1->root > defrag2->root)
74 		return 1;
75 	else if (defrag1->root < defrag2->root)
76 		return -1;
77 	else if (defrag1->ino > defrag2->ino)
78 		return 1;
79 	else if (defrag1->ino < defrag2->ino)
80 		return -1;
81 	else
82 		return 0;
83 }
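
/*
 * Example of the composite (root, ino) ordering this comparator gives
 * us: (5, 100) sorts before (5, 200), and every (4, x) sorts before
 * any (5, y).  The defrag_inodes rbtree is therefore ordered first by
 * root objectid, then by inode number, which is what
 * btrfs_pick_defrag_inode() relies on to resume scans from a
 * (root, ino) cursor.
 */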
84 
85 /* insert a record for an inode into the defrag tree.  The lock
86  * must be held already
87  *
88  * If you're inserting a record for an older transid than an
89  * existing record, the transid already in the tree is lowered
90  *
91  * If an existing record is found the defrag item you
92  * pass in is freed
93  */
94 static int __btrfs_add_inode_defrag(struct inode *inode,
95 				    struct inode_defrag *defrag)
96 {
97 	struct btrfs_root *root = BTRFS_I(inode)->root;
98 	struct inode_defrag *entry;
99 	struct rb_node **p;
100 	struct rb_node *parent = NULL;
101 	int ret;
102 
103 	p = &root->fs_info->defrag_inodes.rb_node;
104 	while (*p) {
105 		parent = *p;
106 		entry = rb_entry(parent, struct inode_defrag, rb_node);
107 
108 		ret = __compare_inode_defrag(defrag, entry);
109 		if (ret < 0)
110 			p = &parent->rb_left;
111 		else if (ret > 0)
112 			p = &parent->rb_right;
113 		else {
114 			/* if we're reinserting an entry for
115 			 * an old defrag run, make sure to
116 			 * lower the transid of our existing record
117 			 */
118 			if (defrag->transid < entry->transid)
119 				entry->transid = defrag->transid;
120 			if (defrag->last_offset > entry->last_offset)
121 				entry->last_offset = defrag->last_offset;
122 			return -EEXIST;
123 		}
124 	}
125 	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
126 	rb_link_node(&defrag->rb_node, parent, p);
127 	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
128 	return 0;
129 }
130 
131 static inline int __need_auto_defrag(struct btrfs_root *root)
132 {
133 	if (!btrfs_test_opt(root, AUTO_DEFRAG))
134 		return 0;
135 
136 	if (btrfs_fs_closing(root->fs_info))
137 		return 0;
138 
139 	return 1;
140 }
141 
142 /*
143  * insert a defrag record for this inode if auto defrag is
144  * enabled
145  */
146 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
147 			   struct inode *inode)
148 {
149 	struct btrfs_root *root = BTRFS_I(inode)->root;
150 	struct inode_defrag *defrag;
151 	u64 transid;
152 	int ret;
153 
154 	if (!__need_auto_defrag(root))
155 		return 0;
156 
157 	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
158 		return 0;
159 
160 	if (trans)
161 		transid = trans->transid;
162 	else
163 		transid = BTRFS_I(inode)->root->last_trans;
164 
165 	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
166 	if (!defrag)
167 		return -ENOMEM;
168 
169 	defrag->ino = btrfs_ino(inode);
170 	defrag->transid = transid;
171 	defrag->root = root->root_key.objectid;
172 
173 	spin_lock(&root->fs_info->defrag_inodes_lock);
174 	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
175 		/*
176 		 * If we set the IN_DEFRAG flag and the inode is then evicted
177 		 * from memory, the flag is lost; when the inode is read back
178 		 * in we can race here and find an existing defrag record.
179 		 */
180 		ret = __btrfs_add_inode_defrag(inode, defrag);
181 		if (ret)
182 			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
183 	} else {
184 		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
185 	}
186 	spin_unlock(&root->fs_info->defrag_inodes_lock);
187 	return 0;
188 }
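
/*
 * A hedged usage sketch (the caller below is illustrative, not a
 * function in this file): write paths that notice fragmentation can
 * queue the inode for a later pass, e.g.
 *
 *	if (small_extent)		<- hypothetical predicate
 *		btrfs_add_inode_defrag(NULL, inode);
 *
 * Passing a NULL trans is fine; the root's last_trans is then used as
 * the cutoff generation for which extents count as "new".
 */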
189 
190 /*
191  * Requeue the defrag object. If there is a defrag object that points to
192  * the same inode in the tree, we will merge them together (by
193  * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
194  */
195 void btrfs_requeue_inode_defrag(struct inode *inode,
196 				struct inode_defrag *defrag)
197 {
198 	struct btrfs_root *root = BTRFS_I(inode)->root;
199 	int ret;
200 
201 	if (!__need_auto_defrag(root))
202 		goto out;
203 
204 	/*
205 	 * Here we don't check the IN_DEFRAG flag, because we need to merge
206 	 * the new record with any existing one.
207 	 */
208 	spin_lock(&root->fs_info->defrag_inodes_lock);
209 	ret = __btrfs_add_inode_defrag(inode, defrag);
210 	spin_unlock(&root->fs_info->defrag_inodes_lock);
211 	if (ret)
212 		goto out;
213 	return;
214 out:
215 	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
216 }
217 
218 /*
219  * Pick the defraggable inode record that we want; if it doesn't exist,
220  * return the next one in (root, ino) order.
221  */
222 static struct inode_defrag *
223 btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
224 {
225 	struct inode_defrag *entry = NULL;
226 	struct inode_defrag tmp;
227 	struct rb_node *p;
228 	struct rb_node *parent = NULL;
229 	int ret;
230 
231 	tmp.ino = ino;
232 	tmp.root = root;
233 
234 	spin_lock(&fs_info->defrag_inodes_lock);
235 	p = fs_info->defrag_inodes.rb_node;
236 	while (p) {
237 		parent = p;
238 		entry = rb_entry(parent, struct inode_defrag, rb_node);
239 
240 		ret = __compare_inode_defrag(&tmp, entry);
241 		if (ret < 0)
242 			p = parent->rb_left;
243 		else if (ret > 0)
244 			p = parent->rb_right;
245 		else
246 			goto out;
247 	}
248 
249 	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
250 		parent = rb_next(parent);
251 		if (parent)
252 			entry = rb_entry(parent, struct inode_defrag, rb_node);
253 		else
254 			entry = NULL;
255 	}
256 out:
257 	if (entry)
258 		rb_erase(parent, &fs_info->defrag_inodes);
259 	spin_unlock(&fs_info->defrag_inodes_lock);
260 	return entry;
261 }
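
/*
 * Example: if the tree holds (5, 10), (5, 40) and (7, 3) and we ask
 * for (5, 11), the exact search misses and the neighbour fixup above
 * yields the next record in order, (5, 40).  Note that the record is
 * erased from the tree before being returned, so the caller owns it
 * and must free or requeue it.
 */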
262 
263 void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
264 {
265 	struct inode_defrag *defrag;
266 	struct rb_node *node;
267 
268 	spin_lock(&fs_info->defrag_inodes_lock);
269 	node = rb_first(&fs_info->defrag_inodes);
270 	while (node) {
271 		rb_erase(node, &fs_info->defrag_inodes);
272 		defrag = rb_entry(node, struct inode_defrag, rb_node);
273 		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
274 
275 		if (need_resched()) {
276 			spin_unlock(&fs_info->defrag_inodes_lock);
277 			cond_resched();
278 			spin_lock(&fs_info->defrag_inodes_lock);
279 		}
280 
281 		node = rb_first(&fs_info->defrag_inodes);
282 	}
283 	spin_unlock(&fs_info->defrag_inodes_lock);
284 }
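
/*
 * Note the unlock/cond_resched()/relock pattern above: rb_first() is
 * re-evaluated on every iteration because other tasks may insert or
 * erase records while defrag_inodes_lock is dropped.
 */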
285 
286 #define BTRFS_DEFRAG_BATCH	1024
287 
288 static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
289 				    struct inode_defrag *defrag)
290 {
291 	struct btrfs_root *inode_root;
292 	struct inode *inode;
293 	struct btrfs_key key;
294 	struct btrfs_ioctl_defrag_range_args range;
295 	int num_defrag;
296 
297 	/* get the inode */
298 	key.objectid = defrag->root;
299 	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
300 	key.offset = (u64)-1;
301 	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
302 	if (IS_ERR(inode_root)) {
303 		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
304 		return PTR_ERR(inode_root);
305 	}
306 
307 	key.objectid = defrag->ino;
308 	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
309 	key.offset = 0;
310 	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
311 	if (IS_ERR(inode)) {
312 		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
313 		return PTR_ERR(inode);
314 	}
315 
316 	/* do a chunk of defrag */
317 	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
318 	memset(&range, 0, sizeof(range));
319 	range.len = (u64)-1;
320 	range.start = defrag->last_offset;
321 
322 	sb_start_write(fs_info->sb);
323 	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
324 				       BTRFS_DEFRAG_BATCH);
325 	sb_end_write(fs_info->sb);
326 	/*
327 	 * if we filled the whole defrag batch, there
328 	 * must be more work to do.  Queue this defrag
329 	 * again
330 	 */
331 	if (num_defrag == BTRFS_DEFRAG_BATCH) {
332 		defrag->last_offset = range.start;
333 		btrfs_requeue_inode_defrag(inode, defrag);
334 	} else if (defrag->last_offset && !defrag->cycled) {
335 		/*
336 		 * we didn't fill our defrag batch, but
337 		 * we didn't start at zero.  Make sure we loop
338 		 * around to the start of the file.
339 		 */
340 		defrag->last_offset = 0;
341 		defrag->cycled = 1;
342 		btrfs_requeue_inode_defrag(inode, defrag);
343 	} else {
344 		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
345 	}
346 
347 	iput(inode);
348 	return 0;
349 }
350 
351 /*
352  * run through the list of inodes in the FS that need
353  * defragging
354  */
355 int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
356 {
357 	struct inode_defrag *defrag;
358 	u64 first_ino = 0;
359 	u64 root_objectid = 0;
360 
361 	atomic_inc(&fs_info->defrag_running);
362 	while (1) {
363 		if (!__need_auto_defrag(fs_info->tree_root))
364 			break;
365 
366 		/* find an inode to defrag */
367 		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
368 						 first_ino);
369 		if (!defrag) {
370 			if (root_objectid || first_ino) {
371 				root_objectid = 0;
372 				first_ino = 0;
373 				continue;
374 			} else {
375 				break;
376 			}
377 		}
378 
379 		first_ino = defrag->ino + 1;
380 		root_objectid = defrag->root;
381 
382 		__btrfs_run_defrag_inode(fs_info, defrag);
383 	}
384 	atomic_dec(&fs_info->defrag_running);
385 
386 	/*
387 	 * during unmount, we use the transaction_wait queue to
388 	 * wait for the defragger to stop
389 	 */
390 	wake_up(&fs_info->transaction_wait);
391 	return 0;
392 }
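
/*
 * The (root_objectid, first_ino) pair above acts as a scan cursor:
 * after handling a record we continue from (root, ino + 1).  When a
 * search from a nonzero cursor comes up empty, the cursor is reset to
 * (0, 0) and the scan restarts, so records queued behind the cursor
 * still get serviced; only an empty search from (0, 0) ends the loop.
 */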
393 
394 /* simple helper to fault in pages and copy.  This should go away
395  * and be replaced with calls into generic code.
396  */
397 static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
398 					 size_t write_bytes,
399 					 struct page **prepared_pages,
400 					 struct iov_iter *i)
401 {
402 	size_t copied = 0;
403 	size_t total_copied = 0;
404 	int pg = 0;
405 	int offset = pos & (PAGE_CACHE_SIZE - 1);
406 
407 	while (write_bytes > 0) {
408 		size_t count = min_t(size_t,
409 				     PAGE_CACHE_SIZE - offset, write_bytes);
410 		struct page *page = prepared_pages[pg];
411 		/*
412 		 * Copy data from userspace to the current page
413 		 *
414 		 * Disable pagefault to avoid recursive lock since
415 		 * the pages are already locked
416 		 */
417 		pagefault_disable();
418 		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
419 		pagefault_enable();
420 
421 		/* Flush processor's dcache for this page */
422 		flush_dcache_page(page);
423 
424 		/*
425 		 * if we get a partial write, we can end up with
426 		 * partially up to date pages.  These add
427 		 * a lot of complexity, so make sure they don't
428 		 * happen by forcing this copy to be retried.
429 		 *
430 		 * The rest of the btrfs_file_write code will fall
431 		 * back to page at a time copies after we return 0.
432 		 */
433 		if (!PageUptodate(page) && copied < count)
434 			copied = 0;
435 
436 		iov_iter_advance(i, copied);
437 		write_bytes -= copied;
438 		total_copied += copied;
439 
440 		/* Return to btrfs_file_aio_write to fault page */
441 		if (unlikely(copied == 0))
442 			break;
443 
444 		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
445 			offset += copied;
446 		} else {
447 			pg++;
448 			offset = 0;
449 		}
450 	}
451 	return total_copied;
452 }
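
/*
 * The loop above is the usual "atomic copy under page lock" idiom:
 *
 *	pagefault_disable();
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
 *	pagefault_enable();
 *
 * With page faults disabled, a non-resident user buffer produces a
 * short copy instead of deadlocking against the locked pages; the
 * caller (__btrfs_buffered_write) then faults the buffer in with
 * iov_iter_fault_in_readable() and retries one page at a time.
 */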
453 
454 /*
455  * unlocks pages after btrfs_file_write is done with them
456  */
457 void btrfs_drop_pages(struct page **pages, size_t num_pages)
458 {
459 	size_t i;
460 	for (i = 0; i < num_pages; i++) {
461 		/* PageChecked is some magic around finding pages that
462 		 * have been modified without going through
463 		 * btrfs_set_page_dirty; clear it here.
464 		 */
465 		ClearPageChecked(pages[i]);
466 		unlock_page(pages[i]);
467 		mark_page_accessed(pages[i]);
468 		page_cache_release(pages[i]);
469 	}
470 }
471 
472 /*
473  * after copy_from_user, pages need to be dirtied and we need to make
474  * sure holes are created between the current EOF and the start of
475  * any next extents (if required).
476  *
477  * this also makes the decision about creating an inline extent vs
478  * doing real data extents, marking pages dirty and delalloc as required.
479  */
480 int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
481 		      struct page **pages, size_t num_pages,
482 		      loff_t pos, size_t write_bytes,
483 		      struct extent_state **cached)
484 {
485 	int err = 0;
486 	int i;
487 	u64 num_bytes;
488 	u64 start_pos;
489 	u64 end_of_last_block;
490 	u64 end_pos = pos + write_bytes;
491 	loff_t isize = i_size_read(inode);
492 
493 	start_pos = pos & ~((u64)root->sectorsize - 1);
494 	num_bytes = (write_bytes + pos - start_pos +
495 		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
496 
497 	end_of_last_block = start_pos + num_bytes - 1;
498 	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
499 					cached);
500 	if (err)
501 		return err;
502 
503 	for (i = 0; i < num_pages; i++) {
504 		struct page *p = pages[i];
505 		SetPageUptodate(p);
506 		ClearPageChecked(p);
507 		set_page_dirty(p);
508 	}
509 
510 	/*
511 	 * we've only changed i_size in ram, and we haven't updated
512 	 * the disk i_size.  There is no need to log the inode
513 	 * at this time.
514 	 */
515 	if (end_pos > isize)
516 		i_size_write(inode, end_pos);
517 	return 0;
518 }
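
/*
 * Worked example of the sector rounding above, assuming a 4K
 * sectorsize: for pos = 3000 and write_bytes = 2000,
 * start_pos = 3000 & ~4095 = 0 and
 * num_bytes = (2000 + 3000 - 0 + 4095) & ~4095 = 8192, so delalloc is
 * set on two full sectors even though only 2000 bytes were written.
 */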
519 
520 /*
521  * this drops all the extents in the cache that intersect the range
522  * [start, end].  Existing extents are split as required.
523  */
524 void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
525 			     int skip_pinned)
526 {
527 	struct extent_map *em;
528 	struct extent_map *split = NULL;
529 	struct extent_map *split2 = NULL;
530 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
531 	u64 len = end - start + 1;
532 	u64 gen;
533 	int ret;
534 	int testend = 1;
535 	unsigned long flags;
536 	int compressed = 0;
537 
538 	WARN_ON(end < start);
539 	if (end == (u64)-1) {
540 		len = (u64)-1;
541 		testend = 0;
542 	}
543 	while (1) {
544 		int no_splits = 0;
545 
546 		if (!split)
547 			split = alloc_extent_map();
548 		if (!split2)
549 			split2 = alloc_extent_map();
550 		if (!split || !split2)
551 			no_splits = 1;
552 
553 		write_lock(&em_tree->lock);
554 		em = lookup_extent_mapping(em_tree, start, len);
555 		if (!em) {
556 			write_unlock(&em_tree->lock);
557 			break;
558 		}
559 		flags = em->flags;
560 		gen = em->generation;
561 		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
562 			if (testend && em->start + em->len >= start + len) {
563 				free_extent_map(em);
564 				write_unlock(&em_tree->lock);
565 				break;
566 			}
567 			start = em->start + em->len;
568 			if (testend)
569 				len = start + len - (em->start + em->len);
570 			free_extent_map(em);
571 			write_unlock(&em_tree->lock);
572 			continue;
573 		}
574 		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
575 		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
576 		remove_extent_mapping(em_tree, em);
577 		if (no_splits)
578 			goto next;
579 
580 		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
581 		    em->start < start) {
582 			split->start = em->start;
583 			split->len = start - em->start;
584 			split->orig_start = em->orig_start;
585 			split->block_start = em->block_start;
586 
587 			if (compressed)
588 				split->block_len = em->block_len;
589 			else
590 				split->block_len = split->len;
591 			split->orig_block_len = max(split->block_len,
592 						    em->orig_block_len);
593 			split->generation = gen;
594 			split->bdev = em->bdev;
595 			split->flags = flags;
596 			split->compress_type = em->compress_type;
597 			ret = add_extent_mapping(em_tree, split);
598 			BUG_ON(ret); /* Logic error */
599 			list_move(&split->list, &em_tree->modified_extents);
600 			free_extent_map(split);
601 			split = split2;
602 			split2 = NULL;
603 		}
604 		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
605 		    testend && em->start + em->len > start + len) {
606 			u64 diff = start + len - em->start;
607 
608 			split->start = start + len;
609 			split->len = em->start + em->len - (start + len);
610 			split->bdev = em->bdev;
611 			split->flags = flags;
612 			split->compress_type = em->compress_type;
613 			split->generation = gen;
614 			split->orig_block_len = max(em->block_len,
615 						    em->orig_block_len);
616 
617 			if (compressed) {
618 				split->block_len = em->block_len;
619 				split->block_start = em->block_start;
620 				split->orig_start = em->orig_start;
621 			} else {
622 				split->block_len = split->len;
623 				split->block_start = em->block_start + diff;
624 				split->orig_start = em->orig_start;
625 			}
626 
627 			ret = add_extent_mapping(em_tree, split);
628 			BUG_ON(ret); /* Logic error */
629 			list_move(&split->list, &em_tree->modified_extents);
630 			free_extent_map(split);
631 			split = NULL;
632 		}
633 next:
634 		write_unlock(&em_tree->lock);
635 
636 		/* once for us */
637 		free_extent_map(em);
638 		/* once for the tree */
639 		free_extent_map(em);
640 	}
641 	if (split)
642 		free_extent_map(split);
643 	if (split2)
644 		free_extent_map(split2);
645 }
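
/*
 * Visually, dropping [start, start + len) out of one cached extent map
 * leaves at most two remainders:
 *
 *	[em->start ........................... em->start + em->len)
 *	           [start ......... start+len)
 *	[ split   )                           [ second split ......)
 *
 * which is why pre-allocating two extent maps per loop iteration is
 * always enough for a single em.
 */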
646 
647 /*
648  * this is very complex, but the basic idea is to drop all extents
649  * in the range start - end.  If drop_end is non-NULL, it is set to the
650  * end of the last dropped or truncated extent, capped at end.
651  *
652  * If an extent intersects the range but is not entirely inside the range
653  * it is either truncated or split.  Anything entirely inside the range
654  * is deleted from the tree.
655  */
656 int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
657 			 struct btrfs_root *root, struct inode *inode,
658 			 struct btrfs_path *path, u64 start, u64 end,
659 			 u64 *drop_end, int drop_cache)
660 {
661 	struct extent_buffer *leaf;
662 	struct btrfs_file_extent_item *fi;
663 	struct btrfs_key key;
664 	struct btrfs_key new_key;
665 	u64 ino = btrfs_ino(inode);
666 	u64 search_start = start;
667 	u64 disk_bytenr = 0;
668 	u64 num_bytes = 0;
669 	u64 extent_offset = 0;
670 	u64 extent_end = 0;
671 	int del_nr = 0;
672 	int del_slot = 0;
673 	int extent_type;
674 	int recow;
675 	int ret;
676 	int modify_tree = -1;
677 	int update_refs = (root->ref_cows || root == root->fs_info->tree_root);
678 	int found = 0;
679 
680 	if (drop_cache)
681 		btrfs_drop_extent_cache(inode, start, end - 1, 0);
682 
683 	if (start >= BTRFS_I(inode)->disk_i_size)
684 		modify_tree = 0;
685 
686 	while (1) {
687 		recow = 0;
688 		ret = btrfs_lookup_file_extent(trans, root, path, ino,
689 					       search_start, modify_tree);
690 		if (ret < 0)
691 			break;
692 		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
693 			leaf = path->nodes[0];
694 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
695 			if (key.objectid == ino &&
696 			    key.type == BTRFS_EXTENT_DATA_KEY)
697 				path->slots[0]--;
698 		}
699 		ret = 0;
700 next_slot:
701 		leaf = path->nodes[0];
702 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
703 			BUG_ON(del_nr > 0);
704 			ret = btrfs_next_leaf(root, path);
705 			if (ret < 0)
706 				break;
707 			if (ret > 0) {
708 				ret = 0;
709 				break;
710 			}
711 			leaf = path->nodes[0];
712 			recow = 1;
713 		}
714 
715 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
716 		if (key.objectid > ino ||
717 		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
718 			break;
719 
720 		fi = btrfs_item_ptr(leaf, path->slots[0],
721 				    struct btrfs_file_extent_item);
722 		extent_type = btrfs_file_extent_type(leaf, fi);
723 
724 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
725 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
726 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
727 			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
728 			extent_offset = btrfs_file_extent_offset(leaf, fi);
729 			extent_end = key.offset +
730 				btrfs_file_extent_num_bytes(leaf, fi);
731 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
732 			extent_end = key.offset +
733 				btrfs_file_extent_inline_len(leaf, fi);
734 		} else {
735 			WARN_ON(1);
736 			extent_end = search_start;
737 		}
738 
739 		if (extent_end <= search_start) {
740 			path->slots[0]++;
741 			goto next_slot;
742 		}
743 
744 		found = 1;
745 		search_start = max(key.offset, start);
746 		if (recow || !modify_tree) {
747 			modify_tree = -1;
748 			btrfs_release_path(path);
749 			continue;
750 		}
751 
752 		/*
753 		 *     | - range to drop - |
754 		 *  | -------- extent -------- |
755 		 */
756 		if (start > key.offset && end < extent_end) {
757 			BUG_ON(del_nr > 0);
758 			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
759 
760 			memcpy(&new_key, &key, sizeof(new_key));
761 			new_key.offset = start;
762 			ret = btrfs_duplicate_item(trans, root, path,
763 						   &new_key);
764 			if (ret == -EAGAIN) {
765 				btrfs_release_path(path);
766 				continue;
767 			}
768 			if (ret < 0)
769 				break;
770 
771 			leaf = path->nodes[0];
772 			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
773 					    struct btrfs_file_extent_item);
774 			btrfs_set_file_extent_num_bytes(leaf, fi,
775 							start - key.offset);
776 
777 			fi = btrfs_item_ptr(leaf, path->slots[0],
778 					    struct btrfs_file_extent_item);
779 
780 			extent_offset += start - key.offset;
781 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
782 			btrfs_set_file_extent_num_bytes(leaf, fi,
783 							extent_end - start);
784 			btrfs_mark_buffer_dirty(leaf);
785 
786 			if (update_refs && disk_bytenr > 0) {
787 				ret = btrfs_inc_extent_ref(trans, root,
788 						disk_bytenr, num_bytes, 0,
789 						root->root_key.objectid,
790 						new_key.objectid,
791 						start - extent_offset, 0);
792 				BUG_ON(ret); /* -ENOMEM */
793 			}
794 			key.offset = start;
795 		}
796 		/*
797 		 *  | ---- range to drop ----- |
798 		 *      | -------- extent -------- |
799 		 */
800 		if (start <= key.offset && end < extent_end) {
801 			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
802 
803 			memcpy(&new_key, &key, sizeof(new_key));
804 			new_key.offset = end;
805 			btrfs_set_item_key_safe(trans, root, path, &new_key);
806 
807 			extent_offset += end - key.offset;
808 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
809 			btrfs_set_file_extent_num_bytes(leaf, fi,
810 							extent_end - end);
811 			btrfs_mark_buffer_dirty(leaf);
812 			if (update_refs && disk_bytenr > 0)
813 				inode_sub_bytes(inode, end - key.offset);
814 			break;
815 		}
816 
817 		search_start = extent_end;
818 		/*
819 		 *       | ---- range to drop ----- |
820 		 *  | -------- extent -------- |
821 		 */
822 		if (start > key.offset && end >= extent_end) {
823 			BUG_ON(del_nr > 0);
824 			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
825 
826 			btrfs_set_file_extent_num_bytes(leaf, fi,
827 							start - key.offset);
828 			btrfs_mark_buffer_dirty(leaf);
829 			if (update_refs && disk_bytenr > 0)
830 				inode_sub_bytes(inode, extent_end - start);
831 			if (end == extent_end)
832 				break;
833 
834 			path->slots[0]++;
835 			goto next_slot;
836 		}
837 
838 		/*
839 		 *  | ---- range to drop ----- |
840 		 *    | ------ extent ------ |
841 		 */
842 		if (start <= key.offset && end >= extent_end) {
843 			if (del_nr == 0) {
844 				del_slot = path->slots[0];
845 				del_nr = 1;
846 			} else {
847 				BUG_ON(del_slot + del_nr != path->slots[0]);
848 				del_nr++;
849 			}
850 
851 			if (update_refs &&
852 			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
853 				inode_sub_bytes(inode,
854 						extent_end - key.offset);
855 				extent_end = ALIGN(extent_end,
856 						   root->sectorsize);
857 			} else if (update_refs && disk_bytenr > 0) {
858 				ret = btrfs_free_extent(trans, root,
859 						disk_bytenr, num_bytes, 0,
860 						root->root_key.objectid,
861 						key.objectid, key.offset -
862 						extent_offset, 0);
863 				BUG_ON(ret); /* -ENOMEM */
864 				inode_sub_bytes(inode,
865 						extent_end - key.offset);
866 			}
867 
868 			if (end == extent_end)
869 				break;
870 
871 			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
872 				path->slots[0]++;
873 				goto next_slot;
874 			}
875 
876 			ret = btrfs_del_items(trans, root, path, del_slot,
877 					      del_nr);
878 			if (ret) {
879 				btrfs_abort_transaction(trans, root, ret);
880 				break;
881 			}
882 
883 			del_nr = 0;
884 			del_slot = 0;
885 
886 			btrfs_release_path(path);
887 			continue;
888 		}
889 
890 		BUG_ON(1);
891 	}
892 
893 	if (!ret && del_nr > 0) {
894 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
895 		if (ret)
896 			btrfs_abort_transaction(trans, root, ret);
897 	}
898 
899 	if (drop_end)
900 		*drop_end = found ? min(end, extent_end) : end;
901 	btrfs_release_path(path);
902 	return ret;
903 }
904 
905 int btrfs_drop_extents(struct btrfs_trans_handle *trans,
906 		       struct btrfs_root *root, struct inode *inode, u64 start,
907 		       u64 end, int drop_cache)
908 {
909 	struct btrfs_path *path;
910 	int ret;
911 
912 	path = btrfs_alloc_path();
913 	if (!path)
914 		return -ENOMEM;
915 	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
916 				   drop_cache);
917 	btrfs_free_path(path);
918 	return ret;
919 }
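
/*
 * A hedged usage sketch: callers that rewrite a file range typically
 * drop the old extents inside a transaction and then insert the
 * replacement file extent item, roughly
 *
 *	ret = btrfs_drop_extents(trans, root, inode, start, end, 1);
 *	if (!ret)
 *		... insert the new BTRFS_EXTENT_DATA_KEY item ...
 *
 * with drop_cache = 1 so the in-memory extent map cache is invalidated
 * along with the on-disk items.
 */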
920 
921 static int extent_mergeable(struct extent_buffer *leaf, int slot,
922 			    u64 objectid, u64 bytenr, u64 orig_offset,
923 			    u64 *start, u64 *end)
924 {
925 	struct btrfs_file_extent_item *fi;
926 	struct btrfs_key key;
927 	u64 extent_end;
928 
929 	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
930 		return 0;
931 
932 	btrfs_item_key_to_cpu(leaf, &key, slot);
933 	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
934 		return 0;
935 
936 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
937 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
938 	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
939 	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
940 	    btrfs_file_extent_compression(leaf, fi) ||
941 	    btrfs_file_extent_encryption(leaf, fi) ||
942 	    btrfs_file_extent_other_encoding(leaf, fi))
943 		return 0;
944 
945 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
946 	if ((*start && *start != key.offset) || (*end && *end != extent_end))
947 		return 0;
948 
949 	*start = key.offset;
950 	*end = extent_end;
951 	return 1;
952 }
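
/*
 * Note the in/out contract of extent_mergeable(): *start and *end are
 * both constraints and results.  A caller that passes a nonzero value
 * (e.g. other_start = end in btrfs_mark_extent_written()) only accepts
 * a neighbour whose boundary lines up exactly; on success the
 * neighbour's [key.offset, extent_end) range is written back.
 */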
953 
954 /*
955  * Mark extent in the range start - end as written.
956  *
957  * This changes extent type from 'pre-allocated' to 'regular'. If only
958  * part of extent is marked as written, the extent will be split into
959  * two or three.
960  */
961 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
962 			      struct inode *inode, u64 start, u64 end)
963 {
964 	struct btrfs_root *root = BTRFS_I(inode)->root;
965 	struct extent_buffer *leaf;
966 	struct btrfs_path *path;
967 	struct btrfs_file_extent_item *fi;
968 	struct btrfs_key key;
969 	struct btrfs_key new_key;
970 	u64 bytenr;
971 	u64 num_bytes;
972 	u64 extent_end;
973 	u64 orig_offset;
974 	u64 other_start;
975 	u64 other_end;
976 	u64 split;
977 	int del_nr = 0;
978 	int del_slot = 0;
979 	int recow;
980 	int ret;
981 	u64 ino = btrfs_ino(inode);
982 
983 	path = btrfs_alloc_path();
984 	if (!path)
985 		return -ENOMEM;
986 again:
987 	recow = 0;
988 	split = start;
989 	key.objectid = ino;
990 	key.type = BTRFS_EXTENT_DATA_KEY;
991 	key.offset = split;
992 
993 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
994 	if (ret < 0)
995 		goto out;
996 	if (ret > 0 && path->slots[0] > 0)
997 		path->slots[0]--;
998 
999 	leaf = path->nodes[0];
1000 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1001 	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
1002 	fi = btrfs_item_ptr(leaf, path->slots[0],
1003 			    struct btrfs_file_extent_item);
1004 	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
1005 	       BTRFS_FILE_EXTENT_PREALLOC);
1006 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1007 	BUG_ON(key.offset > start || extent_end < end);
1008 
1009 	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1010 	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1011 	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
1012 	memcpy(&new_key, &key, sizeof(new_key));
1013 
1014 	if (start == key.offset && end < extent_end) {
1015 		other_start = 0;
1016 		other_end = start;
1017 		if (extent_mergeable(leaf, path->slots[0] - 1,
1018 				     ino, bytenr, orig_offset,
1019 				     &other_start, &other_end)) {
1020 			new_key.offset = end;
1021 			btrfs_set_item_key_safe(trans, root, path, &new_key);
1022 			fi = btrfs_item_ptr(leaf, path->slots[0],
1023 					    struct btrfs_file_extent_item);
1024 			btrfs_set_file_extent_generation(leaf, fi,
1025 							 trans->transid);
1026 			btrfs_set_file_extent_num_bytes(leaf, fi,
1027 							extent_end - end);
1028 			btrfs_set_file_extent_offset(leaf, fi,
1029 						     end - orig_offset);
1030 			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1031 					    struct btrfs_file_extent_item);
1032 			btrfs_set_file_extent_generation(leaf, fi,
1033 							 trans->transid);
1034 			btrfs_set_file_extent_num_bytes(leaf, fi,
1035 							end - other_start);
1036 			btrfs_mark_buffer_dirty(leaf);
1037 			goto out;
1038 		}
1039 	}
1040 
1041 	if (start > key.offset && end == extent_end) {
1042 		other_start = end;
1043 		other_end = 0;
1044 		if (extent_mergeable(leaf, path->slots[0] + 1,
1045 				     ino, bytenr, orig_offset,
1046 				     &other_start, &other_end)) {
1047 			fi = btrfs_item_ptr(leaf, path->slots[0],
1048 					    struct btrfs_file_extent_item);
1049 			btrfs_set_file_extent_num_bytes(leaf, fi,
1050 							start - key.offset);
1051 			btrfs_set_file_extent_generation(leaf, fi,
1052 							 trans->transid);
1053 			path->slots[0]++;
1054 			new_key.offset = start;
1055 			btrfs_set_item_key_safe(trans, root, path, &new_key);
1056 
1057 			fi = btrfs_item_ptr(leaf, path->slots[0],
1058 					    struct btrfs_file_extent_item);
1059 			btrfs_set_file_extent_generation(leaf, fi,
1060 							 trans->transid);
1061 			btrfs_set_file_extent_num_bytes(leaf, fi,
1062 							other_end - start);
1063 			btrfs_set_file_extent_offset(leaf, fi,
1064 						     start - orig_offset);
1065 			btrfs_mark_buffer_dirty(leaf);
1066 			goto out;
1067 		}
1068 	}
1069 
1070 	while (start > key.offset || end < extent_end) {
1071 		if (key.offset == start)
1072 			split = end;
1073 
1074 		new_key.offset = split;
1075 		ret = btrfs_duplicate_item(trans, root, path, &new_key);
1076 		if (ret == -EAGAIN) {
1077 			btrfs_release_path(path);
1078 			goto again;
1079 		}
1080 		if (ret < 0) {
1081 			btrfs_abort_transaction(trans, root, ret);
1082 			goto out;
1083 		}
1084 
1085 		leaf = path->nodes[0];
1086 		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1087 				    struct btrfs_file_extent_item);
1088 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1089 		btrfs_set_file_extent_num_bytes(leaf, fi,
1090 						split - key.offset);
1091 
1092 		fi = btrfs_item_ptr(leaf, path->slots[0],
1093 				    struct btrfs_file_extent_item);
1094 
1095 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1096 		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
1097 		btrfs_set_file_extent_num_bytes(leaf, fi,
1098 						extent_end - split);
1099 		btrfs_mark_buffer_dirty(leaf);
1100 
1101 		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
1102 					   root->root_key.objectid,
1103 					   ino, orig_offset, 0);
1104 		BUG_ON(ret); /* -ENOMEM */
1105 
1106 		if (split == start) {
1107 			key.offset = start;
1108 		} else {
1109 			BUG_ON(start != key.offset);
1110 			path->slots[0]--;
1111 			extent_end = end;
1112 		}
1113 		recow = 1;
1114 	}
1115 
1116 	other_start = end;
1117 	other_end = 0;
1118 	if (extent_mergeable(leaf, path->slots[0] + 1,
1119 			     ino, bytenr, orig_offset,
1120 			     &other_start, &other_end)) {
1121 		if (recow) {
1122 			btrfs_release_path(path);
1123 			goto again;
1124 		}
1125 		extent_end = other_end;
1126 		del_slot = path->slots[0] + 1;
1127 		del_nr++;
1128 		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1129 					0, root->root_key.objectid,
1130 					ino, orig_offset, 0);
1131 		BUG_ON(ret); /* -ENOMEM */
1132 	}
1133 	other_start = 0;
1134 	other_end = start;
1135 	if (extent_mergeable(leaf, path->slots[0] - 1,
1136 			     ino, bytenr, orig_offset,
1137 			     &other_start, &other_end)) {
1138 		if (recow) {
1139 			btrfs_release_path(path);
1140 			goto again;
1141 		}
1142 		key.offset = other_start;
1143 		del_slot = path->slots[0];
1144 		del_nr++;
1145 		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1146 					0, root->root_key.objectid,
1147 					ino, orig_offset, 0);
1148 		BUG_ON(ret); /* -ENOMEM */
1149 	}
1150 	if (del_nr == 0) {
1151 		fi = btrfs_item_ptr(leaf, path->slots[0],
1152 			   struct btrfs_file_extent_item);
1153 		btrfs_set_file_extent_type(leaf, fi,
1154 					   BTRFS_FILE_EXTENT_REG);
1155 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1156 		btrfs_mark_buffer_dirty(leaf);
1157 	} else {
1158 		fi = btrfs_item_ptr(leaf, del_slot - 1,
1159 			   struct btrfs_file_extent_item);
1160 		btrfs_set_file_extent_type(leaf, fi,
1161 					   BTRFS_FILE_EXTENT_REG);
1162 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1163 		btrfs_set_file_extent_num_bytes(leaf, fi,
1164 						extent_end - key.offset);
1165 		btrfs_mark_buffer_dirty(leaf);
1166 
1167 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1168 		if (ret < 0) {
1169 			btrfs_abort_transaction(trans, root, ret);
1170 			goto out;
1171 		}
1172 	}
1173 out:
1174 	btrfs_free_path(path);
1175 	return ret < 0 ? ret : 0;	/* don't swallow errors from the paths above */
1176 }
1177 
1178 /*
1179  * on error we return an unlocked page and the error value;
1180  * on success we return a locked page and 0
1181  */
1182 static int prepare_uptodate_page(struct page *page, u64 pos,
1183 				 bool force_uptodate)
1184 {
1185 	int ret = 0;
1186 
1187 	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
1188 	    !PageUptodate(page)) {
1189 		ret = btrfs_readpage(NULL, page);
1190 		if (ret)
1191 			return ret;
1192 		lock_page(page);
1193 		if (!PageUptodate(page)) {
1194 			unlock_page(page);
1195 			return -EIO;
1196 		}
1197 	}
1198 	return 0;
1199 }
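
/*
 * Example with 4K pages, as used by prepare_pages() below: a 100 byte
 * write at pos = 4096 touches one page.  The first-page check is
 * passed pos = 4096 (page aligned, no read needed), but the last-page
 * check is passed pos + write_bytes = 4196, which is unaligned, so the
 * page is read first to preserve the existing bytes beyond the write.
 */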
1200 
1201 /*
1202  * this gets pages into the page cache and locks them down, it also properly
1203  * waits for data=ordered extents to finish before allowing the pages to be
1204  * modified.
1205  */
1206 static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
1207 			 struct page **pages, size_t num_pages,
1208 			 loff_t pos, unsigned long first_index,
1209 			 size_t write_bytes, bool force_uptodate)
1210 {
1211 	struct extent_state *cached_state = NULL;
1212 	int i;
1213 	unsigned long index = pos >> PAGE_CACHE_SHIFT;
1214 	struct inode *inode = fdentry(file)->d_inode;
1215 	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1216 	int err = 0;
1217 	int faili = 0;
1218 	u64 start_pos;
1219 	u64 last_pos;
1220 
1221 	start_pos = pos & ~((u64)root->sectorsize - 1);
1222 	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
1223 
1224 again:
1225 	for (i = 0; i < num_pages; i++) {
1226 		pages[i] = find_or_create_page(inode->i_mapping, index + i,
1227 					       mask | __GFP_WRITE);
1228 		if (!pages[i]) {
1229 			faili = i - 1;
1230 			err = -ENOMEM;
1231 			goto fail;
1232 		}
1233 
1234 		if (i == 0)
1235 			err = prepare_uptodate_page(pages[i], pos,
1236 						    force_uptodate);
1237 		if (i == num_pages - 1)
1238 			err = prepare_uptodate_page(pages[i],
1239 						    pos + write_bytes, false);
1240 		if (err) {
1241 			page_cache_release(pages[i]);
1242 			faili = i - 1;
1243 			goto fail;
1244 		}
1245 		wait_on_page_writeback(pages[i]);
1246 	}
1247 	err = 0;
1248 	if (start_pos < inode->i_size) {
1249 		struct btrfs_ordered_extent *ordered;
1250 		lock_extent_bits(&BTRFS_I(inode)->io_tree,
1251 				 start_pos, last_pos - 1, 0, &cached_state);
1252 		ordered = btrfs_lookup_first_ordered_extent(inode,
1253 							    last_pos - 1);
1254 		if (ordered &&
1255 		    ordered->file_offset + ordered->len > start_pos &&
1256 		    ordered->file_offset < last_pos) {
1257 			btrfs_put_ordered_extent(ordered);
1258 			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1259 					     start_pos, last_pos - 1,
1260 					     &cached_state, GFP_NOFS);
1261 			for (i = 0; i < num_pages; i++) {
1262 				unlock_page(pages[i]);
1263 				page_cache_release(pages[i]);
1264 			}
1265 			btrfs_wait_ordered_range(inode, start_pos,
1266 						 last_pos - start_pos);
1267 			goto again;
1268 		}
1269 		if (ordered)
1270 			btrfs_put_ordered_extent(ordered);
1271 
1272 		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
1273 				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
1274 				  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
1275 				  0, 0, &cached_state, GFP_NOFS);
1276 		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1277 				     start_pos, last_pos - 1, &cached_state,
1278 				     GFP_NOFS);
1279 	}
1280 	for (i = 0; i < num_pages; i++) {
1281 		if (clear_page_dirty_for_io(pages[i]))
1282 			account_page_redirty(pages[i]);
1283 		set_page_extent_mapped(pages[i]);
1284 		WARN_ON(!PageLocked(pages[i]));
1285 	}
1286 	return 0;
1287 fail:
1288 	while (faili >= 0) {
1289 		unlock_page(pages[faili]);
1290 		page_cache_release(pages[faili]);
1291 		faili--;
1292 	}
1293 	return err;
1294 
1295 }
1296 
1297 static noinline ssize_t __btrfs_buffered_write(struct file *file,
1298 					       struct iov_iter *i,
1299 					       loff_t pos)
1300 {
1301 	struct inode *inode = fdentry(file)->d_inode;
1302 	struct btrfs_root *root = BTRFS_I(inode)->root;
1303 	struct page **pages = NULL;
1304 	unsigned long first_index;
1305 	size_t num_written = 0;
1306 	int nrptrs;
1307 	int ret = 0;
1308 	bool force_page_uptodate = false;
1309 
1310 	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
1311 		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
1312 		     (sizeof(struct page *)));
1313 	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1314 	nrptrs = max(nrptrs, 8);
1315 	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
1316 	if (!pages)
1317 		return -ENOMEM;
1318 
1319 	first_index = pos >> PAGE_CACHE_SHIFT;
1320 
1321 	while (iov_iter_count(i) > 0) {
1322 		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
1323 		size_t write_bytes = min(iov_iter_count(i),
1324 					 nrptrs * (size_t)PAGE_CACHE_SIZE -
1325 					 offset);
1326 		size_t num_pages = (write_bytes + offset +
1327 				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1328 		size_t dirty_pages;
1329 		size_t copied;
1330 
1331 		WARN_ON(num_pages > nrptrs);
1332 
1333 		/*
1334 		 * Fault pages before locking them in prepare_pages
1335 		 * to avoid recursive lock
1336 		 */
1337 		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1338 			ret = -EFAULT;
1339 			break;
1340 		}
1341 
1342 		ret = btrfs_delalloc_reserve_space(inode,
1343 					num_pages << PAGE_CACHE_SHIFT);
1344 		if (ret)
1345 			break;
1346 
1347 		/*
1348 		 * This is going to setup the pages array with the number of
1349 		 * pages we want, so we don't really need to worry about the
1350 		 * contents of pages from loop to loop
1351 		 */
1352 		ret = prepare_pages(root, file, pages, num_pages,
1353 				    pos, first_index, write_bytes,
1354 				    force_page_uptodate);
1355 		if (ret) {
1356 			btrfs_delalloc_release_space(inode,
1357 					num_pages << PAGE_CACHE_SHIFT);
1358 			break;
1359 		}
1360 
1361 		copied = btrfs_copy_from_user(pos, num_pages,
1362 					   write_bytes, pages, i);
1363 
1364 		/*
1365 		 * if we have trouble faulting in the pages, fall
1366 		 * back to one page at a time
1367 		 */
1368 		if (copied < write_bytes)
1369 			nrptrs = 1;
1370 
1371 		if (copied == 0) {
1372 			force_page_uptodate = true;
1373 			dirty_pages = 0;
1374 		} else {
1375 			force_page_uptodate = false;
1376 			dirty_pages = (copied + offset +
1377 				       PAGE_CACHE_SIZE - 1) >>
1378 				       PAGE_CACHE_SHIFT;
1379 		}
1380 
1381 		/*
1382 		 * If we had a short copy we need to release the excess delalloc
1383 		 * bytes we reserved.  We need to increment outstanding_extents
1384 		 * because btrfs_delalloc_release_space will decrement it, but
1385 		 * we still have an outstanding extent for the chunk we actually
1386 		 * managed to copy.
1387 		 */
1388 		if (num_pages > dirty_pages) {
1389 			if (copied > 0) {
1390 				spin_lock(&BTRFS_I(inode)->lock);
1391 				BTRFS_I(inode)->outstanding_extents++;
1392 				spin_unlock(&BTRFS_I(inode)->lock);
1393 			}
1394 			btrfs_delalloc_release_space(inode,
1395 					(num_pages - dirty_pages) <<
1396 					PAGE_CACHE_SHIFT);
1397 		}
1398 
1399 		if (copied > 0) {
1400 			ret = btrfs_dirty_pages(root, inode, pages,
1401 						dirty_pages, pos, copied,
1402 						NULL);
1403 			if (ret) {
1404 				btrfs_delalloc_release_space(inode,
1405 					dirty_pages << PAGE_CACHE_SHIFT);
1406 				btrfs_drop_pages(pages, num_pages);
1407 				break;
1408 			}
1409 		}
1410 
1411 		btrfs_drop_pages(pages, num_pages);
1412 
1413 		cond_resched();
1414 
1415 		balance_dirty_pages_ratelimited(inode->i_mapping);
1416 		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
1417 			btrfs_btree_balance_dirty(root);
1418 
1419 		pos += copied;
1420 		num_written += copied;
1421 	}
1422 
1423 	kfree(pages);
1424 
1425 	return num_written ? num_written : ret;
1426 }
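
/*
 * Reservation example for the short-copy path above, assuming 4K
 * pages: if 4 pages (16K) are reserved and the copy only manages 5000
 * bytes at offset 0, dirty_pages = 2, so the delalloc space for the
 * two untouched pages is released.  outstanding_extents is bumped
 * first because btrfs_delalloc_release_space() will decrement it, yet
 * the chunk that was copied still holds an outstanding extent.
 */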
1427 
1428 static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1429 				    const struct iovec *iov,
1430 				    unsigned long nr_segs, loff_t pos,
1431 				    loff_t *ppos, size_t count, size_t ocount)
1432 {
1433 	struct file *file = iocb->ki_filp;
1434 	struct iov_iter i;
1435 	ssize_t written;
1436 	ssize_t written_buffered;
1437 	loff_t endbyte;
1438 	int err;
1439 
1440 	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
1441 					    count, ocount);
1442 
1443 	if (written < 0 || written == count)
1444 		return written;
1445 
1446 	pos += written;
1447 	count -= written;
1448 	iov_iter_init(&i, iov, nr_segs, count, written);
1449 	written_buffered = __btrfs_buffered_write(file, &i, pos);
1450 	if (written_buffered < 0) {
1451 		err = written_buffered;
1452 		goto out;
1453 	}
1454 	endbyte = pos + written_buffered - 1;
1455 	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
1456 	if (err)
1457 		goto out;
1458 	written += written_buffered;
1459 	*ppos = pos + written_buffered;
1460 	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
1461 				 endbyte >> PAGE_CACHE_SHIFT);
1462 out:
1463 	return written ? written : err;
1464 }
1465 
1466 static void update_time_for_write(struct inode *inode)
1467 {
1468 	struct timespec now;
1469 
1470 	if (IS_NOCMTIME(inode))
1471 		return;
1472 
1473 	now = current_fs_time(inode->i_sb);
1474 	if (!timespec_equal(&inode->i_mtime, &now))
1475 		inode->i_mtime = now;
1476 
1477 	if (!timespec_equal(&inode->i_ctime, &now))
1478 		inode->i_ctime = now;
1479 
1480 	if (IS_I_VERSION(inode))
1481 		inode_inc_iversion(inode);
1482 }
1483 
1484 static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1485 				    const struct iovec *iov,
1486 				    unsigned long nr_segs, loff_t pos)
1487 {
1488 	struct file *file = iocb->ki_filp;
1489 	struct inode *inode = fdentry(file)->d_inode;
1490 	struct btrfs_root *root = BTRFS_I(inode)->root;
1491 	loff_t *ppos = &iocb->ki_pos;
1492 	u64 start_pos;
1493 	ssize_t num_written = 0;
1494 	ssize_t err = 0;
1495 	size_t count, ocount;
1496 	bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
1497 
1498 	sb_start_write(inode->i_sb);
1499 
1500 	mutex_lock(&inode->i_mutex);
1501 
1502 	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
1503 	if (err) {
1504 		mutex_unlock(&inode->i_mutex);
1505 		goto out;
1506 	}
1507 	count = ocount;
1508 
1509 	current->backing_dev_info = inode->i_mapping->backing_dev_info;
1510 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1511 	if (err) {
1512 		mutex_unlock(&inode->i_mutex);
1513 		goto out;
1514 	}
1515 
1516 	if (count == 0) {
1517 		mutex_unlock(&inode->i_mutex);
1518 		goto out;
1519 	}
1520 
1521 	err = file_remove_suid(file);
1522 	if (err) {
1523 		mutex_unlock(&inode->i_mutex);
1524 		goto out;
1525 	}
1526 
1527 	/*
1528 	 * If BTRFS flips readonly due to some impossible error
1529 	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
1530 	 * although we have opened a file as writable, we have
1531 	 * to stop this write operation to ensure FS consistency.
1532 	 */
1533 	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
1534 		mutex_unlock(&inode->i_mutex);
1535 		err = -EROFS;
1536 		goto out;
1537 	}
1538 
1539 	/*
1540 	 * We reserve space for updating the inode when we reserve space for the
1541 	 * extent we are going to write, so we will enospc out there.  We don't
1542 	 * need to start yet another transaction to update the inode as we will
1543 	 * update the inode when we finish writing whatever data we write.
1544 	 */
1545 	update_time_for_write(inode);
1546 
1547 	start_pos = round_down(pos, root->sectorsize);
1548 	if (start_pos > i_size_read(inode)) {
1549 		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
1550 		if (err) {
1551 			mutex_unlock(&inode->i_mutex);
1552 			goto out;
1553 		}
1554 	}
1555 
1556 	if (sync)
1557 		atomic_inc(&BTRFS_I(inode)->sync_writers);
1558 
1559 	if (unlikely(file->f_flags & O_DIRECT)) {
1560 		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
1561 						   pos, ppos, count, ocount);
1562 	} else {
1563 		struct iov_iter i;
1564 
1565 		iov_iter_init(&i, iov, nr_segs, count, num_written);
1566 
1567 		num_written = __btrfs_buffered_write(file, &i, pos);
1568 		if (num_written > 0)
1569 			*ppos = pos + num_written;
1570 	}
1571 
1572 	mutex_unlock(&inode->i_mutex);
1573 
1574 	/*
1575 	 * we want to make sure fsync finds this change
1576 	 * but we haven't joined a transaction running right now.
1577 	 *
1578 	 * Later on, someone is sure to update the inode and get the
1579 	 * real transid recorded.
1580 	 *
1581 	 * We set last_trans now to the fs_info generation + 1,
1582 	 * this will either be one more than the running transaction
1583 	 * or the generation used for the next transaction if there isn't
1584 	 * one running right now.
1585 	 *
1586 	 * We also have to set last_sub_trans to the current log transid,
1587 	 * otherwise subsequent syncs to a file that's been synced in this
1588 		 * transaction will appear to have already occurred.
1589 	 */
1590 	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
1591 	BTRFS_I(inode)->last_sub_trans = root->log_transid;
1592 	if (num_written > 0 || num_written == -EIOCBQUEUED) {
1593 		err = generic_write_sync(file, pos, num_written);
1594 		if (err < 0 && num_written > 0)
1595 			num_written = err;
1596 	}
1597 out:
1598 	if (sync)
1599 		atomic_dec(&BTRFS_I(inode)->sync_writers);
1600 	sb_end_write(inode->i_sb);
1601 	current->backing_dev_info = NULL;
1602 	return num_written ? num_written : err;
1603 }
1604 
1605 int btrfs_release_file(struct inode *inode, struct file *filp)
1606 {
1607 	/*
1608 	 * ordered_data_close is set by setattr when we are about to truncate
1609 	 * a file from a non-zero size to a zero size.  This tries to
1610 	 * flush down new bytes that may have been written if the
1611 	 * application were using truncate to replace a file in place.
1612 	 */
1613 	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
1614 			       &BTRFS_I(inode)->runtime_flags)) {
1615 		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
1616 		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
1617 			filemap_flush(inode->i_mapping);
1618 	}
1619 	if (filp->private_data)
1620 		btrfs_ioctl_trans_end(filp);
1621 	return 0;
1622 }
1623 
1624 /*
1625  * fsync call for both files and directories.  This logs the inode into
1626  * the tree log instead of forcing full commits whenever possible.
1627  *
1628  * It needs to call filemap_fdatawait so that all ordered extent updates
1629  * in the metadata btree are up to date for copying to the log.
1630  *
1631  * It drops the inode mutex before doing the tree log commit.  This is an
1632  * important optimization for directories because holding the mutex prevents
1633  * new operations on the dir while we write to disk.
1634  */
1635 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1636 {
1637 	struct dentry *dentry = file->f_path.dentry;
1638 	struct inode *inode = dentry->d_inode;
1639 	struct btrfs_root *root = BTRFS_I(inode)->root;
1640 	int ret = 0;
1641 	struct btrfs_trans_handle *trans;
1642 
1643 	trace_btrfs_sync_file(file, datasync);
1644 
1645 	/*
1646 	 * We write the dirty pages in the range and wait until they complete
1647 	 * outside of the ->i_mutex, so that multiple tasks can flush dirty
1648 	 * pages in the range concurrently and improve performance.
1649 	 */
1650 	atomic_inc(&BTRFS_I(inode)->sync_writers);
1651 	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
1652 	atomic_dec(&BTRFS_I(inode)->sync_writers);
1653 	if (ret)
1654 		return ret;
1655 
1656 	mutex_lock(&inode->i_mutex);
1657 
1658 	/*
1659 	 * We flush the dirty pages once more so that no dirty pages in the
1660 	 * range are left behind.
1661 	 */
1662 	atomic_inc(&root->log_batch);
1663 	btrfs_wait_ordered_range(inode, start, end - start + 1);
1664 	atomic_inc(&root->log_batch);
1665 
1666 	/*
1667 	 * check the transaction that last modified this inode
1668 	 * and see if its already been committed
1669 	 */
1670 	if (!BTRFS_I(inode)->last_trans) {
1671 		mutex_unlock(&inode->i_mutex);
1672 		goto out;
1673 	}
1674 
1675 	/*
1676 	 * if the last transaction that changed this file was before
1677 	 * the current transaction, we can bail out now without any
1678 	 * syncing
1679 	 */
1680 	smp_mb();
1681 	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
1682 	    BTRFS_I(inode)->last_trans <=
1683 	    root->fs_info->last_trans_committed) {
1684 		BTRFS_I(inode)->last_trans = 0;
1685 
1686 		/*
1687 		 * We've had everything committed since the last time we were
1688 		 * modified so clear this flag in case it was set for whatever
1689 		 * reason, it's no longer relevant.
1690 		 */
1691 		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1692 			  &BTRFS_I(inode)->runtime_flags);
1693 		mutex_unlock(&inode->i_mutex);
1694 		goto out;
1695 	}
1696 
1697 	/*
1698 	 * ok we haven't committed the transaction yet, let's do a commit
1699 	 */
1700 	if (file->private_data)
1701 		btrfs_ioctl_trans_end(file);
1702 
1703 	trans = btrfs_start_transaction(root, 0);
1704 	if (IS_ERR(trans)) {
1705 		ret = PTR_ERR(trans);
1706 		mutex_unlock(&inode->i_mutex);
1707 		goto out;
1708 	}
1709 
1710 	ret = btrfs_log_dentry_safe(trans, root, dentry);
1711 	if (ret < 0) {
1712 		mutex_unlock(&inode->i_mutex);
1713 		goto out;
1714 	}
1715 
1716 	/* we've logged all the items and now have a consistent
1717 	 * version of the file in the log.  It is possible that
1718 	 * someone will come in and modify the file, but that's
1719 	 * fine because the log is consistent on disk, and we
1720 	 * have references to all of the file's extents
1721 	 *
1722 	 * It is possible that someone will come in and log the
1723 	 * file again, but that will end up using the synchronization
1724 	 * inside btrfs_sync_log to keep things safe.
1725 	 */
1726 	mutex_unlock(&inode->i_mutex);
1727 
1728 	if (ret != BTRFS_NO_LOG_SYNC) {
1729 		if (ret > 0) {
1730 			ret = btrfs_commit_transaction(trans, root);
1731 		} else {
1732 			ret = btrfs_sync_log(trans, root);
1733 			if (ret == 0)
1734 				ret = btrfs_end_transaction(trans, root);
1735 			else
1736 				ret = btrfs_commit_transaction(trans, root);
1737 		}
1738 	} else {
1739 		ret = btrfs_end_transaction(trans, root);
1740 	}
1741 out:
1742 	return ret > 0 ? -EIO : ret;
1743 }
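
/*
 * On the final return mapping above: btrfs_log_dentry_safe() returns
 * BTRFS_NO_LOG_SYNC when no log sync is needed, and other positive
 * values when the inode couldn't be logged and a full transaction
 * commit is required instead.  Mapping any leftover positive code to
 * -EIO keeps fsync()'s contract of returning 0 or a negative errno.
 */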
1744 
1745 static const struct vm_operations_struct btrfs_file_vm_ops = {
1746 	.fault		= filemap_fault,
1747 	.page_mkwrite	= btrfs_page_mkwrite,
1748 	.remap_pages	= generic_file_remap_pages,
1749 };
1750 
1751 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
1752 {
1753 	struct address_space *mapping = filp->f_mapping;
1754 
1755 	if (!mapping->a_ops->readpage)
1756 		return -ENOEXEC;
1757 
1758 	file_accessed(filp);
1759 	vma->vm_ops = &btrfs_file_vm_ops;
1760 
1761 	return 0;
1762 }
1763 
1764 static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
1765 			  int slot, u64 start, u64 end)
1766 {
1767 	struct btrfs_file_extent_item *fi;
1768 	struct btrfs_key key;
1769 
1770 	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1771 		return 0;
1772 
1773 	btrfs_item_key_to_cpu(leaf, &key, slot);
1774 	if (key.objectid != btrfs_ino(inode) ||
1775 	    key.type != BTRFS_EXTENT_DATA_KEY)
1776 		return 0;
1777 
1778 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1779 
1780 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
1781 		return 0;
1782 
1783 	if (btrfs_file_extent_disk_bytenr(leaf, fi))
1784 		return 0;
1785 
1786 	if (key.offset == end)
1787 		return 1;
1788 	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
1789 		return 1;
1790 	return 0;
1791 }
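
/*
 * Example: punching a hole over [8192, 12288) next to an existing
 * zero-bytenr (hole) extent item covering [4096, 8192) lets
 * fill_holes() below extend that item to cover [4096, 12288) instead
 * of inserting a second hole extent item.
 */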
1792 
1793 static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
1794 		      struct btrfs_path *path, u64 offset, u64 end)
1795 {
1796 	struct btrfs_root *root = BTRFS_I(inode)->root;
1797 	struct extent_buffer *leaf;
1798 	struct btrfs_file_extent_item *fi;
1799 	struct extent_map *hole_em;
1800 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1801 	struct btrfs_key key;
1802 	int ret;
1803 
1804 	key.objectid = btrfs_ino(inode);
1805 	key.type = BTRFS_EXTENT_DATA_KEY;
1806 	key.offset = offset;
1807 
1808 
1809 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1810 	if (ret < 0)
1811 		return ret;
1812 	BUG_ON(!ret);
1813 
1814 	leaf = path->nodes[0];
1815 	if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
1816 		u64 num_bytes;
1817 
1818 		path->slots[0]--;
1819 		fi = btrfs_item_ptr(leaf, path->slots[0],
1820 				    struct btrfs_file_extent_item);
1821 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
1822 			end - offset;
1823 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1824 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1825 		btrfs_set_file_extent_offset(leaf, fi, 0);
1826 		btrfs_mark_buffer_dirty(leaf);
1827 		goto out;
1828 	}
1829 
1830 	if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) {
1831 		u64 num_bytes;
1832 
1833 		path->slots[0]++;
1834 		key.offset = offset;
1835 		btrfs_set_item_key_safe(trans, root, path, &key);
1836 		fi = btrfs_item_ptr(leaf, path->slots[0],
1837 				    struct btrfs_file_extent_item);
1838 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
1839 			offset;
1840 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1841 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
1842 		btrfs_set_file_extent_offset(leaf, fi, 0);
1843 		btrfs_mark_buffer_dirty(leaf);
1844 		goto out;
1845 	}
1846 	btrfs_release_path(path);
1847 
1848 	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
1849 				       0, 0, end - offset, 0, end - offset,
1850 				       0, 0, 0);
1851 	if (ret)
1852 		return ret;
1853 
1854 out:
1855 	btrfs_release_path(path);
1856 
1857 	hole_em = alloc_extent_map();
1858 	if (!hole_em) {
1859 		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
1860 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1861 			&BTRFS_I(inode)->runtime_flags);
1862 	} else {
1863 		hole_em->start = offset;
1864 		hole_em->len = end - offset;
1865 		hole_em->orig_start = offset;
1866 
1867 		hole_em->block_start = EXTENT_MAP_HOLE;
1868 		hole_em->block_len = 0;
1869 		hole_em->orig_block_len = 0;
1870 		hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
1871 		hole_em->compress_type = BTRFS_COMPRESS_NONE;
1872 		hole_em->generation = trans->transid;
1873 
1874 		do {
1875 			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
1876 			write_lock(&em_tree->lock);
1877 			ret = add_extent_mapping(em_tree, hole_em);
1878 			if (!ret)
1879 				list_move(&hole_em->list,
1880 					  &em_tree->modified_extents);
1881 			write_unlock(&em_tree->lock);
1882 		} while (ret == -EEXIST);
1883 		free_extent_map(hole_em);
1884 		if (ret)
1885 			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1886 				&BTRFS_I(inode)->runtime_flags);
1887 	}
1888 
1889 	return 0;
1890 }
1891 
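/*
 * The guts of fallocate(FALLOC_FL_PUNCH_HOLE): zero any unaligned head
 * and tail of the range directly in the page cache, then drop the file
 * extents covering the block-aligned middle and replace them with hole
 * extents, restarting the transaction as reservations run out.
 */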
1892 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
1893 {
1894 	struct btrfs_root *root = BTRFS_I(inode)->root;
1895 	struct extent_state *cached_state = NULL;
1896 	struct btrfs_path *path;
1897 	struct btrfs_block_rsv *rsv;
1898 	struct btrfs_trans_handle *trans;
1899 	u64 lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
1900 	u64 lockend = round_down(offset + len,
1901 				 BTRFS_I(inode)->root->sectorsize) - 1;
1902 	u64 cur_offset = lockstart;
1903 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
1904 	u64 drop_end;
1905 	int ret = 0;
1906 	int err = 0;
1907 	bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
1908 			  ((offset + len - 1) >> PAGE_CACHE_SHIFT));
1909 
1910 	btrfs_wait_ordered_range(inode, offset, len);
1911 
1912 	mutex_lock(&inode->i_mutex);
1913 	/*
1914 	 * We don't need to truncate any page that is beyond the end of the
1915 	 * file, because we know there is no data there.
1916 	 *
1917 	 * Only take the single-page shortcut below if the range starts and
1918 	 * ends in the same page and doesn't cover the entire page; larger
1919 	 * ranges have their head and tail zeroed separately further down.
1920 	 */
1921 	if (same_page && len < PAGE_CACHE_SIZE) {
1922 		if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE))
1923 			ret = btrfs_truncate_page(inode, offset, len, 0);
1924 		mutex_unlock(&inode->i_mutex);
1925 		return ret;
1926 	}
1927 
1928 	/* zero out the back part of the first page */
1929 	if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
1930 		ret = btrfs_truncate_page(inode, offset, 0, 0);
1931 		if (ret) {
1932 			mutex_unlock(&inode->i_mutex);
1933 			return ret;
1934 		}
1935 	}
1936 
1937 	/* zero the front end of the last page */
1938 	if (offset + len < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
1939 		ret = btrfs_truncate_page(inode, offset + len, 0, 1);
1940 		if (ret) {
1941 			mutex_unlock(&inode->i_mutex);
1942 			return ret;
1943 		}
1944 	}
1945 
1946 	if (lockend < lockstart) {
1947 		mutex_unlock(&inode->i_mutex);
1948 		return 0;
1949 	}
1950 
1951 	while (1) {
1952 		struct btrfs_ordered_extent *ordered;
1953 
1954 		truncate_pagecache_range(inode, lockstart, lockend);
1955 
1956 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
1957 				 0, &cached_state);
1958 		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
1959 
1960 		/*
1961 		 * We need to make sure there are no ordered extents in this
1962 		 * range, and that nobody raced in and read a page in it.  If
1963 		 * either happened, we need to try again.
1964 		 */
1965 		if ((!ordered ||
1966 		    (ordered->file_offset + ordered->len < lockstart ||
1967 		     ordered->file_offset > lockend)) &&
1968 		     !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
1969 				     lockend, EXTENT_UPTODATE, 0,
1970 				     cached_state)) {
1971 			if (ordered)
1972 				btrfs_put_ordered_extent(ordered);
1973 			break;
1974 		}
1975 		if (ordered)
1976 			btrfs_put_ordered_extent(ordered);
1977 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
1978 				     lockend, &cached_state, GFP_NOFS);
1979 		btrfs_wait_ordered_range(inode, lockstart,
1980 					 lockend - lockstart + 1);
1981 	}
1982 
1983 	path = btrfs_alloc_path();
1984 	if (!path) {
1985 		ret = -ENOMEM;
1986 		goto out;
1987 	}
1988 
1989 	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
1990 	if (!rsv) {
1991 		ret = -ENOMEM;
1992 		goto out_free;
1993 	}
1994 	rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
1995 	rsv->failfast = 1;
1996 
1997 	/*
1998 	 * 1 - update the inode
1999 	 * 1 - removing the extents in the range
2000 	 * 1 - adding the hole extent
2001 	 */
2002 	trans = btrfs_start_transaction(root, 3);
2003 	if (IS_ERR(trans)) {
2004 		err = PTR_ERR(trans);
2005 		goto out_free;
2006 	}
2007 
2008 	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
2009 				      min_size);
2010 	BUG_ON(ret);
2011 	trans->block_rsv = rsv;
2012 
2013 	while (cur_offset < lockend) {
2014 		ret = __btrfs_drop_extents(trans, root, inode, path,
2015 					   cur_offset, lockend + 1,
2016 					   &drop_end, 1);
2017 		if (ret != -ENOSPC)
2018 			break;
2019 
2020 		trans->block_rsv = &root->fs_info->trans_block_rsv;
2021 
2022 		ret = fill_holes(trans, inode, path, cur_offset, drop_end);
2023 		if (ret) {
2024 			err = ret;
2025 			break;
2026 		}
2027 
2028 		cur_offset = drop_end;
2029 
2030 		ret = btrfs_update_inode(trans, root, inode);
2031 		if (ret) {
2032 			err = ret;
2033 			break;
2034 		}
2035 
2036 		btrfs_end_transaction(trans, root);
2037 		btrfs_btree_balance_dirty(root);
2038 
2039 		trans = btrfs_start_transaction(root, 3);
2040 		if (IS_ERR(trans)) {
2041 			ret = PTR_ERR(trans);
2042 			trans = NULL;
2043 			break;
2044 		}
2045 
2046 		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
2047 					      rsv, min_size);
2048 		BUG_ON(ret);	/* shouldn't happen */
2049 		trans->block_rsv = rsv;
2050 	}
2051 
2052 	if (ret) {
2053 		err = ret;
2054 		goto out_trans;
2055 	}
2056 
2057 	trans->block_rsv = &root->fs_info->trans_block_rsv;
2058 	ret = fill_holes(trans, inode, path, cur_offset, drop_end);
2059 	if (ret) {
2060 		err = ret;
2061 		goto out_trans;
2062 	}
2063 
2064 out_trans:
2065 	if (!trans)
2066 		goto out_free;
2067 
2068 	inode_inc_iversion(inode);
2069 	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2070 
2071 	trans->block_rsv = &root->fs_info->trans_block_rsv;
2072 	ret = btrfs_update_inode(trans, root, inode);
2073 	btrfs_end_transaction(trans, root);
2074 	btrfs_btree_balance_dirty(root);
2075 out_free:
2076 	btrfs_free_path(path);
2077 	btrfs_free_block_rsv(root, rsv);
2078 out:
2079 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2080 			     &cached_state, GFP_NOFS);
2081 	mutex_unlock(&inode->i_mutex);
2082 	if (ret && !err)
2083 		err = ret;
2084 	return err;
2085 }
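
/*
 * Example (userspace, illustrative only; not part of this file): punching
 * a hole from user space.  The VFS only allows FALLOC_FL_PUNCH_HOLE
 * together with FALLOC_FL_KEEP_SIZE, and btrfs_fallocate() below routes
 * that combination to btrfs_punch_hole().  Assumes a glibc that exposes
 * fallocate() and the FALLOC_FL_* flags under _GNU_SOURCE.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int punch(int fd, off_t off, off_t len)
 *	{
 *		if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *			      off, len)) {
 *			perror("fallocate");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */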
2086 
2087 static long btrfs_fallocate(struct file *file, int mode,
2088 			    loff_t offset, loff_t len)
2089 {
2090 	struct inode *inode = file->f_path.dentry->d_inode;
2091 	struct extent_state *cached_state = NULL;
2092 	u64 cur_offset;
2093 	u64 last_byte;
2094 	u64 alloc_start;
2095 	u64 alloc_end;
2096 	u64 alloc_hint = 0;
2097 	u64 locked_end;
2098 	struct extent_map *em;
2099 	int blocksize = BTRFS_I(inode)->root->sectorsize;
2100 	int ret;
2101 
2102 	alloc_start = round_down(offset, blocksize);
2103 	alloc_end = round_up(offset + len, blocksize);
2104 
2105 	/* Make sure we aren't being given some crap mode */
2106 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2107 		return -EOPNOTSUPP;
2108 
2109 	if (mode & FALLOC_FL_PUNCH_HOLE)
2110 		return btrfs_punch_hole(inode, offset, len);
2111 
2112 	/*
2113 	 * Make sure we have enough space before we do the
2114 	 * allocation.
2115 	 */
2116 	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
2117 	if (ret)
2118 		return ret;
2119 
2120 	/*
2121 	 * wait for ordered IO before we have any locks.  We'll loop again
2122 	 * below with the locks held.
2123 	 */
2124 	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
2125 
2126 	mutex_lock(&inode->i_mutex);
2127 	ret = inode_newsize_ok(inode, alloc_end);
2128 	if (ret)
2129 		goto out;
2130 
2131 	if (alloc_start > inode->i_size) {
2132 		ret = btrfs_cont_expand(inode, i_size_read(inode),
2133 					alloc_start);
2134 		if (ret)
2135 			goto out;
2136 	}
2137 
2138 	locked_end = alloc_end - 1;
2139 	while (1) {
2140 		struct btrfs_ordered_extent *ordered;
2141 
2142 		/* the extent lock is ordered inside the running
2143 		 * transaction
2144 		 */
2145 		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
2146 				 locked_end, 0, &cached_state);
2147 		ordered = btrfs_lookup_first_ordered_extent(inode,
2148 							    alloc_end - 1);
2149 		if (ordered &&
2150 		    ordered->file_offset + ordered->len > alloc_start &&
2151 		    ordered->file_offset < alloc_end) {
2152 			btrfs_put_ordered_extent(ordered);
2153 			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
2154 					     alloc_start, locked_end,
2155 					     &cached_state, GFP_NOFS);
2156 			/*
2157 			 * we can't wait on the range with the transaction
2158 			 * running or with the extent lock held
2159 			 */
2160 			btrfs_wait_ordered_range(inode, alloc_start,
2161 						 alloc_end - alloc_start);
2162 		} else {
2163 			if (ordered)
2164 				btrfs_put_ordered_extent(ordered);
2165 			break;
2166 		}
2167 	}
2168 
2169 	cur_offset = alloc_start;
2170 	while (1) {
2171 		u64 actual_end;
2172 
2173 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2174 				      alloc_end - cur_offset, 0);
2175 		if (IS_ERR_OR_NULL(em)) {
2176 			if (!em)
2177 				ret = -ENOMEM;
2178 			else
2179 				ret = PTR_ERR(em);
2180 			break;
2181 		}
2182 		last_byte = min(extent_map_end(em), alloc_end);
2183 		actual_end = min_t(u64, extent_map_end(em), offset + len);
2184 		last_byte = ALIGN(last_byte, blocksize);
2185 
2186 		if (em->block_start == EXTENT_MAP_HOLE ||
2187 		    (cur_offset >= inode->i_size &&
2188 		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
2189 			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
2190 							last_byte - cur_offset,
2191 							1 << inode->i_blkbits,
2192 							offset + len,
2193 							&alloc_hint);
2194 
2195 			if (ret < 0) {
2196 				free_extent_map(em);
2197 				break;
2198 			}
2199 		} else if (actual_end > inode->i_size &&
2200 			   !(mode & FALLOC_FL_KEEP_SIZE)) {
2201 			/*
2202 			 * We didn't need to allocate any more space, but we
2203 			 * still extended the size of the file so we need to
2204 			 * update i_size.
2205 			 */
2206 			inode->i_ctime = CURRENT_TIME;
2207 			i_size_write(inode, actual_end);
2208 			btrfs_ordered_update_i_size(inode, actual_end, NULL);
2209 		}
2210 		free_extent_map(em);
2211 
2212 		cur_offset = last_byte;
2213 		if (cur_offset >= alloc_end) {
2214 			ret = 0;
2215 			break;
2216 		}
2217 	}
2218 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
2219 			     &cached_state, GFP_NOFS);
2220 out:
2221 	mutex_unlock(&inode->i_mutex);
2222 	/* Let go of our reservation. */
2223 	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
2224 	return ret;
2225 }
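
/*
 * Example (userspace, illustrative only; not part of this file):
 * preallocating space through the function above.  With mode 0 the file
 * grows to cover the range; adding FALLOC_FL_KEEP_SIZE instead reserves
 * the blocks but leaves i_size alone.  The helper name is made up.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int prealloc_1m(int fd)
 *	{
 *		return fallocate(fd, 0, 0, 1024 * 1024);
 *	}
 */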
2226 
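/*
 * Starting at *offset, walk the extent maps and set *offset to the first
 * hole (SEEK_HOLE) or the first data (SEEK_DATA) found.  Delalloc ranges
 * count as data even though they still read back as holes from the
 * on-disk extent tree.
 */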
2227 static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
2228 {
2229 	struct btrfs_root *root = BTRFS_I(inode)->root;
2230 	struct extent_map *em;
2231 	struct extent_state *cached_state = NULL;
2232 	u64 lockstart = *offset;
2233 	u64 lockend = i_size_read(inode);
2234 	u64 start = *offset;
2235 	u64 orig_start = *offset;
2236 	u64 len = i_size_read(inode);
2237 	u64 last_end = 0;
2238 	int ret = 0;
2239 
2240 	lockend = max_t(u64, root->sectorsize, lockend);
2241 	if (lockend <= lockstart)
2242 		lockend = lockstart + root->sectorsize;
2243 
2244 	len = lockend - lockstart + 1;
2245 
2246 	len = max_t(u64, len, root->sectorsize);
2247 	if (inode->i_size == 0)
2248 		return -ENXIO;
2249 
2250 	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
2251 			 &cached_state);
2252 
2253 	/*
2254 	 * Delalloc is such a pain.  If we have a hole with pending delalloc
2255 	 * over a portion of it, we will get back a hole that covers the
2256 	 * entire range, because the delalloc data hasn't actually been
2257 	 * written yet.  To handle that case, we need to look for an extent
2258 	 * just before the position we want, in case there is outstanding
2259 	 * delalloc going on there.
2260 	 */
2261 	if (whence == SEEK_HOLE && start != 0) {
2262 		if (start <= root->sectorsize)
2263 			em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
2264 						     root->sectorsize, 0);
2265 		else
2266 			em = btrfs_get_extent_fiemap(inode, NULL, 0,
2267 						     start - root->sectorsize,
2268 						     root->sectorsize, 0);
2269 		if (IS_ERR(em)) {
2270 			ret = PTR_ERR(em);
2271 			goto out;
2272 		}
2273 		last_end = em->start + em->len;
2274 		if (em->block_start == EXTENT_MAP_DELALLOC)
2275 			last_end = min_t(u64, last_end, inode->i_size);
2276 		free_extent_map(em);
2277 	}
2278 
2279 	while (1) {
2280 		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
2281 		if (IS_ERR(em)) {
2282 			ret = PTR_ERR(em);
2283 			break;
2284 		}
2285 
2286 		if (em->block_start == EXTENT_MAP_HOLE) {
2287 			if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2288 				if (last_end <= orig_start) {
2289 					free_extent_map(em);
2290 					ret = -ENXIO;
2291 					break;
2292 				}
2293 			}
2294 
2295 			if (whence == SEEK_HOLE) {
2296 				*offset = start;
2297 				free_extent_map(em);
2298 				break;
2299 			}
2300 		} else {
2301 			if (whence == SEEK_DATA) {
2302 				if (em->block_start == EXTENT_MAP_DELALLOC) {
2303 					if (start >= inode->i_size) {
2304 						free_extent_map(em);
2305 						ret = -ENXIO;
2306 						break;
2307 					}
2308 				}
2309 
2310 				*offset = start;
2311 				free_extent_map(em);
2312 				break;
2313 			}
2314 		}
2315 
2316 		start = em->start + em->len;
2317 		last_end = em->start + em->len;
2318 
2319 		if (em->block_start == EXTENT_MAP_DELALLOC)
2320 			last_end = min_t(u64, last_end, inode->i_size);
2321 
2322 		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2323 			free_extent_map(em);
2324 			ret = -ENXIO;
2325 			break;
2326 		}
2327 		free_extent_map(em);
2328 		cond_resched();
2329 	}
2330 	if (!ret)
2331 		*offset = min(*offset, inode->i_size);
2332 out:
2333 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2334 			     &cached_state, GFP_NOFS);
2335 	return ret;
2336 }
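
/*
 * Example (userspace, illustrative only; not part of this file): the
 * SEEK_DATA/SEEK_HOLE whence values handled by btrfs_file_llseek() below,
 * e.g. finding the offset of the first hole in a file:
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *
 *	off_t first_hole(int fd)
 *	{
 *		return lseek(fd, 0, SEEK_HOLE);
 *	}
 */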
2337 
2338 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
2339 {
2340 	struct inode *inode = file->f_mapping->host;
2341 	int ret;
2342 
2343 	mutex_lock(&inode->i_mutex);
2344 	switch (whence) {
2345 	case SEEK_END:
2346 	case SEEK_CUR:
2347 		offset = generic_file_llseek(file, offset, whence);
2348 		goto out;
2349 	case SEEK_DATA:
2350 	case SEEK_HOLE:
2351 		if (offset >= i_size_read(inode)) {
2352 			mutex_unlock(&inode->i_mutex);
2353 			return -ENXIO;
2354 		}
2355 
2356 		ret = find_desired_extent(inode, &offset, whence);
2357 		if (ret) {
2358 			mutex_unlock(&inode->i_mutex);
2359 			return ret;
2360 		}
2361 	}
2362 
2363 	if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
2364 		offset = -EINVAL;
2365 		goto out;
2366 	}
2367 	if (offset > inode->i_sb->s_maxbytes) {
2368 		offset = -EINVAL;
2369 		goto out;
2370 	}
2371 
2372 	/* Special lock needed here? */
2373 	if (offset != file->f_pos) {
2374 		file->f_pos = offset;
2375 		file->f_version = 0;
2376 	}
2377 out:
2378 	mutex_unlock(&inode->i_mutex);
2379 	return offset;
2380 }
2381 
2382 const struct file_operations btrfs_file_operations = {
2383 	.llseek		= btrfs_file_llseek,
2384 	.read		= do_sync_read,
2385 	.write		= do_sync_write,
2386 	.aio_read       = generic_file_aio_read,
2387 	.splice_read	= generic_file_splice_read,
2388 	.aio_write	= btrfs_file_aio_write,
2389 	.mmap		= btrfs_file_mmap,
2390 	.open		= generic_file_open,
2391 	.release	= btrfs_release_file,
2392 	.fsync		= btrfs_sync_file,
2393 	.fallocate	= btrfs_fallocate,
2394 	.unlocked_ioctl	= btrfs_ioctl,
2395 #ifdef CONFIG_COMPAT
2396 	.compat_ioctl	= btrfs_ioctl,
2397 #endif
2398 };
2399 
2400 void btrfs_auto_defrag_exit(void)
2401 {
2402 	if (btrfs_inode_defrag_cachep)
2403 		kmem_cache_destroy(btrfs_inode_defrag_cachep);
2404 }
2405 
2406 int btrfs_auto_defrag_init(void)
2407 {
2408 	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
2409 					sizeof(struct inode_defrag), 0,
2410 					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2411 					NULL);
2412 	if (!btrfs_inode_defrag_cachep)
2413 		return -ENOMEM;
2414 
2415 	return 0;
2416 }
2417