1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/backing-dev.h>
30 #include <linux/mpage.h>
31 #include <linux/swap.h>
32 #include <linux/writeback.h>
33 #include <linux/statfs.h>
34 #include <linux/compat.h>
35 #include <linux/aio.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/xattr.h>
38 #include <linux/posix_acl.h>
39 #include <linux/falloc.h>
40 #include <linux/slab.h>
41 #include <linux/ratelimit.h>
42 #include <linux/mount.h>
43 #include <linux/btrfs.h>
44 #include <linux/blkdev.h>
45 #include <linux/posix_acl_xattr.h>
46 #include "ctree.h"
47 #include "disk-io.h"
48 #include "transaction.h"
49 #include "btrfs_inode.h"
50 #include "print-tree.h"
51 #include "ordered-data.h"
52 #include "xattr.h"
53 #include "tree-log.h"
54 #include "volumes.h"
55 #include "compression.h"
56 #include "locking.h"
57 #include "free-space-cache.h"
58 #include "inode-map.h"
59 #include "backref.h"
60 #include "hash.h"
61 
62 struct btrfs_iget_args {
63 	u64 ino;
64 	struct btrfs_root *root;
65 };
66 
67 static const struct inode_operations btrfs_dir_inode_operations;
68 static const struct inode_operations btrfs_symlink_inode_operations;
69 static const struct inode_operations btrfs_dir_ro_inode_operations;
70 static const struct inode_operations btrfs_special_inode_operations;
71 static const struct inode_operations btrfs_file_inode_operations;
72 static const struct address_space_operations btrfs_aops;
73 static const struct address_space_operations btrfs_symlink_aops;
74 static const struct file_operations btrfs_dir_file_operations;
75 static struct extent_io_ops btrfs_extent_io_ops;
76 
77 static struct kmem_cache *btrfs_inode_cachep;
78 static struct kmem_cache *btrfs_delalloc_work_cachep;
79 struct kmem_cache *btrfs_trans_handle_cachep;
80 struct kmem_cache *btrfs_transaction_cachep;
81 struct kmem_cache *btrfs_path_cachep;
82 struct kmem_cache *btrfs_free_space_cachep;
83 
84 #define S_SHIFT 12
85 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
86 	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
87 	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
88 	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
89 	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
90 	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
91 	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
92 	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
93 };
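
/*
 * Illustrative only: the table above is indexed by the S_IFMT bits of
 * i_mode shifted down by S_SHIFT.  A (hypothetical) lookup helper would
 * read:
 */
#if 0
static inline u8 example_type_by_mode(umode_t mode)
{
	/* mask off the permission bits, keep only the file type */
	return btrfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
}
#endif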
94 
95 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
96 static int btrfs_truncate(struct inode *inode);
97 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
98 static noinline int cow_file_range(struct inode *inode,
99 				   struct page *locked_page,
100 				   u64 start, u64 end, int *page_started,
101 				   unsigned long *nr_written, int unlock);
102 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
103 					   u64 len, u64 orig_start,
104 					   u64 block_start, u64 block_len,
105 					   u64 orig_block_len, u64 ram_bytes,
106 					   int type);
107 
108 static int btrfs_dirty_inode(struct inode *inode);
109 
110 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
111 				     struct inode *inode,  struct inode *dir,
112 				     const struct qstr *qstr)
113 {
114 	int err;
115 
116 	err = btrfs_init_acl(trans, inode, dir);
117 	if (!err)
118 		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
119 	return err;
120 }
121 
122 /*
123  * this does all the hard work for inserting an inline extent into
124  * the btree.  The caller should have done a btrfs_drop_extents so that
125  * no overlapping inline items exist in the btree
126  */
127 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
128 				struct btrfs_root *root, struct inode *inode,
129 				u64 start, size_t size, size_t compressed_size,
130 				int compress_type,
131 				struct page **compressed_pages)
132 {
133 	struct btrfs_key key;
134 	struct btrfs_path *path;
135 	struct extent_buffer *leaf;
136 	struct page *page = NULL;
137 	char *kaddr;
138 	unsigned long ptr;
139 	struct btrfs_file_extent_item *ei;
140 	int err = 0;
141 	int ret;
142 	size_t cur_size = size;
143 	size_t datasize;
144 	unsigned long offset;
145 
146 	if (compressed_size && compressed_pages)
147 		cur_size = compressed_size;
148 
149 	path = btrfs_alloc_path();
150 	if (!path)
151 		return -ENOMEM;
152 
153 	path->leave_spinning = 1;
154 
155 	key.objectid = btrfs_ino(inode);
156 	key.offset = start;
157 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
158 	datasize = btrfs_file_extent_calc_inline_size(cur_size);
159 
160 	inode_add_bytes(inode, size);
161 	ret = btrfs_insert_empty_item(trans, root, path, &key,
162 				      datasize);
163 	if (ret) {
164 		err = ret;
165 		goto fail;
166 	}
167 	leaf = path->nodes[0];
168 	ei = btrfs_item_ptr(leaf, path->slots[0],
169 			    struct btrfs_file_extent_item);
170 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
171 	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
172 	btrfs_set_file_extent_encryption(leaf, ei, 0);
173 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
174 	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
175 	ptr = btrfs_file_extent_inline_start(ei);
176 
177 	if (compress_type != BTRFS_COMPRESS_NONE) {
178 		struct page *cpage;
179 		int i = 0;
180 		while (compressed_size > 0) {
181 			cpage = compressed_pages[i];
182 			cur_size = min_t(unsigned long, compressed_size,
183 				       PAGE_CACHE_SIZE);
184 
185 			kaddr = kmap_atomic(cpage);
186 			write_extent_buffer(leaf, kaddr, ptr, cur_size);
187 			kunmap_atomic(kaddr);
188 
189 			i++;
190 			ptr += cur_size;
191 			compressed_size -= cur_size;
192 		}
193 		btrfs_set_file_extent_compression(leaf, ei,
194 						  compress_type);
195 	} else {
196 		page = find_get_page(inode->i_mapping,
197 				     start >> PAGE_CACHE_SHIFT);
198 		btrfs_set_file_extent_compression(leaf, ei, 0);
199 		kaddr = kmap_atomic(page);
200 		offset = start & (PAGE_CACHE_SIZE - 1);
201 		write_extent_buffer(leaf, kaddr + offset, ptr, size);
202 		kunmap_atomic(kaddr);
203 		page_cache_release(page);
204 	}
205 	btrfs_mark_buffer_dirty(leaf);
206 	btrfs_free_path(path);
207 
208 	/*
209 	 * we're an inline extent, so nobody can
210 	 * extend the file past i_size without locking
211 	 * a page we already have locked.
212 	 *
213 	 * We must do any isize and inode updates
214 	 * before we unlock the pages.  Otherwise we
215 	 * could end up racing with unlink.
216 	 */
217 	BTRFS_I(inode)->disk_i_size = inode->i_size;
218 	ret = btrfs_update_inode(trans, root, inode);
219 
220 	return ret;
221 fail:
222 	btrfs_free_path(path);
223 	return err;
224 }
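
/*
 * Worked example (illustrative): a 3000 byte uncompressed write landing
 * at offset 0 inserts one BTRFS_EXTENT_DATA_KEY item at (ino, 0) whose
 * item size is btrfs_file_extent_calc_inline_size(3000), i.e. the fixed
 * file extent header up to the inline data start plus 3000 data bytes
 * copied straight into the leaf.
 */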
225 
226 
227 /*
228  * conditionally insert an inline extent into the file.  This
229  * does the checks required to make sure the data is small enough
230  * to fit as an inline extent.
231  */
232 static noinline int cow_file_range_inline(struct btrfs_root *root,
233 					  struct inode *inode, u64 start,
234 					  u64 end, size_t compressed_size,
235 					  int compress_type,
236 					  struct page **compressed_pages)
237 {
238 	struct btrfs_trans_handle *trans;
239 	u64 isize = i_size_read(inode);
240 	u64 actual_end = min(end + 1, isize);
241 	u64 inline_len = actual_end - start;
242 	u64 aligned_end = ALIGN(end, root->sectorsize);
243 	u64 data_len = inline_len;
244 	int ret;
245 
246 	if (compressed_size)
247 		data_len = compressed_size;
248 
249 	if (start > 0 ||
250 	    actual_end >= PAGE_CACHE_SIZE ||
251 	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
252 	    (!compressed_size &&
253 	    (actual_end & (root->sectorsize - 1)) == 0) ||
254 	    end + 1 < isize ||
255 	    data_len > root->fs_info->max_inline) {
256 		return 1;
257 	}
258 
259 	trans = btrfs_join_transaction(root);
260 	if (IS_ERR(trans))
261 		return PTR_ERR(trans);
262 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
263 
264 	ret = btrfs_drop_extents(trans, root, inode, start, aligned_end, 1);
265 	if (ret) {
266 		btrfs_abort_transaction(trans, root, ret);
267 		goto out;
268 	}
269 
270 	if (isize > actual_end)
271 		inline_len = min_t(u64, isize, actual_end);
272 	ret = insert_inline_extent(trans, root, inode, start,
273 				   inline_len, compressed_size,
274 				   compress_type, compressed_pages);
275 	if (ret && ret != -ENOSPC) {
276 		btrfs_abort_transaction(trans, root, ret);
277 		goto out;
278 	} else if (ret == -ENOSPC) {
279 		ret = 1;
280 		goto out;
281 	}
282 
283 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
284 	btrfs_delalloc_release_metadata(inode, end + 1 - start);
285 	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
286 out:
287 	btrfs_end_transaction(trans, root);
288 	return ret;
289 }
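
/*
 * Worked example (illustrative, assuming 4K sectorsize and 4K pages): a
 * 3000 byte file written at offset 0 passes every check above -- start is
 * 0, actual_end (3000) is below PAGE_CACHE_SIZE and is not sector aligned
 * -- so the data goes inline.  A 4096 byte write fails the page size and
 * alignment checks and is stored as a regular extent instead.
 */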
290 
291 struct async_extent {
292 	u64 start;
293 	u64 ram_size;
294 	u64 compressed_size;
295 	struct page **pages;
296 	unsigned long nr_pages;
297 	int compress_type;
298 	struct list_head list;
299 };
300 
301 struct async_cow {
302 	struct inode *inode;
303 	struct btrfs_root *root;
304 	struct page *locked_page;
305 	u64 start;
306 	u64 end;
307 	struct list_head extents;
308 	struct btrfs_work work;
309 };
310 
311 static noinline int add_async_extent(struct async_cow *cow,
312 				     u64 start, u64 ram_size,
313 				     u64 compressed_size,
314 				     struct page **pages,
315 				     unsigned long nr_pages,
316 				     int compress_type)
317 {
318 	struct async_extent *async_extent;
319 
320 	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
321 	BUG_ON(!async_extent); /* -ENOMEM */
322 	async_extent->start = start;
323 	async_extent->ram_size = ram_size;
324 	async_extent->compressed_size = compressed_size;
325 	async_extent->pages = pages;
326 	async_extent->nr_pages = nr_pages;
327 	async_extent->compress_type = compress_type;
328 	list_add_tail(&async_extent->list, &cow->extents);
329 	return 0;
330 }
331 
332 /*
333  * we create compressed extents in two phases.  The first
334  * phase compresses a range of pages that have already been
335  * locked (both pages and state bits are locked).
336  *
337  * This is done inside an ordered work queue, and the compression
338  * is spread across many cpus.  The actual IO submission is step
339  * two, and the ordered work queue takes care of making sure that
340  * happens in the same order things were put onto the queue by
341  * writepages and friends.
342  *
343  * If this code finds it can't get good compression, it puts an
344  * entry onto the work queue to write the uncompressed bytes.  This
345  * makes sure that both compressed inodes and uncompressed inodes
346  * are written in the same order that the flusher thread sent them
347  * down.
348  */
349 static noinline int compress_file_range(struct inode *inode,
350 					struct page *locked_page,
351 					u64 start, u64 end,
352 					struct async_cow *async_cow,
353 					int *num_added)
354 {
355 	struct btrfs_root *root = BTRFS_I(inode)->root;
356 	u64 num_bytes;
357 	u64 blocksize = root->sectorsize;
358 	u64 actual_end;
359 	u64 isize = i_size_read(inode);
360 	int ret = 0;
361 	struct page **pages = NULL;
362 	unsigned long nr_pages;
363 	unsigned long nr_pages_ret = 0;
364 	unsigned long total_compressed = 0;
365 	unsigned long total_in = 0;
366 	unsigned long max_compressed = 128 * 1024;
367 	unsigned long max_uncompressed = 128 * 1024;
368 	int i;
369 	int will_compress;
370 	int compress_type = root->fs_info->compress_type;
371 	int redirty = 0;
372 
373 	/* if this is a small write inside eof, kick off a defrag */
374 	if ((end - start + 1) < 16 * 1024 &&
375 	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
376 		btrfs_add_inode_defrag(NULL, inode);
377 
378 	actual_end = min_t(u64, isize, end + 1);
379 again:
380 	will_compress = 0;
381 	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
382 	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
383 
384 	/*
385 	 * we don't want to send crud past the end of i_size through
386 	 * compression, that's just a waste of CPU time.  So, if the
387 	 * end of the file is before the start of our current
388 	 * requested range of bytes, we bail out to the uncompressed
389 	 * cleanup code that can deal with all of this.
390 	 *
391 	 * It isn't really the fastest way to fix things, but this is a
392 	 * very uncommon corner.
393 	 */
394 	if (actual_end <= start)
395 		goto cleanup_and_bail_uncompressed;
396 
397 	total_compressed = actual_end - start;
398 
399 	/* we want to make sure that the amount of ram required to uncompress
400 	 * an extent is reasonable, so we limit the total size in ram
401 	 * of a compressed extent to 128k.  This is a crucial number
402 	 * because it also controls how easily we can spread reads across
403 	 * cpus for decompression.
404 	 *
405 	 * We also want to make sure the amount of IO required to do
406 	 * a random read is reasonably small, so we limit the size of
407 	 * a compressed extent to 128k.
408 	 */
409 	total_compressed = min(total_compressed, max_uncompressed);
410 	num_bytes = ALIGN(end - start + 1, blocksize);
411 	num_bytes = max(blocksize,  num_bytes);
412 	total_in = 0;
413 	ret = 0;
414 
415 	/*
416 	 * we do compression for mount -o compress and when the
417 	 * inode has not been flagged as nocompress.  This flag can
418 	 * change at any time if we discover bad compression ratios.
419 	 */
420 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
421 	    (btrfs_test_opt(root, COMPRESS) ||
422 	     (BTRFS_I(inode)->force_compress) ||
423 	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
424 		WARN_ON(pages);
425 		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
426 		if (!pages) {
427 			/* just bail out to the uncompressed code */
428 			goto cont;
429 		}
430 
431 		if (BTRFS_I(inode)->force_compress)
432 			compress_type = BTRFS_I(inode)->force_compress;
433 
434 		/*
435 		 * we need to call clear_page_dirty_for_io on each
436 		 * page in the range.  Otherwise applications with the file
437 		 * mmap'd can wander in and change the page contents while
438 		 * we are compressing them.
439 		 *
440 		 * If the compression fails for any reason, we set the pages
441 		 * dirty again later on.
442 		 */
443 		extent_range_clear_dirty_for_io(inode, start, end);
444 		redirty = 1;
445 		ret = btrfs_compress_pages(compress_type,
446 					   inode->i_mapping, start,
447 					   total_compressed, pages,
448 					   nr_pages, &nr_pages_ret,
449 					   &total_in,
450 					   &total_compressed,
451 					   max_compressed);
452 
453 		if (!ret) {
454 			unsigned long offset = total_compressed &
455 				(PAGE_CACHE_SIZE - 1);
456 			struct page *page = pages[nr_pages_ret - 1];
457 			char *kaddr;
458 
459 			/* zero the tail end of the last page, we might be
460 			 * sending it down to disk
461 			 */
462 			if (offset) {
463 				kaddr = kmap_atomic(page);
464 				memset(kaddr + offset, 0,
465 				       PAGE_CACHE_SIZE - offset);
466 				kunmap_atomic(kaddr);
467 			}
468 			will_compress = 1;
469 		}
470 	}
471 cont:
472 	if (start == 0) {
473 		/* let's try to make an inline extent */
474 		if (ret || total_in < (actual_end - start)) {
475 			/* we didn't compress the entire range, try
476 			 * to make an uncompressed inline extent.
477 			 */
478 			ret = cow_file_range_inline(root, inode, start, end,
479 						    0, 0, NULL);
480 		} else {
481 			/* try making a compressed inline extent */
482 			ret = cow_file_range_inline(root, inode, start, end,
483 						    total_compressed,
484 						    compress_type, pages);
485 		}
486 		if (ret <= 0) {
487 			unsigned long clear_flags = EXTENT_DELALLOC |
488 				EXTENT_DEFRAG;
489 			clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
490 
491 			/*
492 			 * inline extent creation worked or returned an error,
493 			 * we don't need to create any more async work items.
494 			 * Unlock and free up our temp pages.
495 			 */
496 			extent_clear_unlock_delalloc(inode, start, end, NULL,
497 						     clear_flags, PAGE_UNLOCK |
498 						     PAGE_CLEAR_DIRTY |
499 						     PAGE_SET_WRITEBACK |
500 						     PAGE_END_WRITEBACK);
501 			goto free_pages_out;
502 		}
503 	}
504 
505 	if (will_compress) {
506 		/*
507 		 * we aren't doing an inline extent, so round the compressed
508 		 * size up to a block size boundary so the allocator does
509 		 * sane things
510 		 */
511 		total_compressed = ALIGN(total_compressed, blocksize);
512 
513 		/*
514 		 * one last check to make sure the compression is really a
515 		 * win, compare the page count read with the blocks on disk
516 		 */
517 		total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
518 		if (total_compressed >= total_in) {
519 			will_compress = 0;
520 		} else {
521 			num_bytes = total_in;
522 		}
523 	}
524 	if (!will_compress && pages) {
525 		/*
526 		 * the compression code ran but failed to make things smaller,
527 		 * free any pages it allocated and our page pointer array
528 		 */
529 		for (i = 0; i < nr_pages_ret; i++) {
530 			WARN_ON(pages[i]->mapping);
531 			page_cache_release(pages[i]);
532 		}
533 		kfree(pages);
534 		pages = NULL;
535 		total_compressed = 0;
536 		nr_pages_ret = 0;
537 
538 		/* flag the file so we don't compress in the future */
539 		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
540 		    !(BTRFS_I(inode)->force_compress)) {
541 			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
542 		}
543 	}
544 	if (will_compress) {
545 		*num_added += 1;
546 
547 		/* the async work queues will take care of doing actual
548 		 * allocation on disk for these compressed pages,
549 		 * and will submit them to the elevator.
550 		 */
551 		add_async_extent(async_cow, start, num_bytes,
552 				 total_compressed, pages, nr_pages_ret,
553 				 compress_type);
554 
555 		if (start + num_bytes < end) {
556 			start += num_bytes;
557 			pages = NULL;
558 			cond_resched();
559 			goto again;
560 		}
561 	} else {
562 cleanup_and_bail_uncompressed:
563 		/*
564 		 * No compression, but we still need to write the pages in
565 		 * the file we've been given so far.  Redirty the locked
566 		 * page if it corresponds to our extent and set things up
567 		 * for the async work queue to run cow_file_range to do
568 		 * the normal delalloc dance
569 		 */
570 		if (page_offset(locked_page) >= start &&
571 		    page_offset(locked_page) <= end) {
572 			__set_page_dirty_nobuffers(locked_page);
573 			/* unlocked later on in the async handlers */
574 		}
575 		if (redirty)
576 			extent_range_redirty_for_io(inode, start, end);
577 		add_async_extent(async_cow, start, end - start + 1,
578 				 0, NULL, 0, BTRFS_COMPRESS_NONE);
579 		*num_added += 1;
580 	}
581 
582 out:
583 	return ret;
584 
585 free_pages_out:
586 	for (i = 0; i < nr_pages_ret; i++) {
587 		WARN_ON(pages[i]->mapping);
588 		page_cache_release(pages[i]);
589 	}
590 	kfree(pages);
591 
592 	goto out;
593 }
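
/*
 * Worked example of the "is compression a win" test above (illustrative,
 * 4K blocks and pages): a 128K range that compresses to 33K rounds up to
 * total_compressed = 36K, which beats total_in = 128K, so the compressed
 * copy is kept and num_bytes becomes 128K.  Output of 127K would round to
 * 128K, fail total_compressed < total_in, and the range would fall into
 * the uncompressed path instead.
 */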
594 
595 /*
596  * phase two of compressed writeback.  This is the ordered portion
597  * of the code, which only gets called in the order the work was
598  * queued.  We walk all the async extents created by compress_file_range
599  * and send them down to the disk.
600  */
601 static noinline int submit_compressed_extents(struct inode *inode,
602 					      struct async_cow *async_cow)
603 {
604 	struct async_extent *async_extent;
605 	u64 alloc_hint = 0;
606 	struct btrfs_key ins;
607 	struct extent_map *em;
608 	struct btrfs_root *root = BTRFS_I(inode)->root;
609 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
610 	struct extent_io_tree *io_tree;
611 	int ret = 0;
612 
613 	if (list_empty(&async_cow->extents))
614 		return 0;
615 
616 again:
617 	while (!list_empty(&async_cow->extents)) {
618 		async_extent = list_entry(async_cow->extents.next,
619 					  struct async_extent, list);
620 		list_del(&async_extent->list);
621 
622 		io_tree = &BTRFS_I(inode)->io_tree;
623 
624 retry:
625 		/* did the compression code fall back to uncompressed IO? */
626 		if (!async_extent->pages) {
627 			int page_started = 0;
628 			unsigned long nr_written = 0;
629 
630 			lock_extent(io_tree, async_extent->start,
631 					 async_extent->start +
632 					 async_extent->ram_size - 1);
633 
634 			/* allocate blocks */
635 			ret = cow_file_range(inode, async_cow->locked_page,
636 					     async_extent->start,
637 					     async_extent->start +
638 					     async_extent->ram_size - 1,
639 					     &page_started, &nr_written, 0);
640 
641 			/* JDM XXX */
642 
643 			/*
644 			 * if page_started, cow_file_range inserted an
645 			 * inline extent and took care of all the unlocking
646 			 * and IO for us.  Otherwise, we need to submit
647 			 * all those pages down to the drive.
648 			 */
649 			if (!page_started && !ret)
650 				extent_write_locked_range(io_tree,
651 						  inode, async_extent->start,
652 						  async_extent->start +
653 						  async_extent->ram_size - 1,
654 						  btrfs_get_extent,
655 						  WB_SYNC_ALL);
656 			else if (ret)
657 				unlock_page(async_cow->locked_page);
658 			kfree(async_extent);
659 			cond_resched();
660 			continue;
661 		}
662 
663 		lock_extent(io_tree, async_extent->start,
664 			    async_extent->start + async_extent->ram_size - 1);
665 
666 		ret = btrfs_reserve_extent(root,
667 					   async_extent->compressed_size,
668 					   async_extent->compressed_size,
669 					   0, alloc_hint, &ins, 1);
670 		if (ret) {
671 			int i;
672 
673 			for (i = 0; i < async_extent->nr_pages; i++) {
674 				WARN_ON(async_extent->pages[i]->mapping);
675 				page_cache_release(async_extent->pages[i]);
676 			}
677 			kfree(async_extent->pages);
678 			async_extent->nr_pages = 0;
679 			async_extent->pages = NULL;
680 
681 			if (ret == -ENOSPC) {
682 				unlock_extent(io_tree, async_extent->start,
683 					      async_extent->start +
684 					      async_extent->ram_size - 1);
685 				goto retry;
686 			}
687 			goto out_free;
688 		}
689 
690 		/*
691 		 * here we're doing allocation and writeback of the
692 		 * compressed pages
693 		 */
694 		btrfs_drop_extent_cache(inode, async_extent->start,
695 					async_extent->start +
696 					async_extent->ram_size - 1, 0);
697 
698 		em = alloc_extent_map();
699 		if (!em) {
700 			ret = -ENOMEM;
701 			goto out_free_reserve;
702 		}
703 		em->start = async_extent->start;
704 		em->len = async_extent->ram_size;
705 		em->orig_start = em->start;
706 		em->mod_start = em->start;
707 		em->mod_len = em->len;
708 
709 		em->block_start = ins.objectid;
710 		em->block_len = ins.offset;
711 		em->orig_block_len = ins.offset;
712 		em->ram_bytes = async_extent->ram_size;
713 		em->bdev = root->fs_info->fs_devices->latest_bdev;
714 		em->compress_type = async_extent->compress_type;
715 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
716 		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
717 		em->generation = -1;
718 
719 		while (1) {
720 			write_lock(&em_tree->lock);
721 			ret = add_extent_mapping(em_tree, em, 1);
722 			write_unlock(&em_tree->lock);
723 			if (ret != -EEXIST) {
724 				free_extent_map(em);
725 				break;
726 			}
727 			btrfs_drop_extent_cache(inode, async_extent->start,
728 						async_extent->start +
729 						async_extent->ram_size - 1, 0);
730 		}
731 
732 		if (ret)
733 			goto out_free_reserve;
734 
735 		ret = btrfs_add_ordered_extent_compress(inode,
736 						async_extent->start,
737 						ins.objectid,
738 						async_extent->ram_size,
739 						ins.offset,
740 						BTRFS_ORDERED_COMPRESSED,
741 						async_extent->compress_type);
742 		if (ret)
743 			goto out_free_reserve;
744 
745 		/*
746 		 * clear dirty, set writeback and unlock the pages.
747 		 */
748 		extent_clear_unlock_delalloc(inode, async_extent->start,
749 				async_extent->start +
750 				async_extent->ram_size - 1,
751 				NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
752 				PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
753 				PAGE_SET_WRITEBACK);
754 		ret = btrfs_submit_compressed_write(inode,
755 				    async_extent->start,
756 				    async_extent->ram_size,
757 				    ins.objectid,
758 				    ins.offset, async_extent->pages,
759 				    async_extent->nr_pages);
760 		alloc_hint = ins.objectid + ins.offset;
761 		kfree(async_extent);
762 		if (ret)
763 			goto out;
764 		cond_resched();
765 	}
766 	ret = 0;
767 out:
768 	return ret;
769 out_free_reserve:
770 	btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
771 out_free:
772 	extent_clear_unlock_delalloc(inode, async_extent->start,
773 				     async_extent->start +
774 				     async_extent->ram_size - 1,
775 				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
776 				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
777 				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
778 				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
779 	kfree(async_extent);
780 	goto again;
781 }
782 
783 static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
784 				      u64 num_bytes)
785 {
786 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
787 	struct extent_map *em;
788 	u64 alloc_hint = 0;
789 
790 	read_lock(&em_tree->lock);
791 	em = search_extent_mapping(em_tree, start, num_bytes);
792 	if (em) {
793 		/*
794 		 * if block start isn't an actual block number then find the
795 		 * first block in this inode and use that as a hint.  If that
796 		 * block is also bogus then just don't worry about it.
797 		 */
798 		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
799 			free_extent_map(em);
800 			em = search_extent_mapping(em_tree, 0, 0);
801 			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
802 				alloc_hint = em->block_start;
803 			if (em)
804 				free_extent_map(em);
805 		} else {
806 			alloc_hint = em->block_start;
807 			free_extent_map(em);
808 		}
809 	}
810 	read_unlock(&em_tree->lock);
811 
812 	return alloc_hint;
813 }
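
/*
 * Illustrative caller pattern (this is what cow_file_range() below does):
 */
#if 0
	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	ret = btrfs_reserve_extent(root, num_bytes, root->sectorsize, 0,
				   alloc_hint, &ins, 1);
#endif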
814 
815 /*
816  * when extent_io.c finds a delayed allocation range in the file,
817  * the callbacks end up in this code.  The basic idea is to
818  * allocate extents on disk for the range, and create ordered data structs
819  * in ram to track those extents.
820  *
821  * locked_page is the page that writepage had locked already.  We use
822  * it to make sure we don't do extra locks or unlocks.
823  *
824  * *page_started is set to one if we unlock locked_page and do everything
825  * required to start IO on it.  It may be clean and already done with
826  * IO when we return.
827  */
828 static noinline int cow_file_range(struct inode *inode,
829 				   struct page *locked_page,
830 				   u64 start, u64 end, int *page_started,
831 				   unsigned long *nr_written,
832 				   int unlock)
833 {
834 	struct btrfs_root *root = BTRFS_I(inode)->root;
835 	u64 alloc_hint = 0;
836 	u64 num_bytes;
837 	unsigned long ram_size;
838 	u64 disk_num_bytes;
839 	u64 cur_alloc_size;
840 	u64 blocksize = root->sectorsize;
841 	struct btrfs_key ins;
842 	struct extent_map *em;
843 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
844 	int ret = 0;
845 
846 	if (btrfs_is_free_space_inode(inode)) {
847 		WARN_ON_ONCE(1);
848 		return -EINVAL;
849 	}
850 
851 	num_bytes = ALIGN(end - start + 1, blocksize);
852 	num_bytes = max(blocksize,  num_bytes);
853 	disk_num_bytes = num_bytes;
854 
855 	/* if this is a small write inside eof, kick off defrag */
856 	if (num_bytes < 64 * 1024 &&
857 	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
858 		btrfs_add_inode_defrag(NULL, inode);
859 
860 	if (start == 0) {
861 		/* let's try to make an inline extent */
862 		ret = cow_file_range_inline(root, inode, start, end, 0, 0,
863 					    NULL);
864 		if (ret == 0) {
865 			extent_clear_unlock_delalloc(inode, start, end, NULL,
866 				     EXTENT_LOCKED | EXTENT_DELALLOC |
867 				     EXTENT_DEFRAG, PAGE_UNLOCK |
868 				     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
869 				     PAGE_END_WRITEBACK);
870 
871 			*nr_written = *nr_written +
872 			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
873 			*page_started = 1;
874 			goto out;
875 		} else if (ret < 0) {
876 			goto out_unlock;
877 		}
878 	}
879 
880 	BUG_ON(disk_num_bytes >
881 	       btrfs_super_total_bytes(root->fs_info->super_copy));
882 
883 	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
884 	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
885 
886 	while (disk_num_bytes > 0) {
887 		unsigned long op;
888 
889 		cur_alloc_size = disk_num_bytes;
890 		ret = btrfs_reserve_extent(root, cur_alloc_size,
891 					   root->sectorsize, 0, alloc_hint,
892 					   &ins, 1);
893 		if (ret < 0)
894 			goto out_unlock;
895 
896 		em = alloc_extent_map();
897 		if (!em) {
898 			ret = -ENOMEM;
899 			goto out_reserve;
900 		}
901 		em->start = start;
902 		em->orig_start = em->start;
903 		ram_size = ins.offset;
904 		em->len = ins.offset;
905 		em->mod_start = em->start;
906 		em->mod_len = em->len;
907 
908 		em->block_start = ins.objectid;
909 		em->block_len = ins.offset;
910 		em->orig_block_len = ins.offset;
911 		em->ram_bytes = ram_size;
912 		em->bdev = root->fs_info->fs_devices->latest_bdev;
913 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
914 		em->generation = -1;
915 
916 		while (1) {
917 			write_lock(&em_tree->lock);
918 			ret = add_extent_mapping(em_tree, em, 1);
919 			write_unlock(&em_tree->lock);
920 			if (ret != -EEXIST) {
921 				free_extent_map(em);
922 				break;
923 			}
924 			btrfs_drop_extent_cache(inode, start,
925 						start + ram_size - 1, 0);
926 		}
927 		if (ret)
928 			goto out_reserve;
929 
930 		cur_alloc_size = ins.offset;
931 		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
932 					       ram_size, cur_alloc_size, 0);
933 		if (ret)
934 			goto out_reserve;
935 
936 		if (root->root_key.objectid ==
937 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
938 			ret = btrfs_reloc_clone_csums(inode, start,
939 						      cur_alloc_size);
940 			if (ret)
941 				goto out_reserve;
942 		}
943 
944 		if (disk_num_bytes < cur_alloc_size)
945 			break;
946 
947 		/* we're not doing compressed IO, don't unlock the first
948 		 * page (which the caller expects to stay locked), don't
949 		 * clear any dirty bits and don't set any writeback bits
950 		 *
951 		 * Do set the Private2 bit so we know this page was properly
952 		 * setup for writepage
953 		 */
954 		op = unlock ? PAGE_UNLOCK : 0;
955 		op |= PAGE_SET_PRIVATE2;
956 
957 		extent_clear_unlock_delalloc(inode, start,
958 					     start + ram_size - 1, locked_page,
959 					     EXTENT_LOCKED | EXTENT_DELALLOC,
960 					     op);
961 		disk_num_bytes -= cur_alloc_size;
962 		num_bytes -= cur_alloc_size;
963 		alloc_hint = ins.objectid + ins.offset;
964 		start += cur_alloc_size;
965 	}
966 out:
967 	return ret;
968 
969 out_reserve:
970 	btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
971 out_unlock:
972 	extent_clear_unlock_delalloc(inode, start, end, locked_page,
973 				     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
974 				     EXTENT_DELALLOC | EXTENT_DEFRAG,
975 				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
976 				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
977 	goto out;
978 }
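
/*
 * Example (illustrative): for a 1M delalloc range on a 4K-sector fs the
 * loop above first asks btrfs_reserve_extent() for the full 1M; if only,
 * say, 384K can be found contiguously, an ordered extent covering those
 * 384K is created, start and alloc_hint advance past them, and the loop
 * retries for the remaining 640K until disk_num_bytes reaches zero.
 */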
979 
980 /*
981  * work queue callback to start compression on a file's pages
982  */
983 static noinline void async_cow_start(struct btrfs_work *work)
984 {
985 	struct async_cow *async_cow;
986 	int num_added = 0;
987 	async_cow = container_of(work, struct async_cow, work);
988 
989 	compress_file_range(async_cow->inode, async_cow->locked_page,
990 			    async_cow->start, async_cow->end, async_cow,
991 			    &num_added);
992 	if (num_added == 0) {
993 		btrfs_add_delayed_iput(async_cow->inode);
994 		async_cow->inode = NULL;
995 	}
996 }
997 
998 /*
999  * work queue callback to submit previously compressed pages
1000  */
1001 static noinline void async_cow_submit(struct btrfs_work *work)
1002 {
1003 	struct async_cow *async_cow;
1004 	struct btrfs_root *root;
1005 	unsigned long nr_pages;
1006 
1007 	async_cow = container_of(work, struct async_cow, work);
1008 
1009 	root = async_cow->root;
1010 	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
1011 		PAGE_CACHE_SHIFT;
1012 
1013 	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
1014 	    5 * 1024 * 1024 &&
1015 	    waitqueue_active(&root->fs_info->async_submit_wait))
1016 		wake_up(&root->fs_info->async_submit_wait);
1017 
1018 	if (async_cow->inode)
1019 		submit_compressed_extents(async_cow->inode, async_cow);
1020 }
1021 
1022 static noinline void async_cow_free(struct btrfs_work *work)
1023 {
1024 	struct async_cow *async_cow;
1025 	async_cow = container_of(work, struct async_cow, work);
1026 	if (async_cow->inode)
1027 		btrfs_add_delayed_iput(async_cow->inode);
1028 	kfree(async_cow);
1029 }
1030 
1031 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
1032 				u64 start, u64 end, int *page_started,
1033 				unsigned long *nr_written)
1034 {
1035 	struct async_cow *async_cow;
1036 	struct btrfs_root *root = BTRFS_I(inode)->root;
1037 	unsigned long nr_pages;
1038 	u64 cur_end;
1039 	int limit = 10 * 1024 * 1024;
1040 
1041 	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
1042 			 1, 0, NULL, GFP_NOFS);
1043 	while (start < end) {
1044 		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
1045 		BUG_ON(!async_cow); /* -ENOMEM */
1046 		async_cow->inode = igrab(inode);
1047 		async_cow->root = root;
1048 		async_cow->locked_page = locked_page;
1049 		async_cow->start = start;
1050 
1051 		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
1052 			cur_end = end;
1053 		else
1054 			cur_end = min(end, start + 512 * 1024 - 1);
1055 
1056 		async_cow->end = cur_end;
1057 		INIT_LIST_HEAD(&async_cow->extents);
1058 
1059 		async_cow->work.func = async_cow_start;
1060 		async_cow->work.ordered_func = async_cow_submit;
1061 		async_cow->work.ordered_free = async_cow_free;
1062 		async_cow->work.flags = 0;
1063 
1064 		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
1065 			PAGE_CACHE_SHIFT;
1066 		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
1067 
1068 		btrfs_queue_worker(&root->fs_info->delalloc_workers,
1069 				   &async_cow->work);
1070 
1071 		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
1072 			wait_event(root->fs_info->async_submit_wait,
1073 			   (atomic_read(&root->fs_info->async_delalloc_pages) <
1074 			    limit));
1075 		}
1076 
1077 		while (atomic_read(&root->fs_info->async_submit_draining) &&
1078 		      atomic_read(&root->fs_info->async_delalloc_pages)) {
1079 			wait_event(root->fs_info->async_submit_wait,
1080 			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
1081 			   0));
1082 		}
1083 
1084 		*nr_written += nr_pages;
1085 		start = cur_end + 1;
1086 	}
1087 	*page_started = 1;
1088 	return 0;
1089 }
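
/*
 * Example (illustrative): a 3M dirty range on a compressible inode is cut
 * into six 512K async_cow units.  Each unit is queued to the delalloc
 * workers so compression runs in parallel, while the ordered_func
 * (async_cow_submit) guarantees the results are still submitted in file
 * order.
 */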
1090 
1091 static noinline int csum_exist_in_range(struct btrfs_root *root,
1092 					u64 bytenr, u64 num_bytes)
1093 {
1094 	int ret;
1095 	struct btrfs_ordered_sum *sums;
1096 	LIST_HEAD(list);
1097 
1098 	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
1099 				       bytenr + num_bytes - 1, &list, 0);
1100 	if (ret == 0 && list_empty(&list))
1101 		return 0;
1102 
1103 	while (!list_empty(&list)) {
1104 		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1105 		list_del(&sums->list);
1106 		kfree(sums);
1107 	}
1108 	return 1;
1109 }
1110 
1111 /*
1112  * the nocow writeback callback.  This checks for snapshots or COW copies
1113  * of the extents that exist in the file, and COWs the file as required.
1114  *
1115  * If no cow copies or snapshots exist, we write directly to the existing
1116  * blocks on disk
1117  */
1118 static noinline int run_delalloc_nocow(struct inode *inode,
1119 				       struct page *locked_page,
1120 			      u64 start, u64 end, int *page_started, int force,
1121 			      unsigned long *nr_written)
1122 {
1123 	struct btrfs_root *root = BTRFS_I(inode)->root;
1124 	struct btrfs_trans_handle *trans;
1125 	struct extent_buffer *leaf;
1126 	struct btrfs_path *path;
1127 	struct btrfs_file_extent_item *fi;
1128 	struct btrfs_key found_key;
1129 	u64 cow_start;
1130 	u64 cur_offset;
1131 	u64 extent_end;
1132 	u64 extent_offset;
1133 	u64 disk_bytenr;
1134 	u64 num_bytes;
1135 	u64 disk_num_bytes;
1136 	u64 ram_bytes;
1137 	int extent_type;
1138 	int ret, err;
1139 	int type;
1140 	int nocow;
1141 	int check_prev = 1;
1142 	bool nolock;
1143 	u64 ino = btrfs_ino(inode);
1144 
1145 	path = btrfs_alloc_path();
1146 	if (!path) {
1147 		extent_clear_unlock_delalloc(inode, start, end, locked_page,
1148 					     EXTENT_LOCKED | EXTENT_DELALLOC |
1149 					     EXTENT_DO_ACCOUNTING |
1150 					     EXTENT_DEFRAG, PAGE_UNLOCK |
1151 					     PAGE_CLEAR_DIRTY |
1152 					     PAGE_SET_WRITEBACK |
1153 					     PAGE_END_WRITEBACK);
1154 		return -ENOMEM;
1155 	}
1156 
1157 	nolock = btrfs_is_free_space_inode(inode);
1158 
1159 	if (nolock)
1160 		trans = btrfs_join_transaction_nolock(root);
1161 	else
1162 		trans = btrfs_join_transaction(root);
1163 
1164 	if (IS_ERR(trans)) {
1165 		extent_clear_unlock_delalloc(inode, start, end, locked_page,
1166 					     EXTENT_LOCKED | EXTENT_DELALLOC |
1167 					     EXTENT_DO_ACCOUNTING |
1168 					     EXTENT_DEFRAG, PAGE_UNLOCK |
1169 					     PAGE_CLEAR_DIRTY |
1170 					     PAGE_SET_WRITEBACK |
1171 					     PAGE_END_WRITEBACK);
1172 		btrfs_free_path(path);
1173 		return PTR_ERR(trans);
1174 	}
1175 
1176 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1177 
1178 	cow_start = (u64)-1;
1179 	cur_offset = start;
1180 	while (1) {
1181 		ret = btrfs_lookup_file_extent(trans, root, path, ino,
1182 					       cur_offset, 0);
1183 		if (ret < 0)
1184 			goto error;
1185 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
1186 			leaf = path->nodes[0];
1187 			btrfs_item_key_to_cpu(leaf, &found_key,
1188 					      path->slots[0] - 1);
1189 			if (found_key.objectid == ino &&
1190 			    found_key.type == BTRFS_EXTENT_DATA_KEY)
1191 				path->slots[0]--;
1192 		}
1193 		check_prev = 0;
1194 next_slot:
1195 		leaf = path->nodes[0];
1196 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1197 			ret = btrfs_next_leaf(root, path);
1198 			if (ret < 0)
1199 				goto error;
1200 			if (ret > 0)
1201 				break;
1202 			leaf = path->nodes[0];
1203 		}
1204 
1205 		nocow = 0;
1206 		disk_bytenr = 0;
1207 		num_bytes = 0;
1208 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1209 
1210 		if (found_key.objectid > ino ||
1211 		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
1212 		    found_key.offset > end)
1213 			break;
1214 
1215 		if (found_key.offset > cur_offset) {
1216 			extent_end = found_key.offset;
1217 			extent_type = 0;
1218 			goto out_check;
1219 		}
1220 
1221 		fi = btrfs_item_ptr(leaf, path->slots[0],
1222 				    struct btrfs_file_extent_item);
1223 		extent_type = btrfs_file_extent_type(leaf, fi);
1224 
1225 		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1226 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
1227 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1228 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1229 			extent_offset = btrfs_file_extent_offset(leaf, fi);
1230 			extent_end = found_key.offset +
1231 				btrfs_file_extent_num_bytes(leaf, fi);
1232 			disk_num_bytes =
1233 				btrfs_file_extent_disk_num_bytes(leaf, fi);
1234 			if (extent_end <= start) {
1235 				path->slots[0]++;
1236 				goto next_slot;
1237 			}
1238 			if (disk_bytenr == 0)
1239 				goto out_check;
1240 			if (btrfs_file_extent_compression(leaf, fi) ||
1241 			    btrfs_file_extent_encryption(leaf, fi) ||
1242 			    btrfs_file_extent_other_encoding(leaf, fi))
1243 				goto out_check;
1244 			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1245 				goto out_check;
1246 			if (btrfs_extent_readonly(root, disk_bytenr))
1247 				goto out_check;
1248 			if (btrfs_cross_ref_exist(trans, root, ino,
1249 						  found_key.offset -
1250 						  extent_offset, disk_bytenr))
1251 				goto out_check;
1252 			disk_bytenr += extent_offset;
1253 			disk_bytenr += cur_offset - found_key.offset;
1254 			num_bytes = min(end + 1, extent_end) - cur_offset;
1255 			/*
1256 			 * force cow if csum exists in the range.
1257 			 * this ensures that csums for a given extent are
1258 			 * either valid or do not exist.
1259 			 */
1260 			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1261 				goto out_check;
1262 			nocow = 1;
1263 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1264 			extent_end = found_key.offset +
1265 				btrfs_file_extent_inline_len(leaf, fi);
1266 			extent_end = ALIGN(extent_end, root->sectorsize);
1267 		} else {
1268 			BUG_ON(1);
1269 		}
1270 out_check:
1271 		if (extent_end <= start) {
1272 			path->slots[0]++;
1273 			goto next_slot;
1274 		}
1275 		if (!nocow) {
1276 			if (cow_start == (u64)-1)
1277 				cow_start = cur_offset;
1278 			cur_offset = extent_end;
1279 			if (cur_offset > end)
1280 				break;
1281 			path->slots[0]++;
1282 			goto next_slot;
1283 		}
1284 
1285 		btrfs_release_path(path);
1286 		if (cow_start != (u64)-1) {
1287 			ret = cow_file_range(inode, locked_page,
1288 					     cow_start, found_key.offset - 1,
1289 					     page_started, nr_written, 1);
1290 			if (ret)
1291 				goto error;
1292 			cow_start = (u64)-1;
1293 		}
1294 
1295 		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1296 			struct extent_map *em;
1297 			struct extent_map_tree *em_tree;
1298 			em_tree = &BTRFS_I(inode)->extent_tree;
1299 			em = alloc_extent_map();
1300 			BUG_ON(!em); /* -ENOMEM */
1301 			em->start = cur_offset;
1302 			em->orig_start = found_key.offset - extent_offset;
1303 			em->len = num_bytes;
1304 			em->block_len = num_bytes;
1305 			em->block_start = disk_bytenr;
1306 			em->orig_block_len = disk_num_bytes;
1307 			em->ram_bytes = ram_bytes;
1308 			em->bdev = root->fs_info->fs_devices->latest_bdev;
1309 			em->mod_start = em->start;
1310 			em->mod_len = em->len;
1311 			set_bit(EXTENT_FLAG_PINNED, &em->flags);
1312 			set_bit(EXTENT_FLAG_FILLING, &em->flags);
1313 			em->generation = -1;
1314 			while (1) {
1315 				write_lock(&em_tree->lock);
1316 				ret = add_extent_mapping(em_tree, em, 1);
1317 				write_unlock(&em_tree->lock);
1318 				if (ret != -EEXIST) {
1319 					free_extent_map(em);
1320 					break;
1321 				}
1322 				btrfs_drop_extent_cache(inode, em->start,
1323 						em->start + em->len - 1, 0);
1324 			}
1325 			type = BTRFS_ORDERED_PREALLOC;
1326 		} else {
1327 			type = BTRFS_ORDERED_NOCOW;
1328 		}
1329 
1330 		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1331 					       num_bytes, num_bytes, type);
1332 		BUG_ON(ret); /* -ENOMEM */
1333 
1334 		if (root->root_key.objectid ==
1335 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
1336 			ret = btrfs_reloc_clone_csums(inode, cur_offset,
1337 						      num_bytes);
1338 			if (ret)
1339 				goto error;
1340 		}
1341 
1342 		extent_clear_unlock_delalloc(inode, cur_offset,
1343 					     cur_offset + num_bytes - 1,
1344 					     locked_page, EXTENT_LOCKED |
1345 					     EXTENT_DELALLOC, PAGE_UNLOCK |
1346 					     PAGE_SET_PRIVATE2);
1347 		cur_offset = extent_end;
1348 		if (cur_offset > end)
1349 			break;
1350 	}
1351 	btrfs_release_path(path);
1352 
1353 	if (cur_offset <= end && cow_start == (u64)-1) {
1354 		cow_start = cur_offset;
1355 		cur_offset = end;
1356 	}
1357 
1358 	if (cow_start != (u64)-1) {
1359 		ret = cow_file_range(inode, locked_page, cow_start, end,
1360 				     page_started, nr_written, 1);
1361 		if (ret)
1362 			goto error;
1363 	}
1364 
1365 error:
1366 	err = btrfs_end_transaction(trans, root);
1367 	if (!ret)
1368 		ret = err;
1369 
1370 	if (ret && cur_offset < end)
1371 		extent_clear_unlock_delalloc(inode, cur_offset, end,
1372 					     locked_page, EXTENT_LOCKED |
1373 					     EXTENT_DELALLOC | EXTENT_DEFRAG |
1374 					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1375 					     PAGE_CLEAR_DIRTY |
1376 					     PAGE_SET_WRITEBACK |
1377 					     PAGE_END_WRITEBACK);
1378 	btrfs_free_path(path);
1379 	return ret;
1380 }
1381 
1382 /*
1383  * extent_io.c callback to do delayed allocation processing
1384  */
1385 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1386 			      u64 start, u64 end, int *page_started,
1387 			      unsigned long *nr_written)
1388 {
1389 	int ret;
1390 	struct btrfs_root *root = BTRFS_I(inode)->root;
1391 
1392 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) {
1393 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1394 					 page_started, 1, nr_written);
1395 	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) {
1396 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1397 					 page_started, 0, nr_written);
1398 	} else if (!btrfs_test_opt(root, COMPRESS) &&
1399 		   !(BTRFS_I(inode)->force_compress) &&
1400 		   !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) {
1401 		ret = cow_file_range(inode, locked_page, start, end,
1402 				      page_started, nr_written, 1);
1403 	} else {
1404 		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1405 			&BTRFS_I(inode)->runtime_flags);
1406 		ret = cow_file_range_async(inode, locked_page, start, end,
1407 					   page_started, nr_written);
1408 	}
1409 	return ret;
1410 }
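
/*
 * Illustrative summary of the dispatch above:
 *   NODATACOW flag set    -> run_delalloc_nocow(force = 1)
 *   PREALLOC flag set     -> run_delalloc_nocow(force = 0)
 *   compression disabled  -> cow_file_range(), synchronously
 *   otherwise             -> cow_file_range_async(), maybe compressing
 */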
1411 
1412 static void btrfs_split_extent_hook(struct inode *inode,
1413 				    struct extent_state *orig, u64 split)
1414 {
1415 	/* not delalloc, ignore it */
1416 	if (!(orig->state & EXTENT_DELALLOC))
1417 		return;
1418 
1419 	spin_lock(&BTRFS_I(inode)->lock);
1420 	BTRFS_I(inode)->outstanding_extents++;
1421 	spin_unlock(&BTRFS_I(inode)->lock);
1422 }
1423 
1424 /*
1425  * extent_io.c merge_extent_hook, used to track delayed allocation
1426  * extents that are merged onto old ones, such as when we are doing
1427  * sequential writes, so we can properly account for the metadata space
1428  * we'll need.
1429  */
1430 static void btrfs_merge_extent_hook(struct inode *inode,
1431 				    struct extent_state *new,
1432 				    struct extent_state *other)
1433 {
1434 	/* not delalloc, ignore it */
1435 	if (!(other->state & EXTENT_DELALLOC))
1436 		return;
1437 
1438 	spin_lock(&BTRFS_I(inode)->lock);
1439 	BTRFS_I(inode)->outstanding_extents--;
1440 	spin_unlock(&BTRFS_I(inode)->lock);
1441 }
1442 
1443 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
1444 				      struct inode *inode)
1445 {
1446 	spin_lock(&root->delalloc_lock);
1447 	if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1448 		list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1449 			      &root->delalloc_inodes);
1450 		set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1451 			&BTRFS_I(inode)->runtime_flags);
1452 		root->nr_delalloc_inodes++;
1453 		if (root->nr_delalloc_inodes == 1) {
1454 			spin_lock(&root->fs_info->delalloc_root_lock);
1455 			BUG_ON(!list_empty(&root->delalloc_root));
1456 			list_add_tail(&root->delalloc_root,
1457 				      &root->fs_info->delalloc_roots);
1458 			spin_unlock(&root->fs_info->delalloc_root_lock);
1459 		}
1460 	}
1461 	spin_unlock(&root->delalloc_lock);
1462 }
1463 
1464 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
1465 				     struct inode *inode)
1466 {
1467 	spin_lock(&root->delalloc_lock);
1468 	if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1469 		list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1470 		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1471 			  &BTRFS_I(inode)->runtime_flags);
1472 		root->nr_delalloc_inodes--;
1473 		if (!root->nr_delalloc_inodes) {
1474 			spin_lock(&root->fs_info->delalloc_root_lock);
1475 			BUG_ON(list_empty(&root->delalloc_root));
1476 			list_del_init(&root->delalloc_root);
1477 			spin_unlock(&root->fs_info->delalloc_root_lock);
1478 		}
1479 	}
1480 	spin_unlock(&root->delalloc_lock);
1481 }
1482 
1483 /*
1484  * extent_io.c set_bit_hook, used to track delayed allocation
1485  * bytes in this file, and to maintain the list of inodes that
1486  * have pending delalloc work to be done.
1487  */
1488 static void btrfs_set_bit_hook(struct inode *inode,
1489 			       struct extent_state *state, unsigned long *bits)
1490 {
1491 
1492 	/*
1493 	 * set_bit and clear_bit hooks normally require _irqsave/restore
1494 	 * but in this case, we are only testing for the DELALLOC
1495 	 * bit, which is only set or cleared with irqs on
1496 	 */
1497 	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1498 		struct btrfs_root *root = BTRFS_I(inode)->root;
1499 		u64 len = state->end + 1 - state->start;
1500 		bool do_list = !btrfs_is_free_space_inode(inode);
1501 
1502 		if (*bits & EXTENT_FIRST_DELALLOC) {
1503 			*bits &= ~EXTENT_FIRST_DELALLOC;
1504 		} else {
1505 			spin_lock(&BTRFS_I(inode)->lock);
1506 			BTRFS_I(inode)->outstanding_extents++;
1507 			spin_unlock(&BTRFS_I(inode)->lock);
1508 		}
1509 
1510 		__percpu_counter_add(&root->fs_info->delalloc_bytes, len,
1511 				     root->fs_info->delalloc_batch);
1512 		spin_lock(&BTRFS_I(inode)->lock);
1513 		BTRFS_I(inode)->delalloc_bytes += len;
1514 		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1515 					 &BTRFS_I(inode)->runtime_flags))
1516 			btrfs_add_delalloc_inodes(root, inode);
1517 		spin_unlock(&BTRFS_I(inode)->lock);
1518 	}
1519 }
1520 
1521 /*
1522  * extent_io.c clear_bit_hook, see set_bit_hook for why
1523  */
1524 static void btrfs_clear_bit_hook(struct inode *inode,
1525 				 struct extent_state *state,
1526 				 unsigned long *bits)
1527 {
1528 	/*
1529 	 * set_bit and clear_bit hooks normally require _irqsave/restore
1530 	 * but in this case, we are only testing for the DELALLOC
1531 	 * bit, which is only set or cleared with irqs on
1532 	 */
1533 	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1534 		struct btrfs_root *root = BTRFS_I(inode)->root;
1535 		u64 len = state->end + 1 - state->start;
1536 		bool do_list = !btrfs_is_free_space_inode(inode);
1537 
1538 		if (*bits & EXTENT_FIRST_DELALLOC) {
1539 			*bits &= ~EXTENT_FIRST_DELALLOC;
1540 		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
1541 			spin_lock(&BTRFS_I(inode)->lock);
1542 			BTRFS_I(inode)->outstanding_extents--;
1543 			spin_unlock(&BTRFS_I(inode)->lock);
1544 		}
1545 
1546 		/*
1547 		 * We don't reserve metadata space for space cache inodes so we
1548 		 * don't need to call delalloc_release_metadata if there is an
1549 		 * error.
1550 		 */
1551 		if (*bits & EXTENT_DO_ACCOUNTING &&
1552 		    root != root->fs_info->tree_root)
1553 			btrfs_delalloc_release_metadata(inode, len);
1554 
1555 		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1556 		    && do_list && !(state->state & EXTENT_NORESERVE))
1557 			btrfs_free_reserved_data_space(inode, len);
1558 
1559 		__percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
1560 				     root->fs_info->delalloc_batch);
1561 		spin_lock(&BTRFS_I(inode)->lock);
1562 		BTRFS_I(inode)->delalloc_bytes -= len;
1563 		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
1564 		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1565 			     &BTRFS_I(inode)->runtime_flags))
1566 			btrfs_del_delalloc_inode(root, inode);
1567 		spin_unlock(&BTRFS_I(inode)->lock);
1568 	}
1569 }
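
/*
 * Example (illustrative): clearing DELALLOC with DO_ACCOUNTING on a 1M
 * range releases the metadata reservation, subtracts 1M from both the
 * fs-wide and per-inode delalloc byte counts, and, once the inode's count
 * hits zero, drops the inode from the root's delalloc list so the flusher
 * stops visiting it.
 */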
1570 
1571 /*
1572  * extent_io.c merge_bio_hook: this must check the chunk tree to make sure
1573  * we don't create bios that span stripes or chunks
1574  */
1575 int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
1576 			 size_t size, struct bio *bio,
1577 			 unsigned long bio_flags)
1578 {
1579 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1580 	u64 logical = (u64)bio->bi_sector << 9;
1581 	u64 length = 0;
1582 	u64 map_length;
1583 	int ret;
1584 
1585 	if (bio_flags & EXTENT_BIO_COMPRESSED)
1586 		return 0;
1587 
1588 	length = bio->bi_size;
1589 	map_length = length;
1590 	ret = btrfs_map_block(root->fs_info, rw, logical,
1591 			      &map_length, NULL, 0);
1592 	/* Will always return 0 with bbio_ret == NULL */
1593 	BUG_ON(ret < 0);
1594 	if (map_length < length + size)
1595 		return 1;
1596 	return 0;
1597 }
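
/*
 * Worked example (illustrative): if the bio already spans 60K and
 * btrfs_map_block() reports map_length == 64K for its starting logical
 * address, adding one more 8K chunk gives length + size == 68K > 64K, so
 * we return 1 and the caller starts a new bio at the stripe boundary.
 */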
1598 
1599 /*
1600  * in order to insert checksums into the metadata in large chunks,
1601  * we wait until bio submission time.   All the pages in the bio are
1602  * checksummed and sums are attached onto the ordered extent record.
1603  *
1604  * At IO completion time the csums attached to the ordered extent record
1605  * are inserted into the btree
1606  */
1607 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1608 				    struct bio *bio, int mirror_num,
1609 				    unsigned long bio_flags,
1610 				    u64 bio_offset)
1611 {
1612 	struct btrfs_root *root = BTRFS_I(inode)->root;
1613 	int ret = 0;
1614 
1615 	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1616 	BUG_ON(ret); /* -ENOMEM */
1617 	return 0;
1618 }
1619 
1620 /*
1621  * in order to insert checksums into the metadata in large chunks,
1622  * we wait until bio submission time.   All the pages in the bio are
1623  * checksummed and sums are attached onto the ordered extent record.
1624  *
1625  * At IO completion time the csums attached to the ordered extent record
1626  * are inserted into the btree
1627  */
1628 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1629 			  int mirror_num, unsigned long bio_flags,
1630 			  u64 bio_offset)
1631 {
1632 	struct btrfs_root *root = BTRFS_I(inode)->root;
1633 	int ret;
1634 
1635 	ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
1636 	if (ret)
1637 		bio_endio(bio, ret);
1638 	return ret;
1639 }
1640 
1641 /*
1642  * extent_io.c submission hook. This does the right thing for csum calculation
1643  * on write, or reading the csums from the tree before a read
1644  */
1645 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1646 			  int mirror_num, unsigned long bio_flags,
1647 			  u64 bio_offset)
1648 {
1649 	struct btrfs_root *root = BTRFS_I(inode)->root;
1650 	int ret = 0;
1651 	int skip_sum;
1652 	int metadata = 0;
1653 	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
1654 
1655 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1656 
1657 	if (btrfs_is_free_space_inode(inode))
1658 		metadata = 2;
1659 
1660 	if (!(rw & REQ_WRITE)) {
1661 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
1662 		if (ret)
1663 			goto out;
1664 
1665 		if (bio_flags & EXTENT_BIO_COMPRESSED) {
1666 			ret = btrfs_submit_compressed_read(inode, bio,
1667 							   mirror_num,
1668 							   bio_flags);
1669 			goto out;
1670 		} else if (!skip_sum) {
1671 			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
1672 			if (ret)
1673 				goto out;
1674 		}
1675 		goto mapit;
1676 	} else if (async && !skip_sum) {
1677 		/* csum items have already been cloned */
1678 		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1679 			goto mapit;
1680 		/* we're doing a write, do the async checksumming */
1681 		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1682 				   inode, rw, bio, mirror_num,
1683 				   bio_flags, bio_offset,
1684 				   __btrfs_submit_bio_start,
1685 				   __btrfs_submit_bio_done);
1686 		goto out;
1687 	} else if (!skip_sum) {
1688 		ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1689 		if (ret)
1690 			goto out;
1691 	}
1692 
1693 mapit:
1694 	ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
1695 
1696 out:
1697 	if (ret < 0)
1698 		bio_endio(bio, ret);
1699 	return ret;
1700 }
1701 
1702 /*
1703  * given a list of ordered sums, record them in the inode.  This happens
1704  * at IO completion time based on sums calculated at bio submission time.
1705  */
1706 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1707 			     struct inode *inode, u64 file_offset,
1708 			     struct list_head *list)
1709 {
1710 	struct btrfs_ordered_sum *sum;
1711 
1712 	list_for_each_entry(sum, list, list) {
1713 		trans->adding_csums = 1;
1714 		btrfs_csum_file_blocks(trans,
1715 		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
1716 		trans->adding_csums = 0;
1717 	}
1718 	return 0;
1719 }
1720 
1721 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1722 			      struct extent_state **cached_state)
1723 {
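	/*
	 * 'end' is an inclusive offset here, so a value whose low page bits
	 * are all zero suggests the caller passed an exclusive end by
	 * mistake; that is what the WARN_ON below tries to catch.
	 */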
1724 	WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
1725 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1726 				   cached_state, GFP_NOFS);
1727 }
1728 
1729 /* see btrfs_writepage_start_hook for details on why this is required */
1730 struct btrfs_writepage_fixup {
1731 	struct page *page;
1732 	struct btrfs_work work;
1733 };
1734 
1735 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1736 {
1737 	struct btrfs_writepage_fixup *fixup;
1738 	struct btrfs_ordered_extent *ordered;
1739 	struct extent_state *cached_state = NULL;
1740 	struct page *page;
1741 	struct inode *inode;
1742 	u64 page_start;
1743 	u64 page_end;
1744 	int ret;
1745 
1746 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
1747 	page = fixup->page;
1748 again:
1749 	lock_page(page);
1750 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1751 		ClearPageChecked(page);
1752 		goto out_page;
1753 	}
1754 
1755 	inode = page->mapping->host;
1756 	page_start = page_offset(page);
1757 	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1758 
1759 	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1760 			 &cached_state);
1761 
1762 	/* already ordered? We're done */
1763 	if (PagePrivate2(page))
1764 		goto out;
1765 
1766 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
1767 	if (ordered) {
1768 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1769 				     page_end, &cached_state, GFP_NOFS);
1770 		unlock_page(page);
1771 		btrfs_start_ordered_extent(inode, ordered, 1);
1772 		btrfs_put_ordered_extent(ordered);
1773 		goto again;
1774 	}
1775 
1776 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
1777 	if (ret) {
1778 		mapping_set_error(page->mapping, ret);
1779 		end_extent_writepage(page, ret, page_start, page_end);
1780 		ClearPageChecked(page);
1781 		goto out;
1782 	}
1783 
1784 	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1785 	ClearPageChecked(page);
1786 	set_page_dirty(page);
1787 out:
1788 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
1789 			     &cached_state, GFP_NOFS);
1790 out_page:
1791 	unlock_page(page);
1792 	page_cache_release(page);
1793 	kfree(fixup);
1794 }
1795 
1796 /*
1797  * There are a few paths in the higher layers of the kernel that directly
1798  * set the page dirty bit without asking the filesystem if it is a
1799  * good idea.  This causes problems because we want to make sure COW
1800  * properly happens and the data=ordered rules are followed.
1801  *
1802  * In our case any range that doesn't have the ORDERED bit set
1803  * hasn't been properly setup for IO.  We kick off an async process
1804  * to fix it up.  The async helper will wait for ordered extents, set
1805  * the delalloc bit and make it safe to write the page.
1806  */
1807 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1808 {
1809 	struct inode *inode = page->mapping->host;
1810 	struct btrfs_writepage_fixup *fixup;
1811 	struct btrfs_root *root = BTRFS_I(inode)->root;
1812 
1813 	/* this page is properly in the ordered list */
1814 	if (TestClearPagePrivate2(page))
1815 		return 0;
1816 
1817 	if (PageChecked(page))
1818 		return -EAGAIN;
1819 
1820 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1821 	if (!fixup)
1822 		return -EAGAIN;
1823 
1824 	SetPageChecked(page);
1825 	page_cache_get(page);
1826 	fixup->work.func = btrfs_writepage_fixup_worker;
1827 	fixup->page = page;
1828 	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1829 	return -EBUSY;
1830 }
1831 
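/*
 * Insert the file extent item for a finished ordered extent: drop whatever
 * extents are in the way (leaving the pinned extent map in the cache),
 * write the new BTRFS_EXTENT_DATA_KEY item, account the bytes to the inode
 * and add the allocation backref for the new extent.
 */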
1832 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1833 				       struct inode *inode, u64 file_pos,
1834 				       u64 disk_bytenr, u64 disk_num_bytes,
1835 				       u64 num_bytes, u64 ram_bytes,
1836 				       u8 compression, u8 encryption,
1837 				       u16 other_encoding, int extent_type)
1838 {
1839 	struct btrfs_root *root = BTRFS_I(inode)->root;
1840 	struct btrfs_file_extent_item *fi;
1841 	struct btrfs_path *path;
1842 	struct extent_buffer *leaf;
1843 	struct btrfs_key ins;
1844 	int ret;
1845 
1846 	path = btrfs_alloc_path();
1847 	if (!path)
1848 		return -ENOMEM;
1849 
1850 	path->leave_spinning = 1;
1851 
1852 	/*
1853 	 * we may be replacing one extent in the tree with another.
1854 	 * The new extent is pinned in the extent map, and we don't want
1855 	 * to drop it from the cache until it is completely in the btree.
1856 	 *
1857 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
1858 	 * The caller is expected to unpin it and allow it to be merged
1859 	 * with the others.
1860 	 */
1861 	ret = btrfs_drop_extents(trans, root, inode, file_pos,
1862 				 file_pos + num_bytes, 0);
1863 	if (ret)
1864 		goto out;
1865 
1866 	ins.objectid = btrfs_ino(inode);
1867 	ins.offset = file_pos;
1868 	ins.type = BTRFS_EXTENT_DATA_KEY;
1869 	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1870 	if (ret)
1871 		goto out;
1872 	leaf = path->nodes[0];
1873 	fi = btrfs_item_ptr(leaf, path->slots[0],
1874 			    struct btrfs_file_extent_item);
1875 	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1876 	btrfs_set_file_extent_type(leaf, fi, extent_type);
1877 	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1878 	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1879 	btrfs_set_file_extent_offset(leaf, fi, 0);
1880 	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1881 	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1882 	btrfs_set_file_extent_compression(leaf, fi, compression);
1883 	btrfs_set_file_extent_encryption(leaf, fi, encryption);
1884 	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1885 
1886 	btrfs_mark_buffer_dirty(leaf);
1887 	btrfs_release_path(path);
1888 
1889 	inode_add_bytes(inode, num_bytes);
1890 
1891 	ins.objectid = disk_bytenr;
1892 	ins.offset = disk_num_bytes;
1893 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1894 	ret = btrfs_alloc_reserved_file_extent(trans, root,
1895 					root->root_key.objectid,
1896 					btrfs_ino(inode), file_pos, &ins);
1897 out:
1898 	btrfs_free_path(path);
1899 
1900 	return ret;
1901 }
1902 
1903 /* snapshot-aware defrag */
1904 struct sa_defrag_extent_backref {
1905 	struct rb_node node;
1906 	struct old_sa_defrag_extent *old;
1907 	u64 root_id;
1908 	u64 inum;
1909 	u64 file_pos;
1910 	u64 extent_offset;
1911 	u64 num_bytes;
1912 	u64 generation;
1913 };
1914 
1915 struct old_sa_defrag_extent {
1916 	struct list_head list;
1917 	struct new_sa_defrag_extent *new;
1918 
1919 	u64 extent_offset;
1920 	u64 bytenr;
1921 	u64 offset;
1922 	u64 len;
1923 	int count;
1924 };
1925 
1926 struct new_sa_defrag_extent {
1927 	struct rb_root root;
1928 	struct list_head head;
1929 	struct btrfs_path *path;
1930 	struct inode *inode;
1931 	u64 file_pos;
1932 	u64 len;
1933 	u64 bytenr;
1934 	u64 disk_len;
1935 	u8 compress_type;
1936 };
1937 
1938 static int backref_comp(struct sa_defrag_extent_backref *b1,
1939 			struct sa_defrag_extent_backref *b2)
1940 {
1941 	if (b1->root_id < b2->root_id)
1942 		return -1;
1943 	else if (b1->root_id > b2->root_id)
1944 		return 1;
1945 
1946 	if (b1->inum < b2->inum)
1947 		return -1;
1948 	else if (b1->inum > b2->inum)
1949 		return 1;
1950 
1951 	if (b1->file_pos < b2->file_pos)
1952 		return -1;
1953 	else if (b1->file_pos > b2->file_pos)
1954 		return 1;
1955 
1956 	/*
1957 	 * [------------------------------] ===> (a range of space)
1958 	 *     |<--->|   |<---->| =============> (fs/file tree A)
1959 	 * |<---------------------------->| ===> (fs/file tree B)
1960 	 *
1961  * A range of space can refer to two file extents in one tree while
1962  * referring to only one file extent in another tree.
1963  *
1964  * So we may process the same disk offset more than once (two extents
1965  * in A) that lands on the same extent (one extent in B), and then
1966  * insert two identical backrefs (both referring to the extent in B).
1967 	 */
1968 	return 0;
1969 }
1970 
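/*
 * backref_insert() keeps the tree ordered by the (root_id, inum, file_pos)
 * triple that backref_comp() compares.  An equal key (comp == 0) walks
 * right, so the duplicate backrefs described above simply end up as
 * neighbours in the tree rather than being rejected.
 */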
1971 static void backref_insert(struct rb_root *root,
1972 			   struct sa_defrag_extent_backref *backref)
1973 {
1974 	struct rb_node **p = &root->rb_node;
1975 	struct rb_node *parent = NULL;
1976 	struct sa_defrag_extent_backref *entry;
1977 	int ret;
1978 
1979 	while (*p) {
1980 		parent = *p;
1981 		entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
1982 
1983 		ret = backref_comp(backref, entry);
1984 		if (ret < 0)
1985 			p = &(*p)->rb_left;
1986 		else
1987 			p = &(*p)->rb_right;
1988 	}
1989 
1990 	rb_link_node(&backref->node, parent, p);
1991 	rb_insert_color(&backref->node, root);
1992 }
1993 
1994 /*
1995  * Note the backref might has changed, and in this case we just return 0.
1996  */
1997 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
1998 				       void *ctx)
1999 {
2000 	struct btrfs_file_extent_item *extent;
2001 	struct btrfs_fs_info *fs_info;
2002 	struct old_sa_defrag_extent *old = ctx;
2003 	struct new_sa_defrag_extent *new = old->new;
2004 	struct btrfs_path *path = new->path;
2005 	struct btrfs_key key;
2006 	struct btrfs_root *root;
2007 	struct sa_defrag_extent_backref *backref;
2008 	struct extent_buffer *leaf;
2009 	struct inode *inode = new->inode;
2010 	int slot;
2011 	int ret;
2012 	u64 extent_offset;
2013 	u64 num_bytes;
2014 
2015 	if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2016 	    inum == btrfs_ino(inode))
2017 		return 0;
2018 
2019 	key.objectid = root_id;
2020 	key.type = BTRFS_ROOT_ITEM_KEY;
2021 	key.offset = (u64)-1;
2022 
2023 	fs_info = BTRFS_I(inode)->root->fs_info;
2024 	root = btrfs_read_fs_root_no_name(fs_info, &key);
2025 	if (IS_ERR(root)) {
2026 		if (PTR_ERR(root) == -ENOENT)
2027 			return 0;
2028 		WARN_ON(1);
2029 		pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
2030 			 inum, offset, root_id);
2031 		return PTR_ERR(root);
2032 	}
2033 
2034 	key.objectid = inum;
2035 	key.type = BTRFS_EXTENT_DATA_KEY;
2036 	if (offset > (u64)-1 << 32)
2037 		key.offset = 0;
2038 	else
2039 		key.offset = offset;
2040 
2041 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2042 	if (WARN_ON(ret < 0))
2043 		return ret;
2044 	ret = 0;
2045 
2046 	while (1) {
2047 		cond_resched();
2048 
2049 		leaf = path->nodes[0];
2050 		slot = path->slots[0];
2051 
2052 		if (slot >= btrfs_header_nritems(leaf)) {
2053 			ret = btrfs_next_leaf(root, path);
2054 			if (ret < 0) {
2055 				goto out;
2056 			} else if (ret > 0) {
2057 				ret = 0;
2058 				goto out;
2059 			}
2060 			continue;
2061 		}
2062 
2063 		path->slots[0]++;
2064 
2065 		btrfs_item_key_to_cpu(leaf, &key, slot);
2066 
2067 		if (key.objectid > inum)
2068 			goto out;
2069 
2070 		if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2071 			continue;
2072 
2073 		extent = btrfs_item_ptr(leaf, slot,
2074 					struct btrfs_file_extent_item);
2075 
2076 		if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2077 			continue;
2078 
2079 		/*
2080 		 * 'offset' refers to the exact key.offset,
2081 		 * NOT the 'offset' field in btrfs_extent_data_ref, ie.
2082 		 * (key.offset - extent_offset).
2083 		 */
2084 		if (key.offset != offset)
2085 			continue;
2086 
2087 		extent_offset = btrfs_file_extent_offset(leaf, extent);
2088 		num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2089 
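		/*
		 * Keep this file extent only if the slice of the on-disk
		 * extent it references, [extent_offset, extent_offset +
		 * num_bytes), overlaps the defragged range,
		 * [old->extent_offset + old->offset, ... + old->len).
		 */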
2090 		if (extent_offset >= old->extent_offset + old->offset +
2091 		    old->len || extent_offset + num_bytes <=
2092 		    old->extent_offset + old->offset)
2093 			continue;
2094 		break;
2095 	}
2096 
2097 	backref = kmalloc(sizeof(*backref), GFP_NOFS);
2098 	if (!backref) {
2099 		ret = -ENOENT;
2100 		goto out;
2101 	}
2102 
2103 	backref->root_id = root_id;
2104 	backref->inum = inum;
2105 	backref->file_pos = offset;
2106 	backref->num_bytes = num_bytes;
2107 	backref->extent_offset = extent_offset;
2108 	backref->generation = btrfs_file_extent_generation(leaf, extent);
2109 	backref->old = old;
2110 	backref_insert(&new->root, backref);
2111 	old->count++;
2112 out:
2113 	btrfs_release_path(path);
2114 	WARN_ON(ret);
2115 	return ret;
2116 }
2117 
2118 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2119 				   struct new_sa_defrag_extent *new)
2120 {
2121 	struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
2122 	struct old_sa_defrag_extent *old, *tmp;
2123 	int ret;
2124 
2125 	new->path = path;
2126 
2127 	list_for_each_entry_safe(old, tmp, &new->head, list) {
2128 		ret = iterate_inodes_from_logical(old->bytenr +
2129 						  old->extent_offset, fs_info,
2130 						  path, record_one_backref,
2131 						  old);
2132 		if (ret < 0 && ret != -ENOENT)
2133 			return false;
2134 
2135 		/* no backref to be processed for this extent */
2136 		if (!old->count) {
2137 			list_del(&old->list);
2138 			kfree(old);
2139 		}
2140 	}
2141 
2142 	if (list_empty(&new->head))
2143 		return false;
2144 
2145 	return true;
2146 }
2147 
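/*
 * A neighbouring file extent item can be grown in place over the relinked
 * range only if it already points at the new extent, is a plain
 * BTRFS_FILE_EXTENT_REG item with the same compression type, and has no
 * encryption or other encoding set.
 */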
2148 static int relink_is_mergable(struct extent_buffer *leaf,
2149 			      struct btrfs_file_extent_item *fi,
2150 			      struct new_sa_defrag_extent *new)
2151 {
2152 	if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
2153 		return 0;
2154 
2155 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2156 		return 0;
2157 
2158 	if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
2159 		return 0;
2160 
2161 	if (btrfs_file_extent_encryption(leaf, fi) ||
2162 	    btrfs_file_extent_other_encoding(leaf, fi))
2163 		return 0;
2164 
2165 	return 1;
2166 }
2167 
2168 /*
2169  * Note the backref might have changed, and in this case we just return 0.
2170  */
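/*
 * Returns 1 when the backref was relinked (the caller then keeps it as
 * 'prev' so a following contiguous backref can be merged into the same
 * file extent item), 0 when it was skipped because the file changed under
 * us, and a negative errno on error.
 */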
2171 static noinline int relink_extent_backref(struct btrfs_path *path,
2172 				 struct sa_defrag_extent_backref *prev,
2173 				 struct sa_defrag_extent_backref *backref)
2174 {
2175 	struct btrfs_file_extent_item *extent;
2176 	struct btrfs_file_extent_item *item;
2177 	struct btrfs_ordered_extent *ordered;
2178 	struct btrfs_trans_handle *trans;
2179 	struct btrfs_fs_info *fs_info;
2180 	struct btrfs_root *root;
2181 	struct btrfs_key key;
2182 	struct extent_buffer *leaf;
2183 	struct old_sa_defrag_extent *old = backref->old;
2184 	struct new_sa_defrag_extent *new = old->new;
2185 	struct inode *src_inode = new->inode;
2186 	struct inode *inode;
2187 	struct extent_state *cached = NULL;
2188 	int ret = 0;
2189 	u64 start;
2190 	u64 len;
2191 	u64 lock_start;
2192 	u64 lock_end;
2193 	bool merge = false;
2194 	int index;
2195 
2196 	if (prev && prev->root_id == backref->root_id &&
2197 	    prev->inum == backref->inum &&
2198 	    prev->file_pos + prev->num_bytes == backref->file_pos)
2199 		merge = true;
2200 
2201 	/* step 1: get root */
2202 	key.objectid = backref->root_id;
2203 	key.type = BTRFS_ROOT_ITEM_KEY;
2204 	key.offset = (u64)-1;
2205 
2206 	fs_info = BTRFS_I(src_inode)->root->fs_info;
2207 	index = srcu_read_lock(&fs_info->subvol_srcu);
2208 
2209 	root = btrfs_read_fs_root_no_name(fs_info, &key);
2210 	if (IS_ERR(root)) {
2211 		srcu_read_unlock(&fs_info->subvol_srcu, index);
2212 		if (PTR_ERR(root) == -ENOENT)
2213 			return 0;
2214 		return PTR_ERR(root);
2215 	}
2216 
2217 	/* step 2: get inode */
2218 	key.objectid = backref->inum;
2219 	key.type = BTRFS_INODE_ITEM_KEY;
2220 	key.offset = 0;
2221 
2222 	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2223 	if (IS_ERR(inode)) {
2224 		srcu_read_unlock(&fs_info->subvol_srcu, index);
2225 		return 0;
2226 	}
2227 
2228 	srcu_read_unlock(&fs_info->subvol_srcu, index);
2229 
2230 	/* step 3: relink backref */
2231 	lock_start = backref->file_pos;
2232 	lock_end = backref->file_pos + backref->num_bytes - 1;
2233 	lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2234 			 0, &cached);
2235 
2236 	ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2237 	if (ordered) {
2238 		btrfs_put_ordered_extent(ordered);
2239 		goto out_unlock;
2240 	}
2241 
2242 	trans = btrfs_join_transaction(root);
2243 	if (IS_ERR(trans)) {
2244 		ret = PTR_ERR(trans);
2245 		goto out_unlock;
2246 	}
2247 
2248 	key.objectid = backref->inum;
2249 	key.type = BTRFS_EXTENT_DATA_KEY;
2250 	key.offset = backref->file_pos;
2251 
2252 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2253 	if (ret < 0) {
2254 		goto out_free_path;
2255 	} else if (ret > 0) {
2256 		ret = 0;
2257 		goto out_free_path;
2258 	}
2259 
2260 	extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2261 				struct btrfs_file_extent_item);
2262 
2263 	if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2264 	    backref->generation)
2265 		goto out_free_path;
2266 
2267 	btrfs_release_path(path);
2268 
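	/*
	 * Clamp the relinked range to the intersection of this backref and
	 * the defragged part of the old extent.  A hypothetical example:
	 * with old->extent_offset + old->offset == 8k, old->len == 8k and a
	 * backref covering extent offsets [4k, 20k) at file position F,
	 * this yields start = F + 4k and len = min(20k, 16k) - max(4k, 8k),
	 * i.e. 8k.
	 */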
2269 	start = backref->file_pos;
2270 	if (backref->extent_offset < old->extent_offset + old->offset)
2271 		start += old->extent_offset + old->offset -
2272 			 backref->extent_offset;
2273 
2274 	len = min(backref->extent_offset + backref->num_bytes,
2275 		  old->extent_offset + old->offset + old->len);
2276 	len -= max(backref->extent_offset, old->extent_offset + old->offset);
2277 
2278 	ret = btrfs_drop_extents(trans, root, inode, start,
2279 				 start + len, 1);
2280 	if (ret)
2281 		goto out_free_path;
2282 again:
2283 	key.objectid = btrfs_ino(inode);
2284 	key.type = BTRFS_EXTENT_DATA_KEY;
2285 	key.offset = start;
2286 
2287 	path->leave_spinning = 1;
2288 	if (merge) {
2289 		struct btrfs_file_extent_item *fi;
2290 		u64 extent_len;
2291 		struct btrfs_key found_key;
2292 
2293 		ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
2294 		if (ret < 0)
2295 			goto out_free_path;
2296 
2297 		path->slots[0]--;
2298 		leaf = path->nodes[0];
2299 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2300 
2301 		fi = btrfs_item_ptr(leaf, path->slots[0],
2302 				    struct btrfs_file_extent_item);
2303 		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2304 
2305 		if (extent_len + found_key.offset == start &&
2306 		    relink_is_mergable(leaf, fi, new)) {
2307 			btrfs_set_file_extent_num_bytes(leaf, fi,
2308 							extent_len + len);
2309 			btrfs_mark_buffer_dirty(leaf);
2310 			inode_add_bytes(inode, len);
2311 
2312 			ret = 1;
2313 			goto out_free_path;
2314 		} else {
2315 			merge = false;
2316 			btrfs_release_path(path);
2317 			goto again;
2318 		}
2319 	}
2320 
2321 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2322 					sizeof(*extent));
2323 	if (ret) {
2324 		btrfs_abort_transaction(trans, root, ret);
2325 		goto out_free_path;
2326 	}
2327 
2328 	leaf = path->nodes[0];
2329 	item = btrfs_item_ptr(leaf, path->slots[0],
2330 				struct btrfs_file_extent_item);
2331 	btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2332 	btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2333 	btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2334 	btrfs_set_file_extent_num_bytes(leaf, item, len);
2335 	btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2336 	btrfs_set_file_extent_generation(leaf, item, trans->transid);
2337 	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2338 	btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2339 	btrfs_set_file_extent_encryption(leaf, item, 0);
2340 	btrfs_set_file_extent_other_encoding(leaf, item, 0);
2341 
2342 	btrfs_mark_buffer_dirty(leaf);
2343 	inode_add_bytes(inode, len);
2344 	btrfs_release_path(path);
2345 
2346 	ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
2347 			new->disk_len, 0,
2348 			backref->root_id, backref->inum,
2349 			new->file_pos, 0);	/* start - extent_offset */
2350 	if (ret) {
2351 		btrfs_abort_transaction(trans, root, ret);
2352 		goto out_free_path;
2353 	}
2354 
2355 	ret = 1;
2356 out_free_path:
2357 	btrfs_release_path(path);
2358 	path->leave_spinning = 0;
2359 	btrfs_end_transaction(trans, root);
2360 out_unlock:
2361 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2362 			     &cached, GFP_NOFS);
2363 	iput(inode);
2364 	return ret;
2365 }
2366 
2367 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
2368 {
2369 	struct old_sa_defrag_extent *old, *tmp;
2370 
2371 	if (!new)
2372 		return;
2373 
2374 	list_for_each_entry_safe(old, tmp, &new->head, list) {
2375 		list_del(&old->list);
2376 		kfree(old);
2377 	}
2378 	kfree(new);
2379 }
2380 
2381 static void relink_file_extents(struct new_sa_defrag_extent *new)
2382 {
2383 	struct btrfs_path *path;
2384 	struct sa_defrag_extent_backref *backref;
2385 	struct sa_defrag_extent_backref *prev = NULL;
2386 	struct inode *inode;
2387 	struct btrfs_root *root;
2388 	struct rb_node *node;
2389 	int ret;
2390 
2391 	inode = new->inode;
2392 	root = BTRFS_I(inode)->root;
2393 
2394 	path = btrfs_alloc_path();
2395 	if (!path)
2396 		return;
2397 
2398 	if (!record_extent_backrefs(path, new)) {
2399 		btrfs_free_path(path);
2400 		goto out;
2401 	}
2402 	btrfs_release_path(path);
2403 
2404 	while (1) {
2405 		node = rb_first(&new->root);
2406 		if (!node)
2407 			break;
2408 		rb_erase(node, &new->root);
2409 
2410 		backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2411 
2412 		ret = relink_extent_backref(path, prev, backref);
2413 		WARN_ON(ret < 0);
2414 
2415 		kfree(prev);
2416 
2417 		if (ret == 1)
2418 			prev = backref;
2419 		else
2420 			prev = NULL;
2421 		cond_resched();
2422 	}
2423 	kfree(prev);
2424 
2425 	btrfs_free_path(path);
2426 out:
2427 	free_sa_defrag_extent(new);
2428 
2429 	atomic_dec(&root->fs_info->defrag_running);
2430 	wake_up(&root->fs_info->transaction_wait);
2431 }
2432 
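/*
 * Snapshot-aware defrag, step one: at ordered extent finish time, collect
 * every old file extent item that overlapped the range we just rewrote so
 * that relink_file_extents() can later point the other references at the
 * new extent.  Bumps fs_info->defrag_running; relink_file_extents() (or
 * the error path in btrfs_finish_ordered_io()) drops it again.
 */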
2433 static struct new_sa_defrag_extent *
2434 record_old_file_extents(struct inode *inode,
2435 			struct btrfs_ordered_extent *ordered)
2436 {
2437 	struct btrfs_root *root = BTRFS_I(inode)->root;
2438 	struct btrfs_path *path;
2439 	struct btrfs_key key;
2440 	struct old_sa_defrag_extent *old;
2441 	struct new_sa_defrag_extent *new;
2442 	int ret;
2443 
2444 	new = kmalloc(sizeof(*new), GFP_NOFS);
2445 	if (!new)
2446 		return NULL;
2447 
2448 	new->inode = inode;
2449 	new->file_pos = ordered->file_offset;
2450 	new->len = ordered->len;
2451 	new->bytenr = ordered->start;
2452 	new->disk_len = ordered->disk_len;
2453 	new->compress_type = ordered->compress_type;
2454 	new->root = RB_ROOT;
2455 	INIT_LIST_HEAD(&new->head);
2456 
2457 	path = btrfs_alloc_path();
2458 	if (!path)
2459 		goto out_kfree;
2460 
2461 	key.objectid = btrfs_ino(inode);
2462 	key.type = BTRFS_EXTENT_DATA_KEY;
2463 	key.offset = new->file_pos;
2464 
2465 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2466 	if (ret < 0)
2467 		goto out_free_path;
2468 	if (ret > 0 && path->slots[0] > 0)
2469 		path->slots[0]--;
2470 
2471 	/* find out all the old extents for the file range */
2472 	while (1) {
2473 		struct btrfs_file_extent_item *extent;
2474 		struct extent_buffer *l;
2475 		int slot;
2476 		u64 num_bytes;
2477 		u64 offset;
2478 		u64 end;
2479 		u64 disk_bytenr;
2480 		u64 extent_offset;
2481 
2482 		l = path->nodes[0];
2483 		slot = path->slots[0];
2484 
2485 		if (slot >= btrfs_header_nritems(l)) {
2486 			ret = btrfs_next_leaf(root, path);
2487 			if (ret < 0)
2488 				goto out_free_path;
2489 			else if (ret > 0)
2490 				break;
2491 			continue;
2492 		}
2493 
2494 		btrfs_item_key_to_cpu(l, &key, slot);
2495 
2496 		if (key.objectid != btrfs_ino(inode))
2497 			break;
2498 		if (key.type != BTRFS_EXTENT_DATA_KEY)
2499 			break;
2500 		if (key.offset >= new->file_pos + new->len)
2501 			break;
2502 
2503 		extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2504 
2505 		num_bytes = btrfs_file_extent_num_bytes(l, extent);
2506 		if (key.offset + num_bytes < new->file_pos)
2507 			goto next;
2508 
2509 		disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2510 		if (!disk_bytenr)
2511 			goto next;
2512 
2513 		extent_offset = btrfs_file_extent_offset(l, extent);
2514 
2515 		old = kmalloc(sizeof(*old), GFP_NOFS);
2516 		if (!old)
2517 			goto out_free_path;
2518 
2519 		offset = max(new->file_pos, key.offset);
2520 		end = min(new->file_pos + new->len, key.offset + num_bytes);
2521 
2522 		old->bytenr = disk_bytenr;
2523 		old->extent_offset = extent_offset;
2524 		old->offset = offset - key.offset;
2525 		old->len = end - offset;
2526 		old->new = new;
2527 		old->count = 0;
2528 		list_add_tail(&old->list, &new->head);
2529 next:
2530 		path->slots[0]++;
2531 		cond_resched();
2532 	}
2533 
2534 	btrfs_free_path(path);
2535 	atomic_inc(&root->fs_info->defrag_running);
2536 
2537 	return new;
2538 
2539 out_free_path:
2540 	btrfs_free_path(path);
2541 out_kfree:
2542 	free_sa_defrag_extent(new);
2543 	return NULL;
2544 }
2545 
2546 /*
2547  * As ordered data IO finishes, this gets called so we can finish an
2548  * ordered extent if the range of bytes in the file it covers is fully
2549  * written.
2550  */
2556 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2557 {
2558 	struct inode *inode = ordered_extent->inode;
2559 	struct btrfs_root *root = BTRFS_I(inode)->root;
2560 	struct btrfs_trans_handle *trans = NULL;
2561 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2562 	struct extent_state *cached_state = NULL;
2563 	struct new_sa_defrag_extent *new = NULL;
2564 	int compress_type = 0;
2565 	int ret = 0;
2566 	u64 logical_len = ordered_extent->len;
2567 	bool nolock;
2568 	bool truncated = false;
2569 
2570 	nolock = btrfs_is_free_space_inode(inode);
2571 
2572 	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2573 		ret = -EIO;
2574 		goto out;
2575 	}
2576 
2577 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
2578 		truncated = true;
2579 		logical_len = ordered_extent->truncated_len;
2580 		/* Truncated the entire extent, don't bother adding */
2581 		if (!logical_len)
2582 			goto out;
2583 	}
2584 
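	/*
	 * Nocow writes landed in place, so there is no file extent item to
	 * insert and no csums to add; just update the ordered i_size and
	 * the inode item and bail out early.
	 */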
2585 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2586 		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2587 		btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2588 		if (nolock)
2589 			trans = btrfs_join_transaction_nolock(root);
2590 		else
2591 			trans = btrfs_join_transaction(root);
2592 		if (IS_ERR(trans)) {
2593 			ret = PTR_ERR(trans);
2594 			trans = NULL;
2595 			goto out;
2596 		}
2597 		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2598 		ret = btrfs_update_inode_fallback(trans, root, inode);
2599 		if (ret) /* -ENOMEM or corruption */
2600 			btrfs_abort_transaction(trans, root, ret);
2601 		goto out;
2602 	}
2603 
2604 	lock_extent_bits(io_tree, ordered_extent->file_offset,
2605 			 ordered_extent->file_offset + ordered_extent->len - 1,
2606 			 0, &cached_state);
2607 
2608 	ret = test_range_bit(io_tree, ordered_extent->file_offset,
2609 			ordered_extent->file_offset + ordered_extent->len - 1,
2610 			EXTENT_DEFRAG, 1, cached_state);
2611 	if (ret) {
2612 		u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
2613 		if (last_snapshot >= BTRFS_I(inode)->generation)
2614 			/* the inode is shared */
2615 			new = record_old_file_extents(inode, ordered_extent);
2616 
2617 		clear_extent_bit(io_tree, ordered_extent->file_offset,
2618 			ordered_extent->file_offset + ordered_extent->len - 1,
2619 			EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
2620 	}
2621 
2622 	if (nolock)
2623 		trans = btrfs_join_transaction_nolock(root);
2624 	else
2625 		trans = btrfs_join_transaction(root);
2626 	if (IS_ERR(trans)) {
2627 		ret = PTR_ERR(trans);
2628 		trans = NULL;
2629 		goto out_unlock;
2630 	}
2631 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2632 
2633 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
2634 		compress_type = ordered_extent->compress_type;
2635 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2636 		BUG_ON(compress_type);
2637 		ret = btrfs_mark_extent_written(trans, inode,
2638 						ordered_extent->file_offset,
2639 						ordered_extent->file_offset +
2640 						logical_len);
2641 	} else {
2642 		BUG_ON(root == root->fs_info->tree_root);
2643 		ret = insert_reserved_file_extent(trans, inode,
2644 						ordered_extent->file_offset,
2645 						ordered_extent->start,
2646 						ordered_extent->disk_len,
2647 						logical_len, logical_len,
2648 						compress_type, 0, 0,
2649 						BTRFS_FILE_EXTENT_REG);
2650 	}
2651 	unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
2652 			   ordered_extent->file_offset, ordered_extent->len,
2653 			   trans->transid);
2654 	if (ret < 0) {
2655 		btrfs_abort_transaction(trans, root, ret);
2656 		goto out_unlock;
2657 	}
2658 
2659 	add_pending_csums(trans, inode, ordered_extent->file_offset,
2660 			  &ordered_extent->list);
2661 
2662 	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2663 	ret = btrfs_update_inode_fallback(trans, root, inode);
2664 	if (ret) { /* -ENOMEM or corruption */
2665 		btrfs_abort_transaction(trans, root, ret);
2666 		goto out_unlock;
2667 	}
2668 	ret = 0;
2669 out_unlock:
2670 	unlock_extent_cached(io_tree, ordered_extent->file_offset,
2671 			     ordered_extent->file_offset +
2672 			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
2673 out:
2674 	if (root != root->fs_info->tree_root)
2675 		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
2676 	if (trans)
2677 		btrfs_end_transaction(trans, root);
2678 
2679 	if (ret || truncated) {
2680 		u64 start, end;
2681 
2682 		if (truncated)
2683 			start = ordered_extent->file_offset + logical_len;
2684 		else
2685 			start = ordered_extent->file_offset;
2686 		end = ordered_extent->file_offset + ordered_extent->len - 1;
2687 		clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS);
2688 
2689 		/* Drop the cache for the part of the extent we didn't write. */
2690 		btrfs_drop_extent_cache(inode, start, end, 0);
2691 
2692 		/*
2693 		 * If the ordered extent had an IOERR or something else went
2694 		 * wrong we need to return the space for this ordered extent
2695 		 * back to the allocator.  We only free the extent in the
2696 		 * truncated case if we didn't write out the extent at all.
2697 		 */
2698 		if ((ret || !logical_len) &&
2699 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2700 		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
2701 			btrfs_free_reserved_extent(root, ordered_extent->start,
2702 						   ordered_extent->disk_len);
2703 	}
2704 
2706 	/*
2707 	 * This needs to be done to make sure anybody waiting knows we are done
2708 	 * updating everything for this ordered extent.
2709 	 */
2710 	btrfs_remove_ordered_extent(inode, ordered_extent);
2711 
2712 	/* for snapshot-aware defrag */
2713 	if (new) {
2714 		if (ret) {
2715 			free_sa_defrag_extent(new);
2716 			atomic_dec(&root->fs_info->defrag_running);
2717 		} else {
2718 			relink_file_extents(new);
2719 		}
2720 	}
2721 
2722 	/* once for us */
2723 	btrfs_put_ordered_extent(ordered_extent);
2724 	/* once for the tree */
2725 	btrfs_put_ordered_extent(ordered_extent);
2726 
2727 	return ret;
2728 }
2729 
2730 static void finish_ordered_fn(struct btrfs_work *work)
2731 {
2732 	struct btrfs_ordered_extent *ordered_extent;
2733 	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
2734 	btrfs_finish_ordered_io(ordered_extent);
2735 }
2736 
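/*
 * Writeback completion hook: this page's IO is done, so drop its
 * contribution to the ordered extent.  Once the last page of the ordered
 * extent completes, btrfs_dec_test_ordered_pending() hands us the ordered
 * extent and we push btrfs_finish_ordered_io() off to a worker; the free
 * space inode gets its own worker pool to avoid deadlocking against the
 * regular endio write workers.
 */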
2737 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
2738 				struct extent_state *state, int uptodate)
2739 {
2740 	struct inode *inode = page->mapping->host;
2741 	struct btrfs_root *root = BTRFS_I(inode)->root;
2742 	struct btrfs_ordered_extent *ordered_extent = NULL;
2743 	struct btrfs_workers *workers;
2744 
2745 	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
2746 
2747 	ClearPagePrivate2(page);
2748 	if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
2749 					    end - start + 1, uptodate))
2750 		return 0;
2751 
2752 	ordered_extent->work.func = finish_ordered_fn;
2753 	ordered_extent->work.flags = 0;
2754 
2755 	if (btrfs_is_free_space_inode(inode))
2756 		workers = &root->fs_info->endio_freespace_worker;
2757 	else
2758 		workers = &root->fs_info->endio_write_workers;
2759 	btrfs_queue_worker(workers, &ordered_extent->work);
2760 
2761 	return 0;
2762 }
2763 
2764 /*
2765  * when reads are done, we need to check csums to verify the data is correct
2766  * if there's a match, we allow the bio to finish.  If not, the code in
2767  * extent_io.c will try to find good copies for us.
2768  */
2769 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
2770 				      u64 phy_offset, struct page *page,
2771 				      u64 start, u64 end, int mirror)
2772 {
2773 	size_t offset = start - page_offset(page);
2774 	struct inode *inode = page->mapping->host;
2775 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2776 	char *kaddr;
2777 	struct btrfs_root *root = BTRFS_I(inode)->root;
2778 	u32 csum_expected;
2779 	u32 csum = ~(u32)0;
2780 	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
2781 	                              DEFAULT_RATELIMIT_BURST);
2782 
2783 	if (PageChecked(page)) {
2784 		ClearPageChecked(page);
2785 		goto good;
2786 	}
2787 
2788 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
2789 		goto good;
2790 
2791 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
2792 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
2793 		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
2794 				  GFP_NOFS);
2795 		return 0;
2796 	}
2797 
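	/*
	 * io_bio->csum holds one u32 checksum per filesystem block of this
	 * bio; shifting the byte offset by s_blocksize_bits turns it into
	 * an index into that array.
	 */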
2798 	phy_offset >>= inode->i_sb->s_blocksize_bits;
2799 	csum_expected = *(((u32 *)io_bio->csum) + phy_offset);
2800 
2801 	kaddr = kmap_atomic(page);
2802 	csum = btrfs_csum_data(kaddr + offset, csum, end - start + 1);
2803 	btrfs_csum_final(csum, (char *)&csum);
2804 	if (csum != csum_expected)
2805 		goto zeroit;
2806 
2807 	kunmap_atomic(kaddr);
2808 good:
2809 	return 0;
2810 
2811 zeroit:
2812 	if (__ratelimit(&_rs))
2813 		btrfs_info(root->fs_info, "csum failed ino %llu off %llu csum %u expected csum %u",
2814 			btrfs_ino(page->mapping->host), start, csum, csum_expected);
2815 	memset(kaddr + offset, 1, end - start + 1);
2816 	flush_dcache_page(page);
2817 	kunmap_atomic(kaddr);
2818 	if (csum_expected == 0)
2819 		return 0;
2820 	return -EIO;
2821 }
2822 
2823 struct delayed_iput {
2824 	struct list_head list;
2825 	struct inode *inode;
2826 };
2827 
2828 /* JDM: If this is fs-wide, why can't we add a pointer to
2829  * btrfs_inode instead and avoid the allocation? */
2830 void btrfs_add_delayed_iput(struct inode *inode)
2831 {
2832 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2833 	struct delayed_iput *delayed;
2834 
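	/*
	 * If i_count is above 1, just drop our reference here; only when we
	 * would be dropping the last reference (i_count == 1) do we defer
	 * the iput, since the final iput may run eviction work that is not
	 * safe in this context.
	 */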
2835 	if (atomic_add_unless(&inode->i_count, -1, 1))
2836 		return;
2837 
2838 	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
2839 	delayed->inode = inode;
2840 
2841 	spin_lock(&fs_info->delayed_iput_lock);
2842 	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
2843 	spin_unlock(&fs_info->delayed_iput_lock);
2844 }
2845 
2846 void btrfs_run_delayed_iputs(struct btrfs_root *root)
2847 {
2848 	LIST_HEAD(list);
2849 	struct btrfs_fs_info *fs_info = root->fs_info;
2850 	struct delayed_iput *delayed;
2851 	int empty;
2852 
2853 	spin_lock(&fs_info->delayed_iput_lock);
2854 	empty = list_empty(&fs_info->delayed_iputs);
2855 	spin_unlock(&fs_info->delayed_iput_lock);
2856 	if (empty)
2857 		return;
2858 
2859 	spin_lock(&fs_info->delayed_iput_lock);
2860 	list_splice_init(&fs_info->delayed_iputs, &list);
2861 	spin_unlock(&fs_info->delayed_iput_lock);
2862 
2863 	while (!list_empty(&list)) {
2864 		delayed = list_entry(list.next, struct delayed_iput, list);
2865 		list_del(&delayed->list);
2866 		iput(delayed->inode);
2867 		kfree(delayed);
2868 	}
2869 }
2870 
2871 /*
2872  * This is called at transaction commit time. If there are no orphan
2873  * files in the subvolume, it removes the orphan item and frees the
2874  * block_rsv structure.
2875  */
2876 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2877 			      struct btrfs_root *root)
2878 {
2879 	struct btrfs_block_rsv *block_rsv;
2880 	int ret;
2881 
2882 	if (atomic_read(&root->orphan_inodes) ||
2883 	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
2884 		return;
2885 
2886 	spin_lock(&root->orphan_lock);
2887 	if (atomic_read(&root->orphan_inodes)) {
2888 		spin_unlock(&root->orphan_lock);
2889 		return;
2890 	}
2891 
2892 	if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
2893 		spin_unlock(&root->orphan_lock);
2894 		return;
2895 	}
2896 
2897 	block_rsv = root->orphan_block_rsv;
2898 	root->orphan_block_rsv = NULL;
2899 	spin_unlock(&root->orphan_lock);
2900 
2901 	if (root->orphan_item_inserted &&
2902 	    btrfs_root_refs(&root->root_item) > 0) {
2903 		ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
2904 					    root->root_key.objectid);
2905 		if (ret)
2906 			btrfs_abort_transaction(trans, root, ret);
2907 		else
2908 			root->orphan_item_inserted = 0;
2909 	}
2910 
2911 	if (block_rsv) {
2912 		WARN_ON(block_rsv->size > 0);
2913 		btrfs_free_block_rsv(root, block_rsv);
2914 	}
2915 }
2916 
2917 /*
2918  * This creates an orphan entry for the given inode in case something goes
2919  * wrong in the middle of an unlink/truncate.
2920  *
2921  * NOTE: caller of this function should reserve 5 units of metadata for
2922  *	 this function.
2923  */
2924 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2925 {
2926 	struct btrfs_root *root = BTRFS_I(inode)->root;
2927 	struct btrfs_block_rsv *block_rsv = NULL;
2928 	int reserve = 0;
2929 	int insert = 0;
2930 	int ret;
2931 
2932 	if (!root->orphan_block_rsv) {
2933 		block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
2934 		if (!block_rsv)
2935 			return -ENOMEM;
2936 	}
2937 
2938 	spin_lock(&root->orphan_lock);
2939 	if (!root->orphan_block_rsv) {
2940 		root->orphan_block_rsv = block_rsv;
2941 	} else if (block_rsv) {
2942 		btrfs_free_block_rsv(root, block_rsv);
2943 		block_rsv = NULL;
2944 	}
2945 
2946 	if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2947 			      &BTRFS_I(inode)->runtime_flags)) {
2948 #if 0
2949 		/*
2950 		 * For proper ENOSPC handling, we should do orphan
2951 		 * cleanup when mounting. But this introduces backward
2952 		 * compatibility issue.
2953 		 */
2954 		if (!xchg(&root->orphan_item_inserted, 1))
2955 			insert = 2;
2956 		else
2957 			insert = 1;
2958 #endif
2959 		insert = 1;
2960 		atomic_inc(&root->orphan_inodes);
2961 	}
2962 
2963 	if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
2964 			      &BTRFS_I(inode)->runtime_flags))
2965 		reserve = 1;
2966 	spin_unlock(&root->orphan_lock);
2967 
2968 	/* grab metadata reservation from transaction handle */
2969 	if (reserve) {
2970 		ret = btrfs_orphan_reserve_metadata(trans, inode);
2971 		BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
2972 	}
2973 
2974 	/* insert an orphan item to track this unlinked/truncated file */
2975 	if (insert >= 1) {
2976 		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
2977 		if (ret) {
2978 			atomic_dec(&root->orphan_inodes);
2979 			if (reserve) {
2980 				clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
2981 					  &BTRFS_I(inode)->runtime_flags);
2982 				btrfs_orphan_release_metadata(inode);
2983 			}
2984 			if (ret != -EEXIST) {
2985 				clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2986 					  &BTRFS_I(inode)->runtime_flags);
2987 				btrfs_abort_transaction(trans, root, ret);
2988 				return ret;
2989 			}
2990 		}
2991 		ret = 0;
2992 	}
2993 
2994 	/* insert an orphan item to track subvolume contains orphan files */
2995 	if (insert >= 2) {
2996 		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
2997 					       root->root_key.objectid);
2998 		if (ret && ret != -EEXIST) {
2999 			btrfs_abort_transaction(trans, root, ret);
3000 			return ret;
3001 		}
3002 	}
3003 	return 0;
3004 }
3005 
3006 /*
3007  * We have done the truncate/delete so we can go ahead and remove the orphan
3008  * item for this particular inode.
3009  */
3010 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3011 			    struct inode *inode)
3012 {
3013 	struct btrfs_root *root = BTRFS_I(inode)->root;
3014 	int delete_item = 0;
3015 	int release_rsv = 0;
3016 	int ret = 0;
3017 
3018 	spin_lock(&root->orphan_lock);
3019 	if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3020 			       &BTRFS_I(inode)->runtime_flags))
3021 		delete_item = 1;
3022 
3023 	if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3024 			       &BTRFS_I(inode)->runtime_flags))
3025 		release_rsv = 1;
3026 	spin_unlock(&root->orphan_lock);
3027 
3028 	if (delete_item) {
3029 		atomic_dec(&root->orphan_inodes);
3030 		if (trans)
3031 			ret = btrfs_del_orphan_item(trans, root,
3032 						    btrfs_ino(inode));
3033 	}
3034 
3035 	if (release_rsv)
3036 		btrfs_orphan_release_metadata(inode);
3037 
3038 	return ret;
3039 }
3040 
3041 /*
3042  * this cleans up any orphan items that may be left over from the last
3043  * use of this root.
3044  */
3045 int btrfs_orphan_cleanup(struct btrfs_root *root)
3046 {
3047 	struct btrfs_path *path;
3048 	struct extent_buffer *leaf;
3049 	struct btrfs_key key, found_key;
3050 	struct btrfs_trans_handle *trans;
3051 	struct inode *inode;
3052 	u64 last_objectid = 0;
3053 	int ret = 0, nr_unlink = 0, nr_truncate = 0;
3054 
3055 	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3056 		return 0;
3057 
3058 	path = btrfs_alloc_path();
3059 	if (!path) {
3060 		ret = -ENOMEM;
3061 		goto out;
3062 	}
3063 	path->reada = -1;
3064 
3065 	key.objectid = BTRFS_ORPHAN_OBJECTID;
3066 	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
3067 	key.offset = (u64)-1;
3068 
3069 	while (1) {
3070 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3071 		if (ret < 0)
3072 			goto out;
3073 
3074 		/*
3075 		 * if ret == 0, we found what we were searching for, which
3076 		 * is weird, but possible, so only screw with the path if we
3077 		 * didn't find the key and see if we have stuff that matches
3078 		 */
3079 		if (ret > 0) {
3080 			ret = 0;
3081 			if (path->slots[0] == 0)
3082 				break;
3083 			path->slots[0]--;
3084 		}
3085 
3086 		/* pull out the item */
3087 		leaf = path->nodes[0];
3088 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3089 
3090 		/* make sure the item matches what we want */
3091 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3092 			break;
3093 		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
3094 			break;
3095 
3096 		/* release the path since we're done with it */
3097 		btrfs_release_path(path);
3098 
3099 		/*
3100 		 * this is where we are basically btrfs_lookup, without the
3101 		 * crossing root thing.  we store the inode number in the
3102 		 * offset of the orphan item.
3103 		 */
3104 
3105 		if (found_key.offset == last_objectid) {
3106 			btrfs_err(root->fs_info,
3107 				"Error removing orphan entry, stopping orphan cleanup");
3108 			ret = -EINVAL;
3109 			goto out;
3110 		}
3111 
3112 		last_objectid = found_key.offset;
3113 
3114 		found_key.objectid = found_key.offset;
3115 		found_key.type = BTRFS_INODE_ITEM_KEY;
3116 		found_key.offset = 0;
3117 		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
3118 		ret = PTR_ERR_OR_ZERO(inode);
3119 		if (ret && ret != -ESTALE)
3120 			goto out;
3121 
3122 		if (ret == -ESTALE && root == root->fs_info->tree_root) {
3123 			struct btrfs_root *dead_root;
3124 			struct btrfs_fs_info *fs_info = root->fs_info;
3125 			int is_dead_root = 0;
3126 
3127 			/*
3128 			 * this is an orphan in the tree root. Currently these
3129 			 * could come from 2 sources:
3130 			 *  a) a snapshot deletion in progress
3131 			 *  b) a free space cache inode
3132 			 * We need to distinguish those two, as the snapshot
3133 			 * orphan must not get deleted.
3134 			 * find_dead_roots already ran before us, so if this
3135 			 * is a snapshot deletion, we should find the root
3136 			 * in the dead_roots list
3137 			 */
3138 			spin_lock(&fs_info->trans_lock);
3139 			list_for_each_entry(dead_root, &fs_info->dead_roots,
3140 					    root_list) {
3141 				if (dead_root->root_key.objectid ==
3142 				    found_key.objectid) {
3143 					is_dead_root = 1;
3144 					break;
3145 				}
3146 			}
3147 			spin_unlock(&fs_info->trans_lock);
3148 			if (is_dead_root) {
3149 				/* prevent this orphan from being found again */
3150 				key.offset = found_key.objectid - 1;
3151 				continue;
3152 			}
3153 		}
3154 		/*
3155 		 * Inode is already gone but the orphan item is still there,
3156 		 * kill the orphan item.
3157 		 */
3158 		if (ret == -ESTALE) {
3159 			trans = btrfs_start_transaction(root, 1);
3160 			if (IS_ERR(trans)) {
3161 				ret = PTR_ERR(trans);
3162 				goto out;
3163 			}
3164 			btrfs_debug(root->fs_info, "auto deleting %Lu",
3165 				found_key.objectid);
3166 			ret = btrfs_del_orphan_item(trans, root,
3167 						    found_key.objectid);
3168 			btrfs_end_transaction(trans, root);
3169 			if (ret)
3170 				goto out;
3171 			continue;
3172 		}
3173 
3174 		/*
3175 		 * add this inode to the orphan list so btrfs_orphan_del does
3176 		 * the proper thing when we hit it
3177 		 */
3178 		set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3179 			&BTRFS_I(inode)->runtime_flags);
3180 		atomic_inc(&root->orphan_inodes);
3181 
3182 		/* if we have links, this was a truncate, lets do that */
3183 		if (inode->i_nlink) {
3184 			if (WARN_ON(!S_ISREG(inode->i_mode))) {
3185 				iput(inode);
3186 				continue;
3187 			}
3188 			nr_truncate++;
3189 
3190 			/* 1 for the orphan item deletion. */
3191 			trans = btrfs_start_transaction(root, 1);
3192 			if (IS_ERR(trans)) {
3193 				iput(inode);
3194 				ret = PTR_ERR(trans);
3195 				goto out;
3196 			}
3197 			ret = btrfs_orphan_add(trans, inode);
3198 			btrfs_end_transaction(trans, root);
3199 			if (ret) {
3200 				iput(inode);
3201 				goto out;
3202 			}
3203 
3204 			ret = btrfs_truncate(inode);
3205 			if (ret)
3206 				btrfs_orphan_del(NULL, inode);
3207 		} else {
3208 			nr_unlink++;
3209 		}
3210 
3211 		/* this will do delete_inode and everything for us */
3212 		iput(inode);
3213 		if (ret)
3214 			goto out;
3215 	}
3216 	/* release the path since we're done with it */
3217 	btrfs_release_path(path);
3218 
3219 	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3220 
3221 	if (root->orphan_block_rsv)
3222 		btrfs_block_rsv_release(root, root->orphan_block_rsv,
3223 					(u64)-1);
3224 
3225 	if (root->orphan_block_rsv || root->orphan_item_inserted) {
3226 		trans = btrfs_join_transaction(root);
3227 		if (!IS_ERR(trans))
3228 			btrfs_end_transaction(trans, root);
3229 	}
3230 
3231 	if (nr_unlink)
3232 		btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
3233 	if (nr_truncate)
3234 		btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
3235 
3236 out:
3237 	if (ret)
3238 		btrfs_crit(root->fs_info,
3239 			"could not do orphan cleanup %d", ret);
3240 	btrfs_free_path(path);
3241 	return ret;
3242 }
3243 
3244 /*
3245  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3246  * don't find any xattrs, we know there can't be any acls.
3247  *
3248  * slot is the slot the inode is in, objectid is the objectid of the inode
3249  */
3250 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3251 					  int slot, u64 objectid)
3252 {
3253 	u32 nritems = btrfs_header_nritems(leaf);
3254 	struct btrfs_key found_key;
3255 	static u64 xattr_access = 0;
3256 	static u64 xattr_default = 0;
3257 	int scanned = 0;
3258 
3259 	if (!xattr_access) {
3260 		xattr_access = btrfs_name_hash(POSIX_ACL_XATTR_ACCESS,
3261 					strlen(POSIX_ACL_XATTR_ACCESS));
3262 		xattr_default = btrfs_name_hash(POSIX_ACL_XATTR_DEFAULT,
3263 					strlen(POSIX_ACL_XATTR_DEFAULT));
3264 	}
3265 
3266 	slot++;
3267 	while (slot < nritems) {
3268 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3269 
3270 		/* we found a different objectid, there must not be acls */
3271 		if (found_key.objectid != objectid)
3272 			return 0;
3273 
3274 		/* we found an xattr, assume we've got an acl */
3275 		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3276 			if (found_key.offset == xattr_access ||
3277 			    found_key.offset == xattr_default)
3278 				return 1;
3279 		}
3280 
3281 		/*
3282 		 * we found a key greater than an xattr key, there can't
3283 		 * be any acls later on
3284 		 */
3285 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3286 			return 0;
3287 
3288 		slot++;
3289 		scanned++;
3290 
3291 		/*
3292 		 * it goes inode, inode backrefs, xattrs, extents,
3293 		 * so if there are a ton of hard links to an inode there can
3294 		 * be a lot of backrefs.  Don't waste time searching too hard,
3295 		 * this is just an optimization
3296 		 */
3297 		if (scanned >= 8)
3298 			break;
3299 	}
3300 	/* we hit the end of the leaf before we found an xattr or
3301 	 * something larger than an xattr.  We have to assume the inode
3302 	 * has acls
3303 	 */
3304 	return 1;
3305 }
3306 
3307 /*
3308  * read an inode from the btree into the in-memory inode
3309  */
3310 static void btrfs_read_locked_inode(struct inode *inode)
3311 {
3312 	struct btrfs_path *path;
3313 	struct extent_buffer *leaf;
3314 	struct btrfs_inode_item *inode_item;
3315 	struct btrfs_timespec *tspec;
3316 	struct btrfs_root *root = BTRFS_I(inode)->root;
3317 	struct btrfs_key location;
3318 	int maybe_acls;
3319 	u32 rdev;
3320 	int ret;
3321 	bool filled = false;
3322 
3323 	ret = btrfs_fill_inode(inode, &rdev);
3324 	if (!ret)
3325 		filled = true;
3326 
3327 	path = btrfs_alloc_path();
3328 	if (!path)
3329 		goto make_bad;
3330 
3331 	path->leave_spinning = 1;
3332 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3333 
3334 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3335 	if (ret)
3336 		goto make_bad;
3337 
3338 	leaf = path->nodes[0];
3339 
3340 	if (filled)
3341 		goto cache_acl;
3342 
3343 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3344 				    struct btrfs_inode_item);
3345 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3346 	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3347 	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3348 	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3349 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
3350 
3351 	tspec = btrfs_inode_atime(inode_item);
3352 	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3353 	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3354 
3355 	tspec = btrfs_inode_mtime(inode_item);
3356 	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3357 	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3358 
3359 	tspec = btrfs_inode_ctime(inode_item);
3360 	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3361 	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3362 
3363 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3364 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3365 	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3366 
3367 	/*
3368 	 * If we were modified in the current generation and evicted from memory
3369 	 * and then re-read we need to do a full sync since we don't have any
3370 	 * idea about which extents were modified before we were evicted from
3371 	 * cache.
3372 	 */
3373 	if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
3374 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3375 			&BTRFS_I(inode)->runtime_flags);
3376 
3377 	inode->i_version = btrfs_inode_sequence(leaf, inode_item);
3378 	inode->i_generation = BTRFS_I(inode)->generation;
3379 	inode->i_rdev = 0;
3380 	rdev = btrfs_inode_rdev(leaf, inode_item);
3381 
3382 	BTRFS_I(inode)->index_cnt = (u64)-1;
3383 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3384 cache_acl:
3385 	/*
3386 	 * try to precache a NULL acl entry for files that don't have
3387 	 * any xattrs or acls
3388 	 */
3389 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3390 					   btrfs_ino(inode));
3391 	if (!maybe_acls)
3392 		cache_no_acl(inode);
3393 
3394 	btrfs_free_path(path);
3395 
3396 	switch (inode->i_mode & S_IFMT) {
3397 	case S_IFREG:
3398 		inode->i_mapping->a_ops = &btrfs_aops;
3399 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3400 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3401 		inode->i_fop = &btrfs_file_operations;
3402 		inode->i_op = &btrfs_file_inode_operations;
3403 		break;
3404 	case S_IFDIR:
3405 		inode->i_fop = &btrfs_dir_file_operations;
3406 		if (root == root->fs_info->tree_root)
3407 			inode->i_op = &btrfs_dir_ro_inode_operations;
3408 		else
3409 			inode->i_op = &btrfs_dir_inode_operations;
3410 		break;
3411 	case S_IFLNK:
3412 		inode->i_op = &btrfs_symlink_inode_operations;
3413 		inode->i_mapping->a_ops = &btrfs_symlink_aops;
3414 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3415 		break;
3416 	default:
3417 		inode->i_op = &btrfs_special_inode_operations;
3418 		init_special_inode(inode, inode->i_mode, rdev);
3419 		break;
3420 	}
3421 
3422 	btrfs_update_iflags(inode);
3423 	return;
3424 
3425 make_bad:
3426 	btrfs_free_path(path);
3427 	make_bad_inode(inode);
3428 }
3429 
3430 /*
3431  * given a leaf and an inode, copy the inode fields into the leaf
3432  */
3433 static void fill_inode_item(struct btrfs_trans_handle *trans,
3434 			    struct extent_buffer *leaf,
3435 			    struct btrfs_inode_item *item,
3436 			    struct inode *inode)
3437 {
3438 	struct btrfs_map_token token;
3439 
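	/*
	 * The token caches the mapped extent buffer page across the run of
	 * btrfs_set_token_* calls below, so each field store doesn't have
	 * to re-map the leaf.
	 */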
3440 	btrfs_init_map_token(&token);
3441 
3442 	btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3443 	btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3444 	btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3445 				   &token);
3446 	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3447 	btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3448 
3449 	btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
3450 				     inode->i_atime.tv_sec, &token);
3451 	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
3452 				      inode->i_atime.tv_nsec, &token);
3453 
3454 	btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
3455 				     inode->i_mtime.tv_sec, &token);
3456 	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
3457 				      inode->i_mtime.tv_nsec, &token);
3458 
3459 	btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
3460 				     inode->i_ctime.tv_sec, &token);
3461 	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
3462 				      inode->i_ctime.tv_nsec, &token);
3463 
3464 	btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3465 				     &token);
3466 	btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3467 					 &token);
3468 	btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3469 	btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3470 	btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3471 	btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3472 	btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3473 }
3474 
3475 /*
3476  * copy everything in the in-memory inode into the btree.
3477  */
3478 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3479 				struct btrfs_root *root, struct inode *inode)
3480 {
3481 	struct btrfs_inode_item *inode_item;
3482 	struct btrfs_path *path;
3483 	struct extent_buffer *leaf;
3484 	int ret;
3485 
3486 	path = btrfs_alloc_path();
3487 	if (!path)
3488 		return -ENOMEM;
3489 
3490 	path->leave_spinning = 1;
3491 	ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3492 				 1);
3493 	if (ret) {
3494 		if (ret > 0)
3495 			ret = -ENOENT;
3496 		goto failed;
3497 	}
3498 
3499 	btrfs_unlock_up_safe(path, 1);
3500 	leaf = path->nodes[0];
3501 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3502 				    struct btrfs_inode_item);
3503 
3504 	fill_inode_item(trans, leaf, inode_item, inode);
3505 	btrfs_mark_buffer_dirty(leaf);
3506 	btrfs_set_inode_last_trans(trans, inode);
3507 	ret = 0;
3508 failed:
3509 	btrfs_free_path(path);
3510 	return ret;
3511 }
3512 
3513 /*
3514  * copy everything in the in-memory inode into the btree, going through
 * the delayed-items code when it is safe to do so.
3515  */
3516 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3517 				struct btrfs_root *root, struct inode *inode)
3518 {
3519 	int ret;
3520 
3521 	/*
3522 	 * If the inode is a free space inode, we can deadlock during commit
3523 	 * if we put it into the delayed code.
3524 	 *
3525 	 * The data relocation inode should also be directly updated
3526 	 * without delay
3527 	 */
3528 	if (!btrfs_is_free_space_inode(inode)
3529 	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
3530 		btrfs_update_root_times(trans, root);
3531 
3532 		ret = btrfs_delayed_update_inode(trans, root, inode);
3533 		if (!ret)
3534 			btrfs_set_inode_last_trans(trans, inode);
3535 		return ret;
3536 	}
3537 
3538 	return btrfs_update_inode_item(trans, root, inode);
3539 }
3540 
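/*
 * Variant of btrfs_update_inode() for callers that must not fail on
 * -ENOSPC: if the delayed-items path cannot reserve space, fall back
 * to updating the inode item in the tree directly.
 */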
3541 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3542 					 struct btrfs_root *root,
3543 					 struct inode *inode)
3544 {
3545 	int ret;
3546 
3547 	ret = btrfs_update_inode(trans, root, inode);
3548 	if (ret == -ENOSPC)
3549 		return btrfs_update_inode_item(trans, root, inode);
3550 	return ret;
3551 }
3552 
3553 /*
3554  * unlink helper that gets used here in inode.c and in the tree logging
3555  * recovery code.  It removes a link in a directory with a given name, and
3556  * also drops the back refs in the inode to the directory.
3557  */
3558 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3559 				struct btrfs_root *root,
3560 				struct inode *dir, struct inode *inode,
3561 				const char *name, int name_len)
3562 {
3563 	struct btrfs_path *path;
3564 	int ret = 0;
3565 	struct extent_buffer *leaf;
3566 	struct btrfs_dir_item *di;
3567 	struct btrfs_key key;
3568 	u64 index;
3569 	u64 ino = btrfs_ino(inode);
3570 	u64 dir_ino = btrfs_ino(dir);
3571 
3572 	path = btrfs_alloc_path();
3573 	if (!path) {
3574 		ret = -ENOMEM;
3575 		goto out;
3576 	}
3577 
3578 	path->leave_spinning = 1;
3579 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3580 				    name, name_len, -1);
3581 	if (IS_ERR(di)) {
3582 		ret = PTR_ERR(di);
3583 		goto err;
3584 	}
3585 	if (!di) {
3586 		ret = -ENOENT;
3587 		goto err;
3588 	}
3589 	leaf = path->nodes[0];
3590 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
3591 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
3592 	if (ret)
3593 		goto err;
3594 	btrfs_release_path(path);
3595 
3596 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
3597 				  dir_ino, &index);
3598 	if (ret) {
3599 		btrfs_info(root->fs_info,
3600 			"failed to delete reference to %.*s, inode %llu parent %llu",
3601 			name_len, name, ino, dir_ino);
3602 		btrfs_abort_transaction(trans, root, ret);
3603 		goto err;
3604 	}
3605 
3606 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3607 	if (ret) {
3608 		btrfs_abort_transaction(trans, root, ret);
3609 		goto err;
3610 	}
3611 
3612 	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
3613 					 inode, dir_ino);
3614 	if (ret != 0 && ret != -ENOENT) {
3615 		btrfs_abort_transaction(trans, root, ret);
3616 		goto err;
3617 	}
3618 
3619 	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
3620 					   dir, index);
3621 	if (ret == -ENOENT)
3622 		ret = 0;
3623 	else if (ret)
3624 		btrfs_abort_transaction(trans, root, ret);
3625 err:
3626 	btrfs_free_path(path);
3627 	if (ret)
3628 		goto out;
3629 
3630 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3631 	inode_inc_iversion(inode);
3632 	inode_inc_iversion(dir);
3633 	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3634 	ret = btrfs_update_inode(trans, root, dir);
3635 out:
3636 	return ret;
3637 }
3638 
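/*
 * Wrapper around __btrfs_unlink_inode() that also drops the victim
 * inode's link count and writes the updated inode back to the tree.
 */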
3639 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3640 		       struct btrfs_root *root,
3641 		       struct inode *dir, struct inode *inode,
3642 		       const char *name, int name_len)
3643 {
3644 	int ret;
3645 	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
3646 	if (!ret) {
3647 		drop_nlink(inode);
3648 		ret = btrfs_update_inode(trans, root, inode);
3649 	}
3650 	return ret;
3651 }
3652 
3653 /*
3654  * helper to start transaction for unlink and rmdir.
3655  *
3656  * unlink and rmdir are special in btrfs: they do not always free space.
3657  * So if we cannot make our reservation the normal way, try and see if
3658  * there is enough slack room in the global reserve to migrate from;
3659  * otherwise we cannot allow the unlink to occur.
3660  */
3661 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
3662 {
3663 	struct btrfs_trans_handle *trans;
3664 	struct btrfs_root *root = BTRFS_I(dir)->root;
3665 	int ret;
3666 
3667 	/*
3668 	 * 1 for the possible orphan item
3669 	 * 1 for the dir item
3670 	 * 1 for the dir index
3671 	 * 1 for the inode ref
3672 	 * 1 for the inode
3673 	 */
3674 	trans = btrfs_start_transaction(root, 5);
3675 	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
3676 		return trans;
3677 
3678 	if (PTR_ERR(trans) == -ENOSPC) {
3679 		u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
3680 
3681 		trans = btrfs_start_transaction(root, 0);
3682 		if (IS_ERR(trans))
3683 			return trans;
3684 		ret = btrfs_cond_migrate_bytes(root->fs_info,
3685 					       &root->fs_info->trans_block_rsv,
3686 					       num_bytes, 5);
3687 		if (ret) {
3688 			btrfs_end_transaction(trans, root);
3689 			return ERR_PTR(ret);
3690 		}
3691 		trans->block_rsv = &root->fs_info->trans_block_rsv;
3692 		trans->bytes_reserved = num_bytes;
3693 	}
3694 	return trans;
3695 }
3696 
3697 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
3698 {
3699 	struct btrfs_root *root = BTRFS_I(dir)->root;
3700 	struct btrfs_trans_handle *trans;
3701 	struct inode *inode = dentry->d_inode;
3702 	int ret;
3703 
3704 	trans = __unlink_start_trans(dir);
3705 	if (IS_ERR(trans))
3706 		return PTR_ERR(trans);
3707 
3708 	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
3709 
3710 	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3711 				 dentry->d_name.name, dentry->d_name.len);
3712 	if (ret)
3713 		goto out;
3714 
3715 	if (inode->i_nlink == 0) {
3716 		ret = btrfs_orphan_add(trans, inode);
3717 		if (ret)
3718 			goto out;
3719 	}
3720 
3721 out:
3722 	btrfs_end_transaction(trans, root);
3723 	btrfs_btree_balance_dirty(root);
3724 	return ret;
3725 }
3726 
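/*
 * Remove a directory entry that points at a subvolume root rather than
 * a regular inode: delete the dir item, the root ref in the tree of
 * tree roots and the dir index, then shrink the parent directory's size.
 */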
3727 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3728 			struct btrfs_root *root,
3729 			struct inode *dir, u64 objectid,
3730 			const char *name, int name_len)
3731 {
3732 	struct btrfs_path *path;
3733 	struct extent_buffer *leaf;
3734 	struct btrfs_dir_item *di;
3735 	struct btrfs_key key;
3736 	u64 index;
3737 	int ret;
3738 	u64 dir_ino = btrfs_ino(dir);
3739 
3740 	path = btrfs_alloc_path();
3741 	if (!path)
3742 		return -ENOMEM;
3743 
3744 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3745 				   name, name_len, -1);
3746 	if (IS_ERR_OR_NULL(di)) {
3747 		if (!di)
3748 			ret = -ENOENT;
3749 		else
3750 			ret = PTR_ERR(di);
3751 		goto out;
3752 	}
3753 
3754 	leaf = path->nodes[0];
3755 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
3756 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
3757 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
3758 	if (ret) {
3759 		btrfs_abort_transaction(trans, root, ret);
3760 		goto out;
3761 	}
3762 	btrfs_release_path(path);
3763 
3764 	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
3765 				 objectid, root->root_key.objectid,
3766 				 dir_ino, &index, name, name_len);
3767 	if (ret < 0) {
3768 		if (ret != -ENOENT) {
3769 			btrfs_abort_transaction(trans, root, ret);
3770 			goto out;
3771 		}
3772 		di = btrfs_search_dir_index_item(root, path, dir_ino,
3773 						 name, name_len);
3774 		if (IS_ERR_OR_NULL(di)) {
3775 			if (!di)
3776 				ret = -ENOENT;
3777 			else
3778 				ret = PTR_ERR(di);
3779 			btrfs_abort_transaction(trans, root, ret);
3780 			goto out;
3781 		}
3782 
3783 		leaf = path->nodes[0];
3784 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3785 		btrfs_release_path(path);
3786 		index = key.offset;
3787 	}
3788 	btrfs_release_path(path);
3789 
3790 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3791 	if (ret) {
3792 		btrfs_abort_transaction(trans, root, ret);
3793 		goto out;
3794 	}
3795 
3796 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3797 	inode_inc_iversion(dir);
3798 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3799 	ret = btrfs_update_inode_fallback(trans, root, dir);
3800 	if (ret)
3801 		btrfs_abort_transaction(trans, root, ret);
3802 out:
3803 	btrfs_free_path(path);
3804 	return ret;
3805 }
3806 
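/*
 * rmdir: the directory must be empty, and a subvolume root itself
 * cannot be removed this way (-EPERM).  An orphan item is added first
 * so a crash mid-removal leaves nothing behind to leak.
 */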
3807 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3808 {
3809 	struct inode *inode = dentry->d_inode;
3810 	int err = 0;
3811 	struct btrfs_root *root = BTRFS_I(dir)->root;
3812 	struct btrfs_trans_handle *trans;
3813 
3814 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
3815 		return -ENOTEMPTY;
3816 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
3817 		return -EPERM;
3818 
3819 	trans = __unlink_start_trans(dir);
3820 	if (IS_ERR(trans))
3821 		return PTR_ERR(trans);
3822 
3823 	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
3824 		err = btrfs_unlink_subvol(trans, root, dir,
3825 					  BTRFS_I(inode)->location.objectid,
3826 					  dentry->d_name.name,
3827 					  dentry->d_name.len);
3828 		goto out;
3829 	}
3830 
3831 	err = btrfs_orphan_add(trans, inode);
3832 	if (err)
3833 		goto out;
3834 
3835 	/* now the directory is empty */
3836 	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3837 				 dentry->d_name.name, dentry->d_name.len);
3838 	if (!err)
3839 		btrfs_i_size_write(inode, 0);
3840 out:
3841 	btrfs_end_transaction(trans, root);
3842 	btrfs_btree_balance_dirty(root);
3843 
3844 	return err;
3845 }
3846 
3847 /*
3848  * this can truncate away extent items, csum items and directory items.
3849  * It starts at a high offset and removes keys until it can't find
3850  * any higher than new_size
3851  *
3852  * csum items that cross the new i_size are truncated to the new size
3853  * as well.
3854  *
3855  * min_type is the minimum key type to truncate down to.  If set to 0, this
3856  * will kill all the items on this inode, including the INODE_ITEM_KEY.
3857  */
3858 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3859 			       struct btrfs_root *root,
3860 			       struct inode *inode,
3861 			       u64 new_size, u32 min_type)
3862 {
3863 	struct btrfs_path *path;
3864 	struct extent_buffer *leaf;
3865 	struct btrfs_file_extent_item *fi;
3866 	struct btrfs_key key;
3867 	struct btrfs_key found_key;
3868 	u64 extent_start = 0;
3869 	u64 extent_num_bytes = 0;
3870 	u64 extent_offset = 0;
3871 	u64 item_end = 0;
3872 	u64 last_size = (u64)-1;
3873 	u32 found_type = (u8)-1;
3874 	int found_extent;
3875 	int del_item;
3876 	int pending_del_nr = 0;
3877 	int pending_del_slot = 0;
3878 	int extent_type = -1;
3879 	int ret;
3880 	int err = 0;
3881 	u64 ino = btrfs_ino(inode);
3882 
3883 	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
3884 
3885 	path = btrfs_alloc_path();
3886 	if (!path)
3887 		return -ENOMEM;
3888 	path->reada = -1;
3889 
3890 	/*
3891 	 * We want to drop from the next block forward in case this new size is
3892 	 * not block aligned since we will be keeping the last block of the
3893 	 * extent just the way it is.
3894 	 */
3895 	if (root->ref_cows || root == root->fs_info->tree_root)
3896 		btrfs_drop_extent_cache(inode, ALIGN(new_size,
3897 					root->sectorsize), (u64)-1, 0);
3898 
3899 	/*
3900 	 * This function is also used to drop the items in the log tree before
3901 	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
3902 	 * it is used to drop the loged items. So we shouldn't kill the delayed
3903 	 * it is used to drop the logged items.  So we shouldn't kill the delayed
3904 	 */
3905 	if (min_type == 0 && root == BTRFS_I(inode)->root)
3906 		btrfs_kill_delayed_inode_items(inode);
3907 
3908 	key.objectid = ino;
3909 	key.offset = (u64)-1;
3910 	key.type = (u8)-1;
3911 
3912 search_again:
3913 	path->leave_spinning = 1;
3914 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3915 	if (ret < 0) {
3916 		err = ret;
3917 		goto out;
3918 	}
3919 
3920 	if (ret > 0) {
3921 		/* there are no items in the tree for us to truncate, we're
3922 		 * done
3923 		 */
3924 		if (path->slots[0] == 0)
3925 			goto out;
3926 		path->slots[0]--;
3927 	}
3928 
3929 	while (1) {
3930 		fi = NULL;
3931 		leaf = path->nodes[0];
3932 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3933 		found_type = btrfs_key_type(&found_key);
3934 
3935 		if (found_key.objectid != ino)
3936 			break;
3937 
3938 		if (found_type < min_type)
3939 			break;
3940 
3941 		item_end = found_key.offset;
3942 		if (found_type == BTRFS_EXTENT_DATA_KEY) {
3943 			fi = btrfs_item_ptr(leaf, path->slots[0],
3944 					    struct btrfs_file_extent_item);
3945 			extent_type = btrfs_file_extent_type(leaf, fi);
3946 			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3947 				item_end +=
3948 				    btrfs_file_extent_num_bytes(leaf, fi);
3949 			} else {	/* BTRFS_FILE_EXTENT_INLINE */
3950 				item_end += btrfs_file_extent_inline_len(leaf,
3951 									 fi);
3952 			}
3953 			item_end--;
3954 		}
3955 		if (found_type > min_type) {
3956 			del_item = 1;
3957 		} else {
3958 			if (item_end < new_size)
3959 				break;
3960 			if (found_key.offset >= new_size)
3961 				del_item = 1;
3962 			else
3963 				del_item = 0;
3964 		}
3965 		found_extent = 0;
3966 		/* FIXME, shrink the extent if the ref count is only 1 */
3967 		if (found_type != BTRFS_EXTENT_DATA_KEY)
3968 			goto delete;
3969 
3970 		if (del_item)
3971 			last_size = found_key.offset;
3972 		else
3973 			last_size = new_size;
3974 
3975 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3976 			u64 num_dec;
3977 			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
3978 			if (!del_item) {
3979 				u64 orig_num_bytes =
3980 					btrfs_file_extent_num_bytes(leaf, fi);
3981 				extent_num_bytes = ALIGN(new_size -
3982 						found_key.offset,
3983 						root->sectorsize);
3984 				btrfs_set_file_extent_num_bytes(leaf, fi,
3985 							 extent_num_bytes);
3986 				num_dec = (orig_num_bytes -
3987 					   extent_num_bytes);
3988 				if (root->ref_cows && extent_start != 0)
3989 					inode_sub_bytes(inode, num_dec);
3990 				btrfs_mark_buffer_dirty(leaf);
3991 			} else {
3992 				extent_num_bytes =
3993 					btrfs_file_extent_disk_num_bytes(leaf,
3994 									 fi);
3995 				extent_offset = found_key.offset -
3996 					btrfs_file_extent_offset(leaf, fi);
3997 
3998 				/* FIXME blocksize != 4096 */
3999 				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4000 				if (extent_start != 0) {
4001 					found_extent = 1;
4002 					if (root->ref_cows)
4003 						inode_sub_bytes(inode, num_dec);
4004 				}
4005 			}
4006 		} else {	/* BTRFS_FILE_EXTENT_INLINE */
4007 			/*
4008 			 * we can't truncate inline items that have had
4009 			 * special encodings
4010 			 */
4011 			if (!del_item &&
4012 			    btrfs_file_extent_compression(leaf, fi) == 0 &&
4013 			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
4014 			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
4015 				u32 size = new_size - found_key.offset;
4016 
4017 				if (root->ref_cows) {
4018 					inode_sub_bytes(inode, item_end + 1 -
4019 							new_size);
4020 				}
4021 				size =
4022 				    btrfs_file_extent_calc_inline_size(size);
4023 				btrfs_truncate_item(root, path, size, 1);
4024 			} else if (root->ref_cows) {
4025 				inode_sub_bytes(inode, item_end + 1 -
4026 						found_key.offset);
4027 			}
4028 		}
4029 delete:
4030 		if (del_item) {
4031 			if (!pending_del_nr) {
4032 				/* no pending yet, add ourselves */
4033 				pending_del_slot = path->slots[0];
4034 				pending_del_nr = 1;
4035 			} else if (pending_del_nr &&
4036 				   path->slots[0] + 1 == pending_del_slot) {
4037 				/* hop on the pending chunk */
4038 				pending_del_nr++;
4039 				pending_del_slot = path->slots[0];
4040 			} else {
4041 				BUG();
4042 			}
4043 		} else {
4044 			break;
4045 		}
4046 		if (found_extent && (root->ref_cows ||
4047 				     root == root->fs_info->tree_root)) {
4048 			btrfs_set_path_blocking(path);
4049 			ret = btrfs_free_extent(trans, root, extent_start,
4050 						extent_num_bytes, 0,
4051 						btrfs_header_owner(leaf),
4052 						ino, extent_offset, 0);
4053 			BUG_ON(ret);
4054 		}
4055 
4056 		if (found_type == BTRFS_INODE_ITEM_KEY)
4057 			break;
4058 
4059 		if (path->slots[0] == 0 ||
4060 		    path->slots[0] != pending_del_slot) {
4061 			if (pending_del_nr) {
4062 				ret = btrfs_del_items(trans, root, path,
4063 						pending_del_slot,
4064 						pending_del_nr);
4065 				if (ret) {
4066 					btrfs_abort_transaction(trans,
4067 								root, ret);
4068 					goto error;
4069 				}
4070 				pending_del_nr = 0;
4071 			}
4072 			btrfs_release_path(path);
4073 			goto search_again;
4074 		} else {
4075 			path->slots[0]--;
4076 		}
4077 	}
4078 out:
4079 	if (pending_del_nr) {
4080 		ret = btrfs_del_items(trans, root, path, pending_del_slot,
4081 				      pending_del_nr);
4082 		if (ret)
4083 			btrfs_abort_transaction(trans, root, ret);
4084 	}
4085 error:
4086 	if (last_size != (u64)-1)
4087 		btrfs_ordered_update_i_size(inode, last_size, NULL);
4088 	btrfs_free_path(path);
4089 	return err;
4090 }
4091 
4092 /*
4093  * btrfs_truncate_page - read, zero a chunk and write a page
4094  * @inode - inode that we're zeroing
4095  * @from - the offset to start zeroing
4096  * @len - the length to zero, 0 to zero the entire range relative to the
4097  *	offset
4098  * @front - zero up to the offset instead of from the offset on
4099  *
4100  * This will find the page for the "from" offset, CoW the page and zero the
4101  * part we want zeroed.  This is used with truncate and hole punching.
4102  */
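/*
 * Example: btrfs_cont_expand() below uses this to zero the tail of the
 * last page when a file grows past a non-block-aligned i_size:
 *
 *	err = btrfs_truncate_page(inode, oldsize, 0, 0);
 */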
4103 int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
4104 			int front)
4105 {
4106 	struct address_space *mapping = inode->i_mapping;
4107 	struct btrfs_root *root = BTRFS_I(inode)->root;
4108 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4109 	struct btrfs_ordered_extent *ordered;
4110 	struct extent_state *cached_state = NULL;
4111 	char *kaddr;
4112 	u32 blocksize = root->sectorsize;
4113 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
4114 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
4115 	struct page *page;
4116 	gfp_t mask = btrfs_alloc_write_mask(mapping);
4117 	int ret = 0;
4118 	u64 page_start;
4119 	u64 page_end;
4120 
4121 	if ((offset & (blocksize - 1)) == 0 &&
4122 	    (!len || ((len & (blocksize - 1)) == 0)))
4123 		goto out;
4124 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
4125 	if (ret)
4126 		goto out;
4127 
4128 again:
4129 	page = find_or_create_page(mapping, index, mask);
4130 	if (!page) {
4131 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
4132 		ret = -ENOMEM;
4133 		goto out;
4134 	}
4135 
4136 	page_start = page_offset(page);
4137 	page_end = page_start + PAGE_CACHE_SIZE - 1;
4138 
4139 	if (!PageUptodate(page)) {
4140 		ret = btrfs_readpage(NULL, page);
4141 		lock_page(page);
4142 		if (page->mapping != mapping) {
4143 			unlock_page(page);
4144 			page_cache_release(page);
4145 			goto again;
4146 		}
4147 		if (!PageUptodate(page)) {
4148 			ret = -EIO;
4149 			goto out_unlock;
4150 		}
4151 	}
4152 	wait_on_page_writeback(page);
4153 
4154 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
4155 	set_page_extent_mapped(page);
4156 
4157 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
4158 	if (ordered) {
4159 		unlock_extent_cached(io_tree, page_start, page_end,
4160 				     &cached_state, GFP_NOFS);
4161 		unlock_page(page);
4162 		page_cache_release(page);
4163 		btrfs_start_ordered_extent(inode, ordered, 1);
4164 		btrfs_put_ordered_extent(ordered);
4165 		goto again;
4166 	}
4167 
4168 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
4169 			  EXTENT_DIRTY | EXTENT_DELALLOC |
4170 			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4171 			  0, 0, &cached_state, GFP_NOFS);
4172 
4173 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
4174 					&cached_state);
4175 	if (ret) {
4176 		unlock_extent_cached(io_tree, page_start, page_end,
4177 				     &cached_state, GFP_NOFS);
4178 		goto out_unlock;
4179 	}
4180 
4181 	if (offset != PAGE_CACHE_SIZE) {
4182 		if (!len)
4183 			len = PAGE_CACHE_SIZE - offset;
4184 		kaddr = kmap(page);
4185 		if (front)
4186 			memset(kaddr, 0, offset);
4187 		else
4188 			memset(kaddr + offset, 0, len);
4189 		flush_dcache_page(page);
4190 		kunmap(page);
4191 	}
4192 	ClearPageChecked(page);
4193 	set_page_dirty(page);
4194 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
4195 			     GFP_NOFS);
4196 
4197 out_unlock:
4198 	if (ret)
4199 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
4200 	unlock_page(page);
4201 	page_cache_release(page);
4202 out:
4203 	return ret;
4204 }
4205 
4206 /*
4207  * This function puts in dummy file extents for the area we're creating a hole
4208  * for.  So if we are truncating this file to a larger size we need to insert
4209  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
4210  * for the range between oldsize and size.
4211  */
4212 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
4213 {
4214 	struct btrfs_trans_handle *trans;
4215 	struct btrfs_root *root = BTRFS_I(inode)->root;
4216 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4217 	struct extent_map *em = NULL;
4218 	struct extent_state *cached_state = NULL;
4219 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4220 	u64 hole_start = ALIGN(oldsize, root->sectorsize);
4221 	u64 block_end = ALIGN(size, root->sectorsize);
4222 	u64 last_byte;
4223 	u64 cur_offset;
4224 	u64 hole_size;
4225 	int err = 0;
4226 
4227 	/*
4228 	 * If our size started in the middle of a page we need to zero out the
4229 	 * rest of the page before we expand the i_size, otherwise we could
4230 	 * expose stale data.
4231 	 */
4232 	err = btrfs_truncate_page(inode, oldsize, 0, 0);
4233 	if (err)
4234 		return err;
4235 
4236 	if (size <= hole_start)
4237 		return 0;
4238 
4239 	while (1) {
4240 		struct btrfs_ordered_extent *ordered;
4241 
4242 		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
4243 				 &cached_state);
4244 		ordered = btrfs_lookup_ordered_range(inode, hole_start,
4245 						     block_end - hole_start);
4246 		if (!ordered)
4247 			break;
4248 		unlock_extent_cached(io_tree, hole_start, block_end - 1,
4249 				     &cached_state, GFP_NOFS);
4250 		btrfs_start_ordered_extent(inode, ordered, 1);
4251 		btrfs_put_ordered_extent(ordered);
4252 	}
4253 
4254 	cur_offset = hole_start;
4255 	while (1) {
4256 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4257 				block_end - cur_offset, 0);
4258 		if (IS_ERR(em)) {
4259 			err = PTR_ERR(em);
4260 			em = NULL;
4261 			break;
4262 		}
4263 		last_byte = min(extent_map_end(em), block_end);
4264 		last_byte = ALIGN(last_byte, root->sectorsize);
4265 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4266 			struct extent_map *hole_em;
4267 			hole_size = last_byte - cur_offset;
4268 
4269 			trans = btrfs_start_transaction(root, 3);
4270 			if (IS_ERR(trans)) {
4271 				err = PTR_ERR(trans);
4272 				break;
4273 			}
4274 
4275 			err = btrfs_drop_extents(trans, root, inode,
4276 						 cur_offset,
4277 						 cur_offset + hole_size, 1);
4278 			if (err) {
4279 				btrfs_abort_transaction(trans, root, err);
4280 				btrfs_end_transaction(trans, root);
4281 				break;
4282 			}
4283 
4284 			err = btrfs_insert_file_extent(trans, root,
4285 					btrfs_ino(inode), cur_offset, 0,
4286 					0, hole_size, 0, hole_size,
4287 					0, 0, 0);
4288 			if (err) {
4289 				btrfs_abort_transaction(trans, root, err);
4290 				btrfs_end_transaction(trans, root);
4291 				break;
4292 			}
4293 
4294 			btrfs_drop_extent_cache(inode, cur_offset,
4295 						cur_offset + hole_size - 1, 0);
4296 			hole_em = alloc_extent_map();
4297 			if (!hole_em) {
4298 				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4299 					&BTRFS_I(inode)->runtime_flags);
4300 				goto next;
4301 			}
4302 			hole_em->start = cur_offset;
4303 			hole_em->len = hole_size;
4304 			hole_em->orig_start = cur_offset;
4305 
4306 			hole_em->block_start = EXTENT_MAP_HOLE;
4307 			hole_em->block_len = 0;
4308 			hole_em->orig_block_len = 0;
4309 			hole_em->ram_bytes = hole_size;
4310 			hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
4311 			hole_em->compress_type = BTRFS_COMPRESS_NONE;
4312 			hole_em->generation = trans->transid;
4313 
4314 			while (1) {
4315 				write_lock(&em_tree->lock);
4316 				err = add_extent_mapping(em_tree, hole_em, 1);
4317 				write_unlock(&em_tree->lock);
4318 				if (err != -EEXIST)
4319 					break;
4320 				btrfs_drop_extent_cache(inode, cur_offset,
4321 							cur_offset +
4322 							hole_size - 1, 0);
4323 			}
4324 			free_extent_map(hole_em);
4325 next:
4326 			btrfs_update_inode(trans, root, inode);
4327 			btrfs_end_transaction(trans, root);
4328 		}
4329 		free_extent_map(em);
4330 		em = NULL;
4331 		cur_offset = last_byte;
4332 		if (cur_offset >= block_end)
4333 			break;
4334 	}
4335 
4336 	free_extent_map(em);
4337 	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
4338 			     GFP_NOFS);
4339 	return err;
4340 }
4341 
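/*
 * Handle ATTR_SIZE changes: growing files get hole extents inserted via
 * btrfs_cont_expand(); shrinking files are protected by a temporary
 * orphan item and then truncated.
 */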
4342 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4343 {
4344 	struct btrfs_root *root = BTRFS_I(inode)->root;
4345 	struct btrfs_trans_handle *trans;
4346 	loff_t oldsize = i_size_read(inode);
4347 	loff_t newsize = attr->ia_size;
4348 	int mask = attr->ia_valid;
4349 	int ret;
4350 
4351 	/*
4352 	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
4353 	 * special case where we need to update the times despite not having
4354 	 * these flags set.  For all other operations the VFS set these flags
4355 	 * explicitly if it wants a timestamp update.
4356 	 */
4357 	if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME))))
4358 		inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
4359 
4360 	if (newsize > oldsize) {
4361 		truncate_pagecache(inode, newsize);
4362 		ret = btrfs_cont_expand(inode, oldsize, newsize);
4363 		if (ret)
4364 			return ret;
4365 
4366 		trans = btrfs_start_transaction(root, 1);
4367 		if (IS_ERR(trans))
4368 			return PTR_ERR(trans);
4369 
4370 		i_size_write(inode, newsize);
4371 		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
4372 		ret = btrfs_update_inode(trans, root, inode);
4373 		btrfs_end_transaction(trans, root);
4374 	} else {
4375 
4376 		/*
4377 		 * We're truncating a file that used to have good data down to
4378 		 * zero. Make sure it gets into the ordered flush list so that
4379 		 * any new writes get down to disk quickly.
4380 		 */
4381 		if (newsize == 0)
4382 			set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
4383 				&BTRFS_I(inode)->runtime_flags);
4384 
4385 		/*
4386 		 * 1 for the orphan item we're going to add
4387 		 * 1 for the orphan item deletion.
4388 		 */
4389 		trans = btrfs_start_transaction(root, 2);
4390 		if (IS_ERR(trans))
4391 			return PTR_ERR(trans);
4392 
4393 		/*
4394 		 * We need to do this in case we fail at _any_ point during the
4395 		 * actual truncate.  Once we do the truncate_setsize we could
4396 		 * invalidate pages which forces any outstanding ordered io to
4397 		 * be instantly completed which will give us extents that need
4398 		 * to be truncated.  If we fail to insert the orphan item we
4399 		 * could have leftover extents that were never meant to live,
4400 		 * so we need to guarantee from this point on that everything
4401 		 * will be consistent.
4402 		 */
4403 		ret = btrfs_orphan_add(trans, inode);
4404 		btrfs_end_transaction(trans, root);
4405 		if (ret)
4406 			return ret;
4407 
4408 		/* we don't support swapfiles, so vmtruncate shouldn't fail */
4409 		truncate_setsize(inode, newsize);
4410 
4411 		/* Disable non-locked read DIO to avoid an endless truncate */
4412 		btrfs_inode_block_unlocked_dio(inode);
4413 		inode_dio_wait(inode);
4414 		btrfs_inode_resume_unlocked_dio(inode);
4415 
4416 		ret = btrfs_truncate(inode);
4417 		if (ret && inode->i_nlink) {
4418 			int err;
4419 
4420 			/*
4421 			 * failed to truncate, disk_i_size is only adjusted down
4422 			 * as we remove extents, so it should represent the true
4423 			 * size of the inode, so reset the in memory size and
4424 			 * delete our orphan entry.
4425 			 */
4426 			trans = btrfs_join_transaction(root);
4427 			if (IS_ERR(trans)) {
4428 				btrfs_orphan_del(NULL, inode);
4429 				return ret;
4430 			}
4431 			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
4432 			err = btrfs_orphan_del(trans, inode);
4433 			if (err)
4434 				btrfs_abort_transaction(trans, root, err);
4435 			btrfs_end_transaction(trans, root);
4436 		}
4437 	}
4438 
4439 	return ret;
4440 }
4441 
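/*
 * ->setattr: size changes go through btrfs_setsize(), everything else
 * is copied into the in-memory inode and persisted via btrfs_dirty_inode().
 */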
4442 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
4443 {
4444 	struct inode *inode = dentry->d_inode;
4445 	struct btrfs_root *root = BTRFS_I(inode)->root;
4446 	int err;
4447 
4448 	if (btrfs_root_readonly(root))
4449 		return -EROFS;
4450 
4451 	err = inode_change_ok(inode, attr);
4452 	if (err)
4453 		return err;
4454 
4455 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
4456 		err = btrfs_setsize(inode, attr);
4457 		if (err)
4458 			return err;
4459 	}
4460 
4461 	if (attr->ia_valid) {
4462 		setattr_copy(inode, attr);
4463 		inode_inc_iversion(inode);
4464 		err = btrfs_dirty_inode(inode);
4465 
4466 		if (!err && attr->ia_valid & ATTR_MODE)
4467 			err = btrfs_acl_chmod(inode);
4468 	}
4469 
4470 	return err;
4471 }
4472 
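/*
 * ->evict_inode: called when the last reference to an in-memory inode
 * goes away.  For unlinked inodes this deletes all of their items,
 * refilling a temporary rsv (or stealing from the global reserve) as
 * the truncate proceeds, and finally drops the orphan item.
 */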
4473 void btrfs_evict_inode(struct inode *inode)
4474 {
4475 	struct btrfs_trans_handle *trans;
4476 	struct btrfs_root *root = BTRFS_I(inode)->root;
4477 	struct btrfs_block_rsv *rsv, *global_rsv;
4478 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
4479 	int ret;
4480 
4481 	trace_btrfs_inode_evict(inode);
4482 
4483 	truncate_inode_pages(&inode->i_data, 0);
4484 	if (inode->i_nlink &&
4485 	    ((btrfs_root_refs(&root->root_item) != 0 &&
4486 	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
4487 	     btrfs_is_free_space_inode(inode)))
4488 		goto no_delete;
4489 
4490 	if (is_bad_inode(inode)) {
4491 		btrfs_orphan_del(NULL, inode);
4492 		goto no_delete;
4493 	}
4494 	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
4495 	btrfs_wait_ordered_range(inode, 0, (u64)-1);
4496 
4497 	if (root->fs_info->log_root_recovering) {
4498 		BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
4499 				 &BTRFS_I(inode)->runtime_flags));
4500 		goto no_delete;
4501 	}
4502 
4503 	if (inode->i_nlink > 0) {
4504 		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
4505 		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
4506 		goto no_delete;
4507 	}
4508 
4509 	ret = btrfs_commit_inode_delayed_inode(inode);
4510 	if (ret) {
4511 		btrfs_orphan_del(NULL, inode);
4512 		goto no_delete;
4513 	}
4514 
4515 	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
4516 	if (!rsv) {
4517 		btrfs_orphan_del(NULL, inode);
4518 		goto no_delete;
4519 	}
4520 	rsv->size = min_size;
4521 	rsv->failfast = 1;
4522 	global_rsv = &root->fs_info->global_block_rsv;
4523 
4524 	btrfs_i_size_write(inode, 0);
4525 
4526 	/*
4527 	 * This is a bit simpler than btrfs_truncate since we've already
4528 	 * reserved our space for our orphan item in the unlink, so we just
4529 	 * need to reserve some slack space in case we add bytes and update
4530 	 * inode item when doing the truncate.
4531 	 */
4532 	while (1) {
4533 		ret = btrfs_block_rsv_refill(root, rsv, min_size,
4534 					     BTRFS_RESERVE_FLUSH_LIMIT);
4535 
4536 		/*
4537 		 * Try and steal from the global reserve since we will
4538 		 * likely not use this space anyway, we want to try as
4539 		 * hard as possible to get this to work.
4540 		 */
4541 		if (ret)
4542 			ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
4543 
4544 		if (ret) {
4545 			btrfs_warn(root->fs_info,
4546 				"Could not get space for a delete, will truncate on mount %d",
4547 				ret);
4548 			btrfs_orphan_del(NULL, inode);
4549 			btrfs_free_block_rsv(root, rsv);
4550 			goto no_delete;
4551 		}
4552 
4553 		trans = btrfs_join_transaction(root);
4554 		if (IS_ERR(trans)) {
4555 			btrfs_orphan_del(NULL, inode);
4556 			btrfs_free_block_rsv(root, rsv);
4557 			goto no_delete;
4558 		}
4559 
4560 		trans->block_rsv = rsv;
4561 
4562 		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
4563 		if (ret != -ENOSPC)
4564 			break;
4565 
4566 		trans->block_rsv = &root->fs_info->trans_block_rsv;
4567 		btrfs_end_transaction(trans, root);
4568 		trans = NULL;
4569 		btrfs_btree_balance_dirty(root);
4570 	}
4571 
4572 	btrfs_free_block_rsv(root, rsv);
4573 
4574 	/*
4575 	 * Errors here aren't a big deal, it just means we leave orphan items
4576 	 * in the tree.  They will be cleaned up on the next mount.
4577 	 */
4578 	if (ret == 0) {
4579 		trans->block_rsv = root->orphan_block_rsv;
4580 		btrfs_orphan_del(trans, inode);
4581 	} else {
4582 		btrfs_orphan_del(NULL, inode);
4583 	}
4584 
4585 	trans->block_rsv = &root->fs_info->trans_block_rsv;
4586 	if (!(root == root->fs_info->tree_root ||
4587 	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
4588 		btrfs_return_ino(root, btrfs_ino(inode));
4589 
4590 	btrfs_end_transaction(trans, root);
4591 	btrfs_btree_balance_dirty(root);
4592 no_delete:
4593 	btrfs_remove_delayed_node(inode);
4594 	clear_inode(inode);
4595 	return;
4596 }
4597 
4598 /*
4599  * this returns the key found in the dir entry in the location pointer.
4600  * If no dir entries were found, location->objectid is 0.
4601  */
4602 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
4603 			       struct btrfs_key *location)
4604 {
4605 	const char *name = dentry->d_name.name;
4606 	int namelen = dentry->d_name.len;
4607 	struct btrfs_dir_item *di;
4608 	struct btrfs_path *path;
4609 	struct btrfs_root *root = BTRFS_I(dir)->root;
4610 	int ret = 0;
4611 
4612 	path = btrfs_alloc_path();
4613 	if (!path)
4614 		return -ENOMEM;
4615 
4616 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
4617 				    namelen, 0);
4618 	if (IS_ERR(di))
4619 		ret = PTR_ERR(di);
4620 
4621 	if (IS_ERR_OR_NULL(di))
4622 		goto out_err;
4623 
4624 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
4625 out:
4626 	btrfs_free_path(path);
4627 	return ret;
4628 out_err:
4629 	location->objectid = 0;
4630 	goto out;
4631 }
4632 
4633 /*
4634  * when we hit a tree root in a directory, the btrfs part of the inode
4635  * needs to be changed to reflect the root directory of the tree root.  This
4636  * is kind of like crossing a mount point.
4637  */
4638 static int fixup_tree_root_location(struct btrfs_root *root,
4639 				    struct inode *dir,
4640 				    struct dentry *dentry,
4641 				    struct btrfs_key *location,
4642 				    struct btrfs_root **sub_root)
4643 {
4644 	struct btrfs_path *path;
4645 	struct btrfs_root *new_root;
4646 	struct btrfs_root_ref *ref;
4647 	struct extent_buffer *leaf;
4648 	int ret;
4649 	int err = 0;
4650 
4651 	path = btrfs_alloc_path();
4652 	if (!path) {
4653 		err = -ENOMEM;
4654 		goto out;
4655 	}
4656 
4657 	err = -ENOENT;
4658 	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
4659 				  BTRFS_I(dir)->root->root_key.objectid,
4660 				  location->objectid);
4661 	if (ret) {
4662 		if (ret < 0)
4663 			err = ret;
4664 		goto out;
4665 	}
4666 
4667 	leaf = path->nodes[0];
4668 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
4669 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
4670 	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
4671 		goto out;
4672 
4673 	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
4674 				   (unsigned long)(ref + 1),
4675 				   dentry->d_name.len);
4676 	if (ret)
4677 		goto out;
4678 
4679 	btrfs_release_path(path);
4680 
4681 	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
4682 	if (IS_ERR(new_root)) {
4683 		err = PTR_ERR(new_root);
4684 		goto out;
4685 	}
4686 
4687 	*sub_root = new_root;
4688 	location->objectid = btrfs_root_dirid(&new_root->root_item);
4689 	location->type = BTRFS_INODE_ITEM_KEY;
4690 	location->offset = 0;
4691 	err = 0;
4692 out:
4693 	btrfs_free_path(path);
4694 	return err;
4695 }
4696 
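/*
 * Link this inode into the per-root rbtree of in-memory inodes, keyed
 * by inode number.  An entry that is being freed (I_WILL_FREE/I_FREEING)
 * may still be present and is simply replaced.
 */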
4697 static void inode_tree_add(struct inode *inode)
4698 {
4699 	struct btrfs_root *root = BTRFS_I(inode)->root;
4700 	struct btrfs_inode *entry;
4701 	struct rb_node **p;
4702 	struct rb_node *parent;
4703 	struct rb_node *new = &BTRFS_I(inode)->rb_node;
4704 	u64 ino = btrfs_ino(inode);
4705 
4706 	if (inode_unhashed(inode))
4707 		return;
4708 	parent = NULL;
4709 	spin_lock(&root->inode_lock);
4710 	p = &root->inode_tree.rb_node;
4711 	while (*p) {
4712 		parent = *p;
4713 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
4714 
4715 		if (ino < btrfs_ino(&entry->vfs_inode))
4716 			p = &parent->rb_left;
4717 		else if (ino > btrfs_ino(&entry->vfs_inode))
4718 			p = &parent->rb_right;
4719 		else {
4720 			WARN_ON(!(entry->vfs_inode.i_state &
4721 				  (I_WILL_FREE | I_FREEING)));
4722 			rb_replace_node(parent, new, &root->inode_tree);
4723 			RB_CLEAR_NODE(parent);
4724 			spin_unlock(&root->inode_lock);
4725 			return;
4726 		}
4727 	}
4728 	rb_link_node(new, parent, p);
4729 	rb_insert_color(new, &root->inode_tree);
4730 	spin_unlock(&root->inode_lock);
4731 }
4732 
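/*
 * Remove the inode from the per-root rbtree; if this empties the tree
 * of a root that has no more references, queue the root for cleanup.
 */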
4733 static void inode_tree_del(struct inode *inode)
4734 {
4735 	struct btrfs_root *root = BTRFS_I(inode)->root;
4736 	int empty = 0;
4737 
4738 	spin_lock(&root->inode_lock);
4739 	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
4740 		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
4741 		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
4742 		empty = RB_EMPTY_ROOT(&root->inode_tree);
4743 	}
4744 	spin_unlock(&root->inode_lock);
4745 
4746 	if (empty && btrfs_root_refs(&root->root_item) == 0) {
4747 		synchronize_srcu(&root->fs_info->subvol_srcu);
4748 		spin_lock(&root->inode_lock);
4749 		empty = RB_EMPTY_ROOT(&root->inode_tree);
4750 		spin_unlock(&root->inode_lock);
4751 		if (empty)
4752 			btrfs_add_dead_root(root);
4753 	}
4754 }
4755 
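/*
 * Walk the inode rbtree in objectid order, pruning dcache aliases and
 * dropping our references so that every inode of a dead root can be
 * evicted.
 */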
4756 void btrfs_invalidate_inodes(struct btrfs_root *root)
4757 {
4758 	struct rb_node *node;
4759 	struct rb_node *prev;
4760 	struct btrfs_inode *entry;
4761 	struct inode *inode;
4762 	u64 objectid = 0;
4763 
4764 	WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4765 
4766 	spin_lock(&root->inode_lock);
4767 again:
4768 	node = root->inode_tree.rb_node;
4769 	prev = NULL;
4770 	while (node) {
4771 		prev = node;
4772 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4773 
4774 		if (objectid < btrfs_ino(&entry->vfs_inode))
4775 			node = node->rb_left;
4776 		else if (objectid > btrfs_ino(&entry->vfs_inode))
4777 			node = node->rb_right;
4778 		else
4779 			break;
4780 	}
4781 	if (!node) {
4782 		while (prev) {
4783 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
4784 			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
4785 				node = prev;
4786 				break;
4787 			}
4788 			prev = rb_next(prev);
4789 		}
4790 	}
4791 	while (node) {
4792 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4793 		objectid = btrfs_ino(&entry->vfs_inode) + 1;
4794 		inode = igrab(&entry->vfs_inode);
4795 		if (inode) {
4796 			spin_unlock(&root->inode_lock);
4797 			if (atomic_read(&inode->i_count) > 1)
4798 				d_prune_aliases(inode);
4799 			/*
4800 			 * btrfs_drop_inode will have it removed from
4801 			 * the inode cache when its usage count
4802 			 * hits zero.
4803 			 */
4804 			iput(inode);
4805 			cond_resched();
4806 			spin_lock(&root->inode_lock);
4807 			goto again;
4808 		}
4809 
4810 		if (cond_resched_lock(&root->inode_lock))
4811 			goto again;
4812 
4813 		node = rb_next(node);
4814 	}
4815 	spin_unlock(&root->inode_lock);
4816 }
4817 
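/*
 * Callbacks for iget5_locked(): inode numbers repeat across subvolumes,
 * so inodes are hashed and matched on the (objectid, root) pair.
 */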
4818 static int btrfs_init_locked_inode(struct inode *inode, void *p)
4819 {
4820 	struct btrfs_iget_args *args = p;
4821 	inode->i_ino = args->ino;
4822 	BTRFS_I(inode)->root = args->root;
4823 	return 0;
4824 }
4825 
4826 static int btrfs_find_actor(struct inode *inode, void *opaque)
4827 {
4828 	struct btrfs_iget_args *args = opaque;
4829 	return args->ino == btrfs_ino(inode) &&
4830 		args->root == BTRFS_I(inode)->root;
4831 }
4832 
4833 static struct inode *btrfs_iget_locked(struct super_block *s,
4834 				       u64 objectid,
4835 				       struct btrfs_root *root)
4836 {
4837 	struct inode *inode;
4838 	struct btrfs_iget_args args;
4839 	unsigned long hashval = btrfs_inode_hash(objectid, root);
4840 
4841 	args.ino = objectid;
4842 	args.root = root;
4843 
4844 	inode = iget5_locked(s, hashval, btrfs_find_actor,
4845 			     btrfs_init_locked_inode,
4846 			     (void *)&args);
4847 	return inode;
4848 }
4849 
4850 /* Get an inode object given its location and corresponding root.
4851  * Returns in *new whether the inode was read from disk.
4852  */
4853 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
4854 			 struct btrfs_root *root, int *new)
4855 {
4856 	struct inode *inode;
4857 
4858 	inode = btrfs_iget_locked(s, location->objectid, root);
4859 	if (!inode)
4860 		return ERR_PTR(-ENOMEM);
4861 
4862 	if (inode->i_state & I_NEW) {
4863 		BTRFS_I(inode)->root = root;
4864 		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
4865 		btrfs_read_locked_inode(inode);
4866 		if (!is_bad_inode(inode)) {
4867 			inode_tree_add(inode);
4868 			unlock_new_inode(inode);
4869 			if (new)
4870 				*new = 1;
4871 		} else {
4872 			unlock_new_inode(inode);
4873 			iput(inode);
4874 			inode = ERR_PTR(-ESTALE);
4875 		}
4876 	}
4877 
4878 	return inode;
4879 }
4880 
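/*
 * Build a dummy, in-memory only directory inode (BTRFS_INODE_DUMMY) to
 * stand in for a subvolume reference that cannot be resolved; it is
 * never written back to disk.
 */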
4881 static struct inode *new_simple_dir(struct super_block *s,
4882 				    struct btrfs_key *key,
4883 				    struct btrfs_root *root)
4884 {
4885 	struct inode *inode = new_inode(s);
4886 
4887 	if (!inode)
4888 		return ERR_PTR(-ENOMEM);
4889 
4890 	BTRFS_I(inode)->root = root;
4891 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
4892 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
4893 
4894 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
4895 	inode->i_op = &btrfs_dir_ro_inode_operations;
4896 	inode->i_fop = &simple_dir_operations;
4897 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
4898 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4899 
4900 	return inode;
4901 }
4902 
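/*
 * Core of lookup: resolve a name to a key.  INODE_ITEM keys resolve to
 * inodes in this root; ROOT_ITEM keys cross into another subvolume via
 * fixup_tree_root_location(), running orphan cleanup on the way in when
 * the fs is writable.
 */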
4903 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
4904 {
4905 	struct inode *inode;
4906 	struct btrfs_root *root = BTRFS_I(dir)->root;
4907 	struct btrfs_root *sub_root = root;
4908 	struct btrfs_key location;
4909 	int index;
4910 	int ret = 0;
4911 
4912 	if (dentry->d_name.len > BTRFS_NAME_LEN)
4913 		return ERR_PTR(-ENAMETOOLONG);
4914 
4915 	ret = btrfs_inode_by_name(dir, dentry, &location);
4916 	if (ret < 0)
4917 		return ERR_PTR(ret);
4918 
4919 	if (location.objectid == 0)
4920 		return NULL;
4921 
4922 	if (location.type == BTRFS_INODE_ITEM_KEY) {
4923 		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
4924 		return inode;
4925 	}
4926 
4927 	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
4928 
4929 	index = srcu_read_lock(&root->fs_info->subvol_srcu);
4930 	ret = fixup_tree_root_location(root, dir, dentry,
4931 				       &location, &sub_root);
4932 	if (ret < 0) {
4933 		if (ret != -ENOENT)
4934 			inode = ERR_PTR(ret);
4935 		else
4936 			inode = new_simple_dir(dir->i_sb, &location, sub_root);
4937 	} else {
4938 		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
4939 	}
4940 	srcu_read_unlock(&root->fs_info->subvol_srcu, index);
4941 
4942 	if (!IS_ERR(inode) && root != sub_root) {
4943 		down_read(&root->fs_info->cleanup_work_sem);
4944 		if (!(inode->i_sb->s_flags & MS_RDONLY))
4945 			ret = btrfs_orphan_cleanup(sub_root);
4946 		up_read(&root->fs_info->cleanup_work_sem);
4947 		if (ret) {
4948 			iput(inode);
4949 			inode = ERR_PTR(ret);
4950 		}
4951 	}
4952 
4953 	return inode;
4954 }
4955 
4956 static int btrfs_dentry_delete(const struct dentry *dentry)
4957 {
4958 	struct btrfs_root *root;
4959 	struct inode *inode = dentry->d_inode;
4960 
4961 	if (!inode && !IS_ROOT(dentry))
4962 		inode = dentry->d_parent->d_inode;
4963 
4964 	if (inode) {
4965 		root = BTRFS_I(inode)->root;
4966 		if (btrfs_root_refs(&root->root_item) == 0)
4967 			return 1;
4968 
4969 		if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
4970 			return 1;
4971 	}
4972 	return 0;
4973 }
4974 
4975 static void btrfs_dentry_release(struct dentry *dentry)
4976 {
4977 	/* kfree(NULL) is a no-op, no need to check first */
4978 	kfree(dentry->d_fsdata);
4979 }
4980 
4981 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
4982 				   unsigned int flags)
4983 {
4984 	struct dentry *ret;
4985 
4986 	ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
4987 	return ret;
4988 }
4989 
4990 unsigned char btrfs_filetype_table[] = {
4991 	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
4992 };
4993 
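/*
 * readdir: iterate DIR_INDEX items (DIR_ITEM for the tree root) starting
 * at ctx->pos, merging in the delayed insertion/deletion lists so entries
 * that only exist as delayed items are still reported.
 */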
4994 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
4995 {
4996 	struct inode *inode = file_inode(file);
4997 	struct btrfs_root *root = BTRFS_I(inode)->root;
4998 	struct btrfs_item *item;
4999 	struct btrfs_dir_item *di;
5000 	struct btrfs_key key;
5001 	struct btrfs_key found_key;
5002 	struct btrfs_path *path;
5003 	struct list_head ins_list;
5004 	struct list_head del_list;
5005 	int ret;
5006 	struct extent_buffer *leaf;
5007 	int slot;
5008 	unsigned char d_type;
5009 	int over = 0;
5010 	u32 di_cur;
5011 	u32 di_total;
5012 	u32 di_len;
5013 	int key_type = BTRFS_DIR_INDEX_KEY;
5014 	char tmp_name[32];
5015 	char *name_ptr;
5016 	int name_len;
5017 	int is_curr = 0;	/* ctx->pos points to the current index? */
5018 
5019 	/* FIXME, use a real flag for deciding about the key type */
5020 	if (root->fs_info->tree_root == root)
5021 		key_type = BTRFS_DIR_ITEM_KEY;
5022 
5023 	if (!dir_emit_dots(file, ctx))
5024 		return 0;
5025 
5026 	path = btrfs_alloc_path();
5027 	if (!path)
5028 		return -ENOMEM;
5029 
5030 	path->reada = 1;
5031 
5032 	if (key_type == BTRFS_DIR_INDEX_KEY) {
5033 		INIT_LIST_HEAD(&ins_list);
5034 		INIT_LIST_HEAD(&del_list);
5035 		btrfs_get_delayed_items(inode, &ins_list, &del_list);
5036 	}
5037 
5038 	btrfs_set_key_type(&key, key_type);
5039 	key.offset = ctx->pos;
5040 	key.objectid = btrfs_ino(inode);
5041 
5042 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5043 	if (ret < 0)
5044 		goto err;
5045 
5046 	while (1) {
5047 		leaf = path->nodes[0];
5048 		slot = path->slots[0];
5049 		if (slot >= btrfs_header_nritems(leaf)) {
5050 			ret = btrfs_next_leaf(root, path);
5051 			if (ret < 0)
5052 				goto err;
5053 			else if (ret > 0)
5054 				break;
5055 			continue;
5056 		}
5057 
5058 		item = btrfs_item_nr(slot);
5059 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
5060 
5061 		if (found_key.objectid != key.objectid)
5062 			break;
5063 		if (btrfs_key_type(&found_key) != key_type)
5064 			break;
5065 		if (found_key.offset < ctx->pos)
5066 			goto next;
5067 		if (key_type == BTRFS_DIR_INDEX_KEY &&
5068 		    btrfs_should_delete_dir_index(&del_list,
5069 						  found_key.offset))
5070 			goto next;
5071 
5072 		ctx->pos = found_key.offset;
5073 		is_curr = 1;
5074 
5075 		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
5076 		di_cur = 0;
5077 		di_total = btrfs_item_size(leaf, item);
5078 
5079 		while (di_cur < di_total) {
5080 			struct btrfs_key location;
5081 
5082 			if (verify_dir_item(root, leaf, di))
5083 				break;
5084 
5085 			name_len = btrfs_dir_name_len(leaf, di);
5086 			if (name_len <= sizeof(tmp_name)) {
5087 				name_ptr = tmp_name;
5088 			} else {
5089 				name_ptr = kmalloc(name_len, GFP_NOFS);
5090 				if (!name_ptr) {
5091 					ret = -ENOMEM;
5092 					goto err;
5093 				}
5094 			}
5095 			read_extent_buffer(leaf, name_ptr,
5096 					   (unsigned long)(di + 1), name_len);
5097 
5098 			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
5099 			btrfs_dir_item_key_to_cpu(leaf, di, &location);
5100 
5101 
5102 			/* is this a reference to our own snapshot? If so
5103 			 * skip it.
5104 			 *
5105 			 * In contrast to old kernels, we insert the snapshot's
5106 			 * dir item and dir index after it has been created, so
5107 			 * we won't find a reference to our own snapshot. We
5108 			 * still keep the following code for backward
5109 			 * compatibility.
5110 			 */
5111 			if (location.type == BTRFS_ROOT_ITEM_KEY &&
5112 			    location.objectid == root->root_key.objectid) {
5113 				over = 0;
5114 				goto skip;
5115 			}
5116 			over = !dir_emit(ctx, name_ptr, name_len,
5117 				       location.objectid, d_type);
5118 
5119 skip:
5120 			if (name_ptr != tmp_name)
5121 				kfree(name_ptr);
5122 
5123 			if (over)
5124 				goto nopos;
5125 			di_len = btrfs_dir_name_len(leaf, di) +
5126 				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
5127 			di_cur += di_len;
5128 			di = (struct btrfs_dir_item *)((char *)di + di_len);
5129 		}
5130 next:
5131 		path->slots[0]++;
5132 	}
5133 
5134 	if (key_type == BTRFS_DIR_INDEX_KEY) {
5135 		if (is_curr)
5136 			ctx->pos++;
5137 		ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
5138 		if (ret)
5139 			goto nopos;
5140 	}
5141 
5142 	/* Reached end of directory/root. Bump pos past the last item. */
5143 	ctx->pos++;
5144 
5145 	/*
5146 	 * Stop new entries from being returned after we return the last
5147 	 * entry.
5148 	 *
5149 	 * New directory entries are assigned a strictly increasing
5150 	 * offset.  This means that new entries created during readdir
5151 	 * are *guaranteed* to be seen in the future by that readdir.
5152 	 * This has broken buggy programs which operate on names as
5153 	 * they're returned by readdir.  Until we re-use freed offsets
5154 	 * we have this hack to stop new entries from being returned
5155 	 * under the assumption that they'll never reach this huge
5156 	 * offset.
5157 	 *
5158 	 * This is being careful not to overflow 32bit loff_t unless the
5159 	 * last entry requires it because doing so has broken 32bit apps
5160 	 * in the past.
5161 	 */
5162 	if (key_type == BTRFS_DIR_INDEX_KEY) {
5163 		if (ctx->pos >= INT_MAX)
5164 			ctx->pos = LLONG_MAX;
5165 		else
5166 			ctx->pos = INT_MAX;
5167 	}
5168 nopos:
5169 	ret = 0;
5170 err:
5171 	if (key_type == BTRFS_DIR_INDEX_KEY)
5172 		btrfs_put_delayed_items(&ins_list, &del_list);
5173 	btrfs_free_path(path);
5174 	return ret;
5175 }
5176 
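/*
 * ->write_inode: the inode item rides in the btree, so for WB_SYNC_ALL
 * writeback the only way to make it durable is to commit the running
 * transaction.
 */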
5177 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
5178 {
5179 	struct btrfs_root *root = BTRFS_I(inode)->root;
5180 	struct btrfs_trans_handle *trans;
5181 	int ret = 0;
5182 	bool nolock = false;
5183 
5184 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5185 		return 0;
5186 
5187 	if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
5188 		nolock = true;
5189 
5190 	if (wbc->sync_mode == WB_SYNC_ALL) {
5191 		if (nolock)
5192 			trans = btrfs_join_transaction_nolock(root);
5193 		else
5194 			trans = btrfs_join_transaction(root);
5195 		if (IS_ERR(trans))
5196 			return PTR_ERR(trans);
5197 		ret = btrfs_commit_transaction(trans, root);
5198 	}
5199 	return ret;
5200 }
5201 
5202 /*
5203  * This is somewhat expensive, updating the tree every time the
5204  * inode changes.  But, it is most likely to find the inode in cache.
5205  * FIXME, needs more benchmarking...there are no reasons other than performance
5206  * to keep or drop this code.
5207  */
5208 static int btrfs_dirty_inode(struct inode *inode)
5209 {
5210 	struct btrfs_root *root = BTRFS_I(inode)->root;
5211 	struct btrfs_trans_handle *trans;
5212 	int ret;
5213 
5214 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5215 		return 0;
5216 
5217 	trans = btrfs_join_transaction(root);
5218 	if (IS_ERR(trans))
5219 		return PTR_ERR(trans);
5220 
5221 	ret = btrfs_update_inode(trans, root, inode);
5222 	if (ret == -ENOSPC) {
5223 		/* whoops, lets try again with the full transaction */
5224 		btrfs_end_transaction(trans, root);
5225 		trans = btrfs_start_transaction(root, 1);
5226 		if (IS_ERR(trans))
5227 			return PTR_ERR(trans);
5228 
5229 		ret = btrfs_update_inode(trans, root, inode);
5230 	}
5231 	btrfs_end_transaction(trans, root);
5232 	if (BTRFS_I(inode)->delayed_node)
5233 		btrfs_balance_delayed_items(root);
5234 
5235 	return ret;
5236 }
5237 
5238 /*
5239  * This is a copy of file_update_time.  We need this so we can return error on
5240  * ENOSPC for updating the inode in the case of file write and mmap writes.
5241  */
5242 static int btrfs_update_time(struct inode *inode, struct timespec *now,
5243 			     int flags)
5244 {
5245 	struct btrfs_root *root = BTRFS_I(inode)->root;
5246 
5247 	if (btrfs_root_readonly(root))
5248 		return -EROFS;
5249 
5250 	if (flags & S_VERSION)
5251 		inode_inc_iversion(inode);
5252 	if (flags & S_CTIME)
5253 		inode->i_ctime = *now;
5254 	if (flags & S_MTIME)
5255 		inode->i_mtime = *now;
5256 	if (flags & S_ATIME)
5257 		inode->i_atime = *now;
5258 	return btrfs_dirty_inode(inode);
5259 }
5260 
5261 /*
5262  * find the highest existing sequence number in a directory
5263  * and then set the in-memory index_cnt variable to reflect
5264  * free sequence numbers
5265  */
5266 static int btrfs_set_inode_index_count(struct inode *inode)
5267 {
5268 	struct btrfs_root *root = BTRFS_I(inode)->root;
5269 	struct btrfs_key key, found_key;
5270 	struct btrfs_path *path;
5271 	struct extent_buffer *leaf;
5272 	int ret;
5273 
5274 	key.objectid = btrfs_ino(inode);
5275 	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
5276 	key.offset = (u64)-1;
5277 
5278 	path = btrfs_alloc_path();
5279 	if (!path)
5280 		return -ENOMEM;
5281 
5282 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5283 	if (ret < 0)
5284 		goto out;
5285 	/* FIXME: we should be able to handle this */
5286 	if (ret == 0)
5287 		goto out;
5288 	ret = 0;
5289 
5290 	/*
5291 	 * MAGIC NUMBER EXPLANATION:
5292 	 * since we search a directory based on f_pos, and '.' and '..' have
5293 	 * f_pos of 0 and 1 respectively, everybody else has to start at
5294 	 * index 2
5295 	 */
5296 	if (path->slots[0] == 0) {
5297 		BTRFS_I(inode)->index_cnt = 2;
5298 		goto out;
5299 	}
5300 
5301 	path->slots[0]--;
5302 
5303 	leaf = path->nodes[0];
5304 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5305 
5306 	if (found_key.objectid != btrfs_ino(inode) ||
5307 	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
5308 		BTRFS_I(inode)->index_cnt = 2;
5309 		goto out;
5310 	}
5311 
5312 	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
5313 out:
5314 	btrfs_free_path(path);
5315 	return ret;
5316 }
5317 
5318 /*
5319  * helper to find a free sequence number in a given directory.  This current
5320  * code is very simple, later versions will do smarter things in the btree
5321  */
5322 int btrfs_set_inode_index(struct inode *dir, u64 *index)
5323 {
5324 	int ret = 0;
5325 
5326 	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
5327 		ret = btrfs_inode_delayed_dir_index_count(dir);
5328 		if (ret) {
5329 			ret = btrfs_set_inode_index_count(dir);
5330 			if (ret)
5331 				return ret;
5332 		}
5333 	}
5334 
5335 	*index = BTRFS_I(dir)->index_cnt;
5336 	BTRFS_I(dir)->index_cnt++;
5337 
5338 	return ret;
5339 }
5340 
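/*
 * Allocate a new in-memory inode and insert its INODE_ITEM and initial
 * INODE_REF into the tree in one shot.  *index returns the directory
 * sequence number for the new dir entry, e.g.:
 *
 *	inode = btrfs_new_inode(trans, root, dir, name, name_len,
 *				btrfs_ino(dir), objectid, mode, &index);
 */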
5341 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
5342 				     struct btrfs_root *root,
5343 				     struct inode *dir,
5344 				     const char *name, int name_len,
5345 				     u64 ref_objectid, u64 objectid,
5346 				     umode_t mode, u64 *index)
5347 {
5348 	struct inode *inode;
5349 	struct btrfs_inode_item *inode_item;
5350 	struct btrfs_key *location;
5351 	struct btrfs_path *path;
5352 	struct btrfs_inode_ref *ref;
5353 	struct btrfs_key key[2];
5354 	u32 sizes[2];
5355 	unsigned long ptr;
5356 	int ret;
5357 	int owner;
5358 
5359 	path = btrfs_alloc_path();
5360 	if (!path)
5361 		return ERR_PTR(-ENOMEM);
5362 
5363 	inode = new_inode(root->fs_info->sb);
5364 	if (!inode) {
5365 		btrfs_free_path(path);
5366 		return ERR_PTR(-ENOMEM);
5367 	}
5368 
5369 	/*
5370 	 * we have to initialize this early, so we can reclaim the inode
5371 	 * number if we fail afterwards in this function.
5372 	 */
5373 	inode->i_ino = objectid;
5374 
5375 	if (dir) {
5376 		trace_btrfs_inode_request(dir);
5377 
5378 		ret = btrfs_set_inode_index(dir, index);
5379 		if (ret) {
5380 			btrfs_free_path(path);
5381 			iput(inode);
5382 			return ERR_PTR(ret);
5383 		}
5384 	}
5385 	/*
5386 	 * index_cnt is ignored for everything but a dir,
5387 	 * btrfs_set_inode_index_count has an explanation for the magic
5388 	 * number
5389 	 */
5390 	BTRFS_I(inode)->index_cnt = 2;
5391 	BTRFS_I(inode)->root = root;
5392 	BTRFS_I(inode)->generation = trans->transid;
5393 	inode->i_generation = BTRFS_I(inode)->generation;
5394 
5395 	/*
5396 	 * We could have gotten an inode number from somebody who was fsynced
5397 	 * and then removed in this same transaction, so let's just set full
5398 	 * sync since it will be a full sync anyway and this will blow away the
5399 	 * old info in the log.
5400 	 */
5401 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
5402 
5403 	if (S_ISDIR(mode))
5404 		owner = 0;
5405 	else
5406 		owner = 1;
5407 
5408 	key[0].objectid = objectid;
5409 	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
5410 	key[0].offset = 0;
5411 
5412 	/*
5413 	 * Start new inodes with an inode_ref. This is slightly more
5414 	 * efficient for small numbers of hard links since they will
5415 	 * be packed into one item. Extended refs will kick in if we
5416 	 * add more hard links than can fit in the ref item.
5417 	 */
5418 	key[1].objectid = objectid;
5419 	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
5420 	key[1].offset = ref_objectid;
5421 
5422 	sizes[0] = sizeof(struct btrfs_inode_item);
5423 	sizes[1] = name_len + sizeof(*ref);
5424 
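	/*
	 * insert the inode item and the inode ref adjacently in a single
	 * batch so the tree is only searched once
	 */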
5425 	path->leave_spinning = 1;
5426 	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
5427 	if (ret != 0)
5428 		goto fail;
5429 
5430 	inode_init_owner(inode, dir, mode);
5431 	inode_set_bytes(inode, 0);
5432 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
5433 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
5434 				  struct btrfs_inode_item);
5435 	memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
5436 			     sizeof(*inode_item));
5437 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
5438 
5439 	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
5440 			     struct btrfs_inode_ref);
5441 	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
5442 	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
5443 	ptr = (unsigned long)(ref + 1);
5444 	write_extent_buffer(path->nodes[0], name, ptr, name_len);
5445 
5446 	btrfs_mark_buffer_dirty(path->nodes[0]);
5447 	btrfs_free_path(path);
5448 
5449 	location = &BTRFS_I(inode)->location;
5450 	location->objectid = objectid;
5451 	location->offset = 0;
5452 	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
5453 
5454 	btrfs_inherit_iflags(inode, dir);
5455 
5456 	if (S_ISREG(mode)) {
5457 		if (btrfs_test_opt(root, NODATASUM))
5458 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
5459 		if (btrfs_test_opt(root, NODATACOW))
5460 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
5461 				BTRFS_INODE_NODATASUM;
5462 	}
5463 
5464 	btrfs_insert_inode_hash(inode);
5465 	inode_tree_add(inode);
5466 
5467 	trace_btrfs_inode_new(inode);
5468 	btrfs_set_inode_last_trans(trans, inode);
5469 
5470 	btrfs_update_root_times(trans, root);
5471 
5472 	return inode;
5473 fail:
5474 	if (dir)
5475 		BTRFS_I(dir)->index_cnt--;
5476 	btrfs_free_path(path);
5477 	iput(inode);
5478 	return ERR_PTR(ret);
5479 }
5480 
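/* map an inode mode to the matching BTRFS_FT_* directory entry type */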
5481 static inline u8 btrfs_inode_type(struct inode *inode)
5482 {
5483 	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
5484 }
5485 
5486 /*
5487  * utility function to add 'inode' into 'parent_inode' with
5488  * a given name and a given sequence number.
5489  * if 'add_backref' is true, also insert a backref from the
5490  * inode to the parent directory.
5491  */
5492 int btrfs_add_link(struct btrfs_trans_handle *trans,
5493 		   struct inode *parent_inode, struct inode *inode,
5494 		   const char *name, int name_len, int add_backref, u64 index)
5495 {
5496 	int ret = 0;
5497 	struct btrfs_key key;
5498 	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
5499 	u64 ino = btrfs_ino(inode);
5500 	u64 parent_ino = btrfs_ino(parent_inode);
5501 
5502 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
5503 		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
5504 	} else {
5505 		key.objectid = ino;
5506 		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
5507 		key.offset = 0;
5508 	}
5509 
5510 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
5511 		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
5512 					 key.objectid, root->root_key.objectid,
5513 					 parent_ino, index, name, name_len);
5514 	} else if (add_backref) {
5515 		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
5516 					     parent_ino, index);
5517 	}
5518 
5519 	/* Nothing to clean up yet */
5520 	if (ret)
5521 		return ret;
5522 
5523 	ret = btrfs_insert_dir_item(trans, root, name, name_len,
5524 				    parent_inode, &key,
5525 				    btrfs_inode_type(inode), index);
5526 	if (ret == -EEXIST || ret == -EOVERFLOW)
5527 		goto fail_dir_item;
5528 	else if (ret) {
5529 		btrfs_abort_transaction(trans, root, ret);
5530 		return ret;
5531 	}
5532 
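	/*
	 * the name is stored twice, in the dir item and in the dir index
	 * item, which is why the directory size grows by name_len * 2
	 */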
5533 	btrfs_i_size_write(parent_inode, parent_inode->i_size +
5534 			   name_len * 2);
5535 	inode_inc_iversion(parent_inode);
5536 	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
5537 	ret = btrfs_update_inode(trans, root, parent_inode);
5538 	if (ret)
5539 		btrfs_abort_transaction(trans, root, ret);
5540 	return ret;
5541 
5542 fail_dir_item:
5543 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
5544 		u64 local_index;
5545 		int err;
5546 		err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
5547 				 key.objectid, root->root_key.objectid,
5548 				 parent_ino, &local_index, name, name_len);
5549 
5550 	} else if (add_backref) {
5551 		u64 local_index;
5552 		int err;
5553 
5554 		err = btrfs_del_inode_ref(trans, root, name, name_len,
5555 					  ino, parent_ino, &local_index);
5556 	}
5557 	return ret;
5558 }
5559 
5560 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
5561 			    struct inode *dir, struct dentry *dentry,
5562 			    struct inode *inode, int backref, u64 index)
5563 {
5564 	int err = btrfs_add_link(trans, dir, inode,
5565 				 dentry->d_name.name, dentry->d_name.len,
5566 				 backref, index);
5567 	if (err > 0)
5568 		err = -EEXIST;
5569 	return err;
5570 }
5571 
5572 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
5573 			umode_t mode, dev_t rdev)
5574 {
5575 	struct btrfs_trans_handle *trans;
5576 	struct btrfs_root *root = BTRFS_I(dir)->root;
5577 	struct inode *inode = NULL;
5578 	int err;
5579 	int drop_inode = 0;
5580 	u64 objectid;
5581 	u64 index = 0;
5582 
5583 	if (!new_valid_dev(rdev))
5584 		return -EINVAL;
5585 
5586 	/*
5587 	 * 2 for inode item and ref
5588 	 * 2 for dir items
5589 	 * 1 for xattr if selinux is on
5590 	 */
5591 	trans = btrfs_start_transaction(root, 5);
5592 	if (IS_ERR(trans))
5593 		return PTR_ERR(trans);
5594 
5595 	err = btrfs_find_free_ino(root, &objectid);
5596 	if (err)
5597 		goto out_unlock;
5598 
5599 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5600 				dentry->d_name.len, btrfs_ino(dir), objectid,
5601 				mode, &index);
5602 	if (IS_ERR(inode)) {
5603 		err = PTR_ERR(inode);
5604 		goto out_unlock;
5605 	}
5606 
5607 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5608 	if (err) {
5609 		drop_inode = 1;
5610 		goto out_unlock;
5611 	}
5612 
5613 	/*
5614 	 * If the active LSM wants to access the inode during
5615 	 * d_instantiate it needs these. Smack checks to see
5616 	 * if the filesystem supports xattrs by looking at the
5617 	 * ops vector.
5618 	 */
5619 
5620 	inode->i_op = &btrfs_special_inode_operations;
5621 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
5622 	if (err)
5623 		drop_inode = 1;
5624 	else {
5625 		init_special_inode(inode, inode->i_mode, rdev);
5626 		btrfs_update_inode(trans, root, inode);
5627 		d_instantiate(dentry, inode);
5628 	}
5629 out_unlock:
5630 	btrfs_end_transaction(trans, root);
5631 	btrfs_btree_balance_dirty(root);
5632 	if (drop_inode) {
5633 		inode_dec_link_count(inode);
5634 		iput(inode);
5635 	}
5636 	return err;
5637 }
5638 
5639 static int btrfs_create(struct inode *dir, struct dentry *dentry,
5640 			umode_t mode, bool excl)
5641 {
5642 	struct btrfs_trans_handle *trans;
5643 	struct btrfs_root *root = BTRFS_I(dir)->root;
5644 	struct inode *inode = NULL;
5645 	int drop_inode_on_err = 0;
5646 	int err;
5647 	u64 objectid;
5648 	u64 index = 0;
5649 
5650 	/*
5651 	 * 2 for inode item and ref
5652 	 * 2 for dir items
5653 	 * 1 for xattr if selinux is on
5654 	 */
5655 	trans = btrfs_start_transaction(root, 5);
5656 	if (IS_ERR(trans))
5657 		return PTR_ERR(trans);
5658 
5659 	err = btrfs_find_free_ino(root, &objectid);
5660 	if (err)
5661 		goto out_unlock;
5662 
5663 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5664 				dentry->d_name.len, btrfs_ino(dir), objectid,
5665 				mode, &index);
5666 	if (IS_ERR(inode)) {
5667 		err = PTR_ERR(inode);
5668 		goto out_unlock;
5669 	}
5670 	drop_inode_on_err = 1;
5671 
5672 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5673 	if (err)
5674 		goto out_unlock;
5675 
5676 	err = btrfs_update_inode(trans, root, inode);
5677 	if (err)
5678 		goto out_unlock;
5679 
5680 	/*
5681 	 * If the active LSM wants to access the inode during
5682 	 * d_instantiate it needs these. Smack checks to see
5683 	 * if the filesystem supports xattrs by looking at the
5684 	 * ops vector.
5685 	 */
5686 	inode->i_fop = &btrfs_file_operations;
5687 	inode->i_op = &btrfs_file_inode_operations;
5688 
5689 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
5690 	if (err)
5691 		goto out_unlock;
5692 
5693 	inode->i_mapping->a_ops = &btrfs_aops;
5694 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5695 	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
5696 	d_instantiate(dentry, inode);
5697 
5698 out_unlock:
5699 	btrfs_end_transaction(trans, root);
5700 	if (err && drop_inode_on_err) {
5701 		inode_dec_link_count(inode);
5702 		iput(inode);
5703 	}
5704 	btrfs_btree_balance_dirty(root);
5705 	return err;
5706 }
5707 
5708 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
5709 		      struct dentry *dentry)
5710 {
5711 	struct btrfs_trans_handle *trans;
5712 	struct btrfs_root *root = BTRFS_I(dir)->root;
5713 	struct inode *inode = old_dentry->d_inode;
5714 	u64 index;
5715 	int err;
5716 	int drop_inode = 0;
5717 
5718 	/* do not allow sys_link's with other subvols of the same device */
5719 	if (root->objectid != BTRFS_I(inode)->root->objectid)
5720 		return -EXDEV;
5721 
5722 	if (inode->i_nlink >= BTRFS_LINK_MAX)
5723 		return -EMLINK;
5724 
5725 	err = btrfs_set_inode_index(dir, &index);
5726 	if (err)
5727 		goto fail;
5728 
5729 	/*
5730 	 * 2 items for inode and inode ref
5731 	 * 2 items for dir items
5732 	 * 1 item for parent inode
5733 	 */
5734 	trans = btrfs_start_transaction(root, 5);
5735 	if (IS_ERR(trans)) {
5736 		err = PTR_ERR(trans);
5737 		goto fail;
5738 	}
5739 
5740 	inc_nlink(inode);
5741 	inode_inc_iversion(inode);
5742 	inode->i_ctime = CURRENT_TIME;
5743 	ihold(inode);
5744 	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
5745 
5746 	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
5747 
5748 	if (err) {
5749 		drop_inode = 1;
5750 	} else {
5751 		struct dentry *parent = dentry->d_parent;
5752 		err = btrfs_update_inode(trans, root, inode);
5753 		if (err)
5754 			goto fail;
5755 		d_instantiate(dentry, inode);
5756 		btrfs_log_new_name(trans, inode, NULL, parent);
5757 	}
5758 
5759 	btrfs_end_transaction(trans, root);
5760 fail:
5761 	if (drop_inode) {
5762 		inode_dec_link_count(inode);
5763 		iput(inode);
5764 	}
5765 	btrfs_btree_balance_dirty(root);
5766 	return err;
5767 }
5768 
5769 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
5770 {
5771 	struct inode *inode = NULL;
5772 	struct btrfs_trans_handle *trans;
5773 	struct btrfs_root *root = BTRFS_I(dir)->root;
5774 	int err = 0;
5775 	int drop_on_err = 0;
5776 	u64 objectid = 0;
5777 	u64 index = 0;
5778 
5779 	/*
5780 	 * 2 items for inode and ref
5781 	 * 2 items for dir items
5782 	 * 1 for xattr if selinux is on
5783 	 */
5784 	trans = btrfs_start_transaction(root, 5);
5785 	if (IS_ERR(trans))
5786 		return PTR_ERR(trans);
5787 
5788 	err = btrfs_find_free_ino(root, &objectid);
5789 	if (err)
5790 		goto out_fail;
5791 
5792 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5793 				dentry->d_name.len, btrfs_ino(dir), objectid,
5794 				S_IFDIR | mode, &index);
5795 	if (IS_ERR(inode)) {
5796 		err = PTR_ERR(inode);
5797 		goto out_fail;
5798 	}
5799 
5800 	drop_on_err = 1;
5801 
5802 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5803 	if (err)
5804 		goto out_fail;
5805 
5806 	inode->i_op = &btrfs_dir_inode_operations;
5807 	inode->i_fop = &btrfs_dir_file_operations;
5808 
5809 	btrfs_i_size_write(inode, 0);
5810 	err = btrfs_update_inode(trans, root, inode);
5811 	if (err)
5812 		goto out_fail;
5813 
5814 	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
5815 			     dentry->d_name.len, 0, index);
5816 	if (err)
5817 		goto out_fail;
5818 
5819 	d_instantiate(dentry, inode);
5820 	drop_on_err = 0;
5821 
5822 out_fail:
5823 	btrfs_end_transaction(trans, root);
5824 	if (drop_on_err)
5825 		iput(inode);
5826 	btrfs_btree_balance_dirty(root);
5827 	return err;
5828 }
5829 
5830 /* helper for btrfs_get_extent.  Given an existing extent in the tree,
5831  * and an extent that you want to insert, deal with overlap and insert
5832  * the new extent into the tree.
5833  */
5834 static int merge_extent_mapping(struct extent_map_tree *em_tree,
5835 				struct extent_map *existing,
5836 				struct extent_map *em,
5837 				u64 map_start, u64 map_len)
5838 {
5839 	u64 start_diff;
5840 
5841 	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
5842 	start_diff = map_start - em->start;
5843 	em->start = map_start;
5844 	em->len = map_len;
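	/*
	 * compressed extents always address the entire on-disk extent, so
	 * block_start is only shifted for regular, uncompressed mappings
	 */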
5845 	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
5846 	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
5847 		em->block_start += start_diff;
5848 		em->block_len -= start_diff;
5849 	}
5850 	return add_extent_mapping(em_tree, em, 0);
5851 }
5852 
5853 static noinline int uncompress_inline(struct btrfs_path *path,
5854 				      struct inode *inode, struct page *page,
5855 				      size_t pg_offset, u64 extent_offset,
5856 				      struct btrfs_file_extent_item *item)
5857 {
5858 	int ret;
5859 	struct extent_buffer *leaf = path->nodes[0];
5860 	char *tmp;
5861 	size_t max_size;
5862 	unsigned long inline_size;
5863 	unsigned long ptr;
5864 	int compress_type;
5865 
5866 	WARN_ON(pg_offset != 0);
5867 	compress_type = btrfs_file_extent_compression(leaf, item);
5868 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
5869 	inline_size = btrfs_file_extent_inline_item_len(leaf,
5870 					btrfs_item_nr(path->slots[0]));
5871 	tmp = kmalloc(inline_size, GFP_NOFS);
5872 	if (!tmp)
5873 		return -ENOMEM;
5874 	ptr = btrfs_file_extent_inline_start(item);
5875 
5876 	read_extent_buffer(leaf, tmp, ptr, inline_size);
5877 
5878 	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
5879 	ret = btrfs_decompress(compress_type, tmp, page,
5880 			       extent_offset, inline_size, max_size);
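	/*
	 * if decompression failed or came up short, zero the rest of the
	 * page rather than exposing stale data; note the error is not
	 * propagated, we still return 0 below
	 */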
5881 	if (ret) {
5882 		char *kaddr = kmap_atomic(page);
5883 		unsigned long copy_size = min_t(u64,
5884 				  PAGE_CACHE_SIZE - pg_offset,
5885 				  max_size - extent_offset);
5886 		memset(kaddr + pg_offset, 0, copy_size);
5887 		kunmap_atomic(kaddr);
5888 	}
5889 	kfree(tmp);
5890 	return 0;
5891 }
5892 
5893 /*
5894  * a bit scary, this does extent mapping from logical file offset to the disk.
5895  * the ugly parts come from merging extents from the disk with the in-ram
5896  * representation.  This gets more complex because of the data=ordered code,
5897  * where the in-ram extents might be locked pending data=ordered completion.
5898  *
5899  * This also copies inline extents directly into the page.
5900  */
5901 
5902 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
5903 				    size_t pg_offset, u64 start, u64 len,
5904 				    int create)
5905 {
5906 	int ret;
5907 	int err = 0;
5908 	u64 bytenr;
5909 	u64 extent_start = 0;
5910 	u64 extent_end = 0;
5911 	u64 objectid = btrfs_ino(inode);
5912 	u32 found_type;
5913 	struct btrfs_path *path = NULL;
5914 	struct btrfs_root *root = BTRFS_I(inode)->root;
5915 	struct btrfs_file_extent_item *item;
5916 	struct extent_buffer *leaf;
5917 	struct btrfs_key found_key;
5918 	struct extent_map *em = NULL;
5919 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5920 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5921 	struct btrfs_trans_handle *trans = NULL;
5922 	int compress_type;
5923 
5924 again:
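	/* consult the in-memory extent map cache before going to the btree */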
5925 	read_lock(&em_tree->lock);
5926 	em = lookup_extent_mapping(em_tree, start, len);
5927 	if (em)
5928 		em->bdev = root->fs_info->fs_devices->latest_bdev;
5929 	read_unlock(&em_tree->lock);
5930 
5931 	if (em) {
5932 		if (em->start > start || em->start + em->len <= start)
5933 			free_extent_map(em);
5934 		else if (em->block_start == EXTENT_MAP_INLINE && page)
5935 			free_extent_map(em);
5936 		else
5937 			goto out;
5938 	}
5939 	em = alloc_extent_map();
5940 	if (!em) {
5941 		err = -ENOMEM;
5942 		goto out;
5943 	}
5944 	em->bdev = root->fs_info->fs_devices->latest_bdev;
5945 	em->start = EXTENT_MAP_HOLE;
5946 	em->orig_start = EXTENT_MAP_HOLE;
5947 	em->len = (u64)-1;
5948 	em->block_len = (u64)-1;
5949 
5950 	if (!path) {
5951 		path = btrfs_alloc_path();
5952 		if (!path) {
5953 			err = -ENOMEM;
5954 			goto out;
5955 		}
5956 		/*
5957 		 * Chances are we'll be called again, so go ahead and do
5958 		 * readahead
5959 		 */
5960 		path->reada = 1;
5961 	}
5962 
5963 	ret = btrfs_lookup_file_extent(trans, root, path,
5964 				       objectid, start, trans != NULL);
5965 	if (ret < 0) {
5966 		err = ret;
5967 		goto out;
5968 	}
5969 
5970 	if (ret != 0) {
5971 		if (path->slots[0] == 0)
5972 			goto not_found;
5973 		path->slots[0]--;
5974 	}
5975 
5976 	leaf = path->nodes[0];
5977 	item = btrfs_item_ptr(leaf, path->slots[0],
5978 			      struct btrfs_file_extent_item);
5979 	/* are we inside the extent that was found? */
5980 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5981 	found_type = btrfs_key_type(&found_key);
5982 	if (found_key.objectid != objectid ||
5983 	    found_type != BTRFS_EXTENT_DATA_KEY) {
5984 		/*
5985 		 * If we back up past the first extent we want to move forward
5986 		 * and see if there is an extent in front of us, otherwise we'll
5987 		 * say there is a hole for our whole search range which can
5988 		 * cause problems.
5989 		 */
5990 		extent_end = start;
5991 		goto next;
5992 	}
5993 
5994 	found_type = btrfs_file_extent_type(leaf, item);
5995 	extent_start = found_key.offset;
5996 	compress_type = btrfs_file_extent_compression(leaf, item);
5997 	if (found_type == BTRFS_FILE_EXTENT_REG ||
5998 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
5999 		extent_end = extent_start +
6000 		       btrfs_file_extent_num_bytes(leaf, item);
6001 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6002 		size_t size;
6003 		size = btrfs_file_extent_inline_len(leaf, item);
6004 		extent_end = ALIGN(extent_start + size, root->sectorsize);
6005 	}
6006 next:
6007 	if (start >= extent_end) {
6008 		path->slots[0]++;
6009 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6010 			ret = btrfs_next_leaf(root, path);
6011 			if (ret < 0) {
6012 				err = ret;
6013 				goto out;
6014 			}
6015 			if (ret > 0)
6016 				goto not_found;
6017 			leaf = path->nodes[0];
6018 		}
6019 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6020 		if (found_key.objectid != objectid ||
6021 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
6022 			goto not_found;
6023 		if (start + len <= found_key.offset)
6024 			goto not_found;
6025 		em->start = start;
6026 		em->orig_start = start;
6027 		em->len = found_key.offset - start;
6028 		goto not_found_em;
6029 	}
6030 
6031 	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
6032 	if (found_type == BTRFS_FILE_EXTENT_REG ||
6033 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6034 		em->start = extent_start;
6035 		em->len = extent_end - extent_start;
6036 		em->orig_start = extent_start -
6037 				 btrfs_file_extent_offset(leaf, item);
6038 		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf,
6039 								      item);
6040 		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
6041 		if (bytenr == 0) {
6042 			em->block_start = EXTENT_MAP_HOLE;
6043 			goto insert;
6044 		}
6045 		if (compress_type != BTRFS_COMPRESS_NONE) {
6046 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
6047 			em->compress_type = compress_type;
6048 			em->block_start = bytenr;
6049 			em->block_len = em->orig_block_len;
6050 		} else {
6051 			bytenr += btrfs_file_extent_offset(leaf, item);
6052 			em->block_start = bytenr;
6053 			em->block_len = em->len;
6054 			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
6055 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
6056 		}
6057 		goto insert;
6058 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6059 		unsigned long ptr;
6060 		char *map;
6061 		size_t size;
6062 		size_t extent_offset;
6063 		size_t copy_size;
6064 
6065 		em->block_start = EXTENT_MAP_INLINE;
6066 		if (!page || create) {
6067 			em->start = extent_start;
6068 			em->len = extent_end - extent_start;
6069 			goto out;
6070 		}
6071 
6072 		size = btrfs_file_extent_inline_len(leaf, item);
6073 		extent_offset = page_offset(page) + pg_offset - extent_start;
6074 		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
6075 				size - extent_offset);
6076 		em->start = extent_start + extent_offset;
6077 		em->len = ALIGN(copy_size, root->sectorsize);
6078 		em->orig_block_len = em->len;
6079 		em->orig_start = em->start;
6080 		if (compress_type) {
6081 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
6082 			em->compress_type = compress_type;
6083 		}
6084 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
6085 		if (create == 0 && !PageUptodate(page)) {
6086 			if (btrfs_file_extent_compression(leaf, item) !=
6087 			    BTRFS_COMPRESS_NONE) {
6088 				ret = uncompress_inline(path, inode, page,
6089 							pg_offset,
6090 							extent_offset, item);
6091 				BUG_ON(ret); /* -ENOMEM */
6092 			} else {
6093 				map = kmap(page);
6094 				read_extent_buffer(leaf, map + pg_offset, ptr,
6095 						   copy_size);
6096 				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
6097 					memset(map + pg_offset + copy_size, 0,
6098 					       PAGE_CACHE_SIZE - pg_offset -
6099 					       copy_size);
6100 				}
6101 				kunmap(page);
6102 			}
6103 			flush_dcache_page(page);
6104 		} else if (create && PageUptodate(page)) {
6105 			BUG();
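			/*
			 * not expected to be reached: BUG() normally does not
			 * return, so the write-back path below is dead code
			 */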
6106 			if (!trans) {
6107 				kunmap(page);
6108 				free_extent_map(em);
6109 				em = NULL;
6110 
6111 				btrfs_release_path(path);
6112 				trans = btrfs_join_transaction(root);
6113 
6114 				if (IS_ERR(trans))
6115 					return ERR_CAST(trans);
6116 				goto again;
6117 			}
6118 			map = kmap(page);
6119 			write_extent_buffer(leaf, map + pg_offset, ptr,
6120 					    copy_size);
6121 			kunmap(page);
6122 			btrfs_mark_buffer_dirty(leaf);
6123 		}
6124 		set_extent_uptodate(io_tree, em->start,
6125 				    extent_map_end(em) - 1, NULL, GFP_NOFS);
6126 		goto insert;
6127 	} else {
6128 		WARN(1, KERN_ERR "btrfs unknown found_type %d\n", found_type);
6129 	}
6130 not_found:
6131 	em->start = start;
6132 	em->orig_start = start;
6133 	em->len = len;
6134 not_found_em:
6135 	em->block_start = EXTENT_MAP_HOLE;
6136 	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
6137 insert:
6138 	btrfs_release_path(path);
6139 	if (em->start > start || extent_map_end(em) <= start) {
6140 		btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
6141 			em->start, em->len, start, len);
6142 		err = -EIO;
6143 		goto out;
6144 	}
6145 
6146 	err = 0;
6147 	write_lock(&em_tree->lock);
6148 	ret = add_extent_mapping(em_tree, em, 0);
6149 	/* it is possible that someone inserted the extent into the tree
6150 	 * while we had the lock dropped.  It is also possible that
6151 	 * an overlapping map exists in the tree
6152 	 */
6153 	if (ret == -EEXIST) {
6154 		struct extent_map *existing;
6155 
6156 		ret = 0;
6157 
6158 		existing = lookup_extent_mapping(em_tree, start, len);
6159 		if (existing && (existing->start > start ||
6160 		    existing->start + existing->len <= start)) {
6161 			free_extent_map(existing);
6162 			existing = NULL;
6163 		}
6164 		if (!existing) {
6165 			existing = lookup_extent_mapping(em_tree, em->start,
6166 							 em->len);
6167 			if (existing) {
6168 				err = merge_extent_mapping(em_tree, existing,
6169 							   em, start,
6170 							   root->sectorsize);
6171 				free_extent_map(existing);
6172 				if (err) {
6173 					free_extent_map(em);
6174 					em = NULL;
6175 				}
6176 			} else {
6177 				err = -EIO;
6178 				free_extent_map(em);
6179 				em = NULL;
6180 			}
6181 		} else {
6182 			free_extent_map(em);
6183 			em = existing;
6184 			err = 0;
6185 		}
6186 	}
6187 	write_unlock(&em_tree->lock);
6188 out:
6189 
6190 	trace_btrfs_get_extent(root, em);
6191 
6192 	if (path)
6193 		btrfs_free_path(path);
6194 	if (trans) {
6195 		ret = btrfs_end_transaction(trans, root);
6196 		if (!err)
6197 			err = ret;
6198 	}
6199 	if (err) {
6200 		free_extent_map(em);
6201 		return ERR_PTR(err);
6202 	}
6203 	BUG_ON(!em); /* Error is always set */
6204 	return em;
6205 }
6206 
6207 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
6208 					   size_t pg_offset, u64 start, u64 len,
6209 					   int create)
6210 {
6211 	struct extent_map *em;
6212 	struct extent_map *hole_em = NULL;
6213 	u64 range_start = start;
6214 	u64 end;
6215 	u64 found;
6216 	u64 found_end;
6217 	int err = 0;
6218 
6219 	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
6220 	if (IS_ERR(em))
6221 		return em;
6222 	if (em) {
6223 		/*
6224 		 * if our em maps to
6225 		 * -  a hole or
6226 		 * -  a pre-alloc extent,
6227 		 * there might actually be delalloc bytes behind it.
6228 		 */
6229 		if (em->block_start != EXTENT_MAP_HOLE &&
6230 		    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6231 			return em;
6232 		else
6233 			hole_em = em;
6234 	}
6235 
6236 	/* check to see if we've wrapped (len == -1 or similar) */
6237 	end = start + len;
6238 	if (end < start)
6239 		end = (u64)-1;
6240 	else
6241 		end -= 1;
6242 
6243 	em = NULL;
6244 
6245 	/* ok, we didn't find anything, let's look for delalloc */
6246 	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
6247 				 end, len, EXTENT_DELALLOC, 1);
6248 	found_end = range_start + found;
6249 	if (found_end < range_start)
6250 		found_end = (u64)-1;
6251 
6252 	/*
6253 	 * we didn't find anything useful, return
6254 	 * the original results from get_extent()
6255 	 */
6256 	if (range_start > end || found_end <= start) {
6257 		em = hole_em;
6258 		hole_em = NULL;
6259 		goto out;
6260 	}
6261 
6262 	/* adjust the range_start to make sure it doesn't
6263 	 * go backwards from the start they passed in
6264 	 */
6265 	range_start = max(start, range_start);
6266 	found = found_end - range_start;
6267 
6268 	if (found > 0) {
6269 		u64 hole_start = start;
6270 		u64 hole_len = len;
6271 
6272 		em = alloc_extent_map();
6273 		if (!em) {
6274 			err = -ENOMEM;
6275 			goto out;
6276 		}
6277 		/*
6278 		 * when btrfs_get_extent can't find anything it
6279 		 * returns one huge hole
6280 		 *
6281 		 * make sure what it found really fits our range, and
6282 		 * adjust to make sure it is based on the start from
6283 		 * the caller
6284 		 */
6285 		if (hole_em) {
6286 			u64 calc_end = extent_map_end(hole_em);
6287 
6288 			if (calc_end <= start || (hole_em->start > end)) {
6289 				free_extent_map(hole_em);
6290 				hole_em = NULL;
6291 			} else {
6292 				hole_start = max(hole_em->start, start);
6293 				hole_len = calc_end - hole_start;
6294 			}
6295 		}
6296 		em->bdev = NULL;
6297 		if (hole_em && range_start > hole_start) {
6298 			/* our hole starts before our delalloc, so we
6299 			 * have to return just the parts of the hole
6300 			 * that go until the delalloc starts
6301 			 */
6302 			em->len = min(hole_len,
6303 				      range_start - hole_start);
6304 			em->start = hole_start;
6305 			em->orig_start = hole_start;
6306 			/*
6307 			 * don't adjust block start at all,
6308 			 * it is fixed at EXTENT_MAP_HOLE
6309 			 */
6310 			em->block_start = hole_em->block_start;
6311 			em->block_len = hole_len;
6312 			if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
6313 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
6314 		} else {
6315 			em->start = range_start;
6316 			em->len = found;
6317 			em->orig_start = range_start;
6318 			em->block_start = EXTENT_MAP_DELALLOC;
6319 			em->block_len = found;
6320 		}
6321 	} else if (hole_em) {
6322 		return hole_em;
6323 	}
6324 out:
6325 
6326 	free_extent_map(hole_em);
6327 	if (err) {
6328 		free_extent_map(em);
6329 		return ERR_PTR(err);
6330 	}
6331 	return em;
6332 }
6333 
6334 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
6335 						  u64 start, u64 len)
6336 {
6337 	struct btrfs_root *root = BTRFS_I(inode)->root;
6338 	struct extent_map *em;
6339 	struct btrfs_key ins;
6340 	u64 alloc_hint;
6341 	int ret;
6342 
6343 	alloc_hint = get_extent_allocation_hint(inode, start, len);
6344 	ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
6345 				   alloc_hint, &ins, 1);
6346 	if (ret)
6347 		return ERR_PTR(ret);
6348 
6349 	em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
6350 			      ins.offset, ins.offset, ins.offset, 0);
6351 	if (IS_ERR(em)) {
6352 		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
6353 		return em;
6354 	}
6355 
6356 	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
6357 					   ins.offset, ins.offset, 0);
6358 	if (ret) {
6359 		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
6360 		free_extent_map(em);
6361 		return ERR_PTR(ret);
6362 	}
6363 
6364 	return em;
6365 }
6366 
6367 /*
6368  * returns 1 when the nocow is safe, < 0 on error, 0 if the
6369  * block must be cow'd
6370  */
6371 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
6372 			      u64 *orig_start, u64 *orig_block_len,
6373 			      u64 *ram_bytes)
6374 {
6375 	struct btrfs_trans_handle *trans;
6376 	struct btrfs_path *path;
6377 	int ret;
6378 	struct extent_buffer *leaf;
6379 	struct btrfs_root *root = BTRFS_I(inode)->root;
6380 	struct btrfs_file_extent_item *fi;
6381 	struct btrfs_key key;
6382 	u64 disk_bytenr;
6383 	u64 backref_offset;
6384 	u64 extent_end;
6385 	u64 num_bytes;
6386 	int slot;
6387 	int found_type;
6388 	bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);

6389 	path = btrfs_alloc_path();
6390 	if (!path)
6391 		return -ENOMEM;
6392 
6393 	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
6394 				       offset, 0);
6395 	if (ret < 0)
6396 		goto out;
6397 
6398 	slot = path->slots[0];
6399 	if (ret == 1) {
6400 		if (slot == 0) {
6401 			/* can't find the item, must cow */
6402 			ret = 0;
6403 			goto out;
6404 		}
6405 		slot--;
6406 	}
6407 	ret = 0;
6408 	leaf = path->nodes[0];
6409 	btrfs_item_key_to_cpu(leaf, &key, slot);
6410 	if (key.objectid != btrfs_ino(inode) ||
6411 	    key.type != BTRFS_EXTENT_DATA_KEY) {
6412 		/* not our file or wrong item type, must cow */
6413 		goto out;
6414 	}
6415 
6416 	if (key.offset > offset) {
6417 		/* Wrong offset, must cow */
6418 		goto out;
6419 	}
6420 
6421 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
6422 	found_type = btrfs_file_extent_type(leaf, fi);
6423 	if (found_type != BTRFS_FILE_EXTENT_REG &&
6424 	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
6425 		/* not a regular extent, must cow */
6426 		goto out;
6427 	}
6428 
6429 	if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
6430 		goto out;
6431 
6432 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
6433 	if (disk_bytenr == 0)
6434 		goto out;
6435 
6436 	if (btrfs_file_extent_compression(leaf, fi) ||
6437 	    btrfs_file_extent_encryption(leaf, fi) ||
6438 	    btrfs_file_extent_other_encoding(leaf, fi))
6439 		goto out;
6440 
6441 	backref_offset = btrfs_file_extent_offset(leaf, fi);
6442 
6443 	if (orig_start) {
6444 		*orig_start = key.offset - backref_offset;
6445 		*orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
6446 		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
6447 	}
6448 
6449 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
6450 
6451 	if (btrfs_extent_readonly(root, disk_bytenr))
6452 		goto out;
6453 	btrfs_release_path(path);
6454 
6455 	/*
6456 	 * look for other files referencing this extent, if we
6457 	 * find any we must cow
6458 	 */
6459 	trans = btrfs_join_transaction(root);
6460 	if (IS_ERR(trans)) {
6461 		ret = 0;
6462 		goto out;
6463 	}
6464 
6465 	ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
6466 				    key.offset - backref_offset, disk_bytenr);
6467 	btrfs_end_transaction(trans, root);
6468 	if (ret) {
6469 		ret = 0;
6470 		goto out;
6471 	}
6472 
6473 	/*
6474 	 * adjust disk_bytenr and num_bytes to cover just the bytes
6475 	 * in this extent we are about to write.  If there
6476 	 * are any csums in that range we have to cow in order
6477 	 * to keep the csums correct
6478 	 */
6479 	disk_bytenr += backref_offset;
6480 	disk_bytenr += offset - key.offset;
6481 	num_bytes = min(offset + *len, extent_end) - offset;
6482 	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
6483 		goto out;
6484 	/*
6485 	 * all of the above have passed, it is safe to overwrite this extent
6486 	 * without cow
6487 	 */
6488 	*len = num_bytes;
6489 	ret = 1;
6490 out:
6491 	btrfs_free_path(path);
6492 	return ret;
6493 }
6494 
6495 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
6496 			      struct extent_state **cached_state, int writing)
6497 {
6498 	struct btrfs_ordered_extent *ordered;
6499 	int ret = 0;
6500 
6501 	while (1) {
6502 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6503 				 0, cached_state);
6504 		/*
6505 		 * We're concerned with the entire range that we're going to be
6506 		 * doing DIO to, so we need to make sure there are no ordered
6507 		 * extents in this range.
6508 		 */
6509 		ordered = btrfs_lookup_ordered_range(inode, lockstart,
6510 						     lockend - lockstart + 1);
6511 
6512 		/*
6513 		 * We need to make sure there are no buffered pages in this
6514 		 * range either, we could have raced between the invalidate in
6515 		 * generic_file_direct_write and locking the extent.  The
6516 		 * invalidate needs to happen so that reads after a write do not
6517 		 * get stale data.
6518 		 */
6519 		if (!ordered && (!writing ||
6520 		    !test_range_bit(&BTRFS_I(inode)->io_tree,
6521 				    lockstart, lockend, EXTENT_UPTODATE, 0,
6522 				    *cached_state)))
6523 			break;
6524 
6525 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6526 				     cached_state, GFP_NOFS);
6527 
6528 		if (ordered) {
6529 			btrfs_start_ordered_extent(inode, ordered, 1);
6530 			btrfs_put_ordered_extent(ordered);
6531 		} else {
6532 			/* Screw you mmap */
6533 			ret = filemap_write_and_wait_range(inode->i_mapping,
6534 							   lockstart,
6535 							   lockend);
6536 			if (ret)
6537 				break;
6538 
6539 			/*
6540 			 * If we found a page that couldn't be invalidated just
6541 			 * fall back to buffered.
6542 			 */
6543 			ret = invalidate_inode_pages2_range(inode->i_mapping,
6544 					lockstart >> PAGE_CACHE_SHIFT,
6545 					lockend >> PAGE_CACHE_SHIFT);
6546 			if (ret)
6547 				break;
6548 		}
6549 
6550 		cond_resched();
6551 	}
6552 
6553 	return ret;
6554 }
6555 
6556 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
6557 					   u64 len, u64 orig_start,
6558 					   u64 block_start, u64 block_len,
6559 					   u64 orig_block_len, u64 ram_bytes,
6560 					   int type)
6561 {
6562 	struct extent_map_tree *em_tree;
6563 	struct extent_map *em;
6564 	struct btrfs_root *root = BTRFS_I(inode)->root;
6565 	int ret;
6566 
6567 	em_tree = &BTRFS_I(inode)->extent_tree;
6568 	em = alloc_extent_map();
6569 	if (!em)
6570 		return ERR_PTR(-ENOMEM);
6571 
6572 	em->start = start;
6573 	em->orig_start = orig_start;
6574 	em->mod_start = start;
6575 	em->mod_len = len;
6576 	em->len = len;
6577 	em->block_len = block_len;
6578 	em->block_start = block_start;
6579 	em->bdev = root->fs_info->fs_devices->latest_bdev;
6580 	em->orig_block_len = orig_block_len;
6581 	em->ram_bytes = ram_bytes;
6582 	em->generation = -1;
6583 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
6584 	if (type == BTRFS_ORDERED_PREALLOC)
6585 		set_bit(EXTENT_FLAG_FILLING, &em->flags);
6586 
6587 	do {
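	/*
	 * drop cached extent maps in the range and retry the insert;
	 * -EEXIST means a racing lookup re-added an overlapping map
	 * between the drop and the insert
	 */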
6588 		btrfs_drop_extent_cache(inode, em->start,
6589 				em->start + em->len - 1, 0);
6590 		write_lock(&em_tree->lock);
6591 		ret = add_extent_mapping(em_tree, em, 1);
6592 		write_unlock(&em_tree->lock);
6593 	} while (ret == -EEXIST);
6594 
6595 	if (ret) {
6596 		free_extent_map(em);
6597 		return ERR_PTR(ret);
6598 	}
6599 
6600 	return em;
6601 }
6602 
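/*
 * get_block callback for direct IO: map a file offset to a disk block for
 * __blockdev_direct_IO, allocating or reusing an extent when writing
 */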
6604 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
6605 				   struct buffer_head *bh_result, int create)
6606 {
6607 	struct extent_map *em;
6608 	struct btrfs_root *root = BTRFS_I(inode)->root;
6609 	struct extent_state *cached_state = NULL;
6610 	u64 start = iblock << inode->i_blkbits;
6611 	u64 lockstart, lockend;
6612 	u64 len = bh_result->b_size;
6613 	int unlock_bits = EXTENT_LOCKED;
6614 	int ret = 0;
6615 
6616 	if (create)
6617 		unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
6618 	else
6619 		len = min_t(u64, len, root->sectorsize);
6620 
6621 	lockstart = start;
6622 	lockend = start + len - 1;
6623 
6624 	/*
6625 	 * If this errors out it's because we couldn't invalidate pagecache for
6626 	 * this range and we need to fallback to buffered.
6627 	 */
6628 	if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
6629 		return -ENOTBLK;
6630 
6631 	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
6632 	if (IS_ERR(em)) {
6633 		ret = PTR_ERR(em);
6634 		goto unlock_err;
6635 	}
6636 
6637 	/*
6638 	 * Ok for INLINE and COMPRESSED extents we need to fall back on buffered
6639 	 * io.  INLINE is special, and we could probably kludge it in here, but
6640 	 * it's still buffered so for safety let's just fall back to the generic
6641 	 * buffered path.
6642 	 *
6643 	 * For COMPRESSED we _have_ to read the entire extent in so we can
6644 	 * decompress it, so there will be buffering required no matter what we
6645 	 * do, so go ahead and fallback to buffered.
6646 	 *
6647 	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
6648 	 * to buffered IO.  Don't blame me, this is the price we pay for using
6649 	 * the generic code.
6650 	 */
6651 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
6652 	    em->block_start == EXTENT_MAP_INLINE) {
6653 		free_extent_map(em);
6654 		ret = -ENOTBLK;
6655 		goto unlock_err;
6656 	}
6657 
6658 	/* Just a good old fashioned hole, return */
6659 	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
6660 			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
6661 		free_extent_map(em);
6662 		goto unlock_err;
6663 	}
6664 
6665 	/*
6666 	 * We don't allocate a new extent in the following cases
6667 	 *
6668 	 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
6669 	 * existing extent.
6670 	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
6671 	 * just use the extent.
6672 	 *
6673 	 */
6674 	if (!create) {
6675 		len = min(len, em->len - (start - em->start));
6676 		lockstart = start + len;
6677 		goto unlock;
6678 	}
6679 
6680 	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
6681 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
6682 	     em->block_start != EXTENT_MAP_HOLE)) {
6683 		int type;
6684 		int ret;
6685 		u64 block_start, orig_start, orig_block_len, ram_bytes;
6686 
6687 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6688 			type = BTRFS_ORDERED_PREALLOC;
6689 		else
6690 			type = BTRFS_ORDERED_NOCOW;
6691 		len = min(len, em->len - (start - em->start));
6692 		block_start = em->block_start + (start - em->start);
6693 
6694 		if (can_nocow_extent(inode, start, &len, &orig_start,
6695 				     &orig_block_len, &ram_bytes) == 1) {
6696 			if (type == BTRFS_ORDERED_PREALLOC) {
6697 				free_extent_map(em);
6698 				em = create_pinned_em(inode, start, len,
6699 						       orig_start,
6700 						       block_start, len,
6701 						       orig_block_len,
6702 						       ram_bytes, type);
6703 				if (IS_ERR(em))
6704 					goto unlock_err;
6705 			}
6706 
6707 			ret = btrfs_add_ordered_extent_dio(inode, start,
6708 					   block_start, len, len, type);
6709 			if (ret) {
6710 				free_extent_map(em);
6711 				goto unlock_err;
6712 			}
6713 			goto unlock;
6714 		}
6715 	}
6716 
6717 	/*
6718 	 * this will cow the extent, reset the len in case we changed
6719 	 * it above
6720 	 */
6721 	len = bh_result->b_size;
6722 	free_extent_map(em);
6723 	em = btrfs_new_extent_direct(inode, start, len);
6724 	if (IS_ERR(em)) {
6725 		ret = PTR_ERR(em);
6726 		goto unlock_err;
6727 	}
6728 	len = min(len, em->len - (start - em->start));
6729 unlock:
6730 	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
6731 		inode->i_blkbits;
6732 	bh_result->b_size = len;
6733 	bh_result->b_bdev = em->bdev;
6734 	set_buffer_mapped(bh_result);
6735 	if (create) {
6736 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6737 			set_buffer_new(bh_result);
6738 
6739 		/*
6740 		 * Need to update the i_size under the extent lock so buffered
6741 		 * readers will get the updated i_size when we unlock.
6742 		 */
6743 		if (start + len > i_size_read(inode))
6744 			i_size_write(inode, start + len);
6745 
6746 		spin_lock(&BTRFS_I(inode)->lock);
6747 		BTRFS_I(inode)->outstanding_extents++;
6748 		spin_unlock(&BTRFS_I(inode)->lock);
6749 
6750 		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6751 				     lockstart + len - 1, EXTENT_DELALLOC, NULL,
6752 				     &cached_state, GFP_NOFS);
6753 		BUG_ON(ret);
6754 	}
6755 
6756 	/*
6757 	 * In the case of a write we need to clear and unlock the entire range;
6758 	 * in the case of a read we only need to unlock the tail area that we
6759 	 * aren't using, if there is any leftover space.
6760 	 */
6761 	if (lockstart < lockend) {
6762 		clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6763 				 lockend, unlock_bits, 1, 0,
6764 				 &cached_state, GFP_NOFS);
6765 	} else {
6766 		free_extent_state(cached_state);
6767 	}
6768 
6769 	free_extent_map(em);
6770 
6771 	return 0;
6772 
6773 unlock_err:
6774 	clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6775 			 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
6776 	return ret;
6777 }
6778 
6779 static void btrfs_endio_direct_read(struct bio *bio, int err)
6780 {
6781 	struct btrfs_dio_private *dip = bio->bi_private;
6782 	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
6783 	struct bio_vec *bvec = bio->bi_io_vec;
6784 	struct inode *inode = dip->inode;
6785 	struct btrfs_root *root = BTRFS_I(inode)->root;
6786 	struct bio *dio_bio;
6787 	u32 *csums = (u32 *)dip->csum;
6788 	int index = 0;
6789 	u64 start;
6790 
6791 	start = dip->logical_offset;
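	/*
	 * verify each bio_vec against the csums stashed in dip->csum at
	 * submit time, unless the inode is flagged NODATASUM
	 */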
6792 	do {
6793 		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
6794 			struct page *page = bvec->bv_page;
6795 			char *kaddr;
6796 			u32 csum = ~(u32)0;
6797 			unsigned long flags;
6798 
6799 			local_irq_save(flags);
6800 			kaddr = kmap_atomic(page);
6801 			csum = btrfs_csum_data(kaddr + bvec->bv_offset,
6802 					       csum, bvec->bv_len);
6803 			btrfs_csum_final(csum, (char *)&csum);
6804 			kunmap_atomic(kaddr);
6805 			local_irq_restore(flags);
6806 
6807 			flush_dcache_page(bvec->bv_page);
6808 			if (csum != csums[index]) {
6809 				btrfs_err(root->fs_info, "csum failed ino %llu off %llu csum %u expected csum %u",
6810 					  btrfs_ino(inode), start, csum,
6811 					  csums[index]);
6812 				err = -EIO;
6813 			}
6814 		}
6815 
6816 		start += bvec->bv_len;
6817 		bvec++;
6818 		index++;
6819 	} while (bvec <= bvec_end);
6820 
6821 	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
6822 		      dip->logical_offset + dip->bytes - 1);
6823 	dio_bio = dip->dio_bio;
6824 
6825 	kfree(dip);
6826 
6827 	/* If we had a csum failure make sure to clear the uptodate flag */
6828 	if (err)
6829 		clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
6830 	dio_end_io(dio_bio, err);
6831 	bio_put(bio);
6832 }
6833 
6834 static void btrfs_endio_direct_write(struct bio *bio, int err)
6835 {
6836 	struct btrfs_dio_private *dip = bio->bi_private;
6837 	struct inode *inode = dip->inode;
6838 	struct btrfs_root *root = BTRFS_I(inode)->root;
6839 	struct btrfs_ordered_extent *ordered = NULL;
6840 	u64 ordered_offset = dip->logical_offset;
6841 	u64 ordered_bytes = dip->bytes;
6842 	struct bio *dio_bio;
6843 	int ret;
6844 
6845 	if (err)
6846 		goto out_done;
6847 again:
6848 	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
6849 						   &ordered_offset,
6850 						   ordered_bytes, !err);
6851 	if (!ret)
6852 		goto out_test;
6853 
6854 	ordered->work.func = finish_ordered_fn;
6855 	ordered->work.flags = 0;
6856 	btrfs_queue_worker(&root->fs_info->endio_write_workers,
6857 			   &ordered->work);
6858 out_test:
6859 	/*
6860 	 * our bio might span multiple ordered extents.  If we haven't
6861 	 * completed the accounting for the whole dio, go back and try again
6862 	 */
6863 	if (ordered_offset < dip->logical_offset + dip->bytes) {
6864 		ordered_bytes = dip->logical_offset + dip->bytes -
6865 			ordered_offset;
6866 		ordered = NULL;
6867 		goto again;
6868 	}
6869 out_done:
6870 	dio_bio = dip->dio_bio;
6871 
6872 	kfree(dip);
6873 
6874 	/* If we had an error make sure to clear the uptodate flag */
6875 	if (err)
6876 		clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
6877 	dio_end_io(dio_bio, err);
6878 	bio_put(bio);
6879 }
6880 
6881 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
6882 				    struct bio *bio, int mirror_num,
6883 				    unsigned long bio_flags, u64 offset)
6884 {
6885 	int ret;
6886 	struct btrfs_root *root = BTRFS_I(inode)->root;
6887 	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
6888 	BUG_ON(ret); /* -ENOMEM */
6889 	return 0;
6890 }
6891 
6892 static void btrfs_end_dio_bio(struct bio *bio, int err)
6893 {
6894 	struct btrfs_dio_private *dip = bio->bi_private;
6895 
6896 	if (err) {
6897 		printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
6898 		      "sector %#Lx len %u err no %d\n",
6899 		      btrfs_ino(dip->inode), bio->bi_rw,
6900 		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
6901 		dip->errors = 1;
6902 
6903 		/*
6904 		 * before the atomic variable goes to zero, we must make sure
6905 		 * dip->errors is perceived to be set.
6906 		 */
6907 		smp_mb__before_atomic_dec();
6908 	}
6909 
6910 	/* if there are more bios still pending for this dio, just exit */
6911 	if (!atomic_dec_and_test(&dip->pending_bios))
6912 		goto out;
6913 
6914 	if (dip->errors) {
6915 		bio_io_error(dip->orig_bio);
6916 	} else {
6917 		set_bit(BIO_UPTODATE, &dip->dio_bio->bi_flags);
6918 		bio_endio(dip->orig_bio, 0);
6919 	}
6920 out:
6921 	bio_put(bio);
6922 }
6923 
6924 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
6925 				       u64 first_sector, gfp_t gfp_flags)
6926 {
6927 	int nr_vecs = bio_get_nr_vecs(bdev);
6928 	return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
6929 }
6930 
6931 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
6932 					 int rw, u64 file_offset, int skip_sum,
6933 					 int async_submit)
6934 {
6935 	struct btrfs_dio_private *dip = bio->bi_private;
6936 	int write = rw & REQ_WRITE;
6937 	struct btrfs_root *root = BTRFS_I(inode)->root;
6938 	int ret;
6939 
6940 	if (async_submit)
6941 		async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
6942 
6943 	bio_get(bio);
6944 
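	/*
	 * reads are finished from the end-io workqueue so the completion
	 * handler runs in process context rather than interrupt context
	 */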
6945 	if (!write) {
6946 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
6947 		if (ret)
6948 			goto err;
6949 	}
6950 
6951 	if (skip_sum)
6952 		goto map;
6953 
6954 	if (write && async_submit) {
6955 		ret = btrfs_wq_submit_bio(root->fs_info,
6956 				   inode, rw, bio, 0, 0,
6957 				   file_offset,
6958 				   __btrfs_submit_bio_start_direct_io,
6959 				   __btrfs_submit_bio_done);
6960 		goto err;
6961 	} else if (write) {
6962 		/*
6963 		 * If we aren't doing async submit, calculate the csum of the
6964 		 * bio now.
6965 		 */
6966 		ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
6967 		if (ret)
6968 			goto err;
6969 	} else if (!skip_sum) {
6970 		ret = btrfs_lookup_bio_sums_dio(root, inode, dip, bio,
6971 						file_offset);
6972 		if (ret)
6973 			goto err;
6974 	}
6975 
6976 map:
6977 	ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
6978 err:
6979 	bio_put(bio);
6980 	return ret;
6981 }
6982 
6983 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6984 				    int skip_sum)
6985 {
6986 	struct inode *inode = dip->inode;
6987 	struct btrfs_root *root = BTRFS_I(inode)->root;
6988 	struct bio *bio;
6989 	struct bio *orig_bio = dip->orig_bio;
6990 	struct bio_vec *bvec = orig_bio->bi_io_vec;
6991 	u64 start_sector = orig_bio->bi_sector;
6992 	u64 file_offset = dip->logical_offset;
6993 	u64 submit_len = 0;
6994 	u64 map_length;
6995 	int nr_pages = 0;
6996 	int ret = 0;
6997 	int async_submit = 0;
6998 
6999 	map_length = orig_bio->bi_size;
7000 	ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
7001 			      &map_length, NULL, 0);
7002 	if (ret) {
7003 		bio_put(orig_bio);
7004 		return -EIO;
7005 	}
7006 
7007 	if (map_length >= orig_bio->bi_size) {
7008 		bio = orig_bio;
7009 		goto submit;
7010 	}
7011 
7012 	/* async crcs make it difficult to collect full stripe writes. */
7013 	if (btrfs_get_alloc_profile(root, 1) &
7014 	    (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))
7015 		async_submit = 0;
7016 	else
7017 		async_submit = 1;
7018 
7019 	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
7020 	if (!bio)
7021 		return -ENOMEM;
7022 	bio->bi_private = dip;
7023 	bio->bi_end_io = btrfs_end_dio_bio;
7024 	atomic_inc(&dip->pending_bios);
7025 
7026 	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
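	/*
	 * split the original bio at stripe boundaries: whenever the next
	 * bio_vec won't fit in the current mapping, submit what we have
	 * and start a new bio
	 */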
7027 		if (unlikely(map_length < submit_len + bvec->bv_len ||
7028 		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
7029 				 bvec->bv_offset) < bvec->bv_len)) {
7030 			/*
7031 			 * inc the count before we submit the bio so the
7032 			 * end IO handler can't drop the last reference
7033 			 * while we're still using the dip. Otherwise, the
7034 			 * dip might get freed before we're done setting it up
7035 			 */
7036 			atomic_inc(&dip->pending_bios);
7037 			ret = __btrfs_submit_dio_bio(bio, inode, rw,
7038 						     file_offset, skip_sum,
7039 						     async_submit);
7040 			if (ret) {
7041 				bio_put(bio);
7042 				atomic_dec(&dip->pending_bios);
7043 				goto out_err;
7044 			}
7045 
7046 			start_sector += submit_len >> 9;
7047 			file_offset += submit_len;
7048 
7049 			submit_len = 0;
7050 			nr_pages = 0;
7051 
7052 			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
7053 						  start_sector, GFP_NOFS);
7054 			if (!bio)
7055 				goto out_err;
7056 			bio->bi_private = dip;
7057 			bio->bi_end_io = btrfs_end_dio_bio;
7058 
7059 			map_length = orig_bio->bi_size;
7060 			ret = btrfs_map_block(root->fs_info, rw,
7061 					      start_sector << 9,
7062 					      &map_length, NULL, 0);
7063 			if (ret) {
7064 				bio_put(bio);
7065 				goto out_err;
7066 			}
7067 		} else {
7068 			submit_len += bvec->bv_len;
7069 			nr_pages++;
7070 			bvec++;
7071 		}
7072 	}
7073 
7074 submit:
7075 	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
7076 				     async_submit);
7077 	if (!ret)
7078 		return 0;
7079 
7080 	bio_put(bio);
7081 out_err:
7082 	dip->errors = 1;
7083 	/*
7084 	 * before the atomic variable goes to zero, we must
7085 	 * make sure dip->errors is perceived to be set.
7086 	 */
7087 	smp_mb__before_atomic_dec();
7088 	if (atomic_dec_and_test(&dip->pending_bios))
7089 		bio_io_error(dip->orig_bio);
7090 
7091 	/* bio_end_io() will handle error, so we needn't return it */
7092 	return 0;
7093 }
7094 
7095 static void btrfs_submit_direct(int rw, struct bio *dio_bio,
7096 				struct inode *inode, loff_t file_offset)
7097 {
7098 	struct btrfs_root *root = BTRFS_I(inode)->root;
7099 	struct btrfs_dio_private *dip;
7100 	struct bio *io_bio;
7101 	int skip_sum;
7102 	int sum_len;
7103 	int write = rw & REQ_WRITE;
7104 	int ret = 0;
7105 	u16 csum_size;
7106 
7107 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
7108 
7109 	io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS);
7110 	if (!io_bio) {
7111 		ret = -ENOMEM;
7112 		goto free_ordered;
7113 	}
7114 
7115 	if (!skip_sum && !write) {
7116 		csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
7117 		sum_len = dio_bio->bi_size >> inode->i_sb->s_blocksize_bits;
7118 		sum_len *= csum_size;
7119 	} else {
7120 		sum_len = 0;
7121 	}
7122 
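	/*
	 * for reads that will be verified, the per-block csums live in
	 * the sum_len bytes allocated directly behind the dip
	 */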
7123 	dip = kmalloc(sizeof(*dip) + sum_len, GFP_NOFS);
7124 	if (!dip) {
7125 		ret = -ENOMEM;
7126 		goto free_io_bio;
7127 	}
7128 
7129 	dip->private = dio_bio->bi_private;
7130 	dip->inode = inode;
7131 	dip->logical_offset = file_offset;
7132 	dip->bytes = dio_bio->bi_size;
7133 	dip->disk_bytenr = (u64)dio_bio->bi_sector << 9;
7134 	io_bio->bi_private = dip;
7135 	dip->errors = 0;
7136 	dip->orig_bio = io_bio;
7137 	dip->dio_bio = dio_bio;
7138 	atomic_set(&dip->pending_bios, 0);
7139 
7140 	if (write)
7141 		io_bio->bi_end_io = btrfs_endio_direct_write;
7142 	else
7143 		io_bio->bi_end_io = btrfs_endio_direct_read;
7144 
7145 	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
7146 	if (!ret)
7147 		return;
7148 
7149 free_io_bio:
7150 	bio_put(io_bio);
7151 
7152 free_ordered:
7153 	/*
7154 	 * If this is a write, we need to clean up the reserved space and kill
7155 	 * the ordered extent.
7156 	 */
7157 	if (write) {
7158 		struct btrfs_ordered_extent *ordered;
7159 		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
7160 		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
7161 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
7162 			btrfs_free_reserved_extent(root, ordered->start,
7163 						   ordered->disk_len);
7164 		btrfs_put_ordered_extent(ordered);
7165 		btrfs_put_ordered_extent(ordered);
7166 	}
7167 	bio_endio(dio_bio, ret);
7168 }
7169 
7170 static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
7171 			const struct iovec *iov, loff_t offset,
7172 			unsigned long nr_segs)
7173 {
7174 	int seg;
7175 	int i;
7176 	size_t size;
7177 	unsigned long addr;
7178 	unsigned blocksize_mask = root->sectorsize - 1;
7179 	ssize_t retval = -EINVAL;
7180 	loff_t end = offset;
7181 
7182 	if (offset & blocksize_mask)
7183 		goto out;
7184 
7185 	/* Check the memory alignment.  Blocks cannot straddle pages */
7186 	for (seg = 0; seg < nr_segs; seg++) {
7187 		addr = (unsigned long)iov[seg].iov_base;
7188 		size = iov[seg].iov_len;
7189 		end += size;
7190 		if ((addr & blocksize_mask) || (size & blocksize_mask))
7191 			goto out;
7192 
7193 		/* If this is a write we don't need to check any further */
7194 		if (rw & WRITE)
7195 			continue;
7196 
7197 		/*
7198 		 * Check to make sure we don't have duplicate iov_base's in this
7199 		 * iovec, if so return -EINVAL, otherwise we'll get csum errors
7200 		 * when reading back.
7201 		 */
7202 		for (i = seg + 1; i < nr_segs; i++) {
7203 			if (iov[seg].iov_base == iov[i].iov_base)
7204 				goto out;
7205 		}
7206 	}
7207 	retval = 0;
7208 out:
7209 	return retval;
7210 }
7211 
7212 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
7213 			const struct iovec *iov, loff_t offset,
7214 			unsigned long nr_segs)
7215 {
7216 	struct file *file = iocb->ki_filp;
7217 	struct inode *inode = file->f_mapping->host;
7218 	size_t count = 0;
7219 	int flags = 0;
7220 	bool wakeup = true;
7221 	bool relock = false;
7222 	ssize_t ret;
7223 
7224 	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
7225 			    offset, nr_segs))
7226 		return 0;
7227 
7228 	atomic_inc(&inode->i_dio_count);
7229 	smp_mb__after_atomic_inc();
7230 
7231 	/*
7232 	 * The generic stuff only does filemap_write_and_wait_range, which isn't
7233 	 * enough if we've written compressed pages to this area, so we need to
7234 	 * call btrfs_wait_ordered_range to make absolutely sure that any
7235 	 * outstanding dirty pages are on disk.
7236 	 */
7237 	count = iov_length(iov, nr_segs);
7238 	ret = btrfs_wait_ordered_range(inode, offset, count);
7239 	if (ret)
7240 		goto out; /* drop the i_dio_count we just took */
7241 
7242 	if (rw & WRITE) {
7243 		/*
7244 		 * If the write DIO is beyond the EOF, we need to update
7245 		 * the isize, but it is protected by i_mutex, so we
7246 		 * cannot unlock the i_mutex in this case.
7247 		 */
7248 		if (offset + count <= inode->i_size) {
7249 			mutex_unlock(&inode->i_mutex);
7250 			relock = true;
7251 		}
7252 		ret = btrfs_delalloc_reserve_space(inode, count);
7253 		if (ret)
7254 			goto out;
7255 	} else if (unlikely(test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
7256 				     &BTRFS_I(inode)->runtime_flags))) {
7257 		inode_dio_done(inode);
7258 		flags = DIO_LOCKING | DIO_SKIP_HOLES;
7259 		wakeup = false;
7260 	}
7261 
7262 	ret = __blockdev_direct_IO(rw, iocb, inode,
7263 			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
7264 			iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
7265 			btrfs_submit_direct, flags);
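	/*
	 * For writes, settle the delalloc reservation made above: give all
	 * of it back on outright failure, give back the unsubmitted tail
	 * on a short write, and otherwise (a full write, or an AIO that
	 * queued with -EIOCBQUEUED) drop just the extra metadata
	 * reservation.
	 */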
7266 	if (rw & WRITE) {
7267 		if (ret < 0 && ret != -EIOCBQUEUED)
7268 			btrfs_delalloc_release_space(inode, count);
7269 		else if (ret >= 0 && (size_t)ret < count)
7270 			btrfs_delalloc_release_space(inode,
7271 						     count - (size_t)ret);
7272 		else
7273 			btrfs_delalloc_release_metadata(inode, 0);
7274 	}
7275 out:
7276 	if (wakeup)
7277 		inode_dio_done(inode);
7278 	if (relock)
7279 		mutex_lock(&inode->i_mutex);
7280 
7281 	return ret;
7282 }
7283 
7284 #define BTRFS_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC)
7285 
7286 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
7287 		__u64 start, __u64 len)
7288 {
7289 	int	ret;
7290 
7291 	ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
7292 	if (ret)
7293 		return ret;
7294 
7295 	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
7296 }
7297 
7298 int btrfs_readpage(struct file *file, struct page *page)
7299 {
7300 	struct extent_io_tree *tree;
7301 	tree = &BTRFS_I(page->mapping->host)->io_tree;
7302 	return extent_read_full_page(tree, page, btrfs_get_extent, 0);
7303 }
7304 
7305 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
7306 {
7307 	struct extent_io_tree *tree;
7308 
7310 	if (current->flags & PF_MEMALLOC) {
7311 		redirty_page_for_writepage(wbc, page);
7312 		unlock_page(page);
7313 		return 0;
7314 	}
7315 	tree = &BTRFS_I(page->mapping->host)->io_tree;
7316 	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
7317 }
7318 
7319 static int btrfs_writepages(struct address_space *mapping,
7320 			    struct writeback_control *wbc)
7321 {
7322 	struct extent_io_tree *tree;
7323 
7324 	tree = &BTRFS_I(mapping->host)->io_tree;
7325 	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
7326 }
7327 
7328 static int
7329 btrfs_readpages(struct file *file, struct address_space *mapping,
7330 		struct list_head *pages, unsigned nr_pages)
7331 {
7332 	struct extent_io_tree *tree;
7333 	tree = &BTRFS_I(mapping->host)->io_tree;
7334 	return extent_readpages(tree, mapping, pages, nr_pages,
7335 				btrfs_get_extent);
7336 }

7337 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
7338 {
7339 	struct extent_io_tree *tree;
7340 	struct extent_map_tree *map;
7341 	int ret;
7342 
7343 	tree = &BTRFS_I(page->mapping->host)->io_tree;
7344 	map = &BTRFS_I(page->mapping->host)->extent_tree;
7345 	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
7346 	if (ret == 1) {
7347 		ClearPagePrivate(page);
7348 		set_page_private(page, 0);
7349 		page_cache_release(page);
7350 	}
7351 	return ret;
7352 }
7353 
7354 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
7355 {
7356 	if (PageWriteback(page) || PageDirty(page))
7357 		return 0;
7358 	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
7359 }
7360 
7361 static void btrfs_invalidatepage(struct page *page, unsigned int offset,
7362 				 unsigned int length)
7363 {
7364 	struct inode *inode = page->mapping->host;
7365 	struct extent_io_tree *tree;
7366 	struct btrfs_ordered_extent *ordered;
7367 	struct extent_state *cached_state = NULL;
7368 	u64 page_start = page_offset(page);
7369 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
7370 
7371 	/*
7372 	 * we have the page locked, so new writeback can't start,
7373 	 * and the dirty bit won't be cleared while we are here.
7374 	 *
7375 	 * Wait for IO on this page so that we can safely clear
7376 	 * the PagePrivate2 bit and do ordered accounting
7377 	 */
7378 	wait_on_page_writeback(page);
7379 
7380 	tree = &BTRFS_I(inode)->io_tree;
7381 	if (offset) {
7382 		btrfs_releasepage(page, GFP_NOFS);
7383 		return;
7384 	}
7385 	lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
7386 	ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
7387 	if (ordered) {
7388 		/*
7389 		 * IO on this page will never be started, so we need
7390 		 * to account for any ordered extents now
7391 		 */
7392 		clear_extent_bit(tree, page_start, page_end,
7393 				 EXTENT_DIRTY | EXTENT_DELALLOC |
7394 				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
7395 				 EXTENT_DEFRAG, 1, 0, &cached_state, GFP_NOFS);
7396 		/*
7397 		 * whoever cleared the private bit is responsible
7398 		 * for the finish_ordered_io
7399 		 */
7400 		if (TestClearPagePrivate2(page)) {
7401 			struct btrfs_ordered_inode_tree *tree;
7402 			u64 new_len;
7403 
7404 			tree = &BTRFS_I(inode)->ordered_tree;
7405 
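			/*
			 * The ordered extent is being cut short: record how
			 * many bytes, from its file_offset up to this page,
			 * are still good so that finish_ordered_io() only
			 * accounts for that much.
			 */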
7406 			spin_lock_irq(&tree->lock);
7407 			set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
7408 			new_len = page_start - ordered->file_offset;
7409 			if (new_len < ordered->truncated_len)
7410 				ordered->truncated_len = new_len;
7411 			spin_unlock_irq(&tree->lock);
7412 
7413 			if (btrfs_dec_test_ordered_pending(inode, &ordered,
7414 							   page_start,
7415 							   PAGE_CACHE_SIZE, 1))
7416 				btrfs_finish_ordered_io(ordered);
7417 		}
7418 		btrfs_put_ordered_extent(ordered);
7419 		cached_state = NULL;
7420 		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
7421 	}
7422 	clear_extent_bit(tree, page_start, page_end,
7423 		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
7424 		 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
7425 		 &cached_state, GFP_NOFS);
7426 	__btrfs_releasepage(page, GFP_NOFS);
7427 
7428 	ClearPageChecked(page);
7429 	if (PagePrivate(page)) {
7430 		ClearPagePrivate(page);
7431 		set_page_private(page, 0);
7432 		page_cache_release(page);
7433 	}
7434 }
7435 
7436 /*
7437  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
7438  * called from a page fault handler when a page is first dirtied. Hence we must
7439  * be careful to check for EOF conditions here. We set the page up correctly
7440  * for a written page which means we get ENOSPC checking when writing into
7441  * holes and correct delalloc and unwritten extent mapping on filesystems that
7442  * support these features.
7443  *
7444  * We are not allowed to take the i_mutex here so we have to play games to
7445  * protect against truncate races as the page could now be beyond EOF.  Because
7446  * vmtruncate() writes the inode size before removing pages, once we have the
7447  * page lock we can determine safely if the page is beyond EOF. If it is not
7448  * beyond EOF, then the page is guaranteed safe against truncation until we
7449  * unlock the page.
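 *
 * In code terms, this is a restatement of the body below: lock_page(),
 * then recheck i_size; if the page now starts at or past EOF it was
 * truncated away and we return VM_FAULT_NOPAGE so the VM retries the
 * fault, otherwise truncate must wait on our page lock until we return.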
7450  */
7451 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
7452 {
7453 	struct page *page = vmf->page;
7454 	struct inode *inode = file_inode(vma->vm_file);
7455 	struct btrfs_root *root = BTRFS_I(inode)->root;
7456 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7457 	struct btrfs_ordered_extent *ordered;
7458 	struct extent_state *cached_state = NULL;
7459 	char *kaddr;
7460 	unsigned long zero_start;
7461 	loff_t size;
7462 	int ret;
7463 	int reserved = 0;
7464 	u64 page_start;
7465 	u64 page_end;
7466 
7467 	sb_start_pagefault(inode->i_sb);
7468 	ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
7469 	if (!ret) {
7470 		ret = file_update_time(vma->vm_file);
7471 		reserved = 1;
7472 	}
7473 	if (ret) {
7474 		if (ret == -ENOMEM)
7475 			ret = VM_FAULT_OOM;
7476 		else /* -ENOSPC, -EIO, etc */
7477 			ret = VM_FAULT_SIGBUS;
7478 		if (reserved)
7479 			goto out;
7480 		goto out_noreserve;
7481 	}
7482 
7483 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
7484 again:
7485 	lock_page(page);
7486 	size = i_size_read(inode);
7487 	page_start = page_offset(page);
7488 	page_end = page_start + PAGE_CACHE_SIZE - 1;
7489 
7490 	if ((page->mapping != inode->i_mapping) ||
7491 	    (page_start >= size)) {
7492 		/* page got truncated out from underneath us */
7493 		goto out_unlock;
7494 	}
7495 	wait_on_page_writeback(page);
7496 
7497 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
7498 	set_page_extent_mapped(page);
7499 
7500 	/*
7501 	 * we can't set the delalloc bits if there are pending ordered
7502 	 * extents.  Drop our locks and wait for them to finish
7503 	 */
7504 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
7505 	if (ordered) {
7506 		unlock_extent_cached(io_tree, page_start, page_end,
7507 				     &cached_state, GFP_NOFS);
7508 		unlock_page(page);
7509 		btrfs_start_ordered_extent(inode, ordered, 1);
7510 		btrfs_put_ordered_extent(ordered);
7511 		goto again;
7512 	}
7513 
7514 	/*
7515 	 * XXX - page_mkwrite gets called every time the page is dirtied, even
7516 	 * if it was already dirty, so for space accounting reasons we need to
7517 	 * clear any delalloc bits for the range we are fixing to save.  There
7518 	 * is probably a better way to do this, but for now keep consistent with
7519 	 * prepare_pages in the normal write path.
7520 	 */
7521 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
7522 			  EXTENT_DIRTY | EXTENT_DELALLOC |
7523 			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
7524 			  0, 0, &cached_state, GFP_NOFS);
7525 
7526 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
7527 					&cached_state);
7528 	if (ret) {
7529 		unlock_extent_cached(io_tree, page_start, page_end,
7530 				     &cached_state, GFP_NOFS);
7531 		ret = VM_FAULT_SIGBUS;
7532 		goto out_unlock;
7533 	}
7534 	ret = 0;
7535 
7536 	/* page is wholly or partially inside EOF */
7537 	if (page_start + PAGE_CACHE_SIZE > size)
7538 		zero_start = size & ~PAGE_CACHE_MASK;
7539 	else
7540 		zero_start = PAGE_CACHE_SIZE;
7541 
7542 	if (zero_start != PAGE_CACHE_SIZE) {
7543 		kaddr = kmap(page);
7544 		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
7545 		flush_dcache_page(page);
7546 		kunmap(page);
7547 	}
7548 	ClearPageChecked(page);
7549 	set_page_dirty(page);
7550 	SetPageUptodate(page);
7551 
7552 	BTRFS_I(inode)->last_trans = root->fs_info->generation;
7553 	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
7554 	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
7555 
7556 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
7557 
7558 out_unlock:
7559 	if (!ret) {
7560 		sb_end_pagefault(inode->i_sb);
7561 		return VM_FAULT_LOCKED;
7562 	}
7563 	unlock_page(page);
7564 out:
7565 	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
7566 out_noreserve:
7567 	sb_end_pagefault(inode->i_sb);
7568 	return ret;
7569 }
7570 
7571 static int btrfs_truncate(struct inode *inode)
7572 {
7573 	struct btrfs_root *root = BTRFS_I(inode)->root;
7574 	struct btrfs_block_rsv *rsv;
7575 	int ret = 0;
7576 	int err = 0;
7577 	struct btrfs_trans_handle *trans;
7578 	u64 mask = root->sectorsize - 1;
7579 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
7580 
7581 	ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
7582 				       (u64)-1);
7583 	if (ret)
7584 		return ret;
7585 
7586 	/*
7587 	 * Yes, ladies and gentlemen, this is indeed ugly.  The fact is we have
7588 	 * three things going on here:
7589 	 *
7590 	 * 1) We need to reserve space for our orphan item and the space to
7591 	 * delete our orphan item.  Lord knows we don't want to have a dangling
7592 	 * orphan item because we didn't reserve space to remove it.
7593 	 *
7594 	 * 2) We need to reserve space to update our inode.
7595 	 *
7596 	 * 3) We need to have something to cache all the space that is going to
7597 	 * be freed up by the truncate operation, but also have some slack
7598 	 * space reserved in case it uses space during the truncate (thank you
7599 	 * very much snapshotting).
7600 	 *
7601 	 * And we need these to all be separate.  The fact is we can use a lot of
7602 	 * space doing the truncate, and we have no earthly idea how much space
7603 	 * we will use, so we need the truncate reservation to be separate so it
7604 	 * doesn't end up using space reserved for updating the inode or
7605 	 * removing the orphan item.  We also need to be able to stop the
7606 	 * transaction and start a new one, which means we need to be able to
7607 	 * update the inode several times, and we have no way of knowing how
7608 	 * many times that will be, so we can't just reserve 1 item for the
7609 	 * entirety of the operation, so that has to be done separately as well.
7610 	 * Then there is the orphan item, which does indeed need to be held on
7611 	 * to for the whole operation, and we need nobody to touch this reserved
7612 	 * space except the orphan code.
7613 	 *
7614 	 * So that leaves us with
7615 	 *
7616 	 * 1) root->orphan_block_rsv - for the orphan deletion.
7617 	 * 2) rsv - for the truncate reservation, which we will steal from the
7618 	 * transaction reservation.
7619 	 * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
7620 	 * updating the inode.
7621 	 */
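	/*
	 * Note: min_size is btrfs_calc_trunc_metadata_size(root, 1),
	 * i.e. roughly one item's worth of metadata room, and each pass
	 * of the loop below migrates that much from the transaction
	 * reservation back into rsv.
	 */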
7622 	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
7623 	if (!rsv)
7624 		return -ENOMEM;
7625 	rsv->size = min_size;
7626 	rsv->failfast = 1;
7627 
7628 	/*
7629 	 * 1 for the truncate slack space
7630 	 * 1 for updating the inode.
7631 	 */
7632 	trans = btrfs_start_transaction(root, 2);
7633 	if (IS_ERR(trans)) {
7634 		err = PTR_ERR(trans);
7635 		goto out;
7636 	}
7637 
7638 	/* Migrate the slack space for the truncate to our reserve */
7639 	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
7640 				      min_size);
7641 	BUG_ON(ret);
7642 
7643 	/*
7644 	 * setattr is responsible for setting the ordered_data_close flag,
7645 	 * but that is only tested during the last file release.  That
7646 	 * could happen well after the next commit, leaving a great big
7647 	 * window where new writes may get lost if someone chooses to write
7648 	 * to this file after truncating to zero
7649 	 *
7650 	 * The inode doesn't have any dirty data here, and so if we commit
7651 	 * this is a noop.  If someone immediately starts writing to the inode
7652 	 * it is very likely we'll catch some of their writes in this
7653 	 * transaction, and the commit will find this file on the ordered
7654 	 * data list with good things to send down.
7655 	 *
7656 	 * This is a best effort solution, there is still a window where
7657 	 * using truncate to replace the contents of the file will
7658 	 * end up with a zero length file after a crash.
7659 	 */
7660 	if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
7661 					   &BTRFS_I(inode)->runtime_flags))
7662 		btrfs_add_ordered_operation(trans, root, inode);
7663 
7664 	/*
7665 	 * So if we truncate and then write and fsync we normally would just
7666 	 * write the extents that changed, which is a problem if we need to
7667 	 * first truncate that entire inode.  So set this flag so we write out
7668 	 * all of the extents in the inode to the sync log so we're completely
7669 	 * safe.
7670 	 */
7671 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
7672 	trans->block_rsv = rsv;
7673 
7674 	while (1) {
7675 		ret = btrfs_truncate_inode_items(trans, root, inode,
7676 						 inode->i_size,
7677 						 BTRFS_EXTENT_DATA_KEY);
7678 		if (ret != -ENOSPC) {
7679 			err = ret;
7680 			break;
7681 		}
7682 
7683 		trans->block_rsv = &root->fs_info->trans_block_rsv;
7684 		ret = btrfs_update_inode(trans, root, inode);
7685 		if (ret) {
7686 			err = ret;
7687 			break;
7688 		}
7689 
7690 		btrfs_end_transaction(trans, root);
7691 		btrfs_btree_balance_dirty(root);
7692 
7693 		trans = btrfs_start_transaction(root, 2);
7694 		if (IS_ERR(trans)) {
7695 			ret = err = PTR_ERR(trans);
7696 			trans = NULL;
7697 			break;
7698 		}
7699 
7700 		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
7701 					      rsv, min_size);
7702 		BUG_ON(ret);	/* shouldn't happen */
7703 		trans->block_rsv = rsv;
7704 	}
7705 
7706 	if (ret == 0 && inode->i_nlink > 0) {
7707 		trans->block_rsv = root->orphan_block_rsv;
7708 		ret = btrfs_orphan_del(trans, inode);
7709 		if (ret)
7710 			err = ret;
7711 	}
7712 
7713 	if (trans) {
7714 		trans->block_rsv = &root->fs_info->trans_block_rsv;
7715 		ret = btrfs_update_inode(trans, root, inode);
7716 		if (ret && !err)
7717 			err = ret;
7718 
7719 		ret = btrfs_end_transaction(trans, root);
7720 		btrfs_btree_balance_dirty(root);
7721 	}
7722 
7723 out:
7724 	btrfs_free_block_rsv(root, rsv);
7725 
7726 	if (ret && !err)
7727 		err = ret;
7728 
7729 	return err;
7730 }
7731 
7732 /*
7733  * create a new subvolume directory/inode (helper for the ioctl).
7734  */
7735 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
7736 			     struct btrfs_root *new_root, u64 new_dirid)
7737 {
7738 	struct inode *inode;
7739 	int err;
7740 	u64 index = 0;
7741 
7742 	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
7743 				new_dirid, new_dirid,
7744 				S_IFDIR | (~current_umask() & S_IRWXUGO),
7745 				&index);
7746 	if (IS_ERR(inode))
7747 		return PTR_ERR(inode);
7748 	inode->i_op = &btrfs_dir_inode_operations;
7749 	inode->i_fop = &btrfs_dir_file_operations;
7750 
7751 	set_nlink(inode, 1);
7752 	btrfs_i_size_write(inode, 0);
7753 
7754 	err = btrfs_update_inode(trans, new_root, inode);
7755 
7756 	iput(inode);
7757 	return err;
7758 }
7759 
7760 struct inode *btrfs_alloc_inode(struct super_block *sb)
7761 {
7762 	struct btrfs_inode *ei;
7763 	struct inode *inode;
7764 
7765 	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
7766 	if (!ei)
7767 		return NULL;
7768 
7769 	ei->root = NULL;
7770 	ei->generation = 0;
7771 	ei->last_trans = 0;
7772 	ei->last_sub_trans = 0;
7773 	ei->logged_trans = 0;
7774 	ei->delalloc_bytes = 0;
7775 	ei->disk_i_size = 0;
7776 	ei->flags = 0;
7777 	ei->csum_bytes = 0;
7778 	ei->index_cnt = (u64)-1;
7779 	ei->last_unlink_trans = 0;
7780 	ei->last_log_commit = 0;
7781 
7782 	spin_lock_init(&ei->lock);
7783 	ei->outstanding_extents = 0;
7784 	ei->reserved_extents = 0;
7785 
7786 	ei->runtime_flags = 0;
7787 	ei->force_compress = BTRFS_COMPRESS_NONE;
7788 
7789 	ei->delayed_node = NULL;
7790 
7791 	inode = &ei->vfs_inode;
7792 	extent_map_tree_init(&ei->extent_tree);
7793 	extent_io_tree_init(&ei->io_tree, &inode->i_data);
7794 	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
7795 	ei->io_tree.track_uptodate = 1;
7796 	ei->io_failure_tree.track_uptodate = 1;
7797 	atomic_set(&ei->sync_writers, 0);
7798 	mutex_init(&ei->log_mutex);
7799 	mutex_init(&ei->delalloc_mutex);
7800 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
7801 	INIT_LIST_HEAD(&ei->delalloc_inodes);
7802 	INIT_LIST_HEAD(&ei->ordered_operations);
7803 	RB_CLEAR_NODE(&ei->rb_node);
7804 
7805 	return inode;
7806 }
7807 
7808 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
7809 void btrfs_test_destroy_inode(struct inode *inode)
7810 {
7811 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
7812 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
7813 }
7814 #endif
7815 
7816 static void btrfs_i_callback(struct rcu_head *head)
7817 {
7818 	struct inode *inode = container_of(head, struct inode, i_rcu);
7819 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
7820 }
7821 
7822 void btrfs_destroy_inode(struct inode *inode)
7823 {
7824 	struct btrfs_ordered_extent *ordered;
7825 	struct btrfs_root *root = BTRFS_I(inode)->root;
7826 
7827 	WARN_ON(!hlist_empty(&inode->i_dentry));
7828 	WARN_ON(inode->i_data.nrpages);
7829 	WARN_ON(BTRFS_I(inode)->outstanding_extents);
7830 	WARN_ON(BTRFS_I(inode)->reserved_extents);
7831 	WARN_ON(BTRFS_I(inode)->delalloc_bytes);
7832 	WARN_ON(BTRFS_I(inode)->csum_bytes);
7833 
7834 	/*
7835 	 * This can happen when we create an inode, but somebody else also
7836 	 * created the same inode and we need to destroy the one we already
7837 	 * created.
7838 	 */
7839 	if (!root)
7840 		goto free;
7841 
7842 	/*
7843 	 * Make sure we're properly removed from the ordered operation
7844 	 * lists.
7845 	 */
7846 	smp_mb();
7847 	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
7848 		spin_lock(&root->fs_info->ordered_root_lock);
7849 		list_del_init(&BTRFS_I(inode)->ordered_operations);
7850 		spin_unlock(&root->fs_info->ordered_root_lock);
7851 	}
7852 
7853 	if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
7854 		     &BTRFS_I(inode)->runtime_flags)) {
7855 		btrfs_info(root->fs_info, "inode %llu still on the orphan list",
7856 			btrfs_ino(inode));
7857 		atomic_dec(&root->orphan_inodes);
7858 	}
7859 
7860 	while (1) {
7861 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
7862 		if (!ordered)
7863 			break;
7864 		else {
7865 			btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
7866 				ordered->file_offset, ordered->len);
7867 			btrfs_remove_ordered_extent(inode, ordered);
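			/* one put for our lookup, one for the base ref */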
7868 			btrfs_put_ordered_extent(ordered);
7869 			btrfs_put_ordered_extent(ordered);
7870 		}
7871 	}
7872 	inode_tree_del(inode);
7873 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
7874 free:
7875 	call_rcu(&inode->i_rcu, btrfs_i_callback);
7876 }
7877 
7878 int btrfs_drop_inode(struct inode *inode)
7879 {
7880 	struct btrfs_root *root = BTRFS_I(inode)->root;
7881 
7882 	if (root == NULL)
7883 		return 1;
7884 
7885 	/* the snap/subvol tree is being deleted */
7886 	if (btrfs_root_refs(&root->root_item) == 0)
7887 		return 1;
7888 	else
7889 		return generic_drop_inode(inode);
7890 }
7891 
7892 static void init_once(void *foo)
7893 {
7894 	struct btrfs_inode *ei = (struct btrfs_inode *) foo;
7895 
7896 	inode_init_once(&ei->vfs_inode);
7897 }
7898 
7899 void btrfs_destroy_cachep(void)
7900 {
7901 	/*
7902 	 * Make sure all delayed rcu free inodes are flushed before we
7903 	 * destroy cache.
7904 	 */
7905 	rcu_barrier();
7906 	if (btrfs_inode_cachep)
7907 		kmem_cache_destroy(btrfs_inode_cachep);
7908 	if (btrfs_trans_handle_cachep)
7909 		kmem_cache_destroy(btrfs_trans_handle_cachep);
7910 	if (btrfs_transaction_cachep)
7911 		kmem_cache_destroy(btrfs_transaction_cachep);
7912 	if (btrfs_path_cachep)
7913 		kmem_cache_destroy(btrfs_path_cachep);
7914 	if (btrfs_free_space_cachep)
7915 		kmem_cache_destroy(btrfs_free_space_cachep);
7916 	if (btrfs_delalloc_work_cachep)
7917 		kmem_cache_destroy(btrfs_delalloc_work_cachep);
7918 }
7919 
7920 int btrfs_init_cachep(void)
7921 {
7922 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
7923 			sizeof(struct btrfs_inode), 0,
7924 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
7925 	if (!btrfs_inode_cachep)
7926 		goto fail;
7927 
7928 	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
7929 			sizeof(struct btrfs_trans_handle), 0,
7930 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7931 	if (!btrfs_trans_handle_cachep)
7932 		goto fail;
7933 
7934 	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
7935 			sizeof(struct btrfs_transaction), 0,
7936 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7937 	if (!btrfs_transaction_cachep)
7938 		goto fail;
7939 
7940 	btrfs_path_cachep = kmem_cache_create("btrfs_path",
7941 			sizeof(struct btrfs_path), 0,
7942 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7943 	if (!btrfs_path_cachep)
7944 		goto fail;
7945 
7946 	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
7947 			sizeof(struct btrfs_free_space), 0,
7948 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7949 	if (!btrfs_free_space_cachep)
7950 		goto fail;
7951 
7952 	btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
7953 			sizeof(struct btrfs_delalloc_work), 0,
7954 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
7955 			NULL);
7956 	if (!btrfs_delalloc_work_cachep)
7957 		goto fail;
7958 
7959 	return 0;
7960 fail:
7961 	btrfs_destroy_cachep();
7962 	return -ENOMEM;
7963 }
7964 
7965 static int btrfs_getattr(struct vfsmount *mnt,
7966 			 struct dentry *dentry, struct kstat *stat)
7967 {
7968 	u64 delalloc_bytes;
7969 	struct inode *inode = dentry->d_inode;
7970 	u32 blocksize = inode->i_sb->s_blocksize;
7971 
7972 	generic_fillattr(inode, stat);
7973 	stat->dev = BTRFS_I(inode)->root->anon_dev;
7974 	stat->blksize = PAGE_CACHE_SIZE;
7975 
7976 	spin_lock(&BTRFS_I(inode)->lock);
7977 	delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
7978 	spin_unlock(&BTRFS_I(inode)->lock);
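	/* report bytes on disk plus outstanding delalloc, in 512-byte units */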
7979 	stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
7980 			ALIGN(delalloc_bytes, blocksize)) >> 9;
7981 	return 0;
7982 }
7983 
7984 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7985 			   struct inode *new_dir, struct dentry *new_dentry)
7986 {
7987 	struct btrfs_trans_handle *trans;
7988 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
7989 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
7990 	struct inode *new_inode = new_dentry->d_inode;
7991 	struct inode *old_inode = old_dentry->d_inode;
7992 	struct timespec ctime = CURRENT_TIME;
7993 	u64 index = 0;
7994 	u64 root_objectid;
7995 	int ret;
7996 	u64 old_ino = btrfs_ino(old_inode);
7997 
7998 	if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
7999 		return -EPERM;
8000 
8001 	/* we only allow rename subvolume link between subvolumes */
8002 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
8003 		return -EXDEV;
8004 
8005 	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
8006 	    (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
8007 		return -ENOTEMPTY;
8008 
8009 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
8010 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
8011 		return -ENOTEMPTY;
8012 
8013 
8014 	/* check for collisions, even if the name isn't there */
8015 	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
8016 			     new_dentry->d_name.name,
8017 			     new_dentry->d_name.len);
8018 
8019 	if (ret) {
8020 		if (ret == -EEXIST) {
8021 			/* we shouldn't get -EEXIST without a new_inode */
8022 			if (WARN_ON(!new_inode))
8023 				return ret;
8026 		} else {
8027 			/* maybe -EOVERFLOW */
8028 			return ret;
8029 		}
8030 	}
8031 	ret = 0;
8032 
8033 	/*
8034 	 * we're using rename to replace one file with another,
8035 	 * and the replacement file is large.  Start IO on it now so
8036 	 * we don't add too much work to the end of the transaction
8037 	 */
8038 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
8039 	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
8040 		filemap_flush(old_inode->i_mapping);
8041 
8042 	/* close the racy window with snapshot create/destroy ioctl */
8043 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
8044 		down_read(&root->fs_info->subvol_sem);
8045 	/*
8046 	 * We want to reserve the absolute worst case amount of items.  So if
8047 	 * both inodes are subvols and we need to unlink them then that would
8048 	 * require 4 item modifications, but if they are both normal inodes it
8049 	 * would require 5 item modifications, so we'll assume they're normal
8050 	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
8051 	 * should cover the worst case number of items we'll modify.
8052 	 */
8053 	trans = btrfs_start_transaction(root, 11);
8054 	if (IS_ERR(trans)) {
8055 		ret = PTR_ERR(trans);
8056 		goto out_notrans;
8057 	}
8058 
8059 	if (dest != root)
8060 		btrfs_record_root_in_trans(trans, dest);
8061 
8062 	ret = btrfs_set_inode_index(new_dir, &index);
8063 	if (ret)
8064 		goto out_fail;
8065 
8066 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8067 		/* force full log commit if subvolume involved. */
8068 		root->fs_info->last_trans_log_full_commit = trans->transid;
8069 	} else {
8070 		ret = btrfs_insert_inode_ref(trans, dest,
8071 					     new_dentry->d_name.name,
8072 					     new_dentry->d_name.len,
8073 					     old_ino,
8074 					     btrfs_ino(new_dir), index);
8075 		if (ret)
8076 			goto out_fail;
8077 		/*
8078 		 * this is an ugly little race, but the rename is required
8079 		 * to make sure that if we crash, the inode is either at the
8080 		 * old name or the new one.  pinning the log transaction lets
8081 		 * us make sure we don't allow a log commit to come in after
8082 		 * we unlink the name but before we add the new name back in.
8083 		 */
8084 		btrfs_pin_log_trans(root);
8085 	}
8086 	/*
8087 	 * make sure the inode gets flushed if it is replacing
8088 	 * something.
8089 	 */
8090 	if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
8091 		btrfs_add_ordered_operation(trans, root, old_inode);
8092 
8093 	inode_inc_iversion(old_dir);
8094 	inode_inc_iversion(new_dir);
8095 	inode_inc_iversion(old_inode);
8096 	old_dir->i_ctime = old_dir->i_mtime = ctime;
8097 	new_dir->i_ctime = new_dir->i_mtime = ctime;
8098 	old_inode->i_ctime = ctime;
8099 
8100 	if (old_dentry->d_parent != new_dentry->d_parent)
8101 		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
8102 
8103 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8104 		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
8105 		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
8106 					old_dentry->d_name.name,
8107 					old_dentry->d_name.len);
8108 	} else {
8109 		ret = __btrfs_unlink_inode(trans, root, old_dir,
8110 					old_dentry->d_inode,
8111 					old_dentry->d_name.name,
8112 					old_dentry->d_name.len);
8113 		if (!ret)
8114 			ret = btrfs_update_inode(trans, root, old_inode);
8115 	}
8116 	if (ret) {
8117 		btrfs_abort_transaction(trans, root, ret);
8118 		goto out_fail;
8119 	}
8120 
8121 	if (new_inode) {
8122 		inode_inc_iversion(new_inode);
8123 		new_inode->i_ctime = CURRENT_TIME;
8124 		if (unlikely(btrfs_ino(new_inode) ==
8125 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
8126 			root_objectid = BTRFS_I(new_inode)->location.objectid;
8127 			ret = btrfs_unlink_subvol(trans, dest, new_dir,
8128 						root_objectid,
8129 						new_dentry->d_name.name,
8130 						new_dentry->d_name.len);
8131 			BUG_ON(new_inode->i_nlink == 0);
8132 		} else {
8133 			ret = btrfs_unlink_inode(trans, dest, new_dir,
8134 						 new_dentry->d_inode,
8135 						 new_dentry->d_name.name,
8136 						 new_dentry->d_name.len);
8137 		}
8138 		if (!ret && new_inode->i_nlink == 0)
8139 			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
8140 		if (ret) {
8141 			btrfs_abort_transaction(trans, root, ret);
8142 			goto out_fail;
8143 		}
8144 	}
8145 
8146 	ret = btrfs_add_link(trans, new_dir, old_inode,
8147 			     new_dentry->d_name.name,
8148 			     new_dentry->d_name.len, 0, index);
8149 	if (ret) {
8150 		btrfs_abort_transaction(trans, root, ret);
8151 		goto out_fail;
8152 	}
8153 
8154 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
8155 		struct dentry *parent = new_dentry->d_parent;
8156 		btrfs_log_new_name(trans, old_inode, old_dir, parent);
8157 		btrfs_end_log_trans(root);
8158 	}
8159 out_fail:
8160 	btrfs_end_transaction(trans, root);
8161 out_notrans:
8162 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
8163 		up_read(&root->fs_info->subvol_sem);
8164 
8165 	return ret;
8166 }
8167 
8168 static void btrfs_run_delalloc_work(struct btrfs_work *work)
8169 {
8170 	struct btrfs_delalloc_work *delalloc_work;
8171 	struct inode *inode;
8172 
8173 	delalloc_work = container_of(work, struct btrfs_delalloc_work,
8174 				     work);
8175 	inode = delalloc_work->inode;
8176 	if (delalloc_work->wait) {
8177 		btrfs_wait_ordered_range(inode, 0, (u64)-1);
8178 	} else {
8179 		filemap_flush(inode->i_mapping);
8180 		if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8181 			     &BTRFS_I(inode)->runtime_flags))
8182 			filemap_flush(inode->i_mapping);
8183 	}
8184 
8185 	if (delalloc_work->delay_iput)
8186 		btrfs_add_delayed_iput(inode);
8187 	else
8188 		iput(inode);
8189 	complete(&delalloc_work->completion);
8190 }
8191 
8192 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
8193 						    int wait, int delay_iput)
8194 {
8195 	struct btrfs_delalloc_work *work;
8196 
8197 	work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
8198 	if (!work)
8199 		return NULL;
8200 
8201 	init_completion(&work->completion);
8202 	INIT_LIST_HEAD(&work->list);
8203 	work->inode = inode;
8204 	work->wait = wait;
8205 	work->delay_iput = delay_iput;
8206 	work->work.func = btrfs_run_delalloc_work;
8207 
8208 	return work;
8209 }
8210 
8211 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
8212 {
8213 	wait_for_completion(&work->completion);
8214 	kmem_cache_free(btrfs_delalloc_work_cachep, work);
8215 }
8216 
8217 /*
8218  * some fairly slow code that needs optimization. This walks the list
8219  * of all the inodes with pending delalloc and forces them to disk.
8220  */
8221 static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
8222 {
8223 	struct btrfs_inode *binode;
8224 	struct inode *inode;
8225 	struct btrfs_delalloc_work *work, *next;
8226 	struct list_head works;
8227 	struct list_head splice;
8228 	int ret = 0;
8229 
8230 	INIT_LIST_HEAD(&works);
8231 	INIT_LIST_HEAD(&splice);
8232 
8233 	spin_lock(&root->delalloc_lock);
8234 	list_splice_init(&root->delalloc_inodes, &splice);
8235 	while (!list_empty(&splice)) {
8236 		binode = list_entry(splice.next, struct btrfs_inode,
8237 				    delalloc_inodes);
8238 
8239 		list_move_tail(&binode->delalloc_inodes,
8240 			       &root->delalloc_inodes);
8241 		inode = igrab(&binode->vfs_inode);
8242 		if (!inode) {
8243 			cond_resched_lock(&root->delalloc_lock);
8244 			continue;
8245 		}
8246 		spin_unlock(&root->delalloc_lock);
8247 
8248 		work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
8249 		if (unlikely(!work)) {
8250 			if (delay_iput)
8251 				btrfs_add_delayed_iput(inode);
8252 			else
8253 				iput(inode);
8254 			ret = -ENOMEM;
8255 			goto out;
8256 		}
8257 		list_add_tail(&work->list, &works);
8258 		btrfs_queue_worker(&root->fs_info->flush_workers,
8259 				   &work->work);
8260 
8261 		cond_resched();
8262 		spin_lock(&root->delalloc_lock);
8263 	}
8264 	spin_unlock(&root->delalloc_lock);
8265 
8266 	list_for_each_entry_safe(work, next, &works, list) {
8267 		list_del_init(&work->list);
8268 		btrfs_wait_and_free_delalloc_work(work);
8269 	}
8270 	return 0;
8271 out:
8272 	list_for_each_entry_safe(work, next, &works, list) {
8273 		list_del_init(&work->list);
8274 		btrfs_wait_and_free_delalloc_work(work);
8275 	}
8276 
8277 	if (!list_empty_careful(&splice)) {
8278 		spin_lock(&root->delalloc_lock);
8279 		list_splice_tail(&splice, &root->delalloc_inodes);
8280 		spin_unlock(&root->delalloc_lock);
8281 	}
8282 	return ret;
8283 }
8284 
8285 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
8286 {
8287 	int ret;
8288 
8289 	if (root->fs_info->sb->s_flags & MS_RDONLY)
8290 		return -EROFS;
8291 
8292 	ret = __start_delalloc_inodes(root, delay_iput);
8293 	/*
8294 	 * the filemap_flush will queue IO into the worker threads, but
8295 	 * we have to make sure the IO is actually started and that
8296 	 * ordered extents get created before we return
8297 	 */
8298 	atomic_inc(&root->fs_info->async_submit_draining);
8299 	while (atomic_read(&root->fs_info->nr_async_submits) ||
8300 	      atomic_read(&root->fs_info->async_delalloc_pages)) {
8301 		wait_event(root->fs_info->async_submit_wait,
8302 		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
8303 		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
8304 	}
8305 	atomic_dec(&root->fs_info->async_submit_draining);
8306 	return ret;
8307 }
8308 
8309 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput)
8310 {
8311 	struct btrfs_root *root;
8312 	struct list_head splice;
8313 	int ret;
8314 
8315 	if (fs_info->sb->s_flags & MS_RDONLY)
8316 		return -EROFS;
8317 
8318 	INIT_LIST_HEAD(&splice);
8319 
8320 	spin_lock(&fs_info->delalloc_root_lock);
8321 	list_splice_init(&fs_info->delalloc_roots, &splice);
8322 	while (!list_empty(&splice)) {
8323 		root = list_first_entry(&splice, struct btrfs_root,
8324 					delalloc_root);
8325 		root = btrfs_grab_fs_root(root);
8326 		BUG_ON(!root);
8327 		list_move_tail(&root->delalloc_root,
8328 			       &fs_info->delalloc_roots);
8329 		spin_unlock(&fs_info->delalloc_root_lock);
8330 
8331 		ret = __start_delalloc_inodes(root, delay_iput);
8332 		btrfs_put_fs_root(root);
8333 		if (ret)
8334 			goto out;
8335 
8336 		spin_lock(&fs_info->delalloc_root_lock);
8337 	}
8338 	spin_unlock(&fs_info->delalloc_root_lock);
8339 
8340 	atomic_inc(&fs_info->async_submit_draining);
8341 	while (atomic_read(&fs_info->nr_async_submits) ||
8342 	      atomic_read(&fs_info->async_delalloc_pages)) {
8343 		wait_event(fs_info->async_submit_wait,
8344 		   (atomic_read(&fs_info->nr_async_submits) == 0 &&
8345 		    atomic_read(&fs_info->async_delalloc_pages) == 0));
8346 	}
8347 	atomic_dec(&fs_info->async_submit_draining);
8348 	return 0;
8349 out:
8350 	if (!list_empty_careful(&splice)) {
8351 		spin_lock(&fs_info->delalloc_root_lock);
8352 		list_splice_tail(&splice, &fs_info->delalloc_roots);
8353 		spin_unlock(&fs_info->delalloc_root_lock);
8354 	}
8355 	return ret;
8356 }
8357 
8358 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
8359 			 const char *symname)
8360 {
8361 	struct btrfs_trans_handle *trans;
8362 	struct btrfs_root *root = BTRFS_I(dir)->root;
8363 	struct btrfs_path *path;
8364 	struct btrfs_key key;
8365 	struct inode *inode = NULL;
8366 	int err;
8367 	int drop_inode = 0;
8368 	u64 objectid;
8369 	u64 index = 0;
8370 	int name_len;
8371 	int datasize;
8372 	unsigned long ptr;
8373 	struct btrfs_file_extent_item *ei;
8374 	struct extent_buffer *leaf;
8375 
8376 	name_len = strlen(symname);
8377 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
8378 		return -ENAMETOOLONG;
8379 
8380 	/*
8381 	 * 2 items for inode item and ref
8382 	 * 2 items for dir items
8383 	 * 1 item for xattr if selinux is on
8384 	 */
8385 	trans = btrfs_start_transaction(root, 5);
8386 	if (IS_ERR(trans))
8387 		return PTR_ERR(trans);
8388 
8389 	err = btrfs_find_free_ino(root, &objectid);
8390 	if (err)
8391 		goto out_unlock;
8392 
8393 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
8394 				dentry->d_name.len, btrfs_ino(dir), objectid,
8395 				S_IFLNK|S_IRWXUGO, &index);
8396 	if (IS_ERR(inode)) {
8397 		err = PTR_ERR(inode);
8398 		goto out_unlock;
8399 	}
8400 
8401 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
8402 	if (err) {
8403 		drop_inode = 1;
8404 		goto out_unlock;
8405 	}
8406 
8407 	/*
8408 	 * If the active LSM wants to access the inode during
8409 	 * d_instantiate it needs these. Smack checks to see
8410 	 * if the filesystem supports xattrs by looking at the
8411 	 * ops vector.
8412 	 */
8413 	inode->i_fop = &btrfs_file_operations;
8414 	inode->i_op = &btrfs_file_inode_operations;
8415 
8416 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
8417 	if (err)
8418 		drop_inode = 1;
8419 	else {
8420 		inode->i_mapping->a_ops = &btrfs_aops;
8421 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
8422 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
8423 	}
8424 	if (drop_inode)
8425 		goto out_unlock;
8426 
8427 	path = btrfs_alloc_path();
8428 	if (!path) {
8429 		err = -ENOMEM;
8430 		drop_inode = 1;
8431 		goto out_unlock;
8432 	}
8433 	key.objectid = btrfs_ino(inode);
8434 	key.offset = 0;
8435 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
8436 	datasize = btrfs_file_extent_calc_inline_size(name_len);
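	/* the link target is stored inline in an EXTENT_DATA item;
	 * datasize covers the extent header plus name_len target bytes */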
8437 	err = btrfs_insert_empty_item(trans, root, path, &key,
8438 				      datasize);
8439 	if (err) {
8440 		drop_inode = 1;
8441 		btrfs_free_path(path);
8442 		goto out_unlock;
8443 	}
8444 	leaf = path->nodes[0];
8445 	ei = btrfs_item_ptr(leaf, path->slots[0],
8446 			    struct btrfs_file_extent_item);
8447 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
8448 	btrfs_set_file_extent_type(leaf, ei,
8449 				   BTRFS_FILE_EXTENT_INLINE);
8450 	btrfs_set_file_extent_encryption(leaf, ei, 0);
8451 	btrfs_set_file_extent_compression(leaf, ei, 0);
8452 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
8453 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
8454 
8455 	ptr = btrfs_file_extent_inline_start(ei);
8456 	write_extent_buffer(leaf, symname, ptr, name_len);
8457 	btrfs_mark_buffer_dirty(leaf);
8458 	btrfs_free_path(path);
8459 
8460 	inode->i_op = &btrfs_symlink_inode_operations;
8461 	inode->i_mapping->a_ops = &btrfs_symlink_aops;
8462 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
8463 	inode_set_bytes(inode, name_len);
8464 	btrfs_i_size_write(inode, name_len);
8465 	err = btrfs_update_inode(trans, root, inode);
8466 	if (err)
8467 		drop_inode = 1;
8468 
8469 out_unlock:
8470 	if (!err)
8471 		d_instantiate(dentry, inode);
8472 	btrfs_end_transaction(trans, root);
8473 	if (drop_inode) {
8474 		inode_dec_link_count(inode);
8475 		iput(inode);
8476 	}
8477 	btrfs_btree_balance_dirty(root);
8478 	return err;
8479 }
8480 
8481 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
8482 				       u64 start, u64 num_bytes, u64 min_size,
8483 				       loff_t actual_len, u64 *alloc_hint,
8484 				       struct btrfs_trans_handle *trans)
8485 {
8486 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
8487 	struct extent_map *em;
8488 	struct btrfs_root *root = BTRFS_I(inode)->root;
8489 	struct btrfs_key ins;
8490 	u64 cur_offset = start;
8491 	u64 i_size;
8492 	u64 cur_bytes;
8493 	int ret = 0;
8494 	bool own_trans = true;
8495 
8496 	if (trans)
8497 		own_trans = false;
8498 	while (num_bytes > 0) {
8499 		if (own_trans) {
8500 			trans = btrfs_start_transaction(root, 3);
8501 			if (IS_ERR(trans)) {
8502 				ret = PTR_ERR(trans);
8503 				break;
8504 			}
8505 		}
8506 
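		/* carve the range up: at most 256MiB per pass, never less
		 * than the caller's min_size */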
8507 		cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
8508 		cur_bytes = max(cur_bytes, min_size);
8509 		ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
8510 					   *alloc_hint, &ins, 1);
8511 		if (ret) {
8512 			if (own_trans)
8513 				btrfs_end_transaction(trans, root);
8514 			break;
8515 		}
8516 
8517 		ret = insert_reserved_file_extent(trans, inode,
8518 						  cur_offset, ins.objectid,
8519 						  ins.offset, ins.offset,
8520 						  ins.offset, 0, 0, 0,
8521 						  BTRFS_FILE_EXTENT_PREALLOC);
8522 		if (ret) {
8523 			btrfs_free_reserved_extent(root, ins.objectid,
8524 						   ins.offset);
8525 			btrfs_abort_transaction(trans, root, ret);
8526 			if (own_trans)
8527 				btrfs_end_transaction(trans, root);
8528 			break;
8529 		}
8530 		btrfs_drop_extent_cache(inode, cur_offset,
8531 					cur_offset + ins.offset - 1, 0);
8532 
8533 		em = alloc_extent_map();
8534 		if (!em) {
8535 			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
8536 				&BTRFS_I(inode)->runtime_flags);
8537 			goto next;
8538 		}
8539 
8540 		em->start = cur_offset;
8541 		em->orig_start = cur_offset;
8542 		em->len = ins.offset;
8543 		em->block_start = ins.objectid;
8544 		em->block_len = ins.offset;
8545 		em->orig_block_len = ins.offset;
8546 		em->ram_bytes = ins.offset;
8547 		em->bdev = root->fs_info->fs_devices->latest_bdev;
8548 		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
8549 		em->generation = trans->transid;
8550 
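		/*
		 * Insert our mapping; -EEXIST means a stale cached extent
		 * raced back in, so drop it and retry until ours sticks.
		 */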
8551 		while (1) {
8552 			write_lock(&em_tree->lock);
8553 			ret = add_extent_mapping(em_tree, em, 1);
8554 			write_unlock(&em_tree->lock);
8555 			if (ret != -EEXIST)
8556 				break;
8557 			btrfs_drop_extent_cache(inode, cur_offset,
8558 						cur_offset + ins.offset - 1,
8559 						0);
8560 		}
8561 		free_extent_map(em);
8562 next:
8563 		num_bytes -= ins.offset;
8564 		cur_offset += ins.offset;
8565 		*alloc_hint = ins.objectid + ins.offset;
8566 
8567 		inode_inc_iversion(inode);
8568 		inode->i_ctime = CURRENT_TIME;
8569 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
8570 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
8571 		    (actual_len > inode->i_size) &&
8572 		    (cur_offset > inode->i_size)) {
8573 			if (cur_offset > actual_len)
8574 				i_size = actual_len;
8575 			else
8576 				i_size = cur_offset;
8577 			i_size_write(inode, i_size);
8578 			btrfs_ordered_update_i_size(inode, i_size, NULL);
8579 		}
8580 
8581 		ret = btrfs_update_inode(trans, root, inode);
8582 
8583 		if (ret) {
8584 			btrfs_abort_transaction(trans, root, ret);
8585 			if (own_trans)
8586 				btrfs_end_transaction(trans, root);
8587 			break;
8588 		}
8589 
8590 		if (own_trans)
8591 			btrfs_end_transaction(trans, root);
8592 	}
8593 	return ret;
8594 }
8595 
8596 int btrfs_prealloc_file_range(struct inode *inode, int mode,
8597 			      u64 start, u64 num_bytes, u64 min_size,
8598 			      loff_t actual_len, u64 *alloc_hint)
8599 {
8600 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
8601 					   min_size, actual_len, alloc_hint,
8602 					   NULL);
8603 }
8604 
8605 int btrfs_prealloc_file_range_trans(struct inode *inode,
8606 				    struct btrfs_trans_handle *trans, int mode,
8607 				    u64 start, u64 num_bytes, u64 min_size,
8608 				    loff_t actual_len, u64 *alloc_hint)
8609 {
8610 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
8611 					   min_size, actual_len, alloc_hint, trans);
8612 }
8613 
8614 static int btrfs_set_page_dirty(struct page *page)
8615 {
8616 	return __set_page_dirty_nobuffers(page);
8617 }
8618 
8619 static int btrfs_permission(struct inode *inode, int mask)
8620 {
8621 	struct btrfs_root *root = BTRFS_I(inode)->root;
8622 	umode_t mode = inode->i_mode;
8623 
8624 	if (mask & MAY_WRITE &&
8625 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
8626 		if (btrfs_root_readonly(root))
8627 			return -EROFS;
8628 		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
8629 			return -EACCES;
8630 	}
8631 	return generic_permission(inode, mask);
8632 }
8633 
8634 static const struct inode_operations btrfs_dir_inode_operations = {
8635 	.getattr	= btrfs_getattr,
8636 	.lookup		= btrfs_lookup,
8637 	.create		= btrfs_create,
8638 	.unlink		= btrfs_unlink,
8639 	.link		= btrfs_link,
8640 	.mkdir		= btrfs_mkdir,
8641 	.rmdir		= btrfs_rmdir,
8642 	.rename		= btrfs_rename,
8643 	.symlink	= btrfs_symlink,
8644 	.setattr	= btrfs_setattr,
8645 	.mknod		= btrfs_mknod,
8646 	.setxattr	= btrfs_setxattr,
8647 	.getxattr	= btrfs_getxattr,
8648 	.listxattr	= btrfs_listxattr,
8649 	.removexattr	= btrfs_removexattr,
8650 	.permission	= btrfs_permission,
8651 	.get_acl	= btrfs_get_acl,
8652 	.update_time	= btrfs_update_time,
8653 };
8654 static const struct inode_operations btrfs_dir_ro_inode_operations = {
8655 	.lookup		= btrfs_lookup,
8656 	.permission	= btrfs_permission,
8657 	.get_acl	= btrfs_get_acl,
8658 	.update_time	= btrfs_update_time,
8659 };
8660 
8661 static const struct file_operations btrfs_dir_file_operations = {
8662 	.llseek		= generic_file_llseek,
8663 	.read		= generic_read_dir,
8664 	.iterate	= btrfs_real_readdir,
8665 	.unlocked_ioctl	= btrfs_ioctl,
8666 #ifdef CONFIG_COMPAT
8667 	.compat_ioctl	= btrfs_ioctl,
8668 #endif
8669 	.release        = btrfs_release_file,
8670 	.fsync		= btrfs_sync_file,
8671 };
8672 
8673 static struct extent_io_ops btrfs_extent_io_ops = {
8674 	.fill_delalloc = run_delalloc_range,
8675 	.submit_bio_hook = btrfs_submit_bio_hook,
8676 	.merge_bio_hook = btrfs_merge_bio_hook,
8677 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
8678 	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
8679 	.writepage_start_hook = btrfs_writepage_start_hook,
8680 	.set_bit_hook = btrfs_set_bit_hook,
8681 	.clear_bit_hook = btrfs_clear_bit_hook,
8682 	.merge_extent_hook = btrfs_merge_extent_hook,
8683 	.split_extent_hook = btrfs_split_extent_hook,
8684 };
8685 
8686 /*
8687  * btrfs doesn't support the bmap operation because swapfiles
8688  * use bmap to make a mapping of extents in the file.  They assume
8689  * these extents won't change over the life of the file and they
8690  * use the bmap result to do IO directly to the drive.
8691  *
8692  * the btrfs bmap call would return logical addresses that aren't
8693  * suitable for IO and they also will change frequently as COW
8694  * operations happen.  So, swapfile + btrfs == corruption.
8695  *
8696  * For now we're avoiding this by dropping bmap.
8697  */
8698 static const struct address_space_operations btrfs_aops = {
8699 	.readpage	= btrfs_readpage,
8700 	.writepage	= btrfs_writepage,
8701 	.writepages	= btrfs_writepages,
8702 	.readpages	= btrfs_readpages,
8703 	.direct_IO	= btrfs_direct_IO,
8704 	.invalidatepage = btrfs_invalidatepage,
8705 	.releasepage	= btrfs_releasepage,
8706 	.set_page_dirty	= btrfs_set_page_dirty,
8707 	.error_remove_page = generic_error_remove_page,
8708 };
8709 
8710 static const struct address_space_operations btrfs_symlink_aops = {
8711 	.readpage	= btrfs_readpage,
8712 	.writepage	= btrfs_writepage,
8713 	.invalidatepage = btrfs_invalidatepage,
8714 	.releasepage	= btrfs_releasepage,
8715 };
8716 
8717 static const struct inode_operations btrfs_file_inode_operations = {
8718 	.getattr	= btrfs_getattr,
8719 	.setattr	= btrfs_setattr,
8720 	.setxattr	= btrfs_setxattr,
8721 	.getxattr	= btrfs_getxattr,
8722 	.listxattr      = btrfs_listxattr,
8723 	.removexattr	= btrfs_removexattr,
8724 	.permission	= btrfs_permission,
8725 	.fiemap		= btrfs_fiemap,
8726 	.get_acl	= btrfs_get_acl,
8727 	.update_time	= btrfs_update_time,
8728 };
8729 static const struct inode_operations btrfs_special_inode_operations = {
8730 	.getattr	= btrfs_getattr,
8731 	.setattr	= btrfs_setattr,
8732 	.permission	= btrfs_permission,
8733 	.setxattr	= btrfs_setxattr,
8734 	.getxattr	= btrfs_getxattr,
8735 	.listxattr	= btrfs_listxattr,
8736 	.removexattr	= btrfs_removexattr,
8737 	.get_acl	= btrfs_get_acl,
8738 	.update_time	= btrfs_update_time,
8739 };
8740 static const struct inode_operations btrfs_symlink_inode_operations = {
8741 	.readlink	= generic_readlink,
8742 	.follow_link	= page_follow_link_light,
8743 	.put_link	= page_put_link,
8744 	.getattr	= btrfs_getattr,
8745 	.setattr	= btrfs_setattr,
8746 	.permission	= btrfs_permission,
8747 	.setxattr	= btrfs_setxattr,
8748 	.getxattr	= btrfs_getxattr,
8749 	.listxattr	= btrfs_listxattr,
8750 	.removexattr	= btrfs_removexattr,
8751 	.get_acl	= btrfs_get_acl,
8752 	.update_time	= btrfs_update_time,
8753 };
8754 
8755 const struct dentry_operations btrfs_dentry_operations = {
8756 	.d_delete	= btrfs_dentry_delete,
8757 	.d_release	= btrfs_dentry_release,
8758 };
8759