xref: /linux/fs/btrfs/inode.c (revision 6eb2fb3170549737207974c2c6ad34bcc2f3025e)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/backing-dev.h>
30 #include <linux/mpage.h>
31 #include <linux/swap.h>
32 #include <linux/writeback.h>
33 #include <linux/statfs.h>
34 #include <linux/compat.h>
35 #include <linux/aio.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/xattr.h>
38 #include <linux/posix_acl.h>
39 #include <linux/falloc.h>
40 #include <linux/slab.h>
41 #include <linux/ratelimit.h>
42 #include <linux/mount.h>
43 #include <linux/btrfs.h>
44 #include <linux/blkdev.h>
45 #include "compat.h"
46 #include "ctree.h"
47 #include "disk-io.h"
48 #include "transaction.h"
49 #include "btrfs_inode.h"
50 #include "print-tree.h"
51 #include "ordered-data.h"
52 #include "xattr.h"
53 #include "tree-log.h"
54 #include "volumes.h"
55 #include "compression.h"
56 #include "locking.h"
57 #include "free-space-cache.h"
58 #include "inode-map.h"
59 #include "backref.h"
60 
61 struct btrfs_iget_args {
62 	u64 ino;
63 	struct btrfs_root *root;
64 };
65 
66 static const struct inode_operations btrfs_dir_inode_operations;
67 static const struct inode_operations btrfs_symlink_inode_operations;
68 static const struct inode_operations btrfs_dir_ro_inode_operations;
69 static const struct inode_operations btrfs_special_inode_operations;
70 static const struct inode_operations btrfs_file_inode_operations;
71 static const struct address_space_operations btrfs_aops;
72 static const struct address_space_operations btrfs_symlink_aops;
73 static const struct file_operations btrfs_dir_file_operations;
74 static struct extent_io_ops btrfs_extent_io_ops;
75 
76 static struct kmem_cache *btrfs_inode_cachep;
77 static struct kmem_cache *btrfs_delalloc_work_cachep;
78 struct kmem_cache *btrfs_trans_handle_cachep;
79 struct kmem_cache *btrfs_transaction_cachep;
80 struct kmem_cache *btrfs_path_cachep;
81 struct kmem_cache *btrfs_free_space_cachep;
82 
83 #define S_SHIFT 12
84 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
85 	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
86 	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
87 	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
88 	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
89 	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
90 	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
91 	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
92 };
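/*
 * A small usage sketch of the table above (illustrative only): dir
 * items store one of the BTRFS_FT_* values, derived from the VFS
 * mode bits, e.g.
 *
 *	umode_t mode = S_IFDIR | 0755;
 *	u8 ft = btrfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
 *	// ft == BTRFS_FT_DIR
 */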
93 
94 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
95 static int btrfs_truncate(struct inode *inode);
96 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
97 static noinline int cow_file_range(struct inode *inode,
98 				   struct page *locked_page,
99 				   u64 start, u64 end, int *page_started,
100 				   unsigned long *nr_written, int unlock);
101 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
102 					   u64 len, u64 orig_start,
103 					   u64 block_start, u64 block_len,
104 					   u64 orig_block_len, u64 ram_bytes,
105 					   int type);
106 
107 static int btrfs_dirty_inode(struct inode *inode);
108 
109 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
110 				     struct inode *inode,  struct inode *dir,
111 				     const struct qstr *qstr)
112 {
113 	int err;
114 
115 	err = btrfs_init_acl(trans, inode, dir);
116 	if (!err)
117 		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
118 	return err;
119 }
120 
121 /*
122  * this does all the hard work for inserting an inline extent into
123  * the btree.  The caller should have done a btrfs_drop_extents so that
124  * no overlapping inline items exist in the btree
125  */
126 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
127 				struct btrfs_root *root, struct inode *inode,
128 				u64 start, size_t size, size_t compressed_size,
129 				int compress_type,
130 				struct page **compressed_pages)
131 {
132 	struct btrfs_key key;
133 	struct btrfs_path *path;
134 	struct extent_buffer *leaf;
135 	struct page *page = NULL;
136 	char *kaddr;
137 	unsigned long ptr;
138 	struct btrfs_file_extent_item *ei;
139 	int err = 0;
140 	int ret;
141 	size_t cur_size = size;
142 	size_t datasize;
143 	unsigned long offset;
144 
145 	if (compressed_size && compressed_pages)
146 		cur_size = compressed_size;
147 
148 	path = btrfs_alloc_path();
149 	if (!path)
150 		return -ENOMEM;
151 
152 	path->leave_spinning = 1;
153 
154 	key.objectid = btrfs_ino(inode);
155 	key.offset = start;
156 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
157 	datasize = btrfs_file_extent_calc_inline_size(cur_size);
158 
159 	inode_add_bytes(inode, size);
160 	ret = btrfs_insert_empty_item(trans, root, path, &key,
161 				      datasize);
162 	if (ret) {
163 		err = ret;
164 		goto fail;
165 	}
166 	leaf = path->nodes[0];
167 	ei = btrfs_item_ptr(leaf, path->slots[0],
168 			    struct btrfs_file_extent_item);
169 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
170 	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
171 	btrfs_set_file_extent_encryption(leaf, ei, 0);
172 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
173 	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
174 	ptr = btrfs_file_extent_inline_start(ei);
175 
176 	if (compress_type != BTRFS_COMPRESS_NONE) {
177 		struct page *cpage;
178 		int i = 0;
179 		while (compressed_size > 0) {
180 			cpage = compressed_pages[i];
181 			cur_size = min_t(unsigned long, compressed_size,
182 				       PAGE_CACHE_SIZE);
183 
184 			kaddr = kmap_atomic(cpage);
185 			write_extent_buffer(leaf, kaddr, ptr, cur_size);
186 			kunmap_atomic(kaddr);
187 
188 			i++;
189 			ptr += cur_size;
190 			compressed_size -= cur_size;
191 		}
192 		btrfs_set_file_extent_compression(leaf, ei,
193 						  compress_type);
194 	} else {
195 		page = find_get_page(inode->i_mapping,
196 				     start >> PAGE_CACHE_SHIFT);
197 		btrfs_set_file_extent_compression(leaf, ei, 0);
198 		kaddr = kmap_atomic(page);
199 		offset = start & (PAGE_CACHE_SIZE - 1);
200 		write_extent_buffer(leaf, kaddr + offset, ptr, size);
201 		kunmap_atomic(kaddr);
202 		page_cache_release(page);
203 	}
204 	btrfs_mark_buffer_dirty(leaf);
205 	btrfs_free_path(path);
206 
207 	/*
208 	 * we're an inline extent, so nobody can
209 	 * extend the file past i_size without locking
210 	 * a page we already have locked.
211 	 *
212 	 * We must do any isize and inode updates
213 	 * before we unlock the pages.  Otherwise we
214 	 * could end up racing with unlink.
215 	 */
216 	BTRFS_I(inode)->disk_i_size = inode->i_size;
217 	ret = btrfs_update_inode(trans, root, inode);
218 
219 	return ret;
220 fail:
221 	btrfs_free_path(path);
222 	return err;
223 }
224 
225 
226 /*
227  * conditionally insert an inline extent into the file.  This
228  * does the checks required to make sure the data is small enough
229  * to fit as an inline extent.
230  */
231 static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
232 				 struct btrfs_root *root,
233 				 struct inode *inode, u64 start, u64 end,
234 				 size_t compressed_size, int compress_type,
235 				 struct page **compressed_pages)
236 {
237 	u64 isize = i_size_read(inode);
238 	u64 actual_end = min(end + 1, isize);
239 	u64 inline_len = actual_end - start;
240 	u64 aligned_end = ALIGN(end, root->sectorsize);
241 	u64 data_len = inline_len;
242 	int ret;
243 
244 	if (compressed_size)
245 		data_len = compressed_size;
246 
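	/*
	 * The checks below reject anything that can't live entirely in
	 * a single leaf item at file offset 0: inline extents must start
	 * at offset zero, fit inside one page and the max_inline mount
	 * limit, and cover the tail of the file; an uncompressed range
	 * ending exactly on a sector boundary gains nothing from being
	 * inlined.
	 */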
247 	if (start > 0 ||
248 	    actual_end >= PAGE_CACHE_SIZE ||
249 	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
250 	    (!compressed_size &&
251 	    (actual_end & (root->sectorsize - 1)) == 0) ||
252 	    end + 1 < isize ||
253 	    data_len > root->fs_info->max_inline) {
254 		return 1;
255 	}
256 
257 	ret = btrfs_drop_extents(trans, root, inode, start, aligned_end, 1);
258 	if (ret)
259 		return ret;
260 
261 	if (isize > actual_end)
262 		inline_len = min_t(u64, isize, actual_end);
263 	ret = insert_inline_extent(trans, root, inode, start,
264 				   inline_len, compressed_size,
265 				   compress_type, compressed_pages);
266 	if (ret && ret != -ENOSPC) {
267 		btrfs_abort_transaction(trans, root, ret);
268 		return ret;
269 	} else if (ret == -ENOSPC) {
270 		return 1;
271 	}
272 
273 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
274 	btrfs_delalloc_release_metadata(inode, end + 1 - start);
275 	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
276 	return 0;
277 }
278 
279 struct async_extent {
280 	u64 start;
281 	u64 ram_size;
282 	u64 compressed_size;
283 	struct page **pages;
284 	unsigned long nr_pages;
285 	int compress_type;
286 	struct list_head list;
287 };
288 
289 struct async_cow {
290 	struct inode *inode;
291 	struct btrfs_root *root;
292 	struct page *locked_page;
293 	u64 start;
294 	u64 end;
295 	struct list_head extents;
296 	struct btrfs_work work;
297 };
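/*
 * One async_cow covers a contiguous delalloc range handed off to the
 * worker threads.  compress_file_range() chops it into async_extents
 * (compressed or not) queued on ->extents, and
 * submit_compressed_extents() later allocates disk space and writes
 * them out in queue order.
 */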
298 
299 static noinline int add_async_extent(struct async_cow *cow,
300 				     u64 start, u64 ram_size,
301 				     u64 compressed_size,
302 				     struct page **pages,
303 				     unsigned long nr_pages,
304 				     int compress_type)
305 {
306 	struct async_extent *async_extent;
307 
308 	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
309 	BUG_ON(!async_extent); /* -ENOMEM */
310 	async_extent->start = start;
311 	async_extent->ram_size = ram_size;
312 	async_extent->compressed_size = compressed_size;
313 	async_extent->pages = pages;
314 	async_extent->nr_pages = nr_pages;
315 	async_extent->compress_type = compress_type;
316 	list_add_tail(&async_extent->list, &cow->extents);
317 	return 0;
318 }
319 
320 /*
321  * we create compressed extents in two phases.  The first
322  * phase compresses a range of pages that have already been
323  * locked (both pages and state bits are locked).
324  *
325  * This is done inside an ordered work queue, and the compression
326  * is spread across many cpus.  The actual IO submission is step
327  * two, and the ordered work queue takes care of making sure that
328  * happens in the same order things were put onto the queue by
329  * writepages and friends.
330  *
331  * If this code finds it can't get good compression, it puts an
332  * entry onto the work queue to write the uncompressed bytes.  This
333  * makes sure that both compressed inodes and uncompressed inodes
334  * are written in the same order that the flusher thread sent them
335  * down.
336  */
337 static noinline int compress_file_range(struct inode *inode,
338 					struct page *locked_page,
339 					u64 start, u64 end,
340 					struct async_cow *async_cow,
341 					int *num_added)
342 {
343 	struct btrfs_root *root = BTRFS_I(inode)->root;
344 	struct btrfs_trans_handle *trans;
345 	u64 num_bytes;
346 	u64 blocksize = root->sectorsize;
347 	u64 actual_end;
348 	u64 isize = i_size_read(inode);
349 	int ret = 0;
350 	struct page **pages = NULL;
351 	unsigned long nr_pages;
352 	unsigned long nr_pages_ret = 0;
353 	unsigned long total_compressed = 0;
354 	unsigned long total_in = 0;
355 	unsigned long max_compressed = 128 * 1024;
356 	unsigned long max_uncompressed = 128 * 1024;
357 	int i;
358 	int will_compress;
359 	int compress_type = root->fs_info->compress_type;
360 	int redirty = 0;
361 
362 	/* if this is a small write inside eof, kick off a defrag */
363 	if ((end - start + 1) < 16 * 1024 &&
364 	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
365 		btrfs_add_inode_defrag(NULL, inode);
366 
367 	actual_end = min_t(u64, isize, end + 1);
368 again:
369 	will_compress = 0;
370 	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
371 	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
372 
373 	/*
374 	 * we don't want to send crud past the end of i_size through
375 	 * compression, that's just a waste of CPU time.  So, if the
376 	 * end of the file is before the start of our current
377 	 * requested range of bytes, we bail out to the uncompressed
378 	 * cleanup code that can deal with all of this.
379 	 *
380 	 * It isn't really the fastest way to fix things, but this is a
381 	 * very uncommon corner.
382 	 */
383 	if (actual_end <= start)
384 		goto cleanup_and_bail_uncompressed;
385 
386 	total_compressed = actual_end - start;
387 
388 	/* we want to make sure the amount of ram required to uncompress
389 	 * an extent is reasonable, so we limit the total size in ram
390 	 * of a compressed extent to 128k.  This is a crucial number
391 	 * because it also controls how easily we can spread reads across
392 	 * cpus for decompression.
393 	 *
394 	 * We also want to make sure the amount of IO required to do
395 	 * a random read is reasonably small, so we limit the size of
396 	 * a compressed extent to 128k.
397 	 */
398 	total_compressed = min(total_compressed, max_uncompressed);
399 	num_bytes = ALIGN(end - start + 1, blocksize);
400 	num_bytes = max(blocksize,  num_bytes);
401 	total_in = 0;
402 	ret = 0;
403 
404 	/*
405 	 * we do compression for mount -o compress and when the
406 	 * inode has not been flagged as nocompress.  This flag can
407 	 * change at any time if we discover bad compression ratios.
408 	 */
409 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
410 	    (btrfs_test_opt(root, COMPRESS) ||
411 	     (BTRFS_I(inode)->force_compress) ||
412 	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
413 		WARN_ON(pages);
414 		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
415 		if (!pages) {
416 			/* just bail out to the uncompressed code */
417 			goto cont;
418 		}
419 
420 		if (BTRFS_I(inode)->force_compress)
421 			compress_type = BTRFS_I(inode)->force_compress;
422 
423 		/*
424 		 * we need to call clear_page_dirty_for_io on each
425 		 * page in the range.  Otherwise applications with the file
426 		 * mmap'd can wander in and change the page contents while
427 		 * we are compressing them.
428 		 *
429 		 * If the compression fails for any reason, we set the pages
430 		 * dirty again later on.
431 		 */
432 		extent_range_clear_dirty_for_io(inode, start, end);
433 		redirty = 1;
434 		ret = btrfs_compress_pages(compress_type,
435 					   inode->i_mapping, start,
436 					   total_compressed, pages,
437 					   nr_pages, &nr_pages_ret,
438 					   &total_in,
439 					   &total_compressed,
440 					   max_compressed);
441 
442 		if (!ret) {
443 			unsigned long offset = total_compressed &
444 				(PAGE_CACHE_SIZE - 1);
445 			struct page *page = pages[nr_pages_ret - 1];
446 			char *kaddr;
447 
448 			/* zero the tail end of the last page, we might be
449 			 * sending it down to disk
450 			 */
451 			if (offset) {
452 				kaddr = kmap_atomic(page);
453 				memset(kaddr + offset, 0,
454 				       PAGE_CACHE_SIZE - offset);
455 				kunmap_atomic(kaddr);
456 			}
457 			will_compress = 1;
458 		}
459 	}
460 cont:
461 	if (start == 0) {
462 		trans = btrfs_join_transaction(root);
463 		if (IS_ERR(trans)) {
464 			ret = PTR_ERR(trans);
465 			trans = NULL;
466 			goto cleanup_and_out;
467 		}
468 		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
469 
470 		/* let's try to make an inline extent */
471 		if (ret || total_in < (actual_end - start)) {
472 			/* we didn't compress the entire range, try
473 			 * to make an uncompressed inline extent.
474 			 */
475 			ret = cow_file_range_inline(trans, root, inode,
476 						    start, end, 0, 0, NULL);
477 		} else {
478 			/* try making a compressed inline extent */
479 			ret = cow_file_range_inline(trans, root, inode,
480 						    start, end,
481 						    total_compressed,
482 						    compress_type, pages);
483 		}
484 		if (ret <= 0) {
485 			/*
486 			 * inline extent creation worked or returned error,
487 			 * we don't need to create any more async work items.
488 			 * Unlock and free up our temp pages.
489 			 */
490 			extent_clear_unlock_delalloc(inode,
491 			     &BTRFS_I(inode)->io_tree,
492 			     start, end, NULL,
493 			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
494 			     EXTENT_CLEAR_DELALLOC |
495 			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
496 
497 			btrfs_end_transaction(trans, root);
498 			goto free_pages_out;
499 		}
500 		btrfs_end_transaction(trans, root);
501 	}
502 
503 	if (will_compress) {
504 		/*
505 		 * we aren't doing an inline extent, so round the compressed
506 		 * size up to a block size boundary so the allocator does
507 		 * sane things
508 		 */
509 		total_compressed = ALIGN(total_compressed, blocksize);
510 
511 		/*
512 		 * one last check to make sure the compression is really a
513 		 * win: compare the page count read with the blocks on disk
514 		 */
515 		total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
516 		if (total_compressed >= total_in) {
517 			will_compress = 0;
518 		} else {
519 			num_bytes = total_in;
520 		}
521 	}
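	/*
	 * A worked example, assuming 4k pages and blocks: a 12k write
	 * that compresses to 5k gives total_compressed = 8k after block
	 * alignment and total_in = 12k, so compression wins and we cover
	 * 12k of ram with 8k on disk.  A 12k write that only shrinks to
	 * 11k rounds up to 12k on disk and stays uncompressed.
	 */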
522 	if (!will_compress && pages) {
523 		/*
524 		 * the compression code ran but failed to make things smaller,
525 		 * free any pages it allocated and our page pointer array
526 		 */
527 		for (i = 0; i < nr_pages_ret; i++) {
528 			WARN_ON(pages[i]->mapping);
529 			page_cache_release(pages[i]);
530 		}
531 		kfree(pages);
532 		pages = NULL;
533 		total_compressed = 0;
534 		nr_pages_ret = 0;
535 
536 		/* flag the file so we don't compress in the future */
537 		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
538 		    !(BTRFS_I(inode)->force_compress)) {
539 			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
540 		}
541 	}
542 	if (will_compress) {
543 		*num_added += 1;
544 
545 		/* the async work queues will take care of doing actual
546 		 * allocation on disk for these compressed pages,
547 		 * and will submit them to the elevator.
548 		 */
549 		add_async_extent(async_cow, start, num_bytes,
550 				 total_compressed, pages, nr_pages_ret,
551 				 compress_type);
552 
553 		if (start + num_bytes < end) {
554 			start += num_bytes;
555 			pages = NULL;
556 			cond_resched();
557 			goto again;
558 		}
559 	} else {
560 cleanup_and_bail_uncompressed:
561 		/*
562 		 * No compression, but we still need to write the pages in
563 		 * the file we've been given so far.  Redirty the locked
564 		 * page if it corresponds to our extent and set things up
565 		 * for the async work queue to run cow_file_range to do
566 		 * the normal delalloc dance
567 		 */
568 		if (page_offset(locked_page) >= start &&
569 		    page_offset(locked_page) <= end) {
570 			__set_page_dirty_nobuffers(locked_page);
571 			/* unlocked later on in the async handlers */
572 		}
573 		if (redirty)
574 			extent_range_redirty_for_io(inode, start, end);
575 		add_async_extent(async_cow, start, end - start + 1,
576 				 0, NULL, 0, BTRFS_COMPRESS_NONE);
577 		*num_added += 1;
578 	}
579 
580 out:
581 	return ret;
582 
583 free_pages_out:
584 	for (i = 0; i < nr_pages_ret; i++) {
585 		WARN_ON(pages[i]->mapping);
586 		page_cache_release(pages[i]);
587 	}
588 	kfree(pages);
589 
590 	goto out;
591 
592 cleanup_and_out:
593 	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
594 				     start, end, NULL,
595 				     EXTENT_CLEAR_UNLOCK_PAGE |
596 				     EXTENT_CLEAR_DIRTY |
597 				     EXTENT_CLEAR_DELALLOC |
598 				     EXTENT_SET_WRITEBACK |
599 				     EXTENT_END_WRITEBACK);
600 	if (!trans || IS_ERR(trans))
601 		btrfs_error(root->fs_info, ret, "Failed to join transaction");
602 	else
603 		btrfs_abort_transaction(trans, root, ret);
604 	goto free_pages_out;
605 }
606 
607 /*
608  * phase two of compressed writeback.  This is the ordered portion
609  * of the code, which only gets called in the order the work was
610  * queued.  We walk all the async extents created by compress_file_range
611  * and send them down to the disk.
612  */
613 static noinline int submit_compressed_extents(struct inode *inode,
614 					      struct async_cow *async_cow)
615 {
616 	struct async_extent *async_extent;
617 	u64 alloc_hint = 0;
618 	struct btrfs_trans_handle *trans;
619 	struct btrfs_key ins;
620 	struct extent_map *em;
621 	struct btrfs_root *root = BTRFS_I(inode)->root;
622 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
623 	struct extent_io_tree *io_tree;
624 	int ret = 0;
625 
626 	if (list_empty(&async_cow->extents))
627 		return 0;
628 
629 again:
630 	while (!list_empty(&async_cow->extents)) {
631 		async_extent = list_entry(async_cow->extents.next,
632 					  struct async_extent, list);
633 		list_del(&async_extent->list);
634 
635 		io_tree = &BTRFS_I(inode)->io_tree;
636 
637 retry:
638 		/* did the compression code fall back to uncompressed IO? */
639 		if (!async_extent->pages) {
640 			int page_started = 0;
641 			unsigned long nr_written = 0;
642 
643 			lock_extent(io_tree, async_extent->start,
644 					 async_extent->start +
645 					 async_extent->ram_size - 1);
646 
647 			/* allocate blocks */
648 			ret = cow_file_range(inode, async_cow->locked_page,
649 					     async_extent->start,
650 					     async_extent->start +
651 					     async_extent->ram_size - 1,
652 					     &page_started, &nr_written, 0);
653 
654 			/* JDM XXX */
655 
656 			/*
657 			 * if page_started, cow_file_range inserted an
658 			 * inline extent and took care of all the unlocking
659 			 * and IO for us.  Otherwise, we need to submit
660 			 * all those pages down to the drive.
661 			 */
662 			if (!page_started && !ret)
663 				extent_write_locked_range(io_tree,
664 						  inode, async_extent->start,
665 						  async_extent->start +
666 						  async_extent->ram_size - 1,
667 						  btrfs_get_extent,
668 						  WB_SYNC_ALL);
669 			else if (ret)
670 				unlock_page(async_cow->locked_page);
671 			kfree(async_extent);
672 			cond_resched();
673 			continue;
674 		}
675 
676 		lock_extent(io_tree, async_extent->start,
677 			    async_extent->start + async_extent->ram_size - 1);
678 
679 		trans = btrfs_join_transaction(root);
680 		if (IS_ERR(trans)) {
681 			ret = PTR_ERR(trans);
682 		} else {
683 			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
684 			ret = btrfs_reserve_extent(trans, root,
685 					   async_extent->compressed_size,
686 					   async_extent->compressed_size,
687 					   0, alloc_hint, &ins, 1);
688 			if (ret && ret != -ENOSPC)
689 				btrfs_abort_transaction(trans, root, ret);
690 			btrfs_end_transaction(trans, root);
691 		}
692 
693 		if (ret) {
694 			int i;
695 
696 			for (i = 0; i < async_extent->nr_pages; i++) {
697 				WARN_ON(async_extent->pages[i]->mapping);
698 				page_cache_release(async_extent->pages[i]);
699 			}
700 			kfree(async_extent->pages);
701 			async_extent->nr_pages = 0;
702 			async_extent->pages = NULL;
703 
704 			if (ret == -ENOSPC)
705 				goto retry;
706 			goto out_free;
707 		}
708 
709 		/*
710 		 * here we're doing allocation and writeback of the
711 		 * compressed pages
712 		 */
713 		btrfs_drop_extent_cache(inode, async_extent->start,
714 					async_extent->start +
715 					async_extent->ram_size - 1, 0);
716 
717 		em = alloc_extent_map();
718 		if (!em)
719 			goto out_free_reserve;
720 		em->start = async_extent->start;
721 		em->len = async_extent->ram_size;
722 		em->orig_start = em->start;
723 		em->mod_start = em->start;
724 		em->mod_len = em->len;
725 
726 		em->block_start = ins.objectid;
727 		em->block_len = ins.offset;
728 		em->orig_block_len = ins.offset;
729 		em->ram_bytes = async_extent->ram_size;
730 		em->bdev = root->fs_info->fs_devices->latest_bdev;
731 		em->compress_type = async_extent->compress_type;
732 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
733 		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
734 		em->generation = -1;
735 
736 		while (1) {
737 			write_lock(&em_tree->lock);
738 			ret = add_extent_mapping(em_tree, em, 1);
739 			write_unlock(&em_tree->lock);
740 			if (ret != -EEXIST) {
741 				free_extent_map(em);
742 				break;
743 			}
744 			btrfs_drop_extent_cache(inode, async_extent->start,
745 						async_extent->start +
746 						async_extent->ram_size - 1, 0);
747 		}
748 
749 		if (ret)
750 			goto out_free_reserve;
751 
752 		ret = btrfs_add_ordered_extent_compress(inode,
753 						async_extent->start,
754 						ins.objectid,
755 						async_extent->ram_size,
756 						ins.offset,
757 						BTRFS_ORDERED_COMPRESSED,
758 						async_extent->compress_type);
759 		if (ret)
760 			goto out_free_reserve;
761 
762 		/*
763 		 * clear dirty, set writeback and unlock the pages.
764 		 */
765 		extent_clear_unlock_delalloc(inode,
766 				&BTRFS_I(inode)->io_tree,
767 				async_extent->start,
768 				async_extent->start +
769 				async_extent->ram_size - 1,
770 				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
771 				EXTENT_CLEAR_UNLOCK |
772 				EXTENT_CLEAR_DELALLOC |
773 				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);
774 
775 		ret = btrfs_submit_compressed_write(inode,
776 				    async_extent->start,
777 				    async_extent->ram_size,
778 				    ins.objectid,
779 				    ins.offset, async_extent->pages,
780 				    async_extent->nr_pages);
781 		alloc_hint = ins.objectid + ins.offset;
782 		kfree(async_extent);
783 		if (ret)
784 			goto out;
785 		cond_resched();
786 	}
787 	ret = 0;
788 out:
789 	return ret;
790 out_free_reserve:
791 	btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
792 out_free:
793 	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
794 				     async_extent->start,
795 				     async_extent->start +
796 				     async_extent->ram_size - 1,
797 				     NULL, EXTENT_CLEAR_UNLOCK_PAGE |
798 				     EXTENT_CLEAR_UNLOCK |
799 				     EXTENT_CLEAR_DELALLOC |
800 				     EXTENT_CLEAR_DIRTY |
801 				     EXTENT_SET_WRITEBACK |
802 				     EXTENT_END_WRITEBACK);
803 	kfree(async_extent);
804 	goto again;
805 }
806 
807 static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
808 				      u64 num_bytes)
809 {
810 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
811 	struct extent_map *em;
812 	u64 alloc_hint = 0;
813 
814 	read_lock(&em_tree->lock);
815 	em = search_extent_mapping(em_tree, start, num_bytes);
816 	if (em) {
817 		/*
818 		 * if block start isn't an actual block number then find the
819 		 * first block in this inode and use that as a hint.  If that
820 		 * block is also bogus then just don't worry about it.
821 		 */
822 		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
823 			free_extent_map(em);
824 			em = search_extent_mapping(em_tree, 0, 0);
825 			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
826 				alloc_hint = em->block_start;
827 			if (em)
828 				free_extent_map(em);
829 		} else {
830 			alloc_hint = em->block_start;
831 			free_extent_map(em);
832 		}
833 	}
834 	read_unlock(&em_tree->lock);
835 
836 	return alloc_hint;
837 }
838 
839 /*
840  * when extent_io.c finds a delayed allocation range in the file,
841  * the callbacks end up in this code.  The basic idea is to
842  * allocate extents on disk for the range, and create ordered data structs
843  * in ram to track those extents.
844  *
845  * locked_page is the page that writepage had locked already.  We use
846  * it to make sure we don't do extra locks or unlocks.
847  *
848  * *page_started is set to one if we unlock locked_page and do everything
849  * required to start IO on it.  It may be clean and already done with
850  * IO when we return.
851  */
852 static noinline int __cow_file_range(struct btrfs_trans_handle *trans,
853 				     struct inode *inode,
854 				     struct btrfs_root *root,
855 				     struct page *locked_page,
856 				     u64 start, u64 end, int *page_started,
857 				     unsigned long *nr_written,
858 				     int unlock)
859 {
860 	u64 alloc_hint = 0;
861 	u64 num_bytes;
862 	unsigned long ram_size;
863 	u64 disk_num_bytes;
864 	u64 cur_alloc_size;
865 	u64 blocksize = root->sectorsize;
866 	struct btrfs_key ins;
867 	struct extent_map *em;
868 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
869 	int ret = 0;
870 
871 	BUG_ON(btrfs_is_free_space_inode(inode));
872 
873 	num_bytes = ALIGN(end - start + 1, blocksize);
874 	num_bytes = max(blocksize,  num_bytes);
875 	disk_num_bytes = num_bytes;
876 
877 	/* if this is a small write inside eof, kick off defrag */
878 	if (num_bytes < 64 * 1024 &&
879 	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
880 		btrfs_add_inode_defrag(trans, inode);
881 
882 	if (start == 0) {
883 		/* let's try to make an inline extent */
884 		ret = cow_file_range_inline(trans, root, inode,
885 					    start, end, 0, 0, NULL);
886 		if (ret == 0) {
887 			extent_clear_unlock_delalloc(inode,
888 				     &BTRFS_I(inode)->io_tree,
889 				     start, end, NULL,
890 				     EXTENT_CLEAR_UNLOCK_PAGE |
891 				     EXTENT_CLEAR_UNLOCK |
892 				     EXTENT_CLEAR_DELALLOC |
893 				     EXTENT_CLEAR_DIRTY |
894 				     EXTENT_SET_WRITEBACK |
895 				     EXTENT_END_WRITEBACK);
896 
897 			*nr_written = *nr_written +
898 			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
899 			*page_started = 1;
900 			goto out;
901 		} else if (ret < 0) {
902 			btrfs_abort_transaction(trans, root, ret);
903 			goto out_unlock;
904 		}
905 	}
906 
907 	BUG_ON(disk_num_bytes >
908 	       btrfs_super_total_bytes(root->fs_info->super_copy));
909 
910 	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
911 	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
912 
913 	while (disk_num_bytes > 0) {
914 		unsigned long op;
915 
916 		cur_alloc_size = disk_num_bytes;
917 		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
918 					   root->sectorsize, 0, alloc_hint,
919 					   &ins, 1);
920 		if (ret < 0) {
921 			btrfs_abort_transaction(trans, root, ret);
922 			goto out_unlock;
923 		}
924 
925 		em = alloc_extent_map();
926 		if (!em)
927 			goto out_reserve;
928 		em->start = start;
929 		em->orig_start = em->start;
930 		ram_size = ins.offset;
931 		em->len = ins.offset;
932 		em->mod_start = em->start;
933 		em->mod_len = em->len;
934 
935 		em->block_start = ins.objectid;
936 		em->block_len = ins.offset;
937 		em->orig_block_len = ins.offset;
938 		em->ram_bytes = ram_size;
939 		em->bdev = root->fs_info->fs_devices->latest_bdev;
940 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
941 		em->generation = -1;
942 
943 		while (1) {
944 			write_lock(&em_tree->lock);
945 			ret = add_extent_mapping(em_tree, em, 1);
946 			write_unlock(&em_tree->lock);
947 			if (ret != -EEXIST) {
948 				free_extent_map(em);
949 				break;
950 			}
951 			btrfs_drop_extent_cache(inode, start,
952 						start + ram_size - 1, 0);
953 		}
954 		if (ret)
955 			goto out_reserve;
956 
957 		cur_alloc_size = ins.offset;
958 		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
959 					       ram_size, cur_alloc_size, 0);
960 		if (ret)
961 			goto out_reserve;
962 
963 		if (root->root_key.objectid ==
964 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
965 			ret = btrfs_reloc_clone_csums(inode, start,
966 						      cur_alloc_size);
967 			if (ret) {
968 				btrfs_abort_transaction(trans, root, ret);
969 				goto out_reserve;
970 			}
971 		}
972 
973 		if (disk_num_bytes < cur_alloc_size)
974 			break;
975 
976 		/* we're not doing compressed IO, don't unlock the first
977 		 * page (which the caller expects to stay locked), don't
978 		 * clear any dirty bits and don't set any writeback bits
979 		 *
980 		 * Do set the Private2 bit so we know this page was properly
981 		 * setup for writepage
982 		 */
983 		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
984 		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
985 			EXTENT_SET_PRIVATE2;
986 
987 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
988 					     start, start + ram_size - 1,
989 					     locked_page, op);
990 		disk_num_bytes -= cur_alloc_size;
991 		num_bytes -= cur_alloc_size;
992 		alloc_hint = ins.objectid + ins.offset;
993 		start += cur_alloc_size;
994 	}
995 out:
996 	return ret;
997 
998 out_reserve:
999 	btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
1000 out_unlock:
1001 	extent_clear_unlock_delalloc(inode,
1002 		     &BTRFS_I(inode)->io_tree,
1003 		     start, end, locked_page,
1004 		     EXTENT_CLEAR_UNLOCK_PAGE |
1005 		     EXTENT_CLEAR_UNLOCK |
1006 		     EXTENT_CLEAR_DELALLOC |
1007 		     EXTENT_CLEAR_DIRTY |
1008 		     EXTENT_SET_WRITEBACK |
1009 		     EXTENT_END_WRITEBACK);
1010 
1011 	goto out;
1012 }
1013 
1014 static noinline int cow_file_range(struct inode *inode,
1015 				   struct page *locked_page,
1016 				   u64 start, u64 end, int *page_started,
1017 				   unsigned long *nr_written,
1018 				   int unlock)
1019 {
1020 	struct btrfs_trans_handle *trans;
1021 	struct btrfs_root *root = BTRFS_I(inode)->root;
1022 	int ret;
1023 
1024 	trans = btrfs_join_transaction(root);
1025 	if (IS_ERR(trans)) {
1026 		extent_clear_unlock_delalloc(inode,
1027 			     &BTRFS_I(inode)->io_tree,
1028 			     start, end, locked_page,
1029 			     EXTENT_CLEAR_UNLOCK_PAGE |
1030 			     EXTENT_CLEAR_UNLOCK |
1031 			     EXTENT_CLEAR_DELALLOC |
1032 			     EXTENT_CLEAR_DIRTY |
1033 			     EXTENT_SET_WRITEBACK |
1034 			     EXTENT_END_WRITEBACK);
1035 		return PTR_ERR(trans);
1036 	}
1037 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1038 
1039 	ret = __cow_file_range(trans, inode, root, locked_page, start, end,
1040 			       page_started, nr_written, unlock);
1041 
1042 	btrfs_end_transaction(trans, root);
1043 
1044 	return ret;
1045 }
1046 
1047 /*
1048  * work queue callback to start compression on a file's pages
1049  */
1050 static noinline void async_cow_start(struct btrfs_work *work)
1051 {
1052 	struct async_cow *async_cow;
1053 	int num_added = 0;
1054 	async_cow = container_of(work, struct async_cow, work);
1055 
1056 	compress_file_range(async_cow->inode, async_cow->locked_page,
1057 			    async_cow->start, async_cow->end, async_cow,
1058 			    &num_added);
1059 	if (num_added == 0) {
1060 		btrfs_add_delayed_iput(async_cow->inode);
1061 		async_cow->inode = NULL;
1062 	}
1063 }
1064 
1065 /*
1066  * work queue callback to submit previously compressed pages
1067  */
1068 static noinline void async_cow_submit(struct btrfs_work *work)
1069 {
1070 	struct async_cow *async_cow;
1071 	struct btrfs_root *root;
1072 	unsigned long nr_pages;
1073 
1074 	async_cow = container_of(work, struct async_cow, work);
1075 
1076 	root = async_cow->root;
1077 	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
1078 		PAGE_CACHE_SHIFT;
1079 
1080 	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
1081 	    5 * 1024 * 1024 &&
1082 	    waitqueue_active(&root->fs_info->async_submit_wait))
1083 		wake_up(&root->fs_info->async_submit_wait);
1084 
1085 	if (async_cow->inode)
1086 		submit_compressed_extents(async_cow->inode, async_cow);
1087 }
1088 
1089 static noinline void async_cow_free(struct btrfs_work *work)
1090 {
1091 	struct async_cow *async_cow;
1092 	async_cow = container_of(work, struct async_cow, work);
1093 	if (async_cow->inode)
1094 		btrfs_add_delayed_iput(async_cow->inode);
1095 	kfree(async_cow);
1096 }
1097 
1098 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
1099 				u64 start, u64 end, int *page_started,
1100 				unsigned long *nr_written)
1101 {
1102 	struct async_cow *async_cow;
1103 	struct btrfs_root *root = BTRFS_I(inode)->root;
1104 	unsigned long nr_pages;
1105 	u64 cur_end;
1106 	int limit = 10 * 1024 * 1024;
1107 
1108 	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
1109 			 1, 0, NULL, GFP_NOFS);
1110 	while (start < end) {
1111 		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
1112 		BUG_ON(!async_cow); /* -ENOMEM */
1113 		async_cow->inode = igrab(inode);
1114 		async_cow->root = root;
1115 		async_cow->locked_page = locked_page;
1116 		async_cow->start = start;
1117 
1118 		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
1119 			cur_end = end;
1120 		else
1121 			cur_end = min(end, start + 512 * 1024 - 1);
1122 
1123 		async_cow->end = cur_end;
1124 		INIT_LIST_HEAD(&async_cow->extents);
1125 
1126 		async_cow->work.func = async_cow_start;
1127 		async_cow->work.ordered_func = async_cow_submit;
1128 		async_cow->work.ordered_free = async_cow_free;
1129 		async_cow->work.flags = 0;
1130 
1131 		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
1132 			PAGE_CACHE_SHIFT;
1133 		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
1134 
1135 		btrfs_queue_worker(&root->fs_info->delalloc_workers,
1136 				   &async_cow->work);
1137 
1138 		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
1139 			wait_event(root->fs_info->async_submit_wait,
1140 			   (atomic_read(&root->fs_info->async_delalloc_pages) <
1141 			    limit));
1142 		}
1143 
1144 		while (atomic_read(&root->fs_info->async_submit_draining) &&
1145 		      atomic_read(&root->fs_info->async_delalloc_pages)) {
1146 			wait_event(root->fs_info->async_submit_wait,
1147 			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
1148 			   0));
1149 		}
1150 
1151 		*nr_written += nr_pages;
1152 		start = cur_end + 1;
1153 	}
1154 	*page_started = 1;
1155 	return 0;
1156 }
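/*
 * e.g. a 1M delalloc range on a compressible inode is split into two
 * 512k async_cow units above; *nr_written is bumped for all of the
 * pages up front and *page_started is set, so the caller doesn't wait
 * on pages the worker threads now own.
 */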
1157 
1158 static noinline int csum_exist_in_range(struct btrfs_root *root,
1159 					u64 bytenr, u64 num_bytes)
1160 {
1161 	int ret;
1162 	struct btrfs_ordered_sum *sums;
1163 	LIST_HEAD(list);
1164 
1165 	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
1166 				       bytenr + num_bytes - 1, &list, 0);
1167 	if (ret == 0 && list_empty(&list))
1168 		return 0;
1169 
1170 	while (!list_empty(&list)) {
1171 		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1172 		list_del(&sums->list);
1173 		kfree(sums);
1174 	}
1175 	return 1;
1176 }
1177 
1178 /*
1179  * callback for nocow writeback.  This checks for snapshots or COW copies
1180  * of the extents that exist in the file, and COWs the file as required.
1181  *
1182  * If no cow copies or snapshots exist, we write directly to the existing
1183  * blocks on disk
1184  */
1185 static noinline int run_delalloc_nocow(struct inode *inode,
1186 				       struct page *locked_page,
1187 			      u64 start, u64 end, int *page_started, int force,
1188 			      unsigned long *nr_written)
1189 {
1190 	struct btrfs_root *root = BTRFS_I(inode)->root;
1191 	struct btrfs_trans_handle *trans;
1192 	struct extent_buffer *leaf;
1193 	struct btrfs_path *path;
1194 	struct btrfs_file_extent_item *fi;
1195 	struct btrfs_key found_key;
1196 	u64 cow_start;
1197 	u64 cur_offset;
1198 	u64 extent_end;
1199 	u64 extent_offset;
1200 	u64 disk_bytenr;
1201 	u64 num_bytes;
1202 	u64 disk_num_bytes;
1203 	u64 ram_bytes;
1204 	int extent_type;
1205 	int ret, err;
1206 	int type;
1207 	int nocow;
1208 	int check_prev = 1;
1209 	bool nolock;
1210 	u64 ino = btrfs_ino(inode);
1211 
1212 	path = btrfs_alloc_path();
1213 	if (!path) {
1214 		extent_clear_unlock_delalloc(inode,
1215 			     &BTRFS_I(inode)->io_tree,
1216 			     start, end, locked_page,
1217 			     EXTENT_CLEAR_UNLOCK_PAGE |
1218 			     EXTENT_CLEAR_UNLOCK |
1219 			     EXTENT_CLEAR_DELALLOC |
1220 			     EXTENT_CLEAR_DIRTY |
1221 			     EXTENT_SET_WRITEBACK |
1222 			     EXTENT_END_WRITEBACK);
1223 		return -ENOMEM;
1224 	}
1225 
1226 	nolock = btrfs_is_free_space_inode(inode);
1227 
1228 	if (nolock)
1229 		trans = btrfs_join_transaction_nolock(root);
1230 	else
1231 		trans = btrfs_join_transaction(root);
1232 
1233 	if (IS_ERR(trans)) {
1234 		extent_clear_unlock_delalloc(inode,
1235 			     &BTRFS_I(inode)->io_tree,
1236 			     start, end, locked_page,
1237 			     EXTENT_CLEAR_UNLOCK_PAGE |
1238 			     EXTENT_CLEAR_UNLOCK |
1239 			     EXTENT_CLEAR_DELALLOC |
1240 			     EXTENT_CLEAR_DIRTY |
1241 			     EXTENT_SET_WRITEBACK |
1242 			     EXTENT_END_WRITEBACK);
1243 		btrfs_free_path(path);
1244 		return PTR_ERR(trans);
1245 	}
1246 
1247 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1248 
1249 	cow_start = (u64)-1;
1250 	cur_offset = start;
1251 	while (1) {
1252 		ret = btrfs_lookup_file_extent(trans, root, path, ino,
1253 					       cur_offset, 0);
1254 		if (ret < 0) {
1255 			btrfs_abort_transaction(trans, root, ret);
1256 			goto error;
1257 		}
1258 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
1259 			leaf = path->nodes[0];
1260 			btrfs_item_key_to_cpu(leaf, &found_key,
1261 					      path->slots[0] - 1);
1262 			if (found_key.objectid == ino &&
1263 			    found_key.type == BTRFS_EXTENT_DATA_KEY)
1264 				path->slots[0]--;
1265 		}
1266 		check_prev = 0;
1267 next_slot:
1268 		leaf = path->nodes[0];
1269 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1270 			ret = btrfs_next_leaf(root, path);
1271 			if (ret < 0) {
1272 				btrfs_abort_transaction(trans, root, ret);
1273 				goto error;
1274 			}
1275 			if (ret > 0)
1276 				break;
1277 			leaf = path->nodes[0];
1278 		}
1279 
1280 		nocow = 0;
1281 		disk_bytenr = 0;
1282 		num_bytes = 0;
1283 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1284 
1285 		if (found_key.objectid > ino ||
1286 		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
1287 		    found_key.offset > end)
1288 			break;
1289 
1290 		if (found_key.offset > cur_offset) {
1291 			extent_end = found_key.offset;
1292 			extent_type = 0;
1293 			goto out_check;
1294 		}
1295 
1296 		fi = btrfs_item_ptr(leaf, path->slots[0],
1297 				    struct btrfs_file_extent_item);
1298 		extent_type = btrfs_file_extent_type(leaf, fi);
1299 
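		/*
		 * A regular or prealloc extent is only safe to nocow if
		 * it is real (not a hole), carries no compression or
		 * other encoding, isn't shared with another snapshot,
		 * doesn't sit in a readonly block group, and has no
		 * csums in the range that an in-place write would
		 * invalidate.
		 */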
1300 		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1301 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
1302 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1303 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1304 			extent_offset = btrfs_file_extent_offset(leaf, fi);
1305 			extent_end = found_key.offset +
1306 				btrfs_file_extent_num_bytes(leaf, fi);
1307 			disk_num_bytes =
1308 				btrfs_file_extent_disk_num_bytes(leaf, fi);
1309 			if (extent_end <= start) {
1310 				path->slots[0]++;
1311 				goto next_slot;
1312 			}
1313 			if (disk_bytenr == 0)
1314 				goto out_check;
1315 			if (btrfs_file_extent_compression(leaf, fi) ||
1316 			    btrfs_file_extent_encryption(leaf, fi) ||
1317 			    btrfs_file_extent_other_encoding(leaf, fi))
1318 				goto out_check;
1319 			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1320 				goto out_check;
1321 			if (btrfs_extent_readonly(root, disk_bytenr))
1322 				goto out_check;
1323 			if (btrfs_cross_ref_exist(trans, root, ino,
1324 						  found_key.offset -
1325 						  extent_offset, disk_bytenr))
1326 				goto out_check;
1327 			disk_bytenr += extent_offset;
1328 			disk_bytenr += cur_offset - found_key.offset;
1329 			num_bytes = min(end + 1, extent_end) - cur_offset;
1330 			/*
1331 			 * force cow if csum exists in the range.
1332 			 * this ensures that csums for a given extent are
1333 			 * either valid or do not exist.
1334 			 */
1335 			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1336 				goto out_check;
1337 			nocow = 1;
1338 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1339 			extent_end = found_key.offset +
1340 				btrfs_file_extent_inline_len(leaf, fi);
1341 			extent_end = ALIGN(extent_end, root->sectorsize);
1342 		} else {
1343 			BUG_ON(1);
1344 		}
1345 out_check:
1346 		if (extent_end <= start) {
1347 			path->slots[0]++;
1348 			goto next_slot;
1349 		}
1350 		if (!nocow) {
1351 			if (cow_start == (u64)-1)
1352 				cow_start = cur_offset;
1353 			cur_offset = extent_end;
1354 			if (cur_offset > end)
1355 				break;
1356 			path->slots[0]++;
1357 			goto next_slot;
1358 		}
1359 
1360 		btrfs_release_path(path);
1361 		if (cow_start != (u64)-1) {
1362 			ret = __cow_file_range(trans, inode, root, locked_page,
1363 					       cow_start, found_key.offset - 1,
1364 					       page_started, nr_written, 1);
1365 			if (ret) {
1366 				btrfs_abort_transaction(trans, root, ret);
1367 				goto error;
1368 			}
1369 			cow_start = (u64)-1;
1370 		}
1371 
1372 		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1373 			struct extent_map *em;
1374 			struct extent_map_tree *em_tree;
1375 			em_tree = &BTRFS_I(inode)->extent_tree;
1376 			em = alloc_extent_map();
1377 			BUG_ON(!em); /* -ENOMEM */
1378 			em->start = cur_offset;
1379 			em->orig_start = found_key.offset - extent_offset;
1380 			em->len = num_bytes;
1381 			em->block_len = num_bytes;
1382 			em->block_start = disk_bytenr;
1383 			em->orig_block_len = disk_num_bytes;
1384 			em->ram_bytes = ram_bytes;
1385 			em->bdev = root->fs_info->fs_devices->latest_bdev;
1386 			em->mod_start = em->start;
1387 			em->mod_len = em->len;
1388 			set_bit(EXTENT_FLAG_PINNED, &em->flags);
1389 			set_bit(EXTENT_FLAG_FILLING, &em->flags);
1390 			em->generation = -1;
1391 			while (1) {
1392 				write_lock(&em_tree->lock);
1393 				ret = add_extent_mapping(em_tree, em, 1);
1394 				write_unlock(&em_tree->lock);
1395 				if (ret != -EEXIST) {
1396 					free_extent_map(em);
1397 					break;
1398 				}
1399 				btrfs_drop_extent_cache(inode, em->start,
1400 						em->start + em->len - 1, 0);
1401 			}
1402 			type = BTRFS_ORDERED_PREALLOC;
1403 		} else {
1404 			type = BTRFS_ORDERED_NOCOW;
1405 		}
1406 
1407 		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1408 					       num_bytes, num_bytes, type);
1409 		BUG_ON(ret); /* -ENOMEM */
1410 
1411 		if (root->root_key.objectid ==
1412 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
1413 			ret = btrfs_reloc_clone_csums(inode, cur_offset,
1414 						      num_bytes);
1415 			if (ret) {
1416 				btrfs_abort_transaction(trans, root, ret);
1417 				goto error;
1418 			}
1419 		}
1420 
1421 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1422 				cur_offset, cur_offset + num_bytes - 1,
1423 				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
1424 				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
1425 				EXTENT_SET_PRIVATE2);
1426 		cur_offset = extent_end;
1427 		if (cur_offset > end)
1428 			break;
1429 	}
1430 	btrfs_release_path(path);
1431 
1432 	if (cur_offset <= end && cow_start == (u64)-1) {
1433 		cow_start = cur_offset;
1434 		cur_offset = end;
1435 	}
1436 
1437 	if (cow_start != (u64)-1) {
1438 		ret = __cow_file_range(trans, inode, root, locked_page,
1439 				       cow_start, end,
1440 				       page_started, nr_written, 1);
1441 		if (ret) {
1442 			btrfs_abort_transaction(trans, root, ret);
1443 			goto error;
1444 		}
1445 	}
1446 
1447 error:
1448 	err = btrfs_end_transaction(trans, root);
1449 	if (!ret)
1450 		ret = err;
1451 
1452 	if (ret && cur_offset < end)
1453 		extent_clear_unlock_delalloc(inode,
1454 			     &BTRFS_I(inode)->io_tree,
1455 			     cur_offset, end, locked_page,
1456 			     EXTENT_CLEAR_UNLOCK_PAGE |
1457 			     EXTENT_CLEAR_UNLOCK |
1458 			     EXTENT_CLEAR_DELALLOC |
1459 			     EXTENT_CLEAR_DIRTY |
1460 			     EXTENT_SET_WRITEBACK |
1461 			     EXTENT_END_WRITEBACK);
1462 
1463 	btrfs_free_path(path);
1464 	return ret;
1465 }
1466 
1467 /*
1468  * extent_io.c callback to do delayed allocation processing
1469  */
1470 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1471 			      u64 start, u64 end, int *page_started,
1472 			      unsigned long *nr_written)
1473 {
1474 	int ret;
1475 	struct btrfs_root *root = BTRFS_I(inode)->root;
1476 
1477 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) {
1478 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1479 					 page_started, 1, nr_written);
1480 	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) {
1481 		ret = run_delalloc_nocow(inode, locked_page, start, end,
1482 					 page_started, 0, nr_written);
1483 	} else if (!btrfs_test_opt(root, COMPRESS) &&
1484 		   !(BTRFS_I(inode)->force_compress) &&
1485 		   !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) {
1486 		ret = cow_file_range(inode, locked_page, start, end,
1487 				      page_started, nr_written, 1);
1488 	} else {
1489 		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1490 			&BTRFS_I(inode)->runtime_flags);
1491 		ret = cow_file_range_async(inode, locked_page, start, end,
1492 					   page_started, nr_written);
1493 	}
1494 	return ret;
1495 }
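/*
 * In short: NODATACOW and PREALLOC inodes take the nocow path (forced
 * and unforced respectively), inodes with no compression hints take
 * the synchronous cow path, and anything that may compress is punted
 * to the async workers.
 */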
1496 
1497 static void btrfs_split_extent_hook(struct inode *inode,
1498 				    struct extent_state *orig, u64 split)
1499 {
1500 	/* not delalloc, ignore it */
1501 	if (!(orig->state & EXTENT_DELALLOC))
1502 		return;
1503 
1504 	spin_lock(&BTRFS_I(inode)->lock);
1505 	BTRFS_I(inode)->outstanding_extents++;
1506 	spin_unlock(&BTRFS_I(inode)->lock);
1507 }
1508 
1509 /*
1510  * extent_io.c merge_extent_hook, used to track delayed allocation
1511  * extents that get merged onto older ones, such as during sequential
1512  * writes, so we can properly account for the metadata space we'll
1513  * need.
1514  */
1515 static void btrfs_merge_extent_hook(struct inode *inode,
1516 				    struct extent_state *new,
1517 				    struct extent_state *other)
1518 {
1519 	/* not delalloc, ignore it */
1520 	if (!(other->state & EXTENT_DELALLOC))
1521 		return;
1522 
1523 	spin_lock(&BTRFS_I(inode)->lock);
1524 	BTRFS_I(inode)->outstanding_extents--;
1525 	spin_unlock(&BTRFS_I(inode)->lock);
1526 }
1527 
1528 /*
1529  * extent_io.c set_bit_hook, used to track delayed allocation
1530  * bytes in this file, and to maintain the list of inodes that
1531  * have pending delalloc work to be done.
1532  */
1533 static void btrfs_set_bit_hook(struct inode *inode,
1534 			       struct extent_state *state, unsigned long *bits)
1535 {
1536 
1537 	/*
1538 	 * set_bit and clear bit hooks normally require _irqsave/restore
1539 	 * set_bit and clear_bit hooks normally require _irqsave/restore
1540 	 * bit, which is only set or cleared with irqs on
1541 	 */
1542 	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1543 		struct btrfs_root *root = BTRFS_I(inode)->root;
1544 		u64 len = state->end + 1 - state->start;
1545 		bool do_list = !btrfs_is_free_space_inode(inode);
1546 
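		/*
		 * EXTENT_FIRST_DELALLOC means the caller already accounted
		 * an outstanding extent for this range when it reserved
		 * metadata, so only splits/merges after that point adjust
		 * the count here.
		 */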
1547 		if (*bits & EXTENT_FIRST_DELALLOC) {
1548 			*bits &= ~EXTENT_FIRST_DELALLOC;
1549 		} else {
1550 			spin_lock(&BTRFS_I(inode)->lock);
1551 			BTRFS_I(inode)->outstanding_extents++;
1552 			spin_unlock(&BTRFS_I(inode)->lock);
1553 		}
1554 
1555 		__percpu_counter_add(&root->fs_info->delalloc_bytes, len,
1556 				     root->fs_info->delalloc_batch);
1557 		spin_lock(&BTRFS_I(inode)->lock);
1558 		BTRFS_I(inode)->delalloc_bytes += len;
1559 		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1560 					 &BTRFS_I(inode)->runtime_flags)) {
1561 			spin_lock(&root->fs_info->delalloc_lock);
1562 			if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1563 				list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1564 					      &root->fs_info->delalloc_inodes);
1565 				set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1566 					&BTRFS_I(inode)->runtime_flags);
1567 			}
1568 			spin_unlock(&root->fs_info->delalloc_lock);
1569 		}
1570 		spin_unlock(&BTRFS_I(inode)->lock);
1571 	}
1572 }
1573 
1574 /*
1575  * extent_io.c clear_bit_hook, see set_bit_hook for why
1576  */
1577 static void btrfs_clear_bit_hook(struct inode *inode,
1578 				 struct extent_state *state,
1579 				 unsigned long *bits)
1580 {
1581 	/*
1582 	 * set_bit and clear bit hooks normally require _irqsave/restore
1583 	 * set_bit and clear_bit hooks normally require _irqsave/restore
1584 	 * bit, which is only set or cleared with irqs on
1585 	 */
1586 	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1587 		struct btrfs_root *root = BTRFS_I(inode)->root;
1588 		u64 len = state->end + 1 - state->start;
1589 		bool do_list = !btrfs_is_free_space_inode(inode);
1590 
1591 		if (*bits & EXTENT_FIRST_DELALLOC) {
1592 			*bits &= ~EXTENT_FIRST_DELALLOC;
1593 		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
1594 			spin_lock(&BTRFS_I(inode)->lock);
1595 			BTRFS_I(inode)->outstanding_extents--;
1596 			spin_unlock(&BTRFS_I(inode)->lock);
1597 		}
1598 
1599 		if (*bits & EXTENT_DO_ACCOUNTING)
1600 			btrfs_delalloc_release_metadata(inode, len);
1601 
1602 		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1603 		    && do_list)
1604 			btrfs_free_reserved_data_space(inode, len);
1605 
1606 		__percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
1607 				     root->fs_info->delalloc_batch);
1608 		spin_lock(&BTRFS_I(inode)->lock);
1609 		BTRFS_I(inode)->delalloc_bytes -= len;
1610 		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
1611 		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1612 			     &BTRFS_I(inode)->runtime_flags)) {
1613 			spin_lock(&root->fs_info->delalloc_lock);
1614 			if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1615 				list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1616 				clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1617 					  &BTRFS_I(inode)->runtime_flags);
1618 			}
1619 			spin_unlock(&root->fs_info->delalloc_lock);
1620 		}
1621 		spin_unlock(&BTRFS_I(inode)->lock);
1622 	}
1623 }
1624 
1625 /*
1626  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1627  * we don't create bios that span stripes or chunks
1628  */
1629 int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
1630 			 size_t size, struct bio *bio,
1631 			 unsigned long bio_flags)
1632 {
1633 	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1634 	u64 logical = (u64)bio->bi_sector << 9;
1635 	u64 length = 0;
1636 	u64 map_length;
1637 	int ret;
1638 
1639 	if (bio_flags & EXTENT_BIO_COMPRESSED)
1640 		return 0;
1641 
1642 	length = bio->bi_size;
1643 	map_length = length;
1644 	ret = btrfs_map_block(root->fs_info, rw, logical,
1645 			      &map_length, NULL, 0);
1646 	/* Will always return 0 with map_multi == NULL */
1647 	BUG_ON(ret < 0);
1648 	if (map_length < length + size)
1649 		return 1;
1650 	return 0;
1651 }
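/*
 * A rough example, assuming a 64k stripe: a 4k bio starting 8k before
 * the stripe boundary may grow by one more 4k page (map_length == 8k,
 * which is not less than length + size), but a 4k bio starting 4k
 * before the boundary gets map_length == 4k back and must not grow
 * across the stripe.
 */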
1652 
1653 /*
1654  * in order to insert checksums into the metadata in large chunks,
1655  * we wait until bio submission time.   All the pages in the bio are
1656  * checksummed and sums are attached onto the ordered extent record.
1657  *
1658  * At IO completion time the csums attached to the ordered extent record
1659  * are inserted into the btree
1660  */
1661 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1662 				    struct bio *bio, int mirror_num,
1663 				    unsigned long bio_flags,
1664 				    u64 bio_offset)
1665 {
1666 	struct btrfs_root *root = BTRFS_I(inode)->root;
1667 	int ret = 0;
1668 
1669 	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1670 	BUG_ON(ret); /* -ENOMEM */
1671 	return 0;
1672 }
1673 
1674 /*
1675  * in order to insert checksums into the metadata in large chunks,
1676  * we wait until bio submission time.   All the pages in the bio are
1677  * checksummed and sums are attached onto the ordered extent record.
1678  *
1679  * At IO completion time the csums attached to the ordered extent record
1680  * are inserted into the btree
1681  */
1682 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1683 			  int mirror_num, unsigned long bio_flags,
1684 			  u64 bio_offset)
1685 {
1686 	struct btrfs_root *root = BTRFS_I(inode)->root;
1687 	int ret;
1688 
1689 	ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
1690 	if (ret)
1691 		bio_endio(bio, ret);
1692 	return ret;
1693 }
1694 
1695 /*
1696  * extent_io.c submission hook. This does the right thing for csum calculation
1697  * on write, or reading the csums from the tree before a read
1698  */
1699 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1700 			  int mirror_num, unsigned long bio_flags,
1701 			  u64 bio_offset)
1702 {
1703 	struct btrfs_root *root = BTRFS_I(inode)->root;
1704 	int ret = 0;
1705 	int skip_sum;
1706 	int metadata = 0;
1707 	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
1708 
1709 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1710 
1711 	if (btrfs_is_free_space_inode(inode))
1712 		metadata = 2;
1713 
1714 	if (!(rw & REQ_WRITE)) {
1715 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
1716 		if (ret)
1717 			goto out;
1718 
1719 		if (bio_flags & EXTENT_BIO_COMPRESSED) {
1720 			ret = btrfs_submit_compressed_read(inode, bio,
1721 							   mirror_num,
1722 							   bio_flags);
1723 			goto out;
1724 		} else if (!skip_sum) {
1725 			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
1726 			if (ret)
1727 				goto out;
1728 		}
1729 		goto mapit;
1730 	} else if (async && !skip_sum) {
1731 		/* csum items have already been cloned */
1732 		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1733 			goto mapit;
1734 		/* we're doing a write, do the async checksumming */
1735 		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1736 				   inode, rw, bio, mirror_num,
1737 				   bio_flags, bio_offset,
1738 				   __btrfs_submit_bio_start,
1739 				   __btrfs_submit_bio_done);
1740 		goto out;
1741 	} else if (!skip_sum) {
1742 		ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1743 		if (ret)
1744 			goto out;
1745 	}
1746 
1747 mapit:
1748 	ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
1749 
1750 out:
1751 	if (ret < 0)
1752 		bio_endio(bio, ret);
1753 	return ret;
1754 }
1755 
1756 /*
 * given a list of ordered sums, record them in the inode.  This happens
1758  * at IO completion time based on sums calculated at bio submission time.
1759  */
1760 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1761 			     struct inode *inode, u64 file_offset,
1762 			     struct list_head *list)
1763 {
1764 	struct btrfs_ordered_sum *sum;
1765 
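	/*
	 * adding_csums flags the transaction handle while the csum items
	 * are inserted, marking these tree modifications as coming from
	 * IO completion rather than from a regular writer
	 */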
1766 	list_for_each_entry(sum, list, list) {
1767 		trans->adding_csums = 1;
1768 		btrfs_csum_file_blocks(trans,
1769 		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
1770 		trans->adding_csums = 0;
1771 	}
1772 	return 0;
1773 }
1774 
1775 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1776 			      struct extent_state **cached_state)
1777 {
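	/* end is the inclusive last byte, so it must never be page aligned */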
1778 	WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
1779 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1780 				   cached_state, GFP_NOFS);
1781 }
1782 
1783 /* see btrfs_writepage_start_hook for details on why this is required */
1784 struct btrfs_writepage_fixup {
1785 	struct page *page;
1786 	struct btrfs_work work;
1787 };
1788 
1789 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1790 {
1791 	struct btrfs_writepage_fixup *fixup;
1792 	struct btrfs_ordered_extent *ordered;
1793 	struct extent_state *cached_state = NULL;
1794 	struct page *page;
1795 	struct inode *inode;
1796 	u64 page_start;
1797 	u64 page_end;
1798 	int ret;
1799 
1800 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
1801 	page = fixup->page;
1802 again:
1803 	lock_page(page);
1804 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1805 		ClearPageChecked(page);
1806 		goto out_page;
1807 	}
1808 
1809 	inode = page->mapping->host;
1810 	page_start = page_offset(page);
1811 	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1812 
1813 	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1814 			 &cached_state);
1815 
1816 	/* already ordered? We're done */
1817 	if (PagePrivate2(page))
1818 		goto out;
1819 
1820 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
1821 	if (ordered) {
1822 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1823 				     page_end, &cached_state, GFP_NOFS);
1824 		unlock_page(page);
1825 		btrfs_start_ordered_extent(inode, ordered, 1);
1826 		btrfs_put_ordered_extent(ordered);
1827 		goto again;
1828 	}
1829 
1830 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
1831 	if (ret) {
1832 		mapping_set_error(page->mapping, ret);
1833 		end_extent_writepage(page, ret, page_start, page_end);
1834 		ClearPageChecked(page);
1835 		goto out;
	}
1837 
1838 	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1839 	ClearPageChecked(page);
1840 	set_page_dirty(page);
1841 out:
1842 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
1843 			     &cached_state, GFP_NOFS);
1844 out_page:
1845 	unlock_page(page);
1846 	page_cache_release(page);
1847 	kfree(fixup);
1848 }
1849 
1850 /*
1851  * There are a few paths in the higher layers of the kernel that directly
1852  * set the page dirty bit without asking the filesystem if it is a
1853  * good idea.  This causes problems because we want to make sure COW
1854  * properly happens and the data=ordered rules are followed.
1855  *
1856  * In our case any range that doesn't have the ORDERED bit set
1857  * hasn't been properly setup for IO.  We kick off an async process
1858  * to fix it up.  The async helper will wait for ordered extents, set
1859  * the delalloc bit and make it safe to write the page.
1860  */
1861 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1862 {
1863 	struct inode *inode = page->mapping->host;
1864 	struct btrfs_writepage_fixup *fixup;
1865 	struct btrfs_root *root = BTRFS_I(inode)->root;
1866 
1867 	/* this page is properly in the ordered list */
1868 	if (TestClearPagePrivate2(page))
1869 		return 0;
1870 
1871 	if (PageChecked(page))
1872 		return -EAGAIN;
1873 
1874 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1875 	if (!fixup)
1876 		return -EAGAIN;
1877 
1878 	SetPageChecked(page);
1879 	page_cache_get(page);
1880 	fixup->work.func = btrfs_writepage_fixup_worker;
1881 	fixup->page = page;
1882 	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1883 	return -EBUSY;
1884 }
1885 
1886 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1887 				       struct inode *inode, u64 file_pos,
1888 				       u64 disk_bytenr, u64 disk_num_bytes,
1889 				       u64 num_bytes, u64 ram_bytes,
1890 				       u8 compression, u8 encryption,
1891 				       u16 other_encoding, int extent_type)
1892 {
1893 	struct btrfs_root *root = BTRFS_I(inode)->root;
1894 	struct btrfs_file_extent_item *fi;
1895 	struct btrfs_path *path;
1896 	struct extent_buffer *leaf;
1897 	struct btrfs_key ins;
1898 	int ret;
1899 
1900 	path = btrfs_alloc_path();
1901 	if (!path)
1902 		return -ENOMEM;
1903 
1904 	path->leave_spinning = 1;
1905 
1906 	/*
1907 	 * we may be replacing one extent in the tree with another.
1908 	 * The new extent is pinned in the extent map, and we don't want
1909 	 * to drop it from the cache until it is completely in the btree.
1910 	 *
1911 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * The caller is expected to unpin it and allow it to be merged
1913 	 * with the others.
1914 	 */
1915 	ret = btrfs_drop_extents(trans, root, inode, file_pos,
1916 				 file_pos + num_bytes, 0);
1917 	if (ret)
1918 		goto out;
1919 
1920 	ins.objectid = btrfs_ino(inode);
1921 	ins.offset = file_pos;
1922 	ins.type = BTRFS_EXTENT_DATA_KEY;
1923 	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1924 	if (ret)
1925 		goto out;
1926 	leaf = path->nodes[0];
1927 	fi = btrfs_item_ptr(leaf, path->slots[0],
1928 			    struct btrfs_file_extent_item);
1929 	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1930 	btrfs_set_file_extent_type(leaf, fi, extent_type);
1931 	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1932 	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1933 	btrfs_set_file_extent_offset(leaf, fi, 0);
1934 	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1935 	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1936 	btrfs_set_file_extent_compression(leaf, fi, compression);
1937 	btrfs_set_file_extent_encryption(leaf, fi, encryption);
1938 	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1939 
1940 	btrfs_mark_buffer_dirty(leaf);
1941 	btrfs_release_path(path);
1942 
1943 	inode_add_bytes(inode, num_bytes);
1944 
1945 	ins.objectid = disk_bytenr;
1946 	ins.offset = disk_num_bytes;
1947 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1948 	ret = btrfs_alloc_reserved_file_extent(trans, root,
1949 					root->root_key.objectid,
1950 					btrfs_ino(inode), file_pos, &ins);
1951 out:
1952 	btrfs_free_path(path);
1953 
1954 	return ret;
1955 }
1956 
1957 /* snapshot-aware defrag */
1958 struct sa_defrag_extent_backref {
1959 	struct rb_node node;
1960 	struct old_sa_defrag_extent *old;
1961 	u64 root_id;
1962 	u64 inum;
1963 	u64 file_pos;
1964 	u64 extent_offset;
1965 	u64 num_bytes;
1966 	u64 generation;
1967 };
1968 
1969 struct old_sa_defrag_extent {
1970 	struct list_head list;
1971 	struct new_sa_defrag_extent *new;
1972 
1973 	u64 extent_offset;
1974 	u64 bytenr;
1975 	u64 offset;
1976 	u64 len;
1977 	int count;
1978 };
1979 
1980 struct new_sa_defrag_extent {
1981 	struct rb_root root;
1982 	struct list_head head;
1983 	struct btrfs_path *path;
1984 	struct inode *inode;
1985 	u64 file_pos;
1986 	u64 len;
1987 	u64 bytenr;
1988 	u64 disk_len;
1989 	u8 compress_type;
1990 };
1991 
1992 static int backref_comp(struct sa_defrag_extent_backref *b1,
1993 			struct sa_defrag_extent_backref *b2)
1994 {
1995 	if (b1->root_id < b2->root_id)
1996 		return -1;
1997 	else if (b1->root_id > b2->root_id)
1998 		return 1;
1999 
2000 	if (b1->inum < b2->inum)
2001 		return -1;
2002 	else if (b1->inum > b2->inum)
2003 		return 1;
2004 
2005 	if (b1->file_pos < b2->file_pos)
2006 		return -1;
2007 	else if (b1->file_pos > b2->file_pos)
2008 		return 1;
2009 
2010 	/*
2011 	 * [------------------------------] ===> (a range of space)
2012 	 *     |<--->|   |<---->| =============> (fs/file tree A)
2013 	 * |<---------------------------->| ===> (fs/file tree B)
2014 	 *
	 * A range of space can refer to two file extents in one tree while
	 * referring to only one file extent in another tree.
	 *
	 * So we may process a disk offset more than once (two extents in A)
	 * that land in the same extent (one extent in B), and then insert
	 * two identical backrefs (both referring to the extent in B).
2021 	 */
2022 	return 0;
2023 }
2024 
2025 static void backref_insert(struct rb_root *root,
2026 			   struct sa_defrag_extent_backref *backref)
2027 {
2028 	struct rb_node **p = &root->rb_node;
2029 	struct rb_node *parent = NULL;
2030 	struct sa_defrag_extent_backref *entry;
2031 	int ret;
2032 
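	/* equal keys (backref_comp() == 0) go right, so duplicates are kept */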
2033 	while (*p) {
2034 		parent = *p;
2035 		entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2036 
2037 		ret = backref_comp(backref, entry);
2038 		if (ret < 0)
2039 			p = &(*p)->rb_left;
2040 		else
2041 			p = &(*p)->rb_right;
2042 	}
2043 
2044 	rb_link_node(&backref->node, parent, p);
2045 	rb_insert_color(&backref->node, root);
2046 }
2047 
2048 /*
 * Note the backref might have changed, in which case we just return 0.
2050  */
2051 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2052 				       void *ctx)
2053 {
2054 	struct btrfs_file_extent_item *extent;
2055 	struct btrfs_fs_info *fs_info;
2056 	struct old_sa_defrag_extent *old = ctx;
2057 	struct new_sa_defrag_extent *new = old->new;
2058 	struct btrfs_path *path = new->path;
2059 	struct btrfs_key key;
2060 	struct btrfs_root *root;
2061 	struct sa_defrag_extent_backref *backref;
2062 	struct extent_buffer *leaf;
2063 	struct inode *inode = new->inode;
2064 	int slot;
2065 	int ret;
2066 	u64 extent_offset;
2067 	u64 num_bytes;
2068 
2069 	if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2070 	    inum == btrfs_ino(inode))
2071 		return 0;
2072 
2073 	key.objectid = root_id;
2074 	key.type = BTRFS_ROOT_ITEM_KEY;
2075 	key.offset = (u64)-1;
2076 
2077 	fs_info = BTRFS_I(inode)->root->fs_info;
2078 	root = btrfs_read_fs_root_no_name(fs_info, &key);
2079 	if (IS_ERR(root)) {
2080 		if (PTR_ERR(root) == -ENOENT)
2081 			return 0;
2082 		WARN_ON(1);
2083 		pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
2084 			 inum, offset, root_id);
2085 		return PTR_ERR(root);
2086 	}
2087 
2088 	key.objectid = inum;
2089 	key.type = BTRFS_EXTENT_DATA_KEY;
2090 	if (offset > (u64)-1 << 32)
2091 		key.offset = 0;
2092 	else
2093 		key.offset = offset;
2094 
2095 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2096 	if (ret < 0) {
2097 		WARN_ON(1);
2098 		return ret;
2099 	}
2100 
2101 	while (1) {
2102 		cond_resched();
2103 
2104 		leaf = path->nodes[0];
2105 		slot = path->slots[0];
2106 
2107 		if (slot >= btrfs_header_nritems(leaf)) {
2108 			ret = btrfs_next_leaf(root, path);
2109 			if (ret < 0) {
2110 				goto out;
2111 			} else if (ret > 0) {
2112 				ret = 0;
2113 				goto out;
2114 			}
2115 			continue;
2116 		}
2117 
2118 		path->slots[0]++;
2119 
2120 		btrfs_item_key_to_cpu(leaf, &key, slot);
2121 
2122 		if (key.objectid > inum)
2123 			goto out;
2124 
2125 		if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2126 			continue;
2127 
2128 		extent = btrfs_item_ptr(leaf, slot,
2129 					struct btrfs_file_extent_item);
2130 
2131 		if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2132 			continue;
2133 
2134 		extent_offset = btrfs_file_extent_offset(leaf, extent);
2135 		if (key.offset - extent_offset != offset)
2136 			continue;
2137 
2138 		num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2139 		if (extent_offset >= old->extent_offset + old->offset +
2140 		    old->len || extent_offset + num_bytes <=
2141 		    old->extent_offset + old->offset)
2142 			continue;
2143 
2144 		break;
2145 	}
2146 
2147 	backref = kmalloc(sizeof(*backref), GFP_NOFS);
2148 	if (!backref) {
		ret = -ENOMEM;
2150 		goto out;
2151 	}
2152 
2153 	backref->root_id = root_id;
2154 	backref->inum = inum;
2155 	backref->file_pos = offset + extent_offset;
2156 	backref->num_bytes = num_bytes;
2157 	backref->extent_offset = extent_offset;
2158 	backref->generation = btrfs_file_extent_generation(leaf, extent);
2159 	backref->old = old;
2160 	backref_insert(&new->root, backref);
2161 	old->count++;
2162 out:
2163 	btrfs_release_path(path);
2164 	WARN_ON(ret);
2165 	return ret;
2166 }
2167 
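/*
 * Collect the backrefs of every old extent recorded in new->head.
 * Returns false when nothing remains to be relinked.
 */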
2168 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2169 				   struct new_sa_defrag_extent *new)
2170 {
2171 	struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
2172 	struct old_sa_defrag_extent *old, *tmp;
2173 	int ret;
2174 
2175 	new->path = path;
2176 
2177 	list_for_each_entry_safe(old, tmp, &new->head, list) {
2178 		ret = iterate_inodes_from_logical(old->bytenr, fs_info,
2179 						  path, record_one_backref,
2180 						  old);
2181 		BUG_ON(ret < 0 && ret != -ENOENT);
2182 
2183 		/* no backref to be processed for this extent */
2184 		if (!old->count) {
2185 			list_del(&old->list);
2186 			kfree(old);
2187 		}
2188 	}
2189 
2190 	if (list_empty(&new->head))
2191 		return false;
2192 
2193 	return true;
2194 }
2195 
2196 static int relink_is_mergable(struct extent_buffer *leaf,
2197 			      struct btrfs_file_extent_item *fi,
2198 			      u64 disk_bytenr)
2199 {
2200 	if (btrfs_file_extent_disk_bytenr(leaf, fi) != disk_bytenr)
2201 		return 0;
2202 
2203 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2204 		return 0;
2205 
2206 	if (btrfs_file_extent_compression(leaf, fi) ||
2207 	    btrfs_file_extent_encryption(leaf, fi) ||
2208 	    btrfs_file_extent_other_encoding(leaf, fi))
2209 		return 0;
2210 
2211 	return 1;
2212 }
2213 
2214 /*
 * Note the backref might have changed, in which case we just return 0.
2216  */
2217 static noinline int relink_extent_backref(struct btrfs_path *path,
2218 				 struct sa_defrag_extent_backref *prev,
2219 				 struct sa_defrag_extent_backref *backref)
2220 {
2221 	struct btrfs_file_extent_item *extent;
2222 	struct btrfs_file_extent_item *item;
2223 	struct btrfs_ordered_extent *ordered;
2224 	struct btrfs_trans_handle *trans;
2225 	struct btrfs_fs_info *fs_info;
2226 	struct btrfs_root *root;
2227 	struct btrfs_key key;
2228 	struct extent_buffer *leaf;
2229 	struct old_sa_defrag_extent *old = backref->old;
2230 	struct new_sa_defrag_extent *new = old->new;
2231 	struct inode *src_inode = new->inode;
2232 	struct inode *inode;
2233 	struct extent_state *cached = NULL;
2234 	int ret = 0;
2235 	u64 start;
2236 	u64 len;
2237 	u64 lock_start;
2238 	u64 lock_end;
2239 	bool merge = false;
2240 	int index;
2241 
2242 	if (prev && prev->root_id == backref->root_id &&
2243 	    prev->inum == backref->inum &&
2244 	    prev->file_pos + prev->num_bytes == backref->file_pos)
2245 		merge = true;
2246 
2247 	/* step 1: get root */
2248 	key.objectid = backref->root_id;
2249 	key.type = BTRFS_ROOT_ITEM_KEY;
2250 	key.offset = (u64)-1;
2251 
2252 	fs_info = BTRFS_I(src_inode)->root->fs_info;
2253 	index = srcu_read_lock(&fs_info->subvol_srcu);
2254 
2255 	root = btrfs_read_fs_root_no_name(fs_info, &key);
2256 	if (IS_ERR(root)) {
2257 		srcu_read_unlock(&fs_info->subvol_srcu, index);
2258 		if (PTR_ERR(root) == -ENOENT)
2259 			return 0;
2260 		return PTR_ERR(root);
2261 	}
2262 	if (btrfs_root_refs(&root->root_item) == 0) {
2263 		srcu_read_unlock(&fs_info->subvol_srcu, index);
		/* the root is gone, treat this like ENOENT and return 0 */
2265 		return 0;
2266 	}
2267 
2268 	/* step 2: get inode */
2269 	key.objectid = backref->inum;
2270 	key.type = BTRFS_INODE_ITEM_KEY;
2271 	key.offset = 0;
2272 
2273 	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2274 	if (IS_ERR(inode)) {
2275 		srcu_read_unlock(&fs_info->subvol_srcu, index);
2276 		return 0;
2277 	}
2278 
2279 	srcu_read_unlock(&fs_info->subvol_srcu, index);
2280 
2281 	/* step 3: relink backref */
2282 	lock_start = backref->file_pos;
2283 	lock_end = backref->file_pos + backref->num_bytes - 1;
2284 	lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2285 			 0, &cached);
2286 
2287 	ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2288 	if (ordered) {
2289 		btrfs_put_ordered_extent(ordered);
2290 		goto out_unlock;
2291 	}
2292 
2293 	trans = btrfs_join_transaction(root);
2294 	if (IS_ERR(trans)) {
2295 		ret = PTR_ERR(trans);
2296 		goto out_unlock;
2297 	}
2298 
2299 	key.objectid = backref->inum;
2300 	key.type = BTRFS_EXTENT_DATA_KEY;
2301 	key.offset = backref->file_pos;
2302 
2303 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2304 	if (ret < 0) {
2305 		goto out_free_path;
2306 	} else if (ret > 0) {
2307 		ret = 0;
2308 		goto out_free_path;
2309 	}
2310 
2311 	extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2312 				struct btrfs_file_extent_item);
2313 
2314 	if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2315 	    backref->generation)
2316 		goto out_free_path;
2317 
2318 	btrfs_release_path(path);
2319 
2320 	start = backref->file_pos;
2321 	if (backref->extent_offset < old->extent_offset + old->offset)
2322 		start += old->extent_offset + old->offset -
2323 			 backref->extent_offset;
2324 
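	/*
	 * len is the overlap between the backref's extent range and the
	 * old (defragged) extent's range
	 */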
2325 	len = min(backref->extent_offset + backref->num_bytes,
2326 		  old->extent_offset + old->offset + old->len);
2327 	len -= max(backref->extent_offset, old->extent_offset + old->offset);
2328 
2329 	ret = btrfs_drop_extents(trans, root, inode, start,
2330 				 start + len, 1);
2331 	if (ret)
2332 		goto out_free_path;
2333 again:
2334 	key.objectid = btrfs_ino(inode);
2335 	key.type = BTRFS_EXTENT_DATA_KEY;
2336 	key.offset = start;
2337 
2338 	path->leave_spinning = 1;
2339 	if (merge) {
2340 		struct btrfs_file_extent_item *fi;
2341 		u64 extent_len;
2342 		struct btrfs_key found_key;
2343 
2344 		ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
2345 		if (ret < 0)
2346 			goto out_free_path;
2347 
2348 		path->slots[0]--;
2349 		leaf = path->nodes[0];
2350 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2351 
2352 		fi = btrfs_item_ptr(leaf, path->slots[0],
2353 				    struct btrfs_file_extent_item);
2354 		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2355 
2356 		if (relink_is_mergable(leaf, fi, new->bytenr) &&
2357 		    extent_len + found_key.offset == start) {
2358 			btrfs_set_file_extent_num_bytes(leaf, fi,
2359 							extent_len + len);
2360 			btrfs_mark_buffer_dirty(leaf);
2361 			inode_add_bytes(inode, len);
2362 
2363 			ret = 1;
2364 			goto out_free_path;
2365 		} else {
2366 			merge = false;
2367 			btrfs_release_path(path);
2368 			goto again;
2369 		}
2370 	}
2371 
2372 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2373 					sizeof(*extent));
2374 	if (ret) {
2375 		btrfs_abort_transaction(trans, root, ret);
2376 		goto out_free_path;
2377 	}
2378 
2379 	leaf = path->nodes[0];
2380 	item = btrfs_item_ptr(leaf, path->slots[0],
2381 				struct btrfs_file_extent_item);
2382 	btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2383 	btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2384 	btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2385 	btrfs_set_file_extent_num_bytes(leaf, item, len);
2386 	btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2387 	btrfs_set_file_extent_generation(leaf, item, trans->transid);
2388 	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2389 	btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2390 	btrfs_set_file_extent_encryption(leaf, item, 0);
2391 	btrfs_set_file_extent_other_encoding(leaf, item, 0);
2392 
2393 	btrfs_mark_buffer_dirty(leaf);
2394 	inode_add_bytes(inode, len);
2395 	btrfs_release_path(path);
2396 
2397 	ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
2398 			new->disk_len, 0,
2399 			backref->root_id, backref->inum,
2400 			new->file_pos, 0);	/* start - extent_offset */
2401 	if (ret) {
2402 		btrfs_abort_transaction(trans, root, ret);
2403 		goto out_free_path;
2404 	}
2405 
2406 	ret = 1;
2407 out_free_path:
2408 	btrfs_release_path(path);
2409 	path->leave_spinning = 0;
2410 	btrfs_end_transaction(trans, root);
2411 out_unlock:
2412 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2413 			     &cached, GFP_NOFS);
2414 	iput(inode);
2415 	return ret;
2416 }
2417 
2418 static void relink_file_extents(struct new_sa_defrag_extent *new)
2419 {
2420 	struct btrfs_path *path;
2421 	struct old_sa_defrag_extent *old, *tmp;
2422 	struct sa_defrag_extent_backref *backref;
2423 	struct sa_defrag_extent_backref *prev = NULL;
2424 	struct inode *inode;
2425 	struct btrfs_root *root;
2426 	struct rb_node *node;
2427 	int ret;
2428 
2429 	inode = new->inode;
2430 	root = BTRFS_I(inode)->root;
2431 
2432 	path = btrfs_alloc_path();
2433 	if (!path)
2434 		return;
2435 
2436 	if (!record_extent_backrefs(path, new)) {
2437 		btrfs_free_path(path);
2438 		goto out;
2439 	}
2440 	btrfs_release_path(path);
2441 
2442 	while (1) {
2443 		node = rb_first(&new->root);
2444 		if (!node)
2445 			break;
2446 		rb_erase(node, &new->root);
2447 
2448 		backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2449 
2450 		ret = relink_extent_backref(path, prev, backref);
2451 		WARN_ON(ret < 0);
2452 
		kfree(prev);

		if (ret == 1) {
			prev = backref;
		} else {
			/* backref was neither merged nor relinked, don't leak it */
			kfree(backref);
			prev = NULL;
		}
2459 		cond_resched();
2460 	}
2461 	kfree(prev);
2462 
2463 	btrfs_free_path(path);
2464 
2465 	list_for_each_entry_safe(old, tmp, &new->head, list) {
2466 		list_del(&old->list);
2467 		kfree(old);
2468 	}
2469 out:
2470 	atomic_dec(&root->fs_info->defrag_running);
2471 	wake_up(&root->fs_info->transaction_wait);
2472 
2473 	kfree(new);
2474 }
2475 
2476 static struct new_sa_defrag_extent *
2477 record_old_file_extents(struct inode *inode,
2478 			struct btrfs_ordered_extent *ordered)
2479 {
2480 	struct btrfs_root *root = BTRFS_I(inode)->root;
2481 	struct btrfs_path *path;
2482 	struct btrfs_key key;
2483 	struct old_sa_defrag_extent *old, *tmp;
2484 	struct new_sa_defrag_extent *new;
2485 	int ret;
2486 
2487 	new = kmalloc(sizeof(*new), GFP_NOFS);
2488 	if (!new)
2489 		return NULL;
2490 
2491 	new->inode = inode;
2492 	new->file_pos = ordered->file_offset;
2493 	new->len = ordered->len;
2494 	new->bytenr = ordered->start;
2495 	new->disk_len = ordered->disk_len;
2496 	new->compress_type = ordered->compress_type;
2497 	new->root = RB_ROOT;
2498 	INIT_LIST_HEAD(&new->head);
2499 
2500 	path = btrfs_alloc_path();
2501 	if (!path)
2502 		goto out_kfree;
2503 
2504 	key.objectid = btrfs_ino(inode);
2505 	key.type = BTRFS_EXTENT_DATA_KEY;
2506 	key.offset = new->file_pos;
2507 
2508 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2509 	if (ret < 0)
2510 		goto out_free_path;
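	/* step back one slot, the previous extent may still cover file_pos */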
2511 	if (ret > 0 && path->slots[0] > 0)
2512 		path->slots[0]--;
2513 
2514 	/* find out all the old extents for the file range */
2515 	while (1) {
2516 		struct btrfs_file_extent_item *extent;
2517 		struct extent_buffer *l;
2518 		int slot;
2519 		u64 num_bytes;
2520 		u64 offset;
2521 		u64 end;
2522 		u64 disk_bytenr;
2523 		u64 extent_offset;
2524 
2525 		l = path->nodes[0];
2526 		slot = path->slots[0];
2527 
2528 		if (slot >= btrfs_header_nritems(l)) {
2529 			ret = btrfs_next_leaf(root, path);
2530 			if (ret < 0)
2531 				goto out_free_list;
2532 			else if (ret > 0)
2533 				break;
2534 			continue;
2535 		}
2536 
2537 		btrfs_item_key_to_cpu(l, &key, slot);
2538 
2539 		if (key.objectid != btrfs_ino(inode))
2540 			break;
2541 		if (key.type != BTRFS_EXTENT_DATA_KEY)
2542 			break;
2543 		if (key.offset >= new->file_pos + new->len)
2544 			break;
2545 
2546 		extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2547 
2548 		num_bytes = btrfs_file_extent_num_bytes(l, extent);
2549 		if (key.offset + num_bytes < new->file_pos)
2550 			goto next;
2551 
2552 		disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2553 		if (!disk_bytenr)
2554 			goto next;
2555 
2556 		extent_offset = btrfs_file_extent_offset(l, extent);
2557 
2558 		old = kmalloc(sizeof(*old), GFP_NOFS);
2559 		if (!old)
2560 			goto out_free_list;
2561 
2562 		offset = max(new->file_pos, key.offset);
2563 		end = min(new->file_pos + new->len, key.offset + num_bytes);
2564 
2565 		old->bytenr = disk_bytenr;
2566 		old->extent_offset = extent_offset;
2567 		old->offset = offset - key.offset;
2568 		old->len = end - offset;
2569 		old->new = new;
2570 		old->count = 0;
2571 		list_add_tail(&old->list, &new->head);
2572 next:
2573 		path->slots[0]++;
2574 		cond_resched();
2575 	}
2576 
2577 	btrfs_free_path(path);
2578 	atomic_inc(&root->fs_info->defrag_running);
2579 
2580 	return new;
2581 
2582 out_free_list:
2583 	list_for_each_entry_safe(old, tmp, &new->head, list) {
2584 		list_del(&old->list);
2585 		kfree(old);
2586 	}
2587 out_free_path:
2588 	btrfs_free_path(path);
2589 out_kfree:
2590 	kfree(new);
2591 	return NULL;
2592 }
2593 
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers is
 * fully written.
 */
2604 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2605 {
2606 	struct inode *inode = ordered_extent->inode;
2607 	struct btrfs_root *root = BTRFS_I(inode)->root;
2608 	struct btrfs_trans_handle *trans = NULL;
2609 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2610 	struct extent_state *cached_state = NULL;
2611 	struct new_sa_defrag_extent *new = NULL;
2612 	int compress_type = 0;
2613 	int ret;
2614 	bool nolock;
2615 
2616 	nolock = btrfs_is_free_space_inode(inode);
2617 
2618 	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2619 		ret = -EIO;
2620 		goto out;
2621 	}
2622 
2623 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2624 		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2625 		btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2626 		if (nolock)
2627 			trans = btrfs_join_transaction_nolock(root);
2628 		else
2629 			trans = btrfs_join_transaction(root);
2630 		if (IS_ERR(trans)) {
2631 			ret = PTR_ERR(trans);
2632 			trans = NULL;
2633 			goto out;
2634 		}
2635 		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2636 		ret = btrfs_update_inode_fallback(trans, root, inode);
2637 		if (ret) /* -ENOMEM or corruption */
2638 			btrfs_abort_transaction(trans, root, ret);
2639 		goto out;
2640 	}
2641 
2642 	lock_extent_bits(io_tree, ordered_extent->file_offset,
2643 			 ordered_extent->file_offset + ordered_extent->len - 1,
2644 			 0, &cached_state);
2645 
2646 	ret = test_range_bit(io_tree, ordered_extent->file_offset,
2647 			ordered_extent->file_offset + ordered_extent->len - 1,
2648 			EXTENT_DEFRAG, 1, cached_state);
2649 	if (ret) {
2650 		u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
2651 		if (last_snapshot >= BTRFS_I(inode)->generation)
2652 			/* the inode is shared */
2653 			new = record_old_file_extents(inode, ordered_extent);
2654 
2655 		clear_extent_bit(io_tree, ordered_extent->file_offset,
2656 			ordered_extent->file_offset + ordered_extent->len - 1,
2657 			EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
2658 	}
2659 
2660 	if (nolock)
2661 		trans = btrfs_join_transaction_nolock(root);
2662 	else
2663 		trans = btrfs_join_transaction(root);
2664 	if (IS_ERR(trans)) {
2665 		ret = PTR_ERR(trans);
2666 		trans = NULL;
2667 		goto out_unlock;
2668 	}
2669 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2670 
2671 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
2672 		compress_type = ordered_extent->compress_type;
2673 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2674 		BUG_ON(compress_type);
2675 		ret = btrfs_mark_extent_written(trans, inode,
2676 						ordered_extent->file_offset,
2677 						ordered_extent->file_offset +
2678 						ordered_extent->len);
2679 	} else {
2680 		BUG_ON(root == root->fs_info->tree_root);
2681 		ret = insert_reserved_file_extent(trans, inode,
2682 						ordered_extent->file_offset,
2683 						ordered_extent->start,
2684 						ordered_extent->disk_len,
2685 						ordered_extent->len,
2686 						ordered_extent->len,
2687 						compress_type, 0, 0,
2688 						BTRFS_FILE_EXTENT_REG);
2689 	}
2690 	unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
2691 			   ordered_extent->file_offset, ordered_extent->len,
2692 			   trans->transid);
2693 	if (ret < 0) {
2694 		btrfs_abort_transaction(trans, root, ret);
2695 		goto out_unlock;
2696 	}
2697 
2698 	add_pending_csums(trans, inode, ordered_extent->file_offset,
2699 			  &ordered_extent->list);
2700 
2701 	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2702 	ret = btrfs_update_inode_fallback(trans, root, inode);
2703 	if (ret) { /* -ENOMEM or corruption */
2704 		btrfs_abort_transaction(trans, root, ret);
2705 		goto out_unlock;
2706 	}
2707 	ret = 0;
2708 out_unlock:
2709 	unlock_extent_cached(io_tree, ordered_extent->file_offset,
2710 			     ordered_extent->file_offset +
2711 			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
2712 out:
2713 	if (root != root->fs_info->tree_root)
2714 		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
2715 	if (trans)
2716 		btrfs_end_transaction(trans, root);
2717 
2718 	if (ret) {
2719 		clear_extent_uptodate(io_tree, ordered_extent->file_offset,
2720 				      ordered_extent->file_offset +
2721 				      ordered_extent->len - 1, NULL, GFP_NOFS);
2722 
2723 		/*
2724 		 * If the ordered extent had an IOERR or something else went
2725 		 * wrong we need to return the space for this ordered extent
2726 		 * back to the allocator.
2727 		 */
2728 		if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2729 		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
2730 			btrfs_free_reserved_extent(root, ordered_extent->start,
2731 						   ordered_extent->disk_len);
2732 	}
2733 
2735 	/*
2736 	 * This needs to be done to make sure anybody waiting knows we are done
2737 	 * updating everything for this ordered extent.
2738 	 */
2739 	btrfs_remove_ordered_extent(inode, ordered_extent);
2740 
2741 	/* for snapshot-aware defrag */
2742 	if (new)
2743 		relink_file_extents(new);
2744 
2745 	/* once for us */
2746 	btrfs_put_ordered_extent(ordered_extent);
2747 	/* once for the tree */
2748 	btrfs_put_ordered_extent(ordered_extent);
2749 
2750 	return ret;
2751 }
2752 
2753 static void finish_ordered_fn(struct btrfs_work *work)
2754 {
2755 	struct btrfs_ordered_extent *ordered_extent;
2756 	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
2757 	btrfs_finish_ordered_io(ordered_extent);
2758 }
2759 
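/*
 * Writeback completion hook.  We are in bio endio context here, so once
 * the whole ordered extent has finished we only queue the heavy lifting
 * (btrfs_finish_ordered_io) to a worker thread.
 */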
2760 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
2761 				struct extent_state *state, int uptodate)
2762 {
2763 	struct inode *inode = page->mapping->host;
2764 	struct btrfs_root *root = BTRFS_I(inode)->root;
2765 	struct btrfs_ordered_extent *ordered_extent = NULL;
2766 	struct btrfs_workers *workers;
2767 
2768 	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
2769 
2770 	ClearPagePrivate2(page);
2771 	if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
2772 					    end - start + 1, uptodate))
2773 		return 0;
2774 
2775 	ordered_extent->work.func = finish_ordered_fn;
2776 	ordered_extent->work.flags = 0;
2777 
2778 	if (btrfs_is_free_space_inode(inode))
2779 		workers = &root->fs_info->endio_freespace_worker;
2780 	else
2781 		workers = &root->fs_info->endio_write_workers;
2782 	btrfs_queue_worker(workers, &ordered_extent->work);
2783 
2784 	return 0;
2785 }
2786 
2787 /*
2788  * when reads are done, we need to check csums to verify the data is correct
2789  * if there's a match, we allow the bio to finish.  If not, the code in
2790  * extent_io.c will try to find good copies for us.
2791  */
2792 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
2793 			       struct extent_state *state, int mirror)
2794 {
2795 	size_t offset = start - page_offset(page);
2796 	struct inode *inode = page->mapping->host;
2797 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2798 	char *kaddr;
2799 	u64 private = ~(u32)0;
2800 	int ret;
2801 	struct btrfs_root *root = BTRFS_I(inode)->root;
2802 	u32 csum = ~(u32)0;
2803 	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
2804 	                              DEFAULT_RATELIMIT_BURST);
2805 
2806 	if (PageChecked(page)) {
2807 		ClearPageChecked(page);
2808 		goto good;
2809 	}
2810 
2811 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
2812 		goto good;
2813 
2814 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
2815 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
2816 		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
2817 				  GFP_NOFS);
2818 		return 0;
2819 	}
2820 
2821 	if (state && state->start == start) {
2822 		private = state->private;
2823 		ret = 0;
2824 	} else {
2825 		ret = get_state_private(io_tree, start, &private);
2826 	}
2827 	kaddr = kmap_atomic(page);
2828 	if (ret)
2829 		goto zeroit;
2830 
2831 	csum = btrfs_csum_data(kaddr + offset, csum,  end - start + 1);
2832 	btrfs_csum_final(csum, (char *)&csum);
2833 	if (csum != private)
2834 		goto zeroit;
2835 
2836 	kunmap_atomic(kaddr);
2837 good:
2838 	return 0;
2839 
2840 zeroit:
2841 	if (__ratelimit(&_rs))
2842 		btrfs_info(root->fs_info, "csum failed ino %llu off %llu csum %u private %llu",
2843 			(unsigned long long)btrfs_ino(page->mapping->host),
2844 			(unsigned long long)start, csum,
2845 			(unsigned long long)private);
2846 	memset(kaddr + offset, 1, end - start + 1);
2847 	flush_dcache_page(page);
2848 	kunmap_atomic(kaddr);
2849 	if (private == 0)
2850 		return 0;
2851 	return -EIO;
2852 }
2853 
2854 struct delayed_iput {
2855 	struct list_head list;
2856 	struct inode *inode;
2857 };
2858 
2859 /* JDM: If this is fs-wide, why can't we add a pointer to
2860  * btrfs_inode instead and avoid the allocation? */
2861 void btrfs_add_delayed_iput(struct inode *inode)
2862 {
2863 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2864 	struct delayed_iput *delayed;
2865 
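	/* if ours is not the last reference, just drop it right away */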
2866 	if (atomic_add_unless(&inode->i_count, -1, 1))
2867 		return;
2868 
2869 	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
2870 	delayed->inode = inode;
2871 
2872 	spin_lock(&fs_info->delayed_iput_lock);
2873 	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
2874 	spin_unlock(&fs_info->delayed_iput_lock);
2875 }
2876 
2877 void btrfs_run_delayed_iputs(struct btrfs_root *root)
2878 {
2879 	LIST_HEAD(list);
2880 	struct btrfs_fs_info *fs_info = root->fs_info;
2881 	struct delayed_iput *delayed;
2882 	int empty;
2883 
2884 	spin_lock(&fs_info->delayed_iput_lock);
2885 	empty = list_empty(&fs_info->delayed_iputs);
2886 	spin_unlock(&fs_info->delayed_iput_lock);
2887 	if (empty)
2888 		return;
2889 
2890 	spin_lock(&fs_info->delayed_iput_lock);
2891 	list_splice_init(&fs_info->delayed_iputs, &list);
2892 	spin_unlock(&fs_info->delayed_iput_lock);
2893 
2894 	while (!list_empty(&list)) {
2895 		delayed = list_entry(list.next, struct delayed_iput, list);
2896 		list_del(&delayed->list);
2897 		iput(delayed->inode);
2898 		kfree(delayed);
2899 	}
2900 }
2901 
2902 /*
 * This is called at transaction commit time.  If there are no orphan
 * files in the subvolume, it removes the orphan item and frees the
 * block_rsv structure.
2906  */
2907 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2908 			      struct btrfs_root *root)
2909 {
2910 	struct btrfs_block_rsv *block_rsv;
2911 	int ret;
2912 
2913 	if (atomic_read(&root->orphan_inodes) ||
2914 	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
2915 		return;
2916 
2917 	spin_lock(&root->orphan_lock);
2918 	if (atomic_read(&root->orphan_inodes)) {
2919 		spin_unlock(&root->orphan_lock);
2920 		return;
2921 	}
2922 
2923 	if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
2924 		spin_unlock(&root->orphan_lock);
2925 		return;
2926 	}
2927 
2928 	block_rsv = root->orphan_block_rsv;
2929 	root->orphan_block_rsv = NULL;
2930 	spin_unlock(&root->orphan_lock);
2931 
2932 	if (root->orphan_item_inserted &&
2933 	    btrfs_root_refs(&root->root_item) > 0) {
2934 		ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
2935 					    root->root_key.objectid);
2936 		BUG_ON(ret);
2937 		root->orphan_item_inserted = 0;
2938 	}
2939 
2940 	if (block_rsv) {
2941 		WARN_ON(block_rsv->size > 0);
2942 		btrfs_free_block_rsv(root, block_rsv);
2943 	}
2944 }
2945 
2946 /*
2947  * This creates an orphan entry for the given inode in case something goes
2948  * wrong in the middle of an unlink/truncate.
2949  *
2950  * NOTE: caller of this function should reserve 5 units of metadata for
2951  *	 this function.
2952  */
2953 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2954 {
2955 	struct btrfs_root *root = BTRFS_I(inode)->root;
2956 	struct btrfs_block_rsv *block_rsv = NULL;
2957 	int reserve = 0;
2958 	int insert = 0;
2959 	int ret;
2960 
2961 	if (!root->orphan_block_rsv) {
2962 		block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
2963 		if (!block_rsv)
2964 			return -ENOMEM;
2965 	}
2966 
2967 	spin_lock(&root->orphan_lock);
2968 	if (!root->orphan_block_rsv) {
2969 		root->orphan_block_rsv = block_rsv;
2970 	} else if (block_rsv) {
2971 		btrfs_free_block_rsv(root, block_rsv);
2972 		block_rsv = NULL;
2973 	}
2974 
2975 	if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2976 			      &BTRFS_I(inode)->runtime_flags)) {
2977 #if 0
2978 		/*
2979 		 * For proper ENOSPC handling, we should do orphan
2980 		 * cleanup when mounting. But this introduces backward
2981 		 * compatibility issue.
2982 		 */
2983 		if (!xchg(&root->orphan_item_inserted, 1))
2984 			insert = 2;
2985 		else
2986 			insert = 1;
2987 #endif
2988 		insert = 1;
2989 		atomic_inc(&root->orphan_inodes);
2990 	}
2991 
2992 	if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
2993 			      &BTRFS_I(inode)->runtime_flags))
2994 		reserve = 1;
2995 	spin_unlock(&root->orphan_lock);
2996 
2997 	/* grab metadata reservation from transaction handle */
2998 	if (reserve) {
2999 		ret = btrfs_orphan_reserve_metadata(trans, inode);
3000 		BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
3001 	}
3002 
3003 	/* insert an orphan item to track this unlinked/truncated file */
3004 	if (insert >= 1) {
3005 		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
3006 		if (ret && ret != -EEXIST) {
3007 			clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3008 				  &BTRFS_I(inode)->runtime_flags);
3009 			btrfs_abort_transaction(trans, root, ret);
3010 			return ret;
3011 		}
3012 		ret = 0;
3013 	}
3014 
	/* insert an orphan item to record that the subvolume has orphan files */
3016 	if (insert >= 2) {
3017 		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
3018 					       root->root_key.objectid);
3019 		if (ret && ret != -EEXIST) {
3020 			btrfs_abort_transaction(trans, root, ret);
3021 			return ret;
3022 		}
3023 	}
3024 	return 0;
3025 }
3026 
3027 /*
3028  * We have done the truncate/delete so we can go ahead and remove the orphan
3029  * item for this particular inode.
3030  */
3031 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3032 			    struct inode *inode)
3033 {
3034 	struct btrfs_root *root = BTRFS_I(inode)->root;
3035 	int delete_item = 0;
3036 	int release_rsv = 0;
3037 	int ret = 0;
3038 
3039 	spin_lock(&root->orphan_lock);
3040 	if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3041 			       &BTRFS_I(inode)->runtime_flags))
3042 		delete_item = 1;
3043 
3044 	if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3045 			       &BTRFS_I(inode)->runtime_flags))
3046 		release_rsv = 1;
3047 	spin_unlock(&root->orphan_lock);
3048 
3049 	if (trans && delete_item) {
3050 		ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
3051 		BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
3052 	}
3053 
3054 	if (release_rsv) {
3055 		btrfs_orphan_release_metadata(inode);
3056 		atomic_dec(&root->orphan_inodes);
3057 	}
3058 
3059 	return 0;
3060 }
3061 
3062 /*
3063  * this cleans up any orphans that may be left on the list from the last use
3064  * of this root.
3065  */
3066 int btrfs_orphan_cleanup(struct btrfs_root *root)
3067 {
3068 	struct btrfs_path *path;
3069 	struct extent_buffer *leaf;
3070 	struct btrfs_key key, found_key;
3071 	struct btrfs_trans_handle *trans;
3072 	struct inode *inode;
3073 	u64 last_objectid = 0;
3074 	int ret = 0, nr_unlink = 0, nr_truncate = 0;
3075 
3076 	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3077 		return 0;
3078 
3079 	path = btrfs_alloc_path();
3080 	if (!path) {
3081 		ret = -ENOMEM;
3082 		goto out;
3083 	}
3084 	path->reada = -1;
3085 
3086 	key.objectid = BTRFS_ORPHAN_OBJECTID;
3087 	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
3088 	key.offset = (u64)-1;
3089 
3090 	while (1) {
3091 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3092 		if (ret < 0)
3093 			goto out;
3094 
3095 		/*
		 * ret == 0 means we found what we were searching for, which
		 * is weird, but possible.  So only screw with the path if we
		 * didn't find the key and see if we have stuff that matches.
3099 		 */
3100 		if (ret > 0) {
3101 			ret = 0;
3102 			if (path->slots[0] == 0)
3103 				break;
3104 			path->slots[0]--;
3105 		}
3106 
3107 		/* pull out the item */
3108 		leaf = path->nodes[0];
3109 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3110 
3111 		/* make sure the item matches what we want */
3112 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3113 			break;
3114 		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
3115 			break;
3116 
3117 		/* release the path since we're done with it */
3118 		btrfs_release_path(path);
3119 
3120 		/*
3121 		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing.  We store the inode number in the
3123 		 * offset of the orphan item.
3124 		 */
3125 
3126 		if (found_key.offset == last_objectid) {
3127 			btrfs_err(root->fs_info,
3128 				"Error removing orphan entry, stopping orphan cleanup");
3129 			ret = -EINVAL;
3130 			goto out;
3131 		}
3132 
3133 		last_objectid = found_key.offset;
3134 
3135 		found_key.objectid = found_key.offset;
3136 		found_key.type = BTRFS_INODE_ITEM_KEY;
3137 		found_key.offset = 0;
3138 		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
3139 		ret = PTR_RET(inode);
3140 		if (ret && ret != -ESTALE)
3141 			goto out;
3142 
3143 		if (ret == -ESTALE && root == root->fs_info->tree_root) {
3144 			struct btrfs_root *dead_root;
3145 			struct btrfs_fs_info *fs_info = root->fs_info;
3146 			int is_dead_root = 0;
3147 
3148 			/*
3149 			 * this is an orphan in the tree root. Currently these
3150 			 * could come from 2 sources:
3151 			 *  a) a snapshot deletion in progress
3152 			 *  b) a free space cache inode
3153 			 * We need to distinguish those two, as the snapshot
3154 			 * orphan must not get deleted.
3155 			 * find_dead_roots already ran before us, so if this
3156 			 * is a snapshot deletion, we should find the root
3157 			 * in the dead_roots list
3158 			 */
3159 			spin_lock(&fs_info->trans_lock);
3160 			list_for_each_entry(dead_root, &fs_info->dead_roots,
3161 					    root_list) {
3162 				if (dead_root->root_key.objectid ==
3163 				    found_key.objectid) {
3164 					is_dead_root = 1;
3165 					break;
3166 				}
3167 			}
3168 			spin_unlock(&fs_info->trans_lock);
3169 			if (is_dead_root) {
3170 				/* prevent this orphan from being found again */
3171 				key.offset = found_key.objectid - 1;
3172 				continue;
3173 			}
3174 		}
3175 		/*
3176 		 * Inode is already gone but the orphan item is still there,
3177 		 * kill the orphan item.
3178 		 */
3179 		if (ret == -ESTALE) {
3180 			trans = btrfs_start_transaction(root, 1);
3181 			if (IS_ERR(trans)) {
3182 				ret = PTR_ERR(trans);
3183 				goto out;
3184 			}
3185 			btrfs_debug(root->fs_info, "auto deleting %Lu",
3186 				found_key.objectid);
3187 			ret = btrfs_del_orphan_item(trans, root,
3188 						    found_key.objectid);
3189 			BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
3190 			btrfs_end_transaction(trans, root);
3191 			continue;
3192 		}
3193 
3194 		/*
3195 		 * add this inode to the orphan list so btrfs_orphan_del does
3196 		 * the proper thing when we hit it
3197 		 */
3198 		set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3199 			&BTRFS_I(inode)->runtime_flags);
3200 		atomic_inc(&root->orphan_inodes);
3201 
		/* if we have links, this was a truncate, let's do that */
3203 		if (inode->i_nlink) {
3204 			if (!S_ISREG(inode->i_mode)) {
3205 				WARN_ON(1);
3206 				iput(inode);
3207 				continue;
3208 			}
3209 			nr_truncate++;
3210 
3211 			/* 1 for the orphan item deletion. */
3212 			trans = btrfs_start_transaction(root, 1);
3213 			if (IS_ERR(trans)) {
3214 				ret = PTR_ERR(trans);
3215 				goto out;
3216 			}
3217 			ret = btrfs_orphan_add(trans, inode);
3218 			btrfs_end_transaction(trans, root);
3219 			if (ret)
3220 				goto out;
3221 
3222 			ret = btrfs_truncate(inode);
3223 			if (ret)
3224 				btrfs_orphan_del(NULL, inode);
3225 		} else {
3226 			nr_unlink++;
3227 		}
3228 
3229 		/* this will do delete_inode and everything for us */
3230 		iput(inode);
3231 		if (ret)
3232 			goto out;
3233 	}
3234 	/* release the path since we're done with it */
3235 	btrfs_release_path(path);
3236 
3237 	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3238 
3239 	if (root->orphan_block_rsv)
3240 		btrfs_block_rsv_release(root, root->orphan_block_rsv,
3241 					(u64)-1);
3242 
3243 	if (root->orphan_block_rsv || root->orphan_item_inserted) {
3244 		trans = btrfs_join_transaction(root);
3245 		if (!IS_ERR(trans))
3246 			btrfs_end_transaction(trans, root);
3247 	}
3248 
3249 	if (nr_unlink)
3250 		btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
3251 	if (nr_truncate)
3252 		btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
3253 
3254 out:
3255 	if (ret)
3256 		btrfs_crit(root->fs_info,
3257 			"could not do orphan cleanup %d", ret);
3258 	btrfs_free_path(path);
3259 	return ret;
3260 }
3261 
3262 /*
3263  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3264  * don't find any xattrs, we know there can't be any acls.
3265  *
3266  * slot is the slot the inode is in, objectid is the objectid of the inode
3267  */
3268 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3269 					  int slot, u64 objectid)
3270 {
3271 	u32 nritems = btrfs_header_nritems(leaf);
3272 	struct btrfs_key found_key;
3273 	int scanned = 0;
3274 
3275 	slot++;
3276 	while (slot < nritems) {
3277 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3278 
3279 		/* we found a different objectid, there must not be acls */
3280 		if (found_key.objectid != objectid)
3281 			return 0;
3282 
3283 		/* we found an xattr, assume we've got an acl */
3284 		if (found_key.type == BTRFS_XATTR_ITEM_KEY)
3285 			return 1;
3286 
3287 		/*
3288 		 * we found a key greater than an xattr key, there can't
3289 		 * be any acls later on
3290 		 */
3291 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3292 			return 0;
3293 
3294 		slot++;
3295 		scanned++;
3296 
3297 		/*
3298 		 * it goes inode, inode backrefs, xattrs, extents,
3299 		 * so if there are a ton of hard links to an inode there can
3300 		 * be a lot of backrefs.  Don't waste time searching too hard,
3301 		 * this is just an optimization
3302 		 */
3303 		if (scanned >= 8)
3304 			break;
3305 	}
3306 	/* we hit the end of the leaf before we found an xattr or
3307 	 * something larger than an xattr.  We have to assume the inode
3308 	 * has acls
3309 	 */
3310 	return 1;
3311 }
3312 
3313 /*
3314  * read an inode from the btree into the in-memory inode
3315  */
3316 static void btrfs_read_locked_inode(struct inode *inode)
3317 {
3318 	struct btrfs_path *path;
3319 	struct extent_buffer *leaf;
3320 	struct btrfs_inode_item *inode_item;
3321 	struct btrfs_timespec *tspec;
3322 	struct btrfs_root *root = BTRFS_I(inode)->root;
3323 	struct btrfs_key location;
3324 	int maybe_acls;
3325 	u32 rdev;
3326 	int ret;
3327 	bool filled = false;
3328 
3329 	ret = btrfs_fill_inode(inode, &rdev);
3330 	if (!ret)
3331 		filled = true;
3332 
3333 	path = btrfs_alloc_path();
3334 	if (!path)
3335 		goto make_bad;
3336 
3337 	path->leave_spinning = 1;
3338 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3339 
3340 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3341 	if (ret)
3342 		goto make_bad;
3343 
3344 	leaf = path->nodes[0];
3345 
3346 	if (filled)
3347 		goto cache_acl;
3348 
3349 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3350 				    struct btrfs_inode_item);
3351 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3352 	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3353 	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3354 	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3355 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
3356 
3357 	tspec = btrfs_inode_atime(inode_item);
3358 	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3359 	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3360 
3361 	tspec = btrfs_inode_mtime(inode_item);
3362 	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3363 	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3364 
3365 	tspec = btrfs_inode_ctime(inode_item);
3366 	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
3367 	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
3368 
3369 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3370 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3371 	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3372 
3373 	/*
3374 	 * If we were modified in the current generation and evicted from memory
3375 	 * and then re-read we need to do a full sync since we don't have any
3376 	 * idea about which extents were modified before we were evicted from
3377 	 * cache.
3378 	 */
3379 	if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
3380 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3381 			&BTRFS_I(inode)->runtime_flags);
3382 
3383 	inode->i_version = btrfs_inode_sequence(leaf, inode_item);
3384 	inode->i_generation = BTRFS_I(inode)->generation;
3385 	inode->i_rdev = 0;
3386 	rdev = btrfs_inode_rdev(leaf, inode_item);
3387 
3388 	BTRFS_I(inode)->index_cnt = (u64)-1;
3389 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3390 cache_acl:
3391 	/*
3392 	 * try to precache a NULL acl entry for files that don't have
3393 	 * any xattrs or acls
3394 	 */
3395 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3396 					   btrfs_ino(inode));
3397 	if (!maybe_acls)
3398 		cache_no_acl(inode);
3399 
3400 	btrfs_free_path(path);
3401 
3402 	switch (inode->i_mode & S_IFMT) {
3403 	case S_IFREG:
3404 		inode->i_mapping->a_ops = &btrfs_aops;
3405 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3406 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3407 		inode->i_fop = &btrfs_file_operations;
3408 		inode->i_op = &btrfs_file_inode_operations;
3409 		break;
3410 	case S_IFDIR:
3411 		inode->i_fop = &btrfs_dir_file_operations;
3412 		if (root == root->fs_info->tree_root)
3413 			inode->i_op = &btrfs_dir_ro_inode_operations;
3414 		else
3415 			inode->i_op = &btrfs_dir_inode_operations;
3416 		break;
3417 	case S_IFLNK:
3418 		inode->i_op = &btrfs_symlink_inode_operations;
3419 		inode->i_mapping->a_ops = &btrfs_symlink_aops;
3420 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3421 		break;
3422 	default:
3423 		inode->i_op = &btrfs_special_inode_operations;
3424 		init_special_inode(inode, inode->i_mode, rdev);
3425 		break;
3426 	}
3427 
3428 	btrfs_update_iflags(inode);
3429 	return;
3430 
3431 make_bad:
3432 	btrfs_free_path(path);
3433 	make_bad_inode(inode);
3434 }
3435 
3436 /*
3437  * given a leaf and an inode, copy the inode fields into the leaf
3438  */
3439 static void fill_inode_item(struct btrfs_trans_handle *trans,
3440 			    struct extent_buffer *leaf,
3441 			    struct btrfs_inode_item *item,
3442 			    struct inode *inode)
3443 {
3444 	struct btrfs_map_token token;
3445 
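	/*
	 * the map token caches the currently mapped leaf area so the
	 * setters below don't remap the extent buffer for every field
	 */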
3446 	btrfs_init_map_token(&token);
3447 
3448 	btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3449 	btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3450 	btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3451 				   &token);
3452 	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3453 	btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3454 
3455 	btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
3456 				     inode->i_atime.tv_sec, &token);
3457 	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
3458 				      inode->i_atime.tv_nsec, &token);
3459 
3460 	btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
3461 				     inode->i_mtime.tv_sec, &token);
3462 	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
3463 				      inode->i_mtime.tv_nsec, &token);
3464 
3465 	btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
3466 				     inode->i_ctime.tv_sec, &token);
3467 	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
3468 				      inode->i_ctime.tv_nsec, &token);
3469 
3470 	btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3471 				     &token);
3472 	btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3473 					 &token);
3474 	btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3475 	btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3476 	btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3477 	btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3478 	btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3479 }
3480 
3481 /*
3482  * copy everything in the in-memory inode into the btree.
3483  */
3484 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3485 				struct btrfs_root *root, struct inode *inode)
3486 {
3487 	struct btrfs_inode_item *inode_item;
3488 	struct btrfs_path *path;
3489 	struct extent_buffer *leaf;
3490 	int ret;
3491 
3492 	path = btrfs_alloc_path();
3493 	if (!path)
3494 		return -ENOMEM;
3495 
3496 	path->leave_spinning = 1;
3497 	ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3498 				 1);
3499 	if (ret) {
3500 		if (ret > 0)
3501 			ret = -ENOENT;
3502 		goto failed;
3503 	}
3504 
3505 	btrfs_unlock_up_safe(path, 1);
3506 	leaf = path->nodes[0];
3507 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3508 				    struct btrfs_inode_item);
3509 
3510 	fill_inode_item(trans, leaf, inode_item, inode);
3511 	btrfs_mark_buffer_dirty(leaf);
3512 	btrfs_set_inode_last_trans(trans, inode);
3513 	ret = 0;
3514 failed:
3515 	btrfs_free_path(path);
3516 	return ret;
3517 }
3518 
3519 /*
3520  * copy everything in the in-memory inode into the btree.
3521  */
3522 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3523 				struct btrfs_root *root, struct inode *inode)
3524 {
3525 	int ret;
3526 
3527 	/*
3528 	 * If the inode is a free space inode, we can deadlock during commit
3529 	 * if we put it into the delayed code.
3530 	 *
3531 	 * The data relocation inode should also be directly updated
3532 	 * without delay
3533 	 */
3534 	if (!btrfs_is_free_space_inode(inode)
3535 	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
3536 		btrfs_update_root_times(trans, root);
3537 
3538 		ret = btrfs_delayed_update_inode(trans, root, inode);
3539 		if (!ret)
3540 			btrfs_set_inode_last_trans(trans, inode);
3541 		return ret;
3542 	}
3543 
3544 	return btrfs_update_inode_item(trans, root, inode);
3545 }
3546 
3547 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3548 					 struct btrfs_root *root,
3549 					 struct inode *inode)
3550 {
3551 	int ret;
3552 
3553 	ret = btrfs_update_inode(trans, root, inode);
3554 	if (ret == -ENOSPC)
3555 		return btrfs_update_inode_item(trans, root, inode);
3556 	return ret;
3557 }
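
/*
 * Illustrative sketch (hypothetical helper, not used in this file): the
 * usual caller pattern around the update helpers above.  One metadata
 * unit is reserved for the inode item update; the _fallback variant
 * retries with a direct item update if the delayed path hits -ENOSPC.
 */
static inline int example_sync_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret;

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_update_inode_fallback(trans, root, inode);
	btrfs_end_transaction(trans, root);
	return ret;
}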
3558 
3559 /*
3560  * unlink helper that gets used here in inode.c and in the tree logging
3561  * recovery code.  It removes a link in a directory with a given name, and
3562  * also drops the back refs in the inode to the directory
3563  */
3564 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3565 				struct btrfs_root *root,
3566 				struct inode *dir, struct inode *inode,
3567 				const char *name, int name_len)
3568 {
3569 	struct btrfs_path *path;
3570 	int ret = 0;
3571 	struct extent_buffer *leaf;
3572 	struct btrfs_dir_item *di;
3573 	struct btrfs_key key;
3574 	u64 index;
3575 	u64 ino = btrfs_ino(inode);
3576 	u64 dir_ino = btrfs_ino(dir);
3577 
3578 	path = btrfs_alloc_path();
3579 	if (!path) {
3580 		ret = -ENOMEM;
3581 		goto out;
3582 	}
3583 
3584 	path->leave_spinning = 1;
3585 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3586 				    name, name_len, -1);
3587 	if (IS_ERR(di)) {
3588 		ret = PTR_ERR(di);
3589 		goto err;
3590 	}
3591 	if (!di) {
3592 		ret = -ENOENT;
3593 		goto err;
3594 	}
3595 	leaf = path->nodes[0];
3596 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
3597 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
3598 	if (ret)
3599 		goto err;
3600 	btrfs_release_path(path);
3601 
3602 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
3603 				  dir_ino, &index);
3604 	if (ret) {
3605 		btrfs_info(root->fs_info,
3606 			"failed to delete reference to %.*s, inode %llu parent %llu",
3607 			name_len, name,
3608 			(unsigned long long)ino, (unsigned long long)dir_ino);
3609 		btrfs_abort_transaction(trans, root, ret);
3610 		goto err;
3611 	}
3612 
3613 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3614 	if (ret) {
3615 		btrfs_abort_transaction(trans, root, ret);
3616 		goto err;
3617 	}
3618 
3619 	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
3620 					 inode, dir_ino);
3621 	if (ret != 0 && ret != -ENOENT) {
3622 		btrfs_abort_transaction(trans, root, ret);
3623 		goto err;
3624 	}
3625 
3626 	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
3627 					   dir, index);
3628 	if (ret == -ENOENT)
3629 		ret = 0;
3630 	else if (ret)
3631 		btrfs_abort_transaction(trans, root, ret);
3632 err:
3633 	btrfs_free_path(path);
3634 	if (ret)
3635 		goto out;
3636 
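	/*
	 * A directory's i_size in btrfs is the sum of the name lengths of
	 * its dir items and dir index items, so each name counts twice.
	 */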
3637 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3638 	inode_inc_iversion(inode);
3639 	inode_inc_iversion(dir);
3640 	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3641 	ret = btrfs_update_inode(trans, root, dir);
3642 out:
3643 	return ret;
3644 }
3645 
3646 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3647 		       struct btrfs_root *root,
3648 		       struct inode *dir, struct inode *inode,
3649 		       const char *name, int name_len)
3650 {
3651 	int ret;
3652 	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
3653 	if (!ret) {
3654 		btrfs_drop_nlink(inode);
3655 		ret = btrfs_update_inode(trans, root, inode);
3656 	}
3657 	return ret;
3658 }
3659 
3660 
3661 /* helper to check if there is any shared block in the path */
3662 static int check_path_shared(struct btrfs_root *root,
3663 			     struct btrfs_path *path)
3664 {
3665 	struct extent_buffer *eb;
3666 	int level;
3667 	u64 refs = 1;
3668 
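	/*
	 * Walk every node on the path from the leaf up; a reference count
	 * above one on any of them means the block is shared with a
	 * snapshot, so unlinking may COW instead of freeing space.
	 */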
3669 	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
3670 		int ret;
3671 
3672 		if (!path->nodes[level])
3673 			break;
3674 		eb = path->nodes[level];
3675 		if (!btrfs_block_can_be_shared(root, eb))
3676 			continue;
3677 		ret = btrfs_lookup_extent_info(NULL, root, eb->start, level, 1,
3678 					       &refs, NULL);
3679 		if (refs > 1)
3680 			return 1;
3681 	}
3682 	return 0;
3683 }
3684 
3685 /*
3686  * helper to start transaction for unlink and rmdir.
3687  *
3688  * unlink and rmdir are special in btrfs, they do not always free space.
3689  * So in the enospc case, we should make sure they will free space before
3690  * allowing them to use the global metadata reservation.
3691  */
3692 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
3693 						       struct dentry *dentry)
3694 {
3695 	struct btrfs_trans_handle *trans;
3696 	struct btrfs_root *root = BTRFS_I(dir)->root;
3697 	struct btrfs_path *path;
3698 	struct btrfs_dir_item *di;
3699 	struct inode *inode = dentry->d_inode;
3700 	u64 index;
3701 	int check_link = 1;
3702 	int err = -ENOSPC;
3703 	int ret;
3704 	u64 ino = btrfs_ino(inode);
3705 	u64 dir_ino = btrfs_ino(dir);
3706 
3707 	/*
3708 	 * 1 for the possible orphan item
3709 	 * 1 for the dir item
3710 	 * 1 for the dir index
3711 	 * 1 for the inode ref
3712 	 * 1 for the inode
3713 	 */
3714 	trans = btrfs_start_transaction(root, 5);
3715 	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
3716 		return trans;
3717 
3718 	if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
3719 		return ERR_PTR(-ENOSPC);
3720 
3721 	/* check if someone else holds a reference */
3722 	if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
3723 		return ERR_PTR(-ENOSPC);
3724 
3725 	if (atomic_read(&inode->i_count) > 2)
3726 		return ERR_PTR(-ENOSPC);
3727 
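	/* only one enospc unlink may probe the commit root at a time */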
3728 	if (xchg(&root->fs_info->enospc_unlink, 1))
3729 		return ERR_PTR(-ENOSPC);
3730 
3731 	path = btrfs_alloc_path();
3732 	if (!path) {
3733 		root->fs_info->enospc_unlink = 0;
3734 		return ERR_PTR(-ENOMEM);
3735 	}
3736 
3737 	/* 1 for the orphan item */
3738 	trans = btrfs_start_transaction(root, 1);
3739 	if (IS_ERR(trans)) {
3740 		btrfs_free_path(path);
3741 		root->fs_info->enospc_unlink = 0;
3742 		return trans;
3743 	}
3744 
3745 	path->skip_locking = 1;
3746 	path->search_commit_root = 1;
3747 
3748 	ret = btrfs_lookup_inode(trans, root, path,
3749 				&BTRFS_I(dir)->location, 0);
3750 	if (ret < 0) {
3751 		err = ret;
3752 		goto out;
3753 	}
3754 	if (ret == 0) {
3755 		if (check_path_shared(root, path))
3756 			goto out;
3757 	} else {
3758 		check_link = 0;
3759 	}
3760 	btrfs_release_path(path);
3761 
3762 	ret = btrfs_lookup_inode(trans, root, path,
3763 				&BTRFS_I(inode)->location, 0);
3764 	if (ret < 0) {
3765 		err = ret;
3766 		goto out;
3767 	}
3768 	if (ret == 0) {
3769 		if (check_path_shared(root, path))
3770 			goto out;
3771 	} else {
3772 		check_link = 0;
3773 	}
3774 	btrfs_release_path(path);
3775 
3776 	if (ret == 0 && S_ISREG(inode->i_mode)) {
3777 		ret = btrfs_lookup_file_extent(trans, root, path,
3778 					       ino, (u64)-1, 0);
3779 		if (ret < 0) {
3780 			err = ret;
3781 			goto out;
3782 		}
3783 		BUG_ON(ret == 0); /* Corruption */
3784 		if (check_path_shared(root, path))
3785 			goto out;
3786 		btrfs_release_path(path);
3787 	}
3788 
3789 	if (!check_link) {
3790 		err = 0;
3791 		goto out;
3792 	}
3793 
3794 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3795 				dentry->d_name.name, dentry->d_name.len, 0);
3796 	if (IS_ERR(di)) {
3797 		err = PTR_ERR(di);
3798 		goto out;
3799 	}
3800 	if (di) {
3801 		if (check_path_shared(root, path))
3802 			goto out;
3803 	} else {
3804 		err = 0;
3805 		goto out;
3806 	}
3807 	btrfs_release_path(path);
3808 
3809 	ret = btrfs_get_inode_ref_index(trans, root, path, dentry->d_name.name,
3810 					dentry->d_name.len, ino, dir_ino, 0,
3811 					&index);
3812 	if (ret) {
3813 		err = ret;
3814 		goto out;
3815 	}
3816 
3817 	if (check_path_shared(root, path))
3818 		goto out;
3819 
3820 	btrfs_release_path(path);
3821 
3822 	/*
3823 	 * This is a commit root search; if we can look up the inode item and
3824 	 * other related items in the commit root, it means the transaction of
3825 	 * dir/file creation has been committed, and the dir index item that we
3826 	 * delay to insert has also been inserted into the commit root. So
3827 	 * we needn't worry about the delayed insertion of the dir index item
3828 	 * here.
3829 	 */
3830 	di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
3831 				dentry->d_name.name, dentry->d_name.len, 0);
3832 	if (IS_ERR(di)) {
3833 		err = PTR_ERR(di);
3834 		goto out;
3835 	}
3836 	BUG_ON(ret == -ENOENT);
3837 	if (check_path_shared(root, path))
3838 		goto out;
3839 
3840 	err = 0;
3841 out:
3842 	btrfs_free_path(path);
3843 	/* Migrate the orphan reservation over */
3844 	if (!err)
3845 		err = btrfs_block_rsv_migrate(trans->block_rsv,
3846 				&root->fs_info->global_block_rsv,
3847 				trans->bytes_reserved);
3848 
3849 	if (err) {
3850 		btrfs_end_transaction(trans, root);
3851 		root->fs_info->enospc_unlink = 0;
3852 		return ERR_PTR(err);
3853 	}
3854 
3855 	trans->block_rsv = &root->fs_info->global_block_rsv;
3856 	return trans;
3857 }
3858 
3859 static void __unlink_end_trans(struct btrfs_trans_handle *trans,
3860 			       struct btrfs_root *root)
3861 {
3862 	if (trans->block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL) {
3863 		btrfs_block_rsv_release(root, trans->block_rsv,
3864 					trans->bytes_reserved);
3865 		trans->block_rsv = &root->fs_info->trans_block_rsv;
3866 		BUG_ON(!root->fs_info->enospc_unlink);
3867 		root->fs_info->enospc_unlink = 0;
3868 	}
3869 	btrfs_end_transaction(trans, root);
3870 }
3871 
3872 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
3873 {
3874 	struct btrfs_root *root = BTRFS_I(dir)->root;
3875 	struct btrfs_trans_handle *trans;
3876 	struct inode *inode = dentry->d_inode;
3877 	int ret;
3878 
3879 	trans = __unlink_start_trans(dir, dentry);
3880 	if (IS_ERR(trans))
3881 		return PTR_ERR(trans);
3882 
3883 	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
3884 
3885 	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3886 				 dentry->d_name.name, dentry->d_name.len);
3887 	if (ret)
3888 		goto out;
3889 
3890 	if (inode->i_nlink == 0) {
3891 		ret = btrfs_orphan_add(trans, inode);
3892 		if (ret)
3893 			goto out;
3894 	}
3895 
3896 out:
3897 	__unlink_end_trans(trans, root);
3898 	btrfs_btree_balance_dirty(root);
3899 	return ret;
3900 }
3901 
3902 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3903 			struct btrfs_root *root,
3904 			struct inode *dir, u64 objectid,
3905 			const char *name, int name_len)
3906 {
3907 	struct btrfs_path *path;
3908 	struct extent_buffer *leaf;
3909 	struct btrfs_dir_item *di;
3910 	struct btrfs_key key;
3911 	u64 index;
3912 	int ret;
3913 	u64 dir_ino = btrfs_ino(dir);
3914 
3915 	path = btrfs_alloc_path();
3916 	if (!path)
3917 		return -ENOMEM;
3918 
3919 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3920 				   name, name_len, -1);
3921 	if (IS_ERR_OR_NULL(di)) {
3922 		if (!di)
3923 			ret = -ENOENT;
3924 		else
3925 			ret = PTR_ERR(di);
3926 		goto out;
3927 	}
3928 
3929 	leaf = path->nodes[0];
3930 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
3931 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
3932 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
3933 	if (ret) {
3934 		btrfs_abort_transaction(trans, root, ret);
3935 		goto out;
3936 	}
3937 	btrfs_release_path(path);
3938 
3939 	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
3940 				 objectid, root->root_key.objectid,
3941 				 dir_ino, &index, name, name_len);
3942 	if (ret < 0) {
3943 		if (ret != -ENOENT) {
3944 			btrfs_abort_transaction(trans, root, ret);
3945 			goto out;
3946 		}
3947 		di = btrfs_search_dir_index_item(root, path, dir_ino,
3948 						 name, name_len);
3949 		if (IS_ERR_OR_NULL(di)) {
3950 			if (!di)
3951 				ret = -ENOENT;
3952 			else
3953 				ret = PTR_ERR(di);
3954 			btrfs_abort_transaction(trans, root, ret);
3955 			goto out;
3956 		}
3957 
3958 		leaf = path->nodes[0];
3959 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3960 		btrfs_release_path(path);
3961 		index = key.offset;
3962 	}
3963 	btrfs_release_path(path);
3964 
3965 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3966 	if (ret) {
3967 		btrfs_abort_transaction(trans, root, ret);
3968 		goto out;
3969 	}
3970 
3971 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3972 	inode_inc_iversion(dir);
3973 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3974 	ret = btrfs_update_inode_fallback(trans, root, dir);
3975 	if (ret)
3976 		btrfs_abort_transaction(trans, root, ret);
3977 out:
3978 	btrfs_free_path(path);
3979 	return ret;
3980 }
3981 
3982 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3983 {
3984 	struct inode *inode = dentry->d_inode;
3985 	int err = 0;
3986 	struct btrfs_root *root = BTRFS_I(dir)->root;
3987 	struct btrfs_trans_handle *trans;
3988 
3989 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
3990 		return -ENOTEMPTY;
3991 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
3992 		return -EPERM;
3993 
3994 	trans = __unlink_start_trans(dir, dentry);
3995 	if (IS_ERR(trans))
3996 		return PTR_ERR(trans);
3997 
3998 	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
3999 		err = btrfs_unlink_subvol(trans, root, dir,
4000 					  BTRFS_I(inode)->location.objectid,
4001 					  dentry->d_name.name,
4002 					  dentry->d_name.len);
4003 		goto out;
4004 	}
4005 
4006 	err = btrfs_orphan_add(trans, inode);
4007 	if (err)
4008 		goto out;
4009 
4010 	/* now the directory is empty */
4011 	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
4012 				 dentry->d_name.name, dentry->d_name.len);
4013 	if (!err)
4014 		btrfs_i_size_write(inode, 0);
4015 out:
4016 	__unlink_end_trans(trans, root);
4017 	btrfs_btree_balance_dirty(root);
4018 
4019 	return err;
4020 }
4021 
4022 /*
4023  * this can truncate away extent items, csum items and directory items.
4024  * It starts at a high offset and removes keys until it can't find
4025  * any higher than new_size
4026  *
4027  * csum items that cross the new i_size are truncated to the new size
4028  * as well.
4029  *
4030  * min_type is the minimum key type to truncate down to.  If set to 0, this
4031  * will kill all the items on this inode, including the INODE_ITEM_KEY.
4032  */
4033 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4034 			       struct btrfs_root *root,
4035 			       struct inode *inode,
4036 			       u64 new_size, u32 min_type)
4037 {
4038 	struct btrfs_path *path;
4039 	struct extent_buffer *leaf;
4040 	struct btrfs_file_extent_item *fi;
4041 	struct btrfs_key key;
4042 	struct btrfs_key found_key;
4043 	u64 extent_start = 0;
4044 	u64 extent_num_bytes = 0;
4045 	u64 extent_offset = 0;
4046 	u64 item_end = 0;
4047 	u32 found_type = (u8)-1;
4048 	int found_extent;
4049 	int del_item;
4050 	int pending_del_nr = 0;
4051 	int pending_del_slot = 0;
4052 	int extent_type = -1;
4053 	int ret;
4054 	int err = 0;
4055 	u64 ino = btrfs_ino(inode);
4056 
4057 	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4058 
4059 	path = btrfs_alloc_path();
4060 	if (!path)
4061 		return -ENOMEM;
4062 	path->reada = -1;
4063 
4064 	/*
4065 	 * We want to drop from the next block forward in case this new size is
4066 	 * not block aligned since we will be keeping the last block of the
4067 	 * extent just the way it is.
4068 	 */
4069 	if (root->ref_cows || root == root->fs_info->tree_root)
4070 		btrfs_drop_extent_cache(inode, ALIGN(new_size,
4071 					root->sectorsize), (u64)-1, 0);
4072 
4073 	/*
4074 	 * This function is also used to drop the items in the log tree before
4075 	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
4076 	 * it is used to drop the logged items. So we shouldn't kill the delayed
4077 	 * items.
4078 	 */
4079 	if (min_type == 0 && root == BTRFS_I(inode)->root)
4080 		btrfs_kill_delayed_inode_items(inode);
4081 
4082 	key.objectid = ino;
4083 	key.offset = (u64)-1;
4084 	key.type = (u8)-1;
4085 
4086 search_again:
4087 	path->leave_spinning = 1;
4088 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4089 	if (ret < 0) {
4090 		err = ret;
4091 		goto out;
4092 	}
4093 
4094 	if (ret > 0) {
4095 		/* there are no items in the tree for us to truncate, we're
4096 		 * done
4097 		 */
4098 		if (path->slots[0] == 0)
4099 			goto out;
4100 		path->slots[0]--;
4101 	}
4102 
4103 	while (1) {
4104 		fi = NULL;
4105 		leaf = path->nodes[0];
4106 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4107 		found_type = btrfs_key_type(&found_key);
4108 
4109 		if (found_key.objectid != ino)
4110 			break;
4111 
4112 		if (found_type < min_type)
4113 			break;
4114 
4115 		item_end = found_key.offset;
4116 		if (found_type == BTRFS_EXTENT_DATA_KEY) {
4117 			fi = btrfs_item_ptr(leaf, path->slots[0],
4118 					    struct btrfs_file_extent_item);
4119 			extent_type = btrfs_file_extent_type(leaf, fi);
4120 			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4121 				item_end +=
4122 				    btrfs_file_extent_num_bytes(leaf, fi);
4123 			} else {
4124 				item_end += btrfs_file_extent_inline_len(leaf,
4125 									 fi);
4126 			}
4127 			item_end--;
4128 		}
4129 		if (found_type > min_type) {
4130 			del_item = 1;
4131 		} else {
4132 			if (item_end < new_size)
4133 				break;
4134 			if (found_key.offset >= new_size)
4135 				del_item = 1;
4136 			else
4137 				del_item = 0;
4138 		}
4139 		found_extent = 0;
4140 		/* FIXME, shrink the extent if the ref count is only 1 */
4141 		if (found_type != BTRFS_EXTENT_DATA_KEY)
4142 			goto delete;
4143 
4144 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4145 			u64 num_dec;
4146 			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4147 			if (!del_item) {
4148 				u64 orig_num_bytes =
4149 					btrfs_file_extent_num_bytes(leaf, fi);
4150 				extent_num_bytes = ALIGN(new_size -
4151 						found_key.offset,
4152 						root->sectorsize);
4153 				btrfs_set_file_extent_num_bytes(leaf, fi,
4154 							 extent_num_bytes);
4155 				num_dec = (orig_num_bytes -
4156 					   extent_num_bytes);
4157 				if (root->ref_cows && extent_start != 0)
4158 					inode_sub_bytes(inode, num_dec);
4159 				btrfs_mark_buffer_dirty(leaf);
4160 			} else {
4161 				extent_num_bytes =
4162 					btrfs_file_extent_disk_num_bytes(leaf,
4163 									 fi);
4164 				extent_offset = found_key.offset -
4165 					btrfs_file_extent_offset(leaf, fi);
4166 
4167 				/* FIXME blocksize != 4096 */
4168 				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4169 				if (extent_start != 0) {
4170 					found_extent = 1;
4171 					if (root->ref_cows)
4172 						inode_sub_bytes(inode, num_dec);
4173 				}
4174 			}
4175 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4176 			/*
4177 			 * we can't truncate inline items that have had
4178 			 * special encodings
4179 			 */
4180 			if (!del_item &&
4181 			    btrfs_file_extent_compression(leaf, fi) == 0 &&
4182 			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
4183 			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
4184 				u32 size = new_size - found_key.offset;
4185 
4186 				if (root->ref_cows) {
4187 					inode_sub_bytes(inode, item_end + 1 -
4188 							new_size);
4189 				}
4190 				size =
4191 				    btrfs_file_extent_calc_inline_size(size);
4192 				btrfs_truncate_item(root, path, size, 1);
4193 			} else if (root->ref_cows) {
4194 				inode_sub_bytes(inode, item_end + 1 -
4195 						found_key.offset);
4196 			}
4197 		}
4198 delete:
4199 		if (del_item) {
4200 			if (!pending_del_nr) {
4201 				/* no pending yet, add ourselves */
4202 				pending_del_slot = path->slots[0];
4203 				pending_del_nr = 1;
4204 			} else if (pending_del_nr &&
4205 				   path->slots[0] + 1 == pending_del_slot) {
4206 				/* hop on the pending chunk */
4207 				pending_del_nr++;
4208 				pending_del_slot = path->slots[0];
4209 			} else {
4210 				BUG();
4211 			}
4212 		} else {
4213 			break;
4214 		}
4215 		if (found_extent && (root->ref_cows ||
4216 				     root == root->fs_info->tree_root)) {
4217 			btrfs_set_path_blocking(path);
4218 			ret = btrfs_free_extent(trans, root, extent_start,
4219 						extent_num_bytes, 0,
4220 						btrfs_header_owner(leaf),
4221 						ino, extent_offset, 0);
4222 			BUG_ON(ret);
4223 		}
4224 
4225 		if (found_type == BTRFS_INODE_ITEM_KEY)
4226 			break;
4227 
4228 		if (path->slots[0] == 0 ||
4229 		    path->slots[0] != pending_del_slot) {
4230 			if (pending_del_nr) {
4231 				ret = btrfs_del_items(trans, root, path,
4232 						pending_del_slot,
4233 						pending_del_nr);
4234 				if (ret) {
4235 					btrfs_abort_transaction(trans,
4236 								root, ret);
4237 					goto error;
4238 				}
4239 				pending_del_nr = 0;
4240 			}
4241 			btrfs_release_path(path);
4242 			goto search_again;
4243 		} else {
4244 			path->slots[0]--;
4245 		}
4246 	}
4247 out:
4248 	if (pending_del_nr) {
4249 		ret = btrfs_del_items(trans, root, path, pending_del_slot,
4250 				      pending_del_nr);
4251 		if (ret)
4252 			btrfs_abort_transaction(trans, root, ret);
4253 	}
4254 error:
4255 	btrfs_free_path(path);
4256 	return err;
4257 }
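
/*
 * Illustrative sketch (hypothetical helper): dropping everything beyond a
 * new size, the way truncate uses the function above.  Passing
 * BTRFS_EXTENT_DATA_KEY as min_type keeps the inode item and xattrs;
 * passing 0 would delete every item the inode owns.
 */
static inline int example_drop_past_eof(struct btrfs_trans_handle *trans,
					struct inode *inode, u64 new_size)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	return btrfs_truncate_inode_items(trans, root, inode, new_size,
					  BTRFS_EXTENT_DATA_KEY);
}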
4258 
4259 /*
4260  * btrfs_truncate_page - read, zero a chunk and write a page
4261  * @inode - inode that we're zeroing
4262  * @from - the offset to start zeroing
4263  * @len - the length to zero, 0 to zero the entire range relative to the
4264  *	offset
4265  * @front - zero up to the offset instead of from the offset on
4266  *
4267  * This will find the page for the "from" offset, cow it and zero the
4268  * part we want.  This is used with truncate and hole punching.
4269  */
4270 int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
4271 			int front)
4272 {
4273 	struct address_space *mapping = inode->i_mapping;
4274 	struct btrfs_root *root = BTRFS_I(inode)->root;
4275 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4276 	struct btrfs_ordered_extent *ordered;
4277 	struct extent_state *cached_state = NULL;
4278 	char *kaddr;
4279 	u32 blocksize = root->sectorsize;
4280 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
4281 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
4282 	struct page *page;
4283 	gfp_t mask = btrfs_alloc_write_mask(mapping);
4284 	int ret = 0;
4285 	u64 page_start;
4286 	u64 page_end;
4287 
4288 	if ((offset & (blocksize - 1)) == 0 &&
4289 	    (!len || ((len & (blocksize - 1)) == 0)))
4290 		goto out;
4291 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
4292 	if (ret)
4293 		goto out;
4294 
4295 again:
4296 	page = find_or_create_page(mapping, index, mask);
4297 	if (!page) {
4298 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
4299 		ret = -ENOMEM;
4300 		goto out;
4301 	}
4302 
4303 	page_start = page_offset(page);
4304 	page_end = page_start + PAGE_CACHE_SIZE - 1;
4305 
4306 	if (!PageUptodate(page)) {
4307 		ret = btrfs_readpage(NULL, page);
4308 		lock_page(page);
4309 		if (page->mapping != mapping) {
4310 			unlock_page(page);
4311 			page_cache_release(page);
4312 			goto again;
4313 		}
4314 		if (!PageUptodate(page)) {
4315 			ret = -EIO;
4316 			goto out_unlock;
4317 		}
4318 	}
4319 	wait_on_page_writeback(page);
4320 
4321 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
4322 	set_page_extent_mapped(page);
4323 
4324 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
4325 	if (ordered) {
4326 		unlock_extent_cached(io_tree, page_start, page_end,
4327 				     &cached_state, GFP_NOFS);
4328 		unlock_page(page);
4329 		page_cache_release(page);
4330 		btrfs_start_ordered_extent(inode, ordered, 1);
4331 		btrfs_put_ordered_extent(ordered);
4332 		goto again;
4333 	}
4334 
4335 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
4336 			  EXTENT_DIRTY | EXTENT_DELALLOC |
4337 			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4338 			  0, 0, &cached_state, GFP_NOFS);
4339 
4340 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
4341 					&cached_state);
4342 	if (ret) {
4343 		unlock_extent_cached(io_tree, page_start, page_end,
4344 				     &cached_state, GFP_NOFS);
4345 		goto out_unlock;
4346 	}
4347 
4348 	if (offset != PAGE_CACHE_SIZE) {
4349 		if (!len)
4350 			len = PAGE_CACHE_SIZE - offset;
4351 		kaddr = kmap(page);
4352 		if (front)
4353 			memset(kaddr, 0, offset);
4354 		else
4355 			memset(kaddr + offset, 0, len);
4356 		flush_dcache_page(page);
4357 		kunmap(page);
4358 	}
4359 	ClearPageChecked(page);
4360 	set_page_dirty(page);
4361 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
4362 			     GFP_NOFS);
4363 
4364 out_unlock:
4365 	if (ret)
4366 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
4367 	unlock_page(page);
4368 	page_cache_release(page);
4369 out:
4370 	return ret;
4371 }
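
/*
 * Illustrative sketch (hypothetical helper): zeroing the partial block at
 * a new, non-block-aligned EOF so stale data past it cannot reappear, as
 * the truncate and hole punching paths do with the function above.
 */
static inline int example_zero_new_tail(struct inode *inode, loff_t newsize)
{
	/* len == 0: zero from newsize to the end of that block/page */
	return btrfs_truncate_page(inode, newsize, 0, 0);
}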
4372 
4373 /*
4374  * This function puts in dummy file extents for the area we're creating a hole
4375  * for.  So if we are truncating this file to a larger size we need to insert
4376  * these file extents so that btrfs_get_extent will return EXTENT_MAP_HOLE for
4377  * the range between oldsize and size.
4378  */
4379 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
4380 {
4381 	struct btrfs_trans_handle *trans;
4382 	struct btrfs_root *root = BTRFS_I(inode)->root;
4383 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4384 	struct extent_map *em = NULL;
4385 	struct extent_state *cached_state = NULL;
4386 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4387 	u64 hole_start = ALIGN(oldsize, root->sectorsize);
4388 	u64 block_end = ALIGN(size, root->sectorsize);
4389 	u64 last_byte;
4390 	u64 cur_offset;
4391 	u64 hole_size;
4392 	int err = 0;
4393 
4394 	if (size <= hole_start)
4395 		return 0;
4396 
4397 	while (1) {
4398 		struct btrfs_ordered_extent *ordered;
4399 		btrfs_wait_ordered_range(inode, hole_start,
4400 					 block_end - hole_start);
4401 		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
4402 				 &cached_state);
4403 		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
4404 		if (!ordered)
4405 			break;
4406 		unlock_extent_cached(io_tree, hole_start, block_end - 1,
4407 				     &cached_state, GFP_NOFS);
4408 		btrfs_put_ordered_extent(ordered);
4409 	}
4410 
4411 	cur_offset = hole_start;
4412 	while (1) {
4413 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4414 				block_end - cur_offset, 0);
4415 		if (IS_ERR(em)) {
4416 			err = PTR_ERR(em);
4417 			em = NULL;
4418 			break;
4419 		}
4420 		last_byte = min(extent_map_end(em), block_end);
4421 		last_byte = ALIGN(last_byte, root->sectorsize);
4422 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4423 			struct extent_map *hole_em;
4424 			hole_size = last_byte - cur_offset;
4425 
4426 			trans = btrfs_start_transaction(root, 3);
4427 			if (IS_ERR(trans)) {
4428 				err = PTR_ERR(trans);
4429 				break;
4430 			}
4431 
4432 			err = btrfs_drop_extents(trans, root, inode,
4433 						 cur_offset,
4434 						 cur_offset + hole_size, 1);
4435 			if (err) {
4436 				btrfs_abort_transaction(trans, root, err);
4437 				btrfs_end_transaction(trans, root);
4438 				break;
4439 			}
4440 
4441 			err = btrfs_insert_file_extent(trans, root,
4442 					btrfs_ino(inode), cur_offset, 0,
4443 					0, hole_size, 0, hole_size,
4444 					0, 0, 0);
4445 			if (err) {
4446 				btrfs_abort_transaction(trans, root, err);
4447 				btrfs_end_transaction(trans, root);
4448 				break;
4449 			}
4450 
4451 			btrfs_drop_extent_cache(inode, cur_offset,
4452 						cur_offset + hole_size - 1, 0);
4453 			hole_em = alloc_extent_map();
4454 			if (!hole_em) {
4455 				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4456 					&BTRFS_I(inode)->runtime_flags);
4457 				goto next;
4458 			}
4459 			hole_em->start = cur_offset;
4460 			hole_em->len = hole_size;
4461 			hole_em->orig_start = cur_offset;
4462 
4463 			hole_em->block_start = EXTENT_MAP_HOLE;
4464 			hole_em->block_len = 0;
4465 			hole_em->orig_block_len = 0;
4466 			hole_em->ram_bytes = hole_size;
4467 			hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
4468 			hole_em->compress_type = BTRFS_COMPRESS_NONE;
4469 			hole_em->generation = trans->transid;
4470 
4471 			while (1) {
4472 				write_lock(&em_tree->lock);
4473 				err = add_extent_mapping(em_tree, hole_em, 1);
4474 				write_unlock(&em_tree->lock);
4475 				if (err != -EEXIST)
4476 					break;
4477 				btrfs_drop_extent_cache(inode, cur_offset,
4478 							cur_offset +
4479 							hole_size - 1, 0);
4480 			}
4481 			free_extent_map(hole_em);
4482 next:
4483 			btrfs_update_inode(trans, root, inode);
4484 			btrfs_end_transaction(trans, root);
4485 		}
4486 		free_extent_map(em);
4487 		em = NULL;
4488 		cur_offset = last_byte;
4489 		if (cur_offset >= block_end)
4490 			break;
4491 	}
4492 
4493 	free_extent_map(em);
4494 	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
4495 			     GFP_NOFS);
4496 	return err;
4497 }
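
/*
 * Illustrative sketch (hypothetical helper): growing a file so reads of
 * the new range see a hole, as btrfs_setsize() below does when truncating
 * to a larger size.
 */
static inline int example_grow_with_hole(struct inode *inode, loff_t newsize)
{
	return btrfs_cont_expand(inode, i_size_read(inode), newsize);
}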
4498 
4499 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4500 {
4501 	struct btrfs_root *root = BTRFS_I(inode)->root;
4502 	struct btrfs_trans_handle *trans;
4503 	loff_t oldsize = i_size_read(inode);
4504 	loff_t newsize = attr->ia_size;
4505 	int mask = attr->ia_valid;
4506 	int ret;
4507 
4508 	if (newsize == oldsize)
4509 		return 0;
4510 
4511 	/*
4512 	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
4513 	 * special case where we need to update the times despite not having
4514 	 * these flags set.  For all other operations the VFS set these flags
4515 	 * explicitly if it wants a timestamp update.
4516 	 */
4517 	if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME))))
4518 		inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
4519 
4520 	if (newsize > oldsize) {
4521 		truncate_pagecache(inode, oldsize, newsize);
4522 		ret = btrfs_cont_expand(inode, oldsize, newsize);
4523 		if (ret)
4524 			return ret;
4525 
4526 		trans = btrfs_start_transaction(root, 1);
4527 		if (IS_ERR(trans))
4528 			return PTR_ERR(trans);
4529 
4530 		i_size_write(inode, newsize);
4531 		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
4532 		ret = btrfs_update_inode(trans, root, inode);
4533 		btrfs_end_transaction(trans, root);
4534 	} else {
4535 
4536 		/*
4537 		 * We're truncating a file that used to have good data down to
4538 		 * zero. Make sure it gets into the ordered flush list so that
4539 		 * any new writes get down to disk quickly.
4540 		 */
4541 		if (newsize == 0)
4542 			set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
4543 				&BTRFS_I(inode)->runtime_flags);
4544 
4545 		/*
4546 		 * 1 for the orphan item we're going to add
4547 		 * 1 for the orphan item deletion.
4548 		 */
4549 		trans = btrfs_start_transaction(root, 2);
4550 		if (IS_ERR(trans))
4551 			return PTR_ERR(trans);
4552 
4553 		/*
4554 		 * We need to do this in case we fail at _any_ point during the
4555 		 * actual truncate.  Once we do the truncate_setsize we could
4556 		 * invalidate pages which forces any outstanding ordered io to
4557 		 * be instantly completed which will give us extents that need
4558 		 * to be truncated.  If we fail to get an orphan inode down we
4559 		 * could have left over extents that were never meant to live,
4560 		 * so we need to guarantee from this point on that everything
4561 		 * will be consistent.
4562 		 */
4563 		ret = btrfs_orphan_add(trans, inode);
4564 		btrfs_end_transaction(trans, root);
4565 		if (ret)
4566 			return ret;
4567 
4568 		/* we don't support swapfiles, so vmtruncate shouldn't fail */
4569 		truncate_setsize(inode, newsize);
4570 
4571 		/* Disable nonlocked read DIO to avoid an endless truncate */
4572 		btrfs_inode_block_unlocked_dio(inode);
4573 		inode_dio_wait(inode);
4574 		btrfs_inode_resume_unlocked_dio(inode);
4575 
4576 		ret = btrfs_truncate(inode);
4577 		if (ret && inode->i_nlink)
4578 			btrfs_orphan_del(NULL, inode);
4579 	}
4580 
4581 	return ret;
4582 }
4583 
4584 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
4585 {
4586 	struct inode *inode = dentry->d_inode;
4587 	struct btrfs_root *root = BTRFS_I(inode)->root;
4588 	int err;
4589 
4590 	if (btrfs_root_readonly(root))
4591 		return -EROFS;
4592 
4593 	err = inode_change_ok(inode, attr);
4594 	if (err)
4595 		return err;
4596 
4597 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
4598 		err = btrfs_setsize(inode, attr);
4599 		if (err)
4600 			return err;
4601 	}
4602 
4603 	if (attr->ia_valid) {
4604 		setattr_copy(inode, attr);
4605 		inode_inc_iversion(inode);
4606 		err = btrfs_dirty_inode(inode);
4607 
4608 		if (!err && attr->ia_valid & ATTR_MODE)
4609 			err = btrfs_acl_chmod(inode);
4610 	}
4611 
4612 	return err;
4613 }
4614 
4615 void btrfs_evict_inode(struct inode *inode)
4616 {
4617 	struct btrfs_trans_handle *trans;
4618 	struct btrfs_root *root = BTRFS_I(inode)->root;
4619 	struct btrfs_block_rsv *rsv, *global_rsv;
4620 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
4621 	int ret;
4622 
4623 	trace_btrfs_inode_evict(inode);
4624 
4625 	truncate_inode_pages(&inode->i_data, 0);
4626 	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
4627 			       btrfs_is_free_space_inode(inode)))
4628 		goto no_delete;
4629 
4630 	if (is_bad_inode(inode)) {
4631 		btrfs_orphan_del(NULL, inode);
4632 		goto no_delete;
4633 	}
4634 	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
4635 	btrfs_wait_ordered_range(inode, 0, (u64)-1);
4636 
4637 	if (root->fs_info->log_root_recovering) {
4638 		BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
4639 				 &BTRFS_I(inode)->runtime_flags));
4640 		goto no_delete;
4641 	}
4642 
4643 	if (inode->i_nlink > 0) {
4644 		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
4645 		goto no_delete;
4646 	}
4647 
4648 	ret = btrfs_commit_inode_delayed_inode(inode);
4649 	if (ret) {
4650 		btrfs_orphan_del(NULL, inode);
4651 		goto no_delete;
4652 	}
4653 
4654 	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
4655 	if (!rsv) {
4656 		btrfs_orphan_del(NULL, inode);
4657 		goto no_delete;
4658 	}
4659 	rsv->size = min_size;
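	/*
	 * failfast lets the truncate below bail out with -ENOSPC as soon as
	 * this reservation runs dry, so the loop can refill it (or steal
	 * from the global reserve) and retry.
	 */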
4660 	rsv->failfast = 1;
4661 	global_rsv = &root->fs_info->global_block_rsv;
4662 
4663 	btrfs_i_size_write(inode, 0);
4664 
4665 	/*
4666 	 * This is a bit simpler than btrfs_truncate since we've already
4667 	 * reserved our space for our orphan item in the unlink, so we just
4668 	 * need to reserve some slack space in case we add bytes and update
4669 	 * inode item when doing the truncate.
4670 	 */
4671 	while (1) {
4672 		ret = btrfs_block_rsv_refill(root, rsv, min_size,
4673 					     BTRFS_RESERVE_FLUSH_LIMIT);
4674 
4675 		/*
4676 		 * Try to steal from the global reserve since we will
4677 		 * likely not use this space anyway; we want to try as
4678 		 * hard as possible to get this to work.
4679 		 */
4680 		if (ret)
4681 			ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
4682 
4683 		if (ret) {
4684 			btrfs_warn(root->fs_info,
4685 				"Could not get space for a delete, will truncate on mount %d",
4686 				ret);
4687 			btrfs_orphan_del(NULL, inode);
4688 			btrfs_free_block_rsv(root, rsv);
4689 			goto no_delete;
4690 		}
4691 
4692 		trans = btrfs_join_transaction(root);
4693 		if (IS_ERR(trans)) {
4694 			btrfs_orphan_del(NULL, inode);
4695 			btrfs_free_block_rsv(root, rsv);
4696 			goto no_delete;
4697 		}
4698 
4699 		trans->block_rsv = rsv;
4700 
4701 		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
4702 		if (ret != -ENOSPC)
4703 			break;
4704 
4705 		trans->block_rsv = &root->fs_info->trans_block_rsv;
4706 		btrfs_end_transaction(trans, root);
4707 		trans = NULL;
4708 		btrfs_btree_balance_dirty(root);
4709 	}
4710 
4711 	btrfs_free_block_rsv(root, rsv);
4712 
4713 	if (ret == 0) {
4714 		trans->block_rsv = root->orphan_block_rsv;
4715 		ret = btrfs_orphan_del(trans, inode);
4716 		BUG_ON(ret);
4717 	}
4718 
4719 	trans->block_rsv = &root->fs_info->trans_block_rsv;
4720 	if (!(root == root->fs_info->tree_root ||
4721 	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
4722 		btrfs_return_ino(root, btrfs_ino(inode));
4723 
4724 	btrfs_end_transaction(trans, root);
4725 	btrfs_btree_balance_dirty(root);
4726 no_delete:
4727 	clear_inode(inode);
4728 	return;
4729 }
4730 
4731 /*
4732  * this returns the key found in the dir entry in the location pointer.
4733  * If no dir entries were found, location->objectid is 0.
4734  */
4735 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
4736 			       struct btrfs_key *location)
4737 {
4738 	const char *name = dentry->d_name.name;
4739 	int namelen = dentry->d_name.len;
4740 	struct btrfs_dir_item *di;
4741 	struct btrfs_path *path;
4742 	struct btrfs_root *root = BTRFS_I(dir)->root;
4743 	int ret = 0;
4744 
4745 	path = btrfs_alloc_path();
4746 	if (!path)
4747 		return -ENOMEM;
4748 
4749 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
4750 				    namelen, 0);
4751 	if (IS_ERR(di))
4752 		ret = PTR_ERR(di);
4753 
4754 	if (IS_ERR_OR_NULL(di))
4755 		goto out_err;
4756 
4757 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
4758 out:
4759 	btrfs_free_path(path);
4760 	return ret;
4761 out_err:
4762 	location->objectid = 0;
4763 	goto out;
4764 }
4765 
4766 /*
4767  * when we hit a tree root in a directory, the btrfs part of the inode
4768  * needs to be changed to reflect the root directory of the tree root.  This
4769  * is kind of like crossing a mount point.
4770  */
4771 static int fixup_tree_root_location(struct btrfs_root *root,
4772 				    struct inode *dir,
4773 				    struct dentry *dentry,
4774 				    struct btrfs_key *location,
4775 				    struct btrfs_root **sub_root)
4776 {
4777 	struct btrfs_path *path;
4778 	struct btrfs_root *new_root;
4779 	struct btrfs_root_ref *ref;
4780 	struct extent_buffer *leaf;
4781 	int ret;
4782 	int err = 0;
4783 
4784 	path = btrfs_alloc_path();
4785 	if (!path) {
4786 		err = -ENOMEM;
4787 		goto out;
4788 	}
4789 
4790 	err = -ENOENT;
4791 	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
4792 				  BTRFS_I(dir)->root->root_key.objectid,
4793 				  location->objectid);
4794 	if (ret) {
4795 		if (ret < 0)
4796 			err = ret;
4797 		goto out;
4798 	}
4799 
4800 	leaf = path->nodes[0];
4801 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
4802 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
4803 	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
4804 		goto out;
4805 
4806 	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
4807 				   (unsigned long)(ref + 1),
4808 				   dentry->d_name.len);
4809 	if (ret)
4810 		goto out;
4811 
4812 	btrfs_release_path(path);
4813 
4814 	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
4815 	if (IS_ERR(new_root)) {
4816 		err = PTR_ERR(new_root);
4817 		goto out;
4818 	}
4819 
4820 	if (btrfs_root_refs(&new_root->root_item) == 0) {
4821 		err = -ENOENT;
4822 		goto out;
4823 	}
4824 
4825 	*sub_root = new_root;
4826 	location->objectid = btrfs_root_dirid(&new_root->root_item);
4827 	location->type = BTRFS_INODE_ITEM_KEY;
4828 	location->offset = 0;
4829 	err = 0;
4830 out:
4831 	btrfs_free_path(path);
4832 	return err;
4833 }
4834 
4835 static void inode_tree_add(struct inode *inode)
4836 {
4837 	struct btrfs_root *root = BTRFS_I(inode)->root;
4838 	struct btrfs_inode *entry;
4839 	struct rb_node **p;
4840 	struct rb_node *parent;
4841 	u64 ino = btrfs_ino(inode);
4842 again:
4843 	p = &root->inode_tree.rb_node;
4844 	parent = NULL;
4845 
4846 	if (inode_unhashed(inode))
4847 		return;
4848 
4849 	spin_lock(&root->inode_lock);
4850 	while (*p) {
4851 		parent = *p;
4852 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
4853 
4854 		if (ino < btrfs_ino(&entry->vfs_inode))
4855 			p = &parent->rb_left;
4856 		else if (ino > btrfs_ino(&entry->vfs_inode))
4857 			p = &parent->rb_right;
4858 		else {
4859 			WARN_ON(!(entry->vfs_inode.i_state &
4860 				  (I_WILL_FREE | I_FREEING)));
4861 			rb_erase(parent, &root->inode_tree);
4862 			RB_CLEAR_NODE(parent);
4863 			spin_unlock(&root->inode_lock);
4864 			goto again;
4865 		}
4866 	}
4867 	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
4868 	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
4869 	spin_unlock(&root->inode_lock);
4870 }
4871 
4872 static void inode_tree_del(struct inode *inode)
4873 {
4874 	struct btrfs_root *root = BTRFS_I(inode)->root;
4875 	int empty = 0;
4876 
4877 	spin_lock(&root->inode_lock);
4878 	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
4879 		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
4880 		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
4881 		empty = RB_EMPTY_ROOT(&root->inode_tree);
4882 	}
4883 	spin_unlock(&root->inode_lock);
4884 
4885 	/*
4886 	 * Free space cache has inodes in the tree root, but the tree root has a
4887 	 * root_refs of 0, so this could end up dropping the tree root as a
4888  * snapshot, so we need the extra root != fs_info->tree_root check to
4889 	 * make sure we don't drop it.
4890 	 */
4891 	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
4892 	    root != root->fs_info->tree_root) {
4893 		synchronize_srcu(&root->fs_info->subvol_srcu);
4894 		spin_lock(&root->inode_lock);
4895 		empty = RB_EMPTY_ROOT(&root->inode_tree);
4896 		spin_unlock(&root->inode_lock);
4897 		if (empty)
4898 			btrfs_add_dead_root(root);
4899 	}
4900 }
4901 
4902 void btrfs_invalidate_inodes(struct btrfs_root *root)
4903 {
4904 	struct rb_node *node;
4905 	struct rb_node *prev;
4906 	struct btrfs_inode *entry;
4907 	struct inode *inode;
4908 	u64 objectid = 0;
4909 
4910 	WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4911 
4912 	spin_lock(&root->inode_lock);
4913 again:
4914 	node = root->inode_tree.rb_node;
4915 	prev = NULL;
4916 	while (node) {
4917 		prev = node;
4918 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4919 
4920 		if (objectid < btrfs_ino(&entry->vfs_inode))
4921 			node = node->rb_left;
4922 		else if (objectid > btrfs_ino(&entry->vfs_inode))
4923 			node = node->rb_right;
4924 		else
4925 			break;
4926 	}
4927 	if (!node) {
4928 		while (prev) {
4929 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
4930 			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
4931 				node = prev;
4932 				break;
4933 			}
4934 			prev = rb_next(prev);
4935 		}
4936 	}
4937 	while (node) {
4938 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4939 		objectid = btrfs_ino(&entry->vfs_inode) + 1;
4940 		inode = igrab(&entry->vfs_inode);
4941 		if (inode) {
4942 			spin_unlock(&root->inode_lock);
4943 			if (atomic_read(&inode->i_count) > 1)
4944 				d_prune_aliases(inode);
4945 			/*
4946 			 * btrfs_drop_inode will have it removed from
4947 			 * the inode cache when its usage count
4948 			 * hits zero.
4949 			 */
4950 			iput(inode);
4951 			cond_resched();
4952 			spin_lock(&root->inode_lock);
4953 			goto again;
4954 		}
4955 
4956 		if (cond_resched_lock(&root->inode_lock))
4957 			goto again;
4958 
4959 		node = rb_next(node);
4960 	}
4961 	spin_unlock(&root->inode_lock);
4962 }
4963 
4964 static int btrfs_init_locked_inode(struct inode *inode, void *p)
4965 {
4966 	struct btrfs_iget_args *args = p;
4967 	inode->i_ino = args->ino;
4968 	BTRFS_I(inode)->root = args->root;
4969 	return 0;
4970 }
4971 
4972 static int btrfs_find_actor(struct inode *inode, void *opaque)
4973 {
4974 	struct btrfs_iget_args *args = opaque;
4975 	return args->ino == btrfs_ino(inode) &&
4976 		args->root == BTRFS_I(inode)->root;
4977 }
4978 
4979 static struct inode *btrfs_iget_locked(struct super_block *s,
4980 				       u64 objectid,
4981 				       struct btrfs_root *root)
4982 {
4983 	struct inode *inode;
4984 	struct btrfs_iget_args args;
4985 	args.ino = objectid;
4986 	args.root = root;
4987 
4988 	inode = iget5_locked(s, objectid, btrfs_find_actor,
4989 			     btrfs_init_locked_inode,
4990 			     (void *)&args);
4991 	return inode;
4992 }
4993 
4994 /* Get an inode object given its location and corresponding root.
4995  * Returns in *new whether the inode was read from disk.
4996  */
4997 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
4998 			 struct btrfs_root *root, int *new)
4999 {
5000 	struct inode *inode;
5001 
5002 	inode = btrfs_iget_locked(s, location->objectid, root);
5003 	if (!inode)
5004 		return ERR_PTR(-ENOMEM);
5005 
5006 	if (inode->i_state & I_NEW) {
5007 		BTRFS_I(inode)->root = root;
5008 		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
5009 		btrfs_read_locked_inode(inode);
5010 		if (!is_bad_inode(inode)) {
5011 			inode_tree_add(inode);
5012 			unlock_new_inode(inode);
5013 			if (new)
5014 				*new = 1;
5015 		} else {
5016 			unlock_new_inode(inode);
5017 			iput(inode);
5018 			inode = ERR_PTR(-ESTALE);
5019 		}
5020 	}
5021 
5022 	return inode;
5023 }
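
/*
 * Illustrative sketch (hypothetical helper): fetching the VFS inode for a
 * known inode number in a given root via btrfs_iget() above.
 */
static inline struct inode *example_iget_ino(struct super_block *s,
					     struct btrfs_root *root, u64 ino)
{
	struct btrfs_key location;

	location.objectid = ino;
	location.type = BTRFS_INODE_ITEM_KEY;
	location.offset = 0;
	return btrfs_iget(s, &location, root, NULL);
}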
5024 
5025 static struct inode *new_simple_dir(struct super_block *s,
5026 				    struct btrfs_key *key,
5027 				    struct btrfs_root *root)
5028 {
5029 	struct inode *inode = new_inode(s);
5030 
5031 	if (!inode)
5032 		return ERR_PTR(-ENOMEM);
5033 
5034 	BTRFS_I(inode)->root = root;
5035 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5036 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5037 
5038 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5039 	inode->i_op = &btrfs_dir_ro_inode_operations;
5040 	inode->i_fop = &simple_dir_operations;
5041 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5042 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
5043 
5044 	return inode;
5045 }
5046 
5047 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5048 {
5049 	struct inode *inode;
5050 	struct btrfs_root *root = BTRFS_I(dir)->root;
5051 	struct btrfs_root *sub_root = root;
5052 	struct btrfs_key location;
5053 	int index;
5054 	int ret = 0;
5055 
5056 	if (dentry->d_name.len > BTRFS_NAME_LEN)
5057 		return ERR_PTR(-ENAMETOOLONG);
5058 
5059 	ret = btrfs_inode_by_name(dir, dentry, &location);
5060 	if (ret < 0)
5061 		return ERR_PTR(ret);
5062 
5063 	if (location.objectid == 0)
5064 		return NULL;
5065 
5066 	if (location.type == BTRFS_INODE_ITEM_KEY) {
5067 		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
5068 		return inode;
5069 	}
5070 
5071 	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
5072 
5073 	index = srcu_read_lock(&root->fs_info->subvol_srcu);
5074 	ret = fixup_tree_root_location(root, dir, dentry,
5075 				       &location, &sub_root);
5076 	if (ret < 0) {
5077 		if (ret != -ENOENT)
5078 			inode = ERR_PTR(ret);
5079 		else
5080 			inode = new_simple_dir(dir->i_sb, &location, sub_root);
5081 	} else {
5082 		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
5083 	}
5084 	srcu_read_unlock(&root->fs_info->subvol_srcu, index);
5085 
5086 	if (!IS_ERR(inode) && root != sub_root) {
5087 		down_read(&root->fs_info->cleanup_work_sem);
5088 		if (!(inode->i_sb->s_flags & MS_RDONLY))
5089 			ret = btrfs_orphan_cleanup(sub_root);
5090 		up_read(&root->fs_info->cleanup_work_sem);
5091 		if (ret)
5092 			inode = ERR_PTR(ret);
5093 	}
5094 
5095 	return inode;
5096 }
5097 
5098 static int btrfs_dentry_delete(const struct dentry *dentry)
5099 {
5100 	struct btrfs_root *root;
5101 	struct inode *inode = dentry->d_inode;
5102 
5103 	if (!inode && !IS_ROOT(dentry))
5104 		inode = dentry->d_parent->d_inode;
5105 
5106 	if (inode) {
5107 		root = BTRFS_I(inode)->root;
5108 		if (btrfs_root_refs(&root->root_item) == 0)
5109 			return 1;
5110 
5111 		if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5112 			return 1;
5113 	}
5114 	return 0;
5115 }
5116 
5117 static void btrfs_dentry_release(struct dentry *dentry)
5118 {
5119 	kfree(dentry->d_fsdata);
5121 }
5122 
5123 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5124 				   unsigned int flags)
5125 {
5126 	struct dentry *ret;
5127 
5128 	ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
5129 	return ret;
5130 }
5131 
5132 unsigned char btrfs_filetype_table[] = {
5133 	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
5134 };
5135 
5136 static int btrfs_real_readdir(struct file *filp, void *dirent,
5137 			      filldir_t filldir)
5138 {
5139 	struct inode *inode = file_inode(filp);
5140 	struct btrfs_root *root = BTRFS_I(inode)->root;
5141 	struct btrfs_item *item;
5142 	struct btrfs_dir_item *di;
5143 	struct btrfs_key key;
5144 	struct btrfs_key found_key;
5145 	struct btrfs_path *path;
5146 	struct list_head ins_list;
5147 	struct list_head del_list;
5148 	int ret;
5149 	struct extent_buffer *leaf;
5150 	int slot;
5151 	unsigned char d_type;
5152 	int over = 0;
5153 	u32 di_cur;
5154 	u32 di_total;
5155 	u32 di_len;
5156 	int key_type = BTRFS_DIR_INDEX_KEY;
5157 	char tmp_name[32];
5158 	char *name_ptr;
5159 	int name_len;
5160 	int is_curr = 0;	/* filp->f_pos points to the current index? */
5161 
5162 	/* FIXME, use a real flag for deciding about the key type */
5163 	if (root->fs_info->tree_root == root)
5164 		key_type = BTRFS_DIR_ITEM_KEY;
5165 
5166 	/* special case for "." */
5167 	if (filp->f_pos == 0) {
5168 		over = filldir(dirent, ".", 1,
5169 			       filp->f_pos, btrfs_ino(inode), DT_DIR);
5170 		if (over)
5171 			return 0;
5172 		filp->f_pos = 1;
5173 	}
5174 	/* special case for .., just use the back ref */
5175 	if (filp->f_pos == 1) {
5176 		u64 pino = parent_ino(filp->f_path.dentry);
5177 		over = filldir(dirent, "..", 2,
5178 			       filp->f_pos, pino, DT_DIR);
5179 		if (over)
5180 			return 0;
5181 		filp->f_pos = 2;
5182 	}
5183 	path = btrfs_alloc_path();
5184 	if (!path)
5185 		return -ENOMEM;
5186 
5187 	path->reada = 1;
5188 
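	/*
	 * Dir index items may still sit in the delayed-inode lists rather
	 * than in the btree, so grab the pending insertions and deletions
	 * up front and merge them into the results below.
	 */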
5189 	if (key_type == BTRFS_DIR_INDEX_KEY) {
5190 		INIT_LIST_HEAD(&ins_list);
5191 		INIT_LIST_HEAD(&del_list);
5192 		btrfs_get_delayed_items(inode, &ins_list, &del_list);
5193 	}
5194 
5195 	btrfs_set_key_type(&key, key_type);
5196 	key.offset = filp->f_pos;
5197 	key.objectid = btrfs_ino(inode);
5198 
5199 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5200 	if (ret < 0)
5201 		goto err;
5202 
5203 	while (1) {
5204 		leaf = path->nodes[0];
5205 		slot = path->slots[0];
5206 		if (slot >= btrfs_header_nritems(leaf)) {
5207 			ret = btrfs_next_leaf(root, path);
5208 			if (ret < 0)
5209 				goto err;
5210 			else if (ret > 0)
5211 				break;
5212 			continue;
5213 		}
5214 
5215 		item = btrfs_item_nr(leaf, slot);
5216 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
5217 
5218 		if (found_key.objectid != key.objectid)
5219 			break;
5220 		if (btrfs_key_type(&found_key) != key_type)
5221 			break;
5222 		if (found_key.offset < filp->f_pos)
5223 			goto next;
5224 		if (key_type == BTRFS_DIR_INDEX_KEY &&
5225 		    btrfs_should_delete_dir_index(&del_list,
5226 						  found_key.offset))
5227 			goto next;
5228 
5229 		filp->f_pos = found_key.offset;
5230 		is_curr = 1;
5231 
5232 		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
5233 		di_cur = 0;
5234 		di_total = btrfs_item_size(leaf, item);
5235 
5236 		while (di_cur < di_total) {
5237 			struct btrfs_key location;
5238 
5239 			if (verify_dir_item(root, leaf, di))
5240 				break;
5241 
5242 			name_len = btrfs_dir_name_len(leaf, di);
5243 			if (name_len <= sizeof(tmp_name)) {
5244 				name_ptr = tmp_name;
5245 			} else {
5246 				name_ptr = kmalloc(name_len, GFP_NOFS);
5247 				if (!name_ptr) {
5248 					ret = -ENOMEM;
5249 					goto err;
5250 				}
5251 			}
5252 			read_extent_buffer(leaf, name_ptr,
5253 					   (unsigned long)(di + 1), name_len);
5254 
5255 			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
5256 			btrfs_dir_item_key_to_cpu(leaf, di, &location);
5257 
5258 
5259 			/* is this a reference to our own snapshot? If so
5260 			 * skip it.
5261 			 *
5262 			 * In contrast to old kernels, we insert the snapshot's
5263 			 * dir item and dir index after it has been created, so
5264 			 * we won't find a reference to our own snapshot. We
5265 			 * still keep the following code for backward
5266 			 * compatibility.
5267 			 */
5268 			if (location.type == BTRFS_ROOT_ITEM_KEY &&
5269 			    location.objectid == root->root_key.objectid) {
5270 				over = 0;
5271 				goto skip;
5272 			}
5273 			over = filldir(dirent, name_ptr, name_len,
5274 				       found_key.offset, location.objectid,
5275 				       d_type);
5276 
5277 skip:
5278 			if (name_ptr != tmp_name)
5279 				kfree(name_ptr);
5280 
5281 			if (over)
5282 				goto nopos;
5283 			di_len = btrfs_dir_name_len(leaf, di) +
5284 				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
5285 			di_cur += di_len;
5286 			di = (struct btrfs_dir_item *)((char *)di + di_len);
5287 		}
5288 next:
5289 		path->slots[0]++;
5290 	}
5291 
5292 	if (key_type == BTRFS_DIR_INDEX_KEY) {
5293 		if (is_curr)
5294 			filp->f_pos++;
5295 		ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
5296 						      &ins_list);
5297 		if (ret)
5298 			goto nopos;
5299 	}
5300 
5301 	/* Reached end of directory/root. Bump pos past the last item. */
5302 	if (key_type == BTRFS_DIR_INDEX_KEY)
5303 		/*
5304 		 * 32-bit glibc will use getdents64, but then strtol -
5305 		 * so the last number we can serve is this.
5306 		 */
5307 		filp->f_pos = 0x7fffffff;
5308 	else
5309 		filp->f_pos++;
5310 nopos:
5311 	ret = 0;
5312 err:
5313 	if (key_type == BTRFS_DIR_INDEX_KEY)
5314 		btrfs_put_delayed_items(&ins_list, &del_list);
5315 	btrfs_free_path(path);
5316 	return ret;
5317 }
5318 
5319 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
5320 {
5321 	struct btrfs_root *root = BTRFS_I(inode)->root;
5322 	struct btrfs_trans_handle *trans;
5323 	int ret = 0;
5324 	bool nolock = false;
5325 
5326 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5327 		return 0;
5328 
5329 	if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
5330 		nolock = true;
5331 
5332 	if (wbc->sync_mode == WB_SYNC_ALL) {
5333 		if (nolock)
5334 			trans = btrfs_join_transaction_nolock(root);
5335 		else
5336 			trans = btrfs_join_transaction(root);
5337 		if (IS_ERR(trans))
5338 			return PTR_ERR(trans);
5339 		ret = btrfs_commit_transaction(trans, root);
5340 	}
5341 	return ret;
5342 }
5343 
5344 /*
5345  * This is somewhat expensive, updating the tree every time the
5346  * inode changes.  But, it is most likely to find the inode in cache.
5347  * FIXME, needs more benchmarking... there are no reasons other than performance
5348  * to keep or drop this code.
5349  */
5350 static int btrfs_dirty_inode(struct inode *inode)
5351 {
5352 	struct btrfs_root *root = BTRFS_I(inode)->root;
5353 	struct btrfs_trans_handle *trans;
5354 	int ret;
5355 
5356 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5357 		return 0;
5358 
5359 	trans = btrfs_join_transaction(root);
5360 	if (IS_ERR(trans))
5361 		return PTR_ERR(trans);
5362 
5363 	ret = btrfs_update_inode(trans, root, inode);
5364 	if (ret == -ENOSPC) {
5365 		/* whoops, let's try again with a full transaction */
5366 		btrfs_end_transaction(trans, root);
5367 		trans = btrfs_start_transaction(root, 1);
5368 		if (IS_ERR(trans))
5369 			return PTR_ERR(trans);
5370 
5371 		ret = btrfs_update_inode(trans, root, inode);
5372 	}
5373 	btrfs_end_transaction(trans, root);
5374 	if (BTRFS_I(inode)->delayed_node)
5375 		btrfs_balance_delayed_items(root);
5376 
5377 	return ret;
5378 }
5379 
5380 /*
5381  * This is a copy of file_update_time.  We need it so we can return an error on
5382  * ENOSPC for updating the inode in the case of file write and mmap writes.
5383  */
5384 static int btrfs_update_time(struct inode *inode, struct timespec *now,
5385 			     int flags)
5386 {
5387 	struct btrfs_root *root = BTRFS_I(inode)->root;
5388 
5389 	if (btrfs_root_readonly(root))
5390 		return -EROFS;
5391 
5392 	if (flags & S_VERSION)
5393 		inode_inc_iversion(inode);
5394 	if (flags & S_CTIME)
5395 		inode->i_ctime = *now;
5396 	if (flags & S_MTIME)
5397 		inode->i_mtime = *now;
5398 	if (flags & S_ATIME)
5399 		inode->i_atime = *now;
5400 	return btrfs_dirty_inode(inode);
5401 }
5402 
5403 /*
5404  * find the highest existing sequence number in a directory
5405  * and then set the in-memory index_cnt variable to the first
5406  * free sequence number
5407  */
5408 static int btrfs_set_inode_index_count(struct inode *inode)
5409 {
5410 	struct btrfs_root *root = BTRFS_I(inode)->root;
5411 	struct btrfs_key key, found_key;
5412 	struct btrfs_path *path;
5413 	struct extent_buffer *leaf;
5414 	int ret;
5415 
5416 	key.objectid = btrfs_ino(inode);
5417 	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
5418 	key.offset = (u64)-1;
5419 
5420 	path = btrfs_alloc_path();
5421 	if (!path)
5422 		return -ENOMEM;
5423 
5424 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5425 	if (ret < 0)
5426 		goto out;
5427 	/* FIXME: we should be able to handle this */
5428 	if (ret == 0)
5429 		goto out;
5430 	ret = 0;
5431 
5432 	/*
5433 	 * MAGIC NUMBER EXPLANATION:
5434 	 * since we search a directory based on f_pos, and '.' and '..'
5435 	 * have f_pos of 0 and 1 respectively, everybody else has to
5436 	 * start at 2
5437 	 */
5438 	if (path->slots[0] == 0) {
5439 		BTRFS_I(inode)->index_cnt = 2;
5440 		goto out;
5441 	}
5442 
5443 	path->slots[0]--;
5444 
5445 	leaf = path->nodes[0];
5446 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5447 
5448 	if (found_key.objectid != btrfs_ino(inode) ||
5449 	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
5450 		BTRFS_I(inode)->index_cnt = 2;
5451 		goto out;
5452 	}
5453 
5454 	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
5455 out:
5456 	btrfs_free_path(path);
5457 	return ret;
5458 }
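
/*
 * Illustrative example: if the highest existing BTRFS_DIR_INDEX_KEY
 * offset in the directory is 57, the search above lands on that item
 * and index_cnt becomes 58, so the next new entry is handed index 58.
 */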
5459 
5460 /*
5461  * helper to find a free sequence number in a given directory.  This current
5462  * code is very simple, later versions will do smarter things in the btree
5463  */
5464 int btrfs_set_inode_index(struct inode *dir, u64 *index)
5465 {
5466 	int ret = 0;
5467 
5468 	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
5469 		ret = btrfs_inode_delayed_dir_index_count(dir);
5470 		if (ret) {
5471 			ret = btrfs_set_inode_index_count(dir);
5472 			if (ret)
5473 				return ret;
5474 		}
5475 	}
5476 
5477 	*index = BTRFS_I(dir)->index_cnt;
5478 	BTRFS_I(dir)->index_cnt++;
5479 
5480 	return ret;
5481 }
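
/*
 * Typical caller pattern (sketch): grab the next free index before
 * inserting the directory entries, e.g.
 *
 *	u64 index;
 *	ret = btrfs_set_inode_index(dir, &index);
 *	if (ret)
 *		return ret;
 *	... insert the DIR_ITEM/DIR_INDEX items keyed by 'index' ...
 *
 * btrfs_new_inode() and btrfs_link() below both follow this pattern.
 */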
5482 
5483 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
5484 				     struct btrfs_root *root,
5485 				     struct inode *dir,
5486 				     const char *name, int name_len,
5487 				     u64 ref_objectid, u64 objectid,
5488 				     umode_t mode, u64 *index)
5489 {
5490 	struct inode *inode;
5491 	struct btrfs_inode_item *inode_item;
5492 	struct btrfs_key *location;
5493 	struct btrfs_path *path;
5494 	struct btrfs_inode_ref *ref;
5495 	struct btrfs_key key[2];
5496 	u32 sizes[2];
5497 	unsigned long ptr;
5498 	int ret;
5499 	int owner;
5500 
5501 	path = btrfs_alloc_path();
5502 	if (!path)
5503 		return ERR_PTR(-ENOMEM);
5504 
5505 	inode = new_inode(root->fs_info->sb);
5506 	if (!inode) {
5507 		btrfs_free_path(path);
5508 		return ERR_PTR(-ENOMEM);
5509 	}
5510 
5511 	/*
5512 	 * we have to initialize this early, so we can reclaim the inode
5513 	 * number if we fail afterwards in this function.
5514 	 */
5515 	inode->i_ino = objectid;
5516 
5517 	if (dir) {
5518 		trace_btrfs_inode_request(dir);
5519 
5520 		ret = btrfs_set_inode_index(dir, index);
5521 		if (ret) {
5522 			btrfs_free_path(path);
5523 			iput(inode);
5524 			return ERR_PTR(ret);
5525 		}
5526 	}
5527 	/*
5528 	 * index_cnt is ignored for everything but a dir,
5529 	 * btrfs_set_inode_index_count has an explanation for the magic
5530 	 * number
5531 	 */
5532 	BTRFS_I(inode)->index_cnt = 2;
5533 	BTRFS_I(inode)->root = root;
5534 	BTRFS_I(inode)->generation = trans->transid;
5535 	inode->i_generation = BTRFS_I(inode)->generation;
5536 
5537 	/*
5538 	 * We could have gotten an inode number from somebody who was fsynced
5539 	 * and then removed in this same transaction, so let's just set full
5540 	 * sync since it will be a full sync anyway and this will blow away the
5541 	 * old info in the log.
5542 	 */
5543 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
5544 
5545 	if (S_ISDIR(mode))
5546 		owner = 0;
5547 	else
5548 		owner = 1;
5549 
5550 	key[0].objectid = objectid;
5551 	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
5552 	key[0].offset = 0;
5553 
5554 	/*
5555 	 * Start new inodes with an inode_ref. This is slightly more
5556 	 * efficient for small numbers of hard links since they will
5557 	 * be packed into one item. Extended refs will kick in if we
5558 	 * add more hard links than can fit in the ref item.
5559 	 */
5560 	key[1].objectid = objectid;
5561 	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
5562 	key[1].offset = ref_objectid;
5563 
5564 	sizes[0] = sizeof(struct btrfs_inode_item);
5565 	sizes[1] = name_len + sizeof(*ref);
5566 
5567 	path->leave_spinning = 1;
5568 	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
5569 	if (ret != 0)
5570 		goto fail;
5571 
5572 	inode_init_owner(inode, dir, mode);
5573 	inode_set_bytes(inode, 0);
5574 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
5575 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
5576 				  struct btrfs_inode_item);
5577 	memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
5578 			     sizeof(*inode_item));
5579 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
5580 
5581 	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
5582 			     struct btrfs_inode_ref);
5583 	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
5584 	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
5585 	ptr = (unsigned long)(ref + 1);
5586 	write_extent_buffer(path->nodes[0], name, ptr, name_len);
5587 
5588 	btrfs_mark_buffer_dirty(path->nodes[0]);
5589 	btrfs_free_path(path);
5590 
5591 	location = &BTRFS_I(inode)->location;
5592 	location->objectid = objectid;
5593 	location->offset = 0;
5594 	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
5595 
5596 	btrfs_inherit_iflags(inode, dir);
5597 
5598 	if (S_ISREG(mode)) {
5599 		if (btrfs_test_opt(root, NODATASUM))
5600 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
5601 		if (btrfs_test_opt(root, NODATACOW))
5602 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
5603 				BTRFS_INODE_NODATASUM;
5604 	}
5605 
5606 	insert_inode_hash(inode);
5607 	inode_tree_add(inode);
5608 
5609 	trace_btrfs_inode_new(inode);
5610 	btrfs_set_inode_last_trans(trans, inode);
5611 
5612 	btrfs_update_root_times(trans, root);
5613 
5614 	return inode;
5615 fail:
5616 	if (dir)
5617 		BTRFS_I(dir)->index_cnt--;
5618 	btrfs_free_path(path);
5619 	iput(inode);
5620 	return ERR_PTR(ret);
5621 }
5622 
5623 static inline u8 btrfs_inode_type(struct inode *inode)
5624 {
5625 	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
5626 }
5627 
5628 /*
5629  * utility function to add 'inode' into 'parent_inode' with
5630  * a given name and a given sequence number.
5631  * if 'add_backref' is true, also insert a backref from the
5632  * inode to the parent directory.
5633  */
5634 int btrfs_add_link(struct btrfs_trans_handle *trans,
5635 		   struct inode *parent_inode, struct inode *inode,
5636 		   const char *name, int name_len, int add_backref, u64 index)
5637 {
5638 	int ret = 0;
5639 	struct btrfs_key key;
5640 	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
5641 	u64 ino = btrfs_ino(inode);
5642 	u64 parent_ino = btrfs_ino(parent_inode);
5643 
5644 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
5645 		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
5646 	} else {
5647 		key.objectid = ino;
5648 		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
5649 		key.offset = 0;
5650 	}
5651 
5652 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
5653 		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
5654 					 key.objectid, root->root_key.objectid,
5655 					 parent_ino, index, name, name_len);
5656 	} else if (add_backref) {
5657 		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
5658 					     parent_ino, index);
5659 	}
5660 
5661 	/* Nothing to clean up yet */
5662 	if (ret)
5663 		return ret;
5664 
5665 	ret = btrfs_insert_dir_item(trans, root, name, name_len,
5666 				    parent_inode, &key,
5667 				    btrfs_inode_type(inode), index);
5668 	if (ret == -EEXIST || ret == -EOVERFLOW)
5669 		goto fail_dir_item;
5670 	else if (ret) {
5671 		btrfs_abort_transaction(trans, root, ret);
5672 		return ret;
5673 	}
5674 
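	/*
	 * A directory's i_size in btrfs is the sum of the name lengths
	 * of its entries; each link counts its name twice, once for the
	 * DIR_ITEM and once for the DIR_INDEX item, hence name_len * 2.
	 */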
5675 	btrfs_i_size_write(parent_inode, parent_inode->i_size +
5676 			   name_len * 2);
5677 	inode_inc_iversion(parent_inode);
5678 	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
5679 	ret = btrfs_update_inode(trans, root, parent_inode);
5680 	if (ret)
5681 		btrfs_abort_transaction(trans, root, ret);
5682 	return ret;
5683 
5684 fail_dir_item:
5685 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
5686 		u64 local_index;
5687 		int err;
5688 		err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
5689 				 key.objectid, root->root_key.objectid,
5690 				 parent_ino, &local_index, name, name_len);
5691 
5692 	} else if (add_backref) {
5693 		u64 local_index;
5694 		int err;
5695 
5696 		err = btrfs_del_inode_ref(trans, root, name, name_len,
5697 					  ino, parent_ino, &local_index);
5698 	}
5699 	return ret;
5700 }
5701 
5702 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
5703 			    struct inode *dir, struct dentry *dentry,
5704 			    struct inode *inode, int backref, u64 index)
5705 {
5706 	int err = btrfs_add_link(trans, dir, inode,
5707 				 dentry->d_name.name, dentry->d_name.len,
5708 				 backref, index);
5709 	if (err > 0)
5710 		err = -EEXIST;
5711 	return err;
5712 }
5713 
5714 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
5715 			umode_t mode, dev_t rdev)
5716 {
5717 	struct btrfs_trans_handle *trans;
5718 	struct btrfs_root *root = BTRFS_I(dir)->root;
5719 	struct inode *inode = NULL;
5720 	int err;
5721 	int drop_inode = 0;
5722 	u64 objectid;
5723 	u64 index = 0;
5724 
5725 	if (!new_valid_dev(rdev))
5726 		return -EINVAL;
5727 
5728 	/*
5729 	 * 2 for inode item and ref
5730 	 * 2 for dir items
5731 	 * 1 for xattr if selinux is on
5732 	 */
5733 	trans = btrfs_start_transaction(root, 5);
5734 	if (IS_ERR(trans))
5735 		return PTR_ERR(trans);
5736 
5737 	err = btrfs_find_free_ino(root, &objectid);
5738 	if (err)
5739 		goto out_unlock;
5740 
5741 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5742 				dentry->d_name.len, btrfs_ino(dir), objectid,
5743 				mode, &index);
5744 	if (IS_ERR(inode)) {
5745 		err = PTR_ERR(inode);
5746 		goto out_unlock;
5747 	}
5748 
5749 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5750 	if (err) {
5751 		drop_inode = 1;
5752 		goto out_unlock;
5753 	}
5754 
5755 	/*
5756 	 * If the active LSM wants to access the inode during
5757 	 * d_instantiate it needs these. Smack checks to see
5758 	 * if the filesystem supports xattrs by looking at the
5759 	 * ops vector.
5760 	 */
5761 
5762 	inode->i_op = &btrfs_special_inode_operations;
5763 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
5764 	if (err)
5765 		drop_inode = 1;
5766 	else {
5767 		init_special_inode(inode, inode->i_mode, rdev);
5768 		btrfs_update_inode(trans, root, inode);
5769 		d_instantiate(dentry, inode);
5770 	}
5771 out_unlock:
5772 	btrfs_end_transaction(trans, root);
5773 	btrfs_btree_balance_dirty(root);
5774 	if (drop_inode) {
5775 		inode_dec_link_count(inode);
5776 		iput(inode);
5777 	}
5778 	return err;
5779 }
5780 
5781 static int btrfs_create(struct inode *dir, struct dentry *dentry,
5782 			umode_t mode, bool excl)
5783 {
5784 	struct btrfs_trans_handle *trans;
5785 	struct btrfs_root *root = BTRFS_I(dir)->root;
5786 	struct inode *inode = NULL;
5787 	int drop_inode_on_err = 0;
5788 	int err;
5789 	u64 objectid;
5790 	u64 index = 0;
5791 
5792 	/*
5793 	 * 2 for inode item and ref
5794 	 * 2 for dir items
5795 	 * 1 for xattr if selinux is on
5796 	 */
5797 	trans = btrfs_start_transaction(root, 5);
5798 	if (IS_ERR(trans))
5799 		return PTR_ERR(trans);
5800 
5801 	err = btrfs_find_free_ino(root, &objectid);
5802 	if (err)
5803 		goto out_unlock;
5804 
5805 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5806 				dentry->d_name.len, btrfs_ino(dir), objectid,
5807 				mode, &index);
5808 	if (IS_ERR(inode)) {
5809 		err = PTR_ERR(inode);
5810 		goto out_unlock;
5811 	}
5812 	drop_inode_on_err = 1;
5813 
5814 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5815 	if (err)
5816 		goto out_unlock;
5817 
5818 	err = btrfs_update_inode(trans, root, inode);
5819 	if (err)
5820 		goto out_unlock;
5821 
5822 	/*
5823 	 * If the active LSM wants to access the inode during
5824 	 * d_instantiate it needs these. Smack checks to see
5825 	 * if the filesystem supports xattrs by looking at the
5826 	 * ops vector.
5827 	 */
5828 	inode->i_fop = &btrfs_file_operations;
5829 	inode->i_op = &btrfs_file_inode_operations;
5830 
5831 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
5832 	if (err)
5833 		goto out_unlock;
5834 
5835 	inode->i_mapping->a_ops = &btrfs_aops;
5836 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5837 	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
5838 	d_instantiate(dentry, inode);
5839 
5840 out_unlock:
5841 	btrfs_end_transaction(trans, root);
5842 	if (err && drop_inode_on_err) {
5843 		inode_dec_link_count(inode);
5844 		iput(inode);
5845 	}
5846 	btrfs_btree_balance_dirty(root);
5847 	return err;
5848 }
5849 
5850 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
5851 		      struct dentry *dentry)
5852 {
5853 	struct btrfs_trans_handle *trans;
5854 	struct btrfs_root *root = BTRFS_I(dir)->root;
5855 	struct inode *inode = old_dentry->d_inode;
5856 	u64 index;
5857 	int err;
5858 	int drop_inode = 0;
5859 
5860 	/* do not allow sys_link()s across subvols of the same device */
5861 	if (root->objectid != BTRFS_I(inode)->root->objectid)
5862 		return -EXDEV;
5863 
5864 	if (inode->i_nlink >= BTRFS_LINK_MAX)
5865 		return -EMLINK;
5866 
5867 	err = btrfs_set_inode_index(dir, &index);
5868 	if (err)
5869 		goto fail;
5870 
5871 	/*
5872 	 * 2 items for inode and inode ref
5873 	 * 2 items for dir items
5874 	 * 1 item for parent inode
5875 	 */
5876 	trans = btrfs_start_transaction(root, 5);
5877 	if (IS_ERR(trans)) {
5878 		err = PTR_ERR(trans);
5879 		goto fail;
5880 	}
5881 
5882 	btrfs_inc_nlink(inode);
5883 	inode_inc_iversion(inode);
5884 	inode->i_ctime = CURRENT_TIME;
5885 	ihold(inode);
5886 	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
5887 
5888 	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
5889 
5890 	if (err) {
5891 		drop_inode = 1;
5892 	} else {
5893 		struct dentry *parent = dentry->d_parent;
5894 		err = btrfs_update_inode(trans, root, inode);
5895 		if (err)
5896 			goto fail;
5897 		d_instantiate(dentry, inode);
5898 		btrfs_log_new_name(trans, inode, NULL, parent);
5899 	}
5900 
5901 	btrfs_end_transaction(trans, root);
5902 fail:
5903 	if (drop_inode) {
5904 		inode_dec_link_count(inode);
5905 		iput(inode);
5906 	}
5907 	btrfs_btree_balance_dirty(root);
5908 	return err;
5909 }
5910 
5911 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
5912 {
5913 	struct inode *inode = NULL;
5914 	struct btrfs_trans_handle *trans;
5915 	struct btrfs_root *root = BTRFS_I(dir)->root;
5916 	int err = 0;
5917 	int drop_on_err = 0;
5918 	u64 objectid = 0;
5919 	u64 index = 0;
5920 
5921 	/*
5922 	 * 2 items for inode and ref
5923 	 * 2 items for dir items
5924 	 * 1 for xattr if selinux is on
5925 	 */
5926 	trans = btrfs_start_transaction(root, 5);
5927 	if (IS_ERR(trans))
5928 		return PTR_ERR(trans);
5929 
5930 	err = btrfs_find_free_ino(root, &objectid);
5931 	if (err)
5932 		goto out_fail;
5933 
5934 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5935 				dentry->d_name.len, btrfs_ino(dir), objectid,
5936 				S_IFDIR | mode, &index);
5937 	if (IS_ERR(inode)) {
5938 		err = PTR_ERR(inode);
5939 		goto out_fail;
5940 	}
5941 
5942 	drop_on_err = 1;
5943 
5944 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5945 	if (err)
5946 		goto out_fail;
5947 
5948 	inode->i_op = &btrfs_dir_inode_operations;
5949 	inode->i_fop = &btrfs_dir_file_operations;
5950 
5951 	btrfs_i_size_write(inode, 0);
5952 	err = btrfs_update_inode(trans, root, inode);
5953 	if (err)
5954 		goto out_fail;
5955 
5956 	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
5957 			     dentry->d_name.len, 0, index);
5958 	if (err)
5959 		goto out_fail;
5960 
5961 	d_instantiate(dentry, inode);
5962 	drop_on_err = 0;
5963 
5964 out_fail:
5965 	btrfs_end_transaction(trans, root);
5966 	if (drop_on_err)
5967 		iput(inode);
5968 	btrfs_btree_balance_dirty(root);
5969 	return err;
5970 }
5971 
5972 /* helper for btrfs_get_extent.  Given an existing extent in the tree,
5973  * and an extent that you want to insert, deal with overlap and insert
5974  * the new extent into the tree.
5975  */
5976 static int merge_extent_mapping(struct extent_map_tree *em_tree,
5977 				struct extent_map *existing,
5978 				struct extent_map *em,
5979 				u64 map_start, u64 map_len)
5980 {
5981 	u64 start_diff;
5982 
5983 	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
5984 	start_diff = map_start - em->start;
5985 	em->start = map_start;
5986 	em->len = map_len;
5987 	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
5988 	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
5989 		em->block_start += start_diff;
5990 		em->block_len -= start_diff;
5991 	}
5992 	return add_extent_mapping(em_tree, em, 0);
5993 }
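
/*
 * Worked example (illustrative): if 'existing' covers [0, 64k) and the
 * caller only needs the sector at 16k, map_start is 16k and map_len is
 * sectorsize; the code above trims 'em' to start at 16k and advances
 * block_start by the same 16k, keeping the file-to-disk mapping intact.
 */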
5994 
5995 static noinline int uncompress_inline(struct btrfs_path *path,
5996 				      struct inode *inode, struct page *page,
5997 				      size_t pg_offset, u64 extent_offset,
5998 				      struct btrfs_file_extent_item *item)
5999 {
6000 	int ret;
6001 	struct extent_buffer *leaf = path->nodes[0];
6002 	char *tmp;
6003 	size_t max_size;
6004 	unsigned long inline_size;
6005 	unsigned long ptr;
6006 	int compress_type;
6007 
6008 	WARN_ON(pg_offset != 0);
6009 	compress_type = btrfs_file_extent_compression(leaf, item);
6010 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
6011 	inline_size = btrfs_file_extent_inline_item_len(leaf,
6012 					btrfs_item_nr(leaf, path->slots[0]));
6013 	tmp = kmalloc(inline_size, GFP_NOFS);
6014 	if (!tmp)
6015 		return -ENOMEM;
6016 	ptr = btrfs_file_extent_inline_start(item);
6017 
6018 	read_extent_buffer(leaf, tmp, ptr, inline_size);
6019 
6020 	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
6021 	ret = btrfs_decompress(compress_type, tmp, page,
6022 			       extent_offset, inline_size, max_size);
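	/*
	 * If decompression failed, zero fill the rest of the page below;
	 * note that the function still returns 0 in that case.
	 */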
6023 	if (ret) {
6024 		char *kaddr = kmap_atomic(page);
6025 		unsigned long copy_size = min_t(u64,
6026 				  PAGE_CACHE_SIZE - pg_offset,
6027 				  max_size - extent_offset);
6028 		memset(kaddr + pg_offset, 0, copy_size);
6029 		kunmap_atomic(kaddr);
6030 	}
6031 	kfree(tmp);
6032 	return 0;
6033 }
6034 
6035 /*
6036  * a bit scary, this does extent mapping from logical file offset to the disk.
6037  * the ugly parts come from merging extents from the disk with the in-ram
6038  * representation.  This gets more complex because of the data=ordered code,
6039  * where the in-ram extents might be locked pending data=ordered completion.
6040  *
6041  * This also copies inline extents directly into the page.
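 *
 * Typical read-side usage is a sketch like:
 *
 *	em = btrfs_get_extent(inode, page, 0, page_offset(page),
 *			      PAGE_CACHE_SIZE, 0);
 *
 * where a hole in the file comes back as a mapping with
 * em->block_start == EXTENT_MAP_HOLE.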
6042  */
6043 
6044 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
6045 				    size_t pg_offset, u64 start, u64 len,
6046 				    int create)
6047 {
6048 	int ret;
6049 	int err = 0;
6050 	u64 bytenr;
6051 	u64 extent_start = 0;
6052 	u64 extent_end = 0;
6053 	u64 objectid = btrfs_ino(inode);
6054 	u32 found_type;
6055 	struct btrfs_path *path = NULL;
6056 	struct btrfs_root *root = BTRFS_I(inode)->root;
6057 	struct btrfs_file_extent_item *item;
6058 	struct extent_buffer *leaf;
6059 	struct btrfs_key found_key;
6060 	struct extent_map *em = NULL;
6061 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
6062 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6063 	struct btrfs_trans_handle *trans = NULL;
6064 	int compress_type;
6065 
6066 again:
6067 	read_lock(&em_tree->lock);
6068 	em = lookup_extent_mapping(em_tree, start, len);
6069 	if (em)
6070 		em->bdev = root->fs_info->fs_devices->latest_bdev;
6071 	read_unlock(&em_tree->lock);
6072 
6073 	if (em) {
6074 		if (em->start > start || em->start + em->len <= start)
6075 			free_extent_map(em);
6076 		else if (em->block_start == EXTENT_MAP_INLINE && page)
6077 			free_extent_map(em);
6078 		else
6079 			goto out;
6080 	}
6081 	em = alloc_extent_map();
6082 	if (!em) {
6083 		err = -ENOMEM;
6084 		goto out;
6085 	}
6086 	em->bdev = root->fs_info->fs_devices->latest_bdev;
6087 	em->start = EXTENT_MAP_HOLE;
6088 	em->orig_start = EXTENT_MAP_HOLE;
6089 	em->len = (u64)-1;
6090 	em->block_len = (u64)-1;
6091 
6092 	if (!path) {
6093 		path = btrfs_alloc_path();
6094 		if (!path) {
6095 			err = -ENOMEM;
6096 			goto out;
6097 		}
6098 		/*
6099 		 * Chances are we'll be called again, so go ahead and do
6100 		 * readahead
6101 		 */
6102 		path->reada = 1;
6103 	}
6104 
6105 	ret = btrfs_lookup_file_extent(trans, root, path,
6106 				       objectid, start, trans != NULL);
6107 	if (ret < 0) {
6108 		err = ret;
6109 		goto out;
6110 	}
6111 
6112 	if (ret != 0) {
6113 		if (path->slots[0] == 0)
6114 			goto not_found;
6115 		path->slots[0]--;
6116 	}
6117 
6118 	leaf = path->nodes[0];
6119 	item = btrfs_item_ptr(leaf, path->slots[0],
6120 			      struct btrfs_file_extent_item);
6121 	/* are we inside the extent that was found? */
6122 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6123 	found_type = btrfs_key_type(&found_key);
6124 	if (found_key.objectid != objectid ||
6125 	    found_type != BTRFS_EXTENT_DATA_KEY) {
6126 		goto not_found;
6127 	}
6128 
6129 	found_type = btrfs_file_extent_type(leaf, item);
6130 	extent_start = found_key.offset;
6131 	compress_type = btrfs_file_extent_compression(leaf, item);
6132 	if (found_type == BTRFS_FILE_EXTENT_REG ||
6133 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6134 		extent_end = extent_start +
6135 		       btrfs_file_extent_num_bytes(leaf, item);
6136 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6137 		size_t size;
6138 		size = btrfs_file_extent_inline_len(leaf, item);
6139 		extent_end = ALIGN(extent_start + size, root->sectorsize);
6140 	}
6141 
6142 	if (start >= extent_end) {
6143 		path->slots[0]++;
6144 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6145 			ret = btrfs_next_leaf(root, path);
6146 			if (ret < 0) {
6147 				err = ret;
6148 				goto out;
6149 			}
6150 			if (ret > 0)
6151 				goto not_found;
6152 			leaf = path->nodes[0];
6153 		}
6154 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6155 		if (found_key.objectid != objectid ||
6156 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
6157 			goto not_found;
6158 		if (start + len <= found_key.offset)
6159 			goto not_found;
6160 		em->start = start;
6161 		em->orig_start = start;
6162 		em->len = found_key.offset - start;
6163 		goto not_found_em;
6164 	}
6165 
6166 	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
6167 	if (found_type == BTRFS_FILE_EXTENT_REG ||
6168 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6169 		em->start = extent_start;
6170 		em->len = extent_end - extent_start;
6171 		em->orig_start = extent_start -
6172 				 btrfs_file_extent_offset(leaf, item);
6173 		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf,
6174 								      item);
6175 		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
6176 		if (bytenr == 0) {
6177 			em->block_start = EXTENT_MAP_HOLE;
6178 			goto insert;
6179 		}
6180 		if (compress_type != BTRFS_COMPRESS_NONE) {
6181 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
6182 			em->compress_type = compress_type;
6183 			em->block_start = bytenr;
6184 			em->block_len = em->orig_block_len;
6185 		} else {
6186 			bytenr += btrfs_file_extent_offset(leaf, item);
6187 			em->block_start = bytenr;
6188 			em->block_len = em->len;
6189 			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
6190 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
6191 		}
6192 		goto insert;
6193 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6194 		unsigned long ptr;
6195 		char *map;
6196 		size_t size;
6197 		size_t extent_offset;
6198 		size_t copy_size;
6199 
6200 		em->block_start = EXTENT_MAP_INLINE;
6201 		if (!page || create) {
6202 			em->start = extent_start;
6203 			em->len = extent_end - extent_start;
6204 			goto out;
6205 		}
6206 
6207 		size = btrfs_file_extent_inline_len(leaf, item);
6208 		extent_offset = page_offset(page) + pg_offset - extent_start;
6209 		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
6210 				size - extent_offset);
6211 		em->start = extent_start + extent_offset;
6212 		em->len = ALIGN(copy_size, root->sectorsize);
6213 		em->orig_block_len = em->len;
6214 		em->orig_start = em->start;
6215 		if (compress_type) {
6216 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
6217 			em->compress_type = compress_type;
6218 		}
6219 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
6220 		if (create == 0 && !PageUptodate(page)) {
6221 			if (btrfs_file_extent_compression(leaf, item) !=
6222 			    BTRFS_COMPRESS_NONE) {
6223 				ret = uncompress_inline(path, inode, page,
6224 							pg_offset,
6225 							extent_offset, item);
6226 				BUG_ON(ret); /* -ENOMEM */
6227 			} else {
6228 				map = kmap(page);
6229 				read_extent_buffer(leaf, map + pg_offset, ptr,
6230 						   copy_size);
6231 				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
6232 					memset(map + pg_offset + copy_size, 0,
6233 					       PAGE_CACHE_SIZE - pg_offset -
6234 					       copy_size);
6235 				}
6236 				kunmap(page);
6237 			}
6238 			flush_dcache_page(page);
6239 		} else if (create && PageUptodate(page)) {
6240 			BUG();
6241 			if (!trans) {
6242 				kunmap(page);
6243 				free_extent_map(em);
6244 				em = NULL;
6245 
6246 				btrfs_release_path(path);
6247 				trans = btrfs_join_transaction(root);
6248 
6249 				if (IS_ERR(trans))
6250 					return ERR_CAST(trans);
6251 				goto again;
6252 			}
6253 			map = kmap(page);
6254 			write_extent_buffer(leaf, map + pg_offset, ptr,
6255 					    copy_size);
6256 			kunmap(page);
6257 			btrfs_mark_buffer_dirty(leaf);
6258 		}
6259 		set_extent_uptodate(io_tree, em->start,
6260 				    extent_map_end(em) - 1, NULL, GFP_NOFS);
6261 		goto insert;
6262 	} else {
6263 		WARN(1, KERN_ERR "btrfs unknown found_type %d\n", found_type);
6264 	}
6265 not_found:
6266 	em->start = start;
6267 	em->orig_start = start;
6268 	em->len = len;
6269 not_found_em:
6270 	em->block_start = EXTENT_MAP_HOLE;
6271 	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
6272 insert:
6273 	btrfs_release_path(path);
6274 	if (em->start > start || extent_map_end(em) <= start) {
6275 		btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
6276 			(unsigned long long)em->start,
6277 			(unsigned long long)em->len,
6278 			(unsigned long long)start,
6279 			(unsigned long long)len);
6280 		err = -EIO;
6281 		goto out;
6282 	}
6283 
6284 	err = 0;
6285 	write_lock(&em_tree->lock);
6286 	ret = add_extent_mapping(em_tree, em, 0);
6287 	/* it is possible that someone inserted the extent into the tree
6288 	 * while we had the lock dropped.  It is also possible that
6289 	 * an overlapping map exists in the tree
6290 	 */
6291 	if (ret == -EEXIST) {
6292 		struct extent_map *existing;
6293 
6294 		ret = 0;
6295 
6296 		existing = lookup_extent_mapping(em_tree, start, len);
6297 		if (existing && (existing->start > start ||
6298 		    existing->start + existing->len <= start)) {
6299 			free_extent_map(existing);
6300 			existing = NULL;
6301 		}
6302 		if (!existing) {
6303 			existing = lookup_extent_mapping(em_tree, em->start,
6304 							 em->len);
6305 			if (existing) {
6306 				err = merge_extent_mapping(em_tree, existing,
6307 							   em, start,
6308 							   root->sectorsize);
6309 				free_extent_map(existing);
6310 				if (err) {
6311 					free_extent_map(em);
6312 					em = NULL;
6313 				}
6314 			} else {
6315 				err = -EIO;
6316 				free_extent_map(em);
6317 				em = NULL;
6318 			}
6319 		} else {
6320 			free_extent_map(em);
6321 			em = existing;
6322 			err = 0;
6323 		}
6324 	}
6325 	write_unlock(&em_tree->lock);
6326 out:
6327 
6328 	if (em)
6329 		trace_btrfs_get_extent(root, em);
6330 
6331 	if (path)
6332 		btrfs_free_path(path);
6333 	if (trans) {
6334 		ret = btrfs_end_transaction(trans, root);
6335 		if (!err)
6336 			err = ret;
6337 	}
6338 	if (err) {
6339 		free_extent_map(em);
6340 		return ERR_PTR(err);
6341 	}
6342 	BUG_ON(!em); /* Error is always set */
6343 	return em;
6344 }
6345 
6346 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
6347 					   size_t pg_offset, u64 start, u64 len,
6348 					   int create)
6349 {
6350 	struct extent_map *em;
6351 	struct extent_map *hole_em = NULL;
6352 	u64 range_start = start;
6353 	u64 end;
6354 	u64 found;
6355 	u64 found_end;
6356 	int err = 0;
6357 
6358 	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
6359 	if (IS_ERR(em))
6360 		return em;
6361 	if (em) {
6362 		/*
6363 		 * if our em maps to
6364 		 * -  a hole or
6365 		 * -  a pre-alloc extent,
6366 		 * there might actually be delalloc bytes behind it.
6367 		 */
6368 		if (em->block_start != EXTENT_MAP_HOLE &&
6369 		    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6370 			return em;
6371 		else
6372 			hole_em = em;
6373 	}
6374 
6375 	/* check to see if we've wrapped (len == -1 or similar) */
6376 	end = start + len;
6377 	if (end < start)
6378 		end = (u64)-1;
6379 	else
6380 		end -= 1;
6381 
6382 	em = NULL;
6383 
6384 	/* ok, we didn't find anything, let's look for delalloc */
6385 	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
6386 				 end, len, EXTENT_DELALLOC, 1);
6387 	found_end = range_start + found;
6388 	if (found_end < range_start)
6389 		found_end = (u64)-1;
6390 
6391 	/*
6392 	 * we didn't find anything useful, return
6393 	 * the original results from get_extent()
6394 	 */
6395 	if (range_start > end || found_end <= start) {
6396 		em = hole_em;
6397 		hole_em = NULL;
6398 		goto out;
6399 	}
6400 
6401 	/* adjust the range_start to make sure it doesn't
6402 	 * go backwards from the start they passed in
6403 	 */
6404 	range_start = max(start, range_start);
6405 	found = found_end - range_start;
6406 
6407 	if (found > 0) {
6408 		u64 hole_start = start;
6409 		u64 hole_len = len;
6410 
6411 		em = alloc_extent_map();
6412 		if (!em) {
6413 			err = -ENOMEM;
6414 			goto out;
6415 		}
6416 		/*
6417 		 * when btrfs_get_extent can't find anything it
6418 		 * returns one huge hole
6419 		 *
6420 		 * make sure what it found really fits our range, and
6421 		 * adjust to make sure it is based on the start from
6422 		 * the caller
6423 		 */
6424 		if (hole_em) {
6425 			u64 calc_end = extent_map_end(hole_em);
6426 
6427 			if (calc_end <= start || (hole_em->start > end)) {
6428 				free_extent_map(hole_em);
6429 				hole_em = NULL;
6430 			} else {
6431 				hole_start = max(hole_em->start, start);
6432 				hole_len = calc_end - hole_start;
6433 			}
6434 		}
6435 		em->bdev = NULL;
6436 		if (hole_em && range_start > hole_start) {
6437 			/* our hole starts before our delalloc, so we
6438 			 * have to return just the parts of the hole
6439 			 * that go until the delalloc starts
6440 			 */
6441 			em->len = min(hole_len,
6442 				      range_start - hole_start);
6443 			em->start = hole_start;
6444 			em->orig_start = hole_start;
6445 			/*
6446 			 * don't adjust block start at all,
6447 			 * it is fixed at EXTENT_MAP_HOLE
6448 			 */
6449 			em->block_start = hole_em->block_start;
6450 			em->block_len = hole_len;
6451 			if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
6452 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
6453 		} else {
6454 			em->start = range_start;
6455 			em->len = found;
6456 			em->orig_start = range_start;
6457 			em->block_start = EXTENT_MAP_DELALLOC;
6458 			em->block_len = found;
6459 		}
6460 	} else if (hole_em) {
6461 		return hole_em;
6462 	}
6463 out:
6464 
6465 	free_extent_map(hole_em);
6466 	if (err) {
6467 		free_extent_map(em);
6468 		return ERR_PTR(err);
6469 	}
6470 	return em;
6471 }
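
/*
 * Note for callers (e.g. fiemap): unlike plain btrfs_get_extent(),
 * ranges that are only dirty in memory come back with block_start set
 * to EXTENT_MAP_DELALLOC, so data that has not reached disk yet is
 * still reported.
 */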
6472 
6473 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
6474 						  u64 start, u64 len)
6475 {
6476 	struct btrfs_root *root = BTRFS_I(inode)->root;
6477 	struct btrfs_trans_handle *trans;
6478 	struct extent_map *em;
6479 	struct btrfs_key ins;
6480 	u64 alloc_hint;
6481 	int ret;
6482 
6483 	trans = btrfs_join_transaction(root);
6484 	if (IS_ERR(trans))
6485 		return ERR_CAST(trans);
6486 
6487 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
6488 
6489 	alloc_hint = get_extent_allocation_hint(inode, start, len);
6490 	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
6491 				   alloc_hint, &ins, 1);
6492 	if (ret) {
6493 		em = ERR_PTR(ret);
6494 		goto out;
6495 	}
6496 
6497 	em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
6498 			      ins.offset, ins.offset, ins.offset, 0);
6499 	if (IS_ERR(em))
6500 		goto out;
6501 
6502 	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
6503 					   ins.offset, ins.offset, 0);
6504 	if (ret) {
6505 		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
6506 		em = ERR_PTR(ret);
6507 	}
6508 out:
6509 	btrfs_end_transaction(trans, root);
6510 	return em;
6511 }
6512 
6513 /*
6514  * returns 1 when the nocow is safe, < 0 on error, 0 if the
6515  * block must be cow'd
6516  */
6517 static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
6518 				      struct inode *inode, u64 offset, u64 *len,
6519 				      u64 *orig_start, u64 *orig_block_len,
6520 				      u64 *ram_bytes)
6521 {
6522 	struct btrfs_path *path;
6523 	int ret;
6524 	struct extent_buffer *leaf;
6525 	struct btrfs_root *root = BTRFS_I(inode)->root;
6526 	struct btrfs_file_extent_item *fi;
6527 	struct btrfs_key key;
6528 	u64 disk_bytenr;
6529 	u64 backref_offset;
6530 	u64 extent_end;
6531 	u64 num_bytes;
6532 	int slot;
6533 	int found_type;
6534 
6535 	path = btrfs_alloc_path();
6536 	if (!path)
6537 		return -ENOMEM;
6538 
6539 	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
6540 				       offset, 0);
6541 	if (ret < 0)
6542 		goto out;
6543 
6544 	slot = path->slots[0];
6545 	if (ret == 1) {
6546 		if (slot == 0) {
6547 			/* can't find the item, must cow */
6548 			ret = 0;
6549 			goto out;
6550 		}
6551 		slot--;
6552 	}
6553 	ret = 0;
6554 	leaf = path->nodes[0];
6555 	btrfs_item_key_to_cpu(leaf, &key, slot);
6556 	if (key.objectid != btrfs_ino(inode) ||
6557 	    key.type != BTRFS_EXTENT_DATA_KEY) {
6558 		/* not our file or wrong item type, must cow */
6559 		goto out;
6560 	}
6561 
6562 	if (key.offset > offset) {
6563 		/* Wrong offset, must cow */
6564 		goto out;
6565 	}
6566 
6567 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
6568 	found_type = btrfs_file_extent_type(leaf, fi);
6569 	if (found_type != BTRFS_FILE_EXTENT_REG &&
6570 	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
6571 		/* not a regular extent, must cow */
6572 		goto out;
6573 	}
6574 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
6575 	backref_offset = btrfs_file_extent_offset(leaf, fi);
6576 
6577 	*orig_start = key.offset - backref_offset;
6578 	*orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
6579 	*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
6580 
6581 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
6582 	if (extent_end < offset + *len) {
6583 		/* extent doesn't include our full range, must cow */
6584 		goto out;
6585 	}
6586 
6587 	if (btrfs_extent_readonly(root, disk_bytenr))
6588 		goto out;
6589 
6590 	/*
6591 	 * look for other files referencing this extent, if we
6592 	 * find any we must cow
6593 	 */
6594 	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
6595 				  key.offset - backref_offset, disk_bytenr))
6596 		goto out;
6597 
6598 	/*
6599 	 * adjust disk_bytenr and num_bytes to cover just the bytes
6600 	 * in this extent we are about to write.  If there
6601 	 * are any csums in that range we have to cow in order
6602 	 * to keep the csums correct
6603 	 */
6604 	disk_bytenr += backref_offset;
6605 	disk_bytenr += offset - key.offset;
6606 	num_bytes = min(offset + *len, extent_end) - offset;
6607 	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
6608 		goto out;
6609 	/*
6610 	 * all of the above have passed, it is safe to overwrite this extent
6611 	 * without cow
6612 	 */
6613 	*len = num_bytes;
6614 	ret = 1;
6615 out:
6616 	btrfs_free_path(path);
6617 	return ret;
6618 }
6619 
6620 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
6621 			      struct extent_state **cached_state, int writing)
6622 {
6623 	struct btrfs_ordered_extent *ordered;
6624 	int ret = 0;
6625 
6626 	while (1) {
6627 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6628 				 0, cached_state);
6629 		/*
6630 		 * We're concerned with the entire range that we're going to be
6631 		 * doing DIO to, so we need to make sure there are no ordered
6632 		 * extents in this range.
6633 		 */
6634 		ordered = btrfs_lookup_ordered_range(inode, lockstart,
6635 						     lockend - lockstart + 1);
6636 
6637 		/*
6638 		 * We need to make sure there are no buffered pages in this
6639 		 * range either, we could have raced between the invalidate in
6640 		 * generic_file_direct_write and locking the extent.  The
6641 		 * invalidate needs to happen so that reads after a write do not
6642 		 * get stale data.
6643 		 */
6644 		if (!ordered && (!writing ||
6645 		    !test_range_bit(&BTRFS_I(inode)->io_tree,
6646 				    lockstart, lockend, EXTENT_UPTODATE, 0,
6647 				    *cached_state)))
6648 			break;
6649 
6650 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6651 				     cached_state, GFP_NOFS);
6652 
6653 		if (ordered) {
6654 			btrfs_start_ordered_extent(inode, ordered, 1);
6655 			btrfs_put_ordered_extent(ordered);
6656 		} else {
6657 			/* Screw you mmap */
6658 			ret = filemap_write_and_wait_range(inode->i_mapping,
6659 							   lockstart,
6660 							   lockend);
6661 			if (ret)
6662 				break;
6663 
6664 			/*
6665 			 * If we found a page that couldn't be invalidated just
6666 			 * fall back to buffered.
6667 			 */
6668 			ret = invalidate_inode_pages2_range(inode->i_mapping,
6669 					lockstart >> PAGE_CACHE_SHIFT,
6670 					lockend >> PAGE_CACHE_SHIFT);
6671 			if (ret)
6672 				break;
6673 		}
6674 
6675 		cond_resched();
6676 	}
6677 
6678 	return ret;
6679 }
6680 
6681 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
6682 					   u64 len, u64 orig_start,
6683 					   u64 block_start, u64 block_len,
6684 					   u64 orig_block_len, u64 ram_bytes,
6685 					   int type)
6686 {
6687 	struct extent_map_tree *em_tree;
6688 	struct extent_map *em;
6689 	struct btrfs_root *root = BTRFS_I(inode)->root;
6690 	int ret;
6691 
6692 	em_tree = &BTRFS_I(inode)->extent_tree;
6693 	em = alloc_extent_map();
6694 	if (!em)
6695 		return ERR_PTR(-ENOMEM);
6696 
6697 	em->start = start;
6698 	em->orig_start = orig_start;
6699 	em->mod_start = start;
6700 	em->mod_len = len;
6701 	em->len = len;
6702 	em->block_len = block_len;
6703 	em->block_start = block_start;
6704 	em->bdev = root->fs_info->fs_devices->latest_bdev;
6705 	em->orig_block_len = orig_block_len;
6706 	em->ram_bytes = ram_bytes;
6707 	em->generation = -1;
6708 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
6709 	if (type == BTRFS_ORDERED_PREALLOC)
6710 		set_bit(EXTENT_FLAG_FILLING, &em->flags);
6711 
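	/*
	 * Drop any cached extents overlapping the range until the pinned
	 * em goes in; add_extent_mapping() keeps returning -EEXIST while
	 * something overlapping is still cached.
	 */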
6712 	do {
6713 		btrfs_drop_extent_cache(inode, em->start,
6714 				em->start + em->len - 1, 0);
6715 		write_lock(&em_tree->lock);
6716 		ret = add_extent_mapping(em_tree, em, 1);
6717 		write_unlock(&em_tree->lock);
6718 	} while (ret == -EEXIST);
6719 
6720 	if (ret) {
6721 		free_extent_map(em);
6722 		return ERR_PTR(ret);
6723 	}
6724 
6725 	return em;
6726 }
6727 
6728 
6729 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
6730 				   struct buffer_head *bh_result, int create)
6731 {
6732 	struct extent_map *em;
6733 	struct btrfs_root *root = BTRFS_I(inode)->root;
6734 	struct extent_state *cached_state = NULL;
6735 	u64 start = iblock << inode->i_blkbits;
6736 	u64 lockstart, lockend;
6737 	u64 len = bh_result->b_size;
6738 	struct btrfs_trans_handle *trans;
6739 	int unlock_bits = EXTENT_LOCKED;
6740 	int ret = 0;
6741 
6742 	if (create)
6743 		unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
6744 	else
6745 		len = min_t(u64, len, root->sectorsize);
6746 
6747 	lockstart = start;
6748 	lockend = start + len - 1;
6749 
6750 	/*
6751 	 * If this errors out it's because we couldn't invalidate pagecache for
6752 	 * this range and we need to fall back to buffered.
6753 	 */
6754 	if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
6755 		return -ENOTBLK;
6756 
6757 	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
6758 	if (IS_ERR(em)) {
6759 		ret = PTR_ERR(em);
6760 		goto unlock_err;
6761 	}
6762 
6763 	/*
6764 	 * Ok for INLINE and COMPRESSED extents we need to fall back to buffered
6765 	 * io.  INLINE is special, and we could probably kludge it in here, but
6766 	 * it's still buffered so for safety let's just fall back to the generic
6767 	 * buffered path.
6768 	 *
6769 	 * For COMPRESSED we _have_ to read the entire extent in so we can
6770 	 * decompress it, so there will be buffering required no matter what we
6771 	 * do, so go ahead and fallback to buffered.
6772 	 *
6773 	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
6774 	 * to buffered IO.  Don't blame me, this is the price we pay for using
6775 	 * the generic code.
6776 	 */
6777 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
6778 	    em->block_start == EXTENT_MAP_INLINE) {
6779 		free_extent_map(em);
6780 		ret = -ENOTBLK;
6781 		goto unlock_err;
6782 	}
6783 
6784 	/* Just a good old fashioned hole, return */
6785 	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
6786 			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
6787 		free_extent_map(em);
6788 		goto unlock_err;
6789 	}
6790 
6791 	/*
6792 	 * We don't allocate a new extent in the following cases
6793 	 *
6794 	 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
6795 	 * existing extent.
6796 	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
6797 	 * just use the extent.
6798 	 *
6799 	 */
6800 	if (!create) {
6801 		len = min(len, em->len - (start - em->start));
6802 		lockstart = start + len;
6803 		goto unlock;
6804 	}
6805 
6806 	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
6807 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
6808 	     em->block_start != EXTENT_MAP_HOLE)) {
6809 		int type;
6810 		int ret;
6811 		u64 block_start, orig_start, orig_block_len, ram_bytes;
6812 
6813 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6814 			type = BTRFS_ORDERED_PREALLOC;
6815 		else
6816 			type = BTRFS_ORDERED_NOCOW;
6817 		len = min(len, em->len - (start - em->start));
6818 		block_start = em->block_start + (start - em->start);
6819 
6820 		/*
6821 		 * we're not going to log anything, but we do need
6822 		 * to make sure the current transaction stays open
6823 		 * while we look for nocow cross refs
6824 		 */
6825 		trans = btrfs_join_transaction(root);
6826 		if (IS_ERR(trans))
6827 			goto must_cow;
6828 
6829 		if (can_nocow_odirect(trans, inode, start, &len, &orig_start,
6830 				      &orig_block_len, &ram_bytes) == 1) {
6831 			if (type == BTRFS_ORDERED_PREALLOC) {
6832 				free_extent_map(em);
6833 				em = create_pinned_em(inode, start, len,
6834 						       orig_start,
6835 						       block_start, len,
6836 						       orig_block_len,
6837 						       ram_bytes, type);
6838 				if (IS_ERR(em)) {
6839 					btrfs_end_transaction(trans, root);
6840 					goto unlock_err;
6841 				}
6842 			}
6843 
6844 			ret = btrfs_add_ordered_extent_dio(inode, start,
6845 					   block_start, len, len, type);
6846 			btrfs_end_transaction(trans, root);
6847 			if (ret) {
6848 				free_extent_map(em);
6849 				goto unlock_err;
6850 			}
6851 			goto unlock;
6852 		}
6853 		btrfs_end_transaction(trans, root);
6854 	}
6855 must_cow:
6856 	/*
6857 	 * this will cow the extent, reset the len in case we changed
6858 	 * it above
6859 	 */
6860 	len = bh_result->b_size;
6861 	free_extent_map(em);
6862 	em = btrfs_new_extent_direct(inode, start, len);
6863 	if (IS_ERR(em)) {
6864 		ret = PTR_ERR(em);
6865 		goto unlock_err;
6866 	}
6867 	len = min(len, em->len - (start - em->start));
6868 unlock:
6869 	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
6870 		inode->i_blkbits;
6871 	bh_result->b_size = len;
6872 	bh_result->b_bdev = em->bdev;
6873 	set_buffer_mapped(bh_result);
6874 	if (create) {
6875 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6876 			set_buffer_new(bh_result);
6877 
6878 		/*
6879 		 * Need to update the i_size under the extent lock so buffered
6880 		 * readers will get the updated i_size when we unlock.
6881 		 */
6882 		if (start + len > i_size_read(inode))
6883 			i_size_write(inode, start + len);
6884 
6885 		spin_lock(&BTRFS_I(inode)->lock);
6886 		BTRFS_I(inode)->outstanding_extents++;
6887 		spin_unlock(&BTRFS_I(inode)->lock);
6888 
6889 		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6890 				     lockstart + len - 1, EXTENT_DELALLOC, NULL,
6891 				     &cached_state, GFP_NOFS);
6892 		BUG_ON(ret);
6893 	}
6894 
6895 	/*
6896 	 * In the case of write we need to clear and unlock the entire range,
6897 	 * in the case of read we need to unlock only the end area that we
6898 	 * aren't using if there is any left over space.
6899 	 */
6900 	if (lockstart < lockend) {
6901 		clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6902 				 lockend, unlock_bits, 1, 0,
6903 				 &cached_state, GFP_NOFS);
6904 	} else {
6905 		free_extent_state(cached_state);
6906 	}
6907 
6908 	free_extent_map(em);
6909 
6910 	return 0;
6911 
6912 unlock_err:
6913 	clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6914 			 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
6915 	return ret;
6916 }
6917 
6918 struct btrfs_dio_private {
6919 	struct inode *inode;
6920 	u64 logical_offset;
6921 	u64 disk_bytenr;
6922 	u64 bytes;
6923 	void *private;
6924 
6925 	/* number of bios pending for this dio */
6926 	atomic_t pending_bios;
6927 
6928 	/* IO errors */
6929 	int errors;
6930 
6931 	struct bio *orig_bio;
6932 };
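
/*
 * Lifecycle sketch: btrfs_submit_direct() allocates one of these per
 * incoming dio bio, btrfs_submit_direct_hook() bumps pending_bios for
 * each split bio it sends down, and the last btrfs_end_dio_bio() to
 * drop the count completes orig_bio on behalf of the whole dio.
 */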
6933 
6934 static void btrfs_endio_direct_read(struct bio *bio, int err)
6935 {
6936 	struct btrfs_dio_private *dip = bio->bi_private;
6937 	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
6938 	struct bio_vec *bvec = bio->bi_io_vec;
6939 	struct inode *inode = dip->inode;
6940 	struct btrfs_root *root = BTRFS_I(inode)->root;
6941 	u64 start;
6942 
6943 	start = dip->logical_offset;
6944 	do {
6945 		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
6946 			struct page *page = bvec->bv_page;
6947 			char *kaddr;
6948 			u32 csum = ~(u32)0;
6949 			u64 private = ~(u32)0;
6950 			unsigned long flags;
6951 
6952 			if (get_state_private(&BTRFS_I(inode)->io_tree,
6953 					      start, &private))
6954 				goto failed;
6955 			local_irq_save(flags);
6956 			kaddr = kmap_atomic(page);
6957 			csum = btrfs_csum_data(kaddr + bvec->bv_offset,
6958 					       csum, bvec->bv_len);
6959 			btrfs_csum_final(csum, (char *)&csum);
6960 			kunmap_atomic(kaddr);
6961 			local_irq_restore(flags);
6962 
6963 			flush_dcache_page(bvec->bv_page);
6964 			if (csum != private) {
6965 failed:
6966 				btrfs_err(root->fs_info, "csum failed ino %llu off %llu csum %u private %u",
6967 					(unsigned long long)btrfs_ino(inode),
6968 					(unsigned long long)start,
6969 					csum, (unsigned)private);
6970 				err = -EIO;
6971 			}
6972 		}
6973 
6974 		start += bvec->bv_len;
6975 		bvec++;
6976 	} while (bvec <= bvec_end);
6977 
6978 	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
6979 		      dip->logical_offset + dip->bytes - 1);
6980 	bio->bi_private = dip->private;
6981 
6982 	kfree(dip);
6983 
6984 	/* If we had a csum failure make sure to clear the uptodate flag */
6985 	if (err)
6986 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
6987 	dio_end_io(bio, err);
6988 }
6989 
6990 static void btrfs_endio_direct_write(struct bio *bio, int err)
6991 {
6992 	struct btrfs_dio_private *dip = bio->bi_private;
6993 	struct inode *inode = dip->inode;
6994 	struct btrfs_root *root = BTRFS_I(inode)->root;
6995 	struct btrfs_ordered_extent *ordered = NULL;
6996 	u64 ordered_offset = dip->logical_offset;
6997 	u64 ordered_bytes = dip->bytes;
6998 	int ret;
6999 
7000 	if (err)
7001 		goto out_done;
7002 again:
7003 	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
7004 						   &ordered_offset,
7005 						   ordered_bytes, !err);
7006 	if (!ret)
7007 		goto out_test;
7008 
7009 	ordered->work.func = finish_ordered_fn;
7010 	ordered->work.flags = 0;
7011 	btrfs_queue_worker(&root->fs_info->endio_write_workers,
7012 			   &ordered->work);
7013 out_test:
7014 	/*
7015 	 * our bio might span multiple ordered extents.  If we haven't
7016 	 * completed the accounting for the whole dio, go back and try again
7017 	 */
7018 	if (ordered_offset < dip->logical_offset + dip->bytes) {
7019 		ordered_bytes = dip->logical_offset + dip->bytes -
7020 			ordered_offset;
7021 		ordered = NULL;
7022 		goto again;
7023 	}
7024 out_done:
7025 	bio->bi_private = dip->private;
7026 
7027 	kfree(dip);
7028 
7029 	/* If we had an error make sure to clear the uptodate flag */
7030 	if (err)
7031 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
7032 	dio_end_io(bio, err);
7033 }
7034 
7035 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
7036 				    struct bio *bio, int mirror_num,
7037 				    unsigned long bio_flags, u64 offset)
7038 {
7039 	int ret;
7040 	struct btrfs_root *root = BTRFS_I(inode)->root;
7041 	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
7042 	BUG_ON(ret); /* -ENOMEM */
7043 	return 0;
7044 }
7045 
7046 static void btrfs_end_dio_bio(struct bio *bio, int err)
7047 {
7048 	struct btrfs_dio_private *dip = bio->bi_private;
7049 
7050 	if (err) {
7051 		printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
7052 		      "sector %#Lx len %u err no %d\n",
7053 		      (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
7054 		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
7055 		dip->errors = 1;
7056 
7057 		/*
7058 		 * before the atomic variable goes to zero, we must make sure
7059 		 * dip->errors is perceived to be set.
7060 		 */
7061 		smp_mb__before_atomic_dec();
7062 	}
7063 
7064 	/* if there are more bios still pending for this dio, just exit */
7065 	if (!atomic_dec_and_test(&dip->pending_bios))
7066 		goto out;
7067 
7068 	if (dip->errors)
7069 		bio_io_error(dip->orig_bio);
7070 	else {
7071 		set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
7072 		bio_endio(dip->orig_bio, 0);
7073 	}
7074 out:
7075 	bio_put(bio);
7076 }
7077 
7078 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
7079 				       u64 first_sector, gfp_t gfp_flags)
7080 {
7081 	int nr_vecs = bio_get_nr_vecs(bdev);
7082 	return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
7083 }
7084 
7085 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
7086 					 int rw, u64 file_offset, int skip_sum,
7087 					 int async_submit)
7088 {
7089 	int write = rw & REQ_WRITE;
7090 	struct btrfs_root *root = BTRFS_I(inode)->root;
7091 	int ret;
7092 
7093 	if (async_submit)
7094 		async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
7095 
7096 	bio_get(bio);
7097 
7098 	if (!write) {
7099 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
7100 		if (ret)
7101 			goto err;
7102 	}
7103 
7104 	if (skip_sum)
7105 		goto map;
7106 
7107 	if (write && async_submit) {
7108 		ret = btrfs_wq_submit_bio(root->fs_info,
7109 				   inode, rw, bio, 0, 0,
7110 				   file_offset,
7111 				   __btrfs_submit_bio_start_direct_io,
7112 				   __btrfs_submit_bio_done);
7113 		goto err;
7114 	} else if (write) {
7115 		/*
7116 		 * If we aren't doing async submit, calculate the csum of the
7117 		 * bio now.
7118 		 */
7119 		ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
7120 		if (ret)
7121 			goto err;
7122 	} else if (!skip_sum) {
7123 		ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset);
7124 		if (ret)
7125 			goto err;
7126 	}
7127 
7128 map:
7129 	ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
7130 err:
7131 	bio_put(bio);
7132 	return ret;
7133 }
7134 
7135 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
7136 				    int skip_sum)
7137 {
7138 	struct inode *inode = dip->inode;
7139 	struct btrfs_root *root = BTRFS_I(inode)->root;
7140 	struct bio *bio;
7141 	struct bio *orig_bio = dip->orig_bio;
7142 	struct bio_vec *bvec = orig_bio->bi_io_vec;
7143 	u64 start_sector = orig_bio->bi_sector;
7144 	u64 file_offset = dip->logical_offset;
7145 	u64 submit_len = 0;
7146 	u64 map_length;
7147 	int nr_pages = 0;
7148 	int ret = 0;
7149 	int async_submit = 0;
7150 
7151 	map_length = orig_bio->bi_size;
7152 	ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
7153 			      &map_length, NULL, 0);
7154 	if (ret) {
7155 		bio_put(orig_bio);
7156 		return -EIO;
7157 	}
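	/*
	 * If the whole bio fits inside a single chunk mapping we can
	 * submit it as is; otherwise it has to be split so that no child
	 * bio crosses a stripe boundary.
	 */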
7158 	if (map_length >= orig_bio->bi_size) {
7159 		bio = orig_bio;
7160 		goto submit;
7161 	}
7162 
7163 	/* async crcs make it difficult to collect full stripe writes. */
7164 	if (btrfs_get_alloc_profile(root, 1) &
7165 	    (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))
7166 		async_submit = 0;
7167 	else
7168 		async_submit = 1;
7169 
7170 	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
7171 	if (!bio)
7172 		return -ENOMEM;
7173 	bio->bi_private = dip;
7174 	bio->bi_end_io = btrfs_end_dio_bio;
7175 	atomic_inc(&dip->pending_bios);
7176 
7177 	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
7178 		if (unlikely(map_length < submit_len + bvec->bv_len ||
7179 		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
7180 				 bvec->bv_offset) < bvec->bv_len)) {
7181 			/*
7182 			 * inc the count before we submit the bio so
7183 			 * we know the end IO handler can't drop the
7184 			 * count to zero early. Otherwise, the dip might
7185 			 * get freed before we're done setting it up
7186 			 */
7187 			atomic_inc(&dip->pending_bios);
7188 			ret = __btrfs_submit_dio_bio(bio, inode, rw,
7189 						     file_offset, skip_sum,
7190 						     async_submit);
7191 			if (ret) {
7192 				bio_put(bio);
7193 				atomic_dec(&dip->pending_bios);
7194 				goto out_err;
7195 			}
7196 
7197 			start_sector += submit_len >> 9;
7198 			file_offset += submit_len;
7199 
7200 			submit_len = 0;
7201 			nr_pages = 0;
7202 
7203 			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
7204 						  start_sector, GFP_NOFS);
7205 			if (!bio)
7206 				goto out_err;
7207 			bio->bi_private = dip;
7208 			bio->bi_end_io = btrfs_end_dio_bio;
7209 
7210 			map_length = orig_bio->bi_size;
7211 			ret = btrfs_map_block(root->fs_info, rw,
7212 					      start_sector << 9,
7213 					      &map_length, NULL, 0);
7214 			if (ret) {
7215 				bio_put(bio);
7216 				goto out_err;
7217 			}
7218 		} else {
7219 			submit_len += bvec->bv_len;
7220 			nr_pages++;
7221 			bvec++;
7222 		}
7223 	}
7224 
7225 submit:
7226 	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
7227 				     async_submit);
7228 	if (!ret)
7229 		return 0;
7230 
7231 	bio_put(bio);
7232 out_err:
7233 	dip->errors = 1;
7234 	/*
7235 	 * before the atomic variable goes to zero, we must
7236 	 * make sure dip->errors is perceived to be set.
7237 	 */
7238 	smp_mb__before_atomic_dec();
7239 	if (atomic_dec_and_test(&dip->pending_bios))
7240 		bio_io_error(dip->orig_bio);
7241 
7242 	/* bio_end_io() will handle the error, so we needn't return it */
7243 	return 0;
7244 }
7245 
7246 static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
7247 				loff_t file_offset)
7248 {
7249 	struct btrfs_root *root = BTRFS_I(inode)->root;
7250 	struct btrfs_dio_private *dip;
7251 	struct bio_vec *bvec = bio->bi_io_vec;
7252 	int skip_sum;
7253 	int write = rw & REQ_WRITE;
7254 	int ret = 0;
7255 
7256 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
7257 
7258 	dip = kmalloc(sizeof(*dip), GFP_NOFS);
7259 	if (!dip) {
7260 		ret = -ENOMEM;
7261 		goto free_ordered;
7262 	}
7263 
7264 	dip->private = bio->bi_private;
7265 	dip->inode = inode;
7266 	dip->logical_offset = file_offset;
7267 
7268 	dip->bytes = 0;
7269 	do {
7270 		dip->bytes += bvec->bv_len;
7271 		bvec++;
7272 	} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));
7273 
7274 	dip->disk_bytenr = (u64)bio->bi_sector << 9;
7275 	bio->bi_private = dip;
7276 	dip->errors = 0;
7277 	dip->orig_bio = bio;
7278 	atomic_set(&dip->pending_bios, 0);
7279 
7280 	if (write)
7281 		bio->bi_end_io = btrfs_endio_direct_write;
7282 	else
7283 		bio->bi_end_io = btrfs_endio_direct_read;
7284 
7285 	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
7286 	if (!ret)
7287 		return;
7288 free_ordered:
7289 	/*
7290 	 * If this is a write, we need to clean up the reserved space and kill
7291 	 * the ordered extent.
7292 	 */
7293 	if (write) {
7294 		struct btrfs_ordered_extent *ordered;
7295 		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
7296 		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
7297 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
7298 			btrfs_free_reserved_extent(root, ordered->start,
7299 						   ordered->disk_len);
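		/*
		 * Put twice: once for the reference taken by the lookup
		 * above, and once for the ordered extent's base
		 * reference, since its IO will never run to drop it.
		 */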
7300 		btrfs_put_ordered_extent(ordered);
7301 		btrfs_put_ordered_extent(ordered);
7302 	}
7303 	bio_endio(bio, ret);
7304 }
7305 
7306 static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
7307 			const struct iovec *iov, loff_t offset,
7308 			unsigned long nr_segs)
7309 {
7310 	int seg;
7311 	int i;
7312 	size_t size;
7313 	unsigned long addr;
7314 	unsigned blocksize_mask = root->sectorsize - 1;
7315 	ssize_t retval = -EINVAL;
7316 	loff_t end = offset;
7317 
7318 	if (offset & blocksize_mask)
7319 		goto out;
7320 
7321 	/* Check the memory alignment.  Blocks cannot straddle pages */
7322 	for (seg = 0; seg < nr_segs; seg++) {
7323 		addr = (unsigned long)iov[seg].iov_base;
7324 		size = iov[seg].iov_len;
7325 		end += size;
7326 		if ((addr & blocksize_mask) || (size & blocksize_mask))
7327 			goto out;
7328 
7329 		/* If this is a write we don't need to check anymore */
7330 		if (rw & WRITE)
7331 			continue;
7332 
7333 		/*
7334 		 * Check to make sure we don't have duplicate iov_base's in this
7335 		 * iovec, if so return EINVAL, otherwise we'll get csum errors
7336 		 * when reading back.
7337 		 */
7338 		for (i = seg + 1; i < nr_segs; i++) {
7339 			if (iov[seg].iov_base == iov[i].iov_base)
7340 				goto out;
7341 		}
7342 	}
7343 	retval = 0;
7344 out:
7345 	return retval;
7346 }
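
/*
 * A worked example of the checks above (a sketch, assuming a 4K
 * sectorsize): blocksize_mask is 4095, so a read at offset 4096 from a
 * 4096-byte, 4K-aligned buffer passes, while offset 4100 or a 512-byte
 * segment fails; btrfs_direct_IO() below then returns 0 and the caller
 * falls back to buffered IO.
 */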
7347 
7348 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
7349 			const struct iovec *iov, loff_t offset,
7350 			unsigned long nr_segs)
7351 {
7352 	struct file *file = iocb->ki_filp;
7353 	struct inode *inode = file->f_mapping->host;
7354 	size_t count = 0;
7355 	int flags = 0;
7356 	bool wakeup = true;
7357 	bool relock = false;
7358 	ssize_t ret;
7359 
7360 	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
7361 			    offset, nr_segs))
7362 		return 0;
7363 
7364 	atomic_inc(&inode->i_dio_count);
7365 	smp_mb__after_atomic_inc();
7366 
7367 	if (rw & WRITE) {
7368 		count = iov_length(iov, nr_segs);
7369 		/*
7370 		 * If the write DIO goes beyond EOF, we need to update
7371 		 * the isize, which is protected by i_mutex, so we cannot
7372 		 * unlock the i_mutex in that case.
7373 		 */
7374 		if (offset + count <= inode->i_size) {
7375 			mutex_unlock(&inode->i_mutex);
7376 			relock = true;
7377 		}
7378 		ret = btrfs_delalloc_reserve_space(inode, count);
7379 		if (ret)
7380 			goto out;
7381 	} else if (unlikely(test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
7382 				     &BTRFS_I(inode)->runtime_flags))) {
7383 		inode_dio_done(inode);
7384 		flags = DIO_LOCKING | DIO_SKIP_HOLES;
7385 		wakeup = false;
7386 	}
7387 
7388 	ret = __blockdev_direct_IO(rw, iocb, inode,
7389 			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
7390 			iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
7391 			btrfs_submit_direct, flags);
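	/*
	 * For writes: on error or a short write, give back the delalloc
	 * reservation for the bytes that were never submitted; if
	 * everything was submitted (or queued), the data reservation is
	 * consumed and only the metadata accounting is released.
	 */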
7392 	if (rw & WRITE) {
7393 		if (ret < 0 && ret != -EIOCBQUEUED)
7394 			btrfs_delalloc_release_space(inode, count);
7395 		else if (ret >= 0 && (size_t)ret < count)
7396 			btrfs_delalloc_release_space(inode,
7397 						     count - (size_t)ret);
7398 		else
7399 			btrfs_delalloc_release_metadata(inode, 0);
7400 	}
7401 out:
7402 	if (wakeup)
7403 		inode_dio_done(inode);
7404 	if (relock)
7405 		mutex_lock(&inode->i_mutex);
7406 
7407 	return ret;
7408 }
7409 
7410 #define BTRFS_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC)
7411 
7412 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
7413 		__u64 start, __u64 len)
7414 {
7415 	int	ret;
7416 
7417 	ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
7418 	if (ret)
7419 		return ret;
7420 
7421 	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
7422 }
7423 
7424 int btrfs_readpage(struct file *file, struct page *page)
7425 {
7426 	struct extent_io_tree *tree;
7427 	tree = &BTRFS_I(page->mapping->host)->io_tree;
7428 	return extent_read_full_page(tree, page, btrfs_get_extent, 0);
7429 }
7430 
7431 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
7432 {
7433 	struct extent_io_tree *tree;
7434 
7436 	if (current->flags & PF_MEMALLOC) {
7437 		redirty_page_for_writepage(wbc, page);
7438 		unlock_page(page);
7439 		return 0;
7440 	}
7441 	tree = &BTRFS_I(page->mapping->host)->io_tree;
7442 	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
7443 }
7444 
7445 static int btrfs_writepages(struct address_space *mapping,
7446 			    struct writeback_control *wbc)
7447 {
7448 	struct extent_io_tree *tree;
7449 
7450 	tree = &BTRFS_I(mapping->host)->io_tree;
7451 	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
7452 }
7453 
7454 static int
7455 btrfs_readpages(struct file *file, struct address_space *mapping,
7456 		struct list_head *pages, unsigned nr_pages)
7457 {
7458 	struct extent_io_tree *tree;
7459 	tree = &BTRFS_I(mapping->host)->io_tree;
7460 	return extent_readpages(tree, mapping, pages, nr_pages,
7461 				btrfs_get_extent);
7462 }
7463 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
7464 {
7465 	struct extent_io_tree *tree;
7466 	struct extent_map_tree *map;
7467 	int ret;
7468 
7469 	tree = &BTRFS_I(page->mapping->host)->io_tree;
7470 	map = &BTRFS_I(page->mapping->host)->extent_tree;
7471 	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
7472 	if (ret == 1) {
7473 		ClearPagePrivate(page);
7474 		set_page_private(page, 0);
7475 		page_cache_release(page);
7476 	}
7477 	return ret;
7478 }
7479 
7480 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
7481 {
7482 	if (PageWriteback(page) || PageDirty(page))
7483 		return 0;
7484 	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
7485 }
7486 
7487 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
7488 {
7489 	struct inode *inode = page->mapping->host;
7490 	struct extent_io_tree *tree;
7491 	struct btrfs_ordered_extent *ordered;
7492 	struct extent_state *cached_state = NULL;
7493 	u64 page_start = page_offset(page);
7494 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
7495 
7496 	/*
7497 	 * we have the page locked, so new writeback can't start,
7498 	 * and the dirty bit won't be cleared while we are here.
7499 	 *
7500 	 * Wait for IO on this page so that we can safely clear
7501 	 * the PagePrivate2 bit and do ordered accounting
7502 	 */
7503 	wait_on_page_writeback(page);
7504 
7505 	tree = &BTRFS_I(inode)->io_tree;
7506 	if (offset) {
7507 		btrfs_releasepage(page, GFP_NOFS);
7508 		return;
7509 	}
7510 	lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
7511 	ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
7512 	if (ordered) {
7513 		/*
7514 		 * IO on this page will never be started, so we need
7515 		 * to account for any ordered extents now
7516 		 */
7517 		clear_extent_bit(tree, page_start, page_end,
7518 				 EXTENT_DIRTY | EXTENT_DELALLOC |
7519 				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
7520 				 EXTENT_DEFRAG, 1, 0, &cached_state, GFP_NOFS);
7521 		/*
7522 		 * whoever cleared the private bit is responsible
7523 		 * for the finish_ordered_io
7524 		 */
7525 		if (TestClearPagePrivate2(page) &&
7526 		    btrfs_dec_test_ordered_pending(inode, &ordered, page_start,
7527 						   PAGE_CACHE_SIZE, 1)) {
7528 			btrfs_finish_ordered_io(ordered);
7529 		}
7530 		btrfs_put_ordered_extent(ordered);
7531 		cached_state = NULL;
7532 		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
7533 	}
7534 	clear_extent_bit(tree, page_start, page_end,
7535 		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
7536 		 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
7537 		 &cached_state, GFP_NOFS);
7538 	__btrfs_releasepage(page, GFP_NOFS);
7539 
7540 	ClearPageChecked(page);
7541 	if (PagePrivate(page)) {
7542 		ClearPagePrivate(page);
7543 		set_page_private(page, 0);
7544 		page_cache_release(page);
7545 	}
7546 }
7547 
7548 /*
7549  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
7550  * called from a page fault handler when a page is first dirtied. Hence we must
7551  * be careful to check for EOF conditions here. We set the page up correctly
7552  * for a written page which means we get ENOSPC checking when writing into
7553  * holes and correct delalloc and unwritten extent mapping on filesystems that
7554  * support these features.
7555  *
7556  * We are not allowed to take the i_mutex here so we have to play games to
7557  * protect against truncate races as the page could now be beyond EOF.  Because
7558  * vmtruncate() writes the inode size before removing pages, once we have the
7559  * page lock we can determine safely if the page is beyond EOF. If it is not
7560  * beyond EOF, then the page is guaranteed safe against truncation until we
7561  * unlock the page.
7562  */
7563 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
7564 {
7565 	struct page *page = vmf->page;
7566 	struct inode *inode = file_inode(vma->vm_file);
7567 	struct btrfs_root *root = BTRFS_I(inode)->root;
7568 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7569 	struct btrfs_ordered_extent *ordered;
7570 	struct extent_state *cached_state = NULL;
7571 	char *kaddr;
7572 	unsigned long zero_start;
7573 	loff_t size;
7574 	int ret;
7575 	int reserved = 0;
7576 	u64 page_start;
7577 	u64 page_end;
7578 
7579 	sb_start_pagefault(inode->i_sb);
7580 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
7581 	if (!ret) {
7582 		ret = file_update_time(vma->vm_file);
7583 		reserved = 1;
7584 	}
7585 	if (ret) {
7586 		if (ret == -ENOMEM)
7587 			ret = VM_FAULT_OOM;
7588 		else /* -ENOSPC, -EIO, etc */
7589 			ret = VM_FAULT_SIGBUS;
7590 		if (reserved)
7591 			goto out;
7592 		goto out_noreserve;
7593 	}
7594 
7595 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
7596 again:
7597 	lock_page(page);
7598 	size = i_size_read(inode);
7599 	page_start = page_offset(page);
7600 	page_end = page_start + PAGE_CACHE_SIZE - 1;
7601 
7602 	if ((page->mapping != inode->i_mapping) ||
7603 	    (page_start >= size)) {
7604 		/* page got truncated out from underneath us */
7605 		goto out_unlock;
7606 	}
7607 	wait_on_page_writeback(page);
7608 
7609 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
7610 	set_page_extent_mapped(page);
7611 
7612 	/*
7613 	 * we can't set the delalloc bits if there are pending ordered
7614 	 * extents.  Drop our locks and wait for them to finish
7615 	 */
7616 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
7617 	if (ordered) {
7618 		unlock_extent_cached(io_tree, page_start, page_end,
7619 				     &cached_state, GFP_NOFS);
7620 		unlock_page(page);
7621 		btrfs_start_ordered_extent(inode, ordered, 1);
7622 		btrfs_put_ordered_extent(ordered);
7623 		goto again;
7624 	}
7625 
7626 	/*
7627 	 * XXX - page_mkwrite gets called every time the page is dirtied, even
7628 	 * if it was already dirty, so for space accounting reasons we need to
7629 	 * clear any delalloc bits for the range we are fixing to save.  There
7630 	 * is probably a better way to do this, but for now keep consistent with
7631 	 * prepare_pages in the normal write path.
7632 	 */
7633 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
7634 			  EXTENT_DIRTY | EXTENT_DELALLOC |
7635 			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
7636 			  0, 0, &cached_state, GFP_NOFS);
7637 
7638 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
7639 					&cached_state);
7640 	if (ret) {
7641 		unlock_extent_cached(io_tree, page_start, page_end,
7642 				     &cached_state, GFP_NOFS);
7643 		ret = VM_FAULT_SIGBUS;
7644 		goto out_unlock;
7645 	}
7646 	ret = 0;
7647 
7648 	/* page is wholly or partially inside EOF */
7649 	if (page_start + PAGE_CACHE_SIZE > size)
7650 		zero_start = size & ~PAGE_CACHE_MASK;
7651 	else
7652 		zero_start = PAGE_CACHE_SIZE;
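	/*
	 * For example (assuming 4K pages): with i_size == 6000, the page
	 * at offset 4096 is only partially inside EOF, so zero_start =
	 * 6000 & 4095 = 1904 and bytes 1904..4095 are zeroed below
	 * before the page is dirtied.
	 */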
7653 
7654 	if (zero_start != PAGE_CACHE_SIZE) {
7655 		kaddr = kmap(page);
7656 		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
7657 		flush_dcache_page(page);
7658 		kunmap(page);
7659 	}
7660 	ClearPageChecked(page);
7661 	set_page_dirty(page);
7662 	SetPageUptodate(page);
7663 
7664 	BTRFS_I(inode)->last_trans = root->fs_info->generation;
7665 	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
7666 	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
7667 
7668 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
7669 
7670 out_unlock:
7671 	if (!ret) {
7672 		sb_end_pagefault(inode->i_sb);
7673 		return VM_FAULT_LOCKED;
7674 	}
7675 	unlock_page(page);
7676 out:
7677 	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
7678 out_noreserve:
7679 	sb_end_pagefault(inode->i_sb);
7680 	return ret;
7681 }
7682 
7683 static int btrfs_truncate(struct inode *inode)
7684 {
7685 	struct btrfs_root *root = BTRFS_I(inode)->root;
7686 	struct btrfs_block_rsv *rsv;
7687 	int ret;
7688 	int err = 0;
7689 	struct btrfs_trans_handle *trans;
7690 	u64 mask = root->sectorsize - 1;
7691 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
7692 
7693 	ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
7694 	if (ret)
7695 		return ret;
7696 
7697 	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
7698 	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
7699 
7700 	/*
7701 	 * Yes, ladies and gentlemen, this is indeed ugly.  The fact is we have
7702 	 * 3 things going on here
7703 	 *
7704 	 * 1) We need to reserve space for our orphan item and the space to
7705 	 * delete our orphan item.  Lord knows we don't want to have a dangling
7706 	 * orphan item because we didn't reserve space to remove it.
7707 	 *
7708 	 * 2) We need to reserve space to update our inode.
7709 	 *
7710 	 * 3) We need to have something to cache all the space that is going to
7711 	 * be freed up by the truncate operation, but also have some slack
7712 	 * space reserved in case it uses space during the truncate (thank you
7713 	 * very much snapshotting).
7714 	 *
7715 	 * And we need these to all be separate.  The fact is we can use a lot
7716 	 * of space doing the truncate, and we have no earthly idea how much
7717 	 * space we will use, so we need the truncate reservation to be
7718 	 * separate so it doesn't end up using space reserved for updating the
7719 	 * inode or removing the orphan item.  We also need to be able to stop
7720 	 * the transaction and start a new one, which means we need to be able
7721 	 * to update the inode several times, and we have no way of knowing how
7722 	 * many times that will be, so we can't just reserve 1 item for the
7723 	 * entirety of the operation, so that has to be done separately as well.
7724 	 * Then there is the orphan item, which does indeed need to be held on
7725 	 * to for the whole operation, and we need nobody to touch this reserved
7726 	 * space except the orphan code.
7727 	 *
7728 	 * So that leaves us with
7729 	 *
7730 	 * 1) root->orphan_block_rsv - for the orphan deletion.
7731 	 * 2) rsv - for the truncate reservation, which we will steal from the
7732 	 * transaction reservation.
7733 	 * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
7734 	 * updating the inode.
7735 	 */
7736 	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
7737 	if (!rsv)
7738 		return -ENOMEM;
7739 	rsv->size = min_size;
7740 	rsv->failfast = 1;
7741 
7742 	/*
7743 	 * 1 for the truncate slack space
7744 	 * 1 for updating the inode.
7745 	 */
7746 	trans = btrfs_start_transaction(root, 2);
7747 	if (IS_ERR(trans)) {
7748 		err = PTR_ERR(trans);
7749 		goto out;
7750 	}
7751 
7752 	/* Migrate the slack space for the truncate to our reserve */
7753 	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
7754 				      min_size);
7755 	BUG_ON(ret);
7756 
7757 	/*
7758 	 * setattr is responsible for setting the ordered_data_close flag,
7759 	 * but that is only tested during the last file release.  That
7760 	 * could happen well after the next commit, leaving a great big
7761 	 * window where new writes may get lost if someone chooses to write
7762 	 * to this file after truncating to zero
7763 	 *
7764 	 * The inode doesn't have any dirty data here, and so if we commit
7765 	 * this is a noop.  If someone immediately starts writing to the inode
7766 	 * it is very likely we'll catch some of their writes in this
7767 	 * transaction, and the commit will find this file on the ordered
7768 	 * data list with good things to send down.
7769 	 *
7770 	 * This is a best effort solution, there is still a window where
7771 	 * using truncate to replace the contents of the file will
7772 	 * end up with a zero length file after a crash.
7773 	 */
7774 	if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
7775 					   &BTRFS_I(inode)->runtime_flags))
7776 		btrfs_add_ordered_operation(trans, root, inode);
7777 
7778 	/*
7779 	 * So if we truncate and then write and fsync we normally would just
7780 	 * write the extents that changed, which is a problem if we need to
7781 	 * first truncate that entire inode.  So set this flag so we write out
7782 	 * all of the extents in the inode to the sync log so we're completely
7783 	 * safe.
7784 	 */
7785 	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
7786 	trans->block_rsv = rsv;
7787 
7788 	while (1) {
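	/*
	 * Truncate in chunks: each pass drops as many extent items as
	 * the reservation allows; on -ENOSPC we update the inode, end
	 * the transaction, start a fresh one with a re-migrated truncate
	 * reservation, and continue where we left off.
	 */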
7789 		ret = btrfs_truncate_inode_items(trans, root, inode,
7790 						 inode->i_size,
7791 						 BTRFS_EXTENT_DATA_KEY);
7792 		if (ret != -ENOSPC) {
7793 			err = ret;
7794 			break;
7795 		}
7796 
7797 		trans->block_rsv = &root->fs_info->trans_block_rsv;
7798 		ret = btrfs_update_inode(trans, root, inode);
7799 		if (ret) {
7800 			err = ret;
7801 			break;
7802 		}
7803 
7804 		btrfs_end_transaction(trans, root);
7805 		btrfs_btree_balance_dirty(root);
7806 
7807 		trans = btrfs_start_transaction(root, 2);
7808 		if (IS_ERR(trans)) {
7809 			ret = err = PTR_ERR(trans);
7810 			trans = NULL;
7811 			break;
7812 		}
7813 
7814 		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
7815 					      rsv, min_size);
7816 		BUG_ON(ret);	/* shouldn't happen */
7817 		trans->block_rsv = rsv;
7818 	}
7819 
7820 	if (ret == 0 && inode->i_nlink > 0) {
7821 		trans->block_rsv = root->orphan_block_rsv;
7822 		ret = btrfs_orphan_del(trans, inode);
7823 		if (ret)
7824 			err = ret;
7825 	}
7826 
7827 	if (trans) {
7828 		trans->block_rsv = &root->fs_info->trans_block_rsv;
7829 		ret = btrfs_update_inode(trans, root, inode);
7830 		if (ret && !err)
7831 			err = ret;
7832 
7833 		ret = btrfs_end_transaction(trans, root);
7834 		btrfs_btree_balance_dirty(root);
7835 	}
7836 
7837 out:
7838 	btrfs_free_block_rsv(root, rsv);
7839 
7840 	if (ret && !err)
7841 		err = ret;
7842 
7843 	return err;
7844 }
7845 
7846 /*
7847  * create a new subvolume directory/inode (helper for the ioctl).
7848  */
7849 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
7850 			     struct btrfs_root *new_root, u64 new_dirid)
7851 {
7852 	struct inode *inode;
7853 	int err;
7854 	u64 index = 0;
7855 
7856 	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
7857 				new_dirid, new_dirid,
7858 				S_IFDIR | (~current_umask() & S_IRWXUGO),
7859 				&index);
7860 	if (IS_ERR(inode))
7861 		return PTR_ERR(inode);
7862 	inode->i_op = &btrfs_dir_inode_operations;
7863 	inode->i_fop = &btrfs_dir_file_operations;
7864 
7865 	set_nlink(inode, 1);
7866 	btrfs_i_size_write(inode, 0);
7867 
7868 	err = btrfs_update_inode(trans, new_root, inode);
7869 
7870 	iput(inode);
7871 	return err;
7872 }
7873 
7874 struct inode *btrfs_alloc_inode(struct super_block *sb)
7875 {
7876 	struct btrfs_inode *ei;
7877 	struct inode *inode;
7878 
7879 	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
7880 	if (!ei)
7881 		return NULL;
7882 
7883 	ei->root = NULL;
7884 	ei->generation = 0;
7885 	ei->last_trans = 0;
7886 	ei->last_sub_trans = 0;
7887 	ei->logged_trans = 0;
7888 	ei->delalloc_bytes = 0;
7889 	ei->disk_i_size = 0;
7890 	ei->flags = 0;
7891 	ei->csum_bytes = 0;
7892 	ei->index_cnt = (u64)-1;
7893 	ei->last_unlink_trans = 0;
7894 	ei->last_log_commit = 0;
7895 
7896 	spin_lock_init(&ei->lock);
7897 	ei->outstanding_extents = 0;
7898 	ei->reserved_extents = 0;
7899 
7900 	ei->runtime_flags = 0;
7901 	ei->force_compress = BTRFS_COMPRESS_NONE;
7902 
7903 	ei->delayed_node = NULL;
7904 
7905 	inode = &ei->vfs_inode;
7906 	extent_map_tree_init(&ei->extent_tree);
7907 	extent_io_tree_init(&ei->io_tree, &inode->i_data);
7908 	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
7909 	ei->io_tree.track_uptodate = 1;
7910 	ei->io_failure_tree.track_uptodate = 1;
7911 	atomic_set(&ei->sync_writers, 0);
7912 	mutex_init(&ei->log_mutex);
7913 	mutex_init(&ei->delalloc_mutex);
7914 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
7915 	INIT_LIST_HEAD(&ei->delalloc_inodes);
7916 	INIT_LIST_HEAD(&ei->ordered_operations);
7917 	RB_CLEAR_NODE(&ei->rb_node);
7918 
7919 	return inode;
7920 }
7921 
7922 static void btrfs_i_callback(struct rcu_head *head)
7923 {
7924 	struct inode *inode = container_of(head, struct inode, i_rcu);
7925 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
7926 }
7927 
7928 void btrfs_destroy_inode(struct inode *inode)
7929 {
7930 	struct btrfs_ordered_extent *ordered;
7931 	struct btrfs_root *root = BTRFS_I(inode)->root;
7932 
7933 	WARN_ON(!hlist_empty(&inode->i_dentry));
7934 	WARN_ON(inode->i_data.nrpages);
7935 	WARN_ON(BTRFS_I(inode)->outstanding_extents);
7936 	WARN_ON(BTRFS_I(inode)->reserved_extents);
7937 	WARN_ON(BTRFS_I(inode)->delalloc_bytes);
7938 	WARN_ON(BTRFS_I(inode)->csum_bytes);
7939 
7940 	/*
7941 	 * This can happen where we create an inode, but somebody else also
7942 	 * created the same inode and we need to destroy the one we already
7943 	 * created.
7944 	 */
7945 	if (!root)
7946 		goto free;
7947 
7948 	/*
7949 	 * Make sure we're properly removed from the ordered operation
7950 	 * lists.
7951 	 */
7952 	smp_mb();
7953 	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
7954 		spin_lock(&root->fs_info->ordered_extent_lock);
7955 		list_del_init(&BTRFS_I(inode)->ordered_operations);
7956 		spin_unlock(&root->fs_info->ordered_extent_lock);
7957 	}
7958 
7959 	if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
7960 		     &BTRFS_I(inode)->runtime_flags)) {
7961 		btrfs_info(root->fs_info, "inode %llu still on the orphan list",
7962 			(unsigned long long)btrfs_ino(inode));
7963 		atomic_dec(&root->orphan_inodes);
7964 	}
7965 
7966 	while (1) {
7967 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
7968 		if (!ordered)
7969 			break;
7970 		else {
7971 			btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
7972 				(unsigned long long)ordered->file_offset,
7973 				(unsigned long long)ordered->len);
7974 			btrfs_remove_ordered_extent(inode, ordered);
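			/*
			 * Put twice: once for the lookup reference and
			 * once for the base reference, since ordered IO
			 * will never run to complete this extent now.
			 */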
7975 			btrfs_put_ordered_extent(ordered);
7976 			btrfs_put_ordered_extent(ordered);
7977 		}
7978 	}
7979 	inode_tree_del(inode);
7980 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
7981 free:
7982 	btrfs_remove_delayed_node(inode);
7983 	call_rcu(&inode->i_rcu, btrfs_i_callback);
7984 }
7985 
7986 int btrfs_drop_inode(struct inode *inode)
7987 {
7988 	struct btrfs_root *root = BTRFS_I(inode)->root;
7989 
7990 	/* the snap/subvol tree is on deleting */
7991 	if (btrfs_root_refs(&root->root_item) == 0 &&
7992 	    root != root->fs_info->tree_root)
7993 		return 1;
7994 	else
7995 		return generic_drop_inode(inode);
7996 }
7997 
7998 static void init_once(void *foo)
7999 {
8000 	struct btrfs_inode *ei = (struct btrfs_inode *) foo;
8001 
8002 	inode_init_once(&ei->vfs_inode);
8003 }
8004 
8005 void btrfs_destroy_cachep(void)
8006 {
8007 	/*
8008 	 * Make sure all delayed rcu free inodes are flushed before we
8009 	 * destroy cache.
8010 	 */
8011 	rcu_barrier();
8012 	if (btrfs_inode_cachep)
8013 		kmem_cache_destroy(btrfs_inode_cachep);
8014 	if (btrfs_trans_handle_cachep)
8015 		kmem_cache_destroy(btrfs_trans_handle_cachep);
8016 	if (btrfs_transaction_cachep)
8017 		kmem_cache_destroy(btrfs_transaction_cachep);
8018 	if (btrfs_path_cachep)
8019 		kmem_cache_destroy(btrfs_path_cachep);
8020 	if (btrfs_free_space_cachep)
8021 		kmem_cache_destroy(btrfs_free_space_cachep);
8022 	if (btrfs_delalloc_work_cachep)
8023 		kmem_cache_destroy(btrfs_delalloc_work_cachep);
8024 }
8025 
8026 int btrfs_init_cachep(void)
8027 {
8028 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
8029 			sizeof(struct btrfs_inode), 0,
8030 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
8031 	if (!btrfs_inode_cachep)
8032 		goto fail;
8033 
8034 	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
8035 			sizeof(struct btrfs_trans_handle), 0,
8036 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
8037 	if (!btrfs_trans_handle_cachep)
8038 		goto fail;
8039 
8040 	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
8041 			sizeof(struct btrfs_transaction), 0,
8042 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
8043 	if (!btrfs_transaction_cachep)
8044 		goto fail;
8045 
8046 	btrfs_path_cachep = kmem_cache_create("btrfs_path",
8047 			sizeof(struct btrfs_path), 0,
8048 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
8049 	if (!btrfs_path_cachep)
8050 		goto fail;
8051 
8052 	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
8053 			sizeof(struct btrfs_free_space), 0,
8054 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
8055 	if (!btrfs_free_space_cachep)
8056 		goto fail;
8057 
8058 	btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
8059 			sizeof(struct btrfs_delalloc_work), 0,
8060 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
8061 			NULL);
8062 	if (!btrfs_delalloc_work_cachep)
8063 		goto fail;
8064 
8065 	return 0;
8066 fail:
8067 	btrfs_destroy_cachep();
8068 	return -ENOMEM;
8069 }
8070 
8071 static int btrfs_getattr(struct vfsmount *mnt,
8072 			 struct dentry *dentry, struct kstat *stat)
8073 {
8074 	u64 delalloc_bytes;
8075 	struct inode *inode = dentry->d_inode;
8076 	u32 blocksize = inode->i_sb->s_blocksize;
8077 
8078 	generic_fillattr(inode, stat);
8079 	stat->dev = BTRFS_I(inode)->root->anon_dev;
8080 	stat->blksize = PAGE_CACHE_SIZE;
8081 
8082 	spin_lock(&BTRFS_I(inode)->lock);
8083 	delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
8084 	spin_unlock(&BTRFS_I(inode)->lock);
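	/*
	 * st_blocks is reported in 512-byte sectors; count in-flight
	 * delalloc bytes as well so buffered writes show up in the
	 * block count before they reach disk.
	 */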
8085 	stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
8086 			ALIGN(delalloc_bytes, blocksize)) >> 9;
8087 	return 0;
8088 }
8089 
8090 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
8091 			   struct inode *new_dir, struct dentry *new_dentry)
8092 {
8093 	struct btrfs_trans_handle *trans;
8094 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
8095 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8096 	struct inode *new_inode = new_dentry->d_inode;
8097 	struct inode *old_inode = old_dentry->d_inode;
8098 	struct timespec ctime = CURRENT_TIME;
8099 	u64 index = 0;
8100 	u64 root_objectid;
8101 	int ret;
8102 	u64 old_ino = btrfs_ino(old_inode);
8103 
8104 	if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
8105 		return -EPERM;
8106 
8107 	/* we only allow rename subvolume link between subvolumes */
8108 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
8109 		return -EXDEV;
8110 
8111 	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
8112 	    (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
8113 		return -ENOTEMPTY;
8114 
8115 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
8116 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
8117 		return -ENOTEMPTY;
8118 
8119 
8120 	/* check for collisions, even if the name isn't there */
8121 	ret = btrfs_check_dir_item_collision(root, new_dir->i_ino,
8122 			     new_dentry->d_name.name,
8123 			     new_dentry->d_name.len);
8124 
8125 	if (ret) {
8126 		if (ret == -EEXIST) {
8127 			/* we shouldn't get -EEXIST without a new_inode */
8129 			if (!new_inode) {
8130 				WARN_ON(1);
8131 				return ret;
8132 			}
8133 		} else {
8134 			/* maybe -EOVERFLOW */
8135 			return ret;
8136 		}
8137 	}
8138 	ret = 0;
8139 
8140 	/*
8141 	 * we're using rename to replace one file with another, and the
8142 	 * replacement file is large.  Start IO on it now so we don't add
8143 	 * too much work to the end of the transaction.
8144 	 */
8145 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
8146 	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
8147 		filemap_flush(old_inode->i_mapping);
8148 
8149 	/* close the racy window with snapshot create/destroy ioctl */
8150 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
8151 		down_read(&root->fs_info->subvol_sem);
8152 	/*
8153 	 * We want to reserve the absolute worst case amount of items.  So if
8154 	 * both inodes are subvols and we need to unlink them then that would
8155 	 * require 4 item modifications, but if they are both normal inodes it
8156 	 * would require 5 item modifications, so we'll assume they're normal
8157 	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
8158 	 * should cover the worst case number of items we'll modify.
8159 	 */
8160 	trans = btrfs_start_transaction(root, 11);
8161 	if (IS_ERR(trans)) {
8162 		ret = PTR_ERR(trans);
8163 		goto out_notrans;
8164 	}
8165 
8166 	if (dest != root)
8167 		btrfs_record_root_in_trans(trans, dest);
8168 
8169 	ret = btrfs_set_inode_index(new_dir, &index);
8170 	if (ret)
8171 		goto out_fail;
8172 
8173 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8174 		/* force full log commit if subvolume involved. */
8175 		root->fs_info->last_trans_log_full_commit = trans->transid;
8176 	} else {
8177 		ret = btrfs_insert_inode_ref(trans, dest,
8178 					     new_dentry->d_name.name,
8179 					     new_dentry->d_name.len,
8180 					     old_ino,
8181 					     btrfs_ino(new_dir), index);
8182 		if (ret)
8183 			goto out_fail;
8184 		/*
8185 		 * this is an ugly little race, but the rename is required
8186 		 * to make sure that if we crash, the inode is either at the
8187 		 * old name or the new one.  pinning the log transaction lets
8188 		 * us make sure we don't allow a log commit to come in after
8189 		 * we unlink the name but before we add the new name back in.
8190 		 */
8191 		btrfs_pin_log_trans(root);
8192 	}
8193 	/*
8194 	 * make sure the inode gets flushed if it is replacing
8195 	 * something.
8196 	 */
8197 	if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
8198 		btrfs_add_ordered_operation(trans, root, old_inode);
8199 
8200 	inode_inc_iversion(old_dir);
8201 	inode_inc_iversion(new_dir);
8202 	inode_inc_iversion(old_inode);
8203 	old_dir->i_ctime = old_dir->i_mtime = ctime;
8204 	new_dir->i_ctime = new_dir->i_mtime = ctime;
8205 	old_inode->i_ctime = ctime;
8206 
8207 	if (old_dentry->d_parent != new_dentry->d_parent)
8208 		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
8209 
8210 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8211 		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
8212 		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
8213 					old_dentry->d_name.name,
8214 					old_dentry->d_name.len);
8215 	} else {
8216 		ret = __btrfs_unlink_inode(trans, root, old_dir,
8217 					old_dentry->d_inode,
8218 					old_dentry->d_name.name,
8219 					old_dentry->d_name.len);
8220 		if (!ret)
8221 			ret = btrfs_update_inode(trans, root, old_inode);
8222 	}
8223 	if (ret) {
8224 		btrfs_abort_transaction(trans, root, ret);
8225 		goto out_fail;
8226 	}
8227 
8228 	if (new_inode) {
8229 		inode_inc_iversion(new_inode);
8230 		new_inode->i_ctime = CURRENT_TIME;
8231 		if (unlikely(btrfs_ino(new_inode) ==
8232 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
8233 			root_objectid = BTRFS_I(new_inode)->location.objectid;
8234 			ret = btrfs_unlink_subvol(trans, dest, new_dir,
8235 						root_objectid,
8236 						new_dentry->d_name.name,
8237 						new_dentry->d_name.len);
8238 			BUG_ON(new_inode->i_nlink == 0);
8239 		} else {
8240 			ret = btrfs_unlink_inode(trans, dest, new_dir,
8241 						 new_dentry->d_inode,
8242 						 new_dentry->d_name.name,
8243 						 new_dentry->d_name.len);
8244 		}
8245 		if (!ret && new_inode->i_nlink == 0) {
8246 			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
8247 			BUG_ON(ret);
8248 		}
8249 		if (ret) {
8250 			btrfs_abort_transaction(trans, root, ret);
8251 			goto out_fail;
8252 		}
8253 	}
8254 
8255 	ret = btrfs_add_link(trans, new_dir, old_inode,
8256 			     new_dentry->d_name.name,
8257 			     new_dentry->d_name.len, 0, index);
8258 	if (ret) {
8259 		btrfs_abort_transaction(trans, root, ret);
8260 		goto out_fail;
8261 	}
8262 
8263 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
8264 		struct dentry *parent = new_dentry->d_parent;
8265 		btrfs_log_new_name(trans, old_inode, old_dir, parent);
8266 		btrfs_end_log_trans(root);
8267 	}
8268 out_fail:
8269 	btrfs_end_transaction(trans, root);
8270 out_notrans:
8271 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
8272 		up_read(&root->fs_info->subvol_sem);
8273 
8274 	return ret;
8275 }
8276 
8277 static void btrfs_run_delalloc_work(struct btrfs_work *work)
8278 {
8279 	struct btrfs_delalloc_work *delalloc_work;
8280 
8281 	delalloc_work = container_of(work, struct btrfs_delalloc_work,
8282 				     work);
8283 	if (delalloc_work->wait)
8284 		btrfs_wait_ordered_range(delalloc_work->inode, 0, (u64)-1);
8285 	else
8286 		filemap_flush(delalloc_work->inode->i_mapping);
8287 
8288 	if (delalloc_work->delay_iput)
8289 		btrfs_add_delayed_iput(delalloc_work->inode);
8290 	else
8291 		iput(delalloc_work->inode);
8292 	complete(&delalloc_work->completion);
8293 }
8294 
8295 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
8296 						    int wait, int delay_iput)
8297 {
8298 	struct btrfs_delalloc_work *work;
8299 
8300 	work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
8301 	if (!work)
8302 		return NULL;
8303 
8304 	init_completion(&work->completion);
8305 	INIT_LIST_HEAD(&work->list);
8306 	work->inode = inode;
8307 	work->wait = wait;
8308 	work->delay_iput = delay_iput;
8309 	work->work.func = btrfs_run_delalloc_work;
8310 
8311 	return work;
8312 }
8313 
8314 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
8315 {
8316 	wait_for_completion(&work->completion);
8317 	kmem_cache_free(btrfs_delalloc_work_cachep, work);
8318 }
8319 
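/*
 * Typical usage, roughly as in btrfs_start_delalloc_inodes() below:
 *
 *	struct btrfs_delalloc_work *work;
 *
 *	work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
 *	if (!work)
 *		return -ENOMEM;
 *	btrfs_queue_worker(&root->fs_info->flush_workers, &work->work);
 *	...
 *	btrfs_wait_and_free_delalloc_work(work);
 */
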
8320 /*
8321  * some fairly slow code that needs optimization. This walks the list
8322  * of all the inodes with pending delalloc and forces them to disk.
8323  */
8324 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
8325 {
8326 	struct btrfs_inode *binode;
8327 	struct inode *inode;
8328 	struct btrfs_delalloc_work *work, *next;
8329 	struct list_head works;
8330 	struct list_head splice;
8331 	int ret = 0;
8332 
8333 	if (root->fs_info->sb->s_flags & MS_RDONLY)
8334 		return -EROFS;
8335 
8336 	INIT_LIST_HEAD(&works);
8337 	INIT_LIST_HEAD(&splice);
8338 
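	/*
	 * Splice the delalloc list onto a private list under the lock,
	 * then walk it, dropping the lock while each inode's flush work
	 * is set up and queued.
	 */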
8339 	spin_lock(&root->fs_info->delalloc_lock);
8340 	list_splice_init(&root->fs_info->delalloc_inodes, &splice);
8341 	while (!list_empty(&splice)) {
8342 		binode = list_entry(splice.next, struct btrfs_inode,
8343 				    delalloc_inodes);
8344 
8345 		list_del_init(&binode->delalloc_inodes);
8346 
8347 		inode = igrab(&binode->vfs_inode);
8348 		if (!inode) {
8349 			clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
8350 				  &binode->runtime_flags);
8351 			continue;
8352 		}
8353 
8354 		list_add_tail(&binode->delalloc_inodes,
8355 			      &root->fs_info->delalloc_inodes);
8356 		spin_unlock(&root->fs_info->delalloc_lock);
8357 
8358 		work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
8359 		if (unlikely(!work)) {
8360 			ret = -ENOMEM;
8361 			goto out;
8362 		}
8363 		list_add_tail(&work->list, &works);
8364 		btrfs_queue_worker(&root->fs_info->flush_workers,
8365 				   &work->work);
8366 
8367 		cond_resched();
8368 		spin_lock(&root->fs_info->delalloc_lock);
8369 	}
8370 	spin_unlock(&root->fs_info->delalloc_lock);
8371 
8372 	list_for_each_entry_safe(work, next, &works, list) {
8373 		list_del_init(&work->list);
8374 		btrfs_wait_and_free_delalloc_work(work);
8375 	}
8376 
8377 	/* the filemap_flush will queue IO into the worker threads, but
8378 	 * we have to make sure the IO is actually started and that
8379 	 * ordered extents get created before we return
8380 	 */
8381 	atomic_inc(&root->fs_info->async_submit_draining);
8382 	while (atomic_read(&root->fs_info->nr_async_submits) ||
8383 	      atomic_read(&root->fs_info->async_delalloc_pages)) {
8384 		wait_event(root->fs_info->async_submit_wait,
8385 		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
8386 		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
8387 	}
8388 	atomic_dec(&root->fs_info->async_submit_draining);
8389 	return 0;
8390 out:
8391 	list_for_each_entry_safe(work, next, &works, list) {
8392 		list_del_init(&work->list);
8393 		btrfs_wait_and_free_delalloc_work(work);
8394 	}
8395 
8396 	if (!list_empty_careful(&splice)) {
8397 		spin_lock(&root->fs_info->delalloc_lock);
8398 		list_splice_tail(&splice, &root->fs_info->delalloc_inodes);
8399 		spin_unlock(&root->fs_info->delalloc_lock);
8400 	}
8401 	return ret;
8402 }
8403 
8404 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
8405 			 const char *symname)
8406 {
8407 	struct btrfs_trans_handle *trans;
8408 	struct btrfs_root *root = BTRFS_I(dir)->root;
8409 	struct btrfs_path *path;
8410 	struct btrfs_key key;
8411 	struct inode *inode = NULL;
8412 	int err;
8413 	int drop_inode = 0;
8414 	u64 objectid;
8415 	u64 index = 0;
8416 	int name_len;
8417 	int datasize;
8418 	unsigned long ptr;
8419 	struct btrfs_file_extent_item *ei;
8420 	struct extent_buffer *leaf;
8421 
8422 	name_len = strlen(symname) + 1;
8423 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
8424 		return -ENAMETOOLONG;
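	/*
	 * The target string, trailing NUL included, is written below as
	 * an inline file extent, so it must fit in a single leaf; i_size
	 * is set to name_len - 1 at the end to exclude the NUL.
	 */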
8425 
8426 	/*
8427 	 * 2 items for inode item and ref
8428 	 * 2 items for dir items
8429 	 * 1 item for xattr if selinux is on
8430 	 */
8431 	trans = btrfs_start_transaction(root, 5);
8432 	if (IS_ERR(trans))
8433 		return PTR_ERR(trans);
8434 
8435 	err = btrfs_find_free_ino(root, &objectid);
8436 	if (err)
8437 		goto out_unlock;
8438 
8439 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
8440 				dentry->d_name.len, btrfs_ino(dir), objectid,
8441 				S_IFLNK|S_IRWXUGO, &index);
8442 	if (IS_ERR(inode)) {
8443 		err = PTR_ERR(inode);
8444 		goto out_unlock;
8445 	}
8446 
8447 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
8448 	if (err) {
8449 		drop_inode = 1;
8450 		goto out_unlock;
8451 	}
8452 
8453 	/*
8454 	 * If the active LSM wants to access the inode during
8455 	 * d_instantiate it needs these. Smack checks to see
8456 	 * if the filesystem supports xattrs by looking at the
8457 	 * ops vector.
8458 	 */
8459 	inode->i_fop = &btrfs_file_operations;
8460 	inode->i_op = &btrfs_file_inode_operations;
8461 
8462 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
8463 	if (err) {
8464 		drop_inode = 1;
8465 	} else {
8466 		inode->i_mapping->a_ops = &btrfs_aops;
8467 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
8468 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
8469 	}
8470 	if (drop_inode)
8471 		goto out_unlock;
8472 
8473 	path = btrfs_alloc_path();
8474 	if (!path) {
8475 		err = -ENOMEM;
8476 		drop_inode = 1;
8477 		goto out_unlock;
8478 	}
8479 	key.objectid = btrfs_ino(inode);
8480 	key.offset = 0;
8481 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
8482 	datasize = btrfs_file_extent_calc_inline_size(name_len);
8483 	err = btrfs_insert_empty_item(trans, root, path, &key,
8484 				      datasize);
8485 	if (err) {
8486 		drop_inode = 1;
8487 		btrfs_free_path(path);
8488 		goto out_unlock;
8489 	}
8490 	leaf = path->nodes[0];
8491 	ei = btrfs_item_ptr(leaf, path->slots[0],
8492 			    struct btrfs_file_extent_item);
8493 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
8494 	btrfs_set_file_extent_type(leaf, ei,
8495 				   BTRFS_FILE_EXTENT_INLINE);
8496 	btrfs_set_file_extent_encryption(leaf, ei, 0);
8497 	btrfs_set_file_extent_compression(leaf, ei, 0);
8498 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
8499 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
8500 
8501 	ptr = btrfs_file_extent_inline_start(ei);
8502 	write_extent_buffer(leaf, symname, ptr, name_len);
8503 	btrfs_mark_buffer_dirty(leaf);
8504 	btrfs_free_path(path);
8505 
8506 	inode->i_op = &btrfs_symlink_inode_operations;
8507 	inode->i_mapping->a_ops = &btrfs_symlink_aops;
8508 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
8509 	inode_set_bytes(inode, name_len);
8510 	btrfs_i_size_write(inode, name_len - 1);
8511 	err = btrfs_update_inode(trans, root, inode);
8512 	if (err)
8513 		drop_inode = 1;
8514 
8515 out_unlock:
8516 	if (!err)
8517 		d_instantiate(dentry, inode);
8518 	btrfs_end_transaction(trans, root);
8519 	if (drop_inode) {
8520 		inode_dec_link_count(inode);
8521 		iput(inode);
8522 	}
8523 	btrfs_btree_balance_dirty(root);
8524 	return err;
8525 }
8526 
8527 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
8528 				       u64 start, u64 num_bytes, u64 min_size,
8529 				       loff_t actual_len, u64 *alloc_hint,
8530 				       struct btrfs_trans_handle *trans)
8531 {
8532 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
8533 	struct extent_map *em;
8534 	struct btrfs_root *root = BTRFS_I(inode)->root;
8535 	struct btrfs_key ins;
8536 	u64 cur_offset = start;
8537 	u64 i_size;
8538 	u64 cur_bytes;
8539 	int ret = 0;
8540 	bool own_trans = true;
8541 
8542 	if (trans)
8543 		own_trans = false;
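	/*
	 * Allocate the range in chunks of at most 256M, inserting a
	 * PREALLOC file extent and a matching extent map for each chunk;
	 * every chunk gets its own transaction unless the caller passed
	 * one in.
	 */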
8544 	while (num_bytes > 0) {
8545 		if (own_trans) {
8546 			trans = btrfs_start_transaction(root, 3);
8547 			if (IS_ERR(trans)) {
8548 				ret = PTR_ERR(trans);
8549 				break;
8550 			}
8551 		}
8552 
8553 		cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
8554 		cur_bytes = max(cur_bytes, min_size);
8555 		ret = btrfs_reserve_extent(trans, root, cur_bytes,
8556 					   min_size, 0, *alloc_hint, &ins, 1);
8557 		if (ret) {
8558 			if (own_trans)
8559 				btrfs_end_transaction(trans, root);
8560 			break;
8561 		}
8562 
8563 		ret = insert_reserved_file_extent(trans, inode,
8564 						  cur_offset, ins.objectid,
8565 						  ins.offset, ins.offset,
8566 						  ins.offset, 0, 0, 0,
8567 						  BTRFS_FILE_EXTENT_PREALLOC);
8568 		if (ret) {
8569 			btrfs_abort_transaction(trans, root, ret);
8570 			if (own_trans)
8571 				btrfs_end_transaction(trans, root);
8572 			break;
8573 		}
8574 		btrfs_drop_extent_cache(inode, cur_offset,
8575 					cur_offset + ins.offset - 1, 0);
8576 
8577 		em = alloc_extent_map();
8578 		if (!em) {
8579 			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
8580 				&BTRFS_I(inode)->runtime_flags);
8581 			goto next;
8582 		}
8583 
8584 		em->start = cur_offset;
8585 		em->orig_start = cur_offset;
8586 		em->len = ins.offset;
8587 		em->block_start = ins.objectid;
8588 		em->block_len = ins.offset;
8589 		em->orig_block_len = ins.offset;
8590 		em->ram_bytes = ins.offset;
8591 		em->bdev = root->fs_info->fs_devices->latest_bdev;
8592 		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
8593 		em->generation = trans->transid;
8594 
8595 		while (1) {
8596 			write_lock(&em_tree->lock);
8597 			ret = add_extent_mapping(em_tree, em, 1);
8598 			write_unlock(&em_tree->lock);
8599 			if (ret != -EEXIST)
8600 				break;
8601 			btrfs_drop_extent_cache(inode, cur_offset,
8602 						cur_offset + ins.offset - 1,
8603 						0);
8604 		}
8605 		free_extent_map(em);
8606 next:
8607 		num_bytes -= ins.offset;
8608 		cur_offset += ins.offset;
8609 		*alloc_hint = ins.objectid + ins.offset;
8610 
8611 		inode_inc_iversion(inode);
8612 		inode->i_ctime = CURRENT_TIME;
8613 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
8614 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
8615 		    (actual_len > inode->i_size) &&
8616 		    (cur_offset > inode->i_size)) {
8617 			if (cur_offset > actual_len)
8618 				i_size = actual_len;
8619 			else
8620 				i_size = cur_offset;
8621 			i_size_write(inode, i_size);
8622 			btrfs_ordered_update_i_size(inode, i_size, NULL);
8623 		}
8624 
8625 		ret = btrfs_update_inode(trans, root, inode);
8626 
8627 		if (ret) {
8628 			btrfs_abort_transaction(trans, root, ret);
8629 			if (own_trans)
8630 				btrfs_end_transaction(trans, root);
8631 			break;
8632 		}
8633 
8634 		if (own_trans)
8635 			btrfs_end_transaction(trans, root);
8636 	}
8637 	return ret;
8638 }
8639 
8640 int btrfs_prealloc_file_range(struct inode *inode, int mode,
8641 			      u64 start, u64 num_bytes, u64 min_size,
8642 			      loff_t actual_len, u64 *alloc_hint)
8643 {
8644 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
8645 					   min_size, actual_len, alloc_hint,
8646 					   NULL);
8647 }
8648 
8649 int btrfs_prealloc_file_range_trans(struct inode *inode,
8650 				    struct btrfs_trans_handle *trans, int mode,
8651 				    u64 start, u64 num_bytes, u64 min_size,
8652 				    loff_t actual_len, u64 *alloc_hint)
8653 {
8654 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
8655 					   min_size, actual_len, alloc_hint, trans);
8656 }
8657 
8658 static int btrfs_set_page_dirty(struct page *page)
8659 {
8660 	return __set_page_dirty_nobuffers(page);
8661 }
8662 
8663 static int btrfs_permission(struct inode *inode, int mask)
8664 {
8665 	struct btrfs_root *root = BTRFS_I(inode)->root;
8666 	umode_t mode = inode->i_mode;
8667 
8668 	if (mask & MAY_WRITE &&
8669 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
8670 		if (btrfs_root_readonly(root))
8671 			return -EROFS;
8672 		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
8673 			return -EACCES;
8674 	}
8675 	return generic_permission(inode, mask);
8676 }
8677 
8678 static const struct inode_operations btrfs_dir_inode_operations = {
8679 	.getattr	= btrfs_getattr,
8680 	.lookup		= btrfs_lookup,
8681 	.create		= btrfs_create,
8682 	.unlink		= btrfs_unlink,
8683 	.link		= btrfs_link,
8684 	.mkdir		= btrfs_mkdir,
8685 	.rmdir		= btrfs_rmdir,
8686 	.rename		= btrfs_rename,
8687 	.symlink	= btrfs_symlink,
8688 	.setattr	= btrfs_setattr,
8689 	.mknod		= btrfs_mknod,
8690 	.setxattr	= btrfs_setxattr,
8691 	.getxattr	= btrfs_getxattr,
8692 	.listxattr	= btrfs_listxattr,
8693 	.removexattr	= btrfs_removexattr,
8694 	.permission	= btrfs_permission,
8695 	.get_acl	= btrfs_get_acl,
8696 };
8697 static const struct inode_operations btrfs_dir_ro_inode_operations = {
8698 	.lookup		= btrfs_lookup,
8699 	.permission	= btrfs_permission,
8700 	.get_acl	= btrfs_get_acl,
8701 };
8702 
8703 static const struct file_operations btrfs_dir_file_operations = {
8704 	.llseek		= generic_file_llseek,
8705 	.read		= generic_read_dir,
8706 	.readdir	= btrfs_real_readdir,
8707 	.unlocked_ioctl	= btrfs_ioctl,
8708 #ifdef CONFIG_COMPAT
8709 	.compat_ioctl	= btrfs_ioctl,
8710 #endif
8711 	.release        = btrfs_release_file,
8712 	.fsync		= btrfs_sync_file,
8713 };
8714 
8715 static struct extent_io_ops btrfs_extent_io_ops = {
8716 	.fill_delalloc = run_delalloc_range,
8717 	.submit_bio_hook = btrfs_submit_bio_hook,
8718 	.merge_bio_hook = btrfs_merge_bio_hook,
8719 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
8720 	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
8721 	.writepage_start_hook = btrfs_writepage_start_hook,
8722 	.set_bit_hook = btrfs_set_bit_hook,
8723 	.clear_bit_hook = btrfs_clear_bit_hook,
8724 	.merge_extent_hook = btrfs_merge_extent_hook,
8725 	.split_extent_hook = btrfs_split_extent_hook,
8726 };
8727 
8728 /*
8729  * btrfs doesn't support the bmap operation because swapfiles
8730  * use bmap to make a mapping of extents in the file.  They assume
8731  * these extents won't change over the life of the file and they
8732  * use the bmap result to do IO directly to the drive.
8733  *
8734  * the btrfs bmap call would return logical addresses that aren't
8735  * suitable for IO and they also will change frequently as COW
8736  * operations happen.  So, swapfile + btrfs == corruption.
8737  *
8738  * For now we're avoiding this by dropping bmap.
8739  */
8740 static const struct address_space_operations btrfs_aops = {
8741 	.readpage	= btrfs_readpage,
8742 	.writepage	= btrfs_writepage,
8743 	.writepages	= btrfs_writepages,
8744 	.readpages	= btrfs_readpages,
8745 	.direct_IO	= btrfs_direct_IO,
8746 	.invalidatepage = btrfs_invalidatepage,
8747 	.releasepage	= btrfs_releasepage,
8748 	.set_page_dirty	= btrfs_set_page_dirty,
8749 	.error_remove_page = generic_error_remove_page,
8750 };
8751 
8752 static const struct address_space_operations btrfs_symlink_aops = {
8753 	.readpage	= btrfs_readpage,
8754 	.writepage	= btrfs_writepage,
8755 	.invalidatepage = btrfs_invalidatepage,
8756 	.releasepage	= btrfs_releasepage,
8757 };
8758 
8759 static const struct inode_operations btrfs_file_inode_operations = {
8760 	.getattr	= btrfs_getattr,
8761 	.setattr	= btrfs_setattr,
8762 	.setxattr	= btrfs_setxattr,
8763 	.getxattr	= btrfs_getxattr,
8764 	.listxattr      = btrfs_listxattr,
8765 	.removexattr	= btrfs_removexattr,
8766 	.permission	= btrfs_permission,
8767 	.fiemap		= btrfs_fiemap,
8768 	.get_acl	= btrfs_get_acl,
8769 	.update_time	= btrfs_update_time,
8770 };
8771 static const struct inode_operations btrfs_special_inode_operations = {
8772 	.getattr	= btrfs_getattr,
8773 	.setattr	= btrfs_setattr,
8774 	.permission	= btrfs_permission,
8775 	.setxattr	= btrfs_setxattr,
8776 	.getxattr	= btrfs_getxattr,
8777 	.listxattr	= btrfs_listxattr,
8778 	.removexattr	= btrfs_removexattr,
8779 	.get_acl	= btrfs_get_acl,
8780 	.update_time	= btrfs_update_time,
8781 };
8782 static const struct inode_operations btrfs_symlink_inode_operations = {
8783 	.readlink	= generic_readlink,
8784 	.follow_link	= page_follow_link_light,
8785 	.put_link	= page_put_link,
8786 	.getattr	= btrfs_getattr,
8787 	.setattr	= btrfs_setattr,
8788 	.permission	= btrfs_permission,
8789 	.setxattr	= btrfs_setxattr,
8790 	.getxattr	= btrfs_getxattr,
8791 	.listxattr	= btrfs_listxattr,
8792 	.removexattr	= btrfs_removexattr,
8793 	.get_acl	= btrfs_get_acl,
8794 	.update_time	= btrfs_update_time,
8795 };
8796 
8797 const struct dentry_operations btrfs_dentry_operations = {
8798 	.d_delete	= btrfs_dentry_delete,
8799 	.d_release	= btrfs_dentry_release,
8800 };
8801