/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
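/*
 * Note (editorial, illustrative): the table above maps the S_IFMT bits
 * of inode->i_mode to the BTRFS_FT_* values stored in directory items.
 * For example, S_IFREG is 0100000 octal, so S_IFREG >> 12 == 8 and slot
 * 8 holds BTRFS_FT_REG_FILE; a lookup is simply
 *	btrfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT]
 * which is how btrfs_inode_type()-style helpers are typically written.
 */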

static int btrfs_setsize(struct inode *inode, loff_t newsize);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = btrfs_ino(inode);
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
				       PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	ret = btrfs_update_inode(trans, root, inode);

	return ret;
fail:
	btrfs_free_path(path);
	return err;
}
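/*
 * Note (editorial, illustrative): btrfs_file_extent_calc_inline_size()
 * sizes the leaf item as the fixed file_extent_item header up to the
 * inline data, plus the data bytes themselves.  So storing N (possibly
 * compressed) bytes inline costs a bit more than N bytes of leaf space,
 * which is why callers bound N by BTRFS_MAX_INLINE_DATA_SIZE(root)
 * before getting here.
 */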


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size, int compress_type,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, inode, start, aligned_end,
				 &hint_byte, 1);
	if (ret)
		return ret;

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	} else if (ret == -ENOSPC) {
		return 1;
	}

	btrfs_delalloc_release_metadata(inode, end + 1 - start);
	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
	return 0;
}
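/*
 * Note (editorial, illustrative): the checks above mean an inline
 * extent is only attempted for data that starts at file offset 0, ends
 * before the first page boundary, fits under both the leaf-based limit
 * and the max_inline mount option, and covers EOF.  E.g. with 4K pages,
 * a 5000-byte file is rejected (actual_end >= PAGE_CACHE_SIZE) and
 * falls back to a normal allocated extent; returning 1 here is the
 * "not inlined, keep going" signal, not an error.
 */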

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;
	int compress_type = root->fs_info->compress_type;

	/* if this is a small write inside eof, kick off a defrag */
	if ((end - start + 1) < 16 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/* we want to make sure that the amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
	    (btrfs_test_opt(root, COMPRESS) ||
	     (BTRFS_I(inode)->force_compress) ||
	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			goto cont;
		}

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   total_compressed, pages,
					   nr_pages, &nr_pages_ret,
					   &total_in,
					   &total_compressed,
					   max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr);
			}
			will_compress = 1;
		}
	}
cont:
	if (start == 0) {
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto cleanup_and_out;
		}
		trans->block_rsv = &root->fs_info->delalloc_block_rsv;

		/* lets try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, NULL,
			     EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);

			btrfs_end_transaction(trans, root);
			goto free_pages_out;
		}
		btrfs_end_transaction(trans, root);
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent.  Round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things.
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret,
				 compress_type);

		if (start + num_bytes < end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  Redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance.
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		add_async_extent(async_cow, start, end - start + 1,
				 0, NULL, 0, BTRFS_COMPRESS_NONE);
		*num_added += 1;
	}

out:
	return ret;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	goto out;

cleanup_and_out:
	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);
	if (!trans || IS_ERR(trans))
		btrfs_error(root->fs_info, ret, "Failed to join transaction");
	else
		btrfs_abort_transaction(trans, root, ret);
	goto free_pages_out;
}
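/*
 * Note (editorial, illustrative): when compression wins, num_bytes is
 * set to total_in (at most 128K of input consumed per pass), so a 1M
 * compressible write is queued as roughly eight async extents via the
 * "goto again" loop above.  When compression loses, the whole remaining
 * range is queued as a single uncompressed async extent instead.
 */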

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret = 0;

	if (list_empty(&async_cow->extents))
		return 0;

	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
					 async_extent->start +
					 async_extent->ram_size - 1);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0);

			/* JDM XXX */

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
		} else {
			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
			ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1);
			if (ret)
				btrfs_abort_transaction(trans, root, ret);
			btrfs_end_transaction(trans, root);
		}

		if (ret) {
			int i;
			for (i = 0; i < async_extent->nr_pages; i++) {
				WARN_ON(async_extent->pages[i]->mapping);
				page_cache_release(async_extent->pages[i]);
			}
			kfree(async_extent->pages);
			async_extent->nr_pages = 0;
			async_extent->pages = NULL;
			unlock_extent(io_tree, async_extent->start,
				      async_extent->start +
				      async_extent->ram_size - 1);
			if (ret == -ENOSPC)
				goto retry;
			goto out_free; /* JDM: Requeue? */
		}

		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		em = alloc_extent_map();
		BUG_ON(!em); /* -ENOMEM */
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		em->compress_type = async_extent->compress_type;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		BUG_ON(ret); /* -ENOMEM */

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
				&BTRFS_I(inode)->io_tree,
				async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK |
				EXTENT_CLEAR_DELALLOC |
				EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);

		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);

		BUG_ON(ret); /* -ENOMEM */
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}
	ret = 0;
out:
	return ret;
out_free:
	kfree(async_extent);
	goto out;
}
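/*
 * Note (editorial, illustrative): the em inserted above is marked
 * EXTENT_FLAG_PINNED | EXTENT_FLAG_COMPRESSED with block_len set to the
 * on-disk (compressed) length and len to the uncompressed ram size,
 * e.g. 128K of ram_size might map to a 16K reservation, and readers
 * consult compress_type to know how to inflate it.  The add/drop loop
 * retries on -EEXIST because a racing reader can re-populate the cache
 * with an overlapping mapping between our drop and our add.
 */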

static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	BUG_ON(btrfs_is_free_space_inode(inode));
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, locked_page,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	/* if this is a small write inside eof, kick off defrag */
	if (num_bytes < 64 * 1024 &&
	    (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		btrfs_add_inode_defrag(trans, inode);

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
				     &BTRFS_I(inode)->io_tree,
				     start, end, NULL,
				     EXTENT_CLEAR_UNLOCK_PAGE |
				     EXTENT_CLEAR_UNLOCK |
				     EXTENT_CLEAR_DELALLOC |
				     EXTENT_CLEAR_DIRTY |
				     EXTENT_SET_WRITEBACK |
				     EXTENT_END_WRITEBACK);

			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(root->fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long op;

		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   &ins, 1);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out_unlock;
		}

		em = alloc_extent_map();
		BUG_ON(!em); /* -ENOMEM */
		em->start = start;
		em->orig_start = em->start;
		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto out_unlock;
			}
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
		op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
			EXTENT_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
	ret = 0;
out:
	btrfs_end_transaction(trans, root);

	return ret;
out_unlock:
	extent_clear_unlock_delalloc(inode,
		     &BTRFS_I(inode)->io_tree,
		     start, end, locked_page,
		     EXTENT_CLEAR_UNLOCK_PAGE |
		     EXTENT_CLEAR_UNLOCK |
		     EXTENT_CLEAR_DELALLOC |
		     EXTENT_CLEAR_DIRTY |
		     EXTENT_SET_WRITEBACK |
		     EXTENT_END_WRITEBACK);

	goto out;
}
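/*
 * Note (editorial, illustrative): the allocation loop above asks for
 * the whole remaining range each pass but accepts whatever contiguous
 * chunk btrfs_reserve_extent() hands back (ins.offset), so a 1M
 * delalloc range in a fragmented block group might be satisfied as,
 * say, 640K + 256K + 128K, each chunk getting its own extent map and
 * ordered extent before start and alloc_hint advance past it.
 */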

/*
 * work queue callback to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0) {
		btrfs_add_delayed_iput(async_cow->inode);
		async_cow->inode = NULL;
	}
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	if (async_cow->inode)
		btrfs_add_delayed_iput(async_cow->inode);
	kfree(async_cow);
}

static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		async_cow->inode = igrab(inode);
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}
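/*
 * Note (editorial, illustrative): cow_file_range_async() carves the
 * delalloc range into at most 512K work items (unless the inode is
 * flagged nocompress, in which case one item covers everything).  With
 * 4K pages, a 2M range becomes four async_cow items of 128 pages each;
 * async_delalloc_pages throttles writers once the queue holds more
 * outstanding pages than `limit` allows.
 */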

static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}

/*
 * called by the nocow writeback path.  This checks for snapshots or
 * COW copies of the extents that exist in the file, and COWs the file
 * as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret, err;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path) {
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, locked_page,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);
		return -ENOMEM;
	}

	nolock = btrfs_is_free_space_inode(inode);

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);

	if (IS_ERR(trans)) {
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     start, end, locked_page,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	trans->block_rsv = &root->fs_info->delalloc_block_rsv;

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       cur_offset, 0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * This ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					found_key.offset - 1, page_started,
					nr_written, 1);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map();
			BUG_ON(!em); /* -ENOMEM */
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				write_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				write_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto error;
			}
		}

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				cur_offset, cur_offset + num_bytes - 1,
				locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
				EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
				EXTENT_SET_PRIVATE2);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1) {
		cow_start = cur_offset;
		cur_offset = end;
	}

	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error;
		}
	}

error:
	if (nolock) {
		err = btrfs_end_transaction_nolock(trans, root);
	} else {
		err = btrfs_end_transaction(trans, root);
	}
	if (!ret)
		ret = err;

	if (ret && cur_offset < end)
		extent_clear_unlock_delalloc(inode,
			     &BTRFS_I(inode)->io_tree,
			     cur_offset, end, locked_page,
			     EXTENT_CLEAR_UNLOCK_PAGE |
			     EXTENT_CLEAR_UNLOCK |
			     EXTENT_CLEAR_DELALLOC |
			     EXTENT_CLEAR_DIRTY |
			     EXTENT_SET_WRITEBACK |
			     EXTENT_END_WRITEBACK);

	btrfs_free_path(path);
	return ret;
}

/*
 * extent_io.c callback to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	} else if (!btrfs_test_opt(root, COMPRESS) &&
		   !(BTRFS_I(inode)->force_compress) &&
		   !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) {
		ret = cow_file_range(inode, locked_page, start, end,
				      page_started, nr_written, 1);
	} else {
		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			&BTRFS_I(inode)->runtime_flags);
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	}
	return ret;
}
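/*
 * Note (editorial, illustrative): the dispatch above is, in order:
 * NODATACOW inodes take run_delalloc_nocow() with force=1 (overwrite
 * even regular extents in place), PREALLOC inodes take it with force=0
 * (only preallocated extents are written in place), uncompressed
 * inodes take the synchronous cow_file_range(), and anything that may
 * compress goes through the async path so compression can run on
 * other cpus.
 */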

static void btrfs_split_extent_hook(struct inode *inode,
				    struct extent_state *orig, u64 split)
{
	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;
	spin_unlock(&BTRFS_I(inode)->lock);
}

/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static void btrfs_merge_extent_hook(struct inode *inode,
				    struct extent_state *new,
				    struct extent_state *other)
{
	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents--;
	spin_unlock(&BTRFS_I(inode)->lock);
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static void btrfs_set_bit_hook(struct inode *inode,
			       struct extent_state *state, int *bits)
{

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		spin_lock(&root->fs_info->delalloc_lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		root->fs_info->delalloc_bytes += len;
		if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static void btrfs_clear_bit_hook(struct inode *inode,
				 struct extent_state *state, int *bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents--;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		if (*bits & EXTENT_DO_ACCOUNTING)
			btrfs_delalloc_release_metadata(inode, len);

		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list)
			btrfs_free_reserved_data_space(inode, len);

		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes -= len;
		BTRFS_I(inode)->delalloc_bytes -= len;

		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);
	/* Will always return 0 or 1 with map_multi == NULL */
	BUG_ON(ret < 0);
	if (map_length < length + size)
		return 1;
	return 0;
}
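/*
 * Note (editorial, illustrative): merge_bio_hook() returning 1 tells
 * the caller the page must start a new bio.  The arithmetic: the
 * logical byte offset is bi_sector * 512 (the << 9), and if the stripe
 * mapped at that offset has only map_length bytes left, appending
 * `size` more bytes (length + size) would cross the stripe boundary.
 */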

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the sums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret); /* -ENOMEM */
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the sums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}

/*
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags,
			  u64 bio_offset)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;
	int metadata = 0;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	if (btrfs_is_free_space_inode(inode))
		metadata = 2;

	if (!(rw & REQ_WRITE)) {
		ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
		if (ret)
			return ret;

		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum) {
			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
			if (ret)
				return ret;
		}
		goto mapit;
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, bio_offset,
				   __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}
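/*
 * Note (editorial, illustrative): the submission hook splits four ways.
 * Reads of compressed extents go to btrfs_submit_compressed_read();
 * ordinary reads look up csums first so the endio code can verify each
 * sector; checksummed writes are handed to the work queue, which runs
 * __btrfs_submit_bio_start (csum) and then __btrfs_submit_bio_done
 * (map and submit); nodatasum IO falls straight through to
 * btrfs_map_bio().
 */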
1648 
1649 /*
1650  * given a list of ordered sums record them in the inode.  This happens
1651  * at IO completion time based on sums calculated at bio submission time.
1652  */
1653 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1654 			     struct inode *inode, u64 file_offset,
1655 			     struct list_head *list)
1656 {
1657 	struct btrfs_ordered_sum *sum;
1658 
1659 	list_for_each_entry(sum, list, list) {
1660 		btrfs_csum_file_blocks(trans,
1661 		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
1662 	}
1663 	return 0;
1664 }
1665 
1666 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1667 			      struct extent_state **cached_state)
1668 {
1669 	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1670 		WARN_ON(1);
1671 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1672 				   cached_state, GFP_NOFS);
1673 }
1674 
1675 /* see btrfs_writepage_start_hook for details on why this is required */
1676 struct btrfs_writepage_fixup {
1677 	struct page *page;
1678 	struct btrfs_work work;
1679 };
1680 
1681 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1682 {
1683 	struct btrfs_writepage_fixup *fixup;
1684 	struct btrfs_ordered_extent *ordered;
1685 	struct extent_state *cached_state = NULL;
1686 	struct page *page;
1687 	struct inode *inode;
1688 	u64 page_start;
1689 	u64 page_end;
1690 	int ret;
1691 
1692 	fixup = container_of(work, struct btrfs_writepage_fixup, work);
1693 	page = fixup->page;
1694 again:
1695 	lock_page(page);
1696 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1697 		ClearPageChecked(page);
1698 		goto out_page;
1699 	}
1700 
1701 	inode = page->mapping->host;
1702 	page_start = page_offset(page);
1703 	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1704 
1705 	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1706 			 &cached_state);
1707 
1708 	/* already ordered? We're done */
1709 	if (PagePrivate2(page))
1710 		goto out;
1711 
1712 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
1713 	if (ordered) {
1714 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1715 				     page_end, &cached_state, GFP_NOFS);
1716 		unlock_page(page);
1717 		btrfs_start_ordered_extent(inode, ordered, 1);
1718 		btrfs_put_ordered_extent(ordered);
1719 		goto again;
1720 	}
1721 
1722 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
1723 	if (ret) {
1724 		mapping_set_error(page->mapping, ret);
1725 		end_extent_writepage(page, ret, page_start, page_end);
1726 		ClearPageChecked(page);
1727 		goto out;
1728 	}
1729 
1730 	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1731 	ClearPageChecked(page);
1732 	set_page_dirty(page);
1733 out:
1734 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
1735 			     &cached_state, GFP_NOFS);
1736 out_page:
1737 	unlock_page(page);
1738 	page_cache_release(page);
1739 	kfree(fixup);
1740 }
1741 
1742 /*
1743  * There are a few paths in the higher layers of the kernel that directly
1744  * set the page dirty bit without asking the filesystem if it is a
1745  * good idea.  This causes problems because we want to make sure COW
1746  * properly happens and the data=ordered rules are followed.
1747  *
1748  * In our case any range that doesn't have the ORDERED bit set
1749  * hasn't been properly set up for IO.  We kick off an async process
1750  * to fix it up.  The async helper will wait for ordered extents, set
1751  * the delalloc bit and make it safe to write the page.
1752  */
1753 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1754 {
1755 	struct inode *inode = page->mapping->host;
1756 	struct btrfs_writepage_fixup *fixup;
1757 	struct btrfs_root *root = BTRFS_I(inode)->root;
1758 
1759 	/* this page is properly in the ordered list */
1760 	if (TestClearPagePrivate2(page))
1761 		return 0;
1762 
1763 	if (PageChecked(page))
1764 		return -EAGAIN;
1765 
1766 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1767 	if (!fixup)
1768 		return -EAGAIN;
1769 
1770 	SetPageChecked(page);
1771 	page_cache_get(page);
1772 	fixup->work.func = btrfs_writepage_fixup_worker;
1773 	fixup->page = page;
1774 	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1775 	return -EBUSY;
1776 }
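
/*
 * the fixup above follows the usual btrfs_work pattern: embed the work
 * struct in a private context, queue it, recover the context with
 * container_of() in the worker.  a minimal sketch with a hypothetical
 * context type:
 */
#if 0
struct example_ctx {
	struct page *page;
	struct btrfs_work work;
};

static void example_worker(struct btrfs_work *work)
{
	struct example_ctx *ctx;

	/* map the embedded member back to the containing struct */
	ctx = container_of(work, struct example_ctx, work);
	/* ... operate on ctx->page ... */
	kfree(ctx);
}

static int example_queue(struct btrfs_root *root, struct page *page)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_NOFS);

	if (!ctx)
		return -ENOMEM;
	ctx->page = page;
	ctx->work.func = example_worker;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &ctx->work);
	return 0;
}
#endif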
1777 
1778 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1779 				       struct inode *inode, u64 file_pos,
1780 				       u64 disk_bytenr, u64 disk_num_bytes,
1781 				       u64 num_bytes, u64 ram_bytes,
1782 				       u8 compression, u8 encryption,
1783 				       u16 other_encoding, int extent_type)
1784 {
1785 	struct btrfs_root *root = BTRFS_I(inode)->root;
1786 	struct btrfs_file_extent_item *fi;
1787 	struct btrfs_path *path;
1788 	struct extent_buffer *leaf;
1789 	struct btrfs_key ins;
1790 	u64 hint;
1791 	int ret;
1792 
1793 	path = btrfs_alloc_path();
1794 	if (!path)
1795 		return -ENOMEM;
1796 
1797 	path->leave_spinning = 1;
1798 
1799 	/*
1800 	 * we may be replacing one extent in the tree with another.
1801 	 * The new extent is pinned in the extent map, and we don't want
1802 	 * to drop it from the cache until it is completely in the btree.
1803 	 *
1804 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
1805 	 * The caller is expected to unpin it and allow it to be merged
1806 	 * with the others.
1807 	 */
1808 	ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
1809 				 &hint, 0);
1810 	if (ret)
1811 		goto out;
1812 
1813 	ins.objectid = btrfs_ino(inode);
1814 	ins.offset = file_pos;
1815 	ins.type = BTRFS_EXTENT_DATA_KEY;
1816 	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1817 	if (ret)
1818 		goto out;
1819 	leaf = path->nodes[0];
1820 	fi = btrfs_item_ptr(leaf, path->slots[0],
1821 			    struct btrfs_file_extent_item);
1822 	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1823 	btrfs_set_file_extent_type(leaf, fi, extent_type);
1824 	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1825 	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1826 	btrfs_set_file_extent_offset(leaf, fi, 0);
1827 	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1828 	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1829 	btrfs_set_file_extent_compression(leaf, fi, compression);
1830 	btrfs_set_file_extent_encryption(leaf, fi, encryption);
1831 	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1832 
1833 	btrfs_unlock_up_safe(path, 1);
1834 	btrfs_set_lock_blocking(leaf);
1835 
1836 	btrfs_mark_buffer_dirty(leaf);
1837 
1838 	inode_add_bytes(inode, num_bytes);
1839 
1840 	ins.objectid = disk_bytenr;
1841 	ins.offset = disk_num_bytes;
1842 	ins.type = BTRFS_EXTENT_ITEM_KEY;
1843 	ret = btrfs_alloc_reserved_file_extent(trans, root,
1844 					root->root_key.objectid,
1845 					btrfs_ino(inode), file_pos, &ins);
1846 out:
1847 	btrfs_free_path(path);
1848 
1849 	return ret;
1850 }
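
/*
 * note the key reuse above: the file extent item is keyed on
 * (inode number, BTRFS_EXTENT_DATA_KEY, file offset) in the fs tree,
 * while the backing allocation is keyed on (disk bytenr,
 * BTRFS_EXTENT_ITEM_KEY, byte length) in the extent tree.  a sketch:
 */
#if 0
static void example_extent_keys(u64 ino, u64 file_pos,
				u64 disk_bytenr, u64 disk_num_bytes)
{
	struct btrfs_key file_key, extent_key;

	file_key.objectid = ino;
	file_key.type = BTRFS_EXTENT_DATA_KEY;
	file_key.offset = file_pos;

	extent_key.objectid = disk_bytenr;
	extent_key.type = BTRFS_EXTENT_ITEM_KEY;
	extent_key.offset = disk_num_bytes;
}
#endif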
1851 
1852 /*
1853  * As ordered data IO finishes, this gets called so we can finish
1854  * an ordered extent if the range of bytes in the file it covers is
1855  * fully written.
1856  */
1862 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
1863 {
1864 	struct inode *inode = ordered_extent->inode;
1865 	struct btrfs_root *root = BTRFS_I(inode)->root;
1866 	struct btrfs_trans_handle *trans = NULL;
1867 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1868 	struct extent_state *cached_state = NULL;
1869 	int compress_type = 0;
1870 	int ret;
1871 	bool nolock;
1872 
1873 	nolock = btrfs_is_free_space_inode(inode);
1874 
1875 	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
1876 		ret = -EIO;
1877 		goto out;
1878 	}
1879 
1880 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1881 		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
1882 		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1883 		if (!ret) {
1884 			if (nolock)
1885 				trans = btrfs_join_transaction_nolock(root);
1886 			else
1887 				trans = btrfs_join_transaction(root);
1888 			if (IS_ERR(trans))
1889 				return PTR_ERR(trans);
1890 			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1891 			ret = btrfs_update_inode_fallback(trans, root, inode);
1892 			if (ret) /* -ENOMEM or corruption */
1893 				btrfs_abort_transaction(trans, root, ret);
1894 		}
1895 		goto out;
1896 	}
1897 
1898 	lock_extent_bits(io_tree, ordered_extent->file_offset,
1899 			 ordered_extent->file_offset + ordered_extent->len - 1,
1900 			 0, &cached_state);
1901 
1902 	if (nolock)
1903 		trans = btrfs_join_transaction_nolock(root);
1904 	else
1905 		trans = btrfs_join_transaction(root);
1906 	if (IS_ERR(trans)) {
1907 		ret = PTR_ERR(trans);
1908 		trans = NULL;
1909 		goto out_unlock;
1910 	}
1911 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1912 
1913 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1914 		compress_type = ordered_extent->compress_type;
1915 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1916 		BUG_ON(compress_type);
1917 		ret = btrfs_mark_extent_written(trans, inode,
1918 						ordered_extent->file_offset,
1919 						ordered_extent->file_offset +
1920 						ordered_extent->len);
1921 	} else {
1922 		BUG_ON(root == root->fs_info->tree_root);
1923 		ret = insert_reserved_file_extent(trans, inode,
1924 						ordered_extent->file_offset,
1925 						ordered_extent->start,
1926 						ordered_extent->disk_len,
1927 						ordered_extent->len,
1928 						ordered_extent->len,
1929 						compress_type, 0, 0,
1930 						BTRFS_FILE_EXTENT_REG);
1931 		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1932 				   ordered_extent->file_offset,
1933 				   ordered_extent->len);
1934 	}
1935 
1936 	if (ret < 0) {
1937 		btrfs_abort_transaction(trans, root, ret);
1938 		goto out_unlock;
1939 	}
1940 
1941 	add_pending_csums(trans, inode, ordered_extent->file_offset,
1942 			  &ordered_extent->list);
1943 
1944 	ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1945 	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1946 		ret = btrfs_update_inode_fallback(trans, root, inode);
1947 		if (ret) { /* -ENOMEM or corruption */
1948 			btrfs_abort_transaction(trans, root, ret);
1949 			goto out_unlock;
1950 		}
1951 	}
1952 	ret = 0;
1953 out_unlock:
1954 	unlock_extent_cached(io_tree, ordered_extent->file_offset,
1955 			     ordered_extent->file_offset +
1956 			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
1957 out:
1958 	if (root != root->fs_info->tree_root)
1959 		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
1960 	if (trans) {
1961 		if (nolock)
1962 			btrfs_end_transaction_nolock(trans, root);
1963 		else
1964 			btrfs_end_transaction(trans, root);
1965 	}
1966 
1967 	if (ret)
1968 		clear_extent_uptodate(io_tree, ordered_extent->file_offset,
1969 				      ordered_extent->file_offset +
1970 				      ordered_extent->len - 1, NULL, GFP_NOFS);
1971 
1972 	/*
1973 	 * This needs to be done to make sure anybody waiting knows we are done
1974 	 * updating everything for this ordered extent.
1975 	 */
1976 	btrfs_remove_ordered_extent(inode, ordered_extent);
1977 
1978 	/* once for us */
1979 	btrfs_put_ordered_extent(ordered_extent);
1980 	/* once for the tree */
1981 	btrfs_put_ordered_extent(ordered_extent);
1982 
1983 	return ret;
1984 }
1985 
1986 static void finish_ordered_fn(struct btrfs_work *work)
1987 {
1988 	struct btrfs_ordered_extent *ordered_extent;
1989 	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
1990 	btrfs_finish_ordered_io(ordered_extent);
1991 }
1992 
1993 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1994 				struct extent_state *state, int uptodate)
1995 {
1996 	struct inode *inode = page->mapping->host;
1997 	struct btrfs_root *root = BTRFS_I(inode)->root;
1998 	struct btrfs_ordered_extent *ordered_extent = NULL;
1999 	struct btrfs_workers *workers;
2000 
2001 	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
2002 
2003 	ClearPagePrivate2(page);
2004 	if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
2005 					    end - start + 1, uptodate))
2006 		return 0;
2007 
2008 	ordered_extent->work.func = finish_ordered_fn;
2009 	ordered_extent->work.flags = 0;
2010 
2011 	if (btrfs_is_free_space_inode(inode))
2012 		workers = &root->fs_info->endio_freespace_worker;
2013 	else
2014 		workers = &root->fs_info->endio_write_workers;
2015 	btrfs_queue_worker(workers, &ordered_extent->work);
2016 
2017 	return 0;
2018 }
2019 
2020 /*
2021  * when reads are done, we need to check csums to verify the data is correct
2022  * if there's a match, we allow the bio to finish.  If not, the code in
2023  * extent_io.c will try to find good copies for us.
2024  */
2025 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
2026 			       struct extent_state *state, int mirror)
2027 {
2028 	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
2029 	struct inode *inode = page->mapping->host;
2030 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2031 	char *kaddr;
2032 	u64 private = ~(u32)0;
2033 	int ret;
2034 	struct btrfs_root *root = BTRFS_I(inode)->root;
2035 	u32 csum = ~(u32)0;
2036 
2037 	if (PageChecked(page)) {
2038 		ClearPageChecked(page);
2039 		goto good;
2040 	}
2041 
2042 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
2043 		goto good;
2044 
2045 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
2046 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
2047 		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
2048 				  GFP_NOFS);
2049 		return 0;
2050 	}
2051 
2052 	if (state && state->start == start) {
2053 		private = state->private;
2054 		ret = 0;
2055 	} else {
2056 		ret = get_state_private(io_tree, start, &private);
2057 	}
2058 	kaddr = kmap_atomic(page);
2059 	if (ret)
2060 		goto zeroit;
2061 
2062 	csum = btrfs_csum_data(root, kaddr + offset, csum,  end - start + 1);
2063 	btrfs_csum_final(csum, (char *)&csum);
2064 	if (csum != private)
2065 		goto zeroit;
2066 
2067 	kunmap_atomic(kaddr);
2068 good:
2069 	return 0;
2070 
2071 zeroit:
2072 	printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
2073 		       "private %llu\n",
2074 		       (unsigned long long)btrfs_ino(page->mapping->host),
2075 		       (unsigned long long)start, csum,
2076 		       (unsigned long long)private);
2077 	memset(kaddr + offset, 1, end - start + 1);
2078 	flush_dcache_page(page);
2079 	kunmap_atomic(kaddr);
2080 	if (private == 0)
2081 		return 0;
2082 	return -EIO;
2083 }
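
/*
 * a condensed model of the verification above, assuming the expected
 * csum has already been looked up: btrfs_csum_data() is the crc32c
 * step and btrfs_csum_final() folds it into the on-disk format.
 */
#if 0
static int example_verify_block(struct btrfs_root *root, char *data,
				size_t len, u32 expected)
{
	u32 csum = ~(u32)0;

	csum = btrfs_csum_data(root, data, csum, len);
	btrfs_csum_final(csum, (char *)&csum);
	return csum == expected ? 0 : -EIO;
}
#endif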
2084 
2085 struct delayed_iput {
2086 	struct list_head list;
2087 	struct inode *inode;
2088 };
2089 
2090 /* JDM: If this is fs-wide, why can't we add a pointer to
2091  * btrfs_inode instead and avoid the allocation? */
2092 void btrfs_add_delayed_iput(struct inode *inode)
2093 {
2094 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2095 	struct delayed_iput *delayed;
2096 
2097 	if (atomic_add_unless(&inode->i_count, -1, 1))
2098 		return;
2099 
2100 	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
2101 	delayed->inode = inode;
2102 
2103 	spin_lock(&fs_info->delayed_iput_lock);
2104 	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
2105 	spin_unlock(&fs_info->delayed_iput_lock);
2106 }
2107 
2108 void btrfs_run_delayed_iputs(struct btrfs_root *root)
2109 {
2110 	LIST_HEAD(list);
2111 	struct btrfs_fs_info *fs_info = root->fs_info;
2112 	struct delayed_iput *delayed;
2113 	int empty;
2114 
2115 	spin_lock(&fs_info->delayed_iput_lock);
2116 	empty = list_empty(&fs_info->delayed_iputs);
2117 	spin_unlock(&fs_info->delayed_iput_lock);
2118 	if (empty)
2119 		return;
2120 
2121 	down_read(&root->fs_info->cleanup_work_sem);
2122 	spin_lock(&fs_info->delayed_iput_lock);
2123 	list_splice_init(&fs_info->delayed_iputs, &list);
2124 	spin_unlock(&fs_info->delayed_iput_lock);
2125 
2126 	while (!list_empty(&list)) {
2127 		delayed = list_entry(list.next, struct delayed_iput, list);
2128 		list_del(&delayed->list);
2129 		iput(delayed->inode);
2130 		kfree(delayed);
2131 	}
2132 	up_read(&root->fs_info->cleanup_work_sem);
2133 }
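
/*
 * btrfs_run_delayed_iputs() above uses the classic drain pattern:
 * splice the shared list onto a private head under the lock, then walk
 * it with the lock dropped.  a minimal sketch with a hypothetical item
 * type:
 */
#if 0
static void example_drain(spinlock_t *lock, struct list_head *shared)
{
	LIST_HEAD(local);
	struct example_item *item;

	spin_lock(lock);
	list_splice_init(shared, &local);	/* shared is now empty */
	spin_unlock(lock);

	while (!list_empty(&local)) {
		item = list_entry(local.next, struct example_item, list);
		list_del(&item->list);
		kfree(item);
	}
}
#endif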
2134 
2135 enum btrfs_orphan_cleanup_state {
2136 	ORPHAN_CLEANUP_STARTED	= 1,
2137 	ORPHAN_CLEANUP_DONE	= 2,
2138 };
2139 
2140 /*
2141  * This is called at transaction commit time. If there are no orphan
2142  * files in the subvolume, it removes the orphan item and frees the
2143  * block_rsv structure.
2144  */
2145 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2146 			      struct btrfs_root *root)
2147 {
2148 	struct btrfs_block_rsv *block_rsv;
2149 	int ret;
2150 
2151 	if (atomic_read(&root->orphan_inodes) ||
2152 	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
2153 		return;
2154 
2155 	spin_lock(&root->orphan_lock);
2156 	if (atomic_read(&root->orphan_inodes)) {
2157 		spin_unlock(&root->orphan_lock);
2158 		return;
2159 	}
2160 
2161 	if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
2162 		spin_unlock(&root->orphan_lock);
2163 		return;
2164 	}
2165 
2166 	block_rsv = root->orphan_block_rsv;
2167 	root->orphan_block_rsv = NULL;
2168 	spin_unlock(&root->orphan_lock);
2169 
2170 	if (root->orphan_item_inserted &&
2171 	    btrfs_root_refs(&root->root_item) > 0) {
2172 		ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
2173 					    root->root_key.objectid);
2174 		BUG_ON(ret);
2175 		root->orphan_item_inserted = 0;
2176 	}
2177 
2178 	if (block_rsv) {
2179 		WARN_ON(block_rsv->size > 0);
2180 		btrfs_free_block_rsv(root, block_rsv);
2181 	}
2182 }
2183 
2184 /*
2185  * This creates an orphan entry for the given inode in case something goes
2186  * wrong in the middle of an unlink/truncate.
2187  *
2188  * NOTE: caller of this function should reserve 5 units of metadata for
2189  *	 this function.
2190  */
2191 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2192 {
2193 	struct btrfs_root *root = BTRFS_I(inode)->root;
2194 	struct btrfs_block_rsv *block_rsv = NULL;
2195 	int reserve = 0;
2196 	int insert = 0;
2197 	int ret;
2198 
2199 	if (!root->orphan_block_rsv) {
2200 		block_rsv = btrfs_alloc_block_rsv(root);
2201 		if (!block_rsv)
2202 			return -ENOMEM;
2203 	}
2204 
2205 	spin_lock(&root->orphan_lock);
2206 	if (!root->orphan_block_rsv) {
2207 		root->orphan_block_rsv = block_rsv;
2208 	} else if (block_rsv) {
2209 		btrfs_free_block_rsv(root, block_rsv);
2210 		block_rsv = NULL;
2211 	}
2212 
2213 	if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2214 			      &BTRFS_I(inode)->runtime_flags)) {
2215 #if 0
2216 		/*
2217 		 * For proper ENOSPC handling, we should do orphan
2218 		 * cleanup when mounting. But this introduces backward
2219 		 * compatibility issue.
2220 		 */
2221 		if (!xchg(&root->orphan_item_inserted, 1))
2222 			insert = 2;
2223 		else
2224 			insert = 1;
2225 #endif
2226 		insert = 1;
2227 		atomic_inc(&root->orphan_inodes);
2228 	}
2229 
2230 	if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
2231 			      &BTRFS_I(inode)->runtime_flags))
2232 		reserve = 1;
2233 	spin_unlock(&root->orphan_lock);
2234 
2235 	/* grab metadata reservation from transaction handle */
2236 	if (reserve) {
2237 		ret = btrfs_orphan_reserve_metadata(trans, inode);
2238 		BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
2239 	}
2240 
2241 	/* insert an orphan item to track this unlinked/truncated file */
2242 	if (insert >= 1) {
2243 		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
2244 		if (ret && ret != -EEXIST) {
2245 			clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2246 				  &BTRFS_I(inode)->runtime_flags);
2247 			btrfs_abort_transaction(trans, root, ret);
2248 			return ret;
2249 		}
2250 		ret = 0;
2251 	}
2252 
2253 	/* insert an orphan item to track subvolume contains orphan files */
2254 	if (insert >= 2) {
2255 		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
2256 					       root->root_key.objectid);
2257 		if (ret && ret != -EEXIST) {
2258 			btrfs_abort_transaction(trans, root, ret);
2259 			return ret;
2260 		}
2261 	}
2262 	return 0;
2263 }
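
/*
 * the runtime_flags handling above leans on test_and_set_bit() being
 * atomic: only the caller that flips the bit from 0 to 1 performs the
 * one-time work (item insertion, metadata reservation).  a minimal
 * sketch:
 */
#if 0
static void example_do_once(unsigned long *flags, int bit)
{
	if (!test_and_set_bit(bit, flags)) {
		/* we won the race; exactly one caller gets here */
	}
}
#endif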
2264 
2265 /*
2266  * We have done the truncate/delete so we can go ahead and remove the orphan
2267  * item for this particular inode.
2268  */
2269 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2270 {
2271 	struct btrfs_root *root = BTRFS_I(inode)->root;
2272 	int delete_item = 0;
2273 	int release_rsv = 0;
2274 	int ret = 0;
2275 
2276 	spin_lock(&root->orphan_lock);
2277 	if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2278 			       &BTRFS_I(inode)->runtime_flags))
2279 		delete_item = 1;
2280 
2281 	if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
2282 			       &BTRFS_I(inode)->runtime_flags))
2283 		release_rsv = 1;
2284 	spin_unlock(&root->orphan_lock);
2285 
2286 	if (trans && delete_item) {
2287 		ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
2288 		BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
2289 	}
2290 
2291 	if (release_rsv) {
2292 		btrfs_orphan_release_metadata(inode);
2293 		atomic_dec(&root->orphan_inodes);
2294 	}
2295 
2296 	return 0;
2297 }
2298 
2299 /*
2300  * this cleans up any orphans that may be left on the list from the last use
2301  * of this root.
2302  */
2303 int btrfs_orphan_cleanup(struct btrfs_root *root)
2304 {
2305 	struct btrfs_path *path;
2306 	struct extent_buffer *leaf;
2307 	struct btrfs_key key, found_key;
2308 	struct btrfs_trans_handle *trans;
2309 	struct inode *inode;
2310 	u64 last_objectid = 0;
2311 	int ret = 0, nr_unlink = 0, nr_truncate = 0;
2312 
2313 	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
2314 		return 0;
2315 
2316 	path = btrfs_alloc_path();
2317 	if (!path) {
2318 		ret = -ENOMEM;
2319 		goto out;
2320 	}
2321 	path->reada = -1;
2322 
2323 	key.objectid = BTRFS_ORPHAN_OBJECTID;
2324 	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
2325 	key.offset = (u64)-1;
2326 
2327 	while (1) {
2328 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2329 		if (ret < 0)
2330 			goto out;
2331 
2332 		/*
2333 		 * ret == 0 means we found what we were searching for, which
2334 		 * is weird but possible.  Only move the path back if we didn't
2335 		 * find the key, then check whether what we landed on matches.
2336 		 */
2337 		if (ret > 0) {
2338 			ret = 0;
2339 			if (path->slots[0] == 0)
2340 				break;
2341 			path->slots[0]--;
2342 		}
2343 
2344 		/* pull out the item */
2345 		leaf = path->nodes[0];
2346 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2347 
2348 		/* make sure the item matches what we want */
2349 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2350 			break;
2351 		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
2352 			break;
2353 
2354 		/* release the path since we're done with it */
2355 		btrfs_release_path(path);
2356 
2357 		/*
2358 		 * this is basically a btrfs_lookup, without the root-crossing
2359 		 * logic.  we store the inode number in the offset of the
2360 		 * orphan item.
2361 		 */
2362 
2363 		if (found_key.offset == last_objectid) {
2364 			printk(KERN_ERR "btrfs: Error removing orphan entry, "
2365 			       "stopping orphan cleanup\n");
2366 			ret = -EINVAL;
2367 			goto out;
2368 		}
2369 
2370 		last_objectid = found_key.offset;
2371 
2372 		found_key.objectid = found_key.offset;
2373 		found_key.type = BTRFS_INODE_ITEM_KEY;
2374 		found_key.offset = 0;
2375 		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
2376 		ret = PTR_RET(inode);
2377 		if (ret && ret != -ESTALE)
2378 			goto out;
2379 
2380 		if (ret == -ESTALE && root == root->fs_info->tree_root) {
2381 			struct btrfs_root *dead_root;
2382 			struct btrfs_fs_info *fs_info = root->fs_info;
2383 			int is_dead_root = 0;
2384 
2385 			/*
2386 			 * this is an orphan in the tree root. Currently these
2387 			 * could come from 2 sources:
2388 			 *  a) a snapshot deletion in progress
2389 			 *  b) a free space cache inode
2390 			 * We need to distinguish those two, as the snapshot
2391 			 * orphan must not get deleted.
2392 			 * find_dead_roots already ran before us, so if this
2393 			 * is a snapshot deletion, we should find the root
2394 			 * in the dead_roots list
2395 			 */
2396 			spin_lock(&fs_info->trans_lock);
2397 			list_for_each_entry(dead_root, &fs_info->dead_roots,
2398 					    root_list) {
2399 				if (dead_root->root_key.objectid ==
2400 				    found_key.objectid) {
2401 					is_dead_root = 1;
2402 					break;
2403 				}
2404 			}
2405 			spin_unlock(&fs_info->trans_lock);
2406 			if (is_dead_root) {
2407 				/* prevent this orphan from being found again */
2408 				key.offset = found_key.objectid - 1;
2409 				continue;
2410 			}
2411 		}
2412 		/*
2413 		 * Inode is already gone but the orphan item is still there,
2414 		 * kill the orphan item.
2415 		 */
2416 		if (ret == -ESTALE) {
2417 			trans = btrfs_start_transaction(root, 1);
2418 			if (IS_ERR(trans)) {
2419 				ret = PTR_ERR(trans);
2420 				goto out;
2421 			}
2422 			printk(KERN_ERR "auto deleting %Lu\n",
2423 			       found_key.objectid);
2424 			ret = btrfs_del_orphan_item(trans, root,
2425 						    found_key.objectid);
2426 			BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
2427 			btrfs_end_transaction(trans, root);
2428 			continue;
2429 		}
2430 
2431 		/*
2432 		 * add this inode to the orphan list so btrfs_orphan_del does
2433 		 * the proper thing when we hit it
2434 		 */
2435 		set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2436 			&BTRFS_I(inode)->runtime_flags);
2437 
2438 		/* if we have links, this was a truncate, let's do that */
2439 		if (inode->i_nlink) {
2440 			if (!S_ISREG(inode->i_mode)) {
2441 				WARN_ON(1);
2442 				iput(inode);
2443 				continue;
2444 			}
2445 			nr_truncate++;
2446 			ret = btrfs_truncate(inode);
2447 		} else {
2448 			nr_unlink++;
2449 		}
2450 
2451 		/* this will do delete_inode and everything for us */
2452 		iput(inode);
2453 		if (ret)
2454 			goto out;
2455 	}
2456 	/* release the path since we're done with it */
2457 	btrfs_release_path(path);
2458 
2459 	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
2460 
2461 	if (root->orphan_block_rsv)
2462 		btrfs_block_rsv_release(root, root->orphan_block_rsv,
2463 					(u64)-1);
2464 
2465 	if (root->orphan_block_rsv || root->orphan_item_inserted) {
2466 		trans = btrfs_join_transaction(root);
2467 		if (!IS_ERR(trans))
2468 			btrfs_end_transaction(trans, root);
2469 	}
2470 
2471 	if (nr_unlink)
2472 		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2473 	if (nr_truncate)
2474 		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2475 
2476 out:
2477 	if (ret)
2478 		printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
2479 	btrfs_free_path(path);
2480 	return ret;
2481 }
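
/*
 * orphan items all share objectid BTRFS_ORPHAN_OBJECTID and store the
 * orphaned inode number in the key offset, which is why the cleanup
 * loop above starts at offset (u64)-1 and walks backwards.  a sketch
 * of the key construction:
 */
#if 0
static void example_orphan_key(struct btrfs_key *key, u64 ino)
{
	key->objectid = BTRFS_ORPHAN_OBJECTID;
	key->type = BTRFS_ORPHAN_ITEM_KEY;
	key->offset = ino;	/* the inode number lives in the offset */
}
#endif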
2482 
2483 /*
2484  * very simple check to peek ahead in the leaf looking for xattrs.  If we
2485  * don't find any xattrs, we know there can't be any acls.
2486  *
2487  * slot is the slot the inode is in, objectid is the objectid of the inode
2488  */
2489 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2490 					  int slot, u64 objectid)
2491 {
2492 	u32 nritems = btrfs_header_nritems(leaf);
2493 	struct btrfs_key found_key;
2494 	int scanned = 0;
2495 
2496 	slot++;
2497 	while (slot < nritems) {
2498 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2499 
2500 		/* we found a different objectid, there must not be acls */
2501 		if (found_key.objectid != objectid)
2502 			return 0;
2503 
2504 		/* we found an xattr, assume we've got an acl */
2505 		if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2506 			return 1;
2507 
2508 		/*
2509 		 * we found a key greater than an xattr key, there can't
2510 		 * be any acls later on
2511 		 */
2512 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2513 			return 0;
2514 
2515 		slot++;
2516 		scanned++;
2517 
2518 		/*
2519 		 * it goes inode, inode backrefs, xattrs, extents,
2520 		 * so if there are a ton of hard links to an inode there can
2521 		 * be a lot of backrefs.  Don't waste time searching too hard,
2522 		 * this is just an optimization
2523 		 */
2524 		if (scanned >= 8)
2525 			break;
2526 	}
2527 	/* we hit the end of the leaf before we found an xattr or
2528 	 * something larger than an xattr.  We have to assume the inode
2529 	 * has acls
2530 	 */
2531 	return 1;
2532 }
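
/*
 * the early exits above work because items in a leaf are sorted by
 * (objectid, type, offset); once we see a larger objectid, or a type
 * past BTRFS_XATTR_ITEM_KEY, no xattr for this inode can follow.  a
 * sketch of that ordering:
 */
#if 0
static int example_comp_keys(struct btrfs_key *a, struct btrfs_key *b)
{
	if (a->objectid != b->objectid)
		return a->objectid < b->objectid ? -1 : 1;
	if (a->type != b->type)
		return a->type < b->type ? -1 : 1;
	if (a->offset != b->offset)
		return a->offset < b->offset ? -1 : 1;
	return 0;
}
#endif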
2533 
2534 /*
2535  * read an inode from the btree into the in-memory inode
2536  */
2537 static void btrfs_read_locked_inode(struct inode *inode)
2538 {
2539 	struct btrfs_path *path;
2540 	struct extent_buffer *leaf;
2541 	struct btrfs_inode_item *inode_item;
2542 	struct btrfs_timespec *tspec;
2543 	struct btrfs_root *root = BTRFS_I(inode)->root;
2544 	struct btrfs_key location;
2545 	int maybe_acls;
2546 	u32 rdev;
2547 	int ret;
2548 	bool filled = false;
2549 
2550 	ret = btrfs_fill_inode(inode, &rdev);
2551 	if (!ret)
2552 		filled = true;
2553 
2554 	path = btrfs_alloc_path();
2555 	if (!path)
2556 		goto make_bad;
2557 
2558 	path->leave_spinning = 1;
2559 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2560 
2561 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2562 	if (ret)
2563 		goto make_bad;
2564 
2565 	leaf = path->nodes[0];
2566 
2567 	if (filled)
2568 		goto cache_acl;
2569 
2570 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2571 				    struct btrfs_inode_item);
2572 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2573 	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
2574 	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2575 	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2576 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2577 
2578 	tspec = btrfs_inode_atime(inode_item);
2579 	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2580 	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2581 
2582 	tspec = btrfs_inode_mtime(inode_item);
2583 	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2584 	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2585 
2586 	tspec = btrfs_inode_ctime(inode_item);
2587 	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2588 	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2589 
2590 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2591 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2592 	inode->i_version = btrfs_inode_sequence(leaf, inode_item);
2593 	inode->i_generation = BTRFS_I(inode)->generation;
2594 	inode->i_rdev = 0;
2595 	rdev = btrfs_inode_rdev(leaf, inode_item);
2596 
2597 	BTRFS_I(inode)->index_cnt = (u64)-1;
2598 	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2599 cache_acl:
2600 	/*
2601 	 * try to precache a NULL acl entry for files that don't have
2602 	 * any xattrs or acls
2603 	 */
2604 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
2605 					   btrfs_ino(inode));
2606 	if (!maybe_acls)
2607 		cache_no_acl(inode);
2608 
2609 	btrfs_free_path(path);
2610 
2611 	switch (inode->i_mode & S_IFMT) {
2612 	case S_IFREG:
2613 		inode->i_mapping->a_ops = &btrfs_aops;
2614 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2615 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2616 		inode->i_fop = &btrfs_file_operations;
2617 		inode->i_op = &btrfs_file_inode_operations;
2618 		break;
2619 	case S_IFDIR:
2620 		inode->i_fop = &btrfs_dir_file_operations;
2621 		if (root == root->fs_info->tree_root)
2622 			inode->i_op = &btrfs_dir_ro_inode_operations;
2623 		else
2624 			inode->i_op = &btrfs_dir_inode_operations;
2625 		break;
2626 	case S_IFLNK:
2627 		inode->i_op = &btrfs_symlink_inode_operations;
2628 		inode->i_mapping->a_ops = &btrfs_symlink_aops;
2629 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2630 		break;
2631 	default:
2632 		inode->i_op = &btrfs_special_inode_operations;
2633 		init_special_inode(inode, inode->i_mode, rdev);
2634 		break;
2635 	}
2636 
2637 	btrfs_update_iflags(inode);
2638 	return;
2639 
2640 make_bad:
2641 	btrfs_free_path(path);
2642 	make_bad_inode(inode);
2643 }
2644 
2645 /*
2646  * given a leaf and an inode, copy the inode fields into the leaf
2647  */
2648 static void fill_inode_item(struct btrfs_trans_handle *trans,
2649 			    struct extent_buffer *leaf,
2650 			    struct btrfs_inode_item *item,
2651 			    struct inode *inode)
2652 {
2653 	btrfs_set_inode_uid(leaf, item, inode->i_uid);
2654 	btrfs_set_inode_gid(leaf, item, inode->i_gid);
2655 	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2656 	btrfs_set_inode_mode(leaf, item, inode->i_mode);
2657 	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2658 
2659 	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2660 			       inode->i_atime.tv_sec);
2661 	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2662 				inode->i_atime.tv_nsec);
2663 
2664 	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2665 			       inode->i_mtime.tv_sec);
2666 	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2667 				inode->i_mtime.tv_nsec);
2668 
2669 	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2670 			       inode->i_ctime.tv_sec);
2671 	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2672 				inode->i_ctime.tv_nsec);
2673 
2674 	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2675 	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2676 	btrfs_set_inode_sequence(leaf, item, inode->i_version);
2677 	btrfs_set_inode_transid(leaf, item, trans->transid);
2678 	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2679 	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2680 	btrfs_set_inode_block_group(leaf, item, 0);
2681 }
2682 
2683 /*
2684  * copy everything in the in-memory inode into the btree.
2685  */
2686 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
2687 				struct btrfs_root *root, struct inode *inode)
2688 {
2689 	struct btrfs_inode_item *inode_item;
2690 	struct btrfs_path *path;
2691 	struct extent_buffer *leaf;
2692 	int ret;
2693 
2694 	path = btrfs_alloc_path();
2695 	if (!path)
2696 		return -ENOMEM;
2697 
2698 	path->leave_spinning = 1;
2699 	ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
2700 				 1);
2701 	if (ret) {
2702 		if (ret > 0)
2703 			ret = -ENOENT;
2704 		goto failed;
2705 	}
2706 
2707 	btrfs_unlock_up_safe(path, 1);
2708 	leaf = path->nodes[0];
2709 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
2710 				    struct btrfs_inode_item);
2711 
2712 	fill_inode_item(trans, leaf, inode_item, inode);
2713 	btrfs_mark_buffer_dirty(leaf);
2714 	btrfs_set_inode_last_trans(trans, inode);
2715 	ret = 0;
2716 failed:
2717 	btrfs_free_path(path);
2718 	return ret;
2719 }
2720 
2721 /*
2722  * copy everything in the in-memory inode into the btree.
2723  */
2724 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2725 				struct btrfs_root *root, struct inode *inode)
2726 {
2727 	int ret;
2728 
2729 	/*
2730 	 * If the inode is a free space inode, we can deadlock during commit
2731 	 * if we put it into the delayed code.
2732 	 *
2733 	 * The data relocation inode should also be directly updated
2734 	 * without delay
2735 	 */
2736 	if (!btrfs_is_free_space_inode(inode)
2737 	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
2738 		btrfs_update_root_times(trans, root);
2739 
2740 		ret = btrfs_delayed_update_inode(trans, root, inode);
2741 		if (!ret)
2742 			btrfs_set_inode_last_trans(trans, inode);
2743 		return ret;
2744 	}
2745 
2746 	return btrfs_update_inode_item(trans, root, inode);
2747 }
2748 
2749 static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
2750 				struct btrfs_root *root, struct inode *inode)
2751 {
2752 	int ret;
2753 
2754 	ret = btrfs_update_inode(trans, root, inode);
2755 	if (ret == -ENOSPC)
2756 		return btrfs_update_inode_item(trans, root, inode);
2757 	return ret;
2758 }
2759 
2760 /*
2761  * unlink helper that gets used here in inode.c and in the tree logging
2762  * recovery code.  It removes a link in a directory with a given name, and
2763  * also drops the back refs from the inode to the directory
2764  */
2765 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2766 				struct btrfs_root *root,
2767 				struct inode *dir, struct inode *inode,
2768 				const char *name, int name_len)
2769 {
2770 	struct btrfs_path *path;
2771 	int ret = 0;
2772 	struct extent_buffer *leaf;
2773 	struct btrfs_dir_item *di;
2774 	struct btrfs_key key;
2775 	u64 index;
2776 	u64 ino = btrfs_ino(inode);
2777 	u64 dir_ino = btrfs_ino(dir);
2778 
2779 	path = btrfs_alloc_path();
2780 	if (!path) {
2781 		ret = -ENOMEM;
2782 		goto out;
2783 	}
2784 
2785 	path->leave_spinning = 1;
2786 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2787 				    name, name_len, -1);
2788 	if (IS_ERR(di)) {
2789 		ret = PTR_ERR(di);
2790 		goto err;
2791 	}
2792 	if (!di) {
2793 		ret = -ENOENT;
2794 		goto err;
2795 	}
2796 	leaf = path->nodes[0];
2797 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
2798 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
2799 	if (ret)
2800 		goto err;
2801 	btrfs_release_path(path);
2802 
2803 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
2804 				  dir_ino, &index);
2805 	if (ret) {
2806 		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2807 		       "inode %llu parent %llu\n", name_len, name,
2808 		       (unsigned long long)ino, (unsigned long long)dir_ino);
2809 		btrfs_abort_transaction(trans, root, ret);
2810 		goto err;
2811 	}
2812 
2813 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
2814 	if (ret) {
2815 		btrfs_abort_transaction(trans, root, ret);
2816 		goto err;
2817 	}
2818 
2819 	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2820 					 inode, dir_ino);
2821 	if (ret != 0 && ret != -ENOENT) {
2822 		btrfs_abort_transaction(trans, root, ret);
2823 		goto err;
2824 	}
2825 
2826 	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2827 					   dir, index);
2828 	if (ret == -ENOENT)
2829 		ret = 0;
2830 err:
2831 	btrfs_free_path(path);
2832 	if (ret)
2833 		goto out;
2834 
2835 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2836 	inode_inc_iversion(inode);
2837 	inode_inc_iversion(dir);
2838 	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2839 	ret = btrfs_update_inode(trans, root, dir);
2840 out:
2841 	return ret;
2842 }
2843 
2844 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2845 		       struct btrfs_root *root,
2846 		       struct inode *dir, struct inode *inode,
2847 		       const char *name, int name_len)
2848 {
2849 	int ret;
2850 	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
2851 	if (!ret) {
2852 		btrfs_drop_nlink(inode);
2853 		ret = btrfs_update_inode(trans, root, inode);
2854 	}
2855 	return ret;
2856 }
2857 
2858 
2859 /* helper to check if there is any shared block in the path */
2860 static int check_path_shared(struct btrfs_root *root,
2861 			     struct btrfs_path *path)
2862 {
2863 	struct extent_buffer *eb;
2864 	int level;
2865 	u64 refs = 1;
2866 
2867 	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2868 		int ret;
2869 
2870 		if (!path->nodes[level])
2871 			break;
2872 		eb = path->nodes[level];
2873 		if (!btrfs_block_can_be_shared(root, eb))
2874 			continue;
2875 		ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
2876 					       &refs, NULL);
2877 		if (refs > 1)
2878 			return 1;
2879 	}
2880 	return 0;
2881 }
2882 
2883 /*
2884  * helper to start transaction for unlink and rmdir.
2885  *
2886  * unlink and rmdir are special in btrfs: they do not always free space,
2887  * so in the ENOSPC case we must make sure they actually will free space
2888  * before allowing them to use the global metadata reservation.
2889  */
2890 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2891 						       struct dentry *dentry)
2892 {
2893 	struct btrfs_trans_handle *trans;
2894 	struct btrfs_root *root = BTRFS_I(dir)->root;
2895 	struct btrfs_path *path;
2896 	struct btrfs_inode_ref *ref;
2897 	struct btrfs_dir_item *di;
2898 	struct inode *inode = dentry->d_inode;
2899 	u64 index;
2900 	int check_link = 1;
2901 	int err = -ENOSPC;
2902 	int ret;
2903 	u64 ino = btrfs_ino(inode);
2904 	u64 dir_ino = btrfs_ino(dir);
2905 
2906 	/*
2907 	 * 1 for the possible orphan item
2908 	 * 1 for the dir item
2909 	 * 1 for the dir index
2910 	 * 1 for the inode ref
2911 	 * 1 for the inode ref in the tree log
2912 	 * 2 for the dir entries in the log
2913 	 * 1 for the inode
2914 	 * 1 for the inode (8 units in total, hence the reservation below)
2915 	trans = btrfs_start_transaction(root, 8);
2916 	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
2917 		return trans;
2918 
2919 	if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
2920 		return ERR_PTR(-ENOSPC);
2921 
2922 	/* check if someone else holds a reference */
2923 	if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
2924 		return ERR_PTR(-ENOSPC);
2925 
2926 	if (atomic_read(&inode->i_count) > 2)
2927 		return ERR_PTR(-ENOSPC);
2928 
2929 	if (xchg(&root->fs_info->enospc_unlink, 1))
2930 		return ERR_PTR(-ENOSPC);
2931 
2932 	path = btrfs_alloc_path();
2933 	if (!path) {
2934 		root->fs_info->enospc_unlink = 0;
2935 		return ERR_PTR(-ENOMEM);
2936 	}
2937 
2938 	/* 1 for the orphan item */
2939 	trans = btrfs_start_transaction(root, 1);
2940 	if (IS_ERR(trans)) {
2941 		btrfs_free_path(path);
2942 		root->fs_info->enospc_unlink = 0;
2943 		return trans;
2944 	}
2945 
2946 	path->skip_locking = 1;
2947 	path->search_commit_root = 1;
2948 
2949 	ret = btrfs_lookup_inode(trans, root, path,
2950 				&BTRFS_I(dir)->location, 0);
2951 	if (ret < 0) {
2952 		err = ret;
2953 		goto out;
2954 	}
2955 	if (ret == 0) {
2956 		if (check_path_shared(root, path))
2957 			goto out;
2958 	} else {
2959 		check_link = 0;
2960 	}
2961 	btrfs_release_path(path);
2962 
2963 	ret = btrfs_lookup_inode(trans, root, path,
2964 				&BTRFS_I(inode)->location, 0);
2965 	if (ret < 0) {
2966 		err = ret;
2967 		goto out;
2968 	}
2969 	if (ret == 0) {
2970 		if (check_path_shared(root, path))
2971 			goto out;
2972 	} else {
2973 		check_link = 0;
2974 	}
2975 	btrfs_release_path(path);
2976 
2977 	if (ret == 0 && S_ISREG(inode->i_mode)) {
2978 		ret = btrfs_lookup_file_extent(trans, root, path,
2979 					       ino, (u64)-1, 0);
2980 		if (ret < 0) {
2981 			err = ret;
2982 			goto out;
2983 		}
2984 		BUG_ON(ret == 0); /* Corruption */
2985 		if (check_path_shared(root, path))
2986 			goto out;
2987 		btrfs_release_path(path);
2988 	}
2989 
2990 	if (!check_link) {
2991 		err = 0;
2992 		goto out;
2993 	}
2994 
2995 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2996 				dentry->d_name.name, dentry->d_name.len, 0);
2997 	if (IS_ERR(di)) {
2998 		err = PTR_ERR(di);
2999 		goto out;
3000 	}
3001 	if (di) {
3002 		if (check_path_shared(root, path))
3003 			goto out;
3004 	} else {
3005 		err = 0;
3006 		goto out;
3007 	}
3008 	btrfs_release_path(path);
3009 
3010 	ref = btrfs_lookup_inode_ref(trans, root, path,
3011 				dentry->d_name.name, dentry->d_name.len,
3012 				ino, dir_ino, 0);
3013 	if (IS_ERR(ref)) {
3014 		err = PTR_ERR(ref);
3015 		goto out;
3016 	}
3017 	BUG_ON(!ref); /* Logic error */
3018 	if (check_path_shared(root, path))
3019 		goto out;
3020 	index = btrfs_inode_ref_index(path->nodes[0], ref);
3021 	btrfs_release_path(path);
3022 
3023 	/*
3024 	 * This is a commit root search, if we can lookup inode item and other
3025 	 * This is a commit root search: if we can look up the inode item and
3026 	 * the other related items in the commit root, the transaction that
3027 	 * created the dir/file has been committed, and the dir index item
3028 	 * whose insertion we delayed has also made it into the commit root.
3029 	 * So we needn't worry about the delayed insertion of the dir index
3030 	 * item here.
3031 	di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
3032 				dentry->d_name.name, dentry->d_name.len, 0);
3033 	if (IS_ERR(di)) {
3034 		err = PTR_ERR(di);
3035 		goto out;
3036 	}
3037 	BUG_ON(ret == -ENOENT);
3038 	if (check_path_shared(root, path))
3039 		goto out;
3040 
3041 	err = 0;
3042 out:
3043 	btrfs_free_path(path);
3044 	/* Migrate the orphan reservation over */
3045 	if (!err)
3046 		err = btrfs_block_rsv_migrate(trans->block_rsv,
3047 				&root->fs_info->global_block_rsv,
3048 				trans->bytes_reserved);
3049 
3050 	if (err) {
3051 		btrfs_end_transaction(trans, root);
3052 		root->fs_info->enospc_unlink = 0;
3053 		return ERR_PTR(err);
3054 	}
3055 
3056 	trans->block_rsv = &root->fs_info->global_block_rsv;
3057 	return trans;
3058 }
3059 
3060 static void __unlink_end_trans(struct btrfs_trans_handle *trans,
3061 			       struct btrfs_root *root)
3062 {
3063 	if (trans->block_rsv == &root->fs_info->global_block_rsv) {
3064 		btrfs_block_rsv_release(root, trans->block_rsv,
3065 					trans->bytes_reserved);
3066 		trans->block_rsv = &root->fs_info->trans_block_rsv;
3067 		BUG_ON(!root->fs_info->enospc_unlink);
3068 		root->fs_info->enospc_unlink = 0;
3069 	}
3070 	btrfs_end_transaction(trans, root);
3071 }
3072 
3073 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
3074 {
3075 	struct btrfs_root *root = BTRFS_I(dir)->root;
3076 	struct btrfs_trans_handle *trans;
3077 	struct inode *inode = dentry->d_inode;
3078 	int ret;
3079 	unsigned long nr = 0;
3080 
3081 	trans = __unlink_start_trans(dir, dentry);
3082 	if (IS_ERR(trans))
3083 		return PTR_ERR(trans);
3084 
3085 	btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
3086 
3087 	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3088 				 dentry->d_name.name, dentry->d_name.len);
3089 	if (ret)
3090 		goto out;
3091 
3092 	if (inode->i_nlink == 0) {
3093 		ret = btrfs_orphan_add(trans, inode);
3094 		if (ret)
3095 			goto out;
3096 	}
3097 
3098 out:
3099 	nr = trans->blocks_used;
3100 	__unlink_end_trans(trans, root);
3101 	btrfs_btree_balance_dirty(root, nr);
3102 	return ret;
3103 }
3104 
3105 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3106 			struct btrfs_root *root,
3107 			struct inode *dir, u64 objectid,
3108 			const char *name, int name_len)
3109 {
3110 	struct btrfs_path *path;
3111 	struct extent_buffer *leaf;
3112 	struct btrfs_dir_item *di;
3113 	struct btrfs_key key;
3114 	u64 index;
3115 	int ret;
3116 	u64 dir_ino = btrfs_ino(dir);
3117 
3118 	path = btrfs_alloc_path();
3119 	if (!path)
3120 		return -ENOMEM;
3121 
3122 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3123 				   name, name_len, -1);
3124 	if (IS_ERR_OR_NULL(di)) {
3125 		if (!di)
3126 			ret = -ENOENT;
3127 		else
3128 			ret = PTR_ERR(di);
3129 		goto out;
3130 	}
3131 
3132 	leaf = path->nodes[0];
3133 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
3134 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
3135 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
3136 	if (ret) {
3137 		btrfs_abort_transaction(trans, root, ret);
3138 		goto out;
3139 	}
3140 	btrfs_release_path(path);
3141 
3142 	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
3143 				 objectid, root->root_key.objectid,
3144 				 dir_ino, &index, name, name_len);
3145 	if (ret < 0) {
3146 		if (ret != -ENOENT) {
3147 			btrfs_abort_transaction(trans, root, ret);
3148 			goto out;
3149 		}
3150 		di = btrfs_search_dir_index_item(root, path, dir_ino,
3151 						 name, name_len);
3152 		if (IS_ERR_OR_NULL(di)) {
3153 			if (!di)
3154 				ret = -ENOENT;
3155 			else
3156 				ret = PTR_ERR(di);
3157 			btrfs_abort_transaction(trans, root, ret);
3158 			goto out;
3159 		}
3160 
3161 		leaf = path->nodes[0];
3162 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3163 		btrfs_release_path(path);
3164 		index = key.offset;
3165 	}
3166 	btrfs_release_path(path);
3167 
3168 	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3169 	if (ret) {
3170 		btrfs_abort_transaction(trans, root, ret);
3171 		goto out;
3172 	}
3173 
3174 	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3175 	inode_inc_iversion(dir);
3176 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3177 	ret = btrfs_update_inode(trans, root, dir);
3178 	if (ret)
3179 		btrfs_abort_transaction(trans, root, ret);
3180 out:
3181 	btrfs_free_path(path);
3182 	return ret;
3183 }
3184 
3185 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3186 {
3187 	struct inode *inode = dentry->d_inode;
3188 	int err = 0;
3189 	struct btrfs_root *root = BTRFS_I(dir)->root;
3190 	struct btrfs_trans_handle *trans;
3191 	unsigned long nr = 0;
3192 
3193 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
3194 	    btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
3195 		return -ENOTEMPTY;
3196 
3197 	trans = __unlink_start_trans(dir, dentry);
3198 	if (IS_ERR(trans))
3199 		return PTR_ERR(trans);
3200 
3201 	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
3202 		err = btrfs_unlink_subvol(trans, root, dir,
3203 					  BTRFS_I(inode)->location.objectid,
3204 					  dentry->d_name.name,
3205 					  dentry->d_name.len);
3206 		goto out;
3207 	}
3208 
3209 	err = btrfs_orphan_add(trans, inode);
3210 	if (err)
3211 		goto out;
3212 
3213 	/* now the directory is empty */
3214 	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
3215 				 dentry->d_name.name, dentry->d_name.len);
3216 	if (!err)
3217 		btrfs_i_size_write(inode, 0);
3218 out:
3219 	nr = trans->blocks_used;
3220 	__unlink_end_trans(trans, root);
3221 	btrfs_btree_balance_dirty(root, nr);
3222 
3223 	return err;
3224 }
3225 
3226 /*
3227  * this can truncate away extent items, csum items and directory items.
3228  * It starts at a high offset and removes keys until it can't find
3229  * any higher than new_size
3230  *
3231  * csum items that cross the new i_size are truncated to the new size
3232  * as well.
3233  *
3234  * min_type is the minimum key type to truncate down to.  If set to 0, this
3235  * will kill all the items on this inode, including the INODE_ITEM_KEY.
3236  */
3237 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3238 			       struct btrfs_root *root,
3239 			       struct inode *inode,
3240 			       u64 new_size, u32 min_type)
3241 {
3242 	struct btrfs_path *path;
3243 	struct extent_buffer *leaf;
3244 	struct btrfs_file_extent_item *fi;
3245 	struct btrfs_key key;
3246 	struct btrfs_key found_key;
3247 	u64 extent_start = 0;
3248 	u64 extent_num_bytes = 0;
3249 	u64 extent_offset = 0;
3250 	u64 item_end = 0;
3251 	u64 mask = root->sectorsize - 1;
3252 	u32 found_type = (u8)-1;
3253 	int found_extent;
3254 	int del_item;
3255 	int pending_del_nr = 0;
3256 	int pending_del_slot = 0;
3257 	int extent_type = -1;
3258 	int ret;
3259 	int err = 0;
3260 	u64 ino = btrfs_ino(inode);
3261 
3262 	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
3263 
3264 	path = btrfs_alloc_path();
3265 	if (!path)
3266 		return -ENOMEM;
3267 	path->reada = -1;
3268 
3269 	if (root->ref_cows || root == root->fs_info->tree_root)
3270 		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
3271 
3272 	/*
3273 	 * This function is also used to drop the items in the log tree before
3274 	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
3275 	 * it is used to drop the logged items. So we shouldn't kill the delayed
3276 	 * items.
3277 	 */
3278 	if (min_type == 0 && root == BTRFS_I(inode)->root)
3279 		btrfs_kill_delayed_inode_items(inode);
3280 
3281 	key.objectid = ino;
3282 	key.offset = (u64)-1;
3283 	key.type = (u8)-1;
3284 
3285 search_again:
3286 	path->leave_spinning = 1;
3287 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3288 	if (ret < 0) {
3289 		err = ret;
3290 		goto out;
3291 	}
3292 
3293 	if (ret > 0) {
3294 		/* there are no items in the tree for us to truncate, we're
3295 		 * done
3296 		 */
3297 		if (path->slots[0] == 0)
3298 			goto out;
3299 		path->slots[0]--;
3300 	}
3301 
3302 	while (1) {
3303 		fi = NULL;
3304 		leaf = path->nodes[0];
3305 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3306 		found_type = btrfs_key_type(&found_key);
3307 
3308 		if (found_key.objectid != ino)
3309 			break;
3310 
3311 		if (found_type < min_type)
3312 			break;
3313 
3314 		item_end = found_key.offset;
3315 		if (found_type == BTRFS_EXTENT_DATA_KEY) {
3316 			fi = btrfs_item_ptr(leaf, path->slots[0],
3317 					    struct btrfs_file_extent_item);
3318 			extent_type = btrfs_file_extent_type(leaf, fi);
3319 			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3320 				item_end +=
3321 				    btrfs_file_extent_num_bytes(leaf, fi);
3322 			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3323 				item_end += btrfs_file_extent_inline_len(leaf,
3324 									 fi);
3325 			}
3326 			item_end--;
3327 		}
3328 		if (found_type > min_type) {
3329 			del_item = 1;
3330 		} else {
3331 			if (item_end < new_size)
3332 				break;
3333 			if (found_key.offset >= new_size)
3334 				del_item = 1;
3335 			else
3336 				del_item = 0;
3337 		}
3338 		found_extent = 0;
3339 		/* FIXME, shrink the extent if the ref count is only 1 */
3340 		if (found_type != BTRFS_EXTENT_DATA_KEY)
3341 			goto delete;
3342 
3343 		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3344 			u64 num_dec;
3345 			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
3346 			if (!del_item) {
3347 				u64 orig_num_bytes =
3348 					btrfs_file_extent_num_bytes(leaf, fi);
3349 				extent_num_bytes = new_size -
3350 					found_key.offset + root->sectorsize - 1;
3351 				extent_num_bytes = extent_num_bytes &
3352 					~((u64)root->sectorsize - 1);
3353 				btrfs_set_file_extent_num_bytes(leaf, fi,
3354 							 extent_num_bytes);
3355 				num_dec = (orig_num_bytes -
3356 					   extent_num_bytes);
3357 				if (root->ref_cows && extent_start != 0)
3358 					inode_sub_bytes(inode, num_dec);
3359 				btrfs_mark_buffer_dirty(leaf);
3360 			} else {
3361 				extent_num_bytes =
3362 					btrfs_file_extent_disk_num_bytes(leaf,
3363 									 fi);
3364 				extent_offset = found_key.offset -
3365 					btrfs_file_extent_offset(leaf, fi);
3366 
3367 				/* FIXME blocksize != 4096 */
3368 				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
3369 				if (extent_start != 0) {
3370 					found_extent = 1;
3371 					if (root->ref_cows)
3372 						inode_sub_bytes(inode, num_dec);
3373 				}
3374 			}
3375 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3376 			/*
3377 			 * we can't truncate inline items that have had
3378 			 * special encodings
3379 			 */
3380 			if (!del_item &&
3381 			    btrfs_file_extent_compression(leaf, fi) == 0 &&
3382 			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
3383 			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
3384 				u32 size = new_size - found_key.offset;
3385 
3386 				if (root->ref_cows) {
3387 					inode_sub_bytes(inode, item_end + 1 -
3388 							new_size);
3389 				}
3390 				size =
3391 				    btrfs_file_extent_calc_inline_size(size);
3392 				btrfs_truncate_item(trans, root, path,
3393 						    size, 1);
3394 			} else if (root->ref_cows) {
3395 				inode_sub_bytes(inode, item_end + 1 -
3396 						found_key.offset);
3397 			}
3398 		}
3399 delete:
3400 		if (del_item) {
3401 			if (!pending_del_nr) {
3402 				/* no pending yet, add ourselves */
3403 				pending_del_slot = path->slots[0];
3404 				pending_del_nr = 1;
3405 			} else if (pending_del_nr &&
3406 				   path->slots[0] + 1 == pending_del_slot) {
3407 				/* hop on the pending chunk */
3408 				pending_del_nr++;
3409 				pending_del_slot = path->slots[0];
3410 			} else {
3411 				BUG();
3412 			}
3413 		} else {
3414 			break;
3415 		}
3416 		if (found_extent && (root->ref_cows ||
3417 				     root == root->fs_info->tree_root)) {
3418 			btrfs_set_path_blocking(path);
3419 			ret = btrfs_free_extent(trans, root, extent_start,
3420 						extent_num_bytes, 0,
3421 						btrfs_header_owner(leaf),
3422 						ino, extent_offset, 0);
3423 			BUG_ON(ret);
3424 		}
3425 
3426 		if (found_type == BTRFS_INODE_ITEM_KEY)
3427 			break;
3428 
3429 		if (path->slots[0] == 0 ||
3430 		    path->slots[0] != pending_del_slot) {
3431 			if (root->ref_cows &&
3432 			    BTRFS_I(inode)->location.objectid !=
3433 						BTRFS_FREE_INO_OBJECTID) {
3434 				err = -EAGAIN;
3435 				goto out;
3436 			}
3437 			if (pending_del_nr) {
3438 				ret = btrfs_del_items(trans, root, path,
3439 						pending_del_slot,
3440 						pending_del_nr);
3441 				if (ret) {
3442 					btrfs_abort_transaction(trans,
3443 								root, ret);
3444 					goto error;
3445 				}
3446 				pending_del_nr = 0;
3447 			}
3448 			btrfs_release_path(path);
3449 			goto search_again;
3450 		} else {
3451 			path->slots[0]--;
3452 		}
3453 	}
3454 out:
3455 	if (pending_del_nr) {
3456 		ret = btrfs_del_items(trans, root, path, pending_del_slot,
3457 				      pending_del_nr);
3458 		if (ret)
3459 			btrfs_abort_transaction(trans, root, ret);
3460 	}
3461 error:
3462 	btrfs_free_path(path);
3463 	return err;
3464 }
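
/*
 * the pending_del_* bookkeeping above batches deletions: the loop
 * walks slots downwards, so doomed items end up in adjacent slots and
 * can be removed with a single btrfs_del_items() call instead of one
 * call per item.  a minimal sketch of the batching rule:
 */
#if 0
static void example_batch(int slot, int *del_slot, int *del_nr)
{
	if (!*del_nr) {
		*del_slot = slot;	/* start a new batch */
		*del_nr = 1;
	} else if (slot + 1 == *del_slot) {
		*del_slot = slot;	/* extend the batch downwards */
		(*del_nr)++;
	}
	/* any other slot: flush the old batch first, then start anew */
}
#endif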
3465 
3466 /*
3467  * taken from block_truncate_page, but does COW as it zeros out
3468  * any bytes left in the last page in the file.
3469  */
3470 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3471 {
3472 	struct inode *inode = mapping->host;
3473 	struct btrfs_root *root = BTRFS_I(inode)->root;
3474 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3475 	struct btrfs_ordered_extent *ordered;
3476 	struct extent_state *cached_state = NULL;
3477 	char *kaddr;
3478 	u32 blocksize = root->sectorsize;
3479 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
3480 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
3481 	struct page *page;
3482 	gfp_t mask = btrfs_alloc_write_mask(mapping);
3483 	int ret = 0;
3484 	u64 page_start;
3485 	u64 page_end;
3486 
3487 	if ((offset & (blocksize - 1)) == 0)
3488 		goto out;
3489 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
3490 	if (ret)
3491 		goto out;
3492 
3493 	ret = -ENOMEM;
3494 again:
3495 	page = find_or_create_page(mapping, index, mask);
3496 	if (!page) {
3497 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
3498 		goto out;
3499 	}
3500 
3501 	page_start = page_offset(page);
3502 	page_end = page_start + PAGE_CACHE_SIZE - 1;
3503 
3504 	if (!PageUptodate(page)) {
3505 		ret = btrfs_readpage(NULL, page);
3506 		lock_page(page);
3507 		if (page->mapping != mapping) {
3508 			unlock_page(page);
3509 			page_cache_release(page);
3510 			goto again;
3511 		}
3512 		if (!PageUptodate(page)) {
3513 			ret = -EIO;
3514 			goto out_unlock;
3515 		}
3516 	}
3517 	wait_on_page_writeback(page);
3518 
3519 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
3520 	set_page_extent_mapped(page);
3521 
3522 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
3523 	if (ordered) {
3524 		unlock_extent_cached(io_tree, page_start, page_end,
3525 				     &cached_state, GFP_NOFS);
3526 		unlock_page(page);
3527 		page_cache_release(page);
3528 		btrfs_start_ordered_extent(inode, ordered, 1);
3529 		btrfs_put_ordered_extent(ordered);
3530 		goto again;
3531 	}
3532 
3533 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
3534 			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
3535 			  0, 0, &cached_state, GFP_NOFS);
3536 
3537 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
3538 					&cached_state);
3539 	if (ret) {
3540 		unlock_extent_cached(io_tree, page_start, page_end,
3541 				     &cached_state, GFP_NOFS);
3542 		goto out_unlock;
3543 	}
3544 
3545 	ret = 0;
3546 	if (offset != PAGE_CACHE_SIZE) {
3547 		kaddr = kmap(page);
3548 		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
3549 		flush_dcache_page(page);
3550 		kunmap(page);
3551 	}
3552 	ClearPageChecked(page);
3553 	set_page_dirty(page);
3554 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
3555 			     GFP_NOFS);
3556 
3557 out_unlock:
3558 	if (ret)
3559 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
3560 	unlock_page(page);
3561 	page_cache_release(page);
3562 out:
3563 	return ret;
3564 }
3565 
3566 /*
3567  * This function puts in dummy file extents for the area we're creating a hole
3568  * for.  So if we are truncating this file to a larger size we need to insert
3569  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for
3570  * the range between oldsize and size.
3571  */
3572 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3573 {
3574 	struct btrfs_trans_handle *trans;
3575 	struct btrfs_root *root = BTRFS_I(inode)->root;
3576 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3577 	struct extent_map *em = NULL;
3578 	struct extent_state *cached_state = NULL;
3579 	u64 mask = root->sectorsize - 1;
3580 	u64 hole_start = (oldsize + mask) & ~mask;
3581 	u64 block_end = (size + mask) & ~mask;
3582 	u64 last_byte;
3583 	u64 cur_offset;
3584 	u64 hole_size;
3585 	int err = 0;
3586 
3587 	if (size <= hole_start)
3588 		return 0;
3589 
3590 	while (1) {
3591 		struct btrfs_ordered_extent *ordered;
3592 		btrfs_wait_ordered_range(inode, hole_start,
3593 					 block_end - hole_start);
3594 		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
3595 				 &cached_state);
3596 		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3597 		if (!ordered)
3598 			break;
3599 		unlock_extent_cached(io_tree, hole_start, block_end - 1,
3600 				     &cached_state, GFP_NOFS);
3601 		btrfs_put_ordered_extent(ordered);
3602 	}
3603 
3604 	cur_offset = hole_start;
3605 	while (1) {
3606 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3607 				block_end - cur_offset, 0);
3608 		if (IS_ERR(em)) {
3609 			err = PTR_ERR(em);
3610 			break;
3611 		}
3612 		last_byte = min(extent_map_end(em), block_end);
3613 		last_byte = (last_byte + mask) & ~mask;
3614 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3615 			u64 hint_byte = 0;
3616 			hole_size = last_byte - cur_offset;
3617 
3618 			trans = btrfs_start_transaction(root, 3);
3619 			if (IS_ERR(trans)) {
3620 				err = PTR_ERR(trans);
3621 				break;
3622 			}
3623 
3624 			err = btrfs_drop_extents(trans, inode, cur_offset,
3625 						 cur_offset + hole_size,
3626 						 &hint_byte, 1);
3627 			if (err) {
3628 				btrfs_abort_transaction(trans, root, err);
3629 				btrfs_end_transaction(trans, root);
3630 				break;
3631 			}
3632 
3633 			err = btrfs_insert_file_extent(trans, root,
3634 					btrfs_ino(inode), cur_offset, 0,
3635 					0, hole_size, 0, hole_size,
3636 					0, 0, 0);
3637 			if (err) {
3638 				btrfs_abort_transaction(trans, root, err);
3639 				btrfs_end_transaction(trans, root);
3640 				break;
3641 			}
3642 
3643 			btrfs_drop_extent_cache(inode, hole_start,
3644 					last_byte - 1, 0);
3645 
3646 			btrfs_update_inode(trans, root, inode);
3647 			btrfs_end_transaction(trans, root);
3648 		}
3649 		free_extent_map(em);
3650 		em = NULL;
3651 		cur_offset = last_byte;
3652 		if (cur_offset >= block_end)
3653 			break;
3654 	}
3655 
3656 	free_extent_map(em);
3657 	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
3658 			     GFP_NOFS);
3659 	return err;
3660 }
3661 
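/*
 * handle a size change coming from setattr.  Growing the file goes
 * through btrfs_cont_expand() to fill the new range with hole extents
 * before i_size is updated; shrinking drops the page cache past the
 * new size and lets btrfs_truncate() remove the items.
 */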
3662 static int btrfs_setsize(struct inode *inode, loff_t newsize)
3663 {
3664 	struct btrfs_root *root = BTRFS_I(inode)->root;
3665 	struct btrfs_trans_handle *trans;
3666 	loff_t oldsize = i_size_read(inode);
3667 	int ret;
3668 
3669 	if (newsize == oldsize)
3670 		return 0;
3671 
3672 	if (newsize > oldsize) {
3673 		truncate_pagecache(inode, oldsize, newsize);
3674 		ret = btrfs_cont_expand(inode, oldsize, newsize);
3675 		if (ret)
3676 			return ret;
3677 
3678 		trans = btrfs_start_transaction(root, 1);
3679 		if (IS_ERR(trans))
3680 			return PTR_ERR(trans);
3681 
3682 		i_size_write(inode, newsize);
3683 		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
3684 		ret = btrfs_update_inode(trans, root, inode);
3685 		btrfs_end_transaction(trans, root);
3686 	} else {
3687 
3688 		/*
3689 		 * We're truncating a file that used to have good data down to
3690 		 * zero. Make sure it gets into the ordered flush list so that
3691 		 * any new writes get down to disk quickly.
3692 		 */
3693 		if (newsize == 0)
3694 			set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
3695 				&BTRFS_I(inode)->runtime_flags);
3696 
3697 		/* we don't support swapfiles, so vmtruncate shouldn't fail */
3698 		truncate_setsize(inode, newsize);
3699 		ret = btrfs_truncate(inode);
3700 	}
3701 
3702 	return ret;
3703 }
3704 
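/*
 * ->setattr for btrfs inodes.  Size changes are routed through
 * btrfs_setsize(); everything else is copied into the in-core inode
 * and written back via btrfs_dirty_inode().
 */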
3705 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3706 {
3707 	struct inode *inode = dentry->d_inode;
3708 	struct btrfs_root *root = BTRFS_I(inode)->root;
3709 	int err;
3710 
3711 	if (btrfs_root_readonly(root))
3712 		return -EROFS;
3713 
3714 	err = inode_change_ok(inode, attr);
3715 	if (err)
3716 		return err;
3717 
3718 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3719 		err = btrfs_setsize(inode, attr->ia_size);
3720 		if (err)
3721 			return err;
3722 	}
3723 
3724 	if (attr->ia_valid) {
3725 		setattr_copy(inode, attr);
3726 		inode_inc_iversion(inode);
3727 		err = btrfs_dirty_inode(inode);
3728 
3729 		if (!err && attr->ia_valid & ATTR_MODE)
3730 			err = btrfs_acl_chmod(inode);
3731 	}
3732 
3733 	return err;
3734 }
3735 
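/*
 * called when the inode is finally dropped: truncate away every item
 * that belongs to it (reserving metadata space as we go) and remove
 * the orphan item that unlink left behind.
 */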
3736 void btrfs_evict_inode(struct inode *inode)
3737 {
3738 	struct btrfs_trans_handle *trans;
3739 	struct btrfs_root *root = BTRFS_I(inode)->root;
3740 	struct btrfs_block_rsv *rsv, *global_rsv;
3741 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
3742 	unsigned long nr;
3743 	int ret;
3744 
3745 	trace_btrfs_inode_evict(inode);
3746 
3747 	truncate_inode_pages(&inode->i_data, 0);
3748 	if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
3749 			       btrfs_is_free_space_inode(inode)))
3750 		goto no_delete;
3751 
3752 	if (is_bad_inode(inode)) {
3753 		btrfs_orphan_del(NULL, inode);
3754 		goto no_delete;
3755 	}
3756 	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
3757 	btrfs_wait_ordered_range(inode, 0, (u64)-1);
3758 
3759 	if (root->fs_info->log_root_recovering) {
3760 		BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3761 				 &BTRFS_I(inode)->runtime_flags));
3762 		goto no_delete;
3763 	}
3764 
3765 	if (inode->i_nlink > 0) {
3766 		BUG_ON(btrfs_root_refs(&root->root_item) != 0);
3767 		goto no_delete;
3768 	}
3769 
3770 	rsv = btrfs_alloc_block_rsv(root);
3771 	if (!rsv) {
3772 		btrfs_orphan_del(NULL, inode);
3773 		goto no_delete;
3774 	}
3775 	rsv->size = min_size;
3776 	global_rsv = &root->fs_info->global_block_rsv;
3777 
3778 	btrfs_i_size_write(inode, 0);
3779 
3780 	/*
3781 	 * This is a bit simpler than btrfs_truncate since
3782 	 *
3783 	 * 1) We've already reserved our space for our orphan item in the
3784 	 *    unlink.
3785 	 * 2) We're going to delete the inode item, so we don't need to update
3786 	 *    it at all.
3787 	 *
3788 	 * So we just need to reserve some slack space in case we add bytes when
3789 	 * doing the truncate.
3790 	 */
3791 	while (1) {
3792 		ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
3793 
3794 		/*
3795 		 * Try and steal from the global reserve since we will
3796  * likely not use this space anyway; we want to try as
3797 		 * hard as possible to get this to work.
3798 		 */
3799 		if (ret)
3800 			ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
3801 
3802 		if (ret) {
3803 			printk(KERN_WARNING "Could not get space for a "
3804 			       "delete, will truncate on mount %d\n", ret);
3805 			btrfs_orphan_del(NULL, inode);
3806 			btrfs_free_block_rsv(root, rsv);
3807 			goto no_delete;
3808 		}
3809 
3810 		trans = btrfs_start_transaction(root, 0);
3811 		if (IS_ERR(trans)) {
3812 			btrfs_orphan_del(NULL, inode);
3813 			btrfs_free_block_rsv(root, rsv);
3814 			goto no_delete;
3815 		}
3816 
3817 		trans->block_rsv = rsv;
3818 
3819 		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
3820 		if (ret != -EAGAIN)
3821 			break;
3822 
3823 		nr = trans->blocks_used;
3824 		btrfs_end_transaction(trans, root);
3825 		trans = NULL;
3826 		btrfs_btree_balance_dirty(root, nr);
3827 	}
3828 
3829 	btrfs_free_block_rsv(root, rsv);
3830 
3831 	if (ret == 0) {
3832 		trans->block_rsv = root->orphan_block_rsv;
3833 		ret = btrfs_orphan_del(trans, inode);
3834 		BUG_ON(ret);
3835 	}
3836 
3837 	trans->block_rsv = &root->fs_info->trans_block_rsv;
3838 	if (!(root == root->fs_info->tree_root ||
3839 	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
3840 		btrfs_return_ino(root, btrfs_ino(inode));
3841 
3842 	nr = trans->blocks_used;
3843 	btrfs_end_transaction(trans, root);
3844 	btrfs_btree_balance_dirty(root, nr);
3845 no_delete:
3846 	clear_inode(inode);
3847 	return;
3848 }
3849 
3850 /*
3851  * this returns the key found in the dir entry in the location pointer.
3852  * If no dir entries were found, location->objectid is 0.
3853  */
3854 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3855 			       struct btrfs_key *location)
3856 {
3857 	const char *name = dentry->d_name.name;
3858 	int namelen = dentry->d_name.len;
3859 	struct btrfs_dir_item *di;
3860 	struct btrfs_path *path;
3861 	struct btrfs_root *root = BTRFS_I(dir)->root;
3862 	int ret = 0;
3863 
3864 	path = btrfs_alloc_path();
3865 	if (!path)
3866 		return -ENOMEM;
3867 
3868 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
3869 				    namelen, 0);
3870 	if (IS_ERR(di))
3871 		ret = PTR_ERR(di);
3872 
3873 	if (IS_ERR_OR_NULL(di))
3874 		goto out_err;
3875 
3876 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3877 out:
3878 	btrfs_free_path(path);
3879 	return ret;
3880 out_err:
3881 	location->objectid = 0;
3882 	goto out;
3883 }
3884 
3885 /*
3886  * when we hit a tree root in a directory, the btrfs part of the inode
3887  * needs to be changed to reflect the root directory of the tree root.  This
3888  * is kind of like crossing a mount point.
3889  */
3890 static int fixup_tree_root_location(struct btrfs_root *root,
3891 				    struct inode *dir,
3892 				    struct dentry *dentry,
3893 				    struct btrfs_key *location,
3894 				    struct btrfs_root **sub_root)
3895 {
3896 	struct btrfs_path *path;
3897 	struct btrfs_root *new_root;
3898 	struct btrfs_root_ref *ref;
3899 	struct extent_buffer *leaf;
3900 	int ret;
3901 	int err = 0;
3902 
3903 	path = btrfs_alloc_path();
3904 	if (!path) {
3905 		err = -ENOMEM;
3906 		goto out;
3907 	}
3908 
3909 	err = -ENOENT;
3910 	ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
3911 				  BTRFS_I(dir)->root->root_key.objectid,
3912 				  location->objectid);
3913 	if (ret) {
3914 		if (ret < 0)
3915 			err = ret;
3916 		goto out;
3917 	}
3918 
3919 	leaf = path->nodes[0];
3920 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3921 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
3922 	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
3923 		goto out;
3924 
3925 	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
3926 				   (unsigned long)(ref + 1),
3927 				   dentry->d_name.len);
3928 	if (ret)
3929 		goto out;
3930 
3931 	btrfs_release_path(path);
3932 
3933 	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
3934 	if (IS_ERR(new_root)) {
3935 		err = PTR_ERR(new_root);
3936 		goto out;
3937 	}
3938 
3939 	if (btrfs_root_refs(&new_root->root_item) == 0) {
3940 		err = -ENOENT;
3941 		goto out;
3942 	}
3943 
3944 	*sub_root = new_root;
3945 	location->objectid = btrfs_root_dirid(&new_root->root_item);
3946 	location->type = BTRFS_INODE_ITEM_KEY;
3947 	location->offset = 0;
3948 	err = 0;
3949 out:
3950 	btrfs_free_path(path);
3951 	return err;
3952 }
3953 
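/*
 * insert the inode into the per-root red-black tree of in-memory
 * inodes, keyed by inode number.  A stale entry for the same ino (an
 * inode that is on its way to being freed) is erased first.
 */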
3954 static void inode_tree_add(struct inode *inode)
3955 {
3956 	struct btrfs_root *root = BTRFS_I(inode)->root;
3957 	struct btrfs_inode *entry;
3958 	struct rb_node **p;
3959 	struct rb_node *parent;
3960 	u64 ino = btrfs_ino(inode);
3961 again:
3962 	p = &root->inode_tree.rb_node;
3963 	parent = NULL;
3964 
3965 	if (inode_unhashed(inode))
3966 		return;
3967 
3968 	spin_lock(&root->inode_lock);
3969 	while (*p) {
3970 		parent = *p;
3971 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
3972 
3973 		if (ino < btrfs_ino(&entry->vfs_inode))
3974 			p = &parent->rb_left;
3975 		else if (ino > btrfs_ino(&entry->vfs_inode))
3976 			p = &parent->rb_right;
3977 		else {
3978 			WARN_ON(!(entry->vfs_inode.i_state &
3979 				  (I_WILL_FREE | I_FREEING)));
3980 			rb_erase(parent, &root->inode_tree);
3981 			RB_CLEAR_NODE(parent);
3982 			spin_unlock(&root->inode_lock);
3983 			goto again;
3984 		}
3985 	}
3986 	rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
3987 	rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3988 	spin_unlock(&root->inode_lock);
3989 }
3990 
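/*
 * remove the inode from the per-root red-black tree.  If that leaves a
 * dead root (root_refs == 0) with no cached inodes, queue the root for
 * cleanup via btrfs_add_dead_root().
 */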
3991 static void inode_tree_del(struct inode *inode)
3992 {
3993 	struct btrfs_root *root = BTRFS_I(inode)->root;
3994 	int empty = 0;
3995 
3996 	spin_lock(&root->inode_lock);
3997 	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
3998 		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3999 		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
4000 		empty = RB_EMPTY_ROOT(&root->inode_tree);
4001 	}
4002 	spin_unlock(&root->inode_lock);
4003 
4004 	/*
4005 	 * Free space cache has inodes in the tree root, but the tree root has a
4006 	 * root_refs of 0, so this could end up dropping the tree root as a
4007  * snapshot, so we need the extra root != fs_info->tree_root check to
4008 	 * make sure we don't drop it.
4009 	 */
4010 	if (empty && btrfs_root_refs(&root->root_item) == 0 &&
4011 	    root != root->fs_info->tree_root) {
4012 		synchronize_srcu(&root->fs_info->subvol_srcu);
4013 		spin_lock(&root->inode_lock);
4014 		empty = RB_EMPTY_ROOT(&root->inode_tree);
4015 		spin_unlock(&root->inode_lock);
4016 		if (empty)
4017 			btrfs_add_dead_root(root);
4018 	}
4019 }
4020 
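/*
 * walk the per-root inode tree in inode number order and drop every
 * cached inode, pruning dcache aliases so the final iput() can free
 * them.  Expected to run only for dead roots (see the WARN_ON below).
 */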
4021 void btrfs_invalidate_inodes(struct btrfs_root *root)
4022 {
4023 	struct rb_node *node;
4024 	struct rb_node *prev;
4025 	struct btrfs_inode *entry;
4026 	struct inode *inode;
4027 	u64 objectid = 0;
4028 
4029 	WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4030 
4031 	spin_lock(&root->inode_lock);
4032 again:
4033 	node = root->inode_tree.rb_node;
4034 	prev = NULL;
4035 	while (node) {
4036 		prev = node;
4037 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4038 
4039 		if (objectid < btrfs_ino(&entry->vfs_inode))
4040 			node = node->rb_left;
4041 		else if (objectid > btrfs_ino(&entry->vfs_inode))
4042 			node = node->rb_right;
4043 		else
4044 			break;
4045 	}
4046 	if (!node) {
4047 		while (prev) {
4048 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
4049 			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
4050 				node = prev;
4051 				break;
4052 			}
4053 			prev = rb_next(prev);
4054 		}
4055 	}
4056 	while (node) {
4057 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4058 		objectid = btrfs_ino(&entry->vfs_inode) + 1;
4059 		inode = igrab(&entry->vfs_inode);
4060 		if (inode) {
4061 			spin_unlock(&root->inode_lock);
4062 			if (atomic_read(&inode->i_count) > 1)
4063 				d_prune_aliases(inode);
4064 			/*
4065 			 * btrfs_drop_inode will have it removed from
4066 			 * the inode cache when its usage count
4067 			 * hits zero.
4068 			 */
4069 			iput(inode);
4070 			cond_resched();
4071 			spin_lock(&root->inode_lock);
4072 			goto again;
4073 		}
4074 
4075 		if (cond_resched_lock(&root->inode_lock))
4076 			goto again;
4077 
4078 		node = rb_next(node);
4079 	}
4080 	spin_unlock(&root->inode_lock);
4081 }
4082 
4083 static int btrfs_init_locked_inode(struct inode *inode, void *p)
4084 {
4085 	struct btrfs_iget_args *args = p;
4086 	inode->i_ino = args->ino;
4087 	BTRFS_I(inode)->root = args->root;
4088 	return 0;
4089 }
4090 
4091 static int btrfs_find_actor(struct inode *inode, void *opaque)
4092 {
4093 	struct btrfs_iget_args *args = opaque;
4094 	return args->ino == btrfs_ino(inode) &&
4095 		args->root == BTRFS_I(inode)->root;
4096 }
4097 
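/*
 * look up or allocate the VFS inode for (objectid, root).  The actor
 * has to match on both fields because different roots can reuse the
 * same inode numbers.
 */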
4098 static struct inode *btrfs_iget_locked(struct super_block *s,
4099 				       u64 objectid,
4100 				       struct btrfs_root *root)
4101 {
4102 	struct inode *inode;
4103 	struct btrfs_iget_args args;
4104 	args.ino = objectid;
4105 	args.root = root;
4106 
4107 	inode = iget5_locked(s, objectid, btrfs_find_actor,
4108 			     btrfs_init_locked_inode,
4109 			     (void *)&args);
4110 	return inode;
4111 }
4112 
4113 /* Get an inode object given its location and corresponding root.
4114  * Returns in *new whether the inode was read from disk.
4115  */
4116 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
4117 			 struct btrfs_root *root, int *new)
4118 {
4119 	struct inode *inode;
4120 
4121 	inode = btrfs_iget_locked(s, location->objectid, root);
4122 	if (!inode)
4123 		return ERR_PTR(-ENOMEM);
4124 
4125 	if (inode->i_state & I_NEW) {
4126 		BTRFS_I(inode)->root = root;
4127 		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
4128 		btrfs_read_locked_inode(inode);
4129 		if (!is_bad_inode(inode)) {
4130 			inode_tree_add(inode);
4131 			unlock_new_inode(inode);
4132 			if (new)
4133 				*new = 1;
4134 		} else {
4135 			unlock_new_inode(inode);
4136 			iput(inode);
4137 			inode = ERR_PTR(-ESTALE);
4138 		}
4139 	}
4140 
4141 	return inode;
4142 }
4143 
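/*
 * build a dummy directory inode for a subvolume reference whose root
 * could not be resolved; it only supports the simple dir operations
 * and is flagged with BTRFS_INODE_DUMMY.
 */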
4144 static struct inode *new_simple_dir(struct super_block *s,
4145 				    struct btrfs_key *key,
4146 				    struct btrfs_root *root)
4147 {
4148 	struct inode *inode = new_inode(s);
4149 
4150 	if (!inode)
4151 		return ERR_PTR(-ENOMEM);
4152 
4153 	BTRFS_I(inode)->root = root;
4154 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
4155 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
4156 
4157 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
4158 	inode->i_op = &btrfs_dir_ro_inode_operations;
4159 	inode->i_fop = &simple_dir_operations;
4160 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
4161 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4162 
4163 	return inode;
4164 }
4165 
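/*
 * resolve a name in 'dir' to an inode, crossing into a subvolume root
 * (and kicking off orphan cleanup there) when the dir entry points at
 * a ROOT_ITEM key.
 */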
4166 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
4167 {
4168 	struct inode *inode;
4169 	struct btrfs_root *root = BTRFS_I(dir)->root;
4170 	struct btrfs_root *sub_root = root;
4171 	struct btrfs_key location;
4172 	int index;
4173 	int ret = 0;
4174 
4175 	if (dentry->d_name.len > BTRFS_NAME_LEN)
4176 		return ERR_PTR(-ENAMETOOLONG);
4177 
4178 	if (unlikely(d_need_lookup(dentry))) {
4179 		memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key));
4180 		kfree(dentry->d_fsdata);
4181 		dentry->d_fsdata = NULL;
4182 		/* This thing is hashed, drop it for now */
4183 		d_drop(dentry);
4184 	} else {
4185 		ret = btrfs_inode_by_name(dir, dentry, &location);
4186 	}
4187 
4188 	if (ret < 0)
4189 		return ERR_PTR(ret);
4190 
4191 	if (location.objectid == 0)
4192 		return NULL;
4193 
4194 	if (location.type == BTRFS_INODE_ITEM_KEY) {
4195 		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
4196 		return inode;
4197 	}
4198 
4199 	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
4200 
4201 	index = srcu_read_lock(&root->fs_info->subvol_srcu);
4202 	ret = fixup_tree_root_location(root, dir, dentry,
4203 				       &location, &sub_root);
4204 	if (ret < 0) {
4205 		if (ret != -ENOENT)
4206 			inode = ERR_PTR(ret);
4207 		else
4208 			inode = new_simple_dir(dir->i_sb, &location, sub_root);
4209 	} else {
4210 		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
4211 	}
4212 	srcu_read_unlock(&root->fs_info->subvol_srcu, index);
4213 
4214 	if (!IS_ERR(inode) && root != sub_root) {
4215 		down_read(&root->fs_info->cleanup_work_sem);
4216 		if (!(inode->i_sb->s_flags & MS_RDONLY))
4217 			ret = btrfs_orphan_cleanup(sub_root);
4218 		up_read(&root->fs_info->cleanup_work_sem);
4219 		if (ret)
4220 			inode = ERR_PTR(ret);
4221 	}
4222 
4223 	return inode;
4224 }
4225 
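/*
 * tell the dcache to throw away dentries that point into dead roots or
 * at the dummy empty-subvolume directory instead of keeping them
 * cached.
 */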
4226 static int btrfs_dentry_delete(const struct dentry *dentry)
4227 {
4228 	struct btrfs_root *root;
4229 	struct inode *inode = dentry->d_inode;
4230 
4231 	if (!inode && !IS_ROOT(dentry))
4232 		inode = dentry->d_parent->d_inode;
4233 
4234 	if (inode) {
4235 		root = BTRFS_I(inode)->root;
4236 		if (btrfs_root_refs(&root->root_item) == 0)
4237 			return 1;
4238 
4239 		if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
4240 			return 1;
4241 	}
4242 	return 0;
4243 }
4244 
4245 static void btrfs_dentry_release(struct dentry *dentry)
4246 {
4247 	if (dentry->d_fsdata)
4248 		kfree(dentry->d_fsdata);
4249 }
4250 
4251 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
4252 				   unsigned int flags)
4253 {
4254 	struct dentry *ret;
4255 
4256 	ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
4257 	if (unlikely(d_need_lookup(dentry))) {
4258 		spin_lock(&dentry->d_lock);
4259 		dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
4260 		spin_unlock(&dentry->d_lock);
4261 	}
4262 	return ret;
4263 }
4264 
4265 unsigned char btrfs_filetype_table[] = {
4266 	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
4267 };
4268 
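/*
 * readdir: emit "." and ".." by hand, then walk the DIR_INDEX keys
 * (DIR_ITEM keys for the tree root) starting at f_pos, merging in any
 * directory entries that are still queued as delayed items.
 */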
4269 static int btrfs_real_readdir(struct file *filp, void *dirent,
4270 			      filldir_t filldir)
4271 {
4272 	struct inode *inode = filp->f_dentry->d_inode;
4273 	struct btrfs_root *root = BTRFS_I(inode)->root;
4274 	struct btrfs_item *item;
4275 	struct btrfs_dir_item *di;
4276 	struct btrfs_key key;
4277 	struct btrfs_key found_key;
4278 	struct btrfs_path *path;
4279 	struct list_head ins_list;
4280 	struct list_head del_list;
4281 	int ret;
4282 	struct extent_buffer *leaf;
4283 	int slot;
4284 	unsigned char d_type;
4285 	int over = 0;
4286 	u32 di_cur;
4287 	u32 di_total;
4288 	u32 di_len;
4289 	int key_type = BTRFS_DIR_INDEX_KEY;
4290 	char tmp_name[32];
4291 	char *name_ptr;
4292 	int name_len;
4293 	int is_curr = 0;	/* filp->f_pos points to the current index? */
4294 
4295 	/* FIXME, use a real flag for deciding about the key type */
4296 	if (root->fs_info->tree_root == root)
4297 		key_type = BTRFS_DIR_ITEM_KEY;
4298 
4299 	/* special case for "." */
4300 	if (filp->f_pos == 0) {
4301 		over = filldir(dirent, ".", 1,
4302 			       filp->f_pos, btrfs_ino(inode), DT_DIR);
4303 		if (over)
4304 			return 0;
4305 		filp->f_pos = 1;
4306 	}
4307 	/* special case for .., just use the back ref */
4308 	if (filp->f_pos == 1) {
4309 		u64 pino = parent_ino(filp->f_path.dentry);
4310 		over = filldir(dirent, "..", 2,
4311 			       filp->f_pos, pino, DT_DIR);
4312 		if (over)
4313 			return 0;
4314 		filp->f_pos = 2;
4315 	}
4316 	path = btrfs_alloc_path();
4317 	if (!path)
4318 		return -ENOMEM;
4319 
4320 	path->reada = 1;
4321 
4322 	if (key_type == BTRFS_DIR_INDEX_KEY) {
4323 		INIT_LIST_HEAD(&ins_list);
4324 		INIT_LIST_HEAD(&del_list);
4325 		btrfs_get_delayed_items(inode, &ins_list, &del_list);
4326 	}
4327 
4328 	btrfs_set_key_type(&key, key_type);
4329 	key.offset = filp->f_pos;
4330 	key.objectid = btrfs_ino(inode);
4331 
4332 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4333 	if (ret < 0)
4334 		goto err;
4335 
4336 	while (1) {
4337 		leaf = path->nodes[0];
4338 		slot = path->slots[0];
4339 		if (slot >= btrfs_header_nritems(leaf)) {
4340 			ret = btrfs_next_leaf(root, path);
4341 			if (ret < 0)
4342 				goto err;
4343 			else if (ret > 0)
4344 				break;
4345 			continue;
4346 		}
4347 
4348 		item = btrfs_item_nr(leaf, slot);
4349 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
4350 
4351 		if (found_key.objectid != key.objectid)
4352 			break;
4353 		if (btrfs_key_type(&found_key) != key_type)
4354 			break;
4355 		if (found_key.offset < filp->f_pos)
4356 			goto next;
4357 		if (key_type == BTRFS_DIR_INDEX_KEY &&
4358 		    btrfs_should_delete_dir_index(&del_list,
4359 						  found_key.offset))
4360 			goto next;
4361 
4362 		filp->f_pos = found_key.offset;
4363 		is_curr = 1;
4364 
4365 		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
4366 		di_cur = 0;
4367 		di_total = btrfs_item_size(leaf, item);
4368 
4369 		while (di_cur < di_total) {
4370 			struct btrfs_key location;
4371 
4372 			if (verify_dir_item(root, leaf, di))
4373 				break;
4374 
4375 			name_len = btrfs_dir_name_len(leaf, di);
4376 			if (name_len <= sizeof(tmp_name)) {
4377 				name_ptr = tmp_name;
4378 			} else {
4379 				name_ptr = kmalloc(name_len, GFP_NOFS);
4380 				if (!name_ptr) {
4381 					ret = -ENOMEM;
4382 					goto err;
4383 				}
4384 			}
4385 			read_extent_buffer(leaf, name_ptr,
4386 					   (unsigned long)(di + 1), name_len);
4387 
4388 			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
4389 			btrfs_dir_item_key_to_cpu(leaf, di, &location);
4390 
4391 
4392 			/* is this a reference to our own snapshot? If so
4393 			 * skip it.
4394 			 *
4395 			 * In contrast to old kernels, we insert the snapshot's
4396 			 * dir item and dir index after it has been created, so
4397 			 * we won't find a reference to our own snapshot. We
4398 			 * still keep the following code for backward
4399 			 * compatibility.
4400 			 */
4401 			if (location.type == BTRFS_ROOT_ITEM_KEY &&
4402 			    location.objectid == root->root_key.objectid) {
4403 				over = 0;
4404 				goto skip;
4405 			}
4406 			over = filldir(dirent, name_ptr, name_len,
4407 				       found_key.offset, location.objectid,
4408 				       d_type);
4409 
4410 skip:
4411 			if (name_ptr != tmp_name)
4412 				kfree(name_ptr);
4413 
4414 			if (over)
4415 				goto nopos;
4416 			di_len = btrfs_dir_name_len(leaf, di) +
4417 				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
4418 			di_cur += di_len;
4419 			di = (struct btrfs_dir_item *)((char *)di + di_len);
4420 		}
4421 next:
4422 		path->slots[0]++;
4423 	}
4424 
4425 	if (key_type == BTRFS_DIR_INDEX_KEY) {
4426 		if (is_curr)
4427 			filp->f_pos++;
4428 		ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
4429 						      &ins_list);
4430 		if (ret)
4431 			goto nopos;
4432 	}
4433 
4434 	/* Reached end of directory/root. Bump pos past the last item. */
4435 	if (key_type == BTRFS_DIR_INDEX_KEY)
4436 		/*
4437 		 * 32-bit glibc will use getdents64, but then parse the offset
4438 		 * with strtol, so 0x7fffffff is the largest we can safely serve.
4439 		 */
4440 		filp->f_pos = 0x7fffffff;
4441 	else
4442 		filp->f_pos++;
4443 nopos:
4444 	ret = 0;
4445 err:
4446 	if (key_type == BTRFS_DIR_INDEX_KEY)
4447 		btrfs_put_delayed_items(&ins_list, &del_list);
4448 	btrfs_free_path(path);
4449 	return ret;
4450 }
4451 
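/*
 * ->write_inode.  Btrfs inodes are persisted through the transaction,
 * so a WB_SYNC_ALL writeback turns into joining and then committing
 * the running transaction here.
 */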
4452 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
4453 {
4454 	struct btrfs_root *root = BTRFS_I(inode)->root;
4455 	struct btrfs_trans_handle *trans;
4456 	int ret = 0;
4457 	bool nolock = false;
4458 
4459 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
4460 		return 0;
4461 
4462 	if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
4463 		nolock = true;
4464 
4465 	if (wbc->sync_mode == WB_SYNC_ALL) {
4466 		if (nolock)
4467 			trans = btrfs_join_transaction_nolock(root);
4468 		else
4469 			trans = btrfs_join_transaction(root);
4470 		if (IS_ERR(trans))
4471 			return PTR_ERR(trans);
4472 		if (nolock)
4473 			ret = btrfs_end_transaction_nolock(trans, root);
4474 		else
4475 			ret = btrfs_commit_transaction(trans, root);
4476 	}
4477 	return ret;
4478 }
4479 
4480 /*
4481  * This is somewhat expensive, updating the tree every time the
4482  * inode changes.  But, it is most likely to find the inode in cache.
4483  * FIXME, needs more benchmarking...there are no reasons other than performance
4484  * to keep or drop this code.
4485  */
4486 int btrfs_dirty_inode(struct inode *inode)
4487 {
4488 	struct btrfs_root *root = BTRFS_I(inode)->root;
4489 	struct btrfs_trans_handle *trans;
4490 	int ret;
4491 
4492 	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
4493 		return 0;
4494 
4495 	trans = btrfs_join_transaction(root);
4496 	if (IS_ERR(trans))
4497 		return PTR_ERR(trans);
4498 
4499 	ret = btrfs_update_inode(trans, root, inode);
4500 	if (ret == -ENOSPC) {
4501 		/* whoops, lets try again with the full transaction */
4502 		btrfs_end_transaction(trans, root);
4503 		trans = btrfs_start_transaction(root, 1);
4504 		if (IS_ERR(trans))
4505 			return PTR_ERR(trans);
4506 
4507 		ret = btrfs_update_inode(trans, root, inode);
4508 	}
4509 	btrfs_end_transaction(trans, root);
4510 	if (BTRFS_I(inode)->delayed_node)
4511 		btrfs_balance_delayed_items(root);
4512 
4513 	return ret;
4514 }
4515 
4516 /*
4517  * This is a copy of file_update_time.  We need this so we can return error on
4518  * ENOSPC for updating the inode in the case of file write and mmap writes.
4519  */
4520 static int btrfs_update_time(struct inode *inode, struct timespec *now,
4521 			     int flags)
4522 {
4523 	struct btrfs_root *root = BTRFS_I(inode)->root;
4524 
4525 	if (btrfs_root_readonly(root))
4526 		return -EROFS;
4527 
4528 	if (flags & S_VERSION)
4529 		inode_inc_iversion(inode);
4530 	if (flags & S_CTIME)
4531 		inode->i_ctime = *now;
4532 	if (flags & S_MTIME)
4533 		inode->i_mtime = *now;
4534 	if (flags & S_ATIME)
4535 		inode->i_atime = *now;
4536 	return btrfs_dirty_inode(inode);
4537 }
4538 
4539 /*
4540  * find the highest existing sequence number in a directory
4541  * and then set the in-memory index_cnt variable to reflect
4542  * free sequence numbers
4543  */
4544 static int btrfs_set_inode_index_count(struct inode *inode)
4545 {
4546 	struct btrfs_root *root = BTRFS_I(inode)->root;
4547 	struct btrfs_key key, found_key;
4548 	struct btrfs_path *path;
4549 	struct extent_buffer *leaf;
4550 	int ret;
4551 
4552 	key.objectid = btrfs_ino(inode);
4553 	btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
4554 	key.offset = (u64)-1;
4555 
4556 	path = btrfs_alloc_path();
4557 	if (!path)
4558 		return -ENOMEM;
4559 
4560 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4561 	if (ret < 0)
4562 		goto out;
4563 	/* FIXME: we should be able to handle this */
4564 	if (ret == 0)
4565 		goto out;
4566 	ret = 0;
4567 
4568 	/*
4569 	 * MAGIC NUMBER EXPLANATION:
4570 	 * since we search a directory based on f_pos we have to start at 2,
4571 	 * because '.' and '..' have f_pos of 0 and 1 respectively, so every
4572 	 * other entry has to start at 2
4573 	 */
4574 	if (path->slots[0] == 0) {
4575 		BTRFS_I(inode)->index_cnt = 2;
4576 		goto out;
4577 	}
4578 
4579 	path->slots[0]--;
4580 
4581 	leaf = path->nodes[0];
4582 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4583 
4584 	if (found_key.objectid != btrfs_ino(inode) ||
4585 	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
4586 		BTRFS_I(inode)->index_cnt = 2;
4587 		goto out;
4588 	}
4589 
4590 	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
4591 out:
4592 	btrfs_free_path(path);
4593 	return ret;
4594 }
4595 
4596 /*
4597  * helper to find a free sequence number in a given directory.  This current
4598  * code is very simple; later versions will do smarter things in the btree
4599  */
4600 int btrfs_set_inode_index(struct inode *dir, u64 *index)
4601 {
4602 	int ret = 0;
4603 
4604 	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
4605 		ret = btrfs_inode_delayed_dir_index_count(dir);
4606 		if (ret) {
4607 			ret = btrfs_set_inode_index_count(dir);
4608 			if (ret)
4609 				return ret;
4610 		}
4611 	}
4612 
4613 	*index = BTRFS_I(dir)->index_cnt;
4614 	BTRFS_I(dir)->index_cnt++;
4615 
4616 	return ret;
4617 }
4618 
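/*
 * create a new in-core inode and insert both its INODE_ITEM and the
 * INODE_REF back reference to the parent directory in a single btree
 * insertion.
 */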
4619 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4620 				     struct btrfs_root *root,
4621 				     struct inode *dir,
4622 				     const char *name, int name_len,
4623 				     u64 ref_objectid, u64 objectid,
4624 				     umode_t mode, u64 *index)
4625 {
4626 	struct inode *inode;
4627 	struct btrfs_inode_item *inode_item;
4628 	struct btrfs_key *location;
4629 	struct btrfs_path *path;
4630 	struct btrfs_inode_ref *ref;
4631 	struct btrfs_key key[2];
4632 	u32 sizes[2];
4633 	unsigned long ptr;
4634 	int ret;
4635 	int owner;
4636 
4637 	path = btrfs_alloc_path();
4638 	if (!path)
4639 		return ERR_PTR(-ENOMEM);
4640 
4641 	inode = new_inode(root->fs_info->sb);
4642 	if (!inode) {
4643 		btrfs_free_path(path);
4644 		return ERR_PTR(-ENOMEM);
4645 	}
4646 
4647 	/*
4648 	 * we have to initialize this early, so we can reclaim the inode
4649 	 * number if we fail afterwards in this function.
4650 	 */
4651 	inode->i_ino = objectid;
4652 
4653 	if (dir) {
4654 		trace_btrfs_inode_request(dir);
4655 
4656 		ret = btrfs_set_inode_index(dir, index);
4657 		if (ret) {
4658 			btrfs_free_path(path);
4659 			iput(inode);
4660 			return ERR_PTR(ret);
4661 		}
4662 	}
4663 	/*
4664 	 * index_cnt is ignored for everything but a dir,
4665  * btrfs_set_inode_index_count has an explanation for the magic
4666 	 * number
4667 	 */
4668 	BTRFS_I(inode)->index_cnt = 2;
4669 	BTRFS_I(inode)->root = root;
4670 	BTRFS_I(inode)->generation = trans->transid;
4671 	inode->i_generation = BTRFS_I(inode)->generation;
4672 
4673 	if (S_ISDIR(mode))
4674 		owner = 0;
4675 	else
4676 		owner = 1;
4677 
4678 	key[0].objectid = objectid;
4679 	btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
4680 	key[0].offset = 0;
4681 
4682 	key[1].objectid = objectid;
4683 	btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
4684 	key[1].offset = ref_objectid;
4685 
4686 	sizes[0] = sizeof(struct btrfs_inode_item);
4687 	sizes[1] = name_len + sizeof(*ref);
4688 
4689 	path->leave_spinning = 1;
4690 	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
4691 	if (ret != 0)
4692 		goto fail;
4693 
4694 	inode_init_owner(inode, dir, mode);
4695 	inode_set_bytes(inode, 0);
4696 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4697 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4698 				  struct btrfs_inode_item);
4699 	memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
4700 			     sizeof(*inode_item));
4701 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
4702 
4703 	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
4704 			     struct btrfs_inode_ref);
4705 	btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
4706 	btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
4707 	ptr = (unsigned long)(ref + 1);
4708 	write_extent_buffer(path->nodes[0], name, ptr, name_len);
4709 
4710 	btrfs_mark_buffer_dirty(path->nodes[0]);
4711 	btrfs_free_path(path);
4712 
4713 	location = &BTRFS_I(inode)->location;
4714 	location->objectid = objectid;
4715 	location->offset = 0;
4716 	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
4717 
4718 	btrfs_inherit_iflags(inode, dir);
4719 
4720 	if (S_ISREG(mode)) {
4721 		if (btrfs_test_opt(root, NODATASUM))
4722 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4723 		if (btrfs_test_opt(root, NODATACOW) ||
4724 		    (BTRFS_I(dir)->flags & BTRFS_INODE_NODATACOW))
4725 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
4726 	}
4727 
4728 	insert_inode_hash(inode);
4729 	inode_tree_add(inode);
4730 
4731 	trace_btrfs_inode_new(inode);
4732 	btrfs_set_inode_last_trans(trans, inode);
4733 
4734 	btrfs_update_root_times(trans, root);
4735 
4736 	return inode;
4737 fail:
4738 	if (dir)
4739 		BTRFS_I(dir)->index_cnt--;
4740 	btrfs_free_path(path);
4741 	iput(inode);
4742 	return ERR_PTR(ret);
4743 }
4744 
4745 static inline u8 btrfs_inode_type(struct inode *inode)
4746 {
4747 	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
4748 }
4749 
4750 /*
4751  * utility function to add 'inode' into 'parent_inode' with
4752  * a given name and a given sequence number.
4753  * if 'add_backref' is true, also insert a backref from the
4754  * inode to the parent directory.
4755  */
4756 int btrfs_add_link(struct btrfs_trans_handle *trans,
4757 		   struct inode *parent_inode, struct inode *inode,
4758 		   const char *name, int name_len, int add_backref, u64 index)
4759 {
4760 	int ret = 0;
4761 	struct btrfs_key key;
4762 	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4763 	u64 ino = btrfs_ino(inode);
4764 	u64 parent_ino = btrfs_ino(parent_inode);
4765 
4766 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4767 		memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
4768 	} else {
4769 		key.objectid = ino;
4770 		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
4771 		key.offset = 0;
4772 	}
4773 
4774 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4775 		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
4776 					 key.objectid, root->root_key.objectid,
4777 					 parent_ino, index, name, name_len);
4778 	} else if (add_backref) {
4779 		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
4780 					     parent_ino, index);
4781 	}
4782 
4783 	/* Nothing to clean up yet */
4784 	if (ret)
4785 		return ret;
4786 
4787 	ret = btrfs_insert_dir_item(trans, root, name, name_len,
4788 				    parent_inode, &key,
4789 				    btrfs_inode_type(inode), index);
4790 	if (ret == -EEXIST)
4791 		goto fail_dir_item;
4792 	else if (ret) {
4793 		btrfs_abort_transaction(trans, root, ret);
4794 		return ret;
4795 	}
4796 
4797 	btrfs_i_size_write(parent_inode, parent_inode->i_size +
4798 			   name_len * 2);
4799 	inode_inc_iversion(parent_inode);
4800 	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4801 	ret = btrfs_update_inode(trans, root, parent_inode);
4802 	if (ret)
4803 		btrfs_abort_transaction(trans, root, ret);
4804 	return ret;
4805 
4806 fail_dir_item:
4807 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4808 		u64 local_index;
4809 		int err;
4810 		err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
4811 				 key.objectid, root->root_key.objectid,
4812 				 parent_ino, &local_index, name, name_len);
4813 
4814 	} else if (add_backref) {
4815 		u64 local_index;
4816 		int err;
4817 
4818 		err = btrfs_del_inode_ref(trans, root, name, name_len,
4819 					  ino, parent_ino, &local_index);
4820 	}
4821 	return ret;
4822 }
4823 
4824 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4825 			    struct inode *dir, struct dentry *dentry,
4826 			    struct inode *inode, int backref, u64 index)
4827 {
4828 	int err = btrfs_add_link(trans, dir, inode,
4829 				 dentry->d_name.name, dentry->d_name.len,
4830 				 backref, index);
4831 	if (err > 0)
4832 		err = -EEXIST;
4833 	return err;
4834 }
4835 
4836 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4837 			umode_t mode, dev_t rdev)
4838 {
4839 	struct btrfs_trans_handle *trans;
4840 	struct btrfs_root *root = BTRFS_I(dir)->root;
4841 	struct inode *inode = NULL;
4842 	int err;
4843 	int drop_inode = 0;
4844 	u64 objectid;
4845 	unsigned long nr = 0;
4846 	u64 index = 0;
4847 
4848 	if (!new_valid_dev(rdev))
4849 		return -EINVAL;
4850 
4851 	/*
4852 	 * 2 for inode item and ref
4853 	 * 2 for dir items
4854 	 * 1 for xattr if selinux is on
4855 	 */
4856 	trans = btrfs_start_transaction(root, 5);
4857 	if (IS_ERR(trans))
4858 		return PTR_ERR(trans);
4859 
4860 	err = btrfs_find_free_ino(root, &objectid);
4861 	if (err)
4862 		goto out_unlock;
4863 
4864 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4865 				dentry->d_name.len, btrfs_ino(dir), objectid,
4866 				mode, &index);
4867 	if (IS_ERR(inode)) {
4868 		err = PTR_ERR(inode);
4869 		goto out_unlock;
4870 	}
4871 
4872 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
4873 	if (err) {
4874 		drop_inode = 1;
4875 		goto out_unlock;
4876 	}
4877 
4878 	/*
4879 	 * If the active LSM wants to access the inode during
4880 	 * d_instantiate it needs these. Smack checks to see
4881 	 * if the filesystem supports xattrs by looking at the
4882 	 * ops vector.
4883 	 */
4884 
4885 	inode->i_op = &btrfs_special_inode_operations;
4886 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4887 	if (err)
4888 		drop_inode = 1;
4889 	else {
4890 		init_special_inode(inode, inode->i_mode, rdev);
4891 		btrfs_update_inode(trans, root, inode);
4892 		d_instantiate(dentry, inode);
4893 	}
4894 out_unlock:
4895 	nr = trans->blocks_used;
4896 	btrfs_end_transaction(trans, root);
4897 	btrfs_btree_balance_dirty(root, nr);
4898 	if (drop_inode) {
4899 		inode_dec_link_count(inode);
4900 		iput(inode);
4901 	}
4902 	return err;
4903 }
4904 
4905 static int btrfs_create(struct inode *dir, struct dentry *dentry,
4906 			umode_t mode, bool excl)
4907 {
4908 	struct btrfs_trans_handle *trans;
4909 	struct btrfs_root *root = BTRFS_I(dir)->root;
4910 	struct inode *inode = NULL;
4911 	int drop_inode = 0;
4912 	int err;
4913 	unsigned long nr = 0;
4914 	u64 objectid;
4915 	u64 index = 0;
4916 
4917 	/*
4918 	 * 2 for inode item and ref
4919 	 * 2 for dir items
4920 	 * 1 for xattr if selinux is on
4921 	 */
4922 	trans = btrfs_start_transaction(root, 5);
4923 	if (IS_ERR(trans))
4924 		return PTR_ERR(trans);
4925 
4926 	err = btrfs_find_free_ino(root, &objectid);
4927 	if (err)
4928 		goto out_unlock;
4929 
4930 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4931 				dentry->d_name.len, btrfs_ino(dir), objectid,
4932 				mode, &index);
4933 	if (IS_ERR(inode)) {
4934 		err = PTR_ERR(inode);
4935 		goto out_unlock;
4936 	}
4937 
4938 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
4939 	if (err) {
4940 		drop_inode = 1;
4941 		goto out_unlock;
4942 	}
4943 
4944 	/*
4945 	 * If the active LSM wants to access the inode during
4946 	 * d_instantiate it needs these. Smack checks to see
4947 	 * if the filesystem supports xattrs by looking at the
4948 	 * ops vector.
4949 	 */
4950 	inode->i_fop = &btrfs_file_operations;
4951 	inode->i_op = &btrfs_file_inode_operations;
4952 
4953 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4954 	if (err)
4955 		drop_inode = 1;
4956 	else {
4957 		inode->i_mapping->a_ops = &btrfs_aops;
4958 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4959 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4960 		d_instantiate(dentry, inode);
4961 	}
4962 out_unlock:
4963 	nr = trans->blocks_used;
4964 	btrfs_end_transaction(trans, root);
4965 	if (drop_inode) {
4966 		inode_dec_link_count(inode);
4967 		iput(inode);
4968 	}
4969 	btrfs_btree_balance_dirty(root, nr);
4970 	return err;
4971 }
4972 
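/*
 * ->link: bump i_nlink and add a new name for the inode in 'dir',
 * including the INODE_REF back reference, then record the new name in
 * the tree log.
 */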
4973 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4974 		      struct dentry *dentry)
4975 {
4976 	struct btrfs_trans_handle *trans;
4977 	struct btrfs_root *root = BTRFS_I(dir)->root;
4978 	struct inode *inode = old_dentry->d_inode;
4979 	u64 index;
4980 	unsigned long nr = 0;
4981 	int err;
4982 	int drop_inode = 0;
4983 
4984 	/* do not allow sys_link's with other subvols of the same device */
4985 	if (root->objectid != BTRFS_I(inode)->root->objectid)
4986 		return -EXDEV;
4987 
4988 	if (inode->i_nlink == ~0U)
4989 		return -EMLINK;
4990 
4991 	err = btrfs_set_inode_index(dir, &index);
4992 	if (err)
4993 		goto fail;
4994 
4995 	/*
4996 	 * 2 items for inode and inode ref
4997 	 * 2 items for dir items
4998 	 * 1 item for parent inode
4999 	 */
5000 	trans = btrfs_start_transaction(root, 5);
5001 	if (IS_ERR(trans)) {
5002 		err = PTR_ERR(trans);
5003 		goto fail;
5004 	}
5005 
5006 	btrfs_inc_nlink(inode);
5007 	inode_inc_iversion(inode);
5008 	inode->i_ctime = CURRENT_TIME;
5009 	ihold(inode);
5010 
5011 	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
5012 
5013 	if (err) {
5014 		drop_inode = 1;
5015 	} else {
5016 		struct dentry *parent = dentry->d_parent;
5017 		err = btrfs_update_inode(trans, root, inode);
5018 		if (err)
5019 			goto fail;
5020 		d_instantiate(dentry, inode);
5021 		btrfs_log_new_name(trans, inode, NULL, parent);
5022 	}
5023 
5024 	nr = trans->blocks_used;
5025 	btrfs_end_transaction(trans, root);
5026 fail:
5027 	if (drop_inode) {
5028 		inode_dec_link_count(inode);
5029 		iput(inode);
5030 	}
5031 	btrfs_btree_balance_dirty(root, nr);
5032 	return err;
5033 }
5034 
5035 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
5036 {
5037 	struct inode *inode = NULL;
5038 	struct btrfs_trans_handle *trans;
5039 	struct btrfs_root *root = BTRFS_I(dir)->root;
5040 	int err = 0;
5041 	int drop_on_err = 0;
5042 	u64 objectid = 0;
5043 	u64 index = 0;
5044 	unsigned long nr = 1;
5045 
5046 	/*
5047 	 * 2 items for inode and ref
5048 	 * 2 items for dir items
5049 	 * 1 for xattr if selinux is on
5050 	 */
5051 	trans = btrfs_start_transaction(root, 5);
5052 	if (IS_ERR(trans))
5053 		return PTR_ERR(trans);
5054 
5055 	err = btrfs_find_free_ino(root, &objectid);
5056 	if (err)
5057 		goto out_fail;
5058 
5059 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5060 				dentry->d_name.len, btrfs_ino(dir), objectid,
5061 				S_IFDIR | mode, &index);
5062 	if (IS_ERR(inode)) {
5063 		err = PTR_ERR(inode);
5064 		goto out_fail;
5065 	}
5066 
5067 	drop_on_err = 1;
5068 
5069 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
5070 	if (err)
5071 		goto out_fail;
5072 
5073 	inode->i_op = &btrfs_dir_inode_operations;
5074 	inode->i_fop = &btrfs_dir_file_operations;
5075 
5076 	btrfs_i_size_write(inode, 0);
5077 	err = btrfs_update_inode(trans, root, inode);
5078 	if (err)
5079 		goto out_fail;
5080 
5081 	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
5082 			     dentry->d_name.len, 0, index);
5083 	if (err)
5084 		goto out_fail;
5085 
5086 	d_instantiate(dentry, inode);
5087 	drop_on_err = 0;
5088 
5089 out_fail:
5090 	nr = trans->blocks_used;
5091 	btrfs_end_transaction(trans, root);
5092 	if (drop_on_err)
5093 		iput(inode);
5094 	btrfs_btree_balance_dirty(root, nr);
5095 	return err;
5096 }
5097 
5098 /* helper for btrfs_get_extent.  Given an existing extent in the tree,
5099  * and an extent that you want to insert, deal with overlap and insert
5100  * the new extent into the tree.
5101  */
5102 static int merge_extent_mapping(struct extent_map_tree *em_tree,
5103 				struct extent_map *existing,
5104 				struct extent_map *em,
5105 				u64 map_start, u64 map_len)
5106 {
5107 	u64 start_diff;
5108 
5109 	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
5110 	start_diff = map_start - em->start;
5111 	em->start = map_start;
5112 	em->len = map_len;
5113 	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
5114 	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
5115 		em->block_start += start_diff;
5116 		em->block_len -= start_diff;
5117 	}
5118 	return add_extent_mapping(em_tree, em);
5119 }
5120 
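/*
 * decompress an inline extent straight into the given page.  If
 * decompression fails, the destination range is zeroed instead so no
 * stale page contents leak through.
 */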
5121 static noinline int uncompress_inline(struct btrfs_path *path,
5122 				      struct inode *inode, struct page *page,
5123 				      size_t pg_offset, u64 extent_offset,
5124 				      struct btrfs_file_extent_item *item)
5125 {
5126 	int ret;
5127 	struct extent_buffer *leaf = path->nodes[0];
5128 	char *tmp;
5129 	size_t max_size;
5130 	unsigned long inline_size;
5131 	unsigned long ptr;
5132 	int compress_type;
5133 
5134 	WARN_ON(pg_offset != 0);
5135 	compress_type = btrfs_file_extent_compression(leaf, item);
5136 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
5137 	inline_size = btrfs_file_extent_inline_item_len(leaf,
5138 					btrfs_item_nr(leaf, path->slots[0]));
5139 	tmp = kmalloc(inline_size, GFP_NOFS);
5140 	if (!tmp)
5141 		return -ENOMEM;
5142 	ptr = btrfs_file_extent_inline_start(item);
5143 
5144 	read_extent_buffer(leaf, tmp, ptr, inline_size);
5145 
5146 	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
5147 	ret = btrfs_decompress(compress_type, tmp, page,
5148 			       extent_offset, inline_size, max_size);
5149 	if (ret) {
5150 		char *kaddr = kmap_atomic(page);
5151 		unsigned long copy_size = min_t(u64,
5152 				  PAGE_CACHE_SIZE - pg_offset,
5153 				  max_size - extent_offset);
5154 		memset(kaddr + pg_offset, 0, copy_size);
5155 		kunmap_atomic(kaddr);
5156 	}
5157 	kfree(tmp);
5158 	return 0;
5159 }
5160 
5161 /*
5162  * a bit scary, this does extent mapping from logical file offset to the disk.
5163  * the ugly parts come from merging extents from the disk with the in-ram
5164  * representation.  This gets more complex because of the data=ordered code,
5165  * where the in-ram extents might be locked pending data=ordered completion.
5166  *
5167  * This also copies inline extents directly into the page.
5168  */
5169 
5170 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
5171 				    size_t pg_offset, u64 start, u64 len,
5172 				    int create)
5173 {
5174 	int ret;
5175 	int err = 0;
5176 	u64 bytenr;
5177 	u64 extent_start = 0;
5178 	u64 extent_end = 0;
5179 	u64 objectid = btrfs_ino(inode);
5180 	u32 found_type;
5181 	struct btrfs_path *path = NULL;
5182 	struct btrfs_root *root = BTRFS_I(inode)->root;
5183 	struct btrfs_file_extent_item *item;
5184 	struct extent_buffer *leaf;
5185 	struct btrfs_key found_key;
5186 	struct extent_map *em = NULL;
5187 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5188 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5189 	struct btrfs_trans_handle *trans = NULL;
5190 	int compress_type;
5191 
5192 again:
5193 	read_lock(&em_tree->lock);
5194 	em = lookup_extent_mapping(em_tree, start, len);
5195 	if (em)
5196 		em->bdev = root->fs_info->fs_devices->latest_bdev;
5197 	read_unlock(&em_tree->lock);
5198 
5199 	if (em) {
5200 		if (em->start > start || em->start + em->len <= start)
5201 			free_extent_map(em);
5202 		else if (em->block_start == EXTENT_MAP_INLINE && page)
5203 			free_extent_map(em);
5204 		else
5205 			goto out;
5206 	}
5207 	em = alloc_extent_map();
5208 	if (!em) {
5209 		err = -ENOMEM;
5210 		goto out;
5211 	}
5212 	em->bdev = root->fs_info->fs_devices->latest_bdev;
5213 	em->start = EXTENT_MAP_HOLE;
5214 	em->orig_start = EXTENT_MAP_HOLE;
5215 	em->len = (u64)-1;
5216 	em->block_len = (u64)-1;
5217 
5218 	if (!path) {
5219 		path = btrfs_alloc_path();
5220 		if (!path) {
5221 			err = -ENOMEM;
5222 			goto out;
5223 		}
5224 		/*
5225 		 * Chances are we'll be called again, so go ahead and do
5226 		 * readahead
5227 		 */
5228 		path->reada = 1;
5229 	}
5230 
5231 	ret = btrfs_lookup_file_extent(trans, root, path,
5232 				       objectid, start, trans != NULL);
5233 	if (ret < 0) {
5234 		err = ret;
5235 		goto out;
5236 	}
5237 
5238 	if (ret != 0) {
5239 		if (path->slots[0] == 0)
5240 			goto not_found;
5241 		path->slots[0]--;
5242 	}
5243 
5244 	leaf = path->nodes[0];
5245 	item = btrfs_item_ptr(leaf, path->slots[0],
5246 			      struct btrfs_file_extent_item);
5247 	/* are we inside the extent that was found? */
5248 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5249 	found_type = btrfs_key_type(&found_key);
5250 	if (found_key.objectid != objectid ||
5251 	    found_type != BTRFS_EXTENT_DATA_KEY) {
5252 		goto not_found;
5253 	}
5254 
5255 	found_type = btrfs_file_extent_type(leaf, item);
5256 	extent_start = found_key.offset;
5257 	compress_type = btrfs_file_extent_compression(leaf, item);
5258 	if (found_type == BTRFS_FILE_EXTENT_REG ||
5259 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
5260 		extent_end = extent_start +
5261 		       btrfs_file_extent_num_bytes(leaf, item);
5262 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
5263 		size_t size;
5264 		size = btrfs_file_extent_inline_len(leaf, item);
5265 		extent_end = (extent_start + size + root->sectorsize - 1) &
5266 			~((u64)root->sectorsize - 1);
5267 	}
5268 
5269 	if (start >= extent_end) {
5270 		path->slots[0]++;
5271 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
5272 			ret = btrfs_next_leaf(root, path);
5273 			if (ret < 0) {
5274 				err = ret;
5275 				goto out;
5276 			}
5277 			if (ret > 0)
5278 				goto not_found;
5279 			leaf = path->nodes[0];
5280 		}
5281 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5282 		if (found_key.objectid != objectid ||
5283 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
5284 			goto not_found;
5285 		if (start + len <= found_key.offset)
5286 			goto not_found;
5287 		em->start = start;
5288 		em->len = found_key.offset - start;
5289 		goto not_found_em;
5290 	}
5291 
5292 	if (found_type == BTRFS_FILE_EXTENT_REG ||
5293 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
5294 		em->start = extent_start;
5295 		em->len = extent_end - extent_start;
5296 		em->orig_start = extent_start -
5297 				 btrfs_file_extent_offset(leaf, item);
5298 		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
5299 		if (bytenr == 0) {
5300 			em->block_start = EXTENT_MAP_HOLE;
5301 			goto insert;
5302 		}
5303 		if (compress_type != BTRFS_COMPRESS_NONE) {
5304 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
5305 			em->compress_type = compress_type;
5306 			em->block_start = bytenr;
5307 			em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
5308 									 item);
5309 		} else {
5310 			bytenr += btrfs_file_extent_offset(leaf, item);
5311 			em->block_start = bytenr;
5312 			em->block_len = em->len;
5313 			if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
5314 				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
5315 		}
5316 		goto insert;
5317 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
5318 		unsigned long ptr;
5319 		char *map;
5320 		size_t size;
5321 		size_t extent_offset;
5322 		size_t copy_size;
5323 
5324 		em->block_start = EXTENT_MAP_INLINE;
5325 		if (!page || create) {
5326 			em->start = extent_start;
5327 			em->len = extent_end - extent_start;
5328 			goto out;
5329 		}
5330 
5331 		size = btrfs_file_extent_inline_len(leaf, item);
5332 		extent_offset = page_offset(page) + pg_offset - extent_start;
5333 		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
5334 				size - extent_offset);
5335 		em->start = extent_start + extent_offset;
5336 		em->len = (copy_size + root->sectorsize - 1) &
5337 			~((u64)root->sectorsize - 1);
5338 		em->orig_start = EXTENT_MAP_INLINE;
5339 		if (compress_type) {
5340 			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
5341 			em->compress_type = compress_type;
5342 		}
5343 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
5344 		if (create == 0 && !PageUptodate(page)) {
5345 			if (btrfs_file_extent_compression(leaf, item) !=
5346 			    BTRFS_COMPRESS_NONE) {
5347 				ret = uncompress_inline(path, inode, page,
5348 							pg_offset,
5349 							extent_offset, item);
5350 				BUG_ON(ret); /* -ENOMEM */
5351 			} else {
5352 				map = kmap(page);
5353 				read_extent_buffer(leaf, map + pg_offset, ptr,
5354 						   copy_size);
5355 				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
5356 					memset(map + pg_offset + copy_size, 0,
5357 					       PAGE_CACHE_SIZE - pg_offset -
5358 					       copy_size);
5359 				}
5360 				kunmap(page);
5361 			}
5362 			flush_dcache_page(page);
5363 		} else if (create && PageUptodate(page)) {
5364 			BUG();
5365 			if (!trans) {
5366 				kunmap(page);
5367 				free_extent_map(em);
5368 				em = NULL;
5369 
5370 				btrfs_release_path(path);
5371 				trans = btrfs_join_transaction(root);
5372 
5373 				if (IS_ERR(trans))
5374 					return ERR_CAST(trans);
5375 				goto again;
5376 			}
5377 			map = kmap(page);
5378 			write_extent_buffer(leaf, map + pg_offset, ptr,
5379 					    copy_size);
5380 			kunmap(page);
5381 			btrfs_mark_buffer_dirty(leaf);
5382 		}
5383 		set_extent_uptodate(io_tree, em->start,
5384 				    extent_map_end(em) - 1, NULL, GFP_NOFS);
5385 		goto insert;
5386 	} else {
5387 		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
5388 		WARN_ON(1);
5389 	}
5390 not_found:
5391 	em->start = start;
5392 	em->len = len;
5393 not_found_em:
5394 	em->block_start = EXTENT_MAP_HOLE;
5395 	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
5396 insert:
5397 	btrfs_release_path(path);
5398 	if (em->start > start || extent_map_end(em) <= start) {
5399 		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
5400 		       "[%llu %llu]\n", (unsigned long long)em->start,
5401 		       (unsigned long long)em->len,
5402 		       (unsigned long long)start,
5403 		       (unsigned long long)len);
5404 		err = -EIO;
5405 		goto out;
5406 	}
5407 
5408 	err = 0;
5409 	write_lock(&em_tree->lock);
5410 	ret = add_extent_mapping(em_tree, em);
5411 	/* it is possible that someone inserted the extent into the tree
5412 	 * while we had the lock dropped.  It is also possible that
5413 	 * an overlapping map exists in the tree
5414 	 */
5415 	if (ret == -EEXIST) {
5416 		struct extent_map *existing;
5417 
5418 		ret = 0;
5419 
5420 		existing = lookup_extent_mapping(em_tree, start, len);
5421 		if (existing && (existing->start > start ||
5422 		    existing->start + existing->len <= start)) {
5423 			free_extent_map(existing);
5424 			existing = NULL;
5425 		}
5426 		if (!existing) {
5427 			existing = lookup_extent_mapping(em_tree, em->start,
5428 							 em->len);
5429 			if (existing) {
5430 				err = merge_extent_mapping(em_tree, existing,
5431 							   em, start,
5432 							   root->sectorsize);
5433 				free_extent_map(existing);
5434 				if (err) {
5435 					free_extent_map(em);
5436 					em = NULL;
5437 				}
5438 			} else {
5439 				err = -EIO;
5440 				free_extent_map(em);
5441 				em = NULL;
5442 			}
5443 		} else {
5444 			free_extent_map(em);
5445 			em = existing;
5446 			err = 0;
5447 		}
5448 	}
5449 	write_unlock(&em_tree->lock);
5450 out:
5451 
5452 	trace_btrfs_get_extent(root, em);
5453 
5454 	if (path)
5455 		btrfs_free_path(path);
5456 	if (trans) {
5457 		ret = btrfs_end_transaction(trans, root);
5458 		if (!err)
5459 			err = ret;
5460 	}
5461 	if (err) {
5462 		free_extent_map(em);
5463 		return ERR_PTR(err);
5464 	}
5465 	BUG_ON(!em); /* Error is always set */
5466 	return em;
5467 }
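
/*
 * Illustrative sketch (hypothetical helper, not part of btrfs): the mask
 * arithmetic btrfs_get_extent() uses above to round an inline extent's
 * length up to the next sector boundary.  Assumes sectorsize is a power
 * of two, as it always is for btrfs.
 */
static inline u64 example_round_up_to_sector(u64 len, u64 sectorsize)
{
	return (len + sectorsize - 1) & ~(sectorsize - 1);
}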
5468 
5469 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
5470 					   size_t pg_offset, u64 start, u64 len,
5471 					   int create)
5472 {
5473 	struct extent_map *em;
5474 	struct extent_map *hole_em = NULL;
5475 	u64 range_start = start;
5476 	u64 end;
5477 	u64 found;
5478 	u64 found_end;
5479 	int err = 0;
5480 
5481 	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
5482 	if (IS_ERR(em))
5483 		return em;
5484 	if (em) {
5485 		/*
5486 		 * if our em maps to a hole, there might
5487 		 * actually be delalloc bytes behind it
5488 		 */
5489 		if (em->block_start != EXTENT_MAP_HOLE)
5490 			return em;
5491 		else
5492 			hole_em = em;
5493 	}
5494 
5495 	/* check to see if we've wrapped (len == -1 or similar) */
5496 	end = start + len;
5497 	if (end < start)
5498 		end = (u64)-1;
5499 	else
5500 		end -= 1;
5501 
5502 	em = NULL;
5503 
5504 	/* ok, we didn't find anything, let's look for delalloc */
5505 	found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
5506 				 end, len, EXTENT_DELALLOC, 1);
5507 	found_end = range_start + found;
5508 	if (found_end < range_start)
5509 		found_end = (u64)-1;
5510 
5511 	/*
5512 	 * we didn't find anything useful, return
5513 	 * the original results from get_extent()
5514 	 */
5515 	if (range_start > end || found_end <= start) {
5516 		em = hole_em;
5517 		hole_em = NULL;
5518 		goto out;
5519 	}
5520 
5521 	/* adjust the range_start to make sure it doesn't
5522 	 * go backwards from the start they passed in
5523 	 */
5524 	range_start = max(start, range_start);
5525 	found = found_end - range_start;
5526 
5527 	if (found > 0) {
5528 		u64 hole_start = start;
5529 		u64 hole_len = len;
5530 
5531 		em = alloc_extent_map();
5532 		if (!em) {
5533 			err = -ENOMEM;
5534 			goto out;
5535 		}
5536 		/*
5537 		 * when btrfs_get_extent can't find anything it
5538 		 * returns one huge hole
5539 		 *
5540 		 * make sure what it found really fits our range, and
5541 		 * adjust to make sure it is based on the start from
5542 		 * the caller
5543 		 */
5544 		if (hole_em) {
5545 			u64 calc_end = extent_map_end(hole_em);
5546 
5547 			if (calc_end <= start || (hole_em->start > end)) {
5548 				free_extent_map(hole_em);
5549 				hole_em = NULL;
5550 			} else {
5551 				hole_start = max(hole_em->start, start);
5552 				hole_len = calc_end - hole_start;
5553 			}
5554 		}
5555 		em->bdev = NULL;
5556 		if (hole_em && range_start > hole_start) {
5557 			/* our hole starts before our delalloc, so we
5558 			 * have to return just the parts of the hole
5559 			 * that go until the delalloc starts
5560 			 */
5561 			em->len = min(hole_len,
5562 				      range_start - hole_start);
5563 			em->start = hole_start;
5564 			em->orig_start = hole_start;
5565 			/*
5566 			 * don't adjust block start at all,
5567 			 * it is fixed at EXTENT_MAP_HOLE
5568 			 */
5569 			em->block_start = hole_em->block_start;
5570 			em->block_len = hole_len;
5571 		} else {
5572 			em->start = range_start;
5573 			em->len = found;
5574 			em->orig_start = range_start;
5575 			em->block_start = EXTENT_MAP_DELALLOC;
5576 			em->block_len = found;
5577 		}
5578 	} else if (hole_em) {
5579 		return hole_em;
5580 	}
5581 out:
5582 
5583 	free_extent_map(hole_em);
5584 	if (err) {
5585 		free_extent_map(em);
5586 		return ERR_PTR(err);
5587 	}
5588 	return em;
5589 }
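
/*
 * A minimal sketch (hypothetical helper, for illustration only) of the
 * wrap check performed above: when start + len overflows u64 (e.g. a len
 * of (u64)-1), clamp the inclusive end of the range to the maximum offset.
 */
static inline u64 example_range_last(u64 start, u64 len)
{
	u64 end = start + len;

	return (end < start) ? (u64)-1 : end - 1;
}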
5590 
5591 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5592 						  struct extent_map *em,
5593 						  u64 start, u64 len)
5594 {
5595 	struct btrfs_root *root = BTRFS_I(inode)->root;
5596 	struct btrfs_trans_handle *trans;
5597 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5598 	struct btrfs_key ins;
5599 	u64 alloc_hint;
5600 	int ret;
5601 	bool insert = false;
5602 
5603 	/*
5604 	 * Ok, if the extent map we looked up is a hole covering exactly the
5605 	 * range we want, there is no reason to allocate a new one.  However,
5606 	 * if it is not an exact fit, we need to free this one and drop the
5607 	 * cache for our range.
5608 	 */
5609 	if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
5610 	    em->len != len) {
5611 		free_extent_map(em);
5612 		em = NULL;
5613 		insert = true;
5614 		btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
5615 	}
5616 
5617 	trans = btrfs_join_transaction(root);
5618 	if (IS_ERR(trans))
5619 		return ERR_CAST(trans);
5620 
5621 	if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024)
5622 		btrfs_add_inode_defrag(trans, inode);
5623 
5624 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
5625 
5626 	alloc_hint = get_extent_allocation_hint(inode, start, len);
5627 	ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
5628 				   alloc_hint, &ins, 1);
5629 	if (ret) {
5630 		em = ERR_PTR(ret);
5631 		goto out;
5632 	}
5633 
5634 	if (!em) {
5635 		em = alloc_extent_map();
5636 		if (!em) {
5637 			em = ERR_PTR(-ENOMEM);
5638 			goto out;
5639 		}
5640 	}
5641 
5642 	em->start = start;
5643 	em->orig_start = em->start;
5644 	em->len = ins.offset;
5645 
5646 	em->block_start = ins.objectid;
5647 	em->block_len = ins.offset;
5648 	em->bdev = root->fs_info->fs_devices->latest_bdev;
5649 
5650 	/*
5651 	 * We need to do this because if we're using the original em we searched
5652 	 * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
5653 	 */
5654 	em->flags = 0;
5655 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
5656 
5657 	while (insert) {
5658 		write_lock(&em_tree->lock);
5659 		ret = add_extent_mapping(em_tree, em);
5660 		write_unlock(&em_tree->lock);
5661 		if (ret != -EEXIST)
5662 			break;
5663 		btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
5664 	}
5665 
5666 	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
5667 					   ins.offset, ins.offset, 0);
5668 	if (ret) {
5669 		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
5670 		em = ERR_PTR(ret);
5671 	}
5672 out:
5673 	btrfs_end_transaction(trans, root);
5674 	return em;
5675 }
5676 
5677 /*
5678  * returns 1 when the nocow is safe, < 0 on error, 0 if the
5679  * block must be cow'd
5680  */
5681 static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
5682 				      struct inode *inode, u64 offset, u64 len)
5683 {
5684 	struct btrfs_path *path;
5685 	int ret;
5686 	struct extent_buffer *leaf;
5687 	struct btrfs_root *root = BTRFS_I(inode)->root;
5688 	struct btrfs_file_extent_item *fi;
5689 	struct btrfs_key key;
5690 	u64 disk_bytenr;
5691 	u64 backref_offset;
5692 	u64 extent_end;
5693 	u64 num_bytes;
5694 	int slot;
5695 	int found_type;
5696 
5697 	path = btrfs_alloc_path();
5698 	if (!path)
5699 		return -ENOMEM;
5700 
5701 	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
5702 				       offset, 0);
5703 	if (ret < 0)
5704 		goto out;
5705 
5706 	slot = path->slots[0];
5707 	if (ret == 1) {
5708 		if (slot == 0) {
5709 			/* can't find the item, must cow */
5710 			ret = 0;
5711 			goto out;
5712 		}
5713 		slot--;
5714 	}
5715 	ret = 0;
5716 	leaf = path->nodes[0];
5717 	btrfs_item_key_to_cpu(leaf, &key, slot);
5718 	if (key.objectid != btrfs_ino(inode) ||
5719 	    key.type != BTRFS_EXTENT_DATA_KEY) {
5720 		/* not our file or wrong item type, must cow */
5721 		goto out;
5722 	}
5723 
5724 	if (key.offset > offset) {
5725 		/* Wrong offset, must cow */
5726 		goto out;
5727 	}
5728 
5729 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5730 	found_type = btrfs_file_extent_type(leaf, fi);
5731 	if (found_type != BTRFS_FILE_EXTENT_REG &&
5732 	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
5733 		/* not a regular extent, must cow */
5734 		goto out;
5735 	}
5736 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
5737 	backref_offset = btrfs_file_extent_offset(leaf, fi);
5738 
5739 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
5740 	if (extent_end < offset + len) {
5741 		/* extent doesn't include our full range, must cow */
5742 		goto out;
5743 	}
5744 
5745 	if (btrfs_extent_readonly(root, disk_bytenr))
5746 		goto out;
5747 
5748 	/*
5749 	 * look for other files referencing this extent, if we
5750 	 * find any we must cow
5751 	 */
5752 	if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
5753 				  key.offset - backref_offset, disk_bytenr))
5754 		goto out;
5755 
5756 	/*
5757 	 * adjust disk_bytenr and num_bytes to cover just the bytes
5758 	 * in this extent we are about to write.  If there
5759 	 * are any csums in that range we have to cow in order
5760 	 * to keep the csums correct
5761 	 */
5762 	disk_bytenr += backref_offset;
5763 	disk_bytenr += offset - key.offset;
5764 	num_bytes = min(offset + len, extent_end) - offset;
5765 	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
5766 		goto out;
5767 	/*
5768 	 * all of the above have passed, it is safe to overwrite this extent
5769 	 * without cow
5770 	 */
5771 	ret = 1;
5772 out:
5773 	btrfs_free_path(path);
5774 	return ret;
5775 }
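
/*
 * To summarize the checks above: nocow is only safe when the extent item
 * belongs to this inode, starts at or before the offset, is a regular or
 * prealloc extent covering the whole range, does not sit in a read-only
 * block group, has no other references, and has no csums in the range
 * being overwritten.  Failing any one of these forces a cow.
 */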
5776 
5777 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5778 				   struct buffer_head *bh_result, int create)
5779 {
5780 	struct extent_map *em;
5781 	struct btrfs_root *root = BTRFS_I(inode)->root;
5782 	u64 start = iblock << inode->i_blkbits;
5783 	u64 len = bh_result->b_size;
5784 	struct btrfs_trans_handle *trans;
5785 
5786 	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
5787 	if (IS_ERR(em))
5788 		return PTR_ERR(em);
5789 
5790 	/*
5791 	 * Ok, for INLINE and COMPRESSED extents we need to fall back to
5792 	 * buffered io.  INLINE is special, and we could probably kludge it in
5793 	 * here, but it's still buffered so for safety let's just fall back to
5794 	 * the generic buffered path.
5795 	 *
5796 	 * For COMPRESSED we _have_ to read the entire extent in so we can
5797 	 * decompress it, so there will be buffering required no matter what we
5798 	 * do, so go ahead and fall back to buffered.
5799 	 *
5800 	 * We return -ENOTBLK because that's what makes DIO go ahead and go
5801 	 * back to buffered IO.  Don't blame me, this is the price we pay for
5802 	 * using the generic code.
5803 	 */
5804 	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
5805 	    em->block_start == EXTENT_MAP_INLINE) {
5806 		free_extent_map(em);
5807 		return -ENOTBLK;
5808 	}
5809 
5810 	/* Just a good old fashioned hole, return */
5811 	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
5812 			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
5813 		free_extent_map(em);
5814 		/* DIO will do one hole at a time, so just unlock a sector */
5815 		unlock_extent(&BTRFS_I(inode)->io_tree, start,
5816 			      start + root->sectorsize - 1);
5817 		return 0;
5818 	}
5819 
5820 	/*
5821 	 * We don't allocate a new extent in the following cases
5822 	 *
5823 	 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
5824 	 * existing extent.
5825 	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
5826 	 * just use the extent.
5827 	 *
5828 	 */
5829 	if (!create) {
5830 		len = em->len - (start - em->start);
5831 		goto map;
5832 	}
5833 
5834 	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
5835 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
5836 	     em->block_start != EXTENT_MAP_HOLE)) {
5837 		int type;
5838 		int ret;
5839 		u64 block_start;
5840 
5841 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5842 			type = BTRFS_ORDERED_PREALLOC;
5843 		else
5844 			type = BTRFS_ORDERED_NOCOW;
5845 		len = min(len, em->len - (start - em->start));
5846 		block_start = em->block_start + (start - em->start);
5847 
5848 		/*
5849 		 * we're not going to log anything, but we do need
5850 		 * to make sure the current transaction stays open
5851 		 * while we look for nocow cross refs
5852 		 */
5853 		trans = btrfs_join_transaction(root);
5854 		if (IS_ERR(trans))
5855 			goto must_cow;
5856 
5857 		if (can_nocow_odirect(trans, inode, start, len) == 1) {
5858 			ret = btrfs_add_ordered_extent_dio(inode, start,
5859 					   block_start, len, len, type);
5860 			btrfs_end_transaction(trans, root);
5861 			if (ret) {
5862 				free_extent_map(em);
5863 				return ret;
5864 			}
5865 			goto unlock;
5866 		}
5867 		btrfs_end_transaction(trans, root);
5868 	}
5869 must_cow:
5870 	/*
5871 	 * this will cow the extent, reset the len in case we changed
5872 	 * it above
5873 	 */
5874 	len = bh_result->b_size;
5875 	em = btrfs_new_extent_direct(inode, em, start, len);
5876 	if (IS_ERR(em))
5877 		return PTR_ERR(em);
5878 	len = min(len, em->len - (start - em->start));
5879 unlock:
5880 	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
5881 			  EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
5882 			  0, NULL, GFP_NOFS);
5883 map:
5884 	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
5885 		inode->i_blkbits;
5886 	bh_result->b_size = len;
5887 	bh_result->b_bdev = em->bdev;
5888 	set_buffer_mapped(bh_result);
5889 	if (create) {
5890 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5891 			set_buffer_new(bh_result);
5892 
5893 		/*
5894 		 * Need to update the i_size under the extent lock so buffered
5895 		 * readers will get the updated i_size when we unlock.
5896 		 */
5897 		if (start + len > i_size_read(inode))
5898 			i_size_write(inode, start + len);
5899 	}
5900 
5901 	free_extent_map(em);
5902 
5903 	return 0;
5904 }
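
/*
 * Illustrative sketch (hypothetical helper, not part of btrfs) of the
 * b_blocknr math above: a byte offset inside the extent map is turned
 * into a disk block number by adding the offset into the extent to the
 * extent's disk start and shifting by the inode's block size bits.
 */
static inline sector_t example_em_to_blocknr(u64 block_start, u64 start,
					     u64 em_start, unsigned blkbits)
{
	return (block_start + (start - em_start)) >> blkbits;
}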
5905 
5906 struct btrfs_dio_private {
5907 	struct inode *inode;
5908 	u64 logical_offset;
5909 	u64 disk_bytenr;
5910 	u64 bytes;
5911 	u32 *csums;
5912 	void *private;
5913 
5914 	/* number of bios pending for this dio */
5915 	atomic_t pending_bios;
5916 
5917 	/* IO errors */
5918 	int errors;
5919 
5920 	struct bio *orig_bio;
5921 };
5922 
5923 static void btrfs_endio_direct_read(struct bio *bio, int err)
5924 {
5925 	struct btrfs_dio_private *dip = bio->bi_private;
5926 	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
5927 	struct bio_vec *bvec = bio->bi_io_vec;
5928 	struct inode *inode = dip->inode;
5929 	struct btrfs_root *root = BTRFS_I(inode)->root;
5930 	u64 start;
5931 	u32 *private = dip->csums;
5932 
5933 	start = dip->logical_offset;
5934 	do {
5935 		if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
5936 			struct page *page = bvec->bv_page;
5937 			char *kaddr;
5938 			u32 csum = ~(u32)0;
5939 			unsigned long flags;
5940 
5941 			local_irq_save(flags);
5942 			kaddr = kmap_atomic(page);
5943 			csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
5944 					       csum, bvec->bv_len);
5945 			btrfs_csum_final(csum, (char *)&csum);
5946 			kunmap_atomic(kaddr);
5947 			local_irq_restore(flags);
5948 
5949 			flush_dcache_page(bvec->bv_page);
5950 			if (csum != *private) {
5951 				printk(KERN_ERR "btrfs csum failed ino %llu off"
5952 				      " %llu csum %u private %u\n",
5953 				      (unsigned long long)btrfs_ino(inode),
5954 				      (unsigned long long)start,
5955 				      csum, *private);
5956 				err = -EIO;
5957 			}
5958 		}
5959 
5960 		start += bvec->bv_len;
5961 		private++;
5962 		bvec++;
5963 	} while (bvec <= bvec_end);
5964 
5965 	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
5966 		      dip->logical_offset + dip->bytes - 1);
5967 	bio->bi_private = dip->private;
5968 
5969 	kfree(dip->csums);
5970 	kfree(dip);
5971 
5972 	/* If we had a csum failure make sure to clear the uptodate flag */
5973 	if (err)
5974 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
5975 	dio_end_io(bio, err);
5976 }
5977 
5978 static void btrfs_endio_direct_write(struct bio *bio, int err)
5979 {
5980 	struct btrfs_dio_private *dip = bio->bi_private;
5981 	struct inode *inode = dip->inode;
5982 	struct btrfs_root *root = BTRFS_I(inode)->root;
5983 	struct btrfs_ordered_extent *ordered = NULL;
5984 	u64 ordered_offset = dip->logical_offset;
5985 	u64 ordered_bytes = dip->bytes;
5986 	int ret;
5987 
5988 	if (err)
5989 		goto out_done;
5990 again:
5991 	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
5992 						   &ordered_offset,
5993 						   ordered_bytes, !err);
5994 	if (!ret)
5995 		goto out_test;
5996 
5997 	ordered->work.func = finish_ordered_fn;
5998 	ordered->work.flags = 0;
5999 	btrfs_queue_worker(&root->fs_info->endio_write_workers,
6000 			   &ordered->work);
6001 out_test:
6002 	/*
6003 	 * our bio might span multiple ordered extents.  If we haven't
6004 	 * completed the accounting for the whole dio, go back and try again
6005 	 */
6006 	if (ordered_offset < dip->logical_offset + dip->bytes) {
6007 		ordered_bytes = dip->logical_offset + dip->bytes -
6008 			ordered_offset;
6009 		ordered = NULL;
6010 		goto again;
6011 	}
6012 out_done:
6013 	bio->bi_private = dip->private;
6014 
6015 	kfree(dip);
6016 
6017 	/* If we had an error make sure to clear the uptodate flag */
6018 	if (err)
6019 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
6020 	dio_end_io(bio, err);
6021 }
6022 
6023 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
6024 				    struct bio *bio, int mirror_num,
6025 				    unsigned long bio_flags, u64 offset)
6026 {
6027 	int ret;
6028 	struct btrfs_root *root = BTRFS_I(inode)->root;
6029 	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
6030 	BUG_ON(ret); /* -ENOMEM */
6031 	return 0;
6032 }
6033 
6034 static void btrfs_end_dio_bio(struct bio *bio, int err)
6035 {
6036 	struct btrfs_dio_private *dip = bio->bi_private;
6037 
6038 	if (err) {
6039 		printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
6040 		      "sector %#Lx len %u err no %d\n",
6041 		      (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
6042 		      (unsigned long long)bio->bi_sector, bio->bi_size, err);
6043 		dip->errors = 1;
6044 
6045 		/*
6046 		 * before the atomic variable reaches zero, we must make
6047 		 * sure dip->errors is perceived to be set.
6048 		 */
6049 		smp_mb__before_atomic_dec();
6050 	}
6051 
6052 	/* if there are more bios still pending for this dio, just exit */
6053 	if (!atomic_dec_and_test(&dip->pending_bios))
6054 		goto out;
6055 
6056 	if (dip->errors)
6057 		bio_io_error(dip->orig_bio);
6058 	else {
6059 		set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
6060 		bio_endio(dip->orig_bio, 0);
6061 	}
6062 out:
6063 	bio_put(bio);
6064 }
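
/*
 * Note on the pattern above (summary, no new mechanism): each split bio
 * holds one reference on dip->pending_bios; only the final
 * atomic_dec_and_test() completes the original bio, and the
 * smp_mb__before_atomic_dec() guarantees dip->errors is visible to
 * whichever bio ends up being last.
 */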
6065 
6066 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
6067 				       u64 first_sector, gfp_t gfp_flags)
6068 {
6069 	int nr_vecs = bio_get_nr_vecs(bdev);
6070 	return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
6071 }
6072 
6073 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
6074 					 int rw, u64 file_offset, int skip_sum,
6075 					 u32 *csums, int async_submit)
6076 {
6077 	int write = rw & REQ_WRITE;
6078 	struct btrfs_root *root = BTRFS_I(inode)->root;
6079 	int ret;
6080 
6081 	bio_get(bio);
6082 
6083 	if (!write) {
6084 		ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
6085 		if (ret)
6086 			goto err;
6087 	}
6088 
6089 	if (skip_sum)
6090 		goto map;
6091 
6092 	if (write && async_submit) {
6093 		ret = btrfs_wq_submit_bio(root->fs_info,
6094 				   inode, rw, bio, 0, 0,
6095 				   file_offset,
6096 				   __btrfs_submit_bio_start_direct_io,
6097 				   __btrfs_submit_bio_done);
6098 		goto err;
6099 	} else if (write) {
6100 		/*
6101 		 * If we aren't doing async submit, calculate the csum of the
6102 		 * bio now.
6103 		 */
6104 		ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
6105 		if (ret)
6106 			goto err;
6107 	} else if (!skip_sum) {
6108 		ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
6109 					  file_offset, csums);
6110 		if (ret)
6111 			goto err;
6112 	}
6113 
6114 map:
6115 	ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
6116 err:
6117 	bio_put(bio);
6118 	return ret;
6119 }
6120 
6121 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6122 				    int skip_sum)
6123 {
6124 	struct inode *inode = dip->inode;
6125 	struct btrfs_root *root = BTRFS_I(inode)->root;
6126 	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
6127 	struct bio *bio;
6128 	struct bio *orig_bio = dip->orig_bio;
6129 	struct bio_vec *bvec = orig_bio->bi_io_vec;
6130 	u64 start_sector = orig_bio->bi_sector;
6131 	u64 file_offset = dip->logical_offset;
6132 	u64 submit_len = 0;
6133 	u64 map_length;
6134 	int nr_pages = 0;
6135 	u32 *csums = dip->csums;
6136 	int ret = 0;
6137 	int async_submit = 0;
6138 	int write = rw & REQ_WRITE;
6139 
6140 	map_length = orig_bio->bi_size;
6141 	ret = btrfs_map_block(map_tree, READ, start_sector << 9,
6142 			      &map_length, NULL, 0);
6143 	if (ret) {
6144 		bio_put(orig_bio);
6145 		return -EIO;
6146 	}
6147 
6148 	if (map_length >= orig_bio->bi_size) {
6149 		bio = orig_bio;
6150 		goto submit;
6151 	}
6152 
6153 	async_submit = 1;
6154 	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
6155 	if (!bio)
6156 		return -ENOMEM;
6157 	bio->bi_private = dip;
6158 	bio->bi_end_io = btrfs_end_dio_bio;
6159 	atomic_inc(&dip->pending_bios);
6160 
6161 	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
6162 		if (unlikely(map_length < submit_len + bvec->bv_len ||
6163 		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
6164 				 bvec->bv_offset) < bvec->bv_len)) {
6165 			/*
6166 			 * inc the count before we submit the bio so
6167 			 * we know the end IO handler can't run and
6168 			 * free the dip out from under us before we're
6169 			 * done setting it up
6170 			 */
6171 			atomic_inc(&dip->pending_bios);
6172 			ret = __btrfs_submit_dio_bio(bio, inode, rw,
6173 						     file_offset, skip_sum,
6174 						     csums, async_submit);
6175 			if (ret) {
6176 				bio_put(bio);
6177 				atomic_dec(&dip->pending_bios);
6178 				goto out_err;
6179 			}
6180 
6181 			/* Writes use the ordered csums */
6182 			if (!write && !skip_sum)
6183 				csums = csums + nr_pages;
6184 			start_sector += submit_len >> 9;
6185 			file_offset += submit_len;
6186 
6187 			submit_len = 0;
6188 			nr_pages = 0;
6189 
6190 			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
6191 						  start_sector, GFP_NOFS);
6192 			if (!bio)
6193 				goto out_err;
6194 			bio->bi_private = dip;
6195 			bio->bi_end_io = btrfs_end_dio_bio;
6196 
6197 			map_length = orig_bio->bi_size;
6198 			ret = btrfs_map_block(map_tree, READ, start_sector << 9,
6199 					      &map_length, NULL, 0);
6200 			if (ret) {
6201 				bio_put(bio);
6202 				goto out_err;
6203 			}
6204 		} else {
6205 			submit_len += bvec->bv_len;
6206 			nr_pages++;
6207 			bvec++;
6208 		}
6209 	}
6210 
6211 submit:
6212 	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
6213 				     csums, async_submit);
6214 	if (!ret)
6215 		return 0;
6216 
6217 	bio_put(bio);
6218 out_err:
6219 	dip->errors = 1;
6220 	/*
6221 	 * before the atomic variable reaches zero, we must
6222 	 * make sure dip->errors is perceived to be set.
6223 	 */
6224 	smp_mb__before_atomic_dec();
6225 	if (atomic_dec_and_test(&dip->pending_bios))
6226 		bio_io_error(dip->orig_bio);
6227 
6228 	/* bio_end_io() will handle the error, so we needn't return it */
6229 	return 0;
6230 }
6231 
6232 static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
6233 				loff_t file_offset)
6234 {
6235 	struct btrfs_root *root = BTRFS_I(inode)->root;
6236 	struct btrfs_dio_private *dip;
6237 	struct bio_vec *bvec = bio->bi_io_vec;
6238 	int skip_sum;
6239 	int write = rw & REQ_WRITE;
6240 	int ret = 0;
6241 
6242 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
6243 
6244 	dip = kmalloc(sizeof(*dip), GFP_NOFS);
6245 	if (!dip) {
6246 		ret = -ENOMEM;
6247 		goto free_ordered;
6248 	}
6249 	dip->csums = NULL;
6250 
6251 	/* Writes use the ordered csum stuff, so we don't need dip->csums */
6252 	if (!write && !skip_sum) {
6253 		dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
6254 		if (!dip->csums) {
6255 			kfree(dip);
6256 			ret = -ENOMEM;
6257 			goto free_ordered;
6258 		}
6259 	}
6260 
6261 	dip->private = bio->bi_private;
6262 	dip->inode = inode;
6263 	dip->logical_offset = file_offset;
6264 
6265 	dip->bytes = 0;
6266 	do {
6267 		dip->bytes += bvec->bv_len;
6268 		bvec++;
6269 	} while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));
6270 
6271 	dip->disk_bytenr = (u64)bio->bi_sector << 9;
6272 	bio->bi_private = dip;
6273 	dip->errors = 0;
6274 	dip->orig_bio = bio;
6275 	atomic_set(&dip->pending_bios, 0);
6276 
6277 	if (write)
6278 		bio->bi_end_io = btrfs_endio_direct_write;
6279 	else
6280 		bio->bi_end_io = btrfs_endio_direct_read;
6281 
6282 	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
6283 	if (!ret)
6284 		return;
6285 free_ordered:
6286 	/*
6287 	 * If this is a write, we need to clean up the reserved space and kill
6288 	 * the ordered extent.
6289 	 */
6290 	if (write) {
6291 		struct btrfs_ordered_extent *ordered;
6292 		ordered = btrfs_lookup_ordered_extent(inode, file_offset);
6293 		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
6294 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
6295 			btrfs_free_reserved_extent(root, ordered->start,
6296 						   ordered->disk_len);
6297 		btrfs_put_ordered_extent(ordered); /* once for our lookup */
6298 		btrfs_put_ordered_extent(ordered); /* once for the ordered extent's own ref */
6299 	}
6300 	bio_endio(bio, ret);
6301 }
6302 
6303 static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
6304 			const struct iovec *iov, loff_t offset,
6305 			unsigned long nr_segs)
6306 {
6307 	int seg;
6308 	int i;
6309 	size_t size;
6310 	unsigned long addr;
6311 	unsigned blocksize_mask = root->sectorsize - 1;
6312 	ssize_t retval = -EINVAL;
6313 	loff_t end = offset;
6314 
6315 	if (offset & blocksize_mask)
6316 		goto out;
6317 
6318 	/* Check the memory alignment.  Blocks cannot straddle pages */
6319 	for (seg = 0; seg < nr_segs; seg++) {
6320 		addr = (unsigned long)iov[seg].iov_base;
6321 		size = iov[seg].iov_len;
6322 		end += size;
6323 		if ((addr & blocksize_mask) || (size & blocksize_mask))
6324 			goto out;
6325 
6326 		/* If this is a write we don't need to check anymore */
6327 		if (rw & WRITE)
6328 			continue;
6329 
6330 		/*
6331 		 * Check to make sure we don't have duplicate iov_base's in this
6332 		 * iovec, if so return EINVAL, otherwise we'll get csum errors
6333 		 * when reading back.
6334 		 */
6335 		for (i = seg + 1; i < nr_segs; i++) {
6336 			if (iov[seg].iov_base == iov[i].iov_base)
6337 				goto out;
6338 		}
6339 	}
6340 	retval = 0;
6341 out:
6342 	return retval;
6343 }
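
/*
 * A minimal sketch (hypothetical helper, not part of btrfs) of the
 * alignment rule check_direct_IO() enforces above: the file offset and
 * every iovec base and length must be a multiple of the fs sector size.
 */
static inline int example_dio_aligned(u64 value, unsigned blocksize_mask)
{
	return (value & blocksize_mask) == 0;
}
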
6344 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
6345 			const struct iovec *iov, loff_t offset,
6346 			unsigned long nr_segs)
6347 {
6348 	struct file *file = iocb->ki_filp;
6349 	struct inode *inode = file->f_mapping->host;
6350 	struct btrfs_ordered_extent *ordered;
6351 	struct extent_state *cached_state = NULL;
6352 	u64 lockstart, lockend;
6353 	ssize_t ret;
6354 	int writing = rw & WRITE;
6355 	int write_bits = 0;
6356 	size_t count = iov_length(iov, nr_segs);
6357 
6358 	if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
6359 			    offset, nr_segs)) {
6360 		return 0;
6361 	}
6362 
6363 	lockstart = offset;
6364 	lockend = offset + count - 1;
6365 
6366 	if (writing) {
6367 		ret = btrfs_delalloc_reserve_space(inode, count);
6368 		if (ret)
6369 			goto out;
6370 	}
6371 
6372 	while (1) {
6373 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6374 				 0, &cached_state);
6375 		/*
6376 		 * We're concerned with the entire range that we're going to be
6377 		 * doing DIO to, so we need to make sure there are no ordered
6378 		 * extents in this range.
6379 		 */
6380 		ordered = btrfs_lookup_ordered_range(inode, lockstart,
6381 						     lockend - lockstart + 1);
6382 
6383 		/*
6384 		 * We need to make sure there are no buffered pages in this
6385 		 * range either, we could have raced between the invalidate in
6386 		 * generic_file_direct_write and locking the extent.  The
6387 		 * invalidate needs to happen so that reads after a write do not
6388 		 * get stale data.
6389 		 */
6390 		if (!ordered && (!writing ||
6391 		    !test_range_bit(&BTRFS_I(inode)->io_tree,
6392 				    lockstart, lockend, EXTENT_UPTODATE, 0,
6393 				    cached_state)))
6394 			break;
6395 
6396 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6397 				     &cached_state, GFP_NOFS);
6398 
6399 		if (ordered) {
6400 			btrfs_start_ordered_extent(inode, ordered, 1);
6401 			btrfs_put_ordered_extent(ordered);
6402 		} else {
6403 			/* Screw you mmap */
6404 			ret = filemap_write_and_wait_range(file->f_mapping,
6405 							   lockstart,
6406 							   lockend);
6407 			if (ret)
6408 				goto out;
6409 
6410 			/*
6411 			 * If we found a page that couldn't be invalidated just
6412 			 * fall back to buffered.
6413 			 */
6414 			ret = invalidate_inode_pages2_range(file->f_mapping,
6415 					lockstart >> PAGE_CACHE_SHIFT,
6416 					lockend >> PAGE_CACHE_SHIFT);
6417 			if (ret) {
6418 				if (ret == -EBUSY)
6419 					ret = 0;
6420 				goto out;
6421 			}
6422 		}
6423 
6424 		cond_resched();
6425 	}
6426 
6427 	/*
6428 	 * we don't use btrfs_set_extent_delalloc because we don't want
6429 	 * the dirty or uptodate bits
6430 	 */
6431 	if (writing) {
6432 		write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
6433 		ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6434 				     EXTENT_DELALLOC, NULL, &cached_state,
6435 				     GFP_NOFS);
6436 		if (ret) {
6437 			clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
6438 					 lockend, EXTENT_LOCKED | write_bits,
6439 					 1, 0, &cached_state, GFP_NOFS);
6440 			goto out;
6441 		}
6442 	}
6443 
6444 	free_extent_state(cached_state);
6445 	cached_state = NULL;
6446 
6447 	ret = __blockdev_direct_IO(rw, iocb, inode,
6448 		   BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
6449 		   iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
6450 		   btrfs_submit_direct, 0);
6451 
6452 	if (ret < 0 && ret != -EIOCBQUEUED) {
6453 		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
6454 			      offset + iov_length(iov, nr_segs) - 1,
6455 			      EXTENT_LOCKED | write_bits, 1, 0,
6456 			      &cached_state, GFP_NOFS);
6457 	} else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
6458 		/*
6459 		 * We're falling back to buffered, unlock the section we didn't
6460 		 * do IO on.
6461 		 */
6462 		clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
6463 			      offset + iov_length(iov, nr_segs) - 1,
6464 			      EXTENT_LOCKED | write_bits, 1, 0,
6465 			      &cached_state, GFP_NOFS);
6466 	}
6467 out:
6468 	free_extent_state(cached_state);
6469 	return ret;
6470 }
6471 
6472 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
6473 		__u64 start, __u64 len)
6474 {
6475 	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
6476 }
6477 
6478 int btrfs_readpage(struct file *file, struct page *page)
6479 {
6480 	struct extent_io_tree *tree;
6481 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6482 	return extent_read_full_page(tree, page, btrfs_get_extent, 0);
6483 }
6484 
6485 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
6486 {
6487 	struct extent_io_tree *tree;
6488 
6489 
6490 	if (current->flags & PF_MEMALLOC) {
6491 		redirty_page_for_writepage(wbc, page);
6492 		unlock_page(page);
6493 		return 0;
6494 	}
6495 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6496 	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
6497 }
6498 
6499 int btrfs_writepages(struct address_space *mapping,
6500 		     struct writeback_control *wbc)
6501 {
6502 	struct extent_io_tree *tree;
6503 
6504 	tree = &BTRFS_I(mapping->host)->io_tree;
6505 	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
6506 }
6507 
6508 static int
6509 btrfs_readpages(struct file *file, struct address_space *mapping,
6510 		struct list_head *pages, unsigned nr_pages)
6511 {
6512 	struct extent_io_tree *tree;
6513 	tree = &BTRFS_I(mapping->host)->io_tree;
6514 	return extent_readpages(tree, mapping, pages, nr_pages,
6515 				btrfs_get_extent);
6516 }
6517 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
6518 {
6519 	struct extent_io_tree *tree;
6520 	struct extent_map_tree *map;
6521 	int ret;
6522 
6523 	tree = &BTRFS_I(page->mapping->host)->io_tree;
6524 	map = &BTRFS_I(page->mapping->host)->extent_tree;
6525 	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
6526 	if (ret == 1) {
6527 		ClearPagePrivate(page);
6528 		set_page_private(page, 0);
6529 		page_cache_release(page);
6530 	}
6531 	return ret;
6532 }
6533 
6534 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
6535 {
6536 	if (PageWriteback(page) || PageDirty(page))
6537 		return 0;
6538 	return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
6539 }
6540 
6541 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
6542 {
6543 	struct inode *inode = page->mapping->host;
6544 	struct extent_io_tree *tree;
6545 	struct btrfs_ordered_extent *ordered;
6546 	struct extent_state *cached_state = NULL;
6547 	u64 page_start = page_offset(page);
6548 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
6549 
6550 	/*
6551 	 * we have the page locked, so new writeback can't start,
6552 	 * and the dirty bit won't be cleared while we are here.
6553 	 *
6554 	 * Wait for IO on this page so that we can safely clear
6555 	 * the PagePrivate2 bit and do ordered accounting
6556 	 */
6557 	wait_on_page_writeback(page);
6558 
6559 	tree = &BTRFS_I(inode)->io_tree;
6560 	if (offset) {
6561 		btrfs_releasepage(page, GFP_NOFS);
6562 		return;
6563 	}
6564 	lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
6565 	ordered = btrfs_lookup_ordered_extent(inode,
6566 					   page_offset(page));
6567 	if (ordered) {
6568 		/*
6569 		 * IO on this page will never be started, so we need
6570 		 * to account for any ordered extents now
6571 		 */
6572 		clear_extent_bit(tree, page_start, page_end,
6573 				 EXTENT_DIRTY | EXTENT_DELALLOC |
6574 				 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
6575 				 &cached_state, GFP_NOFS);
6576 		/*
6577 		 * whoever cleared the private bit is responsible
6578 		 * for the finish_ordered_io
6579 		 */
6580 		if (TestClearPagePrivate2(page) &&
6581 		    btrfs_dec_test_ordered_pending(inode, &ordered, page_start,
6582 						   PAGE_CACHE_SIZE, 1)) {
6583 			btrfs_finish_ordered_io(ordered);
6584 		}
6585 		btrfs_put_ordered_extent(ordered);
6586 		cached_state = NULL;
6587 		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
6588 	}
6589 	clear_extent_bit(tree, page_start, page_end,
6590 		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
6591 		 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
6592 	__btrfs_releasepage(page, GFP_NOFS);
6593 
6594 	ClearPageChecked(page);
6595 	if (PagePrivate(page)) {
6596 		ClearPagePrivate(page);
6597 		set_page_private(page, 0);
6598 		page_cache_release(page);
6599 	}
6600 }
6601 
6602 /*
6603  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
6604  * called from a page fault handler when a page is first dirtied. Hence we must
6605  * be careful to check for EOF conditions here. We set the page up correctly
6606  * for a written page which means we get ENOSPC checking when writing into
6607  * holes and correct delalloc and unwritten extent mapping on filesystems that
6608  * support these features.
6609  *
6610  * We are not allowed to take the i_mutex here so we have to play games to
6611  * protect against truncate races as the page could now be beyond EOF.  Because
6612  * vmtruncate() writes the inode size before removing pages, once we have the
6613  * page lock we can determine safely if the page is beyond EOF. If it is not
6614  * beyond EOF, then the page is guaranteed safe against truncation until we
6615  * unlock the page.
6616  */
6617 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
6618 {
6619 	struct page *page = vmf->page;
6620 	struct inode *inode = fdentry(vma->vm_file)->d_inode;
6621 	struct btrfs_root *root = BTRFS_I(inode)->root;
6622 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6623 	struct btrfs_ordered_extent *ordered;
6624 	struct extent_state *cached_state = NULL;
6625 	char *kaddr;
6626 	unsigned long zero_start;
6627 	loff_t size;
6628 	int ret;
6629 	int reserved = 0;
6630 	u64 page_start;
6631 	u64 page_end;
6632 
6633 	sb_start_pagefault(inode->i_sb);
6634 	ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
6635 	if (!ret) {
6636 		ret = file_update_time(vma->vm_file);
6637 		reserved = 1;
6638 	}
6639 	if (ret) {
6640 		if (ret == -ENOMEM)
6641 			ret = VM_FAULT_OOM;
6642 		else /* -ENOSPC, -EIO, etc */
6643 			ret = VM_FAULT_SIGBUS;
6644 		if (reserved)
6645 			goto out;
6646 		goto out_noreserve;
6647 	}
6648 
6649 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
6650 again:
6651 	lock_page(page);
6652 	size = i_size_read(inode);
6653 	page_start = page_offset(page);
6654 	page_end = page_start + PAGE_CACHE_SIZE - 1;
6655 
6656 	if ((page->mapping != inode->i_mapping) ||
6657 	    (page_start >= size)) {
6658 		/* page got truncated out from underneath us */
6659 		goto out_unlock;
6660 	}
6661 	wait_on_page_writeback(page);
6662 
6663 	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
6664 	set_page_extent_mapped(page);
6665 
6666 	/*
6667 	 * we can't set the delalloc bits if there are pending ordered
6668 	 * extents.  Drop our locks and wait for them to finish
6669 	 */
6670 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
6671 	if (ordered) {
6672 		unlock_extent_cached(io_tree, page_start, page_end,
6673 				     &cached_state, GFP_NOFS);
6674 		unlock_page(page);
6675 		btrfs_start_ordered_extent(inode, ordered, 1);
6676 		btrfs_put_ordered_extent(ordered);
6677 		goto again;
6678 	}
6679 
6680 	/*
6681 	 * XXX - page_mkwrite gets called every time the page is dirtied, even
6682 	 * if it was already dirty, so for space accounting reasons we need to
6683 	 * clear any delalloc bits for the range we are fixing to save.  There
6684 	 * is probably a better way to do this, but for now keep consistent with
6685 	 * prepare_pages in the normal write path.
6686 	 */
6687 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
6688 			  EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
6689 			  0, 0, &cached_state, GFP_NOFS);
6690 
6691 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
6692 					&cached_state);
6693 	if (ret) {
6694 		unlock_extent_cached(io_tree, page_start, page_end,
6695 				     &cached_state, GFP_NOFS);
6696 		ret = VM_FAULT_SIGBUS;
6697 		goto out_unlock;
6698 	}
6699 	ret = 0;
6700 
6701 	/* page is wholly or partially inside EOF */
6702 	if (page_start + PAGE_CACHE_SIZE > size)
6703 		zero_start = size & ~PAGE_CACHE_MASK;
6704 	else
6705 		zero_start = PAGE_CACHE_SIZE;
6706 
6707 	if (zero_start != PAGE_CACHE_SIZE) {
6708 		kaddr = kmap(page);
6709 		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
6710 		flush_dcache_page(page);
6711 		kunmap(page);
6712 	}
6713 	ClearPageChecked(page);
6714 	set_page_dirty(page);
6715 	SetPageUptodate(page);
6716 
6717 	BTRFS_I(inode)->last_trans = root->fs_info->generation;
6718 	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
6719 
6720 	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
6721 
6722 out_unlock:
6723 	if (!ret) {
6724 		sb_end_pagefault(inode->i_sb);
6725 		return VM_FAULT_LOCKED;
6726 	}
6727 	unlock_page(page);
6728 out:
6729 	btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
6730 out_noreserve:
6731 	sb_end_pagefault(inode->i_sb);
6732 	return ret;
6733 }
6734 
6735 static int btrfs_truncate(struct inode *inode)
6736 {
6737 	struct btrfs_root *root = BTRFS_I(inode)->root;
6738 	struct btrfs_block_rsv *rsv;
6739 	int ret;
6740 	int err = 0;
6741 	struct btrfs_trans_handle *trans;
6742 	unsigned long nr;
6743 	u64 mask = root->sectorsize - 1;
6744 	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
6745 
6746 	ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
6747 	if (ret)
6748 		return ret;
6749 
6750 	btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
6751 	btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
6752 
6753 	/*
6754 	 * Yes, ladies and gentlemen, this is indeed ugly.  The fact is we have
6755 	 * 3 things going on here
6756 	 *
6757 	 * 1) We need to reserve space for our orphan item and the space to
6758 	 * delete our orphan item.  Lord knows we don't want to have a dangling
6759 	 * orphan item because we didn't reserve space to remove it.
6760 	 *
6761 	 * 2) We need to reserve space to update our inode.
6762 	 *
6763 	 * 3) We need to have something to cache all the space that is going to
6764 	 * be freed up by the truncate operation, but also have some slack
6765 	 * space reserved in case it uses space during the truncate (thank you
6766 	 * very much snapshotting).
6767 	 *
6768 	 * And we need these to all be separate.  The fact is we can use a lot
6769 	 * of space doing the truncate, and we have no earthly idea how much
6770 	 * space we will use, so we need the truncate reservation to be separate
6771 	 * so it doesn't end up using space reserved for updating the inode or
6772 	 * removing the orphan item.  We also need to be able to stop the
6773 	 * transaction and start a new one, which means we need to be able to
6774 	 * update the inode several times, and we have no way of knowing how
6775 	 * many times that will be, so we can't just reserve 1 item for the
6776 	 * entirety of the operation, so that has to be done separately as well.
6777 	 * Then there is the orphan item, which does indeed need to be held on
6778 	 * to for the whole operation, and we need nobody to touch this reserved
6779 	 * space except the orphan code.
6780 	 *
6781 	 * So that leaves us with
6782 	 *
6783 	 * 1) root->orphan_block_rsv - for the orphan deletion.
6784 	 * 2) rsv - for the truncate reservation, which we will steal from the
6785 	 * transaction reservation.
6786 	 * 3) fs_info->trans_block_rsv - this will have 1 items worth left for
6787 	 * updating the inode.
6788 	 */
6789 	rsv = btrfs_alloc_block_rsv(root);
6790 	if (!rsv)
6791 		return -ENOMEM;
6792 	rsv->size = min_size;
6793 
6794 	/*
6795 	 * 1 for the truncate slack space
6796 	 * 1 for the orphan item we're going to add
6797 	 * 1 for the orphan item deletion
6798 	 * 1 for updating the inode.
6799 	 */
6800 	trans = btrfs_start_transaction(root, 4);
6801 	if (IS_ERR(trans)) {
6802 		err = PTR_ERR(trans);
6803 		goto out;
6804 	}
6805 
6806 	/* Migrate the slack space for the truncate to our reserve */
6807 	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
6808 				      min_size);
6809 	BUG_ON(ret);
6810 
6811 	ret = btrfs_orphan_add(trans, inode);
6812 	if (ret) {
6813 		btrfs_end_transaction(trans, root);
6814 		goto out;
6815 	}
6816 
6817 	/*
6818 	 * setattr is responsible for setting the ordered_data_close flag,
6819 	 * but that is only tested during the last file release.  That
6820 	 * could happen well after the next commit, leaving a great big
6821 	 * window where new writes may get lost if someone chooses to write
6822 	 * to this file after truncating to zero
6823 	 *
6824 	 * The inode doesn't have any dirty data here, and so if we commit
6825 	 * this is a noop.  If someone immediately starts writing to the inode
6826 	 * it is very likely we'll catch some of their writes in this
6827 	 * transaction, and the commit will find this file on the ordered
6828 	 * data list with good things to send down.
6829 	 *
6830 	 * This is a best effort solution, there is still a window where
6831 	 * using truncate to replace the contents of the file will
6832 	 * end up with a zero length file after a crash.
6833 	 */
6834 	if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
6835 					   &BTRFS_I(inode)->runtime_flags))
6836 		btrfs_add_ordered_operation(trans, root, inode);
6837 
6838 	while (1) {
6839 		ret = btrfs_block_rsv_refill(root, rsv, min_size);
6840 		if (ret) {
6841 			/*
6842 			 * This can only happen with the original transaction we
6843 			 * started above; every other time we shouldn't have a
6844 			 * transaction started yet.
6845 			 */
6846 			if (ret == -EAGAIN)
6847 				goto end_trans;
6848 			err = ret;
6849 			break;
6850 		}
6851 
6852 		if (!trans) {
6853 			/* Just need the 1 for updating the inode */
6854 			trans = btrfs_start_transaction(root, 1);
6855 			if (IS_ERR(trans)) {
6856 				ret = err = PTR_ERR(trans);
6857 				trans = NULL;
6858 				break;
6859 			}
6860 		}
6861 
6862 		trans->block_rsv = rsv;
6863 
6864 		ret = btrfs_truncate_inode_items(trans, root, inode,
6865 						 inode->i_size,
6866 						 BTRFS_EXTENT_DATA_KEY);
6867 		if (ret != -EAGAIN) {
6868 			err = ret;
6869 			break;
6870 		}
6871 
6872 		trans->block_rsv = &root->fs_info->trans_block_rsv;
6873 		ret = btrfs_update_inode(trans, root, inode);
6874 		if (ret) {
6875 			err = ret;
6876 			break;
6877 		}
6878 end_trans:
6879 		nr = trans->blocks_used;
6880 		btrfs_end_transaction(trans, root);
6881 		trans = NULL;
6882 		btrfs_btree_balance_dirty(root, nr);
6883 	}
6884 
6885 	if (ret == 0 && inode->i_nlink > 0) {
6886 		trans->block_rsv = root->orphan_block_rsv;
6887 		ret = btrfs_orphan_del(trans, inode);
6888 		if (ret)
6889 			err = ret;
6890 	} else if (ret && inode->i_nlink > 0) {
6891 		/*
6892 		 * Failed to do the truncate, remove us from the in-memory
6893 		 * orphan list.
6894 		 */
6895 		ret = btrfs_orphan_del(NULL, inode);
6896 	}
6897 
6898 	if (trans) {
6899 		trans->block_rsv = &root->fs_info->trans_block_rsv;
6900 		ret = btrfs_update_inode(trans, root, inode);
6901 		if (ret && !err)
6902 			err = ret;
6903 
6904 		nr = trans->blocks_used;
6905 		ret = btrfs_end_transaction(trans, root);
6906 		btrfs_btree_balance_dirty(root, nr);
6907 	}
6908 
6909 out:
6910 	btrfs_free_block_rsv(root, rsv);
6911 
6912 	if (ret && !err)
6913 		err = ret;
6914 
6915 	return err;
6916 }
6917 
6918 /*
6919  * create a new subvolume directory/inode (helper for the ioctl).
6920  */
6921 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
6922 			     struct btrfs_root *new_root, u64 new_dirid)
6923 {
6924 	struct inode *inode;
6925 	int err;
6926 	u64 index = 0;
6927 
6928 	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
6929 				new_dirid, new_dirid,
6930 				S_IFDIR | (~current_umask() & S_IRWXUGO),
6931 				&index);
6932 	if (IS_ERR(inode))
6933 		return PTR_ERR(inode);
6934 	inode->i_op = &btrfs_dir_inode_operations;
6935 	inode->i_fop = &btrfs_dir_file_operations;
6936 
6937 	set_nlink(inode, 1);
6938 	btrfs_i_size_write(inode, 0);
6939 
6940 	err = btrfs_update_inode(trans, new_root, inode);
6941 
6942 	iput(inode);
6943 	return err;
6944 }
6945 
6946 struct inode *btrfs_alloc_inode(struct super_block *sb)
6947 {
6948 	struct btrfs_inode *ei;
6949 	struct inode *inode;
6950 
6951 	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
6952 	if (!ei)
6953 		return NULL;
6954 
6955 	ei->root = NULL;
6956 	ei->generation = 0;
6957 	ei->last_trans = 0;
6958 	ei->last_sub_trans = 0;
6959 	ei->logged_trans = 0;
6960 	ei->delalloc_bytes = 0;
6961 	ei->disk_i_size = 0;
6962 	ei->flags = 0;
6963 	ei->csum_bytes = 0;
6964 	ei->index_cnt = (u64)-1;
6965 	ei->last_unlink_trans = 0;
6966 
6967 	spin_lock_init(&ei->lock);
6968 	ei->outstanding_extents = 0;
6969 	ei->reserved_extents = 0;
6970 
6971 	ei->runtime_flags = 0;
6972 	ei->force_compress = BTRFS_COMPRESS_NONE;
6973 
6974 	ei->delayed_node = NULL;
6975 
6976 	inode = &ei->vfs_inode;
6977 	extent_map_tree_init(&ei->extent_tree);
6978 	extent_io_tree_init(&ei->io_tree, &inode->i_data);
6979 	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
6980 	ei->io_tree.track_uptodate = 1;
6981 	ei->io_failure_tree.track_uptodate = 1;
6982 	mutex_init(&ei->log_mutex);
6983 	mutex_init(&ei->delalloc_mutex);
6984 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
6985 	INIT_LIST_HEAD(&ei->delalloc_inodes);
6986 	INIT_LIST_HEAD(&ei->ordered_operations);
6987 	RB_CLEAR_NODE(&ei->rb_node);
6988 
6989 	return inode;
6990 }
6991 
6992 static void btrfs_i_callback(struct rcu_head *head)
6993 {
6994 	struct inode *inode = container_of(head, struct inode, i_rcu);
6995 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
6996 }
6997 
6998 void btrfs_destroy_inode(struct inode *inode)
6999 {
7000 	struct btrfs_ordered_extent *ordered;
7001 	struct btrfs_root *root = BTRFS_I(inode)->root;
7002 
7003 	WARN_ON(!hlist_empty(&inode->i_dentry));
7004 	WARN_ON(inode->i_data.nrpages);
7005 	WARN_ON(BTRFS_I(inode)->outstanding_extents);
7006 	WARN_ON(BTRFS_I(inode)->reserved_extents);
7007 	WARN_ON(BTRFS_I(inode)->delalloc_bytes);
7008 	WARN_ON(BTRFS_I(inode)->csum_bytes);
7009 
7010 	/*
7011 	 * This can happen when we create an inode, but somebody else also
7012 	 * created the same inode and we need to destroy the one we already
7013 	 * created.
7014 	 */
7015 	if (!root)
7016 		goto free;
7017 
7018 	/*
7019 	 * Make sure we're properly removed from the ordered operation
7020 	 * lists.
7021 	 */
7022 	smp_mb();
7023 	if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
7024 		spin_lock(&root->fs_info->ordered_extent_lock);
7025 		list_del_init(&BTRFS_I(inode)->ordered_operations);
7026 		spin_unlock(&root->fs_info->ordered_extent_lock);
7027 	}
7028 
7029 	if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
7030 		     &BTRFS_I(inode)->runtime_flags)) {
7031 		printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
7032 		       (unsigned long long)btrfs_ino(inode));
7033 		atomic_dec(&root->orphan_inodes);
7034 	}
7035 
7036 	while (1) {
7037 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
7038 		if (!ordered)
7039 			break;
7040 		else {
7041 			printk(KERN_ERR "btrfs found ordered "
7042 			       "extent %llu %llu on inode cleanup\n",
7043 			       (unsigned long long)ordered->file_offset,
7044 			       (unsigned long long)ordered->len);
7045 			btrfs_remove_ordered_extent(inode, ordered);
7046 			btrfs_put_ordered_extent(ordered); /* once for our lookup */
7047 			btrfs_put_ordered_extent(ordered); /* once for the ordered extent's own ref */
7048 		}
7049 	}
7050 	inode_tree_del(inode);
7051 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
7052 free:
7053 	btrfs_remove_delayed_node(inode);
7054 	call_rcu(&inode->i_rcu, btrfs_i_callback);
7055 }
7056 
7057 int btrfs_drop_inode(struct inode *inode)
7058 {
7059 	struct btrfs_root *root = BTRFS_I(inode)->root;
7060 
7061 	if (btrfs_root_refs(&root->root_item) == 0 &&
7062 	    !btrfs_is_free_space_inode(inode))
7063 		return 1;
7064 	else
7065 		return generic_drop_inode(inode);
7066 }
7067 
7068 static void init_once(void *foo)
7069 {
7070 	struct btrfs_inode *ei = (struct btrfs_inode *) foo;
7071 
7072 	inode_init_once(&ei->vfs_inode);
7073 }
7074 
7075 void btrfs_destroy_cachep(void)
7076 {
7077 	if (btrfs_inode_cachep)
7078 		kmem_cache_destroy(btrfs_inode_cachep);
7079 	if (btrfs_trans_handle_cachep)
7080 		kmem_cache_destroy(btrfs_trans_handle_cachep);
7081 	if (btrfs_transaction_cachep)
7082 		kmem_cache_destroy(btrfs_transaction_cachep);
7083 	if (btrfs_path_cachep)
7084 		kmem_cache_destroy(btrfs_path_cachep);
7085 	if (btrfs_free_space_cachep)
7086 		kmem_cache_destroy(btrfs_free_space_cachep);
7087 }
7088 
7089 int btrfs_init_cachep(void)
7090 {
7091 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
7092 			sizeof(struct btrfs_inode), 0,
7093 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
7094 	if (!btrfs_inode_cachep)
7095 		goto fail;
7096 
7097 	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
7098 			sizeof(struct btrfs_trans_handle), 0,
7099 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7100 	if (!btrfs_trans_handle_cachep)
7101 		goto fail;
7102 
7103 	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
7104 			sizeof(struct btrfs_transaction), 0,
7105 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7106 	if (!btrfs_transaction_cachep)
7107 		goto fail;
7108 
7109 	btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
7110 			sizeof(struct btrfs_path), 0,
7111 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7112 	if (!btrfs_path_cachep)
7113 		goto fail;
7114 
7115 	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
7116 			sizeof(struct btrfs_free_space), 0,
7117 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
7118 	if (!btrfs_free_space_cachep)
7119 		goto fail;
7120 
7121 	return 0;
7122 fail:
7123 	btrfs_destroy_cachep();
7124 	return -ENOMEM;
7125 }
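
/*
 * Illustrative usage sketch (hypothetical helper, not part of btrfs):
 * objects come out of the caches created above via kmem_cache_zalloc()
 * and go back with kmem_cache_free(), e.g. for a path:
 */
static inline struct btrfs_path *example_path_alloc(void)
{
	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}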
7126 
7127 static int btrfs_getattr(struct vfsmount *mnt,
7128 			 struct dentry *dentry, struct kstat *stat)
7129 {
7130 	struct inode *inode = dentry->d_inode;
7131 	u32 blocksize = inode->i_sb->s_blocksize;
7132 
7133 	generic_fillattr(inode, stat);
7134 	stat->dev = BTRFS_I(inode)->root->anon_dev;
7135 	stat->blksize = PAGE_CACHE_SIZE;
7136 	stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
7137 		ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
7138 	return 0;
7139 }
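
/*
 * Illustrative sketch (hypothetical helper): stat->blocks above is
 * reported in 512-byte units, so each byte count is rounded up to the
 * fs block size and then shifted down by 9.
 */
static inline u64 example_bytes_to_stat_blocks(u64 bytes, u32 blocksize)
{
	return ALIGN(bytes, blocksize) >> 9;
}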
7140 
7141 /*
7142  * If a file is moved, it will inherit the cow and compression flags of the new
7143  * directory.
7144  */
7145 static void fixup_inode_flags(struct inode *dir, struct inode *inode)
7146 {
7147 	struct btrfs_inode *b_dir = BTRFS_I(dir);
7148 	struct btrfs_inode *b_inode = BTRFS_I(inode);
7149 
7150 	if (b_dir->flags & BTRFS_INODE_NODATACOW)
7151 		b_inode->flags |= BTRFS_INODE_NODATACOW;
7152 	else
7153 		b_inode->flags &= ~BTRFS_INODE_NODATACOW;
7154 
7155 	if (b_dir->flags & BTRFS_INODE_COMPRESS) {
7156 		b_inode->flags |= BTRFS_INODE_COMPRESS;
7157 		b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
7158 	} else {
7159 		b_inode->flags &= ~(BTRFS_INODE_COMPRESS |
7160 				    BTRFS_INODE_NOCOMPRESS);
7161 	}
7162 }
7163 
7164 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7165 			   struct inode *new_dir, struct dentry *new_dentry)
7166 {
7167 	struct btrfs_trans_handle *trans;
7168 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
7169 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
7170 	struct inode *new_inode = new_dentry->d_inode;
7171 	struct inode *old_inode = old_dentry->d_inode;
7172 	struct timespec ctime = CURRENT_TIME;
7173 	u64 index = 0;
7174 	u64 root_objectid;
7175 	int ret;
7176 	u64 old_ino = btrfs_ino(old_inode);
7177 
7178 	if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
7179 		return -EPERM;
7180 
7181 	/* we only allow renaming subvolume links between subvolumes */
7182 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
7183 		return -EXDEV;
7184 
7185 	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
7186 	    (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
7187 		return -ENOTEMPTY;
7188 
7189 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
7190 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
7191 		return -ENOTEMPTY;
7192 	/*
7193 	 * we're using rename to replace one file with another,
7194 	 * and the replacement file is large.  Start IO on it now so
7195 	 * we don't add too much work to the end of the transaction
7196 	 */
7197 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
7198 	    old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
7199 		filemap_flush(old_inode->i_mapping);
7200 
7201 	/* close the racy window with snapshot create/destroy ioctl */
7202 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
7203 		down_read(&root->fs_info->subvol_sem);
7204 	/*
7205 	 * We want to reserve the absolute worst case amount of items.  So if
7206 	 * both inodes are subvols and we need to unlink them then that would
7207 	 * require 4 item modifications, but if they are both normal inodes it
7208 	 * would require 5 item modifications, so we'll assume they're normal
7209 	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
7210 	 * should cover the worst case; we reserve 20 to leave some slack.
7211 	 */
7212 	trans = btrfs_start_transaction(root, 20);
7213 	if (IS_ERR(trans)) {
7214 		ret = PTR_ERR(trans);
7215 		goto out_notrans;
7216 	}
7217 
7218 	if (dest != root)
7219 		btrfs_record_root_in_trans(trans, dest);
7220 
7221 	ret = btrfs_set_inode_index(new_dir, &index);
7222 	if (ret)
7223 		goto out_fail;
7224 
7225 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
7226 		/* force full log commit if subvolume involved. */
7227 		root->fs_info->last_trans_log_full_commit = trans->transid;
7228 	} else {
7229 		ret = btrfs_insert_inode_ref(trans, dest,
7230 					     new_dentry->d_name.name,
7231 					     new_dentry->d_name.len,
7232 					     old_ino,
7233 					     btrfs_ino(new_dir), index);
7234 		if (ret)
7235 			goto out_fail;
7236 		/*
7237 		 * this is an ugly little race, but the rename is required
7238 		 * to make sure that if we crash, the inode is either at the
7239 		 * old name or the new one.  pinning the log transaction lets
7240 		 * us make sure we don't allow a log commit to come in after
7241 		 * we unlink the name but before we add the new name back in.
7242 		 */
7243 		btrfs_pin_log_trans(root);
7244 	}
7245 	/*
7246 	 * make sure the inode gets flushed if it is replacing
7247 	 * something.
7248 	 */
7249 	if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
7250 		btrfs_add_ordered_operation(trans, root, old_inode);
7251 
7252 	inode_inc_iversion(old_dir);
7253 	inode_inc_iversion(new_dir);
7254 	inode_inc_iversion(old_inode);
7255 	old_dir->i_ctime = old_dir->i_mtime = ctime;
7256 	new_dir->i_ctime = new_dir->i_mtime = ctime;
7257 	old_inode->i_ctime = ctime;
7258 
7259 	if (old_dentry->d_parent != new_dentry->d_parent)
7260 		btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
7261 
7262 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
7263 		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
7264 		ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
7265 					old_dentry->d_name.name,
7266 					old_dentry->d_name.len);
7267 	} else {
7268 		ret = __btrfs_unlink_inode(trans, root, old_dir,
7269 					old_dentry->d_inode,
7270 					old_dentry->d_name.name,
7271 					old_dentry->d_name.len);
7272 		if (!ret)
7273 			ret = btrfs_update_inode(trans, root, old_inode);
7274 	}
7275 	if (ret) {
7276 		btrfs_abort_transaction(trans, root, ret);
7277 		goto out_fail;
7278 	}
7279 
7280 	if (new_inode) {
7281 		inode_inc_iversion(new_inode);
7282 		new_inode->i_ctime = CURRENT_TIME;
7283 		if (unlikely(btrfs_ino(new_inode) ==
7284 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
7285 			root_objectid = BTRFS_I(new_inode)->location.objectid;
7286 			ret = btrfs_unlink_subvol(trans, dest, new_dir,
7287 						root_objectid,
7288 						new_dentry->d_name.name,
7289 						new_dentry->d_name.len);
7290 			BUG_ON(new_inode->i_nlink == 0);
7291 		} else {
7292 			ret = btrfs_unlink_inode(trans, dest, new_dir,
7293 						 new_dentry->d_inode,
7294 						 new_dentry->d_name.name,
7295 						 new_dentry->d_name.len);
7296 		}
7297 		if (!ret && new_inode->i_nlink == 0) {
7298 			ret = btrfs_orphan_add(trans, new_dentry->d_inode);
7299 			BUG_ON(ret);
7300 		}
7301 		if (ret) {
7302 			btrfs_abort_transaction(trans, root, ret);
7303 			goto out_fail;
7304 		}
7305 	}
7306 
7307 	fixup_inode_flags(new_dir, old_inode);
7308 
7309 	ret = btrfs_add_link(trans, new_dir, old_inode,
7310 			     new_dentry->d_name.name,
7311 			     new_dentry->d_name.len, 0, index);
7312 	if (ret) {
7313 		btrfs_abort_transaction(trans, root, ret);
7314 		goto out_fail;
7315 	}
7316 
7317 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
7318 		struct dentry *parent = new_dentry->d_parent;
7319 		btrfs_log_new_name(trans, old_inode, old_dir, parent);
7320 		btrfs_end_log_trans(root);
7321 	}
7322 out_fail:
7323 	btrfs_end_transaction(trans, root);
7324 out_notrans:
7325 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
7326 		up_read(&root->fs_info->subvol_sem);
7327 
7328 	return ret;
7329 }
7330 
7331 /*
7332  * some fairly slow code that needs optimization. This walks the list
7333  * of all the inodes with pending delalloc and forces them to disk.
7334  */
7335 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
7336 {
7337 	struct list_head *head = &root->fs_info->delalloc_inodes;
7338 	struct btrfs_inode *binode;
7339 	struct inode *inode;
7340 
7341 	if (root->fs_info->sb->s_flags & MS_RDONLY)
7342 		return -EROFS;
7343 
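	/*
	 * Walk the list under the lock, but take an inode reference and
	 * drop the lock around each filemap_flush().  If igrab() fails
	 * the inode is already on its way out, so just unhook it.
	 */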
7344 	spin_lock(&root->fs_info->delalloc_lock);
7345 	while (!list_empty(head)) {
7346 		binode = list_entry(head->next, struct btrfs_inode,
7347 				    delalloc_inodes);
7348 		inode = igrab(&binode->vfs_inode);
7349 		if (!inode)
7350 			list_del_init(&binode->delalloc_inodes);
7351 		spin_unlock(&root->fs_info->delalloc_lock);
7352 		if (inode) {
7353 			filemap_flush(inode->i_mapping);
7354 			if (delay_iput)
7355 				btrfs_add_delayed_iput(inode);
7356 			else
7357 				iput(inode);
7358 		}
7359 		cond_resched();
7360 		spin_lock(&root->fs_info->delalloc_lock);
7361 	}
7362 	spin_unlock(&root->fs_info->delalloc_lock);
7363 
7364 	/* the filemap_flush will queue IO into the worker threads, but
7365 	 * we have to make sure the IO is actually started and that
7366 	 * ordered extents get created before we return
7367 	 */
7368 	atomic_inc(&root->fs_info->async_submit_draining);
7369 	while (atomic_read(&root->fs_info->nr_async_submits) ||
7370 	      atomic_read(&root->fs_info->async_delalloc_pages)) {
7371 		wait_event(root->fs_info->async_submit_wait,
7372 		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
7373 		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
7374 	}
7375 	atomic_dec(&root->fs_info->async_submit_draining);
7376 	return 0;
7377 }
7378 
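/*
 * Symlink targets are stored, NUL included, as an inline file extent at
 * offset 0 of the link inode, which is why the target length is capped
 * at BTRFS_MAX_INLINE_DATA_SIZE() rather than PATH_MAX.
 */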
7379 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7380 			 const char *symname)
7381 {
7382 	struct btrfs_trans_handle *trans;
7383 	struct btrfs_root *root = BTRFS_I(dir)->root;
7384 	struct btrfs_path *path;
7385 	struct btrfs_key key;
7386 	struct inode *inode = NULL;
7387 	int err;
7388 	int drop_inode = 0;
7389 	u64 objectid;
7390 	u64 index = 0;
7391 	int name_len;
7392 	int datasize;
7393 	unsigned long ptr;
7394 	struct btrfs_file_extent_item *ei;
7395 	struct extent_buffer *leaf;
7396 	unsigned long nr = 0;
7397 
7398 	name_len = strlen(symname) + 1;	/* include the trailing NUL */
7399 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
7400 		return -ENAMETOOLONG;
7401 
7402 	/*
7403 	 * 2 items for inode item and ref
7404 	 * 2 items for dir items
7405 	 * 1 item for xattr if selinux is on
7406 	 */
7407 	trans = btrfs_start_transaction(root, 5);
7408 	if (IS_ERR(trans))
7409 		return PTR_ERR(trans);
7410 
7411 	err = btrfs_find_free_ino(root, &objectid);
7412 	if (err)
7413 		goto out_unlock;
7414 
7415 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
7416 				dentry->d_name.len, btrfs_ino(dir), objectid,
7417 				S_IFLNK|S_IRWXUGO, &index);
7418 	if (IS_ERR(inode)) {
7419 		err = PTR_ERR(inode);
7420 		goto out_unlock;
7421 	}
7422 
7423 	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
7424 	if (err) {
7425 		drop_inode = 1;
7426 		goto out_unlock;
7427 	}
7428 
7429 	/*
7430 	 * If the active LSM wants to access the inode during
7431 	 * d_instantiate it needs these. Smack checks to see
7432 	 * if the filesystem supports xattrs by looking at the
7433 	 * ops vector.
7434 	 */
7435 	inode->i_fop = &btrfs_file_operations;
7436 	inode->i_op = &btrfs_file_inode_operations;
7437 
7438 	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
7439 	if (err) {
7440 		drop_inode = 1;
7441 	} else {
7442 		inode->i_mapping->a_ops = &btrfs_aops;
7443 		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
7444 		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
7445 	}
7446 	if (drop_inode)
7447 		goto out_unlock;
7448 
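	/*
	 * Build the inline extent that holds the target by hand: insert
	 * an empty EXTENT_DATA item sized for the name and then copy the
	 * string into the inline data area.
	 */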
7449 	path = btrfs_alloc_path();
7450 	if (!path) {
7451 		err = -ENOMEM;
7452 		drop_inode = 1;
7453 		goto out_unlock;
7454 	}
7455 	key.objectid = btrfs_ino(inode);
7456 	key.offset = 0;
7457 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
7458 	datasize = btrfs_file_extent_calc_inline_size(name_len);
7459 	err = btrfs_insert_empty_item(trans, root, path, &key,
7460 				      datasize);
7461 	if (err) {
7462 		drop_inode = 1;
7463 		btrfs_free_path(path);
7464 		goto out_unlock;
7465 	}
7466 	leaf = path->nodes[0];
7467 	ei = btrfs_item_ptr(leaf, path->slots[0],
7468 			    struct btrfs_file_extent_item);
7469 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
7470 	btrfs_set_file_extent_type(leaf, ei,
7471 				   BTRFS_FILE_EXTENT_INLINE);
7472 	btrfs_set_file_extent_encryption(leaf, ei, 0);
7473 	btrfs_set_file_extent_compression(leaf, ei, 0);
7474 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
7475 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
7476 
7477 	ptr = btrfs_file_extent_inline_start(ei);
7478 	write_extent_buffer(leaf, symname, ptr, name_len);
7479 	btrfs_mark_buffer_dirty(leaf);
7480 	btrfs_free_path(path);
7481 
7482 	inode->i_op = &btrfs_symlink_inode_operations;
7483 	inode->i_mapping->a_ops = &btrfs_symlink_aops;
7484 	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
7485 	inode_set_bytes(inode, name_len);
7486 	btrfs_i_size_write(inode, name_len - 1);
7487 	err = btrfs_update_inode(trans, root, inode);
7488 	if (err)
7489 		drop_inode = 1;
7490 
7491 out_unlock:
7492 	if (!err)
7493 		d_instantiate(dentry, inode);
7494 	nr = trans->blocks_used;
7495 	btrfs_end_transaction(trans, root);
7496 	if (drop_inode) {
7497 		inode_dec_link_count(inode);
7498 		iput(inode);
7499 	}
7500 	btrfs_btree_balance_dirty(root, nr);
7501 	return err;
7502 }
7503 
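/*
 * Worker for preallocation (fallocate() and internal callers): loop
 * reserving extents of at least min_size bytes and inserting them as
 * PREALLOC file extents until num_bytes is covered.  When no transaction
 * is supplied, each iteration runs in its own small transaction so a
 * huge preallocation never pins a single transaction open.
 */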
7504 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
7505 				       u64 start, u64 num_bytes, u64 min_size,
7506 				       loff_t actual_len, u64 *alloc_hint,
7507 				       struct btrfs_trans_handle *trans)
7508 {
7509 	struct btrfs_root *root = BTRFS_I(inode)->root;
7510 	struct btrfs_key ins;
7511 	u64 cur_offset = start;
7512 	u64 i_size;
7513 	int ret = 0;
7514 	bool own_trans = true;
7515 
7516 	if (trans)
7517 		own_trans = false;
7518 	while (num_bytes > 0) {
7519 		if (own_trans) {
7520 			trans = btrfs_start_transaction(root, 3);
7521 			if (IS_ERR(trans)) {
7522 				ret = PTR_ERR(trans);
7523 				break;
7524 			}
7525 		}
7526 
7527 		ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
7528 					   0, *alloc_hint, &ins, 1);
7529 		if (ret) {
7530 			if (own_trans)
7531 				btrfs_end_transaction(trans, root);
7532 			break;
7533 		}
7534 
7535 		ret = insert_reserved_file_extent(trans, inode,
7536 						  cur_offset, ins.objectid,
7537 						  ins.offset, ins.offset,
7538 						  ins.offset, 0, 0, 0,
7539 						  BTRFS_FILE_EXTENT_PREALLOC);
7540 		if (ret) {
7541 			btrfs_abort_transaction(trans, root, ret);
7542 			if (own_trans)
7543 				btrfs_end_transaction(trans, root);
7544 			break;
7545 		}
7546 		btrfs_drop_extent_cache(inode, cur_offset,
7547 					cur_offset + ins.offset - 1, 0);
7548 
7549 		num_bytes -= ins.offset;
7550 		cur_offset += ins.offset;
7551 		*alloc_hint = ins.objectid + ins.offset;
7552 
7553 		inode_inc_iversion(inode);
7554 		inode->i_ctime = CURRENT_TIME;
7555 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
7556 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
7557 		    (actual_len > inode->i_size) &&
7558 		    (cur_offset > inode->i_size)) {
7559 			if (cur_offset > actual_len)
7560 				i_size = actual_len;
7561 			else
7562 				i_size = cur_offset;
7563 			i_size_write(inode, i_size);
7564 			btrfs_ordered_update_i_size(inode, i_size, NULL);
7565 		}
7566 
7567 		ret = btrfs_update_inode(trans, root, inode);
7568 
7569 		if (ret) {
7570 			btrfs_abort_transaction(trans, root, ret);
7571 			if (own_trans)
7572 				btrfs_end_transaction(trans, root);
7573 			break;
7574 		}
7575 
7576 		if (own_trans)
7577 			btrfs_end_transaction(trans, root);
7578 	}
7579 	return ret;
7580 }
7581 
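/*
 * Rough sketch of the fallocate() side of this call (see btrfs_fallocate()
 * in file.c; the exact arguments there may differ):
 *
 *	ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
 *					last_byte - cur_offset,
 *					1 << inode->i_blkbits,
 *					offset + len, &alloc_hint);
 */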
7582 int btrfs_prealloc_file_range(struct inode *inode, int mode,
7583 			      u64 start, u64 num_bytes, u64 min_size,
7584 			      loff_t actual_len, u64 *alloc_hint)
7585 {
7586 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
7587 					   min_size, actual_len, alloc_hint,
7588 					   NULL);
7589 }
7590 
7591 int btrfs_prealloc_file_range_trans(struct inode *inode,
7592 				    struct btrfs_trans_handle *trans, int mode,
7593 				    u64 start, u64 num_bytes, u64 min_size,
7594 				    loff_t actual_len, u64 *alloc_hint)
7595 {
7596 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
7597 					   min_size, actual_len, alloc_hint, trans);
7598 }
7599 
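/*
 * btrfs tracks per-page state in its own extent_io tree rather than in
 * buffer heads, so dirtying a page is just the nobuffers variant.
 */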
7600 static int btrfs_set_page_dirty(struct page *page)
7601 {
7602 	return __set_page_dirty_nobuffers(page);
7603 }
7604 
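/*
 * Layer the btrfs specific write denials (read-only subvolume roots and
 * inodes flagged BTRFS_INODE_READONLY) on top of generic_permission().
 */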
7605 static int btrfs_permission(struct inode *inode, int mask)
7606 {
7607 	struct btrfs_root *root = BTRFS_I(inode)->root;
7608 	umode_t mode = inode->i_mode;
7609 
7610 	if (mask & MAY_WRITE &&
7611 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
7612 		if (btrfs_root_readonly(root))
7613 			return -EROFS;
7614 		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
7615 			return -EACCES;
7616 	}
7617 	return generic_permission(inode, mask);
7618 }
7619 
7620 static const struct inode_operations btrfs_dir_inode_operations = {
7621 	.getattr	= btrfs_getattr,
7622 	.lookup		= btrfs_lookup,
7623 	.create		= btrfs_create,
7624 	.unlink		= btrfs_unlink,
7625 	.link		= btrfs_link,
7626 	.mkdir		= btrfs_mkdir,
7627 	.rmdir		= btrfs_rmdir,
7628 	.rename		= btrfs_rename,
7629 	.symlink	= btrfs_symlink,
7630 	.setattr	= btrfs_setattr,
7631 	.mknod		= btrfs_mknod,
7632 	.setxattr	= btrfs_setxattr,
7633 	.getxattr	= btrfs_getxattr,
7634 	.listxattr	= btrfs_listxattr,
7635 	.removexattr	= btrfs_removexattr,
7636 	.permission	= btrfs_permission,
7637 	.get_acl	= btrfs_get_acl,
7638 };
7639 static const struct inode_operations btrfs_dir_ro_inode_operations = {
7640 	.lookup		= btrfs_lookup,
7641 	.permission	= btrfs_permission,
7642 	.get_acl	= btrfs_get_acl,
7643 };
7644 
7645 static const struct file_operations btrfs_dir_file_operations = {
7646 	.llseek		= generic_file_llseek,
7647 	.read		= generic_read_dir,
7648 	.readdir	= btrfs_real_readdir,
7649 	.unlocked_ioctl	= btrfs_ioctl,
7650 #ifdef CONFIG_COMPAT
7651 	.compat_ioctl	= btrfs_ioctl,
7652 #endif
7653 	.release        = btrfs_release_file,
7654 	.fsync		= btrfs_sync_file,
7655 };
7656 
7657 static struct extent_io_ops btrfs_extent_io_ops = {
7658 	.fill_delalloc = run_delalloc_range,
7659 	.submit_bio_hook = btrfs_submit_bio_hook,
7660 	.merge_bio_hook = btrfs_merge_bio_hook,
7661 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
7662 	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
7663 	.writepage_start_hook = btrfs_writepage_start_hook,
7664 	.set_bit_hook = btrfs_set_bit_hook,
7665 	.clear_bit_hook = btrfs_clear_bit_hook,
7666 	.merge_extent_hook = btrfs_merge_extent_hook,
7667 	.split_extent_hook = btrfs_split_extent_hook,
7668 };
7669 
7670 /*
7671  * btrfs doesn't support the bmap operation because swapfiles
7672  * use bmap to make a mapping of extents in the file.  They assume
7673  * these extents won't change over the life of the file and they
7674  * use the bmap result to do IO directly to the drive.
7675  *
7676  * the btrfs bmap call would return logical addresses that aren't
7677  * suitable for IO and they also will change frequently as COW
7678  * operations happen.  So, swapfile + btrfs == corruption.
7679  *
7680  * For now we're avoiding this by dropping bmap.
7681  */
7682 static const struct address_space_operations btrfs_aops = {
7683 	.readpage	= btrfs_readpage,
7684 	.writepage	= btrfs_writepage,
7685 	.writepages	= btrfs_writepages,
7686 	.readpages	= btrfs_readpages,
7687 	.direct_IO	= btrfs_direct_IO,
7688 	.invalidatepage = btrfs_invalidatepage,
7689 	.releasepage	= btrfs_releasepage,
7690 	.set_page_dirty	= btrfs_set_page_dirty,
7691 	.error_remove_page = generic_error_remove_page,
7692 };
7693 
7694 static const struct address_space_operations btrfs_symlink_aops = {
7695 	.readpage	= btrfs_readpage,
7696 	.writepage	= btrfs_writepage,
7697 	.invalidatepage = btrfs_invalidatepage,
7698 	.releasepage	= btrfs_releasepage,
7699 };
7700 
7701 static const struct inode_operations btrfs_file_inode_operations = {
7702 	.getattr	= btrfs_getattr,
7703 	.setattr	= btrfs_setattr,
7704 	.setxattr	= btrfs_setxattr,
7705 	.getxattr	= btrfs_getxattr,
7706 	.listxattr      = btrfs_listxattr,
7707 	.removexattr	= btrfs_removexattr,
7708 	.permission	= btrfs_permission,
7709 	.fiemap		= btrfs_fiemap,
7710 	.get_acl	= btrfs_get_acl,
7711 	.update_time	= btrfs_update_time,
7712 };
7713 static const struct inode_operations btrfs_special_inode_operations = {
7714 	.getattr	= btrfs_getattr,
7715 	.setattr	= btrfs_setattr,
7716 	.permission	= btrfs_permission,
7717 	.setxattr	= btrfs_setxattr,
7718 	.getxattr	= btrfs_getxattr,
7719 	.listxattr	= btrfs_listxattr,
7720 	.removexattr	= btrfs_removexattr,
7721 	.get_acl	= btrfs_get_acl,
7722 	.update_time	= btrfs_update_time,
7723 };
7724 static const struct inode_operations btrfs_symlink_inode_operations = {
7725 	.readlink	= generic_readlink,
7726 	.follow_link	= page_follow_link_light,
7727 	.put_link	= page_put_link,
7728 	.getattr	= btrfs_getattr,
7729 	.setattr	= btrfs_setattr,
7730 	.permission	= btrfs_permission,
7731 	.setxattr	= btrfs_setxattr,
7732 	.getxattr	= btrfs_getxattr,
7733 	.listxattr	= btrfs_listxattr,
7734 	.removexattr	= btrfs_removexattr,
7735 	.get_acl	= btrfs_get_acl,
7736 	.update_time	= btrfs_update_time,
7737 };
7738 
7739 const struct dentry_operations btrfs_dentry_operations = {
7740 	.d_delete	= btrfs_dentry_delete,
7741 	.d_release	= btrfs_dentry_release,
7742 };
7743