xref: /linux/fs/nilfs2/inode.c (revision 9d796e66230205cd3366f5660387bd9ecca9d336)
1 /*
2  * inode.c - NILFS inode operations.
3  *
4  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
19  *
20  * Written by Ryusuke Konishi <ryusuke@osrg.net>
21  *
22  */
23 
24 #include <linux/buffer_head.h>
25 #include <linux/gfp.h>
26 #include <linux/mpage.h>
27 #include <linux/pagemap.h>
28 #include <linux/writeback.h>
29 #include <linux/uio.h>
30 #include "nilfs.h"
31 #include "btnode.h"
32 #include "segment.h"
33 #include "page.h"
34 #include "mdt.h"
35 #include "cpfile.h"
36 #include "ifile.h"
37 
38 /**
39  * struct nilfs_iget_args - arguments used during comparison between inodes
40  * @ino: inode number
41  * @cno: checkpoint number
42  * @root: pointer to the NILFS root object (mounted checkpoint)
43  * @for_gc: inode for GC flag
44  */
45 struct nilfs_iget_args {
46 	u64 ino;
47 	__u64 cno;
48 	struct nilfs_root *root;
49 	int for_gc;
50 };
51 
52 static int nilfs_iget_test(struct inode *inode, void *opaque);
53 
54 void nilfs_inode_add_blocks(struct inode *inode, int n)
55 {
56 	struct nilfs_root *root = NILFS_I(inode)->i_root;
57 
58 	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
59 	if (root)
60 		atomic64_add(n, &root->blocks_count);
61 }
62 
63 void nilfs_inode_sub_blocks(struct inode *inode, int n)
64 {
65 	struct nilfs_root *root = NILFS_I(inode)->i_root;
66 
67 	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
68 	if (root)
69 		atomic64_sub(n, &root->blocks_count);
70 }
71 
72 /**
73  * nilfs_get_block() - get a file block on the filesystem (callback function)
74  * @inode: inode struct of the target file
75  * @blkoff: file block number
76  * @bh_result: buffer head to be mapped on
77  * @create: indicate whether to allocate the block when it has not been
78  *      allocated yet
79  *
80  * This function does not issue actual read requests for the specified data
81  * block; that is done by the VFS.
82  */
83 int nilfs_get_block(struct inode *inode, sector_t blkoff,
84 		    struct buffer_head *bh_result, int create)
85 {
86 	struct nilfs_inode_info *ii = NILFS_I(inode);
87 	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
88 	__u64 blknum = 0;
89 	int err = 0, ret;
90 	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;
91 
92 	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
93 	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
94 	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
95 	if (ret >= 0) {	/* found */
96 		map_bh(bh_result, inode->i_sb, blknum);
97 		if (ret > 0)
98 			bh_result->b_size = (ret << inode->i_blkbits);
99 		goto out;
100 	}
101 	/* data block was not found */
102 	if (ret == -ENOENT && create) {
103 		struct nilfs_transaction_info ti;
104 
105 		bh_result->b_blocknr = 0;
106 		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
107 		if (unlikely(err))
108 			goto out;
109 		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
110 					(unsigned long)bh_result);
111 		if (unlikely(err != 0)) {
112 			if (err == -EEXIST) {
113 				/*
114 				 * The get_block() function could be called
115 				 * from multiple callers for an inode.
116 				 * However, the page having this block must
117 				 * be locked in this case.
118 				 */
119 				printk(KERN_WARNING
120 				       "nilfs_get_block: a race condition "
121 				       "while inserting a data block. "
122 				       "(inode number=%lu, file block "
123 				       "offset=%llu)\n",
124 				       inode->i_ino,
125 				       (unsigned long long)blkoff);
126 				err = 0;
127 			}
128 			nilfs_transaction_abort(inode->i_sb);
129 			goto out;
130 		}
131 		nilfs_mark_inode_dirty_sync(inode);
132 		nilfs_transaction_commit(inode->i_sb); /* never fails */
133 		/* Error handling should be detailed */
134 		set_buffer_new(bh_result);
135 		set_buffer_delay(bh_result);
136 		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
137 						      to proper value */
138 	} else if (ret == -ENOENT) {
139 		/* not found is not an error (e.g. a hole); must return without
140 		   the mapped state flag. */
141 		;
142 	} else {
143 		err = ret;
144 	}
145 
146  out:
147 	return err;
148 }
149 
150 /**
151  * nilfs_readpage() - implement readpage() method of nilfs_aops {}
152  * address_space_operations.
153  * @file: file struct of the file to be read
154  * @page: the page to be read
155  */
156 static int nilfs_readpage(struct file *file, struct page *page)
157 {
158 	return mpage_readpage(page, nilfs_get_block);
159 }
160 
161 /**
162  * nilfs_readpages() - implement readpages() method of nilfs_aops {}
163  * address_space_operations.
164  * @file: file struct of the file to be read
165  * @mapping: address_space struct used for reading multiple pages
166  * @pages: the pages to be read
167  * @nr_pages: number of pages to be read
168  */
169 static int nilfs_readpages(struct file *file, struct address_space *mapping,
170 			   struct list_head *pages, unsigned nr_pages)
171 {
172 	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
173 }
174 
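/**
 * nilfs_writepages() - implement writepages() method of nilfs_aops {}
 * address_space_operations.
 * @mapping: address space containing the dirty pages
 * @wbc: writeback control
 *
 * When @wbc->sync_mode is WB_SYNC_ALL, a data-sync segment is constructed
 * for the range given by @wbc; otherwise writing is left to the segment
 * constructor.  On a read-only mount the dirty pages are discarded and
 * -EROFS is returned.
 */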
175 static int nilfs_writepages(struct address_space *mapping,
176 			    struct writeback_control *wbc)
177 {
178 	struct inode *inode = mapping->host;
179 	int err = 0;
180 
181 	if (inode->i_sb->s_flags & MS_RDONLY) {
182 		nilfs_clear_dirty_pages(mapping, false);
183 		return -EROFS;
184 	}
185 
186 	if (wbc->sync_mode == WB_SYNC_ALL)
187 		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
188 						    wbc->range_start,
189 						    wbc->range_end);
190 	return err;
191 }
192 
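/**
 * nilfs_writepage() - implement writepage() method of nilfs_aops {}
 * address_space_operations.
 * @page: the dirty page
 * @wbc: writeback control
 *
 * The page is redirtied and unlocked because log writing is handled by the
 * segment constructor; for WB_SYNC_ALL a segment is constructed immediately,
 * and for reclaim a flush of the segment containing this inode is requested.
 * On a read-only mount the page is simply discarded.
 */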
193 static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
194 {
195 	struct inode *inode = page->mapping->host;
196 	int err;
197 
198 	if (inode->i_sb->s_flags & MS_RDONLY) {
199 		/*
200 		 * This means that the filesystem was remounted read-only
201 		 * because of an error or metadata corruption, but dirty
202 		 * pages are still being flushed in the background.  Simply
203 		 * discard this dirty page here.
204 		 */
205 		nilfs_clear_dirty_page(page, false);
206 		unlock_page(page);
207 		return -EROFS;
208 	}
209 
210 	redirty_page_for_writepage(wbc, page);
211 	unlock_page(page);
212 
213 	if (wbc->sync_mode == WB_SYNC_ALL) {
214 		err = nilfs_construct_segment(inode->i_sb);
215 		if (unlikely(err))
216 			return err;
217 	} else if (wbc->for_reclaim)
218 		nilfs_flush_segment(inode->i_sb, inode->i_ino);
219 
220 	return 0;
221 }
222 
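/**
 * nilfs_set_page_dirty() - implement set_page_dirty() method of nilfs_aops {}
 * address_space_operations.
 * @page: the page to be marked dirty
 *
 * In addition to marking the page dirty, the mapped buffers of the page are
 * dirtied (hole blocks are skipped) and the number of newly dirtied blocks
 * is accounted through nilfs_set_file_dirty() so that the segment
 * constructor picks the file up.
 */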
223 static int nilfs_set_page_dirty(struct page *page)
224 {
225 	struct inode *inode = page->mapping->host;
226 	int ret = __set_page_dirty_nobuffers(page);
227 
228 	if (page_has_buffers(page)) {
229 		unsigned nr_dirty = 0;
230 		struct buffer_head *bh, *head;
231 
232 		/*
233 		 * This page is locked by the callers, and no other thread
234 		 * concurrently marks its buffers dirty, since the buffers
235 		 * are only dirtied through routines in fs/buffer.c, where
236 		 * the call sites of mark_buffer_dirty() are protected by
237 		 * the page lock.
238 		 */
239 		bh = head = page_buffers(page);
240 		do {
241 			/* Do not mark hole blocks dirty */
242 			if (buffer_dirty(bh) || !buffer_mapped(bh))
243 				continue;
244 
245 			set_buffer_dirty(bh);
246 			nr_dirty++;
247 		} while (bh = bh->b_this_page, bh != head);
248 
249 		if (nr_dirty)
250 			nilfs_set_file_dirty(inode, nr_dirty);
251 	} else if (ret) {
252 		unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
253 
254 		nilfs_set_file_dirty(inode, nr_dirty);
255 	}
256 	return ret;
257 }
258 
259 void nilfs_write_failed(struct address_space *mapping, loff_t to)
260 {
261 	struct inode *inode = mapping->host;
262 
263 	if (to > inode->i_size) {
264 		truncate_pagecache(inode, inode->i_size);
265 		nilfs_truncate(inode);
266 	}
267 }
268 
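/*
 * nilfs_write_begin() and nilfs_write_end() wrap the generic buffered-write
 * helpers in a NILFS transaction: write_begin starts the transaction, and
 * write_end counts the buffers newly dirtied by the copy, marks the file
 * dirty, and commits the transaction.
 */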
269 static int nilfs_write_begin(struct file *file, struct address_space *mapping,
270 			     loff_t pos, unsigned len, unsigned flags,
271 			     struct page **pagep, void **fsdata)
272 
273 {
274 	struct inode *inode = mapping->host;
275 	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);
276 
277 	if (unlikely(err))
278 		return err;
279 
280 	err = block_write_begin(mapping, pos, len, flags, pagep,
281 				nilfs_get_block);
282 	if (unlikely(err)) {
283 		nilfs_write_failed(mapping, pos + len);
284 		nilfs_transaction_abort(inode->i_sb);
285 	}
286 	return err;
287 }
288 
289 static int nilfs_write_end(struct file *file, struct address_space *mapping,
290 			   loff_t pos, unsigned len, unsigned copied,
291 			   struct page *page, void *fsdata)
292 {
293 	struct inode *inode = mapping->host;
294 	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
295 	unsigned nr_dirty;
296 	int err;
297 
298 	nr_dirty = nilfs_page_count_clean_buffers(page, start,
299 						  start + copied);
300 	copied = generic_write_end(file, mapping, pos, len, copied, page,
301 				   fsdata);
302 	nilfs_set_file_dirty(inode, nr_dirty);
303 	err = nilfs_transaction_commit(inode->i_sb);
304 	return err ? : copied;
305 }
306 
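/*
 * Direct I/O is supported for reads only; writes return 0 here so that the
 * caller falls back to buffered writes.  Reads go through
 * blockdev_direct_IO() using nilfs_get_block().
 */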
307 static ssize_t
308 nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
309 		loff_t offset)
310 {
311 	struct file *file = iocb->ki_filp;
312 	struct address_space *mapping = file->f_mapping;
313 	struct inode *inode = file->f_mapping->host;
314 	size_t count = iov_iter_count(iter);
315 	ssize_t size;
316 
317 	if (rw == WRITE)
318 		return 0;
319 
320 	/* Needs synchronization with the cleaner */
321 	size = blockdev_direct_IO(rw, iocb, inode, iter, offset,
322 				  nilfs_get_block);
323 
324 	/*
325 	 * In case of error extending write may have instantiated a few
326 	 * blocks outside i_size. Trim these off again.
327 	 */
328 	if (unlikely((rw & WRITE) && size < 0)) {
329 		loff_t isize = i_size_read(inode);
330 		loff_t end = offset + count;
331 
332 		if (end > isize)
333 			nilfs_write_failed(mapping, end);
334 	}
335 
336 	return size;
337 }
338 
339 const struct address_space_operations nilfs_aops = {
340 	.writepage		= nilfs_writepage,
341 	.readpage		= nilfs_readpage,
342 	.writepages		= nilfs_writepages,
343 	.set_page_dirty		= nilfs_set_page_dirty,
344 	.readpages		= nilfs_readpages,
345 	.write_begin		= nilfs_write_begin,
346 	.write_end		= nilfs_write_end,
347 	/* .releasepage		= nilfs_releasepage, */
348 	.invalidatepage		= block_invalidatepage,
349 	.direct_IO		= nilfs_direct_IO,
350 	.is_partially_uptodate  = block_is_partially_uptodate,
351 };
352 
353 static int nilfs_insert_inode_locked(struct inode *inode,
354 				     struct nilfs_root *root,
355 				     unsigned long ino)
356 {
357 	struct nilfs_iget_args args = {
358 		.ino = ino, .root = root, .cno = 0, .for_gc = 0
359 	};
360 
361 	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
362 }
363 
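/**
 * nilfs_new_inode - allocate a new inode
 * @dir: parent directory inode
 * @mode: file mode of the new inode
 *
 * Allocates an on-disk inode in the ifile of the checkpoint that @dir
 * belongs to, initializes the in-core inode (owner, timestamps, inherited
 * flags, generation number, and bmap for regular files, directories, and
 * symlinks), and inserts it into the inode hash in the locked, new state.
 *
 * Return: a pointer to the new inode on success, or an ERR_PTR() on failure.
 */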
364 struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
365 {
366 	struct super_block *sb = dir->i_sb;
367 	struct the_nilfs *nilfs = sb->s_fs_info;
368 	struct inode *inode;
369 	struct nilfs_inode_info *ii;
370 	struct nilfs_root *root;
371 	int err = -ENOMEM;
372 	ino_t ino;
373 
374 	inode = new_inode(sb);
375 	if (unlikely(!inode))
376 		goto failed;
377 
378 	mapping_set_gfp_mask(inode->i_mapping,
379 			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
380 
381 	root = NILFS_I(dir)->i_root;
382 	ii = NILFS_I(inode);
383 	ii->i_state = 1 << NILFS_I_NEW;
384 	ii->i_root = root;
385 
386 	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
387 	if (unlikely(err))
388 		goto failed_ifile_create_inode;
389 	/* reference count of i_bh inherits from nilfs_mdt_read_block() */
390 
391 	atomic64_inc(&root->inodes_count);
392 	inode_init_owner(inode, dir, mode);
393 	inode->i_ino = ino;
394 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
395 
396 	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
397 		err = nilfs_bmap_read(ii->i_bmap, NULL);
398 		if (err < 0)
399 			goto failed_after_creation;
400 
401 		set_bit(NILFS_I_BMAP, &ii->i_state);
402 		/* No lock is needed; iget() ensures it. */
403 	}
404 
405 	ii->i_flags = nilfs_mask_flags(
406 		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);
407 
408 	/* ii->i_file_acl = 0; */
409 	/* ii->i_dir_acl = 0; */
410 	ii->i_dir_start_lookup = 0;
411 	nilfs_set_inode_flags(inode);
412 	spin_lock(&nilfs->ns_next_gen_lock);
413 	inode->i_generation = nilfs->ns_next_generation++;
414 	spin_unlock(&nilfs->ns_next_gen_lock);
415 	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
416 		err = -EIO;
417 		goto failed_after_creation;
418 	}
419 
420 	err = nilfs_init_acl(inode, dir);
421 	if (unlikely(err))
422 		goto failed_after_creation; /* never occurs. When supporting
423 				    nilfs_init_acl(), proper cancellation of
424 				    the above jobs should be considered */
425 
426 	return inode;
427 
428  failed_after_creation:
429 	clear_nlink(inode);
430 	unlock_new_inode(inode);
431 	iput(inode);  /* raw_inode will be deleted through
432 			 nilfs_evict_inode() */
433 	goto failed;
434 
435  failed_ifile_create_inode:
436 	make_bad_inode(inode);
437 	iput(inode);  /* the bad inode will be disposed of through
438 			 nilfs_evict_inode() */
439  failed:
440 	return ERR_PTR(err);
441 }
442 
443 void nilfs_set_inode_flags(struct inode *inode)
444 {
445 	unsigned int flags = NILFS_I(inode)->i_flags;
446 	unsigned int new_fl = 0;
447 
448 	if (flags & FS_SYNC_FL)
449 		new_fl |= S_SYNC;
450 	if (flags & FS_APPEND_FL)
451 		new_fl |= S_APPEND;
452 	if (flags & FS_IMMUTABLE_FL)
453 		new_fl |= S_IMMUTABLE;
454 	if (flags & FS_NOATIME_FL)
455 		new_fl |= S_NOATIME;
456 	if (flags & FS_DIRSYNC_FL)
457 		new_fl |= S_DIRSYNC;
458 	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
459 			S_NOATIME | S_DIRSYNC);
460 }
461 
462 int nilfs_read_inode_common(struct inode *inode,
463 			    struct nilfs_inode *raw_inode)
464 {
465 	struct nilfs_inode_info *ii = NILFS_I(inode);
466 	int err;
467 
468 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
469 	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
470 	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
471 	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
472 	inode->i_size = le64_to_cpu(raw_inode->i_size);
473 	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
474 	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
475 	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
476 	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
477 	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
478 	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
479 	if (inode->i_nlink == 0)
480 		return -ESTALE; /* this inode is deleted */
481 
482 	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
483 	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
484 #if 0
485 	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
486 	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
487 		0 : le32_to_cpu(raw_inode->i_dir_acl);
488 #endif
489 	ii->i_dir_start_lookup = 0;
490 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
491 
492 	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
493 	    S_ISLNK(inode->i_mode)) {
494 		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
495 		if (err < 0)
496 			return err;
497 		set_bit(NILFS_I_BMAP, &ii->i_state);
498 		/* No lock is needed; iget() ensures it. */
499 	}
500 	return 0;
501 }
502 
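/**
 * __nilfs_read_inode - read an on-disk inode into an in-core inode
 * @sb: super block instance
 * @root: NILFS root object (mounted checkpoint) to read from
 * @ino: inode number
 * @inode: partially initialized in-core inode
 *
 * Reads the raw inode from the ifile of @root, copies its fields with
 * nilfs_read_inode_common(), and sets the inode and address space
 * operations according to the file type.
 */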
503 static int __nilfs_read_inode(struct super_block *sb,
504 			      struct nilfs_root *root, unsigned long ino,
505 			      struct inode *inode)
506 {
507 	struct the_nilfs *nilfs = sb->s_fs_info;
508 	struct buffer_head *bh;
509 	struct nilfs_inode *raw_inode;
510 	int err;
511 
512 	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
513 	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
514 	if (unlikely(err))
515 		goto bad_inode;
516 
517 	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);
518 
519 	err = nilfs_read_inode_common(inode, raw_inode);
520 	if (err)
521 		goto failed_unmap;
522 
523 	if (S_ISREG(inode->i_mode)) {
524 		inode->i_op = &nilfs_file_inode_operations;
525 		inode->i_fop = &nilfs_file_operations;
526 		inode->i_mapping->a_ops = &nilfs_aops;
527 	} else if (S_ISDIR(inode->i_mode)) {
528 		inode->i_op = &nilfs_dir_inode_operations;
529 		inode->i_fop = &nilfs_dir_operations;
530 		inode->i_mapping->a_ops = &nilfs_aops;
531 	} else if (S_ISLNK(inode->i_mode)) {
532 		inode->i_op = &nilfs_symlink_inode_operations;
533 		inode->i_mapping->a_ops = &nilfs_aops;
534 	} else {
535 		inode->i_op = &nilfs_special_inode_operations;
536 		init_special_inode(
537 			inode, inode->i_mode,
538 			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
539 	}
540 	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
541 	brelse(bh);
542 	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
543 	nilfs_set_inode_flags(inode);
544 	mapping_set_gfp_mask(inode->i_mapping,
545 			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
546 	return 0;
547 
548  failed_unmap:
549 	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
550 	brelse(bh);
551 
552  bad_inode:
553 	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
554 	return err;
555 }
556 
557 static int nilfs_iget_test(struct inode *inode, void *opaque)
558 {
559 	struct nilfs_iget_args *args = opaque;
560 	struct nilfs_inode_info *ii;
561 
562 	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
563 		return 0;
564 
565 	ii = NILFS_I(inode);
566 	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
567 		return !args->for_gc;
568 
569 	return args->for_gc && args->cno == ii->i_cno;
570 }
571 
572 static int nilfs_iget_set(struct inode *inode, void *opaque)
573 {
574 	struct nilfs_iget_args *args = opaque;
575 
576 	inode->i_ino = args->ino;
577 	if (args->for_gc) {
578 		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
579 		NILFS_I(inode)->i_cno = args->cno;
580 		NILFS_I(inode)->i_root = NULL;
581 	} else {
582 		if (args->root && args->ino == NILFS_ROOT_INO)
583 			nilfs_get_root(args->root);
584 		NILFS_I(inode)->i_root = args->root;
585 	}
586 	return 0;
587 }
588 
589 struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
590 			    unsigned long ino)
591 {
592 	struct nilfs_iget_args args = {
593 		.ino = ino, .root = root, .cno = 0, .for_gc = 0
594 	};
595 
596 	return ilookup5(sb, ino, nilfs_iget_test, &args);
597 }
598 
599 struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
600 				unsigned long ino)
601 {
602 	struct nilfs_iget_args args = {
603 		.ino = ino, .root = root, .cno = 0, .for_gc = 0
604 	};
605 
606 	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
607 }
608 
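/**
 * nilfs_iget - look up or read an inode
 * @sb: super block instance
 * @root: NILFS root object (mounted checkpoint)
 * @ino: inode number
 *
 * Returns the in-core inode for (@root, @ino), reading it from the ifile
 * with __nilfs_read_inode() if it is not already cached.
 *
 * Return: a pointer to the inode on success, or an ERR_PTR() on failure.
 */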
609 struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
610 			 unsigned long ino)
611 {
612 	struct inode *inode;
613 	int err;
614 
615 	inode = nilfs_iget_locked(sb, root, ino);
616 	if (unlikely(!inode))
617 		return ERR_PTR(-ENOMEM);
618 	if (!(inode->i_state & I_NEW))
619 		return inode;
620 
621 	err = __nilfs_read_inode(sb, root, ino, inode);
622 	if (unlikely(err)) {
623 		iget_failed(inode);
624 		return ERR_PTR(err);
625 	}
626 	unlock_new_inode(inode);
627 	return inode;
628 }
629 
630 struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
631 				__u64 cno)
632 {
633 	struct nilfs_iget_args args = {
634 		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
635 	};
636 	struct inode *inode;
637 	int err;
638 
639 	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
640 	if (unlikely(!inode))
641 		return ERR_PTR(-ENOMEM);
642 	if (!(inode->i_state & I_NEW))
643 		return inode;
644 
645 	err = nilfs_init_gcinode(inode);
646 	if (unlikely(err)) {
647 		iget_failed(inode);
648 		return ERR_PTR(err);
649 	}
650 	unlock_new_inode(inode);
651 	return inode;
652 }
653 
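/*
 * nilfs_write_inode_common() copies the fields of the in-core inode into the
 * on-disk inode @raw_inode.  For metadata files stored in the super root,
 * the unused tail of the inode entry is zero-filled; bmap data is written
 * when @has_bmap is set, and the device code is written for device nodes.
 */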
654 void nilfs_write_inode_common(struct inode *inode,
655 			      struct nilfs_inode *raw_inode, int has_bmap)
656 {
657 	struct nilfs_inode_info *ii = NILFS_I(inode);
658 
659 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
660 	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
661 	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
662 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
663 	raw_inode->i_size = cpu_to_le64(inode->i_size);
664 	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
665 	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
666 	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
667 	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
668 	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);
669 
670 	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
671 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
672 
673 	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
674 		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
675 
676 		/* zero-fill unused portion in the case of super root block */
677 		raw_inode->i_xattr = 0;
678 		raw_inode->i_pad = 0;
679 		memset((void *)raw_inode + sizeof(*raw_inode), 0,
680 		       nilfs->ns_inode_size - sizeof(*raw_inode));
681 	}
682 
683 	if (has_bmap)
684 		nilfs_bmap_write(ii->i_bmap, raw_inode);
685 	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
686 		raw_inode->i_device_code =
687 			cpu_to_le64(huge_encode_dev(inode->i_rdev));
688 	/* When extending the on-disk inode, nilfs->ns_inode_size should be
689 	   checked for substitutions of appended fields */
690 }
691 
692 void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
693 {
694 	ino_t ino = inode->i_ino;
695 	struct nilfs_inode_info *ii = NILFS_I(inode);
696 	struct inode *ifile = ii->i_root->ifile;
697 	struct nilfs_inode *raw_inode;
698 
699 	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);
700 
701 	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
702 		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
703 	if (flags & I_DIRTY_DATASYNC)
704 		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);
705 
706 	nilfs_write_inode_common(inode, raw_inode, 0);
707 		/* XXX: calling with has_bmap = 0 is a workaround to avoid a
708 		   bmap deadlock.  This delays the update of i_bmap until just
709 		   before writing */
710 	nilfs_ifile_unmap_inode(ifile, ino, ibh);
711 }
712 
713 #define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */
714 
715 static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
716 				unsigned long from)
717 {
718 	__u64 b;
719 	int ret;
720 
721 	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
722 		return;
723 repeat:
724 	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
725 	if (ret == -ENOENT)
726 		return;
727 	else if (ret < 0)
728 		goto failed;
729 
730 	if (b < from)
731 		return;
732 
733 	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
734 	ret = nilfs_bmap_truncate(ii->i_bmap, b);
735 	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
736 	if (!ret || (ret == -ENOMEM &&
737 		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
738 		goto repeat;
739 
740 failed:
741 	nilfs_warning(ii->vfs_inode.i_sb, __func__,
742 		      "failed to truncate bmap (ino=%lu, err=%d)",
743 		      ii->vfs_inode.i_ino, ret);
744 }
745 
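/*
 * nilfs_truncate() disposes of the blocks beyond the current i_size: the
 * partial block at the end of the file is zeroed out, the bmap is truncated
 * starting at the first block beyond the new size, and the change is
 * committed as a transaction.  Inodes without a bmap and append-only or
 * immutable inodes are left untouched.
 */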
746 void nilfs_truncate(struct inode *inode)
747 {
748 	unsigned long blkoff;
749 	unsigned int blocksize;
750 	struct nilfs_transaction_info ti;
751 	struct super_block *sb = inode->i_sb;
752 	struct nilfs_inode_info *ii = NILFS_I(inode);
753 
754 	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
755 		return;
756 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
757 		return;
758 
759 	blocksize = sb->s_blocksize;
760 	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
761 	nilfs_transaction_begin(sb, &ti, 0); /* never fails */
762 
763 	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);
764 
765 	nilfs_truncate_bmap(ii, blkoff);
766 
767 	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
768 	if (IS_SYNC(inode))
769 		nilfs_set_transaction_flag(NILFS_TI_SYNC);
770 
771 	nilfs_mark_inode_dirty(inode);
772 	nilfs_set_file_dirty(inode, 0);
773 	nilfs_transaction_commit(sb);
774 	/* This may construct a logical segment and fail in sync mode,
775 	   but truncate has no return value. */
776 }
777 
778 static void nilfs_clear_inode(struct inode *inode)
779 {
780 	struct nilfs_inode_info *ii = NILFS_I(inode);
781 	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
782 
783 	/*
784 	 * Free resources allocated in __nilfs_read_inode() here.
785 	 */
786 	BUG_ON(!list_empty(&ii->i_dirty));
787 	brelse(ii->i_bh);
788 	ii->i_bh = NULL;
789 
790 	if (mdi && mdi->mi_palloc_cache)
791 		nilfs_palloc_destroy_cache(inode);
792 
793 	if (test_bit(NILFS_I_BMAP, &ii->i_state))
794 		nilfs_bmap_clear(ii->i_bmap);
795 
796 	nilfs_btnode_cache_clear(&ii->i_btnode_cache);
797 
798 	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
799 		nilfs_put_root(ii->i_root);
800 }
801 
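/*
 * nilfs_evict_inode() is called when the last reference to the inode is
 * dropped.  Inodes that are still linked, GC inodes, and bad inodes only
 * have their page cache truncated and their resources released.  For
 * deleted inodes, the bmap is truncated to zero, the on-disk inode is
 * removed from the ifile, and the deletion is committed as a transaction.
 */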
802 void nilfs_evict_inode(struct inode *inode)
803 {
804 	struct nilfs_transaction_info ti;
805 	struct super_block *sb = inode->i_sb;
806 	struct nilfs_inode_info *ii = NILFS_I(inode);
807 	int ret;
808 
809 	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
810 		truncate_inode_pages_final(&inode->i_data);
811 		clear_inode(inode);
812 		nilfs_clear_inode(inode);
813 		return;
814 	}
815 	nilfs_transaction_begin(sb, &ti, 0); /* never fails */
816 
817 	truncate_inode_pages_final(&inode->i_data);
818 
819 	/* TODO: some of the following operations may fail.  */
820 	nilfs_truncate_bmap(ii, 0);
821 	nilfs_mark_inode_dirty(inode);
822 	clear_inode(inode);
823 
824 	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
825 	if (!ret)
826 		atomic64_dec(&ii->i_root->inodes_count);
827 
828 	nilfs_clear_inode(inode);
829 
830 	if (IS_SYNC(inode))
831 		nilfs_set_transaction_flag(NILFS_TI_SYNC);
832 	nilfs_transaction_commit(sb);
833 	/* This may construct a logical segment and fail in sync mode,
834 	   but delete_inode has no return value. */
835 }
836 
837 int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
838 {
839 	struct nilfs_transaction_info ti;
840 	struct inode *inode = dentry->d_inode;
841 	struct super_block *sb = inode->i_sb;
842 	int err;
843 
844 	err = inode_change_ok(inode, iattr);
845 	if (err)
846 		return err;
847 
848 	err = nilfs_transaction_begin(sb, &ti, 0);
849 	if (unlikely(err))
850 		return err;
851 
852 	if ((iattr->ia_valid & ATTR_SIZE) &&
853 	    iattr->ia_size != i_size_read(inode)) {
854 		inode_dio_wait(inode);
855 		truncate_setsize(inode, iattr->ia_size);
856 		nilfs_truncate(inode);
857 	}
858 
859 	setattr_copy(inode, iattr);
860 	mark_inode_dirty(inode);
861 
862 	if (iattr->ia_valid & ATTR_MODE) {
863 		err = nilfs_acl_chmod(inode);
864 		if (unlikely(err))
865 			goto out_err;
866 	}
867 
868 	return nilfs_transaction_commit(sb);
869 
870 out_err:
871 	nilfs_transaction_abort(sb);
872 	return err;
873 }
874 
875 int nilfs_permission(struct inode *inode, int mask)
876 {
877 	struct nilfs_root *root = NILFS_I(inode)->i_root;
878 	if ((mask & MAY_WRITE) && root &&
879 	    root->cno != NILFS_CPTREE_CURRENT_CNO)
880 		return -EROFS; /* snapshot is not writable */
881 
882 	return generic_permission(inode, mask);
883 }
884 
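/*
 * nilfs_load_inode_block() looks up the ifile block that contains the
 * on-disk inode of @inode, caches the buffer head in ii->i_bh, and returns
 * it in *pbh with an extra reference for the caller.
 */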
885 int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
886 {
887 	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
888 	struct nilfs_inode_info *ii = NILFS_I(inode);
889 	int err;
890 
891 	spin_lock(&nilfs->ns_inode_lock);
892 	if (ii->i_bh == NULL) {
893 		spin_unlock(&nilfs->ns_inode_lock);
894 		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
895 						  inode->i_ino, pbh);
896 		if (unlikely(err))
897 			return err;
898 		spin_lock(&nilfs->ns_inode_lock);
899 		if (ii->i_bh == NULL)
900 			ii->i_bh = *pbh;
901 		else {
902 			brelse(*pbh);
903 			*pbh = ii->i_bh;
904 		}
905 	} else
906 		*pbh = ii->i_bh;
907 
908 	get_bh(*pbh);
909 	spin_unlock(&nilfs->ns_inode_lock);
910 	return 0;
911 }
912 
913 int nilfs_inode_dirty(struct inode *inode)
914 {
915 	struct nilfs_inode_info *ii = NILFS_I(inode);
916 	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
917 	int ret = 0;
918 
919 	if (!list_empty(&ii->i_dirty)) {
920 		spin_lock(&nilfs->ns_inode_lock);
921 		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
922 			test_bit(NILFS_I_BUSY, &ii->i_state);
923 		spin_unlock(&nilfs->ns_inode_lock);
924 	}
925 	return ret;
926 }
927 
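/*
 * nilfs_set_file_dirty() adds @nr_dirty to the global count of dirty blocks
 * and, unless the inode is already queued or in use by the segment
 * constructor, moves it onto the ns_dirty_files list so that it will be
 * included in the next segment construction.
 */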
928 int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
929 {
930 	struct nilfs_inode_info *ii = NILFS_I(inode);
931 	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
932 
933 	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);
934 
935 	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
936 		return 0;
937 
938 	spin_lock(&nilfs->ns_inode_lock);
939 	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
940 	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
941 		/* Because this routine may race with nilfs_dispose_list(),
942 		   we have to check NILFS_I_QUEUED here, too. */
943 		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
944 			/* This will happen when somebody is freeing
945 			   this inode. */
946 			nilfs_warning(inode->i_sb, __func__,
947 				      "cannot get inode (ino=%lu)\n",
948 				      inode->i_ino);
949 			spin_unlock(&nilfs->ns_inode_lock);
950 			return -EINVAL; /* NILFS_I_DIRTY may remain for
951 					   freeing inode */
952 		}
953 		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
954 		set_bit(NILFS_I_QUEUED, &ii->i_state);
955 	}
956 	spin_unlock(&nilfs->ns_inode_lock);
957 	return 0;
958 }
959 
960 int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
961 {
962 	struct buffer_head *ibh;
963 	int err;
964 
965 	err = nilfs_load_inode_block(inode, &ibh);
966 	if (unlikely(err)) {
967 		nilfs_warning(inode->i_sb, __func__,
968 			      "failed to reget inode block.\n");
969 		return err;
970 	}
971 	nilfs_update_inode(inode, ibh, flags);
972 	mark_buffer_dirty(ibh);
973 	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
974 	brelse(ibh);
975 	return 0;
976 }
977 
978 /**
979  * nilfs_dirty_inode - reflect changes on the given inode to an inode block.
980  * @inode: inode of the file to be registered.
981  * @flags: flags to determine the dirty state of the inode
982  *
983  * nilfs_dirty_inode() loads an inode block containing the specified
984  * @inode and copies data from a nilfs_inode to the corresponding inode
985  * entry in the inode block.  This operation is excluded from the segment
986  * construction and may be called alone or within indivisible file operations.
987  */
988 void nilfs_dirty_inode(struct inode *inode, int flags)
989 {
990 	struct nilfs_transaction_info ti;
991 	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
992 
993 	if (is_bad_inode(inode)) {
994 		nilfs_warning(inode->i_sb, __func__,
995 			      "tried to mark bad_inode dirty. ignored.\n");
996 		dump_stack();
997 		return;
998 	}
999 	if (mdi) {
1000 		nilfs_mdt_mark_dirty(inode);
1001 		return;
1002 	}
1003 	nilfs_transaction_begin(inode->i_sb, &ti, 0);
1004 	__nilfs_mark_inode_dirty(inode, flags);
1005 	nilfs_transaction_commit(inode->i_sb); /* never fails */
1006 }
1007 
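/*
 * nilfs_fiemap() implements the FIEMAP ioctl for NILFS.  It walks the file
 * with nilfs_bmap_lookup_contig(), folds in uncommitted (delayed allocation)
 * extents found by nilfs_find_uncommitted_extent(), merges contiguous
 * blocks, and reports the resulting extents through
 * fiemap_fill_next_extent().
 */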
1008 int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1009 		 __u64 start, __u64 len)
1010 {
1011 	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
1012 	__u64 logical = 0, phys = 0, size = 0;
1013 	__u32 flags = 0;
1014 	loff_t isize;
1015 	sector_t blkoff, end_blkoff;
1016 	sector_t delalloc_blkoff;
1017 	unsigned long delalloc_blklen;
1018 	unsigned int blkbits = inode->i_blkbits;
1019 	int ret, n;
1020 
1021 	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
1022 	if (ret)
1023 		return ret;
1024 
1025 	mutex_lock(&inode->i_mutex);
1026 
1027 	isize = i_size_read(inode);
1028 
1029 	blkoff = start >> blkbits;
1030 	end_blkoff = (start + len - 1) >> blkbits;
1031 
1032 	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
1033 							&delalloc_blkoff);
1034 
1035 	do {
1036 		__u64 blkphy;
1037 		unsigned int maxblocks;
1038 
1039 		if (delalloc_blklen && blkoff == delalloc_blkoff) {
1040 			if (size) {
1041 				/* End of the current extent */
1042 				ret = fiemap_fill_next_extent(
1043 					fieinfo, logical, phys, size, flags);
1044 				if (ret)
1045 					break;
1046 			}
1047 			if (blkoff > end_blkoff)
1048 				break;
1049 
1050 			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
1051 			logical = blkoff << blkbits;
1052 			phys = 0;
1053 			size = delalloc_blklen << blkbits;
1054 
1055 			blkoff = delalloc_blkoff + delalloc_blklen;
1056 			delalloc_blklen = nilfs_find_uncommitted_extent(
1057 				inode, blkoff, &delalloc_blkoff);
1058 			continue;
1059 		}
1060 
1061 		/*
1062 		 * Limit the number of blocks that we look up so as
1063 		 * not to get into the next delayed allocation extent.
1064 		 */
1065 		maxblocks = INT_MAX;
1066 		if (delalloc_blklen)
1067 			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
1068 					  maxblocks);
1069 		blkphy = 0;
1070 
1071 		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1072 		n = nilfs_bmap_lookup_contig(
1073 			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
1074 		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
1075 
1076 		if (n < 0) {
1077 			int past_eof;
1078 
1079 			if (unlikely(n != -ENOENT))
1080 				break; /* error */
1081 
1082 			/* HOLE */
1083 			blkoff++;
1084 			past_eof = ((blkoff << blkbits) >= isize);
1085 
1086 			if (size) {
1087 				/* End of the current extent */
1088 
1089 				if (past_eof)
1090 					flags |= FIEMAP_EXTENT_LAST;
1091 
1092 				ret = fiemap_fill_next_extent(
1093 					fieinfo, logical, phys, size, flags);
1094 				if (ret)
1095 					break;
1096 				size = 0;
1097 			}
1098 			if (blkoff > end_blkoff || past_eof)
1099 				break;
1100 		} else {
1101 			if (size) {
1102 				if (phys && blkphy << blkbits == phys + size) {
1103 					/* The current extent goes on */
1104 					size += n << blkbits;
1105 				} else {
1106 					/* Terminate the current extent */
1107 					ret = fiemap_fill_next_extent(
1108 						fieinfo, logical, phys, size,
1109 						flags);
1110 					if (ret || blkoff > end_blkoff)
1111 						break;
1112 
1113 					/* Start another extent */
1114 					flags = FIEMAP_EXTENT_MERGED;
1115 					logical = blkoff << blkbits;
1116 					phys = blkphy << blkbits;
1117 					size = n << blkbits;
1118 				}
1119 			} else {
1120 				/* Start a new extent */
1121 				flags = FIEMAP_EXTENT_MERGED;
1122 				logical = blkoff << blkbits;
1123 				phys = blkphy << blkbits;
1124 				size = n << blkbits;
1125 			}
1126 			blkoff += n;
1127 		}
1128 		cond_resched();
1129 	} while (true);
1130 
1131 	/* If ret is 1 then we just hit the end of the extent array */
1132 	if (ret == 1)
1133 		ret = 0;
1134 
1135 	mutex_unlock(&inode->i_mutex);
1136 	return ret;
1137 }
1138