/* xref: /linux/fs/nilfs2/inode.c (revision 93d90ad708b8da6efc0e487b66111aa9db7f70c7) */
/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/aio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer to the NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}
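
/*
 * Note on the accounting above: block counts are converted to byte
 * counts with (1 << i_blkbits).  For example, with a 4 KiB block size
 * (i_blkbits == 12), adding n == 3 blocks charges 3 * 4096 == 12288
 * bytes via inode_add_bytes().
 */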

/**
 * nilfs_get_block - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: whether to allocate the block if it has not been allocated yet
 *
 * This function does not issue an actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode;
				 * however, the page containing this block
				 * must be locked in that case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		/* The disk block number is set to its proper value later */
		map_bh(bh_result, inode->i_sb, 0);
	} else if (ret == -ENOENT) {
		/*
		 * A missing block is not an error (e.g. a hole); return
		 * without setting the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
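
/*
 * Illustrative sketch (not part of the original source): how a caller
 * of a get_block_t callback such as nilfs_get_block() interprets the
 * result.  The helper below is hypothetical and compiled out; it only
 * demonstrates the buffer-head conventions used above.
 */
#if 0
static void example_get_block_usage(struct inode *inode, sector_t blkoff)
{
	struct buffer_head bh;

	memset(&bh, 0, sizeof(bh));
	bh.b_size = 1 << inode->i_blkbits;	/* look up one block */

	if (nilfs_get_block(inode, blkoff, &bh, 0) == 0) {
		if (buffer_mapped(&bh)) {
			/*
			 * bh.b_blocknr holds the disk block number, and
			 * bh.b_size may have grown to cover a contiguous
			 * run of blocks.
			 */
		} else {
			/* A hole: the caller must treat the range as zeros. */
		}
	}
}
#endif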

/**
 * nilfs_readpage - implement the readpage() method of the nilfs_aops
 * address_space_operations
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages - implement the readpages() method of the nilfs_aops
 * address_space_operations
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		/*
		 * This means the filesystem was remounted read-only
		 * because of an error or metadata corruption, but dirty
		 * pages are still being flushed in the background.
		 * Simply discard this dirty page here.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

static int nilfs_set_page_dirty(struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = __set_page_dirty_nobuffers(page);

	if (page_has_buffers(page)) {
		unsigned nr_dirty = 0;
		struct buffer_head *bh, *head;

		/*
		 * The page is locked by the caller, and no other thread
		 * concurrently marks its buffers dirty: buffers are only
		 * dirtied through routines in fs/buffer.c, where the call
		 * sites of mark_buffer_dirty() are protected by the page
		 * lock.
		 */
		bh = head = page_buffers(page);
		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);

		if (nr_dirty)
			nilfs_set_file_dirty(inode, nr_dirty);
	} else if (ret) {
		unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}
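
/*
 * The two callbacks above bracket each buffered write in a NILFS
 * transaction: nilfs_write_begin() opens the transaction and maps
 * blocks via nilfs_get_block(); the VFS then copies user data into the
 * page; nilfs_write_end() counts the buffers the copy newly dirtied
 * (those still clean in the copied range), folds them into the
 * per-file dirty accounting, and commits the transaction.
 */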

static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
		loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file->f_mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t size;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, iter, offset,
				  nilfs_get_block);

	/*
	 * In case of error, an extending write may have instantiated a
	 * few blocks outside i_size. Trim these off again.
	 */
	if (unlikely((rw & WRITE) && size < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + count;

		if (end > isize)
			nilfs_write_failed(mapping, end);
	}

	return size;
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate  = block_is_partially_uptodate,
};

static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * Never occurs.  When nilfs_init_acl() is supported,
		 * proper cancellation of the above jobs should be
		 * considered.
		 */
		goto failed_after_creation;

	return inode;

 failed_after_creation:
	clear_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);  /* raw_inode will be deleted through
			 nilfs_evict_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & FS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & FS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
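	/*
	 * The on-disk nilfs_inode carries no atime field, so atime is
	 * initialized from the stored mtime here.
	 */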
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

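/*
 * Two kinds of in-core inodes can share an inode number: ordinary
 * inodes attached to a nilfs_root, and GC inodes used while cleaning
 * segments.  nilfs_iget_test() keeps the two apart: a non-GC lookup
 * matches only a non-GC inode, and a GC lookup additionally requires
 * a matching checkpoint number.
 */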
static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
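
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): typical nilfs_iget() usage when resolving a directory entry
 * to an in-core inode.  Compiled out; for exposition only.
 */
#if 0
static struct inode *example_lookup(struct inode *dir, unsigned long ino)
{
	struct inode *inode;

	inode = nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino);
	if (IS_ERR(inode))
		return inode;	/* -ENOMEM or a read error from disk */
	/* The inode is fully read in and unlocked at this point. */
	return inode;
}
#endif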

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/*
	 * When extending the on-disk inode format, nilfs->ns_inode_size
	 * should be checked before filling in any appended fields.
	 */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/*
		 * XXX: calling with has_bmap = 0 is a workaround to avoid
		 * a bmap deadlock; it delays the update of i_bmap until
		 * just before writing.
		 */
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

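/*
 * nilfs_truncate_bmap() removes keys from the bmap starting at its last
 * key and working downward in chunks of at most
 * NILFS_MAX_TRUNCATE_BLOCKS, relaxing segment-construction pressure
 * between chunks, until everything at or above @from is gone.
 */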
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * This may construct a logical segment and may fail in sync mode,
	 * but truncate has no return value.
	 */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	/*
	 * Free the resources allocated in nilfs_read_inode() here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (mdi && mdi->mi_palloc_cache)
		nilfs_palloc_destroy_cache(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * This may construct a logical segment and may fail in sync mode,
	 * but delete_inode has no return value.
	 */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct inode *inode, int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask);
}

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

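	/*
	 * Double-checked locking on ii->i_bh: read the inode block in
	 * outside ns_inode_lock, then re-check under the lock in case
	 * another task installed the buffer first.
	 */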
	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_warning(inode->i_sb, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies the in-core inode data into the corresponding
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}

int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

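	/*
	 * Walk the requested block range.  Each iteration handles one of
	 * three cases: a delayed-allocation extent (reported with
	 * FIEMAP_EXTENT_DELALLOC and no physical address), a hole, or a
	 * mapped run of contiguous blocks found via the bmap lookup.
	 */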
	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}
1137