xref: /linux/fs/nilfs2/inode.c (revision a6cbcd4a4a85e2fdb0b3344b88df2e8b3d526b9e)
/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/aio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer on NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicates whether to allocate the block if it has not been
 *      allocated yet.
 *
 * This function does not issue an actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
						      to proper value */
	} else if (ret == -ENOENT) {
		/* not found is not an error (e.g. a hole); return without
		   the mapped state flag. */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
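/*
 * Illustrative sketch (not part of the original file): how a caller of
 * the get_block_t convention above is typically written.  The caller
 * passes the maximum mapping size in bh.b_size; on return,
 * buffer_mapped() tells whether @blkoff hit an allocated block, and
 * b_size has been shrunk to the length of the contiguous extent found.
 * The helper name example_probe_extent() is hypothetical.
 */
static inline int example_probe_extent(struct inode *inode, sector_t blkoff)
{
	struct buffer_head bh = { .b_size = 16 << inode->i_blkbits };
	int err;

	err = nilfs_get_block(inode, blkoff, &bh, 0 /* do not allocate */);
	if (err)
		return err;
	if (!buffer_mapped(&bh))
		return 0;	/* hole */
	return (int)(bh.b_size >> inode->i_blkbits); /* contiguous blocks */
}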

/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @page - the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @mapping - address_space struct used for reading multiple pages
 * @pages - the pages to be read
 * @nr_pages - number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		/*
		 * This means the filesystem was remounted read-only
		 * because of an error or metadata corruption, but dirty
		 * pages are still being flushed in the background.  So,
		 * here we simply discard this dirty page.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_nobuffers(page);

	if (page_has_buffers(page)) {
		struct inode *inode = page->mapping->host;
		unsigned nr_dirty = 0;
		struct buffer_head *bh, *head;

		/*
		 * This page is locked by callers, and no other thread
		 * concurrently marks its buffers dirty since they are
		 * only dirtied through routines in fs/buffer.c, in
		 * which the call sites of mark_buffer_dirty() are
		 * protected by the page lock.
		 */
		bh = head = page_buffers(page);
		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);

		if (nr_dirty)
			nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)

{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
		loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file->f_mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t size;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, iter, offset,
				  nilfs_get_block);

	/*
	 * In case of error, an extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely((rw & WRITE) && size < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + count;

		if (end > isize)
			nilfs_write_failed(mapping, end);
	}

	return size;
}
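/*
 * Note (illustrative, not part of the original file): returning 0 from
 * ->direct_IO for writes tells the generic O_DIRECT write path that
 * nothing was written directly, so it falls back to buffered writing.
 * Roughly, the caller-side logic looks like:
 *
 *	written = mapping->a_ops->direct_IO(WRITE, ...);
 *	if (written < count)
 *		... fall back to the buffered write path ...
 *
 * NILFS does not implement direct writes and relies on this fallback;
 * direct reads additionally need synchronization with the cleaner, as
 * noted above.
 */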

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate  = block_is_partially_uptodate,
};

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* the reference count of i_bh is inherited from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occurs. When nilfs_init_acl() is
				    supported, proper cancellation of the
				    jobs above should be considered. */

	return inode;

 failed_acl:
 failed_bmap:
	clear_nlink(inode);
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}
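/*
 * Illustrative sketch (hypothetical helper, loosely modeled on the
 * directory operations in namei.c, not a verbatim copy of them):
 * nilfs_new_inode() is meant to be called inside a transaction, with
 * the caller committing or aborting it depending on the outcome.
 */
static inline int example_create(struct inode *dir, struct dentry *dentry,
				 umode_t mode)
{
	struct nilfs_transaction_info ti;
	struct inode *inode;
	int err;

	err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
	if (err)
		return err;
	inode = nilfs_new_inode(dir, mode);
	if (IS_ERR(inode)) {
		nilfs_transaction_abort(dir->i_sb);
		return PTR_ERR(inode);
	}
	/* ... set i_op/i_fop, add a directory entry, instantiate @dentry ... */
	return nilfs_transaction_commit(dir->i_sb);
}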

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & FS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & FS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
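/*
 * Illustrative sketch (hypothetical helper; the real caller is the
 * lookup code in namei.c): directory lookup resolves a name to an
 * inode number and then goes through nilfs_iget() with the checkpoint
 * root of the parent directory.
 */
static inline struct inode *example_lookup_ino(struct inode *dir,
					       unsigned long ino)
{
	/* The root of @dir selects which checkpoint the inode is read
	   from; GC inodes use nilfs_iget_for_gc() below instead. */
	return nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino);
}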

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/* When extending inode, nilfs->ns_inode_size should be checked
	   for substitutions of appended fields */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: call with has_bmap = 0 is a workaround to avoid
		   deadlock of bmap. This delays update of i_bmap to just
		   before writing */
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}
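/*
 * Worked example (illustrative): with 4KB blocks, one pass above frees
 * at most NILFS_MAX_TRUNCATE_BLOCKS = 16384 blocks, i.e. 64MB.
 * Truncating down to block 1000 when the last mapped key is around
 * 1000000 therefore takes roughly (1000000 - 1000) / 16384 ~= 61
 * passes, each cutting the bmap back by up to 16384 keys and relaxing
 * segment-construction pressure in between.
 */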

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (mdi && mdi->mi_palloc_cache)
		nilfs_palloc_destroy_cache(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But delete_inode has no return value. */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct inode *inode, int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;
	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask);
}

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(inode->i_sb, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_mark_inode_dirty(struct inode *inode)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}

int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}
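/*
 * Illustrative userspace sketch (standard FIEMAP ioctl usage, not part
 * of the original file): exercising nilfs_fiemap() above.  Kept under
 * "#if 0" since it is user code, not kernel code.  fm_start is left at
 * zero by calloc(), so the whole file is queried.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char *argv[])
{
	struct fiemap *fm;
	unsigned int i;
	int fd = open(argv[1], O_RDONLY);

	fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;
	fm->fm_extent_count = 32;
	if (fd >= 0 && fm && ioctl(fd, FS_IOC_FIEMAP, fm) == 0)
		for (i = 0; i < fm->fm_mapped_extents; i++)
			printf("logical=%llu physical=%llu length=%llu flags=0x%x\n",
			       (unsigned long long)fm->fm_extents[i].fe_logical,
			       (unsigned long long)fm->fm_extents[i].fe_physical,
			       (unsigned long long)fm->fm_extents[i].fe_length,
			       fm->fm_extents[i].fe_flags);
	return 0;
}
#endif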