xref: /linux/fs/nilfs2/inode.c (revision cc4589ebfae6f8dbb5cf880a0a67eedab3416492)
/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"


/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether to allocate the block when it has not been
 *	allocated yet
 *
 * This function does not issue an actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	__u64 blknum = 0;
	int err = 0, ret;
	struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode));
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * get_block() may be called from multiple
				 * callers for an inode.  However, the page
				 * containing this block must be locked in
				 * this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			} else if (err == -EINVAL) {
				nilfs_error(inode->i_sb, __func__,
					    "broken bmap (inode=%lu)\n",
					    inode->i_ino);
				err = -EIO;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, 0); /* disk block number (dbn)
						      must be changed to the
						      proper value */
	} else if (ret == -ENOENT) {
		/* A missing block is not an error (e.g. a hole); we must
		   return without setting the mapped state flag. */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}

/**
 * nilfs_readpage() - implement the readpage() method of nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement the readpages() method of nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

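/**
 * nilfs_writepages() - implement the writepages() method of nilfs_aops
 * address_space_operations.
 * @mapping: address_space struct of the file to be written back
 * @wbc: writeback_control struct
 *
 * For data integrity sync (WB_SYNC_ALL), the blocks in the given range
 * are written out through a data sync segment; otherwise writeback is
 * left to the segment constructor.
 */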
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

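/**
 * nilfs_writepage() - implement the writepage() method of nilfs_aops
 * address_space_operations.
 * @page: the page to be written
 * @wbc: writeback_control struct
 *
 * NILFS does not write out dirty pages one by one; the page is redirtied
 * and unlocked, and a segment construction is triggered instead
 * (synchronously for WB_SYNC_ALL, asynchronously when called for reclaim).
 */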
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

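/**
 * nilfs_set_page_dirty() - implement the set_page_dirty() method of
 * nilfs_aops address_space_operations.
 * @page: the page to be marked dirty
 *
 * When the page is newly dirtied, its blocks are also accounted as dirty
 * blocks of the file through nilfs_set_file_dirty().
 */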
static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_buffers(page);

	if (ret) {
		struct inode *inode = page->mapping->host;
		struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(sbi, inode, nr_dirty);
	}
	return ret;
}

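/**
 * nilfs_write_begin() - implement the write_begin() method of nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be written
 * @mapping: address_space struct of the file
 * @pos: start offset of the write
 * @len: length of the write
 * @flags: AOP flags
 * @pagep: place to return the locked page
 * @fsdata: unused here
 *
 * A transaction is begun here and committed in nilfs_write_end(), so the
 * prepare/commit pair of a write is treated as one indivisible operation.
 */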
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		loff_t isize = mapping->host->i_size;
		if (pos + len > isize)
			vmtruncate(mapping->host, isize);

		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

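/**
 * nilfs_write_end() - implement the write_end() method of nilfs_aops
 * address_space_operations.
 * @file: file struct of the written file
 * @mapping: address_space struct of the file
 * @pos: start offset of the write
 * @len: length requested by write_begin()
 * @copied: number of bytes actually copied
 * @page: the page returned by write_begin()
 * @fsdata: unused here
 *
 * The buffers newly dirtied by this write are counted before
 * generic_write_end() marks them, accounted to the file, and the
 * transaction begun in nilfs_write_begin() is committed.
 */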
static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

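/**
 * nilfs_direct_IO() - implement the direct_IO() method of nilfs_aops
 * address_space_operations.
 * @rw: READ or WRITE
 * @iocb: kernel iocb struct
 * @iov: io vectors
 * @offset: file offset of the start of the request
 * @nr_segs: number of io vectors
 *
 * Direct I/O is supported for reads only; a direct write returns 0 so
 * that the caller falls back to buffered I/O.
 */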
static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t size;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, nilfs_get_block, NULL);

	/*
	 * In case of error, an extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely((rw & WRITE) && size < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + iov_length(iov, nr_segs);

		if (end > isize)
			vmtruncate(inode, isize);
	}

	return size;
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.sync_page		= block_sync_page,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate  = block_is_partially_uptodate,
};

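/**
 * nilfs_new_inode() - allocate a new inode on the file system
 * @dir: inode of the parent directory
 * @mode: file type and access mode of the new inode
 *
 * An inode entry is allocated on the ifile; the owner, timestamps, flags
 * and generation number are initialized, and the inode is hashed.
 * Returns the new inode on success, or ERR_PTR(err) on failure.
 */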
struct inode *nilfs_new_inode(struct inode *dir, int mode)
{
	struct super_block *sb = dir->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *inode;
	struct nilfs_inode_info *ii;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;

	err = nilfs_ifile_create_inode(sbi->s_ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* the reference count of i_bh is inherited from
	   nilfs_mdt_read_block() */

	atomic_inc(&sbi->s_inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = NILFS_I(dir)->i_flags;
	if (S_ISLNK(mode))
		ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL);
	if (!S_ISDIR(mode))
		ii->i_flags &= ~NILFS_DIRSYNC_FL;

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	ii->i_cno = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occurs. When nilfs_init_acl() is
				    supported, proper cancellation of the
				    above jobs must be considered. */

	return inode;

 failed_acl:
 failed_bmap:
	inode->i_nlink = 0;
	iput(inode);  /* the raw inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}

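/**
 * nilfs_free_inode() - free the on-disk resources of an inode
 * @inode: inode to be freed
 *
 * The inode entry is deleted from the ifile and the inode count of the
 * file system is decremented.
 */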
void nilfs_free_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);

	/* XXX: check error code? Is there anything I can do? */
	(void) nilfs_ifile_delete_inode(sbi->s_ifile, inode->i_ino);
	atomic_dec(&sbi->s_inodes_count);
}

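/**
 * nilfs_set_inode_flags() - propagate NILFS inode flags to VFS flags
 * @inode: inode whose i_flags are to be updated
 *
 * The NILFS specific flags held in nilfs_inode_info are translated into
 * the corresponding S_* flags of the VFS inode.
 */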
void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & NILFS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & NILFS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & NILFS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
#ifndef NILFS_ATIME_DISABLE
	if (flags & NILFS_NOATIME_FL)
#endif
		inode->i_flags |= S_NOATIME;
	if (flags & NILFS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

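/**
 * nilfs_read_inode_common() - fill in a VFS inode from an on-disk inode
 * @inode: inode to be initialized
 * @raw_inode: on-disk nilfs_inode to read from
 *
 * Returns 0 on success, or a negative error code if the raw inode turns
 * out to be deleted or its bmap cannot be read.
 */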
int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
	inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	/* the on-disk inode records no atime; mtime is used in its place */
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	ii->i_cno = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

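/**
 * __nilfs_read_inode() - read an on-disk inode and set up a VFS inode
 * @sb: super block instance
 * @ino: inode number
 * @inode: the new (locked) inode to be initialized
 *
 * The raw inode is looked up on the ifile, copied into @inode, and the
 * inode operations are chosen according to the file type.
 */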
static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
			      struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *dat = nilfs_dat_inode(sbi->s_nilfs);
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	err = nilfs_ifile_get_inode_block(sbi->s_ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	return err;
}

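/**
 * nilfs_iget() - look up or read an inode
 * @sb: super block instance
 * @ino: inode number
 *
 * If the inode is already in the inode cache, it is returned as is;
 * otherwise a new inode is allocated and read in with
 * __nilfs_read_inode().  Returns ERR_PTR(err) on failure.
 */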
struct inode *nilfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

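/**
 * nilfs_write_inode_common() - copy a VFS inode into an on-disk inode
 * @inode: inode to be written out
 * @raw_inode: on-disk nilfs_inode to write to
 * @has_bmap: whether the bmap data should be written as well
 *
 * For device files (when @has_bmap is zero), the device code is stored
 * in place of the bmap data.
 */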
void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/* When extending the on-disk inode, nilfs->ns_inode_size should be
	   checked before substituting values for the appended fields. */
}

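/**
 * nilfs_update_inode() - write out an inode into its inode block
 * @inode: inode to be written out
 * @ibh: buffer head of the inode block containing the inode
 *
 * The raw inode is zeroed out first when the inode is new.  Note that
 * the bmap is deliberately not written here (has_bmap = 0); see the
 * comment in the function body.
 */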
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: calling with has_bmap = 0 is a workaround to avoid
		   a deadlock in the bmap. It delays the update of i_bmap
		   to just before writing. */
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

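/**
 * nilfs_truncate_bmap() - truncate the block mapping of an inode
 * @ii: nilfs_inode_info of the inode
 * @from: first file block number to be truncated off
 *
 * The bmap is shortened in chunks of at most NILFS_MAX_TRUNCATE_BLOCKS
 * so that memory pressure can be relaxed between the steps.
 */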
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
 repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

 failed:
	if (ret == -EINVAL)
		nilfs_error(ii->vfs_inode.i_sb, __func__,
			    "bmap is broken (ino=%lu)", ii->vfs_inode.i_ino);
	else
		nilfs_warning(ii->vfs_inode.i_sb, __func__,
			      "failed to truncate bmap (ino=%lu, err=%d)",
			      ii->vfs_inode.i_ino, ret);
}

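/**
 * nilfs_truncate() - truncate a regular file to i_size
 * @inode: inode of the file to be truncated
 *
 * The whole operation runs inside a transaction: the partial block at
 * the new end of file is zeroed out, the block mapping beyond it is
 * truncated, and the inode is marked dirty.
 */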
void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
	nilfs_transaction_commit(sb);
	/* The commit may construct a logical segment and may fail in sync
	   mode, but truncate has no return value. */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	/*
	 * Free the resources allocated in nilfs_read_inode() here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);
}

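/**
 * nilfs_evict_inode() - perform the final cleanup on inode eviction
 * @inode: inode being evicted
 *
 * For an unlinked inode, the page cache, the block mapping and the ifile
 * entry are all released inside a transaction; otherwise only the
 * in-core resources are freed.
 */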
void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (inode->i_nlink || unlikely(is_bad_inode(inode))) {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		end_writeback(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);

	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	end_writeback(inode);
	nilfs_clear_inode(inode);
	nilfs_free_inode(inode);
	/* nilfs_free_inode() marks the inode buffer dirty */
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/* The commit may construct a logical segment and may fail in sync
	   mode, but evict_inode has no return value. */
}

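/**
 * nilfs_setattr() - change attributes of an inode
 * @dentry: dentry of the inode
 * @iattr: attributes to be set
 *
 * The attribute change, including a possible size change, is carried out
 * inside a transaction.
 */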
int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		err = vmtruncate(inode, iattr->ia_size);
		if (unlikely(err))
			goto out_err;
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

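/**
 * nilfs_load_inode_block() - get the buffer containing the on-disk inode
 * @sbi: nilfs_sb_info of the file system instance
 * @inode: target inode
 * @pbh: place to store the buffer head
 *
 * The buffer is cached on ii->i_bh; the caller receives an extra
 * reference and must release it with brelse().
 */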
int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
			   struct buffer_head **pbh)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&sbi->s_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&sbi->s_inode_lock);
		err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino,
						  pbh);
		if (unlikely(err))
			return err;
		spin_lock(&sbi->s_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}

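/**
 * nilfs_inode_dirty() - test whether an inode is dirty
 * @inode: inode to be tested
 *
 * Returns non-zero if the inode is on the dirty files list with
 * NILFS_I_DIRTY or NILFS_I_BUSY set.
 */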
int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&sbi->s_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&sbi->s_inode_lock);
	}
	return ret;
}

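/**
 * nilfs_set_file_dirty() - register an inode as dirty and account blocks
 * @sbi: nilfs_sb_info of the file system instance
 * @inode: inode whose data blocks were dirtied
 * @nr_dirty: number of newly dirtied blocks
 *
 * The dirty block count of the file system is raised and, unless the
 * inode is already queued or busy, the inode is put on the dirty files
 * list of the segment constructor.
 */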
int nilfs_set_file_dirty(struct nilfs_sb_info *sbi, struct inode *inode,
			 unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&sbi->s_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(sbi->s_super, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&sbi->s_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain set for
					   the inode being freed */
		}
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &sbi->s_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}

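/**
 * nilfs_mark_inode_dirty() - write an inode into its inode block and
 * mark the block dirty
 * @inode: inode to be written out
 *
 * Returns 0 on success, or a negative error code if the inode block
 * could not be loaded.
 */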
int nilfs_mark_inode_dirty(struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(sbi, inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh);
	nilfs_mdt_mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(sbi->s_ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from the inode to the corresponding entry in
 * the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}
822