xref: /linux/fs/f2fs/file.c (revision e3900e74f26fc924c8e9e2a922bd40369b0bb517)
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/mount.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"

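/*
 * Write fault on an mmapped page: make sure a data block backs the page
 * (reserving one if it is still a hole), then mark the page dirty so the
 * writeback path can flush it.
 */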
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	block_t old_blk_addr;
	struct dnode_of_data dn;
	int err;

	f2fs_balance_fs(sbi);

	sb_start_pagefault(inode->i_sb);

	mutex_lock_op(sbi, DATA_NEW);

	/* block allocation */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, 0);
	if (err) {
		mutex_unlock_op(sbi, DATA_NEW);
		goto out;
	}

	old_blk_addr = dn.data_blkaddr;

	if (old_blk_addr == NULL_ADDR) {
		err = reserve_new_block(&dn);
		if (err) {
			f2fs_put_dnode(&dn);
			mutex_unlock_op(sbi, DATA_NEW);
			goto out;
		}
	}
	f2fs_put_dnode(&dn);

	mutex_unlock_op(sbi, DATA_NEW);

	lock_page(page);
	if (page->mapping != inode->i_mapping ||
			page_offset(page) >= i_size_read(inode) ||
			!PageUptodate(page)) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out;

	/* fill the page */
	wait_on_page_writeback(page);
	/*
	 * page is wholly or partially inside EOF: zero the part beyond
	 * i_size.  Cast before shifting so the byte offset cannot overflow
	 * a 32-bit pgoff_t on large files.
	 */
	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	file_update_time(vma->vm_file);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault        = filemap_fault,
	.page_mkwrite = f2fs_vm_page_mkwrite,
};

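/*
 * Return true when the parent directory of @inode is not covered by the
 * last checkpoint, in which case fsync must issue a new checkpoint so the
 * dentry can be recovered after a power failure.
 */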
static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode)
{
	struct dentry *dentry;
	nid_t pino;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	if (!dentry) {
		iput(inode);
		return 0;
	}
	pino = dentry->d_parent->d_inode->i_ino;
	dput(dentry);
	iput(inode);
	return !is_checkpointed_node(sbi, pino);
}

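/*
 * fsync/fdatasync entry point.  Dirty data pages are flushed first; if the
 * inode can later be recovered by roll-forward, only its node pages are
 * written back, otherwise a full checkpoint is issued.
 */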
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned long long cur_version;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (inode->i_sb->s_flags & MS_RDONLY)
		return 0;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		goto out;

	mutex_lock(&sbi->cp_mutex);
	cur_version = le64_to_cpu(F2FS_CKPT(sbi)->checkpoint_ver);
	mutex_unlock(&sbi->cp_mutex);

	if (F2FS_I(inode)->data_version != cur_version &&
					!(inode->i_state & I_DIRTY))
		goto out;
	F2FS_I(inode)->data_version--;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	if (is_inode_flag_set(F2FS_I(inode), FI_NEED_CP))
		need_cp = true;
	if (!space_for_roll_forward(sbi))
		need_cp = true;
	if (need_to_sync_dir(sbi, inode))
		need_cp = true;

	f2fs_write_inode(inode, NULL);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);
		clear_inode_flag(F2FS_I(inode), FI_NEED_CP);
	} else {
		while (sync_node_pages(sbi, inode->i_ino, &wbc) == 0)
			f2fs_write_inode(inode, NULL);
		filemap_fdatawait_range(sbi->node_inode->i_mapping,
							0, LONG_MAX);
	}
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

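/*
 * Invalidate up to @count block addresses in the node page referenced by
 * @dn, starting at dn->ofs_in_node, and return how many in-use blocks were
 * actually freed.
 */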
static int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	int nr_free = 0, ofs = dn->ofs_in_node;
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct f2fs_node *raw_node;
	__le32 *addr;

	raw_node = page_address(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for ( ; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		update_extent_cache(NULL_ADDR, dn);
		invalidate_blocks(sbi, blkaddr);
		dec_valid_block_count(sbi, dn->inode, 1);
		nr_free++;
	}
	if (nr_free) {
		set_page_dirty(dn->node_page);
		sync_inode_page(dn);
	}
	dn->ofs_in_node = ofs;
	return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

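/*
 * When the truncation point is not page aligned, zero out the tail of the
 * page that now contains EOF so that stale data is not exposed.
 */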
static void truncate_partial_data_page(struct inode *inode, u64 from)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	struct page *page;

	if (!offset)
		return;

	page = find_data_page(inode, from >> PAGE_CACHE_SHIFT);
	if (IS_ERR(page))
		return;

	lock_page(page);
	wait_on_page_writeback(page);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

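/*
 * Free all data blocks from byte offset @from to the end of the file, then
 * zero out the partially truncated page, if any.
 */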
static int truncate_blocks(struct inode *inode, u64 from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0;
	int err;

	free_from = (pgoff_t)
			((from + blocksize - 1) >> (sbi->log_blocksize));

	mutex_lock_op(sbi, DATA_TRUNC);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, RDONLY_NODE);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		mutex_unlock_op(sbi, DATA_TRUNC);
		return err;
	}

	if (IS_INODE(dn.node_page))
		count = ADDRS_PER_INODE;
	else
		count = ADDRS_PER_BLOCK;

	count -= dn.ofs_in_node;
	BUG_ON(count < 0);
	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
	mutex_unlock_op(sbi, DATA_TRUNC);

	/* lastly zero out the partial page at the new end of file */
	truncate_partial_data_page(inode, from);

	return err;
}

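/*
 * Truncate the inode's blocks so that nothing remains beyond i_size, and
 * update the timestamps on success.
 */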
void f2fs_truncate(struct inode *inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return;

	if (!truncate_blocks(inode, i_size_read(inode))) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}

	f2fs_balance_fs(F2FS_SB(inode->i_sb));
}

static int f2fs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	/* i_blocks is kept in 4KB block units; stat expects 512-byte sectors */
	stat->blocks <<= 3;
	return 0;
}

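/*
 * Copy the attributes from @attr into the in-core inode.  Unlike
 * setattr_copy(), a mode change is handed to set_acl_inode() so that
 * f2fs_setattr() can apply it once the ACLs have been updated.
 */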
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(fi, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

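/*
 * setattr entry point: shrink or grow the file via f2fs_truncate() when the
 * size changes, then copy the remaining attributes and fix up the mode for
 * ACL-enabled configurations.
 */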
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) &&
			attr->ia_size != i_size_read(inode)) {
		truncate_setsize(inode, attr->ia_size);
		f2fs_truncate(inode);
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = f2fs_acl_chmod(inode);
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);
		}
	}

	mark_inode_dirty(inode);
	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

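/*
 * Zero @len bytes starting at @start inside the data page at @index,
 * allocating the page if it does not exist yet.
 */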
static void fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct page *page;

	if (!len)
		return;

	page = get_new_data_page(inode, index, false);

	if (!IS_ERR(page)) {
		wait_on_page_writeback(page);
		zero_user(page, start, len);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

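/*
 * Release the data blocks backing the whole pages in [@pg_start, @pg_end)
 * to punch a hole in the file.
 */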
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	pgoff_t index;
	int err;

	for (index = pg_start; index < pg_end; index++) {
		struct dnode_of_data dn;
		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

		mutex_lock_op(sbi, DATA_TRUNC);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, index, RDONLY_NODE);
		if (err) {
			mutex_unlock_op(sbi, DATA_TRUNC);
			if (err == -ENOENT)
				continue;
			return err;
		}

		if (dn.data_blkaddr != NULL_ADDR)
			truncate_data_blocks_range(&dn, 1);
		f2fs_put_dnode(&dn);
		mutex_unlock_op(sbi, DATA_TRUNC);
	}
	return 0;
}

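/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial pages at both ends of the range
 * and drop the fully covered pages and their blocks in between.
 */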
static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret = 0;

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		fill_zero(inode, pg_start, off_start,
						off_end - off_start);
	} else {
		if (off_start)
			fill_zero(inode, pg_start++, off_start,
					PAGE_CACHE_SIZE - off_start);
		if (off_end)
			fill_zero(inode, pg_end, 0, off_end);

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;

			/* cast so the shift cannot overflow a 32-bit pgoff_t */
			blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);
			ret = truncate_hole(inode, pg_start, pg_end);
		}
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) <= (offset + len)) {
		i_size_write(inode, offset);
		mark_inode_dirty(inode);
	}

	return ret;
}

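/*
 * Preallocation for fallocate(): reserve a new block for every hole in the
 * requested range and, unless FALLOC_FL_KEEP_SIZE is given, extend i_size.
 */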
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	for (index = pg_start; index <= pg_end; index++) {
		struct dnode_of_data dn;

		mutex_lock_op(sbi, DATA_NEW);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, index, 0);
		if (ret) {
			mutex_unlock_op(sbi, DATA_NEW);
			break;
		}

		if (dn.data_blkaddr == NULL_ADDR) {
			ret = reserve_new_block(&dn);
			if (ret) {
				f2fs_put_dnode(&dn);
				mutex_unlock_op(sbi, DATA_NEW);
				break;
			}
		}
		f2fs_put_dnode(&dn);

		mutex_unlock_op(sbi, DATA_NEW);

		/* compute the new i_size; casts avoid 32-bit overflow */
		if (pg_start == pg_end)
			new_size = offset + len;
		else if (index == pg_start && off_start)
			new_size = ((loff_t)index + 1) << PAGE_CACHE_SHIFT;
		else if (index == pg_end)
			new_size = ((loff_t)index << PAGE_CACHE_SHIFT) + off_end;
		else
			new_size += PAGE_CACHE_SIZE;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
	}

	return ret;
}

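/*
 * fallocate entry point: only hole punching and preallocation (optionally
 * with FALLOC_FL_KEEP_SIZE) are supported.
 */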
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	long ret;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		ret = punch_hole(inode, offset, len, mode);
	else
		ret = expand_inode_data(inode, offset, len, mode);

	f2fs_balance_fs(sbi);
	return ret;
}

#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}

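/*
 * ioctl entry point: only FS_IOC_GETFLAGS and FS_IOC_SETFLAGS are
 * implemented at this point.
 */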
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags;
	int ret;

	switch (cmd) {
	case FS_IOC_GETFLAGS:
		flags = fi->i_flags & FS_FL_USER_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case FS_IOC_SETFLAGS:
	{
		unsigned int oldflags;

		ret = mnt_want_write(filp->f_path.mnt);
		if (ret)
			return ret;

		if (!inode_owner_or_capable(inode)) {
			ret = -EACCES;
			goto out;
		}

		if (get_user(flags, (int __user *) arg)) {
			ret = -EFAULT;
			goto out;
		}

		flags = f2fs_mask_flags(inode->i_mode, flags);

		mutex_lock(&inode->i_mutex);

		oldflags = fi->i_flags;

		if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
			if (!capable(CAP_LINUX_IMMUTABLE)) {
				mutex_unlock(&inode->i_mutex);
				ret = -EPERM;
				goto out;
			}
		}

		flags = flags & FS_FL_USER_MODIFIABLE;
		flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
		fi->i_flags = flags;
		mutex_unlock(&inode->i_mutex);

		f2fs_set_inode_flags(inode);
		inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
out:
		mnt_drop_write(filp->f_path.mnt);
		return ret;
	}
	default:
		return -ENOTTY;
	}
}

const struct file_operations f2fs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.open		= generic_file_open,
	.mmap		= f2fs_file_mmap,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
};
637