/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/mount.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"

static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
                                                struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        block_t old_blk_addr;
        struct dnode_of_data dn;
        int err;

        f2fs_balance_fs(sbi);

        sb_start_pagefault(inode->i_sb);

        mutex_lock_op(sbi, DATA_NEW);

        /* block allocation */
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, page->index, 0);
        if (err) {
                mutex_unlock_op(sbi, DATA_NEW);
                goto out;
        }

        old_blk_addr = dn.data_blkaddr;

        if (old_blk_addr == NULL_ADDR) {
                err = reserve_new_block(&dn);
                if (err) {
                        f2fs_put_dnode(&dn);
                        mutex_unlock_op(sbi, DATA_NEW);
                        goto out;
                }
        }
        f2fs_put_dnode(&dn);

        mutex_unlock_op(sbi, DATA_NEW);

        lock_page(page);
        if (page->mapping != inode->i_mapping ||
                        page_offset(page) >= i_size_read(inode) ||
                        !PageUptodate(page)) {
                unlock_page(page);
                err = -EFAULT;
                goto out;
        }

        /*
         * check to see if the page is mapped already (no holes)
         */
        if (PageMappedToDisk(page))
                goto out;

        /* fill the page */
        wait_on_page_writeback(page);

        /* page is wholly or partially inside EOF */
        if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
                unsigned offset;
                offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
                zero_user_segment(page, offset, PAGE_CACHE_SIZE);
        }
        set_page_dirty(page);
        SetPageUptodate(page);

        file_update_time(vma->vm_file);
out:
        sb_end_pagefault(inode->i_sb);
        return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = f2fs_vm_page_mkwrite,
        .remap_pages    = generic_file_remap_pages,
};

static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode)
{
        struct dentry *dentry;
        nid_t pino;

        inode = igrab(inode);
        dentry = d_find_any_alias(inode);
        if (!dentry) {
                iput(inode);
                return 0;
        }
        pino = dentry->d_parent->d_inode->i_ino;
        dput(dentry);
        iput(inode);
        return !is_checkpointed_node(sbi, pino);
}
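
/*
 * fsync either triggers a full checkpoint or just flushes this inode's
 * node pages for roll-forward recovery.  A checkpoint is required when
 * the file cannot be recovered by roll-forward alone: it is not a
 * regular file with a single link, FI_NEED_CP is set, log space for
 * roll-forward is short, or the parent directory's node has not been
 * checkpointed yet.
 */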
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file->f_mapping->host;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        unsigned long long cur_version;
        int ret = 0;
        bool need_cp = false;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .for_reclaim = 0,
        };

        if (inode->i_sb->s_flags & MS_RDONLY)
                return 0;

        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (ret)
                return ret;

        /* guarantee free sections for fsync */
        f2fs_balance_fs(sbi);

        mutex_lock(&inode->i_mutex);

        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                goto out;

        mutex_lock(&sbi->cp_mutex);
        cur_version = le64_to_cpu(F2FS_CKPT(sbi)->checkpoint_ver);
        mutex_unlock(&sbi->cp_mutex);

        if (F2FS_I(inode)->data_version != cur_version &&
                                        !(inode->i_state & I_DIRTY))
                goto out;
        F2FS_I(inode)->data_version--;

        if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
                need_cp = true;
        if (is_inode_flag_set(F2FS_I(inode), FI_NEED_CP))
                need_cp = true;
        if (!space_for_roll_forward(sbi))
                need_cp = true;
        if (need_to_sync_dir(sbi, inode))
                need_cp = true;

        if (need_cp) {
                /* all the dirty node pages should be flushed for POR */
                ret = f2fs_sync_fs(inode->i_sb, 1);
                clear_inode_flag(F2FS_I(inode), FI_NEED_CP);
        } else {
                /* if there is no written node page, write its inode page */
                while (!sync_node_pages(sbi, inode->i_ino, &wbc)) {
                        ret = f2fs_write_inode(inode, NULL);
                        if (ret)
                                goto out;
                }
                filemap_fdatawait_range(sbi->node_inode->i_mapping,
                                                        0, LONG_MAX);
        }
out:
        mutex_unlock(&inode->i_mutex);
        return ret;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        file_accessed(file);
        vma->vm_ops = &f2fs_file_vm_ops;
        return 0;
}

static int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
        int nr_free = 0, ofs = dn->ofs_in_node;
        struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
        struct f2fs_node *raw_node;
        __le32 *addr;

        raw_node = page_address(dn->node_page);
        addr = blkaddr_in_node(raw_node) + ofs;

        for ( ; count > 0; count--, addr++, dn->ofs_in_node++) {
                block_t blkaddr = le32_to_cpu(*addr);
                if (blkaddr == NULL_ADDR)
                        continue;

                update_extent_cache(NULL_ADDR, dn);
                invalidate_blocks(sbi, blkaddr);
                dec_valid_block_count(sbi, dn->inode, 1);
                nr_free++;
        }
        if (nr_free) {
                set_page_dirty(dn->node_page);
                sync_inode_page(dn);
        }
        dn->ofs_in_node = ofs;
        return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
        truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

static void truncate_partial_data_page(struct inode *inode, u64 from)
{
        unsigned offset = from & (PAGE_CACHE_SIZE - 1);
        struct page *page;

        if (!offset)
                return;

        page = find_data_page(inode, from >> PAGE_CACHE_SHIFT);
        if (IS_ERR(page))
                return;

        lock_page(page);
        wait_on_page_writeback(page);
        zero_user(page, offset, PAGE_CACHE_SIZE - offset);
        set_page_dirty(page);
        f2fs_put_page(page, 1);
}
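
/*
 * Truncate proceeds in two phases: the data blocks that share the node
 * page containing @from are freed with truncate_data_blocks_range(),
 * then truncate_inode_blocks() drops every node block past free_from.
 * Finally the tail of the last remaining page is zeroed out by
 * truncate_partial_data_page().
 */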
static int truncate_blocks(struct inode *inode, u64 from)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        unsigned int blocksize = inode->i_sb->s_blocksize;
        struct dnode_of_data dn;
        pgoff_t free_from;
        int count = 0;
        int err;

        free_from = (pgoff_t)
                        ((from + blocksize - 1) >> (sbi->log_blocksize));

        mutex_lock_op(sbi, DATA_TRUNC);

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, free_from, RDONLY_NODE);
        if (err) {
                if (err == -ENOENT)
                        goto free_next;
                mutex_unlock_op(sbi, DATA_TRUNC);
                return err;
        }

        if (IS_INODE(dn.node_page))
                count = ADDRS_PER_INODE;
        else
                count = ADDRS_PER_BLOCK;

        count -= dn.ofs_in_node;
        BUG_ON(count < 0);
        if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
                truncate_data_blocks_range(&dn, count);
                free_from += count;
        }

        f2fs_put_dnode(&dn);
free_next:
        err = truncate_inode_blocks(inode, free_from);
        mutex_unlock_op(sbi, DATA_TRUNC);

        /* lastly zero out the first data page */
        truncate_partial_data_page(inode, from);

        return err;
}

void f2fs_truncate(struct inode *inode)
{
        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
                                S_ISLNK(inode->i_mode)))
                return;

        if (!truncate_blocks(inode, i_size_read(inode))) {
                inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                mark_inode_dirty(inode);
        }

        f2fs_balance_fs(F2FS_SB(inode->i_sb));
}

static int f2fs_getattr(struct vfsmount *mnt,
                        struct dentry *dentry, struct kstat *stat)
{
        struct inode *inode = dentry->d_inode;
        generic_fillattr(inode, stat);
        stat->blocks <<= 3;
        return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
        struct f2fs_inode_info *fi = F2FS_I(inode);
        unsigned int ia_valid = attr->ia_valid;

        if (ia_valid & ATTR_UID)
                inode->i_uid = attr->ia_uid;
        if (ia_valid & ATTR_GID)
                inode->i_gid = attr->ia_gid;
        if (ia_valid & ATTR_ATIME)
                inode->i_atime = timespec_trunc(attr->ia_atime,
                                                inode->i_sb->s_time_gran);
        if (ia_valid & ATTR_MTIME)
                inode->i_mtime = timespec_trunc(attr->ia_mtime,
                                                inode->i_sb->s_time_gran);
        if (ia_valid & ATTR_CTIME)
                inode->i_ctime = timespec_trunc(attr->ia_ctime,
                                                inode->i_sb->s_time_gran);
        if (ia_valid & ATTR_MODE) {
                umode_t mode = attr->ia_mode;

                if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
                        mode &= ~S_ISGID;
                set_acl_inode(fi, mode);
        }
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct f2fs_inode_info *fi = F2FS_I(inode);
        int err;

        err = inode_change_ok(inode, attr);
        if (err)
                return err;

        if ((attr->ia_valid & ATTR_SIZE) &&
                attr->ia_size != i_size_read(inode)) {
                truncate_setsize(inode, attr->ia_size);
                f2fs_truncate(inode);
        }

        __setattr_copy(inode, attr);

        if (attr->ia_valid & ATTR_MODE) {
                err = f2fs_acl_chmod(inode);
                if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
                        inode->i_mode = fi->i_acl_mode;
                        clear_inode_flag(fi, FI_ACL_MODE);
                }
        }

        mark_inode_dirty(inode);
        return err;
}

const struct inode_operations f2fs_file_inode_operations = {
        .getattr        = f2fs_getattr,
        .setattr        = f2fs_setattr,
        .get_acl        = f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = f2fs_listxattr,
        .removexattr    = generic_removexattr,
#endif
};

static void fill_zero(struct inode *inode, pgoff_t index,
                                        loff_t start, loff_t len)
{
        struct page *page;

        if (!len)
                return;

        page = get_new_data_page(inode, index, false);

        if (!IS_ERR(page)) {
                wait_on_page_writeback(page);
                zero_user(page, start, len);
                set_page_dirty(page);
                f2fs_put_page(page, 1);
        }
}
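
/*
 * Free the data blocks mapped by pages [pg_start, pg_end), one block
 * at a time, taking and dropping DATA_TRUNC around each page.
 * -ENOENT from get_dnode_of_data() means the page is already a hole,
 * so it is simply skipped.
 */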
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
        pgoff_t index;
        int err;

        for (index = pg_start; index < pg_end; index++) {
                struct dnode_of_data dn;
                struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

                f2fs_balance_fs(sbi);

                mutex_lock_op(sbi, DATA_TRUNC);
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = get_dnode_of_data(&dn, index, RDONLY_NODE);
                if (err) {
                        mutex_unlock_op(sbi, DATA_TRUNC);
                        if (err == -ENOENT)
                                continue;
                        return err;
                }

                if (dn.data_blkaddr != NULL_ADDR)
                        truncate_data_blocks_range(&dn, 1);
                f2fs_put_dnode(&dn);
                mutex_unlock_op(sbi, DATA_TRUNC);
        }
        return 0;
}

static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
{
        pgoff_t pg_start, pg_end;
        loff_t off_start, off_end;
        int ret = 0;

        pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
        pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

        off_start = offset & (PAGE_CACHE_SIZE - 1);
        off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

        if (pg_start == pg_end) {
                fill_zero(inode, pg_start, off_start,
                                                off_end - off_start);
        } else {
                if (off_start)
                        fill_zero(inode, pg_start++, off_start,
                                        PAGE_CACHE_SIZE - off_start);
                if (off_end)
                        fill_zero(inode, pg_end, 0, off_end);

                if (pg_start < pg_end) {
                        struct address_space *mapping = inode->i_mapping;
                        loff_t blk_start, blk_end;

                        blk_start = pg_start << PAGE_CACHE_SHIFT;
                        blk_end = pg_end << PAGE_CACHE_SHIFT;
                        truncate_inode_pages_range(mapping, blk_start,
                                        blk_end - 1);
                        ret = truncate_hole(inode, pg_start, pg_end);
                }
        }

        if (!(mode & FALLOC_FL_KEEP_SIZE) &&
                i_size_read(inode) <= (offset + len)) {
                i_size_write(inode, offset);
                mark_inode_dirty(inode);
        }

        return ret;
}

static int expand_inode_data(struct inode *inode, loff_t offset,
                                        loff_t len, int mode)
{
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        pgoff_t index, pg_start, pg_end;
        loff_t new_size = i_size_read(inode);
        loff_t off_start, off_end;
        int ret = 0;

        ret = inode_newsize_ok(inode, (len + offset));
        if (ret)
                return ret;

        pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
        pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

        off_start = offset & (PAGE_CACHE_SIZE - 1);
        off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

        for (index = pg_start; index <= pg_end; index++) {
                struct dnode_of_data dn;

                mutex_lock_op(sbi, DATA_NEW);

                set_new_dnode(&dn, inode, NULL, NULL, 0);
                ret = get_dnode_of_data(&dn, index, 0);
                if (ret) {
                        mutex_unlock_op(sbi, DATA_NEW);
                        break;
                }

                if (dn.data_blkaddr == NULL_ADDR) {
                        ret = reserve_new_block(&dn);
                        if (ret) {
                                f2fs_put_dnode(&dn);
                                mutex_unlock_op(sbi, DATA_NEW);
                                break;
                        }
                }
                f2fs_put_dnode(&dn);

                mutex_unlock_op(sbi, DATA_NEW);

                if (pg_start == pg_end)
                        new_size = offset + len;
                else if (index == pg_start && off_start)
                        new_size = (index + 1) << PAGE_CACHE_SHIFT;
                else if (index == pg_end)
                        new_size = (index << PAGE_CACHE_SHIFT) + off_end;
                else
                        new_size += PAGE_CACHE_SIZE;
        }

        if (!(mode & FALLOC_FL_KEEP_SIZE) &&
                i_size_read(inode) < new_size) {
                i_size_write(inode, new_size);
                mark_inode_dirty(inode);
        }

        return ret;
}
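
/*
 * fallocate() entry point.  Only FALLOC_FL_KEEP_SIZE and
 * FALLOC_FL_PUNCH_HOLE are honored: punch_hole() zeroes the partial
 * pages at either edge and frees every whole page in between, while
 * expand_inode_data() reserves new blocks to preallocate space.
 */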
static long f2fs_fallocate(struct file *file, int mode,
                                loff_t offset, loff_t len)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        long ret;

        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;

        if (mode & FALLOC_FL_PUNCH_HOLE)
                ret = punch_hole(inode, offset, len, mode);
        else
                ret = expand_inode_data(inode, offset, len, mode);

        if (!ret) {
                inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                mark_inode_dirty(inode);
        }
        return ret;
}

#define F2FS_REG_FLMASK         (~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK       (FS_NODUMP_FL | FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
        if (S_ISDIR(mode))
                return flags;
        else if (S_ISREG(mode))
                return flags & F2FS_REG_FLMASK;
        else
                return flags & F2FS_OTHER_FLMASK;
}

long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct inode *inode = filp->f_dentry->d_inode;
        struct f2fs_inode_info *fi = F2FS_I(inode);
        unsigned int flags;
        int ret;

        switch (cmd) {
        case FS_IOC_GETFLAGS:
                flags = fi->i_flags & FS_FL_USER_VISIBLE;
                return put_user(flags, (int __user *) arg);
        case FS_IOC_SETFLAGS:
        {
                unsigned int oldflags;

                ret = mnt_want_write(filp->f_path.mnt);
                if (ret)
                        return ret;

                if (!inode_owner_or_capable(inode)) {
                        ret = -EACCES;
                        goto out;
                }

                if (get_user(flags, (int __user *) arg)) {
                        ret = -EFAULT;
                        goto out;
                }

                flags = f2fs_mask_flags(inode->i_mode, flags);

                mutex_lock(&inode->i_mutex);

                oldflags = fi->i_flags;

                if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
                        if (!capable(CAP_LINUX_IMMUTABLE)) {
                                mutex_unlock(&inode->i_mutex);
                                ret = -EPERM;
                                goto out;
                        }
                }

                flags = flags & FS_FL_USER_MODIFIABLE;
                flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
                fi->i_flags = flags;
                mutex_unlock(&inode->i_mutex);

                f2fs_set_inode_flags(inode);
                inode->i_ctime = CURRENT_TIME;
                mark_inode_dirty(inode);
out:
                mnt_drop_write(filp->f_path.mnt);
                return ret;
        }
        default:
                return -ENOTTY;
        }
}

const struct file_operations f2fs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = generic_file_aio_write,
        .open           = generic_file_open,
        .mmap           = f2fs_file_mmap,
        .fsync          = f2fs_sync_file,
        .fallocate      = f2fs_fallocate,
        .unlocked_ioctl = f2fs_ioctl,
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
};