/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static int f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int err;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	err = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	return err;
}

static int f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_file(inode))
		f2fs_wait_on_block_writeback(sbi, dn.data_blkaddr);

out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);
out:
	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
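	/* d_find_any_alias() may return NULL if no dentry is attached */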
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		need_dentry_mark(sbi, inode->i_ino) &&
		exist_written_data(sbi, F2FS_I(inode)->i_pino, TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* But we still need to check whether there are pending inode updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}

static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() are able to be recovered from a
	 * sudden power-off.
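	 * Roll-forward recovery replays them from the fsync-marked node chain.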
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	ret = fsync_node_pages(sbi, inode, &wbc, atomic);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * the node chain which serializes node blocks. If one of the node
	 * writes is reordered, we can simply see a broken chain, resulting in
	 * stopping roll-forward recovery. It means we'll recover all or none
	 * of the node blocks given the fsync mark.
	 */
	if (!atomic) {
		ret = wait_on_node_pages_writeback(sbi, ino);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct page *page;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
				      1, &page);
	if (!nr_pages)
		return ULONG_MAX;
	pgofs = page->index;
	put_page(page);
	return pgofs;
}

static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode)
			|| f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;
			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}

void truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}
#endif
	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(inode->i_sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags & (FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
	if (flags & FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (f2fs_encrypted_inode(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;
	bool size_changed = false;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
			!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
			!gid_eq(attr->ia_gid, inode->i_gid))) {
		err = dquot_transfer(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size <= i_size_read(inode)) {
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			if (err)
				return err;
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			up_write(&F2FS_I(inode)->i_mmap_sem);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = current_time(inode);
		}

		down_write(&F2FS_I(inode)->i_sem);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		up_write(&F2FS_I(inode)->i_sem);

		size_changed = true;
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, size_changed);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};

static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = get_next_page_offset(&dn, pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode,
				pg_start, off_start,
				off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
		}
	}

	return ret;
}

static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
		if (!is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			get_node_info(sbi, dn.nid, &ni);
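			/* ni.version will stamp the blocks replaced below */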
			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = get_lock_data_page(src_inode, src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = truncate_hole(src_inode, src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}

static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					sizeof(block_t) * olen, GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					sizeof(int) * olen, GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}

static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	f2fs_drop_extent_tree(inode);

	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs.
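	 * (F2FS_BLKSIZE, i.e. 4KB; partial blocks cannot be moved)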
	 */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out_unlock;

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		goto out_unlock;

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
out_unlock:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
	return ret;
}

static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * reserve_new_blocks will not guarantee entire block
		 * allocation.
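		 * A NULL_ADDR left here means the reservation ran out of space.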
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		goto out_sem;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			goto out_sem;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				goto out_sem;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
out_sem:
	up_write(&F2FS_I(inode)->i_mmap_sem);

	return ret;
}

static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs.
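	 * (both offset and len must be multiples of F2FS_BLKSIZE)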
	 */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		goto out;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out;

	truncate_pagecache(inode, offset);

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
	return ret;
}

static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ?
				offset + len :
				(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called on every close. So we should
	 * not drop any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
	}
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashed, we should do a
	 * roll-back. Otherwise, other readers/writers can see a corrupted
	 * database until all the writers close their files. Since this
	 * should be done before dropping the file lock, it needs to be
	 * done in ->flush.
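	 * Only the task that started the atomic write drops its in-memory pages.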
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		drop_inmem_pages(inode);
	return 0;
}

static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags &
			(FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
	return put_user(flags, (int __user *)arg);
}

static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int oldflags;

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL))
		if (!capable(CAP_LINUX_IMMUTABLE))
			return -EPERM;

	flags = flags & (FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	flags |= oldflags & ~(FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	fi->i_flags = flags;

	if (fi->i_flags & FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	unsigned int flags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	ret = __f2fs_ioc_setflags(inode, flags);

	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_ATOMIC_FILE);
	set_inode_flag(inode, FI_HOT_DATA);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!get_dirty_pages(inode))
		goto inc_stat;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		clear_inode_flag(inode, FI_ATOMIC_FILE);
		clear_inode_flag(inode, FI_HOT_DATA);
		goto out;
	}

inc_stat:
	F2FS_I(inode)->inmem_task = current;
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
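	/* flush all in-memory pages to disk, then fsync the committed data */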
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);

	if (f2fs_is_volatile_file(inode))
		goto err_out;

	if (f2fs_is_atomic_file(inode)) {
		ret = commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
			clear_inode_flag(inode, FI_HOT_DATA);
			stat_dec_atomic_write(inode);
		}
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
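		/* freeze_bdev() syncs the filesystem before we stop checkpointing */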
		sb = freeze_bdev(sb->s_bdev);
		if (IS_ERR(sb)) {
			ret = PTR_ERR(sb);
			goto out;
		}
		if (sb) {
			f2fs_stop_checkpoint(sbi, false);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	stop_gc_thread(sbi);
	stop_discard_thread(sbi);

	drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(inode->i_sb))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(file_inode(filp)->i_sb))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(inode->i_sb))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
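	/* sb_lock serialized salt generation with the superblock commit above */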
	mnt_drop_write_file(filp);
	return err;
}

static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	end = range.start + range.len;
	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
		ret = -EINVAL;
		goto out;
	}
do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += sbi->blocks_per_seg;
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
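	 * (a cached extent covering the whole range means it is contiguous)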
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are contiguous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are contiguous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of blocks that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	sec_num = (total + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running the defragmenter in SSR mode when free sections are
	 * being consumed intensively.
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}

static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
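
/*
 * Example (illustrative sketch, not part of this file): how userspace might
 * request defragmentation of the first 16MB of a file. struct f2fs_defragment
 * and F2FS_IOC_DEFRAGMENT are assumed to be available to the caller (copied
 * from fs/f2fs/f2fs.h); on success the kernel writes back the number of bytes
 * it actually moved into range.len.
 *
 *	struct f2fs_defragment range = {
 *		.start = 0,		// must be block-aligned
 *		.len = 16 << 20,	// must be block-aligned
 *	};
 *
 *	if (ioctl(fd, F2FS_IOC_DEFRAGMENT, &range) == 0)
 *		printf("moved %llu bytes\n",
 *		       (unsigned long long)range.len);
 */
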
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	down_write(&F2FS_I(src)->dio_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
		if (!down_write_trylock(&F2FS_I(dst)->dio_rwsem[WRITE])) {
			inode_unlock(dst);
			goto out;
		}
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);
out_unlock:
	if (src != dst) {
		up_write(&F2FS_I(dst)->dio_rwsem[WRITE]);
		inode_unlock(dst);
	}
out:
	up_write(&F2FS_I(src)->dio_rwsem[WRITE]);
	inode_unlock(src);
	return ret;
}

static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}
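
/*
 * Example (illustrative sketch, not part of this file): exchanging one
 * block-aligned 4KB range between two files via F2FS_IOC_MOVE_RANGE. The
 * source fd must be open read/write and the destination fd writable, as
 * checked above; src_fd and dst_fd are placeholders supplied by the caller.
 *
 *	struct f2fs_move_range range = {
 *		.dst_fd = dst_fd,
 *		.pos_in = 0,
 *		.pos_out = 0,
 *		.len = 4096,
 *	};
 *
 *	if (ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &range) < 0)
 *		perror("F2FS_IOC_MOVE_RANGE");
 */
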
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
			sbi->segs_per_sec != 1) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Can't flush %u in %d for segs_per_sec %u != 1\n",
			range.dev_num, sbi->s_ndevs,
			sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/*
	 * Always advertise atomic write support: SQLite on Android probes
	 * this feature bit and relies on the atomic-write behavior.
	 */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
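
/*
 * Example (illustrative sketch, not part of this file): reading the feature
 * bitmap from userspace. F2FS_IOC_GET_FEATURES and the F2FS_FEATURE_* bits
 * are assumed to be visible to the caller (copied from fs/f2fs/f2fs.h).
 *
 *	__u32 features;
 *
 *	if (ioctl(fd, F2FS_IOC_GET_FEATURES, &features) == 0 &&
 *	    (features & F2FS_FEATURE_ATOMIC_WRITE))
 *		printf("atomic writes supported\n");
 */
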
#ifdef CONFIG_QUOTA
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sb)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = -EPERM;
	inode_lock(inode);

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		goto out_unlock;

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out_unlock;
	}

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		goto out_unlock;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		goto out_unlock;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		dqput(transfer_to[PRJQUOTA]);
		if (err)
			goto out_dirty;
	}

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
out_dirty:
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
#else
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif

/* Transfer internal flags to xflags */
static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags)
{
	__u32 xflags = 0;

	if (iflags & FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	if (iflags & FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	if (iflags & FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	if (iflags & FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	if (iflags & FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	if (iflags & FS_PROJINHERIT_FL)
		xflags |= FS_XFLAG_PROJINHERIT;
	return xflags;
}

#define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
				  FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
				  FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)

/* Flags we can manipulate with through F2FS_IOC_FSSETXATTR */
#define F2FS_FL_XFLAG_VISIBLE		(FS_SYNC_FL | \
					 FS_IMMUTABLE_FL | \
					 FS_APPEND_FL | \
					 FS_NODUMP_FL | \
					 FS_NOATIME_FL | \
					 FS_PROJINHERIT_FL)

/* Transfer xflags flags to internal */
static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
{
	unsigned long iflags = 0;

	if (xflags & FS_XFLAG_SYNC)
		iflags |= FS_SYNC_FL;
	if (xflags & FS_XFLAG_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (xflags & FS_XFLAG_APPEND)
		iflags |= FS_APPEND_FL;
	if (xflags & FS_XFLAG_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (xflags & FS_XFLAG_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (xflags & FS_XFLAG_PROJINHERIT)
		iflags |= FS_PROJINHERIT_FL;

	return iflags;
}

static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;

	memset(&fa, 0, sizeof(struct fsxattr));
	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
				(FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL));

	if (f2fs_sb_has_project_quota(inode->i_sb))
		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
							fi->i_projid);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;
	unsigned int flags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS)
		return -EOPNOTSUPP;

	flags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, flags) != flags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);
	flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
				(flags & F2FS_FL_XFLAG_VISIBLE);
	err = __f2fs_ioc_setflags(inode, flags);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
	if (err)
		return err;

	return 0;
}
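
/*
 * Example (illustrative sketch, not part of this file): tagging a directory
 * with project ID 42 through the generic FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR
 * interface, which lands in the handlers above. struct fsxattr and the
 * FS_XFLAG_* bits come from <linux/fs.h>; dirfd is a placeholder fd opened
 * on a directory in the f2fs mount (project inheritance only applies to
 * directories).
 *
 *	struct fsxattr fa;
 *
 *	if (ioctl(dirfd, FS_IOC_FSGETXATTR, &fa) == 0) {
 *		fa.fsx_projid = 42;
 *		fa.fsx_xflags |= FS_XFLAG_PROJINHERIT;
 *		if (ioctl(dirfd, FS_IOC_FSSETXATTR, &fa) < 0)
 *			perror("FS_IOC_FSSETXATTR");
 *	}
 */
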
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for a normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode, fi->i_gc_failures + 1);

	if (fi->i_gc_failures > sbi->gc_pin_file_threshold) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: Enable GC = ino %lx after %x GC trials\n",
			__func__, inode->i_ino, fi->i_gc_failures);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}

static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		F2FS_I(inode)->i_gc_failures = 1;
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures;
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures;
	return put_user(pin, (u32 __user *)arg);
}
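
/*
 * Example (illustrative sketch, not part of this file): pinning a file so
 * the cleaner will not migrate its blocks, then reading back how many GC
 * trials have hit it. F2FS_IOC_SET_PIN_FILE / F2FS_IOC_GET_PIN_FILE are
 * assumed to be visible to the caller (copied from fs/f2fs/f2fs.h).
 *
 *	__u32 pin = 1, failures = 0;
 *
 *	if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin) < 0)
 *		perror("F2FS_IOC_SET_PIN_FILE");
 *	else if (ioctl(fd, F2FS_IOC_GET_PIN_FILE, &failures) == 0)
 *		printf("pinned, %u GC failures so far\n", failures);
 */
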
int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	end = F2FS_I_SB(inode)->max_file_blocks;

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->dio_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->dio_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return err;
}

static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}
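
/*
 * Example (illustrative sketch, not part of this file): asking the kernel to
 * walk a file's block mappings up front so they are populated in the extent
 * cache before later lookups. The ioctl takes no argument.
 *
 *	if (ioctl(fd, F2FS_IOC_PRECACHE_EXTENTS) < 0)
 *		perror("F2FS_IOC_PRECACHE_EXTENTS");
 */
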
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	default:
		return -ENOTTY;
	}
}

static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct blk_plug plug;
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
		return -EINVAL;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT) &&
			(iocb->ki_flags & IOCB_DIRECT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, WRITE)) {
				inode_unlock(inode);
				return -EAGAIN;
			}

		} else {
			preallocated = true;
			target_size = iocb->ki_pos + iov_iter_count(from);

			err = f2fs_preallocate_blocks(iocb, from);
			if (err) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				return err;
			}
		}
		blk_start_plug(&plug);
		ret = __generic_file_write_iter(iocb, from);
		blk_finish_plug(&plug);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, deallocate the preallocated blocks */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};