// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}

static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn = { .node_changed = false };
	int err;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/* block allocation */
	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_block(&dn, page->index);
	f2fs_put_dnode(&dn);
	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	f2fs_balance_fs(sbi, dn.node_changed);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
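
/*
 * Fsync helper: recover the parent inode number of @inode from any
 * dentry alias still present in the dcache. Returns 1 and stores the
 * parent's ino in *pino on success, 0 if the inode has no alias.
 */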
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* we still need to catch any pending inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
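
/*
 * Core of fsync()/fdatasync(): write back dirty data pages, then decide
 * (via need_do_checkpoint) between a full checkpoint and the cheaper
 * roll-forward path that only syncs this inode's node pages and leaves
 * a fsync mark in the node chain for recovery.
 */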
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() must be recoverable after a
	 * sudden power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following
		 * pino will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid an infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's an atomic write, keeping write ordering is enough, so we
	 * don't need to wait for node write completion here: the node chain
	 * serializes node blocks, and if any node write is reordered,
	 * recovery simply sees a broken chain and stops roll-forward there.
	 * That means we recover either all or none of the node blocks up to
	 * the fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, we don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}
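
/*
 * Helpers for lseek(SEEK_DATA)/lseek(SEEK_HOLE): dirty pages that are
 * not yet allocated on disk (NEW_ADDR) must still be reported as data,
 * so the first dirty page index is looked up in the page cache first.
 */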
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct page *page;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
				      1, &page);
	if (!nr_pages)
		return ULONG_MAX;
	pgofs = page->index;
	put_page(page);
	return pgofs;
}

static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
				pgoff_t dirty, pgoff_t pgofs, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			__is_valid_data_blkaddr(blkaddr))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}
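
/*
 * Invalidate up to @count block addresses in the dnode starting at
 * dn->ofs_in_node: clear each valid blkaddr, drop the matching extent
 * cache range, and return the freed blocks to free-space accounting.
 */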
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
			continue;

		f2fs_invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
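
/*
 * Top-level truncate for regular files, directories and symlinks:
 * converts away inline data when the inode may no longer keep it,
 * then frees all blocks past i_size and marks the inode dirty.
 */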
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timestamp_truncate(attr->ia_atime, inode);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timestamp_truncate(attr->ia_mtime, inode);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timestamp_truncate(attr->ia_ctime, inode);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
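
/*
 * ->setattr: size changes are serialized against GC and mmap faults via
 * i_gc_rwsem[WRITE] and i_mmap_sem; uid/gid changes transfer quota under
 * lock_op() so the dquot and the inode are updated atomically.
 */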
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
			!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
			!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * convert the inline inode before i_size_write() so
			 * that an inode keeping the inline flag never exceeds
			 * the inline_data size.
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		down_write(&F2FS_I(inode)->i_sem);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		up_write(&F2FS_I(inode)->i_sem);
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};
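
/*
 * Zero @len bytes at @start inside block @index through the page cache;
 * used to clear the partial head/tail blocks of punch-hole and
 * zero-range requests.
 */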
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}
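
/*
 * The next four helpers implement block exchange for collapse/insert
 * range: __read_out_blkaddrs() snapshots and detaches the source block
 * addresses, __clone_blkaddrs() rebinds or copies them at the
 * destination, and __roll_back_blkaddrs() restores the source on error.
 */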
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) - dn.ofs_in_node,
									len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;
		}

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -EOPNOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}

static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}
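
/*
 * FALLOC_FL_COLLAPSE_RANGE implementation: with GC and mmap faults
 * blocked, shift every block after the collapsed range leftwards by
 * exchanging it to its new position, then truncate the tail.
 */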
static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = f2fs_truncate_blocks(inode, new_size, true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
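
/*
 * Reserve and zero out the blocks of [start, end) within one dnode:
 * NULL_ADDR slots are newly reserved, already-written slots are
 * invalidated and reset to NEW_ADDR so they read back as zeroes.
 */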
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}
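
/*
 * FALLOC_FL_INSERT_RANGE: shift all blocks from @offset rightwards by
 * @len (block-aligned), working from the end of file back towards
 * @offset so that source and destination ranges never overlap within
 * a single exchange pass.
 */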
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
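
/*
 * Default fallocate() mode: preallocate blocks for [offset, offset+len)
 * with f2fs_map_blocks(); pinned files are allocated from cold data
 * segments instead of the default log.
 */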
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	if (f2fs_is_pinned_file(inode))
		map.m_seg_type = CURSEG_COLD_DATA;

	err = f2fs_map_blocks(inode, &map, 1, (f2fs_is_pinned_file(inode) ?
						F2FS_GET_BLOCK_PRE_DIO :
						F2FS_GET_BLOCK_PRE_AIO));
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
		return -ENOSPC;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
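
/*
 * Illustrative userspace view (not part of this file): the mode checks
 * above mirror the fallocate(2) contract, e.g.
 *
 *	fallocate(fd, 0, off, len);				expand
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *		      FALLOC_FL_KEEP_SIZE, off, len);		punch hole
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, off, len);	collapse
 */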
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called on every close. So we should not drop
	 * any in-memory pages because of a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* some remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashed, we should roll back.
	 * Otherwise, other readers/writers could see a corrupted database
	 * until all the writers close the file. Since this has to happen
	 * before the file lock is dropped, it is done in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}

static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	/* Is it a quota file? Do not allow the user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	if ((iflags ^ fi->i_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if (!f2fs_empty_dir(inode))
			return -ENOTEMPTY;
	}

	fi->i_flags = iflags | (fi->i_flags & ~mask);

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}

/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 */

static const struct {
	u32 iflag;
	u32 fsflag;
} f2fs_fsflags_map[] = {
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
};

#define F2FS_GETTABLE_FS_FL (		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_INDEX_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_ENCRYPT_FL |		\
		FS_INLINE_DATA_FL |	\
		FS_NOCOW_FL |		\
		FS_VERITY_FL |		\
		FS_CASEFOLD_FL)

#define F2FS_SETTABLE_FS_FL (		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_CASEFOLD_FL)

/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
{
	u32 fsflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (iflags & f2fs_fsflags_map[i].iflag)
			fsflags |= f2fs_fsflags_map[i].fsflag;

	return fsflags;
}

/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (fsflags & f2fs_fsflags_map[i].fsflag)
			iflags |= f2fs_fsflags_map[i].iflag;

	return iflags;
}
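
/*
 * Illustrative userspace view (not part of this file): these handlers
 * back the generic attribute ioctls, e.g.
 *
 *	int attr;
 *	ioctl(fd, FS_IOC_GETFLAGS, &attr);
 *	attr |= FS_NODUMP_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &attr);
 */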
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);

	if (IS_ENCRYPTED(inode))
		fsflags |= FS_ENCRYPT_FL;
	if (IS_VERITY(inode))
		fsflags |= FS_VERITY_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		fsflags |= FS_INLINE_DATA_FL;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		fsflags |= FS_NOCOW_FL;

	fsflags &= F2FS_GETTABLE_FS_FL;

	return put_user(fsflags, (int __user *)arg);
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags, old_fsflags;
	u32 iflags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(fsflags, (int __user *)arg))
		return -EFAULT;

	if (fsflags & ~F2FS_GETTABLE_FS_FL)
		return -EOPNOTSUPP;
	fsflags &= F2FS_SETTABLE_FS_FL;

	iflags = f2fs_fsflags_to_iflags(fsflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
	if (ret)
		goto out;

	ret = f2fs_setflags_common(inode, iflags,
			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
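
/*
 * F2FS_IOC_START_ATOMIC_WRITE: stage subsequent writes in memory until
 * F2FS_IOC_COMMIT_ATOMIC_WRITE makes them durable as one unit (used,
 * for example, by SQLite on Android). Illustrative caller sequence
 * (not part of this file):
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	write(fd, buf, len);
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 */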
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DIRECT)
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/*
	 * Should wait for end_io so that F2FS_WB_CP_DATA is counted
	 * correctly by f2fs_is_atomic_file.
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}

	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	/* add inode in inmem_list first and set atomic_file */
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret)
			f2fs_drop_inmem_pages(inode);
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
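
/*
 * Volatile writes (historically used for in-memory database journals):
 * dirty data of a volatile file is kept in the page cache and can be
 * dropped wholesale when the file is released or aborted, rather than
 * being written back by normal means.
 */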
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (IS_ERR(sb)) {
			ret = PTR_ERR(sb);
			goto out;
		}
		if (sb) {
			f2fs_stop_checkpoint(sbi, false);
			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		goto out;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);

	return ret;
}
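
/*
 * FITRIM: discard free space in the given range. Illustrative userspace
 * view (not part of this file):
 *
 *	struct fstrim_range r = { .start = 0, .len = ULLONG_MAX };
 *	ioctl(fd, FITRIM, &r);	 r.len returns the trimmed byte count
 */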
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
			   q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(sbi))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}

static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
					     unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
}

static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
						    unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_key_status(struct file *filp,
					      unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
}
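/*
 * F2FS_IOC_GARBAGE_COLLECT: trigger one foreground GC pass.  With sync == 0
 * the caller only opportunistically grabs gc_mutex (returning -EBUSY if GC
 * is already running); with sync != 0 it blocks until the mutex is held.
 * Illustrative userspace call, assuming the UAPI macro is available:
 *
 *	__u32 sync = 1;
 *	ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
 */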
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range.start + range.len;
	if (end < range.start || range.start < MAIN_BLKADDR(sbi) ||
					end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += BLKS_PER_SEC(sbi);
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
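/*
 * Defragmentation works in two passes: first walk the mapping to decide
 * whether the range is fragmented at all (and how many blocks would move),
 * then dirty every mapped page in segment-sized batches so that writeback
 * reallocates them contiguously via out-of-place updates.
 */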
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE,
					.m_may_create = false };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of blocks that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented) {
		total = 0;
		goto out;
	}

	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running defragmentation in SSR mode when free sections are
	 * being consumed intensively.
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			goto check;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;
check:
		if (map.m_lblk < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
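/*
 * Illustrative userspace invocation of the range defragmenter below; the
 * field names follow struct f2fs_defragment (start/len in bytes, both
 * block-aligned), and "fd" is assumed to be an open regular file on f2fs:
 *
 *	struct f2fs_defragment df = { .start = 0, .len = 1 << 20 };
 *	ioctl(fd, F2FS_IOC_DEFRAGMENT, &df);	// df.len returns bytes moved
 */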
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}

static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
			goto out_src;
	}

	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);

	if (src != dst)
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}
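/*
 * F2FS_IOC_MOVE_RANGE: wire a second fd plus in/out offsets from struct
 * f2fs_move_range into f2fs_move_file_range() above.  Both files must be
 * regular, unencrypted, block-aligned, and live on the same f2fs instance.
 */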
static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}

static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return -EINVAL;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
			__is_large_section(sbi)) {
		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}
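/*
 * F2FS_IOC_GET_FEATURES: report the superblock feature word to userspace.
 * ATOMIC_WRITE is always advertised on top of the on-disk bits, which is
 * what lets SQLite on Android detect and use the atomic-write ioctls.
 */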
 */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}

#ifdef CONFIG_QUOTA
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	int err = 0;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		if (err)
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
		dqput(transfer_to[PRJQUOTA]);
	}
	return err;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sbi)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return err;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		return err;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		return err;

	f2fs_lock_op(sbi);
	err = f2fs_transfer_project_quota(inode, kprojid);
	if (err)
		goto out_unlock;

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	f2fs_unlock_op(sbi);
	return err;
}
#else
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	return 0;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif
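/*
 * Project quota note: f2fs_ioc_setproject() above charges the inode to a
 * new project via __dquot_transfer() under f2fs_lock_op(); a failed
 * transfer marks SBI_QUOTA_NEED_REPAIR rather than leaving quota usage
 * silently skewed.
 */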
/* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
 */

static const struct {
	u32 iflag;
	u32 xflag;
} f2fs_xflags_map[] = {
	{ F2FS_SYNC_FL,		FS_XFLAG_SYNC },
	{ F2FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
	{ F2FS_APPEND_FL,	FS_XFLAG_APPEND },
	{ F2FS_NODUMP_FL,	FS_XFLAG_NODUMP },
	{ F2FS_NOATIME_FL,	FS_XFLAG_NOATIME },
	{ F2FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
};

#define F2FS_SUPPORTED_XFLAGS (		\
		FS_XFLAG_SYNC |		\
		FS_XFLAG_IMMUTABLE |	\
		FS_XFLAG_APPEND |	\
		FS_XFLAG_NODUMP |	\
		FS_XFLAG_NOATIME |	\
		FS_XFLAG_PROJINHERIT)

/* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
static inline u32 f2fs_iflags_to_xflags(u32 iflags)
{
	u32 xflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (iflags & f2fs_xflags_map[i].iflag)
			xflags |= f2fs_xflags_map[i].xflag;

	return xflags;
}

/* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
static inline u32 f2fs_xflags_to_iflags(u32 xflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (xflags & f2fs_xflags_map[i].xflag)
			iflags |= f2fs_xflags_map[i].iflag;

	return iflags;
}

static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));

	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
		fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
}

static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa;

	f2fs_fill_fsxattr(inode, &fa);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa, old_fa;
	u32 iflags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
		return -EOPNOTSUPP;

	iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);

	f2fs_fill_fsxattr(inode, &old_fa);
	err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
	if (err)
		goto out;

	err = f2fs_setflags_common(inode, iflags,
			f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
	if (err)
		goto out;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
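/*
 * File pinning keeps a file's blocks from being migrated by GC.  The pin is
 * advisory: i_gc_failures[GC_FAILURE_PIN] counts how often GC ran into the
 * pinned file, and once it exceeds gc_pin_file_threshold the pin is dropped
 * so cleaning can make progress (see f2fs_pin_file_control() below).
 */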
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);

	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
			  __func__, inode->i_ino,
			  fi->i_gc_failures[GC_FAILURE_PIN]);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}

static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		f2fs_i_gc_failures_write(inode, 0);
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
	return put_user(pin, (u32 __user *)arg);
}

int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	end = F2FS_I_SB(inode)->max_file_blocks;

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return err;
}

static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}

static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 block_count;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&block_count, (void __user *)arg,
			   sizeof(block_count)))
		return -EFAULT;

	ret = f2fs_resize_fs(sbi, block_count);

	return ret;
}
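/*
 * fs-verity ioctls are thin wrappers: feature checks and timestamp
 * bookkeeping happen here, while fs/verity/ performs the actual
 * Merkle-tree build and measurement.
 */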
static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
		f2fs_warn(F2FS_I_SB(inode),
			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
			  inode->i_ino);
		return -EOPNOTSUPP;
	}

	return fsverity_ioctl_enable(filp, (const void __user *)arg);
}

static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fsverity_ioctl_measure(filp, (void __user *)arg);
}

static int f2fs_get_volume_name(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int count;
	int err = 0;

	vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
	if (!vbuf)
		return -ENOMEM;

	down_read(&sbi->sb_lock);
	count = utf16s_to_utf8s(sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name),
			UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
	up_read(&sbi->sb_lock);

	if (copy_to_user((char __user *)arg, vbuf,
				min(FSLABEL_MAX, count)))
		err = -EFAULT;

	kvfree(vbuf);
	return err;
}

static int f2fs_set_volume_name(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
	if (IS_ERR(vbuf))
		return PTR_ERR(vbuf);

	err = mnt_want_write_file(filp);
	if (err)
		goto out;

	down_write(&sbi->sb_lock);

	memset(sbi->raw_super->volume_name, 0,
			sizeof(sbi->raw_super->volume_name));
	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
			sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name));

	err = f2fs_commit_super(sbi, false);

	up_write(&sbi->sb_lock);

	mnt_drop_write_file(filp);
out:
	kfree(vbuf);
	return err;
}
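/*
 * Top-level ioctl dispatcher.  Every command is rejected up front when a
 * checkpoint error has been seen (-EIO) or no checkpoint can be written
 * (-ENOSPC), so individual handlers never run against a filesystem in
 * checkpoint failure.
 */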
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
		return -ENOSPC;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
	case FS_IOC_ADD_ENCRYPTION_KEY:
		return f2fs_ioc_add_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
		return f2fs_ioc_remove_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
		return f2fs_ioc_get_encryption_key_status(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	case F2FS_IOC_RESIZE_FS:
		return f2fs_ioc_resize_fs(filp, arg);
	case FS_IOC_ENABLE_VERITY:
		return f2fs_ioc_enable_verity(filp, arg);
	case FS_IOC_MEASURE_VERITY:
		return f2fs_ioc_measure_verity(filp, arg);
	case F2FS_IOC_GET_VOLUME_NAME:
		return f2fs_get_volume_name(filp, arg);
	case F2FS_IOC_SET_VOLUME_NAME:
		return f2fs_set_volume_name(filp, arg);
	default:
		return -ENOTTY;
	}
}
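/*
 * Buffered/direct write entry point.  For IOCB_NOWAIT both the inode lock
 * and the overwrite check must succeed without blocking, otherwise -EAGAIN
 * is returned; for blocking writes, blocks are preallocated first and
 * trimmed back with f2fs_truncate() if the write ends up short.
 */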
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		ret = -EIO;
		goto out;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode)) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, iocb, from)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = -EAGAIN;
				goto out;
			}
		} else {
			preallocated = true;
			target_size = iocb->ki_pos + iov_iter_count(from);

			err = f2fs_preallocate_blocks(iocb, from);
			if (err) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = err;
				goto out;
			}
		}
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);
out:
	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
					iov_iter_count(from), ret);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
	case FS_IOC_ADD_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
	case F2FS_IOC_RESIZE_FS:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
	case F2FS_IOC_GET_VOLUME_NAME:
	case F2FS_IOC_SET_VOLUME_NAME:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};