1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * fs/f2fs/file.c 4 * 5 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 6 * http://www.samsung.com/ 7 */ 8 #include <linux/fs.h> 9 #include <linux/f2fs_fs.h> 10 #include <linux/stat.h> 11 #include <linux/buffer_head.h> 12 #include <linux/writeback.h> 13 #include <linux/blkdev.h> 14 #include <linux/falloc.h> 15 #include <linux/types.h> 16 #include <linux/compat.h> 17 #include <linux/uaccess.h> 18 #include <linux/mount.h> 19 #include <linux/pagevec.h> 20 #include <linux/uio.h> 21 #include <linux/uuid.h> 22 #include <linux/file.h> 23 #include <linux/nls.h> 24 #include <linux/sched/signal.h> 25 26 #include "f2fs.h" 27 #include "node.h" 28 #include "segment.h" 29 #include "xattr.h" 30 #include "acl.h" 31 #include "gc.h" 32 #include <trace/events/f2fs.h> 33 #include <uapi/linux/f2fs.h> 34 35 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf) 36 { 37 struct inode *inode = file_inode(vmf->vma->vm_file); 38 vm_fault_t ret; 39 40 down_read(&F2FS_I(inode)->i_mmap_sem); 41 ret = filemap_fault(vmf); 42 up_read(&F2FS_I(inode)->i_mmap_sem); 43 44 if (!ret) 45 f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO, 46 F2FS_BLKSIZE); 47 48 trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret); 49 50 return ret; 51 } 52 53 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) 54 { 55 struct page *page = vmf->page; 56 struct inode *inode = file_inode(vmf->vma->vm_file); 57 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 58 struct dnode_of_data dn; 59 bool need_alloc = true; 60 int err = 0; 61 62 if (unlikely(IS_IMMUTABLE(inode))) 63 return VM_FAULT_SIGBUS; 64 65 if (unlikely(f2fs_cp_error(sbi))) { 66 err = -EIO; 67 goto err; 68 } 69 70 if (!f2fs_is_checkpoint_ready(sbi)) { 71 err = -ENOSPC; 72 goto err; 73 } 74 75 err = f2fs_convert_inline_inode(inode); 76 if (err) 77 goto err; 78 79 #ifdef CONFIG_F2FS_FS_COMPRESSION 80 if (f2fs_compressed_file(inode)) { 81 int ret = f2fs_is_compressed_cluster(inode, page->index); 82 83 if (ret < 0) { 84 err = ret; 85 goto err; 86 } else if (ret) { 87 if (ret < F2FS_I(inode)->i_cluster_size) { 88 err = -EAGAIN; 89 goto err; 90 } 91 need_alloc = false; 92 } 93 } 94 #endif 95 /* should do out of any locked page */ 96 if (need_alloc) 97 f2fs_balance_fs(sbi, true); 98 99 sb_start_pagefault(inode->i_sb); 100 101 f2fs_bug_on(sbi, f2fs_has_inline_data(inode)); 102 103 file_update_time(vmf->vma->vm_file); 104 down_read(&F2FS_I(inode)->i_mmap_sem); 105 lock_page(page); 106 if (unlikely(page->mapping != inode->i_mapping || 107 page_offset(page) > i_size_read(inode) || 108 !PageUptodate(page))) { 109 unlock_page(page); 110 err = -EFAULT; 111 goto out_sem; 112 } 113 114 if (need_alloc) { 115 /* block allocation */ 116 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true); 117 set_new_dnode(&dn, inode, NULL, NULL, 0); 118 err = f2fs_get_block(&dn, page->index); 119 f2fs_put_dnode(&dn); 120 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false); 121 } 122 123 #ifdef CONFIG_F2FS_FS_COMPRESSION 124 if (!need_alloc) { 125 set_new_dnode(&dn, inode, NULL, NULL, 0); 126 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE); 127 f2fs_put_dnode(&dn); 128 } 129 #endif 130 if (err) { 131 unlock_page(page); 132 goto out_sem; 133 } 134 135 f2fs_wait_on_page_writeback(page, DATA, false, true); 136 137 /* wait for GCed page writeback via META_MAPPING */ 138 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr); 139 140 /* 141 * check to see if the page is mapped already (no holes) 142 */ 143 if (PageMappedToDisk(page)) 144 goto out_sem; 145 146 /* page is wholly or partially inside EOF */ 147 if (((loff_t)(page->index + 1) << PAGE_SHIFT) > 148 i_size_read(inode)) { 149 loff_t offset; 150 151 offset = i_size_read(inode) & ~PAGE_MASK; 152 zero_user_segment(page, offset, PAGE_SIZE); 153 } 154 set_page_dirty(page); 155 if (!PageUptodate(page)) 156 SetPageUptodate(page); 157 158 f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE); 159 f2fs_update_time(sbi, REQ_TIME); 160 161 trace_f2fs_vm_page_mkwrite(page, DATA); 162 out_sem: 163 up_read(&F2FS_I(inode)->i_mmap_sem); 164 165 sb_end_pagefault(inode->i_sb); 166 err: 167 return block_page_mkwrite_return(err); 168 } 169 170 static const struct vm_operations_struct f2fs_file_vm_ops = { 171 .fault = f2fs_filemap_fault, 172 .map_pages = filemap_map_pages, 173 .page_mkwrite = f2fs_vm_page_mkwrite, 174 }; 175 176 static int get_parent_ino(struct inode *inode, nid_t *pino) 177 { 178 struct dentry *dentry; 179 180 /* 181 * Make sure to get the non-deleted alias. The alias associated with 182 * the open file descriptor being fsync()'ed may be deleted already. 183 */ 184 dentry = d_find_alias(inode); 185 if (!dentry) 186 return 0; 187 188 *pino = parent_ino(dentry); 189 dput(dentry); 190 return 1; 191 } 192 193 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode) 194 { 195 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 196 enum cp_reason_type cp_reason = CP_NO_NEEDED; 197 198 if (!S_ISREG(inode->i_mode)) 199 cp_reason = CP_NON_REGULAR; 200 else if (f2fs_compressed_file(inode)) 201 cp_reason = CP_COMPRESSED; 202 else if (inode->i_nlink != 1) 203 cp_reason = CP_HARDLINK; 204 else if (is_sbi_flag_set(sbi, SBI_NEED_CP)) 205 cp_reason = CP_SB_NEED_CP; 206 else if (file_wrong_pino(inode)) 207 cp_reason = CP_WRONG_PINO; 208 else if (!f2fs_space_for_roll_forward(sbi)) 209 cp_reason = CP_NO_SPC_ROLL; 210 else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino)) 211 cp_reason = CP_NODE_NEED_CP; 212 else if (test_opt(sbi, FASTBOOT)) 213 cp_reason = CP_FASTBOOT_MODE; 214 else if (F2FS_OPTION(sbi).active_logs == 2) 215 cp_reason = CP_SPEC_LOG_NUM; 216 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT && 217 f2fs_need_dentry_mark(sbi, inode->i_ino) && 218 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino, 219 TRANS_DIR_INO)) 220 cp_reason = CP_RECOVER_DIR; 221 222 return cp_reason; 223 } 224 225 static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino) 226 { 227 struct page *i = find_get_page(NODE_MAPPING(sbi), ino); 228 bool ret = false; 229 /* But we need to avoid that there are some inode updates */ 230 if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino)) 231 ret = true; 232 f2fs_put_page(i, 0); 233 return ret; 234 } 235 236 static void try_to_fix_pino(struct inode *inode) 237 { 238 struct f2fs_inode_info *fi = F2FS_I(inode); 239 nid_t pino; 240 241 down_write(&fi->i_sem); 242 if (file_wrong_pino(inode) && inode->i_nlink == 1 && 243 get_parent_ino(inode, &pino)) { 244 f2fs_i_pino_write(inode, pino); 245 file_got_pino(inode); 246 } 247 up_write(&fi->i_sem); 248 } 249 250 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end, 251 int datasync, bool atomic) 252 { 253 struct inode *inode = file->f_mapping->host; 254 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 255 nid_t ino = inode->i_ino; 256 int ret = 0; 257 enum cp_reason_type cp_reason = 0; 258 struct writeback_control wbc = { 259 .sync_mode = WB_SYNC_ALL, 260 .nr_to_write = LONG_MAX, 261 .for_reclaim = 0, 262 }; 263 unsigned int seq_id = 0; 264 265 if (unlikely(f2fs_readonly(inode->i_sb) || 266 is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 267 return 0; 268 269 trace_f2fs_sync_file_enter(inode); 270 271 if (S_ISDIR(inode->i_mode)) 272 goto go_write; 273 274 /* if fdatasync is triggered, let's do in-place-update */ 275 if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks) 276 set_inode_flag(inode, FI_NEED_IPU); 277 ret = file_write_and_wait_range(file, start, end); 278 clear_inode_flag(inode, FI_NEED_IPU); 279 280 if (ret) { 281 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret); 282 return ret; 283 } 284 285 /* if the inode is dirty, let's recover all the time */ 286 if (!f2fs_skip_inode_update(inode, datasync)) { 287 f2fs_write_inode(inode, NULL); 288 goto go_write; 289 } 290 291 /* 292 * if there is no written data, don't waste time to write recovery info. 293 */ 294 if (!is_inode_flag_set(inode, FI_APPEND_WRITE) && 295 !f2fs_exist_written_data(sbi, ino, APPEND_INO)) { 296 297 /* it may call write_inode just prior to fsync */ 298 if (need_inode_page_update(sbi, ino)) 299 goto go_write; 300 301 if (is_inode_flag_set(inode, FI_UPDATE_WRITE) || 302 f2fs_exist_written_data(sbi, ino, UPDATE_INO)) 303 goto flush_out; 304 goto out; 305 } 306 go_write: 307 /* 308 * Both of fdatasync() and fsync() are able to be recovered from 309 * sudden-power-off. 310 */ 311 down_read(&F2FS_I(inode)->i_sem); 312 cp_reason = need_do_checkpoint(inode); 313 up_read(&F2FS_I(inode)->i_sem); 314 315 if (cp_reason) { 316 /* all the dirty node pages should be flushed for POR */ 317 ret = f2fs_sync_fs(inode->i_sb, 1); 318 319 /* 320 * We've secured consistency through sync_fs. Following pino 321 * will be used only for fsynced inodes after checkpoint. 322 */ 323 try_to_fix_pino(inode); 324 clear_inode_flag(inode, FI_APPEND_WRITE); 325 clear_inode_flag(inode, FI_UPDATE_WRITE); 326 goto out; 327 } 328 sync_nodes: 329 atomic_inc(&sbi->wb_sync_req[NODE]); 330 ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id); 331 atomic_dec(&sbi->wb_sync_req[NODE]); 332 if (ret) 333 goto out; 334 335 /* if cp_error was enabled, we should avoid infinite loop */ 336 if (unlikely(f2fs_cp_error(sbi))) { 337 ret = -EIO; 338 goto out; 339 } 340 341 if (f2fs_need_inode_block_update(sbi, ino)) { 342 f2fs_mark_inode_dirty_sync(inode, true); 343 f2fs_write_inode(inode, NULL); 344 goto sync_nodes; 345 } 346 347 /* 348 * If it's atomic_write, it's just fine to keep write ordering. So 349 * here we don't need to wait for node write completion, since we use 350 * node chain which serializes node blocks. If one of node writes are 351 * reordered, we can see simply broken chain, resulting in stopping 352 * roll-forward recovery. It means we'll recover all or none node blocks 353 * given fsync mark. 354 */ 355 if (!atomic) { 356 ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id); 357 if (ret) 358 goto out; 359 } 360 361 /* once recovery info is written, don't need to tack this */ 362 f2fs_remove_ino_entry(sbi, ino, APPEND_INO); 363 clear_inode_flag(inode, FI_APPEND_WRITE); 364 flush_out: 365 if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) 366 ret = f2fs_issue_flush(sbi, inode->i_ino); 367 if (!ret) { 368 f2fs_remove_ino_entry(sbi, ino, UPDATE_INO); 369 clear_inode_flag(inode, FI_UPDATE_WRITE); 370 f2fs_remove_ino_entry(sbi, ino, FLUSH_INO); 371 } 372 f2fs_update_time(sbi, REQ_TIME); 373 out: 374 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret); 375 return ret; 376 } 377 378 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) 379 { 380 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file))))) 381 return -EIO; 382 return f2fs_do_sync_file(file, start, end, datasync, false); 383 } 384 385 static bool __found_offset(struct address_space *mapping, block_t blkaddr, 386 pgoff_t index, int whence) 387 { 388 switch (whence) { 389 case SEEK_DATA: 390 if (__is_valid_data_blkaddr(blkaddr)) 391 return true; 392 if (blkaddr == NEW_ADDR && 393 xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY)) 394 return true; 395 break; 396 case SEEK_HOLE: 397 if (blkaddr == NULL_ADDR) 398 return true; 399 break; 400 } 401 return false; 402 } 403 404 static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence) 405 { 406 struct inode *inode = file->f_mapping->host; 407 loff_t maxbytes = inode->i_sb->s_maxbytes; 408 struct dnode_of_data dn; 409 pgoff_t pgofs, end_offset; 410 loff_t data_ofs = offset; 411 loff_t isize; 412 int err = 0; 413 414 inode_lock(inode); 415 416 isize = i_size_read(inode); 417 if (offset >= isize) 418 goto fail; 419 420 /* handle inline data case */ 421 if (f2fs_has_inline_data(inode)) { 422 if (whence == SEEK_HOLE) { 423 data_ofs = isize; 424 goto found; 425 } else if (whence == SEEK_DATA) { 426 data_ofs = offset; 427 goto found; 428 } 429 } 430 431 pgofs = (pgoff_t)(offset >> PAGE_SHIFT); 432 433 for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) { 434 set_new_dnode(&dn, inode, NULL, NULL, 0); 435 err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE); 436 if (err && err != -ENOENT) { 437 goto fail; 438 } else if (err == -ENOENT) { 439 /* direct node does not exists */ 440 if (whence == SEEK_DATA) { 441 pgofs = f2fs_get_next_page_offset(&dn, pgofs); 442 continue; 443 } else { 444 goto found; 445 } 446 } 447 448 end_offset = ADDRS_PER_PAGE(dn.node_page, inode); 449 450 /* find data/hole in dnode block */ 451 for (; dn.ofs_in_node < end_offset; 452 dn.ofs_in_node++, pgofs++, 453 data_ofs = (loff_t)pgofs << PAGE_SHIFT) { 454 block_t blkaddr; 455 456 blkaddr = f2fs_data_blkaddr(&dn); 457 458 if (__is_valid_data_blkaddr(blkaddr) && 459 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode), 460 blkaddr, DATA_GENERIC_ENHANCE)) { 461 f2fs_put_dnode(&dn); 462 goto fail; 463 } 464 465 if (__found_offset(file->f_mapping, blkaddr, 466 pgofs, whence)) { 467 f2fs_put_dnode(&dn); 468 goto found; 469 } 470 } 471 f2fs_put_dnode(&dn); 472 } 473 474 if (whence == SEEK_DATA) 475 goto fail; 476 found: 477 if (whence == SEEK_HOLE && data_ofs > isize) 478 data_ofs = isize; 479 inode_unlock(inode); 480 return vfs_setpos(file, data_ofs, maxbytes); 481 fail: 482 inode_unlock(inode); 483 return -ENXIO; 484 } 485 486 static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence) 487 { 488 struct inode *inode = file->f_mapping->host; 489 loff_t maxbytes = inode->i_sb->s_maxbytes; 490 491 if (f2fs_compressed_file(inode)) 492 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS; 493 494 switch (whence) { 495 case SEEK_SET: 496 case SEEK_CUR: 497 case SEEK_END: 498 return generic_file_llseek_size(file, offset, whence, 499 maxbytes, i_size_read(inode)); 500 case SEEK_DATA: 501 case SEEK_HOLE: 502 if (offset < 0) 503 return -ENXIO; 504 return f2fs_seek_block(file, offset, whence); 505 } 506 507 return -EINVAL; 508 } 509 510 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma) 511 { 512 struct inode *inode = file_inode(file); 513 514 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) 515 return -EIO; 516 517 if (!f2fs_is_compress_backend_ready(inode)) 518 return -EOPNOTSUPP; 519 520 file_accessed(file); 521 vma->vm_ops = &f2fs_file_vm_ops; 522 set_inode_flag(inode, FI_MMAP_FILE); 523 return 0; 524 } 525 526 static int f2fs_file_open(struct inode *inode, struct file *filp) 527 { 528 int err = fscrypt_file_open(inode, filp); 529 530 if (err) 531 return err; 532 533 if (!f2fs_is_compress_backend_ready(inode)) 534 return -EOPNOTSUPP; 535 536 err = fsverity_file_open(inode, filp); 537 if (err) 538 return err; 539 540 filp->f_mode |= FMODE_NOWAIT; 541 542 return dquot_file_open(inode, filp); 543 } 544 545 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count) 546 { 547 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); 548 struct f2fs_node *raw_node; 549 int nr_free = 0, ofs = dn->ofs_in_node, len = count; 550 __le32 *addr; 551 int base = 0; 552 bool compressed_cluster = false; 553 int cluster_index = 0, valid_blocks = 0; 554 int cluster_size = F2FS_I(dn->inode)->i_cluster_size; 555 bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks); 556 557 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode)) 558 base = get_extra_isize(dn->inode); 559 560 raw_node = F2FS_NODE(dn->node_page); 561 addr = blkaddr_in_node(raw_node) + base + ofs; 562 563 /* Assumption: truncateion starts with cluster */ 564 for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) { 565 block_t blkaddr = le32_to_cpu(*addr); 566 567 if (f2fs_compressed_file(dn->inode) && 568 !(cluster_index & (cluster_size - 1))) { 569 if (compressed_cluster) 570 f2fs_i_compr_blocks_update(dn->inode, 571 valid_blocks, false); 572 compressed_cluster = (blkaddr == COMPRESS_ADDR); 573 valid_blocks = 0; 574 } 575 576 if (blkaddr == NULL_ADDR) 577 continue; 578 579 dn->data_blkaddr = NULL_ADDR; 580 f2fs_set_data_blkaddr(dn); 581 582 if (__is_valid_data_blkaddr(blkaddr)) { 583 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, 584 DATA_GENERIC_ENHANCE)) 585 continue; 586 if (compressed_cluster) 587 valid_blocks++; 588 } 589 590 if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page)) 591 clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN); 592 593 f2fs_invalidate_blocks(sbi, blkaddr); 594 595 if (!released || blkaddr != COMPRESS_ADDR) 596 nr_free++; 597 } 598 599 if (compressed_cluster) 600 f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false); 601 602 if (nr_free) { 603 pgoff_t fofs; 604 /* 605 * once we invalidate valid blkaddr in range [ofs, ofs + count], 606 * we will invalidate all blkaddr in the whole range. 607 */ 608 fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), 609 dn->inode) + ofs; 610 f2fs_update_extent_cache_range(dn, fofs, 0, len); 611 dec_valid_block_count(sbi, dn->inode, nr_free); 612 } 613 dn->ofs_in_node = ofs; 614 615 f2fs_update_time(sbi, REQ_TIME); 616 trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid, 617 dn->ofs_in_node, nr_free); 618 } 619 620 void f2fs_truncate_data_blocks(struct dnode_of_data *dn) 621 { 622 f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode)); 623 } 624 625 static int truncate_partial_data_page(struct inode *inode, u64 from, 626 bool cache_only) 627 { 628 loff_t offset = from & (PAGE_SIZE - 1); 629 pgoff_t index = from >> PAGE_SHIFT; 630 struct address_space *mapping = inode->i_mapping; 631 struct page *page; 632 633 if (!offset && !cache_only) 634 return 0; 635 636 if (cache_only) { 637 page = find_lock_page(mapping, index); 638 if (page && PageUptodate(page)) 639 goto truncate_out; 640 f2fs_put_page(page, 1); 641 return 0; 642 } 643 644 page = f2fs_get_lock_data_page(inode, index, true); 645 if (IS_ERR(page)) 646 return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page); 647 truncate_out: 648 f2fs_wait_on_page_writeback(page, DATA, true, true); 649 zero_user(page, offset, PAGE_SIZE - offset); 650 651 /* An encrypted inode should have a key and truncate the last page. */ 652 f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode)); 653 if (!cache_only) 654 set_page_dirty(page); 655 f2fs_put_page(page, 1); 656 return 0; 657 } 658 659 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock) 660 { 661 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 662 struct dnode_of_data dn; 663 pgoff_t free_from; 664 int count = 0, err = 0; 665 struct page *ipage; 666 bool truncate_page = false; 667 668 trace_f2fs_truncate_blocks_enter(inode, from); 669 670 free_from = (pgoff_t)F2FS_BLK_ALIGN(from); 671 672 if (free_from >= max_file_blocks(inode)) 673 goto free_partial; 674 675 if (lock) 676 f2fs_lock_op(sbi); 677 678 ipage = f2fs_get_node_page(sbi, inode->i_ino); 679 if (IS_ERR(ipage)) { 680 err = PTR_ERR(ipage); 681 goto out; 682 } 683 684 if (f2fs_has_inline_data(inode)) { 685 f2fs_truncate_inline_inode(inode, ipage, from); 686 f2fs_put_page(ipage, 1); 687 truncate_page = true; 688 goto out; 689 } 690 691 set_new_dnode(&dn, inode, ipage, NULL, 0); 692 err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA); 693 if (err) { 694 if (err == -ENOENT) 695 goto free_next; 696 goto out; 697 } 698 699 count = ADDRS_PER_PAGE(dn.node_page, inode); 700 701 count -= dn.ofs_in_node; 702 f2fs_bug_on(sbi, count < 0); 703 704 if (dn.ofs_in_node || IS_INODE(dn.node_page)) { 705 f2fs_truncate_data_blocks_range(&dn, count); 706 free_from += count; 707 } 708 709 f2fs_put_dnode(&dn); 710 free_next: 711 err = f2fs_truncate_inode_blocks(inode, free_from); 712 out: 713 if (lock) 714 f2fs_unlock_op(sbi); 715 free_partial: 716 /* lastly zero out the first data page */ 717 if (!err) 718 err = truncate_partial_data_page(inode, from, truncate_page); 719 720 trace_f2fs_truncate_blocks_exit(inode, err); 721 return err; 722 } 723 724 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock) 725 { 726 u64 free_from = from; 727 int err; 728 729 #ifdef CONFIG_F2FS_FS_COMPRESSION 730 /* 731 * for compressed file, only support cluster size 732 * aligned truncation. 733 */ 734 if (f2fs_compressed_file(inode)) 735 free_from = round_up(from, 736 F2FS_I(inode)->i_cluster_size << PAGE_SHIFT); 737 #endif 738 739 err = f2fs_do_truncate_blocks(inode, free_from, lock); 740 if (err) 741 return err; 742 743 #ifdef CONFIG_F2FS_FS_COMPRESSION 744 if (from != free_from) { 745 err = f2fs_truncate_partial_cluster(inode, from, lock); 746 if (err) 747 return err; 748 } 749 #endif 750 751 return 0; 752 } 753 754 int f2fs_truncate(struct inode *inode) 755 { 756 int err; 757 758 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) 759 return -EIO; 760 761 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 762 S_ISLNK(inode->i_mode))) 763 return 0; 764 765 trace_f2fs_truncate(inode); 766 767 if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) { 768 f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE); 769 return -EIO; 770 } 771 772 err = dquot_initialize(inode); 773 if (err) 774 return err; 775 776 /* we should check inline_data size */ 777 if (!f2fs_may_inline_data(inode)) { 778 err = f2fs_convert_inline_inode(inode); 779 if (err) 780 return err; 781 } 782 783 err = f2fs_truncate_blocks(inode, i_size_read(inode), true); 784 if (err) 785 return err; 786 787 inode->i_mtime = inode->i_ctime = current_time(inode); 788 f2fs_mark_inode_dirty_sync(inode, false); 789 return 0; 790 } 791 792 int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path, 793 struct kstat *stat, u32 request_mask, unsigned int query_flags) 794 { 795 struct inode *inode = d_inode(path->dentry); 796 struct f2fs_inode_info *fi = F2FS_I(inode); 797 struct f2fs_inode *ri; 798 unsigned int flags; 799 800 if (f2fs_has_extra_attr(inode) && 801 f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) && 802 F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) { 803 stat->result_mask |= STATX_BTIME; 804 stat->btime.tv_sec = fi->i_crtime.tv_sec; 805 stat->btime.tv_nsec = fi->i_crtime.tv_nsec; 806 } 807 808 flags = fi->i_flags; 809 if (flags & F2FS_COMPR_FL) 810 stat->attributes |= STATX_ATTR_COMPRESSED; 811 if (flags & F2FS_APPEND_FL) 812 stat->attributes |= STATX_ATTR_APPEND; 813 if (IS_ENCRYPTED(inode)) 814 stat->attributes |= STATX_ATTR_ENCRYPTED; 815 if (flags & F2FS_IMMUTABLE_FL) 816 stat->attributes |= STATX_ATTR_IMMUTABLE; 817 if (flags & F2FS_NODUMP_FL) 818 stat->attributes |= STATX_ATTR_NODUMP; 819 if (IS_VERITY(inode)) 820 stat->attributes |= STATX_ATTR_VERITY; 821 822 stat->attributes_mask |= (STATX_ATTR_COMPRESSED | 823 STATX_ATTR_APPEND | 824 STATX_ATTR_ENCRYPTED | 825 STATX_ATTR_IMMUTABLE | 826 STATX_ATTR_NODUMP | 827 STATX_ATTR_VERITY); 828 829 generic_fillattr(&init_user_ns, inode, stat); 830 831 /* we need to show initial sectors used for inline_data/dentries */ 832 if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) || 833 f2fs_has_inline_dentry(inode)) 834 stat->blocks += (stat->size + 511) >> 9; 835 836 return 0; 837 } 838 839 #ifdef CONFIG_F2FS_FS_POSIX_ACL 840 static void __setattr_copy(struct user_namespace *mnt_userns, 841 struct inode *inode, const struct iattr *attr) 842 { 843 unsigned int ia_valid = attr->ia_valid; 844 845 if (ia_valid & ATTR_UID) 846 inode->i_uid = attr->ia_uid; 847 if (ia_valid & ATTR_GID) 848 inode->i_gid = attr->ia_gid; 849 if (ia_valid & ATTR_ATIME) 850 inode->i_atime = attr->ia_atime; 851 if (ia_valid & ATTR_MTIME) 852 inode->i_mtime = attr->ia_mtime; 853 if (ia_valid & ATTR_CTIME) 854 inode->i_ctime = attr->ia_ctime; 855 if (ia_valid & ATTR_MODE) { 856 umode_t mode = attr->ia_mode; 857 kgid_t kgid = i_gid_into_mnt(mnt_userns, inode); 858 859 if (!in_group_p(kgid) && !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID)) 860 mode &= ~S_ISGID; 861 set_acl_inode(inode, mode); 862 } 863 } 864 #else 865 #define __setattr_copy setattr_copy 866 #endif 867 868 int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, 869 struct iattr *attr) 870 { 871 struct inode *inode = d_inode(dentry); 872 int err; 873 874 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) 875 return -EIO; 876 877 if (unlikely(IS_IMMUTABLE(inode))) 878 return -EPERM; 879 880 if (unlikely(IS_APPEND(inode) && 881 (attr->ia_valid & (ATTR_MODE | ATTR_UID | 882 ATTR_GID | ATTR_TIMES_SET)))) 883 return -EPERM; 884 885 if ((attr->ia_valid & ATTR_SIZE) && 886 !f2fs_is_compress_backend_ready(inode)) 887 return -EOPNOTSUPP; 888 889 err = setattr_prepare(&init_user_ns, dentry, attr); 890 if (err) 891 return err; 892 893 err = fscrypt_prepare_setattr(dentry, attr); 894 if (err) 895 return err; 896 897 err = fsverity_prepare_setattr(dentry, attr); 898 if (err) 899 return err; 900 901 if (is_quota_modification(inode, attr)) { 902 err = dquot_initialize(inode); 903 if (err) 904 return err; 905 } 906 if ((attr->ia_valid & ATTR_UID && 907 !uid_eq(attr->ia_uid, inode->i_uid)) || 908 (attr->ia_valid & ATTR_GID && 909 !gid_eq(attr->ia_gid, inode->i_gid))) { 910 f2fs_lock_op(F2FS_I_SB(inode)); 911 err = dquot_transfer(inode, attr); 912 if (err) { 913 set_sbi_flag(F2FS_I_SB(inode), 914 SBI_QUOTA_NEED_REPAIR); 915 f2fs_unlock_op(F2FS_I_SB(inode)); 916 return err; 917 } 918 /* 919 * update uid/gid under lock_op(), so that dquot and inode can 920 * be updated atomically. 921 */ 922 if (attr->ia_valid & ATTR_UID) 923 inode->i_uid = attr->ia_uid; 924 if (attr->ia_valid & ATTR_GID) 925 inode->i_gid = attr->ia_gid; 926 f2fs_mark_inode_dirty_sync(inode, true); 927 f2fs_unlock_op(F2FS_I_SB(inode)); 928 } 929 930 if (attr->ia_valid & ATTR_SIZE) { 931 loff_t old_size = i_size_read(inode); 932 933 if (attr->ia_size > MAX_INLINE_DATA(inode)) { 934 /* 935 * should convert inline inode before i_size_write to 936 * keep smaller than inline_data size with inline flag. 937 */ 938 err = f2fs_convert_inline_inode(inode); 939 if (err) 940 return err; 941 } 942 943 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 944 down_write(&F2FS_I(inode)->i_mmap_sem); 945 946 truncate_setsize(inode, attr->ia_size); 947 948 if (attr->ia_size <= old_size) 949 err = f2fs_truncate(inode); 950 /* 951 * do not trim all blocks after i_size if target size is 952 * larger than i_size. 953 */ 954 up_write(&F2FS_I(inode)->i_mmap_sem); 955 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 956 if (err) 957 return err; 958 959 spin_lock(&F2FS_I(inode)->i_size_lock); 960 inode->i_mtime = inode->i_ctime = current_time(inode); 961 F2FS_I(inode)->last_disk_size = i_size_read(inode); 962 spin_unlock(&F2FS_I(inode)->i_size_lock); 963 } 964 965 __setattr_copy(&init_user_ns, inode, attr); 966 967 if (attr->ia_valid & ATTR_MODE) { 968 err = posix_acl_chmod(&init_user_ns, inode, f2fs_get_inode_mode(inode)); 969 970 if (is_inode_flag_set(inode, FI_ACL_MODE)) { 971 if (!err) 972 inode->i_mode = F2FS_I(inode)->i_acl_mode; 973 clear_inode_flag(inode, FI_ACL_MODE); 974 } 975 } 976 977 /* file size may changed here */ 978 f2fs_mark_inode_dirty_sync(inode, true); 979 980 /* inode change will produce dirty node pages flushed by checkpoint */ 981 f2fs_balance_fs(F2FS_I_SB(inode), true); 982 983 return err; 984 } 985 986 const struct inode_operations f2fs_file_inode_operations = { 987 .getattr = f2fs_getattr, 988 .setattr = f2fs_setattr, 989 .get_acl = f2fs_get_acl, 990 .set_acl = f2fs_set_acl, 991 .listxattr = f2fs_listxattr, 992 .fiemap = f2fs_fiemap, 993 }; 994 995 static int fill_zero(struct inode *inode, pgoff_t index, 996 loff_t start, loff_t len) 997 { 998 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 999 struct page *page; 1000 1001 if (!len) 1002 return 0; 1003 1004 f2fs_balance_fs(sbi, true); 1005 1006 f2fs_lock_op(sbi); 1007 page = f2fs_get_new_data_page(inode, NULL, index, false); 1008 f2fs_unlock_op(sbi); 1009 1010 if (IS_ERR(page)) 1011 return PTR_ERR(page); 1012 1013 f2fs_wait_on_page_writeback(page, DATA, true, true); 1014 zero_user(page, start, len); 1015 set_page_dirty(page); 1016 f2fs_put_page(page, 1); 1017 return 0; 1018 } 1019 1020 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end) 1021 { 1022 int err; 1023 1024 while (pg_start < pg_end) { 1025 struct dnode_of_data dn; 1026 pgoff_t end_offset, count; 1027 1028 set_new_dnode(&dn, inode, NULL, NULL, 0); 1029 err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE); 1030 if (err) { 1031 if (err == -ENOENT) { 1032 pg_start = f2fs_get_next_page_offset(&dn, 1033 pg_start); 1034 continue; 1035 } 1036 return err; 1037 } 1038 1039 end_offset = ADDRS_PER_PAGE(dn.node_page, inode); 1040 count = min(end_offset - dn.ofs_in_node, pg_end - pg_start); 1041 1042 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset); 1043 1044 f2fs_truncate_data_blocks_range(&dn, count); 1045 f2fs_put_dnode(&dn); 1046 1047 pg_start += count; 1048 } 1049 return 0; 1050 } 1051 1052 static int punch_hole(struct inode *inode, loff_t offset, loff_t len) 1053 { 1054 pgoff_t pg_start, pg_end; 1055 loff_t off_start, off_end; 1056 int ret; 1057 1058 ret = f2fs_convert_inline_inode(inode); 1059 if (ret) 1060 return ret; 1061 1062 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; 1063 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; 1064 1065 off_start = offset & (PAGE_SIZE - 1); 1066 off_end = (offset + len) & (PAGE_SIZE - 1); 1067 1068 if (pg_start == pg_end) { 1069 ret = fill_zero(inode, pg_start, off_start, 1070 off_end - off_start); 1071 if (ret) 1072 return ret; 1073 } else { 1074 if (off_start) { 1075 ret = fill_zero(inode, pg_start++, off_start, 1076 PAGE_SIZE - off_start); 1077 if (ret) 1078 return ret; 1079 } 1080 if (off_end) { 1081 ret = fill_zero(inode, pg_end, 0, off_end); 1082 if (ret) 1083 return ret; 1084 } 1085 1086 if (pg_start < pg_end) { 1087 struct address_space *mapping = inode->i_mapping; 1088 loff_t blk_start, blk_end; 1089 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1090 1091 f2fs_balance_fs(sbi, true); 1092 1093 blk_start = (loff_t)pg_start << PAGE_SHIFT; 1094 blk_end = (loff_t)pg_end << PAGE_SHIFT; 1095 1096 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1097 down_write(&F2FS_I(inode)->i_mmap_sem); 1098 1099 truncate_inode_pages_range(mapping, blk_start, 1100 blk_end - 1); 1101 1102 f2fs_lock_op(sbi); 1103 ret = f2fs_truncate_hole(inode, pg_start, pg_end); 1104 f2fs_unlock_op(sbi); 1105 1106 up_write(&F2FS_I(inode)->i_mmap_sem); 1107 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1108 } 1109 } 1110 1111 return ret; 1112 } 1113 1114 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr, 1115 int *do_replace, pgoff_t off, pgoff_t len) 1116 { 1117 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1118 struct dnode_of_data dn; 1119 int ret, done, i; 1120 1121 next_dnode: 1122 set_new_dnode(&dn, inode, NULL, NULL, 0); 1123 ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA); 1124 if (ret && ret != -ENOENT) { 1125 return ret; 1126 } else if (ret == -ENOENT) { 1127 if (dn.max_level == 0) 1128 return -ENOENT; 1129 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) - 1130 dn.ofs_in_node, len); 1131 blkaddr += done; 1132 do_replace += done; 1133 goto next; 1134 } 1135 1136 done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) - 1137 dn.ofs_in_node, len); 1138 for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) { 1139 *blkaddr = f2fs_data_blkaddr(&dn); 1140 1141 if (__is_valid_data_blkaddr(*blkaddr) && 1142 !f2fs_is_valid_blkaddr(sbi, *blkaddr, 1143 DATA_GENERIC_ENHANCE)) { 1144 f2fs_put_dnode(&dn); 1145 return -EFSCORRUPTED; 1146 } 1147 1148 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) { 1149 1150 if (f2fs_lfs_mode(sbi)) { 1151 f2fs_put_dnode(&dn); 1152 return -EOPNOTSUPP; 1153 } 1154 1155 /* do not invalidate this block address */ 1156 f2fs_update_data_blkaddr(&dn, NULL_ADDR); 1157 *do_replace = 1; 1158 } 1159 } 1160 f2fs_put_dnode(&dn); 1161 next: 1162 len -= done; 1163 off += done; 1164 if (len) 1165 goto next_dnode; 1166 return 0; 1167 } 1168 1169 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr, 1170 int *do_replace, pgoff_t off, int len) 1171 { 1172 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1173 struct dnode_of_data dn; 1174 int ret, i; 1175 1176 for (i = 0; i < len; i++, do_replace++, blkaddr++) { 1177 if (*do_replace == 0) 1178 continue; 1179 1180 set_new_dnode(&dn, inode, NULL, NULL, 0); 1181 ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA); 1182 if (ret) { 1183 dec_valid_block_count(sbi, inode, 1); 1184 f2fs_invalidate_blocks(sbi, *blkaddr); 1185 } else { 1186 f2fs_update_data_blkaddr(&dn, *blkaddr); 1187 } 1188 f2fs_put_dnode(&dn); 1189 } 1190 return 0; 1191 } 1192 1193 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode, 1194 block_t *blkaddr, int *do_replace, 1195 pgoff_t src, pgoff_t dst, pgoff_t len, bool full) 1196 { 1197 struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode); 1198 pgoff_t i = 0; 1199 int ret; 1200 1201 while (i < len) { 1202 if (blkaddr[i] == NULL_ADDR && !full) { 1203 i++; 1204 continue; 1205 } 1206 1207 if (do_replace[i] || blkaddr[i] == NULL_ADDR) { 1208 struct dnode_of_data dn; 1209 struct node_info ni; 1210 size_t new_size; 1211 pgoff_t ilen; 1212 1213 set_new_dnode(&dn, dst_inode, NULL, NULL, 0); 1214 ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE); 1215 if (ret) 1216 return ret; 1217 1218 ret = f2fs_get_node_info(sbi, dn.nid, &ni); 1219 if (ret) { 1220 f2fs_put_dnode(&dn); 1221 return ret; 1222 } 1223 1224 ilen = min((pgoff_t) 1225 ADDRS_PER_PAGE(dn.node_page, dst_inode) - 1226 dn.ofs_in_node, len - i); 1227 do { 1228 dn.data_blkaddr = f2fs_data_blkaddr(&dn); 1229 f2fs_truncate_data_blocks_range(&dn, 1); 1230 1231 if (do_replace[i]) { 1232 f2fs_i_blocks_write(src_inode, 1233 1, false, false); 1234 f2fs_i_blocks_write(dst_inode, 1235 1, true, false); 1236 f2fs_replace_block(sbi, &dn, dn.data_blkaddr, 1237 blkaddr[i], ni.version, true, false); 1238 1239 do_replace[i] = 0; 1240 } 1241 dn.ofs_in_node++; 1242 i++; 1243 new_size = (loff_t)(dst + i) << PAGE_SHIFT; 1244 if (dst_inode->i_size < new_size) 1245 f2fs_i_size_write(dst_inode, new_size); 1246 } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR)); 1247 1248 f2fs_put_dnode(&dn); 1249 } else { 1250 struct page *psrc, *pdst; 1251 1252 psrc = f2fs_get_lock_data_page(src_inode, 1253 src + i, true); 1254 if (IS_ERR(psrc)) 1255 return PTR_ERR(psrc); 1256 pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i, 1257 true); 1258 if (IS_ERR(pdst)) { 1259 f2fs_put_page(psrc, 1); 1260 return PTR_ERR(pdst); 1261 } 1262 f2fs_copy_page(psrc, pdst); 1263 set_page_dirty(pdst); 1264 f2fs_put_page(pdst, 1); 1265 f2fs_put_page(psrc, 1); 1266 1267 ret = f2fs_truncate_hole(src_inode, 1268 src + i, src + i + 1); 1269 if (ret) 1270 return ret; 1271 i++; 1272 } 1273 } 1274 return 0; 1275 } 1276 1277 static int __exchange_data_block(struct inode *src_inode, 1278 struct inode *dst_inode, pgoff_t src, pgoff_t dst, 1279 pgoff_t len, bool full) 1280 { 1281 block_t *src_blkaddr; 1282 int *do_replace; 1283 pgoff_t olen; 1284 int ret; 1285 1286 while (len) { 1287 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len); 1288 1289 src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode), 1290 array_size(olen, sizeof(block_t)), 1291 GFP_NOFS); 1292 if (!src_blkaddr) 1293 return -ENOMEM; 1294 1295 do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode), 1296 array_size(olen, sizeof(int)), 1297 GFP_NOFS); 1298 if (!do_replace) { 1299 kvfree(src_blkaddr); 1300 return -ENOMEM; 1301 } 1302 1303 ret = __read_out_blkaddrs(src_inode, src_blkaddr, 1304 do_replace, src, olen); 1305 if (ret) 1306 goto roll_back; 1307 1308 ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr, 1309 do_replace, src, dst, olen, full); 1310 if (ret) 1311 goto roll_back; 1312 1313 src += olen; 1314 dst += olen; 1315 len -= olen; 1316 1317 kvfree(src_blkaddr); 1318 kvfree(do_replace); 1319 } 1320 return 0; 1321 1322 roll_back: 1323 __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen); 1324 kvfree(src_blkaddr); 1325 kvfree(do_replace); 1326 return ret; 1327 } 1328 1329 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len) 1330 { 1331 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1332 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 1333 pgoff_t start = offset >> PAGE_SHIFT; 1334 pgoff_t end = (offset + len) >> PAGE_SHIFT; 1335 int ret; 1336 1337 f2fs_balance_fs(sbi, true); 1338 1339 /* avoid gc operation during block exchange */ 1340 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1341 down_write(&F2FS_I(inode)->i_mmap_sem); 1342 1343 f2fs_lock_op(sbi); 1344 f2fs_drop_extent_tree(inode); 1345 truncate_pagecache(inode, offset); 1346 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true); 1347 f2fs_unlock_op(sbi); 1348 1349 up_write(&F2FS_I(inode)->i_mmap_sem); 1350 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1351 return ret; 1352 } 1353 1354 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len) 1355 { 1356 loff_t new_size; 1357 int ret; 1358 1359 if (offset + len >= i_size_read(inode)) 1360 return -EINVAL; 1361 1362 /* collapse range should be aligned to block size of f2fs. */ 1363 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1)) 1364 return -EINVAL; 1365 1366 ret = f2fs_convert_inline_inode(inode); 1367 if (ret) 1368 return ret; 1369 1370 /* write out all dirty pages from offset */ 1371 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); 1372 if (ret) 1373 return ret; 1374 1375 ret = f2fs_do_collapse(inode, offset, len); 1376 if (ret) 1377 return ret; 1378 1379 /* write out all moved pages, if possible */ 1380 down_write(&F2FS_I(inode)->i_mmap_sem); 1381 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); 1382 truncate_pagecache(inode, offset); 1383 1384 new_size = i_size_read(inode) - len; 1385 ret = f2fs_truncate_blocks(inode, new_size, true); 1386 up_write(&F2FS_I(inode)->i_mmap_sem); 1387 if (!ret) 1388 f2fs_i_size_write(inode, new_size); 1389 return ret; 1390 } 1391 1392 static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start, 1393 pgoff_t end) 1394 { 1395 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); 1396 pgoff_t index = start; 1397 unsigned int ofs_in_node = dn->ofs_in_node; 1398 blkcnt_t count = 0; 1399 int ret; 1400 1401 for (; index < end; index++, dn->ofs_in_node++) { 1402 if (f2fs_data_blkaddr(dn) == NULL_ADDR) 1403 count++; 1404 } 1405 1406 dn->ofs_in_node = ofs_in_node; 1407 ret = f2fs_reserve_new_blocks(dn, count); 1408 if (ret) 1409 return ret; 1410 1411 dn->ofs_in_node = ofs_in_node; 1412 for (index = start; index < end; index++, dn->ofs_in_node++) { 1413 dn->data_blkaddr = f2fs_data_blkaddr(dn); 1414 /* 1415 * f2fs_reserve_new_blocks will not guarantee entire block 1416 * allocation. 1417 */ 1418 if (dn->data_blkaddr == NULL_ADDR) { 1419 ret = -ENOSPC; 1420 break; 1421 } 1422 if (dn->data_blkaddr != NEW_ADDR) { 1423 f2fs_invalidate_blocks(sbi, dn->data_blkaddr); 1424 dn->data_blkaddr = NEW_ADDR; 1425 f2fs_set_data_blkaddr(dn); 1426 } 1427 } 1428 1429 f2fs_update_extent_cache_range(dn, start, 0, index - start); 1430 1431 return ret; 1432 } 1433 1434 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, 1435 int mode) 1436 { 1437 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1438 struct address_space *mapping = inode->i_mapping; 1439 pgoff_t index, pg_start, pg_end; 1440 loff_t new_size = i_size_read(inode); 1441 loff_t off_start, off_end; 1442 int ret = 0; 1443 1444 ret = inode_newsize_ok(inode, (len + offset)); 1445 if (ret) 1446 return ret; 1447 1448 ret = f2fs_convert_inline_inode(inode); 1449 if (ret) 1450 return ret; 1451 1452 ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1); 1453 if (ret) 1454 return ret; 1455 1456 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; 1457 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; 1458 1459 off_start = offset & (PAGE_SIZE - 1); 1460 off_end = (offset + len) & (PAGE_SIZE - 1); 1461 1462 if (pg_start == pg_end) { 1463 ret = fill_zero(inode, pg_start, off_start, 1464 off_end - off_start); 1465 if (ret) 1466 return ret; 1467 1468 new_size = max_t(loff_t, new_size, offset + len); 1469 } else { 1470 if (off_start) { 1471 ret = fill_zero(inode, pg_start++, off_start, 1472 PAGE_SIZE - off_start); 1473 if (ret) 1474 return ret; 1475 1476 new_size = max_t(loff_t, new_size, 1477 (loff_t)pg_start << PAGE_SHIFT); 1478 } 1479 1480 for (index = pg_start; index < pg_end;) { 1481 struct dnode_of_data dn; 1482 unsigned int end_offset; 1483 pgoff_t end; 1484 1485 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1486 down_write(&F2FS_I(inode)->i_mmap_sem); 1487 1488 truncate_pagecache_range(inode, 1489 (loff_t)index << PAGE_SHIFT, 1490 ((loff_t)pg_end << PAGE_SHIFT) - 1); 1491 1492 f2fs_lock_op(sbi); 1493 1494 set_new_dnode(&dn, inode, NULL, NULL, 0); 1495 ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE); 1496 if (ret) { 1497 f2fs_unlock_op(sbi); 1498 up_write(&F2FS_I(inode)->i_mmap_sem); 1499 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1500 goto out; 1501 } 1502 1503 end_offset = ADDRS_PER_PAGE(dn.node_page, inode); 1504 end = min(pg_end, end_offset - dn.ofs_in_node + index); 1505 1506 ret = f2fs_do_zero_range(&dn, index, end); 1507 f2fs_put_dnode(&dn); 1508 1509 f2fs_unlock_op(sbi); 1510 up_write(&F2FS_I(inode)->i_mmap_sem); 1511 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1512 1513 f2fs_balance_fs(sbi, dn.node_changed); 1514 1515 if (ret) 1516 goto out; 1517 1518 index = end; 1519 new_size = max_t(loff_t, new_size, 1520 (loff_t)index << PAGE_SHIFT); 1521 } 1522 1523 if (off_end) { 1524 ret = fill_zero(inode, pg_end, 0, off_end); 1525 if (ret) 1526 goto out; 1527 1528 new_size = max_t(loff_t, new_size, offset + len); 1529 } 1530 } 1531 1532 out: 1533 if (new_size > i_size_read(inode)) { 1534 if (mode & FALLOC_FL_KEEP_SIZE) 1535 file_set_keep_isize(inode); 1536 else 1537 f2fs_i_size_write(inode, new_size); 1538 } 1539 return ret; 1540 } 1541 1542 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) 1543 { 1544 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1545 pgoff_t nr, pg_start, pg_end, delta, idx; 1546 loff_t new_size; 1547 int ret = 0; 1548 1549 new_size = i_size_read(inode) + len; 1550 ret = inode_newsize_ok(inode, new_size); 1551 if (ret) 1552 return ret; 1553 1554 if (offset >= i_size_read(inode)) 1555 return -EINVAL; 1556 1557 /* insert range should be aligned to block size of f2fs. */ 1558 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1)) 1559 return -EINVAL; 1560 1561 ret = f2fs_convert_inline_inode(inode); 1562 if (ret) 1563 return ret; 1564 1565 f2fs_balance_fs(sbi, true); 1566 1567 down_write(&F2FS_I(inode)->i_mmap_sem); 1568 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true); 1569 up_write(&F2FS_I(inode)->i_mmap_sem); 1570 if (ret) 1571 return ret; 1572 1573 /* write out all dirty pages from offset */ 1574 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); 1575 if (ret) 1576 return ret; 1577 1578 pg_start = offset >> PAGE_SHIFT; 1579 pg_end = (offset + len) >> PAGE_SHIFT; 1580 delta = pg_end - pg_start; 1581 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 1582 1583 /* avoid gc operation during block exchange */ 1584 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1585 down_write(&F2FS_I(inode)->i_mmap_sem); 1586 truncate_pagecache(inode, offset); 1587 1588 while (!ret && idx > pg_start) { 1589 nr = idx - pg_start; 1590 if (nr > delta) 1591 nr = delta; 1592 idx -= nr; 1593 1594 f2fs_lock_op(sbi); 1595 f2fs_drop_extent_tree(inode); 1596 1597 ret = __exchange_data_block(inode, inode, idx, 1598 idx + delta, nr, false); 1599 f2fs_unlock_op(sbi); 1600 } 1601 up_write(&F2FS_I(inode)->i_mmap_sem); 1602 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1603 1604 /* write out all moved pages, if possible */ 1605 down_write(&F2FS_I(inode)->i_mmap_sem); 1606 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); 1607 truncate_pagecache(inode, offset); 1608 up_write(&F2FS_I(inode)->i_mmap_sem); 1609 1610 if (!ret) 1611 f2fs_i_size_write(inode, new_size); 1612 return ret; 1613 } 1614 1615 static int expand_inode_data(struct inode *inode, loff_t offset, 1616 loff_t len, int mode) 1617 { 1618 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1619 struct f2fs_map_blocks map = { .m_next_pgofs = NULL, 1620 .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE, 1621 .m_may_create = true }; 1622 pgoff_t pg_end; 1623 loff_t new_size = i_size_read(inode); 1624 loff_t off_end; 1625 int err; 1626 1627 err = inode_newsize_ok(inode, (len + offset)); 1628 if (err) 1629 return err; 1630 1631 err = f2fs_convert_inline_inode(inode); 1632 if (err) 1633 return err; 1634 1635 f2fs_balance_fs(sbi, true); 1636 1637 pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT; 1638 off_end = (offset + len) & (PAGE_SIZE - 1); 1639 1640 map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT; 1641 map.m_len = pg_end - map.m_lblk; 1642 if (off_end) 1643 map.m_len++; 1644 1645 if (!map.m_len) 1646 return 0; 1647 1648 if (f2fs_is_pinned_file(inode)) { 1649 block_t len = (map.m_len >> sbi->log_blocks_per_seg) << 1650 sbi->log_blocks_per_seg; 1651 block_t done = 0; 1652 1653 if (map.m_len % sbi->blocks_per_seg) 1654 len += sbi->blocks_per_seg; 1655 1656 map.m_len = sbi->blocks_per_seg; 1657 next_alloc: 1658 if (has_not_enough_free_secs(sbi, 0, 1659 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) { 1660 down_write(&sbi->gc_lock); 1661 err = f2fs_gc(sbi, true, false, NULL_SEGNO); 1662 if (err && err != -ENODATA && err != -EAGAIN) 1663 goto out_err; 1664 } 1665 1666 down_write(&sbi->pin_sem); 1667 1668 f2fs_lock_op(sbi); 1669 f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA_PINNED); 1670 f2fs_unlock_op(sbi); 1671 1672 map.m_seg_type = CURSEG_COLD_DATA_PINNED; 1673 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO); 1674 1675 up_write(&sbi->pin_sem); 1676 1677 done += map.m_len; 1678 len -= map.m_len; 1679 map.m_lblk += map.m_len; 1680 if (!err && len) 1681 goto next_alloc; 1682 1683 map.m_len = done; 1684 } else { 1685 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO); 1686 } 1687 out_err: 1688 if (err) { 1689 pgoff_t last_off; 1690 1691 if (!map.m_len) 1692 return err; 1693 1694 last_off = map.m_lblk + map.m_len - 1; 1695 1696 /* update new size to the failed position */ 1697 new_size = (last_off == pg_end) ? offset + len : 1698 (loff_t)(last_off + 1) << PAGE_SHIFT; 1699 } else { 1700 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end; 1701 } 1702 1703 if (new_size > i_size_read(inode)) { 1704 if (mode & FALLOC_FL_KEEP_SIZE) 1705 file_set_keep_isize(inode); 1706 else 1707 f2fs_i_size_write(inode, new_size); 1708 } 1709 1710 return err; 1711 } 1712 1713 static long f2fs_fallocate(struct file *file, int mode, 1714 loff_t offset, loff_t len) 1715 { 1716 struct inode *inode = file_inode(file); 1717 long ret = 0; 1718 1719 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) 1720 return -EIO; 1721 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode))) 1722 return -ENOSPC; 1723 if (!f2fs_is_compress_backend_ready(inode)) 1724 return -EOPNOTSUPP; 1725 1726 /* f2fs only support ->fallocate for regular file */ 1727 if (!S_ISREG(inode->i_mode)) 1728 return -EINVAL; 1729 1730 if (IS_ENCRYPTED(inode) && 1731 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE))) 1732 return -EOPNOTSUPP; 1733 1734 if (f2fs_compressed_file(inode) && 1735 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE | 1736 FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE))) 1737 return -EOPNOTSUPP; 1738 1739 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | 1740 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | 1741 FALLOC_FL_INSERT_RANGE)) 1742 return -EOPNOTSUPP; 1743 1744 inode_lock(inode); 1745 1746 if (mode & FALLOC_FL_PUNCH_HOLE) { 1747 if (offset >= inode->i_size) 1748 goto out; 1749 1750 ret = punch_hole(inode, offset, len); 1751 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) { 1752 ret = f2fs_collapse_range(inode, offset, len); 1753 } else if (mode & FALLOC_FL_ZERO_RANGE) { 1754 ret = f2fs_zero_range(inode, offset, len, mode); 1755 } else if (mode & FALLOC_FL_INSERT_RANGE) { 1756 ret = f2fs_insert_range(inode, offset, len); 1757 } else { 1758 ret = expand_inode_data(inode, offset, len, mode); 1759 } 1760 1761 if (!ret) { 1762 inode->i_mtime = inode->i_ctime = current_time(inode); 1763 f2fs_mark_inode_dirty_sync(inode, false); 1764 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 1765 } 1766 1767 out: 1768 inode_unlock(inode); 1769 1770 trace_f2fs_fallocate(inode, mode, offset, len, ret); 1771 return ret; 1772 } 1773 1774 static int f2fs_release_file(struct inode *inode, struct file *filp) 1775 { 1776 /* 1777 * f2fs_relase_file is called at every close calls. So we should 1778 * not drop any inmemory pages by close called by other process. 1779 */ 1780 if (!(filp->f_mode & FMODE_WRITE) || 1781 atomic_read(&inode->i_writecount) != 1) 1782 return 0; 1783 1784 /* some remained atomic pages should discarded */ 1785 if (f2fs_is_atomic_file(inode)) 1786 f2fs_drop_inmem_pages(inode); 1787 if (f2fs_is_volatile_file(inode)) { 1788 set_inode_flag(inode, FI_DROP_CACHE); 1789 filemap_fdatawrite(inode->i_mapping); 1790 clear_inode_flag(inode, FI_DROP_CACHE); 1791 clear_inode_flag(inode, FI_VOLATILE_FILE); 1792 stat_dec_volatile_write(inode); 1793 } 1794 return 0; 1795 } 1796 1797 static int f2fs_file_flush(struct file *file, fl_owner_t id) 1798 { 1799 struct inode *inode = file_inode(file); 1800 1801 /* 1802 * If the process doing a transaction is crashed, we should do 1803 * roll-back. Otherwise, other reader/write can see corrupted database 1804 * until all the writers close its file. Since this should be done 1805 * before dropping file lock, it needs to do in ->flush. 1806 */ 1807 if (f2fs_is_atomic_file(inode) && 1808 F2FS_I(inode)->inmem_task == current) 1809 f2fs_drop_inmem_pages(inode); 1810 return 0; 1811 } 1812 1813 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask) 1814 { 1815 struct f2fs_inode_info *fi = F2FS_I(inode); 1816 u32 masked_flags = fi->i_flags & mask; 1817 1818 f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask)); 1819 1820 /* Is it quota file? Do not allow user to mess with it */ 1821 if (IS_NOQUOTA(inode)) 1822 return -EPERM; 1823 1824 if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) { 1825 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode))) 1826 return -EOPNOTSUPP; 1827 if (!f2fs_empty_dir(inode)) 1828 return -ENOTEMPTY; 1829 } 1830 1831 if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) { 1832 if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) 1833 return -EOPNOTSUPP; 1834 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL)) 1835 return -EINVAL; 1836 } 1837 1838 if ((iflags ^ masked_flags) & F2FS_COMPR_FL) { 1839 if (masked_flags & F2FS_COMPR_FL) { 1840 if (!f2fs_disable_compressed_file(inode)) 1841 return -EINVAL; 1842 } 1843 if (iflags & F2FS_NOCOMP_FL) 1844 return -EINVAL; 1845 if (iflags & F2FS_COMPR_FL) { 1846 if (!f2fs_may_compress(inode)) 1847 return -EINVAL; 1848 if (S_ISREG(inode->i_mode) && inode->i_size) 1849 return -EINVAL; 1850 1851 set_compress_context(inode); 1852 } 1853 } 1854 if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) { 1855 if (masked_flags & F2FS_COMPR_FL) 1856 return -EINVAL; 1857 } 1858 1859 fi->i_flags = iflags | (fi->i_flags & ~mask); 1860 f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) && 1861 (fi->i_flags & F2FS_NOCOMP_FL)); 1862 1863 if (fi->i_flags & F2FS_PROJINHERIT_FL) 1864 set_inode_flag(inode, FI_PROJ_INHERIT); 1865 else 1866 clear_inode_flag(inode, FI_PROJ_INHERIT); 1867 1868 inode->i_ctime = current_time(inode); 1869 f2fs_set_inode_flags(inode); 1870 f2fs_mark_inode_dirty_sync(inode, true); 1871 return 0; 1872 } 1873 1874 /* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */ 1875 1876 /* 1877 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry 1878 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to 1879 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add 1880 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL. 1881 */ 1882 1883 static const struct { 1884 u32 iflag; 1885 u32 fsflag; 1886 } f2fs_fsflags_map[] = { 1887 { F2FS_COMPR_FL, FS_COMPR_FL }, 1888 { F2FS_SYNC_FL, FS_SYNC_FL }, 1889 { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL }, 1890 { F2FS_APPEND_FL, FS_APPEND_FL }, 1891 { F2FS_NODUMP_FL, FS_NODUMP_FL }, 1892 { F2FS_NOATIME_FL, FS_NOATIME_FL }, 1893 { F2FS_NOCOMP_FL, FS_NOCOMP_FL }, 1894 { F2FS_INDEX_FL, FS_INDEX_FL }, 1895 { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL }, 1896 { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL }, 1897 { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL }, 1898 }; 1899 1900 #define F2FS_GETTABLE_FS_FL ( \ 1901 FS_COMPR_FL | \ 1902 FS_SYNC_FL | \ 1903 FS_IMMUTABLE_FL | \ 1904 FS_APPEND_FL | \ 1905 FS_NODUMP_FL | \ 1906 FS_NOATIME_FL | \ 1907 FS_NOCOMP_FL | \ 1908 FS_INDEX_FL | \ 1909 FS_DIRSYNC_FL | \ 1910 FS_PROJINHERIT_FL | \ 1911 FS_ENCRYPT_FL | \ 1912 FS_INLINE_DATA_FL | \ 1913 FS_NOCOW_FL | \ 1914 FS_VERITY_FL | \ 1915 FS_CASEFOLD_FL) 1916 1917 #define F2FS_SETTABLE_FS_FL ( \ 1918 FS_COMPR_FL | \ 1919 FS_SYNC_FL | \ 1920 FS_IMMUTABLE_FL | \ 1921 FS_APPEND_FL | \ 1922 FS_NODUMP_FL | \ 1923 FS_NOATIME_FL | \ 1924 FS_NOCOMP_FL | \ 1925 FS_DIRSYNC_FL | \ 1926 FS_PROJINHERIT_FL | \ 1927 FS_CASEFOLD_FL) 1928 1929 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */ 1930 static inline u32 f2fs_iflags_to_fsflags(u32 iflags) 1931 { 1932 u32 fsflags = 0; 1933 int i; 1934 1935 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++) 1936 if (iflags & f2fs_fsflags_map[i].iflag) 1937 fsflags |= f2fs_fsflags_map[i].fsflag; 1938 1939 return fsflags; 1940 } 1941 1942 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */ 1943 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags) 1944 { 1945 u32 iflags = 0; 1946 int i; 1947 1948 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++) 1949 if (fsflags & f2fs_fsflags_map[i].fsflag) 1950 iflags |= f2fs_fsflags_map[i].iflag; 1951 1952 return iflags; 1953 } 1954 1955 static int f2fs_ioc_getflags(struct file *filp, unsigned long arg) 1956 { 1957 struct inode *inode = file_inode(filp); 1958 struct f2fs_inode_info *fi = F2FS_I(inode); 1959 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags); 1960 1961 if (IS_ENCRYPTED(inode)) 1962 fsflags |= FS_ENCRYPT_FL; 1963 if (IS_VERITY(inode)) 1964 fsflags |= FS_VERITY_FL; 1965 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) 1966 fsflags |= FS_INLINE_DATA_FL; 1967 if (is_inode_flag_set(inode, FI_PIN_FILE)) 1968 fsflags |= FS_NOCOW_FL; 1969 1970 fsflags &= F2FS_GETTABLE_FS_FL; 1971 1972 return put_user(fsflags, (int __user *)arg); 1973 } 1974 1975 static int f2fs_ioc_setflags(struct file *filp, unsigned long arg) 1976 { 1977 struct inode *inode = file_inode(filp); 1978 struct f2fs_inode_info *fi = F2FS_I(inode); 1979 u32 fsflags, old_fsflags; 1980 u32 iflags; 1981 int ret; 1982 1983 if (!inode_owner_or_capable(&init_user_ns, inode)) 1984 return -EACCES; 1985 1986 if (get_user(fsflags, (int __user *)arg)) 1987 return -EFAULT; 1988 1989 if (fsflags & ~F2FS_GETTABLE_FS_FL) 1990 return -EOPNOTSUPP; 1991 fsflags &= F2FS_SETTABLE_FS_FL; 1992 1993 iflags = f2fs_fsflags_to_iflags(fsflags); 1994 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags) 1995 return -EOPNOTSUPP; 1996 1997 ret = mnt_want_write_file(filp); 1998 if (ret) 1999 return ret; 2000 2001 inode_lock(inode); 2002 2003 old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags); 2004 ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags); 2005 if (ret) 2006 goto out; 2007 2008 ret = f2fs_setflags_common(inode, iflags, 2009 f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL)); 2010 out: 2011 inode_unlock(inode); 2012 mnt_drop_write_file(filp); 2013 return ret; 2014 } 2015 2016 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg) 2017 { 2018 struct inode *inode = file_inode(filp); 2019 2020 return put_user(inode->i_generation, (int __user *)arg); 2021 } 2022 2023 static int f2fs_ioc_start_atomic_write(struct file *filp) 2024 { 2025 struct inode *inode = file_inode(filp); 2026 struct f2fs_inode_info *fi = F2FS_I(inode); 2027 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2028 int ret; 2029 2030 if (!inode_owner_or_capable(&init_user_ns, inode)) 2031 return -EACCES; 2032 2033 if (!S_ISREG(inode->i_mode)) 2034 return -EINVAL; 2035 2036 if (filp->f_flags & O_DIRECT) 2037 return -EINVAL; 2038 2039 ret = mnt_want_write_file(filp); 2040 if (ret) 2041 return ret; 2042 2043 inode_lock(inode); 2044 2045 f2fs_disable_compressed_file(inode); 2046 2047 if (f2fs_is_atomic_file(inode)) { 2048 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) 2049 ret = -EINVAL; 2050 goto out; 2051 } 2052 2053 ret = f2fs_convert_inline_inode(inode); 2054 if (ret) 2055 goto out; 2056 2057 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 2058 2059 /* 2060 * Should wait end_io to count F2FS_WB_CP_DATA correctly by 2061 * f2fs_is_atomic_file. 2062 */ 2063 if (get_dirty_pages(inode)) 2064 f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u", 2065 inode->i_ino, get_dirty_pages(inode)); 2066 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); 2067 if (ret) { 2068 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 2069 goto out; 2070 } 2071 2072 spin_lock(&sbi->inode_lock[ATOMIC_FILE]); 2073 if (list_empty(&fi->inmem_ilist)) 2074 list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]); 2075 sbi->atomic_files++; 2076 spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); 2077 2078 /* add inode in inmem_list first and set atomic_file */ 2079 set_inode_flag(inode, FI_ATOMIC_FILE); 2080 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST); 2081 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 2082 2083 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 2084 F2FS_I(inode)->inmem_task = current; 2085 stat_update_max_atomic_write(inode); 2086 out: 2087 inode_unlock(inode); 2088 mnt_drop_write_file(filp); 2089 return ret; 2090 } 2091 2092 static int f2fs_ioc_commit_atomic_write(struct file *filp) 2093 { 2094 struct inode *inode = file_inode(filp); 2095 int ret; 2096 2097 if (!inode_owner_or_capable(&init_user_ns, inode)) 2098 return -EACCES; 2099 2100 ret = mnt_want_write_file(filp); 2101 if (ret) 2102 return ret; 2103 2104 f2fs_balance_fs(F2FS_I_SB(inode), true); 2105 2106 inode_lock(inode); 2107 2108 if (f2fs_is_volatile_file(inode)) { 2109 ret = -EINVAL; 2110 goto err_out; 2111 } 2112 2113 if (f2fs_is_atomic_file(inode)) { 2114 ret = f2fs_commit_inmem_pages(inode); 2115 if (ret) 2116 goto err_out; 2117 2118 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true); 2119 if (!ret) 2120 f2fs_drop_inmem_pages(inode); 2121 } else { 2122 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false); 2123 } 2124 err_out: 2125 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) { 2126 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST); 2127 ret = -EINVAL; 2128 } 2129 inode_unlock(inode); 2130 mnt_drop_write_file(filp); 2131 return ret; 2132 } 2133 2134 static int f2fs_ioc_start_volatile_write(struct file *filp) 2135 { 2136 struct inode *inode = file_inode(filp); 2137 int ret; 2138 2139 if (!inode_owner_or_capable(&init_user_ns, inode)) 2140 return -EACCES; 2141 2142 if (!S_ISREG(inode->i_mode)) 2143 return -EINVAL; 2144 2145 ret = mnt_want_write_file(filp); 2146 if (ret) 2147 return ret; 2148 2149 inode_lock(inode); 2150 2151 if (f2fs_is_volatile_file(inode)) 2152 goto out; 2153 2154 ret = f2fs_convert_inline_inode(inode); 2155 if (ret) 2156 goto out; 2157 2158 stat_inc_volatile_write(inode); 2159 stat_update_max_volatile_write(inode); 2160 2161 set_inode_flag(inode, FI_VOLATILE_FILE); 2162 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 2163 out: 2164 inode_unlock(inode); 2165 mnt_drop_write_file(filp); 2166 return ret; 2167 } 2168 2169 static int f2fs_ioc_release_volatile_write(struct file *filp) 2170 { 2171 struct inode *inode = file_inode(filp); 2172 int ret; 2173 2174 if (!inode_owner_or_capable(&init_user_ns, inode)) 2175 return -EACCES; 2176 2177 ret = mnt_want_write_file(filp); 2178 if (ret) 2179 return ret; 2180 2181 inode_lock(inode); 2182 2183 if (!f2fs_is_volatile_file(inode)) 2184 goto out; 2185 2186 if (!f2fs_is_first_block_written(inode)) { 2187 ret = truncate_partial_data_page(inode, 0, true); 2188 goto out; 2189 } 2190 2191 ret = punch_hole(inode, 0, F2FS_BLKSIZE); 2192 out: 2193 inode_unlock(inode); 2194 mnt_drop_write_file(filp); 2195 return ret; 2196 } 2197 2198 static int f2fs_ioc_abort_volatile_write(struct file *filp) 2199 { 2200 struct inode *inode = file_inode(filp); 2201 int ret; 2202 2203 if (!inode_owner_or_capable(&init_user_ns, inode)) 2204 return -EACCES; 2205 2206 ret = mnt_want_write_file(filp); 2207 if (ret) 2208 return ret; 2209 2210 inode_lock(inode); 2211 2212 if (f2fs_is_atomic_file(inode)) 2213 f2fs_drop_inmem_pages(inode); 2214 if (f2fs_is_volatile_file(inode)) { 2215 clear_inode_flag(inode, FI_VOLATILE_FILE); 2216 stat_dec_volatile_write(inode); 2217 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true); 2218 } 2219 2220 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST); 2221 2222 inode_unlock(inode); 2223 2224 mnt_drop_write_file(filp); 2225 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 2226 return ret; 2227 } 2228 2229 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) 2230 { 2231 struct inode *inode = file_inode(filp); 2232 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2233 struct super_block *sb = sbi->sb; 2234 __u32 in; 2235 int ret = 0; 2236 2237 if (!capable(CAP_SYS_ADMIN)) 2238 return -EPERM; 2239 2240 if (get_user(in, (__u32 __user *)arg)) 2241 return -EFAULT; 2242 2243 if (in != F2FS_GOING_DOWN_FULLSYNC) { 2244 ret = mnt_want_write_file(filp); 2245 if (ret) { 2246 if (ret == -EROFS) { 2247 ret = 0; 2248 f2fs_stop_checkpoint(sbi, false); 2249 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); 2250 trace_f2fs_shutdown(sbi, in, ret); 2251 } 2252 return ret; 2253 } 2254 } 2255 2256 switch (in) { 2257 case F2FS_GOING_DOWN_FULLSYNC: 2258 ret = freeze_bdev(sb->s_bdev); 2259 if (ret) 2260 goto out; 2261 f2fs_stop_checkpoint(sbi, false); 2262 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); 2263 thaw_bdev(sb->s_bdev); 2264 break; 2265 case F2FS_GOING_DOWN_METASYNC: 2266 /* do checkpoint only */ 2267 ret = f2fs_sync_fs(sb, 1); 2268 if (ret) 2269 goto out; 2270 f2fs_stop_checkpoint(sbi, false); 2271 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); 2272 break; 2273 case F2FS_GOING_DOWN_NOSYNC: 2274 f2fs_stop_checkpoint(sbi, false); 2275 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); 2276 break; 2277 case F2FS_GOING_DOWN_METAFLUSH: 2278 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO); 2279 f2fs_stop_checkpoint(sbi, false); 2280 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); 2281 break; 2282 case F2FS_GOING_DOWN_NEED_FSCK: 2283 set_sbi_flag(sbi, SBI_NEED_FSCK); 2284 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK); 2285 set_sbi_flag(sbi, SBI_IS_DIRTY); 2286 /* do checkpoint only */ 2287 ret = f2fs_sync_fs(sb, 1); 2288 goto out; 2289 default: 2290 ret = -EINVAL; 2291 goto out; 2292 } 2293 2294 f2fs_stop_gc_thread(sbi); 2295 f2fs_stop_discard_thread(sbi); 2296 2297 f2fs_drop_discard_cmd(sbi); 2298 clear_opt(sbi, DISCARD); 2299 2300 f2fs_update_time(sbi, REQ_TIME); 2301 out: 2302 if (in != F2FS_GOING_DOWN_FULLSYNC) 2303 mnt_drop_write_file(filp); 2304 2305 trace_f2fs_shutdown(sbi, in, ret); 2306 2307 return ret; 2308 } 2309 2310 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg) 2311 { 2312 struct inode *inode = file_inode(filp); 2313 struct super_block *sb = inode->i_sb; 2314 struct request_queue *q = bdev_get_queue(sb->s_bdev); 2315 struct fstrim_range range; 2316 int ret; 2317 2318 if (!capable(CAP_SYS_ADMIN)) 2319 return -EPERM; 2320 2321 if (!f2fs_hw_support_discard(F2FS_SB(sb))) 2322 return -EOPNOTSUPP; 2323 2324 if (copy_from_user(&range, (struct fstrim_range __user *)arg, 2325 sizeof(range))) 2326 return -EFAULT; 2327 2328 ret = mnt_want_write_file(filp); 2329 if (ret) 2330 return ret; 2331 2332 range.minlen = max((unsigned int)range.minlen, 2333 q->limits.discard_granularity); 2334 ret = f2fs_trim_fs(F2FS_SB(sb), &range); 2335 mnt_drop_write_file(filp); 2336 if (ret < 0) 2337 return ret; 2338 2339 if (copy_to_user((struct fstrim_range __user *)arg, &range, 2340 sizeof(range))) 2341 return -EFAULT; 2342 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 2343 return 0; 2344 } 2345 2346 static bool uuid_is_nonzero(__u8 u[16]) 2347 { 2348 int i; 2349 2350 for (i = 0; i < 16; i++) 2351 if (u[i]) 2352 return true; 2353 return false; 2354 } 2355 2356 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg) 2357 { 2358 struct inode *inode = file_inode(filp); 2359 2360 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode))) 2361 return -EOPNOTSUPP; 2362 2363 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 2364 2365 return fscrypt_ioctl_set_policy(filp, (const void __user *)arg); 2366 } 2367 2368 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg) 2369 { 2370 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) 2371 return -EOPNOTSUPP; 2372 return fscrypt_ioctl_get_policy(filp, (void __user *)arg); 2373 } 2374 2375 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg) 2376 { 2377 struct inode *inode = file_inode(filp); 2378 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2379 int err; 2380 2381 if (!f2fs_sb_has_encrypt(sbi)) 2382 return -EOPNOTSUPP; 2383 2384 err = mnt_want_write_file(filp); 2385 if (err) 2386 return err; 2387 2388 down_write(&sbi->sb_lock); 2389 2390 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt)) 2391 goto got_it; 2392 2393 /* update superblock with uuid */ 2394 generate_random_uuid(sbi->raw_super->encrypt_pw_salt); 2395 2396 err = f2fs_commit_super(sbi, false); 2397 if (err) { 2398 /* undo new data */ 2399 memset(sbi->raw_super->encrypt_pw_salt, 0, 16); 2400 goto out_err; 2401 } 2402 got_it: 2403 if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt, 2404 16)) 2405 err = -EFAULT; 2406 out_err: 2407 up_write(&sbi->sb_lock); 2408 mnt_drop_write_file(filp); 2409 return err; 2410 } 2411 2412 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp, 2413 unsigned long arg) 2414 { 2415 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) 2416 return -EOPNOTSUPP; 2417 2418 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg); 2419 } 2420 2421 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg) 2422 { 2423 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) 2424 return -EOPNOTSUPP; 2425 2426 return fscrypt_ioctl_add_key(filp, (void __user *)arg); 2427 } 2428 2429 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg) 2430 { 2431 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) 2432 return -EOPNOTSUPP; 2433 2434 return fscrypt_ioctl_remove_key(filp, (void __user *)arg); 2435 } 2436 2437 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp, 2438 unsigned long arg) 2439 { 2440 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) 2441 return -EOPNOTSUPP; 2442 2443 return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg); 2444 } 2445 2446 static int f2fs_ioc_get_encryption_key_status(struct file *filp, 2447 unsigned long arg) 2448 { 2449 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) 2450 return -EOPNOTSUPP; 2451 2452 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg); 2453 } 2454 2455 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg) 2456 { 2457 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) 2458 return -EOPNOTSUPP; 2459 2460 return fscrypt_ioctl_get_nonce(filp, (void __user *)arg); 2461 } 2462 2463 static int f2fs_ioc_gc(struct file *filp, unsigned long arg) 2464 { 2465 struct inode *inode = file_inode(filp); 2466 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2467 __u32 sync; 2468 int ret; 2469 2470 if (!capable(CAP_SYS_ADMIN)) 2471 return -EPERM; 2472 2473 if (get_user(sync, (__u32 __user *)arg)) 2474 return -EFAULT; 2475 2476 if (f2fs_readonly(sbi->sb)) 2477 return -EROFS; 2478 2479 ret = mnt_want_write_file(filp); 2480 if (ret) 2481 return ret; 2482 2483 if (!sync) { 2484 if (!down_write_trylock(&sbi->gc_lock)) { 2485 ret = -EBUSY; 2486 goto out; 2487 } 2488 } else { 2489 down_write(&sbi->gc_lock); 2490 } 2491 2492 ret = f2fs_gc(sbi, sync, true, NULL_SEGNO); 2493 out: 2494 mnt_drop_write_file(filp); 2495 return ret; 2496 } 2497 2498 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range) 2499 { 2500 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp)); 2501 u64 end; 2502 int ret; 2503 2504 if (!capable(CAP_SYS_ADMIN)) 2505 return -EPERM; 2506 if (f2fs_readonly(sbi->sb)) 2507 return -EROFS; 2508 2509 end = range->start + range->len; 2510 if (end < range->start || range->start < MAIN_BLKADDR(sbi) || 2511 end >= MAX_BLKADDR(sbi)) 2512 return -EINVAL; 2513 2514 ret = mnt_want_write_file(filp); 2515 if (ret) 2516 return ret; 2517 2518 do_more: 2519 if (!range->sync) { 2520 if (!down_write_trylock(&sbi->gc_lock)) { 2521 ret = -EBUSY; 2522 goto out; 2523 } 2524 } else { 2525 down_write(&sbi->gc_lock); 2526 } 2527 2528 ret = f2fs_gc(sbi, range->sync, true, GET_SEGNO(sbi, range->start)); 2529 if (ret) { 2530 if (ret == -EBUSY) 2531 ret = -EAGAIN; 2532 goto out; 2533 } 2534 range->start += BLKS_PER_SEC(sbi); 2535 if (range->start <= end) 2536 goto do_more; 2537 out: 2538 mnt_drop_write_file(filp); 2539 return ret; 2540 } 2541 2542 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg) 2543 { 2544 struct f2fs_gc_range range; 2545 2546 if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg, 2547 sizeof(range))) 2548 return -EFAULT; 2549 return __f2fs_ioc_gc_range(filp, &range); 2550 } 2551 2552 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg) 2553 { 2554 struct inode *inode = file_inode(filp); 2555 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2556 int ret; 2557 2558 if (!capable(CAP_SYS_ADMIN)) 2559 return -EPERM; 2560 2561 if (f2fs_readonly(sbi->sb)) 2562 return -EROFS; 2563 2564 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 2565 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled."); 2566 return -EINVAL; 2567 } 2568 2569 ret = mnt_want_write_file(filp); 2570 if (ret) 2571 return ret; 2572 2573 ret = f2fs_sync_fs(sbi->sb, 1); 2574 2575 mnt_drop_write_file(filp); 2576 return ret; 2577 } 2578 2579 static int f2fs_defragment_range(struct f2fs_sb_info *sbi, 2580 struct file *filp, 2581 struct f2fs_defragment *range) 2582 { 2583 struct inode *inode = file_inode(filp); 2584 struct f2fs_map_blocks map = { .m_next_extent = NULL, 2585 .m_seg_type = NO_CHECK_TYPE , 2586 .m_may_create = false }; 2587 struct extent_info ei = {0, 0, 0}; 2588 pgoff_t pg_start, pg_end, next_pgofs; 2589 unsigned int blk_per_seg = sbi->blocks_per_seg; 2590 unsigned int total = 0, sec_num; 2591 block_t blk_end = 0; 2592 bool fragmented = false; 2593 int err; 2594 2595 /* if in-place-update policy is enabled, don't waste time here */ 2596 if (f2fs_should_update_inplace(inode, NULL)) 2597 return -EINVAL; 2598 2599 pg_start = range->start >> PAGE_SHIFT; 2600 pg_end = (range->start + range->len) >> PAGE_SHIFT; 2601 2602 f2fs_balance_fs(sbi, true); 2603 2604 inode_lock(inode); 2605 2606 /* writeback all dirty pages in the range */ 2607 err = filemap_write_and_wait_range(inode->i_mapping, range->start, 2608 range->start + range->len - 1); 2609 if (err) 2610 goto out; 2611 2612 /* 2613 * lookup mapping info in extent cache, skip defragmenting if physical 2614 * block addresses are continuous. 2615 */ 2616 if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) { 2617 if (ei.fofs + ei.len >= pg_end) 2618 goto out; 2619 } 2620 2621 map.m_lblk = pg_start; 2622 map.m_next_pgofs = &next_pgofs; 2623 2624 /* 2625 * lookup mapping info in dnode page cache, skip defragmenting if all 2626 * physical block addresses are continuous even if there are hole(s) 2627 * in logical blocks. 2628 */ 2629 while (map.m_lblk < pg_end) { 2630 map.m_len = pg_end - map.m_lblk; 2631 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT); 2632 if (err) 2633 goto out; 2634 2635 if (!(map.m_flags & F2FS_MAP_FLAGS)) { 2636 map.m_lblk = next_pgofs; 2637 continue; 2638 } 2639 2640 if (blk_end && blk_end != map.m_pblk) 2641 fragmented = true; 2642 2643 /* record total count of block that we're going to move */ 2644 total += map.m_len; 2645 2646 blk_end = map.m_pblk + map.m_len; 2647 2648 map.m_lblk += map.m_len; 2649 } 2650 2651 if (!fragmented) { 2652 total = 0; 2653 goto out; 2654 } 2655 2656 sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi)); 2657 2658 /* 2659 * make sure there are enough free section for LFS allocation, this can 2660 * avoid defragment running in SSR mode when free section are allocated 2661 * intensively 2662 */ 2663 if (has_not_enough_free_secs(sbi, 0, sec_num)) { 2664 err = -EAGAIN; 2665 goto out; 2666 } 2667 2668 map.m_lblk = pg_start; 2669 map.m_len = pg_end - pg_start; 2670 total = 0; 2671 2672 while (map.m_lblk < pg_end) { 2673 pgoff_t idx; 2674 int cnt = 0; 2675 2676 do_map: 2677 map.m_len = pg_end - map.m_lblk; 2678 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT); 2679 if (err) 2680 goto clear_out; 2681 2682 if (!(map.m_flags & F2FS_MAP_FLAGS)) { 2683 map.m_lblk = next_pgofs; 2684 goto check; 2685 } 2686 2687 set_inode_flag(inode, FI_DO_DEFRAG); 2688 2689 idx = map.m_lblk; 2690 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) { 2691 struct page *page; 2692 2693 page = f2fs_get_lock_data_page(inode, idx, true); 2694 if (IS_ERR(page)) { 2695 err = PTR_ERR(page); 2696 goto clear_out; 2697 } 2698 2699 set_page_dirty(page); 2700 f2fs_put_page(page, 1); 2701 2702 idx++; 2703 cnt++; 2704 total++; 2705 } 2706 2707 map.m_lblk = idx; 2708 check: 2709 if (map.m_lblk < pg_end && cnt < blk_per_seg) 2710 goto do_map; 2711 2712 clear_inode_flag(inode, FI_DO_DEFRAG); 2713 2714 err = filemap_fdatawrite(inode->i_mapping); 2715 if (err) 2716 goto out; 2717 } 2718 clear_out: 2719 clear_inode_flag(inode, FI_DO_DEFRAG); 2720 out: 2721 inode_unlock(inode); 2722 if (!err) 2723 range->len = (u64)total << PAGE_SHIFT; 2724 return err; 2725 } 2726 2727 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg) 2728 { 2729 struct inode *inode = file_inode(filp); 2730 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2731 struct f2fs_defragment range; 2732 int err; 2733 2734 if (!capable(CAP_SYS_ADMIN)) 2735 return -EPERM; 2736 2737 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode)) 2738 return -EINVAL; 2739 2740 if (f2fs_readonly(sbi->sb)) 2741 return -EROFS; 2742 2743 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg, 2744 sizeof(range))) 2745 return -EFAULT; 2746 2747 /* verify alignment of offset & size */ 2748 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1)) 2749 return -EINVAL; 2750 2751 if (unlikely((range.start + range.len) >> PAGE_SHIFT > 2752 max_file_blocks(inode))) 2753 return -EINVAL; 2754 2755 err = mnt_want_write_file(filp); 2756 if (err) 2757 return err; 2758 2759 err = f2fs_defragment_range(sbi, filp, &range); 2760 mnt_drop_write_file(filp); 2761 2762 f2fs_update_time(sbi, REQ_TIME); 2763 if (err < 0) 2764 return err; 2765 2766 if (copy_to_user((struct f2fs_defragment __user *)arg, &range, 2767 sizeof(range))) 2768 return -EFAULT; 2769 2770 return 0; 2771 } 2772 2773 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, 2774 struct file *file_out, loff_t pos_out, size_t len) 2775 { 2776 struct inode *src = file_inode(file_in); 2777 struct inode *dst = file_inode(file_out); 2778 struct f2fs_sb_info *sbi = F2FS_I_SB(src); 2779 size_t olen = len, dst_max_i_size = 0; 2780 size_t dst_osize; 2781 int ret; 2782 2783 if (file_in->f_path.mnt != file_out->f_path.mnt || 2784 src->i_sb != dst->i_sb) 2785 return -EXDEV; 2786 2787 if (unlikely(f2fs_readonly(src->i_sb))) 2788 return -EROFS; 2789 2790 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode)) 2791 return -EINVAL; 2792 2793 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst)) 2794 return -EOPNOTSUPP; 2795 2796 if (pos_out < 0 || pos_in < 0) 2797 return -EINVAL; 2798 2799 if (src == dst) { 2800 if (pos_in == pos_out) 2801 return 0; 2802 if (pos_out > pos_in && pos_out < pos_in + len) 2803 return -EINVAL; 2804 } 2805 2806 inode_lock(src); 2807 if (src != dst) { 2808 ret = -EBUSY; 2809 if (!inode_trylock(dst)) 2810 goto out; 2811 } 2812 2813 ret = -EINVAL; 2814 if (pos_in + len > src->i_size || pos_in + len < pos_in) 2815 goto out_unlock; 2816 if (len == 0) 2817 olen = len = src->i_size - pos_in; 2818 if (pos_in + len == src->i_size) 2819 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in; 2820 if (len == 0) { 2821 ret = 0; 2822 goto out_unlock; 2823 } 2824 2825 dst_osize = dst->i_size; 2826 if (pos_out + olen > dst->i_size) 2827 dst_max_i_size = pos_out + olen; 2828 2829 /* verify the end result is block aligned */ 2830 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) || 2831 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) || 2832 !IS_ALIGNED(pos_out, F2FS_BLKSIZE)) 2833 goto out_unlock; 2834 2835 ret = f2fs_convert_inline_inode(src); 2836 if (ret) 2837 goto out_unlock; 2838 2839 ret = f2fs_convert_inline_inode(dst); 2840 if (ret) 2841 goto out_unlock; 2842 2843 /* write out all dirty pages from offset */ 2844 ret = filemap_write_and_wait_range(src->i_mapping, 2845 pos_in, pos_in + len); 2846 if (ret) 2847 goto out_unlock; 2848 2849 ret = filemap_write_and_wait_range(dst->i_mapping, 2850 pos_out, pos_out + len); 2851 if (ret) 2852 goto out_unlock; 2853 2854 f2fs_balance_fs(sbi, true); 2855 2856 down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]); 2857 if (src != dst) { 2858 ret = -EBUSY; 2859 if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE])) 2860 goto out_src; 2861 } 2862 2863 f2fs_lock_op(sbi); 2864 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS, 2865 pos_out >> F2FS_BLKSIZE_BITS, 2866 len >> F2FS_BLKSIZE_BITS, false); 2867 2868 if (!ret) { 2869 if (dst_max_i_size) 2870 f2fs_i_size_write(dst, dst_max_i_size); 2871 else if (dst_osize != dst->i_size) 2872 f2fs_i_size_write(dst, dst_osize); 2873 } 2874 f2fs_unlock_op(sbi); 2875 2876 if (src != dst) 2877 up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]); 2878 out_src: 2879 up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]); 2880 out_unlock: 2881 if (src != dst) 2882 inode_unlock(dst); 2883 out: 2884 inode_unlock(src); 2885 return ret; 2886 } 2887 2888 static int __f2fs_ioc_move_range(struct file *filp, 2889 struct f2fs_move_range *range) 2890 { 2891 struct fd dst; 2892 int err; 2893 2894 if (!(filp->f_mode & FMODE_READ) || 2895 !(filp->f_mode & FMODE_WRITE)) 2896 return -EBADF; 2897 2898 dst = fdget(range->dst_fd); 2899 if (!dst.file) 2900 return -EBADF; 2901 2902 if (!(dst.file->f_mode & FMODE_WRITE)) { 2903 err = -EBADF; 2904 goto err_out; 2905 } 2906 2907 err = mnt_want_write_file(filp); 2908 if (err) 2909 goto err_out; 2910 2911 err = f2fs_move_file_range(filp, range->pos_in, dst.file, 2912 range->pos_out, range->len); 2913 2914 mnt_drop_write_file(filp); 2915 err_out: 2916 fdput(dst); 2917 return err; 2918 } 2919 2920 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg) 2921 { 2922 struct f2fs_move_range range; 2923 2924 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg, 2925 sizeof(range))) 2926 return -EFAULT; 2927 return __f2fs_ioc_move_range(filp, &range); 2928 } 2929 2930 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg) 2931 { 2932 struct inode *inode = file_inode(filp); 2933 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 2934 struct sit_info *sm = SIT_I(sbi); 2935 unsigned int start_segno = 0, end_segno = 0; 2936 unsigned int dev_start_segno = 0, dev_end_segno = 0; 2937 struct f2fs_flush_device range; 2938 int ret; 2939 2940 if (!capable(CAP_SYS_ADMIN)) 2941 return -EPERM; 2942 2943 if (f2fs_readonly(sbi->sb)) 2944 return -EROFS; 2945 2946 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 2947 return -EINVAL; 2948 2949 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg, 2950 sizeof(range))) 2951 return -EFAULT; 2952 2953 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num || 2954 __is_large_section(sbi)) { 2955 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1", 2956 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec); 2957 return -EINVAL; 2958 } 2959 2960 ret = mnt_want_write_file(filp); 2961 if (ret) 2962 return ret; 2963 2964 if (range.dev_num != 0) 2965 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk); 2966 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk); 2967 2968 start_segno = sm->last_victim[FLUSH_DEVICE]; 2969 if (start_segno < dev_start_segno || start_segno >= dev_end_segno) 2970 start_segno = dev_start_segno; 2971 end_segno = min(start_segno + range.segments, dev_end_segno); 2972 2973 while (start_segno < end_segno) { 2974 if (!down_write_trylock(&sbi->gc_lock)) { 2975 ret = -EBUSY; 2976 goto out; 2977 } 2978 sm->last_victim[GC_CB] = end_segno + 1; 2979 sm->last_victim[GC_GREEDY] = end_segno + 1; 2980 sm->last_victim[ALLOC_NEXT] = end_segno + 1; 2981 ret = f2fs_gc(sbi, true, true, start_segno); 2982 if (ret == -EAGAIN) 2983 ret = 0; 2984 else if (ret < 0) 2985 break; 2986 start_segno++; 2987 } 2988 out: 2989 mnt_drop_write_file(filp); 2990 return ret; 2991 } 2992 2993 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg) 2994 { 2995 struct inode *inode = file_inode(filp); 2996 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature); 2997 2998 /* Must validate to set it with SQLite behavior in Android. */ 2999 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE; 3000 3001 return put_user(sb_feature, (u32 __user *)arg); 3002 } 3003 3004 #ifdef CONFIG_QUOTA 3005 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid) 3006 { 3007 struct dquot *transfer_to[MAXQUOTAS] = {}; 3008 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3009 struct super_block *sb = sbi->sb; 3010 int err = 0; 3011 3012 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid)); 3013 if (!IS_ERR(transfer_to[PRJQUOTA])) { 3014 err = __dquot_transfer(inode, transfer_to); 3015 if (err) 3016 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); 3017 dqput(transfer_to[PRJQUOTA]); 3018 } 3019 return err; 3020 } 3021 3022 static int f2fs_ioc_setproject(struct file *filp, __u32 projid) 3023 { 3024 struct inode *inode = file_inode(filp); 3025 struct f2fs_inode_info *fi = F2FS_I(inode); 3026 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3027 struct page *ipage; 3028 kprojid_t kprojid; 3029 int err; 3030 3031 if (!f2fs_sb_has_project_quota(sbi)) { 3032 if (projid != F2FS_DEF_PROJID) 3033 return -EOPNOTSUPP; 3034 else 3035 return 0; 3036 } 3037 3038 if (!f2fs_has_extra_attr(inode)) 3039 return -EOPNOTSUPP; 3040 3041 kprojid = make_kprojid(&init_user_ns, (projid_t)projid); 3042 3043 if (projid_eq(kprojid, F2FS_I(inode)->i_projid)) 3044 return 0; 3045 3046 err = -EPERM; 3047 /* Is it quota file? Do not allow user to mess with it */ 3048 if (IS_NOQUOTA(inode)) 3049 return err; 3050 3051 ipage = f2fs_get_node_page(sbi, inode->i_ino); 3052 if (IS_ERR(ipage)) 3053 return PTR_ERR(ipage); 3054 3055 if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize, 3056 i_projid)) { 3057 err = -EOVERFLOW; 3058 f2fs_put_page(ipage, 1); 3059 return err; 3060 } 3061 f2fs_put_page(ipage, 1); 3062 3063 err = dquot_initialize(inode); 3064 if (err) 3065 return err; 3066 3067 f2fs_lock_op(sbi); 3068 err = f2fs_transfer_project_quota(inode, kprojid); 3069 if (err) 3070 goto out_unlock; 3071 3072 F2FS_I(inode)->i_projid = kprojid; 3073 inode->i_ctime = current_time(inode); 3074 f2fs_mark_inode_dirty_sync(inode, true); 3075 out_unlock: 3076 f2fs_unlock_op(sbi); 3077 return err; 3078 } 3079 #else 3080 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid) 3081 { 3082 return 0; 3083 } 3084 3085 static int f2fs_ioc_setproject(struct file *filp, __u32 projid) 3086 { 3087 if (projid != F2FS_DEF_PROJID) 3088 return -EOPNOTSUPP; 3089 return 0; 3090 } 3091 #endif 3092 3093 /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */ 3094 3095 /* 3096 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable 3097 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its 3098 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS. 3099 */ 3100 3101 static const struct { 3102 u32 iflag; 3103 u32 xflag; 3104 } f2fs_xflags_map[] = { 3105 { F2FS_SYNC_FL, FS_XFLAG_SYNC }, 3106 { F2FS_IMMUTABLE_FL, FS_XFLAG_IMMUTABLE }, 3107 { F2FS_APPEND_FL, FS_XFLAG_APPEND }, 3108 { F2FS_NODUMP_FL, FS_XFLAG_NODUMP }, 3109 { F2FS_NOATIME_FL, FS_XFLAG_NOATIME }, 3110 { F2FS_PROJINHERIT_FL, FS_XFLAG_PROJINHERIT }, 3111 }; 3112 3113 #define F2FS_SUPPORTED_XFLAGS ( \ 3114 FS_XFLAG_SYNC | \ 3115 FS_XFLAG_IMMUTABLE | \ 3116 FS_XFLAG_APPEND | \ 3117 FS_XFLAG_NODUMP | \ 3118 FS_XFLAG_NOATIME | \ 3119 FS_XFLAG_PROJINHERIT) 3120 3121 /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */ 3122 static inline u32 f2fs_iflags_to_xflags(u32 iflags) 3123 { 3124 u32 xflags = 0; 3125 int i; 3126 3127 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++) 3128 if (iflags & f2fs_xflags_map[i].iflag) 3129 xflags |= f2fs_xflags_map[i].xflag; 3130 3131 return xflags; 3132 } 3133 3134 /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */ 3135 static inline u32 f2fs_xflags_to_iflags(u32 xflags) 3136 { 3137 u32 iflags = 0; 3138 int i; 3139 3140 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++) 3141 if (xflags & f2fs_xflags_map[i].xflag) 3142 iflags |= f2fs_xflags_map[i].iflag; 3143 3144 return iflags; 3145 } 3146 3147 static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa) 3148 { 3149 struct f2fs_inode_info *fi = F2FS_I(inode); 3150 3151 simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags)); 3152 3153 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode))) 3154 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid); 3155 } 3156 3157 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg) 3158 { 3159 struct inode *inode = file_inode(filp); 3160 struct fsxattr fa; 3161 3162 f2fs_fill_fsxattr(inode, &fa); 3163 3164 if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa))) 3165 return -EFAULT; 3166 return 0; 3167 } 3168 3169 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg) 3170 { 3171 struct inode *inode = file_inode(filp); 3172 struct fsxattr fa, old_fa; 3173 u32 iflags; 3174 int err; 3175 3176 if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa))) 3177 return -EFAULT; 3178 3179 /* Make sure caller has proper permission */ 3180 if (!inode_owner_or_capable(&init_user_ns, inode)) 3181 return -EACCES; 3182 3183 if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS) 3184 return -EOPNOTSUPP; 3185 3186 iflags = f2fs_xflags_to_iflags(fa.fsx_xflags); 3187 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags) 3188 return -EOPNOTSUPP; 3189 3190 err = mnt_want_write_file(filp); 3191 if (err) 3192 return err; 3193 3194 inode_lock(inode); 3195 3196 f2fs_fill_fsxattr(inode, &old_fa); 3197 err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa); 3198 if (err) 3199 goto out; 3200 3201 err = f2fs_setflags_common(inode, iflags, 3202 f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS)); 3203 if (err) 3204 goto out; 3205 3206 err = f2fs_ioc_setproject(filp, fa.fsx_projid); 3207 out: 3208 inode_unlock(inode); 3209 mnt_drop_write_file(filp); 3210 return err; 3211 } 3212 3213 int f2fs_pin_file_control(struct inode *inode, bool inc) 3214 { 3215 struct f2fs_inode_info *fi = F2FS_I(inode); 3216 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3217 3218 /* Use i_gc_failures for normal file as a risk signal. */ 3219 if (inc) 3220 f2fs_i_gc_failures_write(inode, 3221 fi->i_gc_failures[GC_FAILURE_PIN] + 1); 3222 3223 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) { 3224 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials", 3225 __func__, inode->i_ino, 3226 fi->i_gc_failures[GC_FAILURE_PIN]); 3227 clear_inode_flag(inode, FI_PIN_FILE); 3228 return -EAGAIN; 3229 } 3230 return 0; 3231 } 3232 3233 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg) 3234 { 3235 struct inode *inode = file_inode(filp); 3236 __u32 pin; 3237 int ret = 0; 3238 3239 if (get_user(pin, (__u32 __user *)arg)) 3240 return -EFAULT; 3241 3242 if (!S_ISREG(inode->i_mode)) 3243 return -EINVAL; 3244 3245 if (f2fs_readonly(F2FS_I_SB(inode)->sb)) 3246 return -EROFS; 3247 3248 ret = mnt_want_write_file(filp); 3249 if (ret) 3250 return ret; 3251 3252 inode_lock(inode); 3253 3254 if (f2fs_should_update_outplace(inode, NULL)) { 3255 ret = -EINVAL; 3256 goto out; 3257 } 3258 3259 if (!pin) { 3260 clear_inode_flag(inode, FI_PIN_FILE); 3261 f2fs_i_gc_failures_write(inode, 0); 3262 goto done; 3263 } 3264 3265 if (f2fs_pin_file_control(inode, false)) { 3266 ret = -EAGAIN; 3267 goto out; 3268 } 3269 3270 ret = f2fs_convert_inline_inode(inode); 3271 if (ret) 3272 goto out; 3273 3274 if (!f2fs_disable_compressed_file(inode)) { 3275 ret = -EOPNOTSUPP; 3276 goto out; 3277 } 3278 3279 set_inode_flag(inode, FI_PIN_FILE); 3280 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]; 3281 done: 3282 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 3283 out: 3284 inode_unlock(inode); 3285 mnt_drop_write_file(filp); 3286 return ret; 3287 } 3288 3289 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg) 3290 { 3291 struct inode *inode = file_inode(filp); 3292 __u32 pin = 0; 3293 3294 if (is_inode_flag_set(inode, FI_PIN_FILE)) 3295 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]; 3296 return put_user(pin, (u32 __user *)arg); 3297 } 3298 3299 int f2fs_precache_extents(struct inode *inode) 3300 { 3301 struct f2fs_inode_info *fi = F2FS_I(inode); 3302 struct f2fs_map_blocks map; 3303 pgoff_t m_next_extent; 3304 loff_t end; 3305 int err; 3306 3307 if (is_inode_flag_set(inode, FI_NO_EXTENT)) 3308 return -EOPNOTSUPP; 3309 3310 map.m_lblk = 0; 3311 map.m_next_pgofs = NULL; 3312 map.m_next_extent = &m_next_extent; 3313 map.m_seg_type = NO_CHECK_TYPE; 3314 map.m_may_create = false; 3315 end = max_file_blocks(inode); 3316 3317 while (map.m_lblk < end) { 3318 map.m_len = end - map.m_lblk; 3319 3320 down_write(&fi->i_gc_rwsem[WRITE]); 3321 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE); 3322 up_write(&fi->i_gc_rwsem[WRITE]); 3323 if (err) 3324 return err; 3325 3326 map.m_lblk = m_next_extent; 3327 } 3328 3329 return err; 3330 } 3331 3332 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg) 3333 { 3334 return f2fs_precache_extents(file_inode(filp)); 3335 } 3336 3337 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg) 3338 { 3339 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp)); 3340 __u64 block_count; 3341 3342 if (!capable(CAP_SYS_ADMIN)) 3343 return -EPERM; 3344 3345 if (f2fs_readonly(sbi->sb)) 3346 return -EROFS; 3347 3348 if (copy_from_user(&block_count, (void __user *)arg, 3349 sizeof(block_count))) 3350 return -EFAULT; 3351 3352 return f2fs_resize_fs(sbi, block_count); 3353 } 3354 3355 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg) 3356 { 3357 struct inode *inode = file_inode(filp); 3358 3359 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); 3360 3361 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) { 3362 f2fs_warn(F2FS_I_SB(inode), 3363 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n", 3364 inode->i_ino); 3365 return -EOPNOTSUPP; 3366 } 3367 3368 return fsverity_ioctl_enable(filp, (const void __user *)arg); 3369 } 3370 3371 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg) 3372 { 3373 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp)))) 3374 return -EOPNOTSUPP; 3375 3376 return fsverity_ioctl_measure(filp, (void __user *)arg); 3377 } 3378 3379 static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg) 3380 { 3381 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp)))) 3382 return -EOPNOTSUPP; 3383 3384 return fsverity_ioctl_read_metadata(filp, (const void __user *)arg); 3385 } 3386 3387 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg) 3388 { 3389 struct inode *inode = file_inode(filp); 3390 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3391 char *vbuf; 3392 int count; 3393 int err = 0; 3394 3395 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL); 3396 if (!vbuf) 3397 return -ENOMEM; 3398 3399 down_read(&sbi->sb_lock); 3400 count = utf16s_to_utf8s(sbi->raw_super->volume_name, 3401 ARRAY_SIZE(sbi->raw_super->volume_name), 3402 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME); 3403 up_read(&sbi->sb_lock); 3404 3405 if (copy_to_user((char __user *)arg, vbuf, 3406 min(FSLABEL_MAX, count))) 3407 err = -EFAULT; 3408 3409 kfree(vbuf); 3410 return err; 3411 } 3412 3413 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg) 3414 { 3415 struct inode *inode = file_inode(filp); 3416 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3417 char *vbuf; 3418 int err = 0; 3419 3420 if (!capable(CAP_SYS_ADMIN)) 3421 return -EPERM; 3422 3423 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX); 3424 if (IS_ERR(vbuf)) 3425 return PTR_ERR(vbuf); 3426 3427 err = mnt_want_write_file(filp); 3428 if (err) 3429 goto out; 3430 3431 down_write(&sbi->sb_lock); 3432 3433 memset(sbi->raw_super->volume_name, 0, 3434 sizeof(sbi->raw_super->volume_name)); 3435 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN, 3436 sbi->raw_super->volume_name, 3437 ARRAY_SIZE(sbi->raw_super->volume_name)); 3438 3439 err = f2fs_commit_super(sbi, false); 3440 3441 up_write(&sbi->sb_lock); 3442 3443 mnt_drop_write_file(filp); 3444 out: 3445 kfree(vbuf); 3446 return err; 3447 } 3448 3449 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg) 3450 { 3451 struct inode *inode = file_inode(filp); 3452 __u64 blocks; 3453 3454 if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) 3455 return -EOPNOTSUPP; 3456 3457 if (!f2fs_compressed_file(inode)) 3458 return -EINVAL; 3459 3460 blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks); 3461 return put_user(blocks, (u64 __user *)arg); 3462 } 3463 3464 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count) 3465 { 3466 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); 3467 unsigned int released_blocks = 0; 3468 int cluster_size = F2FS_I(dn->inode)->i_cluster_size; 3469 block_t blkaddr; 3470 int i; 3471 3472 for (i = 0; i < count; i++) { 3473 blkaddr = data_blkaddr(dn->inode, dn->node_page, 3474 dn->ofs_in_node + i); 3475 3476 if (!__is_valid_data_blkaddr(blkaddr)) 3477 continue; 3478 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr, 3479 DATA_GENERIC_ENHANCE))) 3480 return -EFSCORRUPTED; 3481 } 3482 3483 while (count) { 3484 int compr_blocks = 0; 3485 3486 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) { 3487 blkaddr = f2fs_data_blkaddr(dn); 3488 3489 if (i == 0) { 3490 if (blkaddr == COMPRESS_ADDR) 3491 continue; 3492 dn->ofs_in_node += cluster_size; 3493 goto next; 3494 } 3495 3496 if (__is_valid_data_blkaddr(blkaddr)) 3497 compr_blocks++; 3498 3499 if (blkaddr != NEW_ADDR) 3500 continue; 3501 3502 dn->data_blkaddr = NULL_ADDR; 3503 f2fs_set_data_blkaddr(dn); 3504 } 3505 3506 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false); 3507 dec_valid_block_count(sbi, dn->inode, 3508 cluster_size - compr_blocks); 3509 3510 released_blocks += cluster_size - compr_blocks; 3511 next: 3512 count -= cluster_size; 3513 } 3514 3515 return released_blocks; 3516 } 3517 3518 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg) 3519 { 3520 struct inode *inode = file_inode(filp); 3521 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3522 pgoff_t page_idx = 0, last_idx; 3523 unsigned int released_blocks = 0; 3524 int ret; 3525 int writecount; 3526 3527 if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) 3528 return -EOPNOTSUPP; 3529 3530 if (!f2fs_compressed_file(inode)) 3531 return -EINVAL; 3532 3533 if (f2fs_readonly(sbi->sb)) 3534 return -EROFS; 3535 3536 ret = mnt_want_write_file(filp); 3537 if (ret) 3538 return ret; 3539 3540 f2fs_balance_fs(F2FS_I_SB(inode), true); 3541 3542 inode_lock(inode); 3543 3544 writecount = atomic_read(&inode->i_writecount); 3545 if ((filp->f_mode & FMODE_WRITE && writecount != 1) || 3546 (!(filp->f_mode & FMODE_WRITE) && writecount)) { 3547 ret = -EBUSY; 3548 goto out; 3549 } 3550 3551 if (IS_IMMUTABLE(inode)) { 3552 ret = -EINVAL; 3553 goto out; 3554 } 3555 3556 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); 3557 if (ret) 3558 goto out; 3559 3560 F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL; 3561 f2fs_set_inode_flags(inode); 3562 inode->i_ctime = current_time(inode); 3563 f2fs_mark_inode_dirty_sync(inode, true); 3564 3565 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks)) 3566 goto out; 3567 3568 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3569 down_write(&F2FS_I(inode)->i_mmap_sem); 3570 3571 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 3572 3573 while (page_idx < last_idx) { 3574 struct dnode_of_data dn; 3575 pgoff_t end_offset, count; 3576 3577 set_new_dnode(&dn, inode, NULL, NULL, 0); 3578 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE); 3579 if (ret) { 3580 if (ret == -ENOENT) { 3581 page_idx = f2fs_get_next_page_offset(&dn, 3582 page_idx); 3583 ret = 0; 3584 continue; 3585 } 3586 break; 3587 } 3588 3589 end_offset = ADDRS_PER_PAGE(dn.node_page, inode); 3590 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); 3591 count = round_up(count, F2FS_I(inode)->i_cluster_size); 3592 3593 ret = release_compress_blocks(&dn, count); 3594 3595 f2fs_put_dnode(&dn); 3596 3597 if (ret < 0) 3598 break; 3599 3600 page_idx += count; 3601 released_blocks += ret; 3602 } 3603 3604 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3605 up_write(&F2FS_I(inode)->i_mmap_sem); 3606 out: 3607 inode_unlock(inode); 3608 3609 mnt_drop_write_file(filp); 3610 3611 if (ret >= 0) { 3612 ret = put_user(released_blocks, (u64 __user *)arg); 3613 } else if (released_blocks && 3614 atomic_read(&F2FS_I(inode)->i_compr_blocks)) { 3615 set_sbi_flag(sbi, SBI_NEED_FSCK); 3616 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx " 3617 "iblocks=%llu, released=%u, compr_blocks=%u, " 3618 "run fsck to fix.", 3619 __func__, inode->i_ino, inode->i_blocks, 3620 released_blocks, 3621 atomic_read(&F2FS_I(inode)->i_compr_blocks)); 3622 } 3623 3624 return ret; 3625 } 3626 3627 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count) 3628 { 3629 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); 3630 unsigned int reserved_blocks = 0; 3631 int cluster_size = F2FS_I(dn->inode)->i_cluster_size; 3632 block_t blkaddr; 3633 int i; 3634 3635 for (i = 0; i < count; i++) { 3636 blkaddr = data_blkaddr(dn->inode, dn->node_page, 3637 dn->ofs_in_node + i); 3638 3639 if (!__is_valid_data_blkaddr(blkaddr)) 3640 continue; 3641 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr, 3642 DATA_GENERIC_ENHANCE))) 3643 return -EFSCORRUPTED; 3644 } 3645 3646 while (count) { 3647 int compr_blocks = 0; 3648 blkcnt_t reserved; 3649 int ret; 3650 3651 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) { 3652 blkaddr = f2fs_data_blkaddr(dn); 3653 3654 if (i == 0) { 3655 if (blkaddr == COMPRESS_ADDR) 3656 continue; 3657 dn->ofs_in_node += cluster_size; 3658 goto next; 3659 } 3660 3661 if (__is_valid_data_blkaddr(blkaddr)) { 3662 compr_blocks++; 3663 continue; 3664 } 3665 3666 dn->data_blkaddr = NEW_ADDR; 3667 f2fs_set_data_blkaddr(dn); 3668 } 3669 3670 reserved = cluster_size - compr_blocks; 3671 ret = inc_valid_block_count(sbi, dn->inode, &reserved); 3672 if (ret) 3673 return ret; 3674 3675 if (reserved != cluster_size - compr_blocks) 3676 return -ENOSPC; 3677 3678 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true); 3679 3680 reserved_blocks += reserved; 3681 next: 3682 count -= cluster_size; 3683 } 3684 3685 return reserved_blocks; 3686 } 3687 3688 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg) 3689 { 3690 struct inode *inode = file_inode(filp); 3691 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3692 pgoff_t page_idx = 0, last_idx; 3693 unsigned int reserved_blocks = 0; 3694 int ret; 3695 3696 if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) 3697 return -EOPNOTSUPP; 3698 3699 if (!f2fs_compressed_file(inode)) 3700 return -EINVAL; 3701 3702 if (f2fs_readonly(sbi->sb)) 3703 return -EROFS; 3704 3705 ret = mnt_want_write_file(filp); 3706 if (ret) 3707 return ret; 3708 3709 if (atomic_read(&F2FS_I(inode)->i_compr_blocks)) 3710 goto out; 3711 3712 f2fs_balance_fs(F2FS_I_SB(inode), true); 3713 3714 inode_lock(inode); 3715 3716 if (!IS_IMMUTABLE(inode)) { 3717 ret = -EINVAL; 3718 goto unlock_inode; 3719 } 3720 3721 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3722 down_write(&F2FS_I(inode)->i_mmap_sem); 3723 3724 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 3725 3726 while (page_idx < last_idx) { 3727 struct dnode_of_data dn; 3728 pgoff_t end_offset, count; 3729 3730 set_new_dnode(&dn, inode, NULL, NULL, 0); 3731 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE); 3732 if (ret) { 3733 if (ret == -ENOENT) { 3734 page_idx = f2fs_get_next_page_offset(&dn, 3735 page_idx); 3736 ret = 0; 3737 continue; 3738 } 3739 break; 3740 } 3741 3742 end_offset = ADDRS_PER_PAGE(dn.node_page, inode); 3743 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); 3744 count = round_up(count, F2FS_I(inode)->i_cluster_size); 3745 3746 ret = reserve_compress_blocks(&dn, count); 3747 3748 f2fs_put_dnode(&dn); 3749 3750 if (ret < 0) 3751 break; 3752 3753 page_idx += count; 3754 reserved_blocks += ret; 3755 } 3756 3757 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3758 up_write(&F2FS_I(inode)->i_mmap_sem); 3759 3760 if (ret >= 0) { 3761 F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL; 3762 f2fs_set_inode_flags(inode); 3763 inode->i_ctime = current_time(inode); 3764 f2fs_mark_inode_dirty_sync(inode, true); 3765 } 3766 unlock_inode: 3767 inode_unlock(inode); 3768 out: 3769 mnt_drop_write_file(filp); 3770 3771 if (ret >= 0) { 3772 ret = put_user(reserved_blocks, (u64 __user *)arg); 3773 } else if (reserved_blocks && 3774 atomic_read(&F2FS_I(inode)->i_compr_blocks)) { 3775 set_sbi_flag(sbi, SBI_NEED_FSCK); 3776 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx " 3777 "iblocks=%llu, reserved=%u, compr_blocks=%u, " 3778 "run fsck to fix.", 3779 __func__, inode->i_ino, inode->i_blocks, 3780 reserved_blocks, 3781 atomic_read(&F2FS_I(inode)->i_compr_blocks)); 3782 } 3783 3784 return ret; 3785 } 3786 3787 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode, 3788 pgoff_t off, block_t block, block_t len, u32 flags) 3789 { 3790 struct request_queue *q = bdev_get_queue(bdev); 3791 sector_t sector = SECTOR_FROM_BLOCK(block); 3792 sector_t nr_sects = SECTOR_FROM_BLOCK(len); 3793 int ret = 0; 3794 3795 if (!q) 3796 return -ENXIO; 3797 3798 if (flags & F2FS_TRIM_FILE_DISCARD) 3799 ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS, 3800 blk_queue_secure_erase(q) ? 3801 BLKDEV_DISCARD_SECURE : 0); 3802 3803 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) { 3804 if (IS_ENCRYPTED(inode)) 3805 ret = fscrypt_zeroout_range(inode, off, block, len); 3806 else 3807 ret = blkdev_issue_zeroout(bdev, sector, nr_sects, 3808 GFP_NOFS, 0); 3809 } 3810 3811 return ret; 3812 } 3813 3814 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg) 3815 { 3816 struct inode *inode = file_inode(filp); 3817 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3818 struct address_space *mapping = inode->i_mapping; 3819 struct block_device *prev_bdev = NULL; 3820 struct f2fs_sectrim_range range; 3821 pgoff_t index, pg_end, prev_index = 0; 3822 block_t prev_block = 0, len = 0; 3823 loff_t end_addr; 3824 bool to_end = false; 3825 int ret = 0; 3826 3827 if (!(filp->f_mode & FMODE_WRITE)) 3828 return -EBADF; 3829 3830 if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg, 3831 sizeof(range))) 3832 return -EFAULT; 3833 3834 if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) || 3835 !S_ISREG(inode->i_mode)) 3836 return -EINVAL; 3837 3838 if (((range.flags & F2FS_TRIM_FILE_DISCARD) && 3839 !f2fs_hw_support_discard(sbi)) || 3840 ((range.flags & F2FS_TRIM_FILE_ZEROOUT) && 3841 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi))) 3842 return -EOPNOTSUPP; 3843 3844 file_start_write(filp); 3845 inode_lock(inode); 3846 3847 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) || 3848 range.start >= inode->i_size) { 3849 ret = -EINVAL; 3850 goto err; 3851 } 3852 3853 if (range.len == 0) 3854 goto err; 3855 3856 if (inode->i_size - range.start > range.len) { 3857 end_addr = range.start + range.len; 3858 } else { 3859 end_addr = range.len == (u64)-1 ? 3860 sbi->sb->s_maxbytes : inode->i_size; 3861 to_end = true; 3862 } 3863 3864 if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) || 3865 (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) { 3866 ret = -EINVAL; 3867 goto err; 3868 } 3869 3870 index = F2FS_BYTES_TO_BLK(range.start); 3871 pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE); 3872 3873 ret = f2fs_convert_inline_inode(inode); 3874 if (ret) 3875 goto err; 3876 3877 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3878 down_write(&F2FS_I(inode)->i_mmap_sem); 3879 3880 ret = filemap_write_and_wait_range(mapping, range.start, 3881 to_end ? LLONG_MAX : end_addr - 1); 3882 if (ret) 3883 goto out; 3884 3885 truncate_inode_pages_range(mapping, range.start, 3886 to_end ? -1 : end_addr - 1); 3887 3888 while (index < pg_end) { 3889 struct dnode_of_data dn; 3890 pgoff_t end_offset, count; 3891 int i; 3892 3893 set_new_dnode(&dn, inode, NULL, NULL, 0); 3894 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE); 3895 if (ret) { 3896 if (ret == -ENOENT) { 3897 index = f2fs_get_next_page_offset(&dn, index); 3898 continue; 3899 } 3900 goto out; 3901 } 3902 3903 end_offset = ADDRS_PER_PAGE(dn.node_page, inode); 3904 count = min(end_offset - dn.ofs_in_node, pg_end - index); 3905 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) { 3906 struct block_device *cur_bdev; 3907 block_t blkaddr = f2fs_data_blkaddr(&dn); 3908 3909 if (!__is_valid_data_blkaddr(blkaddr)) 3910 continue; 3911 3912 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, 3913 DATA_GENERIC_ENHANCE)) { 3914 ret = -EFSCORRUPTED; 3915 f2fs_put_dnode(&dn); 3916 goto out; 3917 } 3918 3919 cur_bdev = f2fs_target_device(sbi, blkaddr, NULL); 3920 if (f2fs_is_multi_device(sbi)) { 3921 int di = f2fs_target_device_index(sbi, blkaddr); 3922 3923 blkaddr -= FDEV(di).start_blk; 3924 } 3925 3926 if (len) { 3927 if (prev_bdev == cur_bdev && 3928 index == prev_index + len && 3929 blkaddr == prev_block + len) { 3930 len++; 3931 } else { 3932 ret = f2fs_secure_erase(prev_bdev, 3933 inode, prev_index, prev_block, 3934 len, range.flags); 3935 if (ret) { 3936 f2fs_put_dnode(&dn); 3937 goto out; 3938 } 3939 3940 len = 0; 3941 } 3942 } 3943 3944 if (!len) { 3945 prev_bdev = cur_bdev; 3946 prev_index = index; 3947 prev_block = blkaddr; 3948 len = 1; 3949 } 3950 } 3951 3952 f2fs_put_dnode(&dn); 3953 3954 if (fatal_signal_pending(current)) { 3955 ret = -EINTR; 3956 goto out; 3957 } 3958 cond_resched(); 3959 } 3960 3961 if (len) 3962 ret = f2fs_secure_erase(prev_bdev, inode, prev_index, 3963 prev_block, len, range.flags); 3964 out: 3965 up_write(&F2FS_I(inode)->i_mmap_sem); 3966 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 3967 err: 3968 inode_unlock(inode); 3969 file_end_write(filp); 3970 3971 return ret; 3972 } 3973 3974 static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg) 3975 { 3976 struct inode *inode = file_inode(filp); 3977 struct f2fs_comp_option option; 3978 3979 if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) 3980 return -EOPNOTSUPP; 3981 3982 inode_lock_shared(inode); 3983 3984 if (!f2fs_compressed_file(inode)) { 3985 inode_unlock_shared(inode); 3986 return -ENODATA; 3987 } 3988 3989 option.algorithm = F2FS_I(inode)->i_compress_algorithm; 3990 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size; 3991 3992 inode_unlock_shared(inode); 3993 3994 if (copy_to_user((struct f2fs_comp_option __user *)arg, &option, 3995 sizeof(option))) 3996 return -EFAULT; 3997 3998 return 0; 3999 } 4000 4001 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg) 4002 { 4003 struct inode *inode = file_inode(filp); 4004 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 4005 struct f2fs_comp_option option; 4006 int ret = 0; 4007 4008 if (!f2fs_sb_has_compression(sbi)) 4009 return -EOPNOTSUPP; 4010 4011 if (!(filp->f_mode & FMODE_WRITE)) 4012 return -EBADF; 4013 4014 if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg, 4015 sizeof(option))) 4016 return -EFAULT; 4017 4018 if (!f2fs_compressed_file(inode) || 4019 option.log_cluster_size < MIN_COMPRESS_LOG_SIZE || 4020 option.log_cluster_size > MAX_COMPRESS_LOG_SIZE || 4021 option.algorithm >= COMPRESS_MAX) 4022 return -EINVAL; 4023 4024 file_start_write(filp); 4025 inode_lock(inode); 4026 4027 if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) { 4028 ret = -EBUSY; 4029 goto out; 4030 } 4031 4032 if (inode->i_size != 0) { 4033 ret = -EFBIG; 4034 goto out; 4035 } 4036 4037 F2FS_I(inode)->i_compress_algorithm = option.algorithm; 4038 F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size; 4039 F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size; 4040 f2fs_mark_inode_dirty_sync(inode, true); 4041 4042 if (!f2fs_is_compress_backend_ready(inode)) 4043 f2fs_warn(sbi, "compression algorithm is successfully set, " 4044 "but current kernel doesn't support this algorithm."); 4045 out: 4046 inode_unlock(inode); 4047 file_end_write(filp); 4048 4049 return ret; 4050 } 4051 4052 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len) 4053 { 4054 DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, page_idx); 4055 struct address_space *mapping = inode->i_mapping; 4056 struct page *page; 4057 pgoff_t redirty_idx = page_idx; 4058 int i, page_len = 0, ret = 0; 4059 4060 page_cache_ra_unbounded(&ractl, len, 0); 4061 4062 for (i = 0; i < len; i++, page_idx++) { 4063 page = read_cache_page(mapping, page_idx, NULL, NULL); 4064 if (IS_ERR(page)) { 4065 ret = PTR_ERR(page); 4066 break; 4067 } 4068 page_len++; 4069 } 4070 4071 for (i = 0; i < page_len; i++, redirty_idx++) { 4072 page = find_lock_page(mapping, redirty_idx); 4073 if (!page) { 4074 ret = -ENOMEM; 4075 break; 4076 } 4077 set_page_dirty(page); 4078 f2fs_put_page(page, 1); 4079 f2fs_put_page(page, 0); 4080 } 4081 4082 return ret; 4083 } 4084 4085 static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg) 4086 { 4087 struct inode *inode = file_inode(filp); 4088 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 4089 struct f2fs_inode_info *fi = F2FS_I(inode); 4090 pgoff_t page_idx = 0, last_idx; 4091 unsigned int blk_per_seg = sbi->blocks_per_seg; 4092 int cluster_size = F2FS_I(inode)->i_cluster_size; 4093 int count, ret; 4094 4095 if (!f2fs_sb_has_compression(sbi) || 4096 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER) 4097 return -EOPNOTSUPP; 4098 4099 if (!(filp->f_mode & FMODE_WRITE)) 4100 return -EBADF; 4101 4102 if (!f2fs_compressed_file(inode)) 4103 return -EINVAL; 4104 4105 f2fs_balance_fs(F2FS_I_SB(inode), true); 4106 4107 file_start_write(filp); 4108 inode_lock(inode); 4109 4110 if (!f2fs_is_compress_backend_ready(inode)) { 4111 ret = -EOPNOTSUPP; 4112 goto out; 4113 } 4114 4115 if (f2fs_is_mmap_file(inode)) { 4116 ret = -EBUSY; 4117 goto out; 4118 } 4119 4120 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); 4121 if (ret) 4122 goto out; 4123 4124 if (!atomic_read(&fi->i_compr_blocks)) 4125 goto out; 4126 4127 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 4128 4129 count = last_idx - page_idx; 4130 while (count) { 4131 int len = min(cluster_size, count); 4132 4133 ret = redirty_blocks(inode, page_idx, len); 4134 if (ret < 0) 4135 break; 4136 4137 if (get_dirty_pages(inode) >= blk_per_seg) 4138 filemap_fdatawrite(inode->i_mapping); 4139 4140 count -= len; 4141 page_idx += len; 4142 } 4143 4144 if (!ret) 4145 ret = filemap_write_and_wait_range(inode->i_mapping, 0, 4146 LLONG_MAX); 4147 4148 if (ret) 4149 f2fs_warn(sbi, "%s: The file might be partially decompressed " 4150 "(errno=%d). Please delete the file.\n", 4151 __func__, ret); 4152 out: 4153 inode_unlock(inode); 4154 file_end_write(filp); 4155 4156 return ret; 4157 } 4158 4159 static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg) 4160 { 4161 struct inode *inode = file_inode(filp); 4162 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 4163 pgoff_t page_idx = 0, last_idx; 4164 unsigned int blk_per_seg = sbi->blocks_per_seg; 4165 int cluster_size = F2FS_I(inode)->i_cluster_size; 4166 int count, ret; 4167 4168 if (!f2fs_sb_has_compression(sbi) || 4169 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER) 4170 return -EOPNOTSUPP; 4171 4172 if (!(filp->f_mode & FMODE_WRITE)) 4173 return -EBADF; 4174 4175 if (!f2fs_compressed_file(inode)) 4176 return -EINVAL; 4177 4178 f2fs_balance_fs(F2FS_I_SB(inode), true); 4179 4180 file_start_write(filp); 4181 inode_lock(inode); 4182 4183 if (!f2fs_is_compress_backend_ready(inode)) { 4184 ret = -EOPNOTSUPP; 4185 goto out; 4186 } 4187 4188 if (f2fs_is_mmap_file(inode)) { 4189 ret = -EBUSY; 4190 goto out; 4191 } 4192 4193 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); 4194 if (ret) 4195 goto out; 4196 4197 set_inode_flag(inode, FI_ENABLE_COMPRESS); 4198 4199 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 4200 4201 count = last_idx - page_idx; 4202 while (count) { 4203 int len = min(cluster_size, count); 4204 4205 ret = redirty_blocks(inode, page_idx, len); 4206 if (ret < 0) 4207 break; 4208 4209 if (get_dirty_pages(inode) >= blk_per_seg) 4210 filemap_fdatawrite(inode->i_mapping); 4211 4212 count -= len; 4213 page_idx += len; 4214 } 4215 4216 if (!ret) 4217 ret = filemap_write_and_wait_range(inode->i_mapping, 0, 4218 LLONG_MAX); 4219 4220 clear_inode_flag(inode, FI_ENABLE_COMPRESS); 4221 4222 if (ret) 4223 f2fs_warn(sbi, "%s: The file might be partially compressed " 4224 "(errno=%d). Please delete the file.\n", 4225 __func__, ret); 4226 out: 4227 inode_unlock(inode); 4228 file_end_write(filp); 4229 4230 return ret; 4231 } 4232 4233 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 4234 { 4235 switch (cmd) { 4236 case FS_IOC_GETFLAGS: 4237 return f2fs_ioc_getflags(filp, arg); 4238 case FS_IOC_SETFLAGS: 4239 return f2fs_ioc_setflags(filp, arg); 4240 case FS_IOC_GETVERSION: 4241 return f2fs_ioc_getversion(filp, arg); 4242 case F2FS_IOC_START_ATOMIC_WRITE: 4243 return f2fs_ioc_start_atomic_write(filp); 4244 case F2FS_IOC_COMMIT_ATOMIC_WRITE: 4245 return f2fs_ioc_commit_atomic_write(filp); 4246 case F2FS_IOC_START_VOLATILE_WRITE: 4247 return f2fs_ioc_start_volatile_write(filp); 4248 case F2FS_IOC_RELEASE_VOLATILE_WRITE: 4249 return f2fs_ioc_release_volatile_write(filp); 4250 case F2FS_IOC_ABORT_VOLATILE_WRITE: 4251 return f2fs_ioc_abort_volatile_write(filp); 4252 case F2FS_IOC_SHUTDOWN: 4253 return f2fs_ioc_shutdown(filp, arg); 4254 case FITRIM: 4255 return f2fs_ioc_fitrim(filp, arg); 4256 case FS_IOC_SET_ENCRYPTION_POLICY: 4257 return f2fs_ioc_set_encryption_policy(filp, arg); 4258 case FS_IOC_GET_ENCRYPTION_POLICY: 4259 return f2fs_ioc_get_encryption_policy(filp, arg); 4260 case FS_IOC_GET_ENCRYPTION_PWSALT: 4261 return f2fs_ioc_get_encryption_pwsalt(filp, arg); 4262 case FS_IOC_GET_ENCRYPTION_POLICY_EX: 4263 return f2fs_ioc_get_encryption_policy_ex(filp, arg); 4264 case FS_IOC_ADD_ENCRYPTION_KEY: 4265 return f2fs_ioc_add_encryption_key(filp, arg); 4266 case FS_IOC_REMOVE_ENCRYPTION_KEY: 4267 return f2fs_ioc_remove_encryption_key(filp, arg); 4268 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: 4269 return f2fs_ioc_remove_encryption_key_all_users(filp, arg); 4270 case FS_IOC_GET_ENCRYPTION_KEY_STATUS: 4271 return f2fs_ioc_get_encryption_key_status(filp, arg); 4272 case FS_IOC_GET_ENCRYPTION_NONCE: 4273 return f2fs_ioc_get_encryption_nonce(filp, arg); 4274 case F2FS_IOC_GARBAGE_COLLECT: 4275 return f2fs_ioc_gc(filp, arg); 4276 case F2FS_IOC_GARBAGE_COLLECT_RANGE: 4277 return f2fs_ioc_gc_range(filp, arg); 4278 case F2FS_IOC_WRITE_CHECKPOINT: 4279 return f2fs_ioc_write_checkpoint(filp, arg); 4280 case F2FS_IOC_DEFRAGMENT: 4281 return f2fs_ioc_defragment(filp, arg); 4282 case F2FS_IOC_MOVE_RANGE: 4283 return f2fs_ioc_move_range(filp, arg); 4284 case F2FS_IOC_FLUSH_DEVICE: 4285 return f2fs_ioc_flush_device(filp, arg); 4286 case F2FS_IOC_GET_FEATURES: 4287 return f2fs_ioc_get_features(filp, arg); 4288 case FS_IOC_FSGETXATTR: 4289 return f2fs_ioc_fsgetxattr(filp, arg); 4290 case FS_IOC_FSSETXATTR: 4291 return f2fs_ioc_fssetxattr(filp, arg); 4292 case F2FS_IOC_GET_PIN_FILE: 4293 return f2fs_ioc_get_pin_file(filp, arg); 4294 case F2FS_IOC_SET_PIN_FILE: 4295 return f2fs_ioc_set_pin_file(filp, arg); 4296 case F2FS_IOC_PRECACHE_EXTENTS: 4297 return f2fs_ioc_precache_extents(filp, arg); 4298 case F2FS_IOC_RESIZE_FS: 4299 return f2fs_ioc_resize_fs(filp, arg); 4300 case FS_IOC_ENABLE_VERITY: 4301 return f2fs_ioc_enable_verity(filp, arg); 4302 case FS_IOC_MEASURE_VERITY: 4303 return f2fs_ioc_measure_verity(filp, arg); 4304 case FS_IOC_READ_VERITY_METADATA: 4305 return f2fs_ioc_read_verity_metadata(filp, arg); 4306 case FS_IOC_GETFSLABEL: 4307 return f2fs_ioc_getfslabel(filp, arg); 4308 case FS_IOC_SETFSLABEL: 4309 return f2fs_ioc_setfslabel(filp, arg); 4310 case F2FS_IOC_GET_COMPRESS_BLOCKS: 4311 return f2fs_get_compress_blocks(filp, arg); 4312 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS: 4313 return f2fs_release_compress_blocks(filp, arg); 4314 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS: 4315 return f2fs_reserve_compress_blocks(filp, arg); 4316 case F2FS_IOC_SEC_TRIM_FILE: 4317 return f2fs_sec_trim_file(filp, arg); 4318 case F2FS_IOC_GET_COMPRESS_OPTION: 4319 return f2fs_ioc_get_compress_option(filp, arg); 4320 case F2FS_IOC_SET_COMPRESS_OPTION: 4321 return f2fs_ioc_set_compress_option(filp, arg); 4322 case F2FS_IOC_DECOMPRESS_FILE: 4323 return f2fs_ioc_decompress_file(filp, arg); 4324 case F2FS_IOC_COMPRESS_FILE: 4325 return f2fs_ioc_compress_file(filp, arg); 4326 default: 4327 return -ENOTTY; 4328 } 4329 } 4330 4331 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 4332 { 4333 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) 4334 return -EIO; 4335 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp)))) 4336 return -ENOSPC; 4337 4338 return __f2fs_ioctl(filp, cmd, arg); 4339 } 4340 4341 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) 4342 { 4343 struct file *file = iocb->ki_filp; 4344 struct inode *inode = file_inode(file); 4345 int ret; 4346 4347 if (!f2fs_is_compress_backend_ready(inode)) 4348 return -EOPNOTSUPP; 4349 4350 ret = generic_file_read_iter(iocb, iter); 4351 4352 if (ret > 0) 4353 f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret); 4354 4355 return ret; 4356 } 4357 4358 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 4359 { 4360 struct file *file = iocb->ki_filp; 4361 struct inode *inode = file_inode(file); 4362 ssize_t ret; 4363 4364 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) { 4365 ret = -EIO; 4366 goto out; 4367 } 4368 4369 if (!f2fs_is_compress_backend_ready(inode)) { 4370 ret = -EOPNOTSUPP; 4371 goto out; 4372 } 4373 4374 if (iocb->ki_flags & IOCB_NOWAIT) { 4375 if (!inode_trylock(inode)) { 4376 ret = -EAGAIN; 4377 goto out; 4378 } 4379 } else { 4380 inode_lock(inode); 4381 } 4382 4383 if (unlikely(IS_IMMUTABLE(inode))) { 4384 ret = -EPERM; 4385 goto unlock; 4386 } 4387 4388 ret = generic_write_checks(iocb, from); 4389 if (ret > 0) { 4390 bool preallocated = false; 4391 size_t target_size = 0; 4392 int err; 4393 4394 if (iov_iter_fault_in_readable(from, iov_iter_count(from))) 4395 set_inode_flag(inode, FI_NO_PREALLOC); 4396 4397 if ((iocb->ki_flags & IOCB_NOWAIT)) { 4398 if (!f2fs_overwrite_io(inode, iocb->ki_pos, 4399 iov_iter_count(from)) || 4400 f2fs_has_inline_data(inode) || 4401 f2fs_force_buffered_io(inode, iocb, from)) { 4402 clear_inode_flag(inode, FI_NO_PREALLOC); 4403 inode_unlock(inode); 4404 ret = -EAGAIN; 4405 goto out; 4406 } 4407 goto write; 4408 } 4409 4410 if (is_inode_flag_set(inode, FI_NO_PREALLOC)) 4411 goto write; 4412 4413 if (iocb->ki_flags & IOCB_DIRECT) { 4414 /* 4415 * Convert inline data for Direct I/O before entering 4416 * f2fs_direct_IO(). 4417 */ 4418 err = f2fs_convert_inline_inode(inode); 4419 if (err) 4420 goto out_err; 4421 /* 4422 * If force_buffere_io() is true, we have to allocate 4423 * blocks all the time, since f2fs_direct_IO will fall 4424 * back to buffered IO. 4425 */ 4426 if (!f2fs_force_buffered_io(inode, iocb, from) && 4427 allow_outplace_dio(inode, iocb, from)) 4428 goto write; 4429 } 4430 preallocated = true; 4431 target_size = iocb->ki_pos + iov_iter_count(from); 4432 4433 err = f2fs_preallocate_blocks(iocb, from); 4434 if (err) { 4435 out_err: 4436 clear_inode_flag(inode, FI_NO_PREALLOC); 4437 inode_unlock(inode); 4438 ret = err; 4439 goto out; 4440 } 4441 write: 4442 ret = __generic_file_write_iter(iocb, from); 4443 clear_inode_flag(inode, FI_NO_PREALLOC); 4444 4445 /* if we couldn't write data, we should deallocate blocks. */ 4446 if (preallocated && i_size_read(inode) < target_size) 4447 f2fs_truncate(inode); 4448 4449 if (ret > 0) 4450 f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret); 4451 } 4452 unlock: 4453 inode_unlock(inode); 4454 out: 4455 trace_f2fs_file_write_iter(inode, iocb->ki_pos, 4456 iov_iter_count(from), ret); 4457 if (ret > 0) 4458 ret = generic_write_sync(iocb, ret); 4459 return ret; 4460 } 4461 4462 #ifdef CONFIG_COMPAT 4463 struct compat_f2fs_gc_range { 4464 u32 sync; 4465 compat_u64 start; 4466 compat_u64 len; 4467 }; 4468 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\ 4469 struct compat_f2fs_gc_range) 4470 4471 static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg) 4472 { 4473 struct compat_f2fs_gc_range __user *urange; 4474 struct f2fs_gc_range range; 4475 int err; 4476 4477 urange = compat_ptr(arg); 4478 err = get_user(range.sync, &urange->sync); 4479 err |= get_user(range.start, &urange->start); 4480 err |= get_user(range.len, &urange->len); 4481 if (err) 4482 return -EFAULT; 4483 4484 return __f2fs_ioc_gc_range(file, &range); 4485 } 4486 4487 struct compat_f2fs_move_range { 4488 u32 dst_fd; 4489 compat_u64 pos_in; 4490 compat_u64 pos_out; 4491 compat_u64 len; 4492 }; 4493 #define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \ 4494 struct compat_f2fs_move_range) 4495 4496 static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg) 4497 { 4498 struct compat_f2fs_move_range __user *urange; 4499 struct f2fs_move_range range; 4500 int err; 4501 4502 urange = compat_ptr(arg); 4503 err = get_user(range.dst_fd, &urange->dst_fd); 4504 err |= get_user(range.pos_in, &urange->pos_in); 4505 err |= get_user(range.pos_out, &urange->pos_out); 4506 err |= get_user(range.len, &urange->len); 4507 if (err) 4508 return -EFAULT; 4509 4510 return __f2fs_ioc_move_range(file, &range); 4511 } 4512 4513 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 4514 { 4515 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file))))) 4516 return -EIO; 4517 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file)))) 4518 return -ENOSPC; 4519 4520 switch (cmd) { 4521 case FS_IOC32_GETFLAGS: 4522 cmd = FS_IOC_GETFLAGS; 4523 break; 4524 case FS_IOC32_SETFLAGS: 4525 cmd = FS_IOC_SETFLAGS; 4526 break; 4527 case FS_IOC32_GETVERSION: 4528 cmd = FS_IOC_GETVERSION; 4529 break; 4530 case F2FS_IOC32_GARBAGE_COLLECT_RANGE: 4531 return f2fs_compat_ioc_gc_range(file, arg); 4532 case F2FS_IOC32_MOVE_RANGE: 4533 return f2fs_compat_ioc_move_range(file, arg); 4534 case F2FS_IOC_START_ATOMIC_WRITE: 4535 case F2FS_IOC_COMMIT_ATOMIC_WRITE: 4536 case F2FS_IOC_START_VOLATILE_WRITE: 4537 case F2FS_IOC_RELEASE_VOLATILE_WRITE: 4538 case F2FS_IOC_ABORT_VOLATILE_WRITE: 4539 case F2FS_IOC_SHUTDOWN: 4540 case FITRIM: 4541 case FS_IOC_SET_ENCRYPTION_POLICY: 4542 case FS_IOC_GET_ENCRYPTION_PWSALT: 4543 case FS_IOC_GET_ENCRYPTION_POLICY: 4544 case FS_IOC_GET_ENCRYPTION_POLICY_EX: 4545 case FS_IOC_ADD_ENCRYPTION_KEY: 4546 case FS_IOC_REMOVE_ENCRYPTION_KEY: 4547 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: 4548 case FS_IOC_GET_ENCRYPTION_KEY_STATUS: 4549 case FS_IOC_GET_ENCRYPTION_NONCE: 4550 case F2FS_IOC_GARBAGE_COLLECT: 4551 case F2FS_IOC_WRITE_CHECKPOINT: 4552 case F2FS_IOC_DEFRAGMENT: 4553 case F2FS_IOC_FLUSH_DEVICE: 4554 case F2FS_IOC_GET_FEATURES: 4555 case FS_IOC_FSGETXATTR: 4556 case FS_IOC_FSSETXATTR: 4557 case F2FS_IOC_GET_PIN_FILE: 4558 case F2FS_IOC_SET_PIN_FILE: 4559 case F2FS_IOC_PRECACHE_EXTENTS: 4560 case F2FS_IOC_RESIZE_FS: 4561 case FS_IOC_ENABLE_VERITY: 4562 case FS_IOC_MEASURE_VERITY: 4563 case FS_IOC_READ_VERITY_METADATA: 4564 case FS_IOC_GETFSLABEL: 4565 case FS_IOC_SETFSLABEL: 4566 case F2FS_IOC_GET_COMPRESS_BLOCKS: 4567 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS: 4568 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS: 4569 case F2FS_IOC_SEC_TRIM_FILE: 4570 case F2FS_IOC_GET_COMPRESS_OPTION: 4571 case F2FS_IOC_SET_COMPRESS_OPTION: 4572 case F2FS_IOC_DECOMPRESS_FILE: 4573 case F2FS_IOC_COMPRESS_FILE: 4574 break; 4575 default: 4576 return -ENOIOCTLCMD; 4577 } 4578 return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); 4579 } 4580 #endif 4581 4582 const struct file_operations f2fs_file_operations = { 4583 .llseek = f2fs_llseek, 4584 .read_iter = f2fs_file_read_iter, 4585 .write_iter = f2fs_file_write_iter, 4586 .open = f2fs_file_open, 4587 .release = f2fs_release_file, 4588 .mmap = f2fs_file_mmap, 4589 .flush = f2fs_file_flush, 4590 .fsync = f2fs_sync_file, 4591 .fallocate = f2fs_fallocate, 4592 .unlocked_ioctl = f2fs_ioctl, 4593 #ifdef CONFIG_COMPAT 4594 .compat_ioctl = f2fs_compat_ioctl, 4595 #endif 4596 .splice_read = generic_file_splice_read, 4597 .splice_write = iter_file_splice_write, 4598 }; 4599