1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * fs/f2fs/file.c
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/stat.h>
11 #include <linux/writeback.h>
12 #include <linux/blkdev.h>
13 #include <linux/falloc.h>
14 #include <linux/types.h>
15 #include <linux/compat.h>
16 #include <linux/uaccess.h>
17 #include <linux/mount.h>
18 #include <linux/pagevec.h>
19 #include <linux/uio.h>
20 #include <linux/uuid.h>
21 #include <linux/file.h>
22 #include <linux/nls.h>
23 #include <linux/sched/signal.h>
24 #include <linux/fileattr.h>
25 #include <linux/fadvise.h>
26 #include <linux/iomap.h>
27
28 #include "f2fs.h"
29 #include "node.h"
30 #include "segment.h"
31 #include "xattr.h"
32 #include "acl.h"
33 #include "gc.h"
34 #include "iostat.h"
35 #include <trace/events/f2fs.h>
36 #include <uapi/linux/f2fs.h>
37
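/*
 * Drop page cache beyond the current EOF before the file grows to @new_size,
 * so stale data in post-EOF pages is not exposed once i_size is extended.
 */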
38 static void f2fs_zero_post_eof_page(struct inode *inode, loff_t new_size)
39 {
40 loff_t old_size = i_size_read(inode);
41
42 if (old_size >= new_size)
43 return;
44
45 /* zero or drop pages only in range of [old_size, new_size] */
46 truncate_pagecache(inode, old_size);
47 }
48
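/* Read fault handler: account mapped read I/O on top of filemap_fault(). */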
49 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
50 {
51 struct inode *inode = file_inode(vmf->vma->vm_file);
52 vm_flags_t flags = vmf->vma->vm_flags;
53 vm_fault_t ret;
54
55 ret = filemap_fault(vmf);
56 if (ret & VM_FAULT_LOCKED)
57 f2fs_update_iostat(F2FS_I_SB(inode), inode,
58 APP_MAPPED_READ_IO, F2FS_BLKSIZE);
59
60 trace_f2fs_filemap_fault(inode, vmf->pgoff, flags, ret);
61
62 return ret;
63 }
64
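/*
 * Write fault handler: reserve or look up the block backing the faulting
 * folio, zero the part beyond EOF and mark the folio dirty.
 */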
65 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
66 {
67 struct folio *folio = page_folio(vmf->page);
68 struct inode *inode = file_inode(vmf->vma->vm_file);
69 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
70 struct dnode_of_data dn;
71 bool need_alloc = !f2fs_is_pinned_file(inode);
72 int err = 0;
73 vm_fault_t ret;
74
75 if (unlikely(IS_IMMUTABLE(inode)))
76 return VM_FAULT_SIGBUS;
77
78 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
79 err = -EIO;
80 goto out;
81 }
82
83 if (unlikely(f2fs_cp_error(sbi))) {
84 err = -EIO;
85 goto out;
86 }
87
88 if (!f2fs_is_checkpoint_ready(sbi)) {
89 err = -ENOSPC;
90 goto out;
91 }
92
93 err = f2fs_convert_inline_inode(inode);
94 if (err)
95 goto out;
96
97 #ifdef CONFIG_F2FS_FS_COMPRESSION
98 if (f2fs_compressed_file(inode)) {
99 int ret = f2fs_is_compressed_cluster(inode, folio->index);
100
101 if (ret < 0) {
102 err = ret;
103 goto out;
104 } else if (ret) {
105 need_alloc = false;
106 }
107 }
108 #endif
109 /* should be done outside of any locked page */
110 if (need_alloc)
111 f2fs_balance_fs(sbi, true);
112
113 sb_start_pagefault(inode->i_sb);
114
115 f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
116
117 filemap_invalidate_lock(inode->i_mapping);
118 f2fs_zero_post_eof_page(inode, (loff_t)(folio->index + 1) << PAGE_SHIFT);
119 filemap_invalidate_unlock(inode->i_mapping);
120
121 file_update_time(vmf->vma->vm_file);
122 filemap_invalidate_lock_shared(inode->i_mapping);
123
124 folio_lock(folio);
125 if (unlikely(folio->mapping != inode->i_mapping ||
126 folio_pos(folio) > i_size_read(inode) ||
127 !folio_test_uptodate(folio))) {
128 folio_unlock(folio);
129 err = -EFAULT;
130 goto out_sem;
131 }
132
133 set_new_dnode(&dn, inode, NULL, NULL, 0);
134 if (need_alloc) {
135 /* block allocation */
136 err = f2fs_get_block_locked(&dn, folio->index);
137 } else {
138 err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE);
139 f2fs_put_dnode(&dn);
140 if (f2fs_is_pinned_file(inode) &&
141 !__is_valid_data_blkaddr(dn.data_blkaddr))
142 err = -EIO;
143 }
144
145 if (err) {
146 folio_unlock(folio);
147 goto out_sem;
148 }
149
150 f2fs_folio_wait_writeback(folio, DATA, false, true);
151
152 /* wait for GCed page writeback via META_MAPPING */
153 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
154
155 /*
156 * check to see if the page is mapped already (no holes)
157 */
158 if (folio_test_mappedtodisk(folio))
159 goto out_sem;
160
161 /* page is wholly or partially inside EOF */
162 if (((loff_t)(folio->index + 1) << PAGE_SHIFT) >
163 i_size_read(inode)) {
164 loff_t offset;
165
166 offset = i_size_read(inode) & ~PAGE_MASK;
167 folio_zero_segment(folio, offset, folio_size(folio));
168 }
169 folio_mark_dirty(folio);
170
171 f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE);
172 f2fs_update_time(sbi, REQ_TIME);
173
174 out_sem:
175 filemap_invalidate_unlock_shared(inode->i_mapping);
176
177 sb_end_pagefault(inode->i_sb);
178 out:
179 ret = vmf_fs_error(err);
180
181 trace_f2fs_vm_page_mkwrite(inode, folio->index, vmf->vma->vm_flags, ret);
182 return ret;
183 }
184
185 static const struct vm_operations_struct f2fs_file_vm_ops = {
186 .fault = f2fs_filemap_fault,
187 .map_pages = filemap_map_pages,
188 .page_mkwrite = f2fs_vm_page_mkwrite,
189 };
190
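/* Return 1 and set *pino to the parent inode number if a live alias exists. */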
191 static int get_parent_ino(struct inode *inode, nid_t *pino)
192 {
193 struct dentry *dentry;
194
195 /*
196 * Make sure to get the non-deleted alias. The alias associated with
197 * the open file descriptor being fsync()'ed may be deleted already.
198 */
199 dentry = d_find_alias(inode);
200 if (!dentry)
201 return 0;
202
203 *pino = d_parent_ino(dentry);
204 dput(dentry);
205 return 1;
206 }
207
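/* Decide whether this fsync must fall back to a full checkpoint, and why. */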
208 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
209 {
210 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
211 enum cp_reason_type cp_reason = CP_NO_NEEDED;
212
213 if (!S_ISREG(inode->i_mode))
214 cp_reason = CP_NON_REGULAR;
215 else if (f2fs_compressed_file(inode))
216 cp_reason = CP_COMPRESSED;
217 else if (inode->i_nlink != 1)
218 cp_reason = CP_HARDLINK;
219 else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
220 cp_reason = CP_SB_NEED_CP;
221 else if (file_wrong_pino(inode))
222 cp_reason = CP_WRONG_PINO;
223 else if (!f2fs_space_for_roll_forward(sbi))
224 cp_reason = CP_NO_SPC_ROLL;
225 else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
226 cp_reason = CP_NODE_NEED_CP;
227 else if (test_opt(sbi, FASTBOOT))
228 cp_reason = CP_FASTBOOT_MODE;
229 else if (F2FS_OPTION(sbi).active_logs == 2)
230 cp_reason = CP_SPEC_LOG_NUM;
231 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
232 f2fs_need_dentry_mark(sbi, inode->i_ino) &&
233 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
234 TRANS_DIR_INO))
235 cp_reason = CP_RECOVER_DIR;
236 else if (f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
237 XATTR_DIR_INO))
238 cp_reason = CP_XATTR_DIR;
239
240 return cp_reason;
241 }
242
243 static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
244 {
245 struct folio *i = filemap_get_folio(NODE_MAPPING(sbi), ino);
246 bool ret = false;
247 /* But we need to check whether there are pending inode updates */
248 if ((!IS_ERR(i) && folio_test_dirty(i)) ||
249 f2fs_need_inode_block_update(sbi, ino))
250 ret = true;
251 f2fs_folio_put(i, false);
252 return ret;
253 }
254
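/* Re-point i_pino at the current parent when it is stale and i_nlink == 1. */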
255 static void try_to_fix_pino(struct inode *inode)
256 {
257 struct f2fs_inode_info *fi = F2FS_I(inode);
258 nid_t pino;
259
260 f2fs_down_write(&fi->i_sem);
261 if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
262 get_parent_ino(inode, &pino)) {
263 f2fs_i_pino_write(inode, pino);
264 file_got_pino(inode);
265 }
266 f2fs_up_write(&fi->i_sem);
267 }
268
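/*
 * Core of fsync()/fdatasync(): write back data, then either issue a full
 * checkpoint or persist the node chain needed for roll-forward recovery.
 */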
269 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
270 int datasync, bool atomic)
271 {
272 struct inode *inode = file->f_mapping->host;
273 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
274 nid_t ino = inode->i_ino;
275 int ret = 0;
276 enum cp_reason_type cp_reason = 0;
277 struct writeback_control wbc = {
278 .sync_mode = WB_SYNC_ALL,
279 .nr_to_write = LONG_MAX,
280 };
281 unsigned int seq_id = 0;
282
283 if (unlikely(f2fs_readonly(inode->i_sb)))
284 return 0;
285
286 trace_f2fs_sync_file_enter(inode);
287
288 if (S_ISDIR(inode->i_mode))
289 goto go_write;
290
291 /* if fdatasync is triggered, let's do in-place-update */
292 if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
293 set_inode_flag(inode, FI_NEED_IPU);
294 ret = file_write_and_wait_range(file, start, end);
295 clear_inode_flag(inode, FI_NEED_IPU);
296
297 if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
298 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
299 return ret;
300 }
301
302 /* if the inode is dirty, let's recover all the time */
303 if (!f2fs_skip_inode_update(inode, datasync)) {
304 f2fs_write_inode(inode, NULL);
305 goto go_write;
306 }
307
308 /*
309 * if there is no written data, don't waste time writing recovery info.
310 */
311 if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
312 !f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
313
314 /* it may call write_inode just prior to fsync */
315 if (need_inode_page_update(sbi, ino))
316 goto go_write;
317
318 if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
319 f2fs_exist_written_data(sbi, ino, UPDATE_INO))
320 goto flush_out;
321 goto out;
322 } else {
323 /*
324 * for the OPU case, during fsync(), the node can be persisted before
325 * the data when the lower device doesn't support write barriers,
326 * resulting in data corruption after SPO.
327 * So for strict fsync mode, force atomic write semantics to keep the
328 * write order between data/node and the last node, to avoid
329 * potential data corruption.
330 */
331 if (F2FS_OPTION(sbi).fsync_mode ==
332 FSYNC_MODE_STRICT && !atomic)
333 atomic = true;
334 }
335 go_write:
336 /*
337 * Both fdatasync() and fsync() can be recovered after a
338 * sudden power-off.
339 */
340 f2fs_down_read(&F2FS_I(inode)->i_sem);
341 cp_reason = need_do_checkpoint(inode);
342 f2fs_up_read(&F2FS_I(inode)->i_sem);
343
344 if (cp_reason) {
345 /* all the dirty node pages should be flushed for POR */
346 ret = f2fs_sync_fs(inode->i_sb, 1);
347
348 /*
349 * We've secured consistency through sync_fs. Following pino
350 * will be used only for fsynced inodes after checkpoint.
351 */
352 try_to_fix_pino(inode);
353 clear_inode_flag(inode, FI_APPEND_WRITE);
354 clear_inode_flag(inode, FI_UPDATE_WRITE);
355 goto out;
356 }
357 sync_nodes:
358 atomic_inc(&sbi->wb_sync_req[NODE]);
359 ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
360 atomic_dec(&sbi->wb_sync_req[NODE]);
361 if (ret)
362 goto out;
363
364 /* if cp_error was enabled, we should avoid infinite loop */
365 if (unlikely(f2fs_cp_error(sbi))) {
366 ret = -EIO;
367 goto out;
368 }
369
370 if (f2fs_need_inode_block_update(sbi, ino)) {
371 f2fs_mark_inode_dirty_sync(inode, true);
372 f2fs_write_inode(inode, NULL);
373 goto sync_nodes;
374 }
375
376 /*
377 * If it's an atomic write, write ordering is already kept. So here we
378 * don't need to wait for node write completion, since we use a node
379 * chain which serializes the node blocks. If one of the node writes is
380 * reordered, we simply see a broken chain and stop roll-forward
381 * recovery. It means we'll recover either all or none of the node
382 * blocks given the fsync mark.
383 */
384 if (!atomic) {
385 ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
386 if (ret)
387 goto out;
388 }
389
390 /* once recovery info is written, don't need to track this */
391 f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
392 clear_inode_flag(inode, FI_APPEND_WRITE);
393 flush_out:
394 if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
395 ret = f2fs_issue_flush(sbi, inode->i_ino);
396 if (!ret) {
397 f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
398 clear_inode_flag(inode, FI_UPDATE_WRITE);
399 f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
400 }
401 f2fs_update_time(sbi, REQ_TIME);
402 out:
403 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
404 return ret;
405 }
406
407 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
408 {
409 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
410 return -EIO;
411 return f2fs_do_sync_file(file, start, end, datasync, false);
412 }
413
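/* Tell whether the block at @index satisfies a SEEK_DATA/SEEK_HOLE lookup. */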
414 static bool __found_offset(struct address_space *mapping,
415 struct dnode_of_data *dn, pgoff_t index, int whence)
416 {
417 block_t blkaddr = f2fs_data_blkaddr(dn);
418 struct inode *inode = mapping->host;
419 bool compressed_cluster = false;
420
421 if (f2fs_compressed_file(inode)) {
422 block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_folio,
423 ALIGN_DOWN(dn->ofs_in_node, F2FS_I(inode)->i_cluster_size));
424
425 compressed_cluster = first_blkaddr == COMPRESS_ADDR;
426 }
427
428 switch (whence) {
429 case SEEK_DATA:
430 if (__is_valid_data_blkaddr(blkaddr))
431 return true;
432 if (blkaddr == NEW_ADDR &&
433 xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
434 return true;
435 if (compressed_cluster)
436 return true;
437 break;
438 case SEEK_HOLE:
439 if (compressed_cluster)
440 return false;
441 if (blkaddr == NULL_ADDR)
442 return true;
443 break;
444 }
445 return false;
446 }
447
448 static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
449 {
450 struct inode *inode = file->f_mapping->host;
451 loff_t maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
452 struct dnode_of_data dn;
453 pgoff_t pgofs, end_offset;
454 loff_t data_ofs = offset;
455 loff_t isize;
456 int err = 0;
457
458 inode_lock_shared(inode);
459
460 isize = i_size_read(inode);
461 if (offset >= isize)
462 goto fail;
463
464 /* handle inline data case */
465 if (f2fs_has_inline_data(inode)) {
466 if (whence == SEEK_HOLE) {
467 data_ofs = isize;
468 goto found;
469 } else if (whence == SEEK_DATA) {
470 data_ofs = offset;
471 goto found;
472 }
473 }
474
475 pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
476
477 for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
478 set_new_dnode(&dn, inode, NULL, NULL, 0);
479 err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
480 if (err && err != -ENOENT) {
481 goto fail;
482 } else if (err == -ENOENT) {
483 /* direct node does not exist */
484 if (whence == SEEK_DATA) {
485 pgofs = f2fs_get_next_page_offset(&dn, pgofs);
486 continue;
487 } else {
488 goto found;
489 }
490 }
491
492 end_offset = ADDRS_PER_PAGE(&dn.node_folio->page, inode);
493
494 /* find data/hole in dnode block */
495 for (; dn.ofs_in_node < end_offset;
496 dn.ofs_in_node++, pgofs++,
497 data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
498 block_t blkaddr;
499
500 blkaddr = f2fs_data_blkaddr(&dn);
501
502 if (__is_valid_data_blkaddr(blkaddr) &&
503 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
504 blkaddr, DATA_GENERIC_ENHANCE)) {
505 f2fs_put_dnode(&dn);
506 goto fail;
507 }
508
509 if (__found_offset(file->f_mapping, &dn,
510 pgofs, whence)) {
511 f2fs_put_dnode(&dn);
512 goto found;
513 }
514 }
515 f2fs_put_dnode(&dn);
516 }
517
518 if (whence == SEEK_DATA)
519 goto fail;
520 found:
521 if (whence == SEEK_HOLE && data_ofs > isize)
522 data_ofs = isize;
523 inode_unlock_shared(inode);
524 return vfs_setpos(file, data_ofs, maxbytes);
525 fail:
526 inode_unlock_shared(inode);
527 return -ENXIO;
528 }
529
530 static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
531 {
532 struct inode *inode = file->f_mapping->host;
533 loff_t maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
534
535 switch (whence) {
536 case SEEK_SET:
537 case SEEK_CUR:
538 case SEEK_END:
539 return generic_file_llseek_size(file, offset, whence,
540 maxbytes, i_size_read(inode));
541 case SEEK_DATA:
542 case SEEK_HOLE:
543 if (offset < 0)
544 return -ENXIO;
545 return f2fs_seek_block(file, offset, whence);
546 }
547
548 return -EINVAL;
549 }
550
551 static int f2fs_file_mmap_prepare(struct vm_area_desc *desc)
552 {
553 struct file *file = desc->file;
554 struct inode *inode = file_inode(file);
555
556 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
557 return -EIO;
558
559 if (!f2fs_is_compress_backend_ready(inode))
560 return -EOPNOTSUPP;
561
562 file_accessed(file);
563 desc->vm_ops = &f2fs_file_vm_ops;
564
565 f2fs_down_read(&F2FS_I(inode)->i_sem);
566 set_inode_flag(inode, FI_MMAP_FILE);
567 f2fs_up_read(&F2FS_I(inode)->i_sem);
568
569 return 0;
570 }
571
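/*
 * On the first open of an inode, free blocks left preallocated beyond i_size
 * and mark the inode with FI_OPENED_FILE.
 */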
572 static int finish_preallocate_blocks(struct inode *inode)
573 {
574 int ret = 0;
575 bool opened;
576
577 f2fs_down_read(&F2FS_I(inode)->i_sem);
578 opened = is_inode_flag_set(inode, FI_OPENED_FILE);
579 f2fs_up_read(&F2FS_I(inode)->i_sem);
580 if (opened)
581 return 0;
582
583 inode_lock(inode);
584 if (is_inode_flag_set(inode, FI_OPENED_FILE))
585 goto out_unlock;
586
587 if (!file_should_truncate(inode))
588 goto out_update;
589
590 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
591 filemap_invalidate_lock(inode->i_mapping);
592
593 truncate_setsize(inode, i_size_read(inode));
594 ret = f2fs_truncate(inode);
595
596 filemap_invalidate_unlock(inode->i_mapping);
597 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
598 if (ret)
599 goto out_unlock;
600
601 file_dont_truncate(inode);
602 out_update:
603 f2fs_down_write(&F2FS_I(inode)->i_sem);
604 set_inode_flag(inode, FI_OPENED_FILE);
605 f2fs_up_write(&F2FS_I(inode)->i_sem);
606 out_unlock:
607 inode_unlock(inode);
608 return ret;
609 }
610
611 static int f2fs_file_open(struct inode *inode, struct file *filp)
612 {
613 int err = fscrypt_file_open(inode, filp);
614
615 if (err)
616 return err;
617
618 if (!f2fs_is_compress_backend_ready(inode))
619 return -EOPNOTSUPP;
620
621 err = fsverity_file_open(inode, filp);
622 if (err)
623 return err;
624
625 filp->f_mode |= FMODE_NOWAIT;
626 filp->f_mode |= FMODE_CAN_ODIRECT;
627
628 err = dquot_file_open(inode, filp);
629 if (err)
630 return err;
631
632 return finish_preallocate_blocks(inode);
633 }
634
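/*
 * Invalidate @count block addresses starting at dn->ofs_in_node, batching
 * contiguous ranges, and update compressed-block and extent cache state.
 */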
635 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
636 {
637 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
638 int nr_free = 0, ofs = dn->ofs_in_node, len = count;
639 __le32 *addr;
640 bool compressed_cluster = false;
641 int cluster_index = 0, valid_blocks = 0;
642 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
643 bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
644 block_t blkstart;
645 int blklen = 0;
646
647 addr = get_dnode_addr(dn->inode, dn->node_folio) + ofs;
648 blkstart = le32_to_cpu(*addr);
649
650 /* Assumption: truncation starts with cluster */
651 for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
652 block_t blkaddr = le32_to_cpu(*addr);
653
654 if (f2fs_compressed_file(dn->inode) &&
655 !(cluster_index & (cluster_size - 1))) {
656 if (compressed_cluster)
657 f2fs_i_compr_blocks_update(dn->inode,
658 valid_blocks, false);
659 compressed_cluster = (blkaddr == COMPRESS_ADDR);
660 valid_blocks = 0;
661 }
662
663 if (blkaddr == NULL_ADDR)
664 goto next;
665
666 f2fs_set_data_blkaddr(dn, NULL_ADDR);
667
668 if (__is_valid_data_blkaddr(blkaddr)) {
669 if (time_to_inject(sbi, FAULT_BLKADDR_CONSISTENCE))
670 goto next;
671 if (!f2fs_is_valid_blkaddr_raw(sbi, blkaddr,
672 DATA_GENERIC_ENHANCE))
673 goto next;
674 if (compressed_cluster)
675 valid_blocks++;
676 }
677
678 if (blkstart + blklen == blkaddr) {
679 blklen++;
680 } else {
681 f2fs_invalidate_blocks(sbi, blkstart, blklen);
682 blkstart = blkaddr;
683 blklen = 1;
684 }
685
686 if (!released || blkaddr != COMPRESS_ADDR)
687 nr_free++;
688
689 continue;
690
691 next:
692 if (blklen)
693 f2fs_invalidate_blocks(sbi, blkstart, blklen);
694
695 blkstart = le32_to_cpu(*(addr + 1));
696 blklen = 0;
697 }
698
699 if (blklen)
700 f2fs_invalidate_blocks(sbi, blkstart, blklen);
701
702 if (compressed_cluster)
703 f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
704
705 if (nr_free) {
706 pgoff_t fofs;
707 /*
708 * once we invalidate valid blkaddr in range [ofs, ofs + count],
709 * we will invalidate all blkaddr in the whole range.
710 */
711 fofs = f2fs_start_bidx_of_node(ofs_of_node(&dn->node_folio->page),
712 dn->inode) + ofs;
713 f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
714 f2fs_update_age_extent_cache_range(dn, fofs, len);
715 dec_valid_block_count(sbi, dn->inode, nr_free);
716 }
717 dn->ofs_in_node = ofs;
718
719 f2fs_update_time(sbi, REQ_TIME);
720 trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
721 dn->ofs_in_node, nr_free);
722 }
723
724 static int truncate_partial_data_page(struct inode *inode, u64 from,
725 bool cache_only)
726 {
727 loff_t offset = from & (PAGE_SIZE - 1);
728 pgoff_t index = from >> PAGE_SHIFT;
729 struct address_space *mapping = inode->i_mapping;
730 struct folio *folio;
731
732 if (!offset && !cache_only)
733 return 0;
734
735 if (cache_only) {
736 folio = filemap_lock_folio(mapping, index);
737 if (IS_ERR(folio))
738 return 0;
739 if (folio_test_uptodate(folio))
740 goto truncate_out;
741 f2fs_folio_put(folio, true);
742 return 0;
743 }
744
745 folio = f2fs_get_lock_data_folio(inode, index, true);
746 if (IS_ERR(folio))
747 return PTR_ERR(folio) == -ENOENT ? 0 : PTR_ERR(folio);
748 truncate_out:
749 f2fs_folio_wait_writeback(folio, DATA, true, true);
750 folio_zero_segment(folio, offset, folio_size(folio));
751
752 /* An encrypted inode should have a key and truncate the last page. */
753 f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
754 if (!cache_only)
755 folio_mark_dirty(folio);
756 f2fs_folio_put(folio, true);
757 return 0;
758 }
759
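/*
 * Truncate all blocks beyond byte offset @from: handle inline data and
 * device-aliasing inodes, free the tail of the dnode covering @from, drop
 * the remaining node blocks, then zero the partial tail page.
 */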
760 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
761 {
762 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
763 struct dnode_of_data dn;
764 pgoff_t free_from;
765 int count = 0, err = 0;
766 struct folio *ifolio;
767 bool truncate_page = false;
768
769 trace_f2fs_truncate_blocks_enter(inode, from);
770
771 if (IS_DEVICE_ALIASING(inode) && from) {
772 err = -EINVAL;
773 goto out_err;
774 }
775
776 free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
777
778 if (free_from >= max_file_blocks(inode))
779 goto free_partial;
780
781 if (lock)
782 f2fs_lock_op(sbi);
783
784 ifolio = f2fs_get_inode_folio(sbi, inode->i_ino);
785 if (IS_ERR(ifolio)) {
786 err = PTR_ERR(ifolio);
787 goto out;
788 }
789
790 if (IS_DEVICE_ALIASING(inode)) {
791 struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
792 struct extent_info ei = et->largest;
793
794 f2fs_invalidate_blocks(sbi, ei.blk, ei.len);
795
796 dec_valid_block_count(sbi, inode, ei.len);
797 f2fs_update_time(sbi, REQ_TIME);
798
799 f2fs_folio_put(ifolio, true);
800 goto out;
801 }
802
803 if (f2fs_has_inline_data(inode)) {
804 f2fs_truncate_inline_inode(inode, ifolio, from);
805 f2fs_folio_put(ifolio, true);
806 truncate_page = true;
807 goto out;
808 }
809
810 set_new_dnode(&dn, inode, ifolio, NULL, 0);
811 err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
812 if (err) {
813 if (err == -ENOENT)
814 goto free_next;
815 goto out;
816 }
817
818 count = ADDRS_PER_PAGE(&dn.node_folio->page, inode);
819
820 count -= dn.ofs_in_node;
821 f2fs_bug_on(sbi, count < 0);
822
823 if (dn.ofs_in_node || IS_INODE(&dn.node_folio->page)) {
824 f2fs_truncate_data_blocks_range(&dn, count);
825 free_from += count;
826 }
827
828 f2fs_put_dnode(&dn);
829 free_next:
830 err = f2fs_truncate_inode_blocks(inode, free_from);
831 out:
832 if (lock)
833 f2fs_unlock_op(sbi);
834 free_partial:
835 /* lastly zero out the first data page */
836 if (!err)
837 err = truncate_partial_data_page(inode, from, truncate_page);
838 out_err:
839 trace_f2fs_truncate_blocks_exit(inode, err);
840 return err;
841 }
842
843 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
844 {
845 u64 free_from = from;
846 int err;
847
848 #ifdef CONFIG_F2FS_FS_COMPRESSION
849 /*
850 * for compressed files, only cluster-size-aligned
851 * truncation is supported.
852 */
853 if (f2fs_compressed_file(inode))
854 free_from = round_up(from,
855 F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
856 #endif
857
858 err = f2fs_do_truncate_blocks(inode, free_from, lock);
859 if (err)
860 return err;
861
862 #ifdef CONFIG_F2FS_FS_COMPRESSION
863 /*
864 * For a compressed file, direct writes are not allowed after its compressed
865 * blocks are released, but they should be allowed again after truncating to zero.
866 */
867 if (f2fs_compressed_file(inode) && !free_from
868 && is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
869 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
870
871 if (from != free_from) {
872 err = f2fs_truncate_partial_cluster(inode, from, lock);
873 if (err)
874 return err;
875 }
876 #endif
877
878 return 0;
879 }
880
881 int f2fs_truncate(struct inode *inode)
882 {
883 int err;
884
885 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
886 return -EIO;
887
888 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
889 S_ISLNK(inode->i_mode)))
890 return 0;
891
892 trace_f2fs_truncate(inode);
893
894 if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE))
895 return -EIO;
896
897 err = f2fs_dquot_initialize(inode);
898 if (err)
899 return err;
900
901 /* we should check inline_data size */
902 if (!f2fs_may_inline_data(inode)) {
903 err = f2fs_convert_inline_inode(inode);
904 if (err)
905 return err;
906 }
907
908 err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
909 if (err)
910 return err;
911
912 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
913 f2fs_mark_inode_dirty_sync(inode, false);
914 return 0;
915 }
916
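/* Return true if direct I/O must fall back to buffered I/O for this inode. */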
917 static bool f2fs_force_buffered_io(struct inode *inode, int rw)
918 {
919 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
920
921 if (!fscrypt_dio_supported(inode))
922 return true;
923 if (fsverity_active(inode))
924 return true;
925 if (f2fs_compressed_file(inode))
926 return true;
927 /*
928 * only force direct reads to use buffered IO; direct writes
929 * expect inline data conversion before committing the IO.
930 */
931 if (f2fs_has_inline_data(inode) && rw == READ)
932 return true;
933
934 /* disallow direct IO if any of devices has unaligned blksize */
935 if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
936 return true;
937 /*
938 * for a blkzoned device, fall back from direct IO to buffered IO, so
939 * all IOs can be serialized by log-structured writes.
940 */
941 if (f2fs_sb_has_blkzoned(sbi) && (rw == WRITE) &&
942 !f2fs_is_pinned_file(inode))
943 return true;
944 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
945 return true;
946
947 return false;
948 }
949
950 int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path,
951 struct kstat *stat, u32 request_mask, unsigned int query_flags)
952 {
953 struct inode *inode = d_inode(path->dentry);
954 struct f2fs_inode_info *fi = F2FS_I(inode);
955 struct f2fs_inode *ri = NULL;
956 unsigned int flags;
957
958 if (f2fs_has_extra_attr(inode) &&
959 f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
960 F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
961 stat->result_mask |= STATX_BTIME;
962 stat->btime.tv_sec = fi->i_crtime.tv_sec;
963 stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
964 }
965
966 /*
967 * Return the DIO alignment restrictions if requested. We only return
968 * this information when requested, since on encrypted files it might
969 * take a fair bit of work to get if the file wasn't opened recently.
970 *
971 * f2fs sometimes supports DIO reads but not DIO writes. STATX_DIOALIGN
972 * cannot represent that, so in that case we report no DIO support.
973 */
974 if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
975 unsigned int bsize = i_blocksize(inode);
976
977 stat->result_mask |= STATX_DIOALIGN;
978 if (!f2fs_force_buffered_io(inode, WRITE)) {
979 stat->dio_mem_align = bsize;
980 stat->dio_offset_align = bsize;
981 }
982 }
983
984 flags = fi->i_flags;
985 if (flags & F2FS_COMPR_FL)
986 stat->attributes |= STATX_ATTR_COMPRESSED;
987 if (flags & F2FS_APPEND_FL)
988 stat->attributes |= STATX_ATTR_APPEND;
989 if (IS_ENCRYPTED(inode))
990 stat->attributes |= STATX_ATTR_ENCRYPTED;
991 if (flags & F2FS_IMMUTABLE_FL)
992 stat->attributes |= STATX_ATTR_IMMUTABLE;
993 if (flags & F2FS_NODUMP_FL)
994 stat->attributes |= STATX_ATTR_NODUMP;
995 if (IS_VERITY(inode))
996 stat->attributes |= STATX_ATTR_VERITY;
997
998 stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
999 STATX_ATTR_APPEND |
1000 STATX_ATTR_ENCRYPTED |
1001 STATX_ATTR_IMMUTABLE |
1002 STATX_ATTR_NODUMP |
1003 STATX_ATTR_VERITY);
1004
1005 generic_fillattr(idmap, request_mask, inode, stat);
1006
1007 /* we need to show initial sectors used for inline_data/dentries */
1008 if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
1009 f2fs_has_inline_dentry(inode))
1010 stat->blocks += (stat->size + 511) >> 9;
1011
1012 return 0;
1013 }
1014
1015 #ifdef CONFIG_F2FS_FS_POSIX_ACL
1016 static void __setattr_copy(struct mnt_idmap *idmap,
1017 struct inode *inode, const struct iattr *attr)
1018 {
1019 unsigned int ia_valid = attr->ia_valid;
1020
1021 i_uid_update(idmap, attr, inode);
1022 i_gid_update(idmap, attr, inode);
1023 if (ia_valid & ATTR_ATIME)
1024 inode_set_atime_to_ts(inode, attr->ia_atime);
1025 if (ia_valid & ATTR_MTIME)
1026 inode_set_mtime_to_ts(inode, attr->ia_mtime);
1027 if (ia_valid & ATTR_CTIME)
1028 inode_set_ctime_to_ts(inode, attr->ia_ctime);
1029 if (ia_valid & ATTR_MODE) {
1030 umode_t mode = attr->ia_mode;
1031
1032 if (!in_group_or_capable(idmap, inode, i_gid_into_vfsgid(idmap, inode)))
1033 mode &= ~S_ISGID;
1034 set_acl_inode(inode, mode);
1035 }
1036 }
1037 #else
1038 #define __setattr_copy setattr_copy
1039 #endif
1040
1041 int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
1042 struct iattr *attr)
1043 {
1044 struct inode *inode = d_inode(dentry);
1045 struct f2fs_inode_info *fi = F2FS_I(inode);
1046 int err;
1047
1048 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1049 return -EIO;
1050
1051 if (unlikely(IS_IMMUTABLE(inode)))
1052 return -EPERM;
1053
1054 if (unlikely(IS_APPEND(inode) &&
1055 (attr->ia_valid & (ATTR_MODE | ATTR_UID |
1056 ATTR_GID | ATTR_TIMES_SET))))
1057 return -EPERM;
1058
1059 if ((attr->ia_valid & ATTR_SIZE)) {
1060 if (!f2fs_is_compress_backend_ready(inode) ||
1061 IS_DEVICE_ALIASING(inode))
1062 return -EOPNOTSUPP;
1063 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) &&
1064 !IS_ALIGNED(attr->ia_size,
1065 F2FS_BLK_TO_BYTES(fi->i_cluster_size)))
1066 return -EINVAL;
1067 }
1068
1069 err = setattr_prepare(idmap, dentry, attr);
1070 if (err)
1071 return err;
1072
1073 err = fscrypt_prepare_setattr(dentry, attr);
1074 if (err)
1075 return err;
1076
1077 err = fsverity_prepare_setattr(dentry, attr);
1078 if (err)
1079 return err;
1080
1081 if (is_quota_modification(idmap, inode, attr)) {
1082 err = f2fs_dquot_initialize(inode);
1083 if (err)
1084 return err;
1085 }
1086 if (i_uid_needs_update(idmap, attr, inode) ||
1087 i_gid_needs_update(idmap, attr, inode)) {
1088 f2fs_lock_op(F2FS_I_SB(inode));
1089 err = dquot_transfer(idmap, inode, attr);
1090 if (err) {
1091 set_sbi_flag(F2FS_I_SB(inode),
1092 SBI_QUOTA_NEED_REPAIR);
1093 f2fs_unlock_op(F2FS_I_SB(inode));
1094 return err;
1095 }
1096 /*
1097 * update uid/gid under lock_op(), so that dquot and inode can
1098 * be updated atomically.
1099 */
1100 i_uid_update(idmap, attr, inode);
1101 i_gid_update(idmap, attr, inode);
1102 f2fs_mark_inode_dirty_sync(inode, true);
1103 f2fs_unlock_op(F2FS_I_SB(inode));
1104 }
1105
1106 if (attr->ia_valid & ATTR_SIZE) {
1107 loff_t old_size = i_size_read(inode);
1108
1109 if (attr->ia_size > MAX_INLINE_DATA(inode)) {
1110 /*
1111 * should convert the inline inode before i_size_write() to keep the
1112 * size within the inline_data limit while the inline flag is set.
1113 */
1114 err = f2fs_convert_inline_inode(inode);
1115 if (err)
1116 return err;
1117 }
1118
1119 /*
1120 * wait for inflight dio, blocks should be removed after
1121 * IO completion.
1122 */
1123 if (attr->ia_size < old_size)
1124 inode_dio_wait(inode);
1125
1126 f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
1127 filemap_invalidate_lock(inode->i_mapping);
1128
1129 if (attr->ia_size > old_size)
1130 f2fs_zero_post_eof_page(inode, attr->ia_size);
1131 truncate_setsize(inode, attr->ia_size);
1132
1133 if (attr->ia_size <= old_size)
1134 err = f2fs_truncate(inode);
1135 /*
1136 * do not trim all blocks after i_size if target size is
1137 * larger than i_size.
1138 */
1139 filemap_invalidate_unlock(inode->i_mapping);
1140 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
1141 if (err)
1142 return err;
1143
1144 spin_lock(&fi->i_size_lock);
1145 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1146 fi->last_disk_size = i_size_read(inode);
1147 spin_unlock(&fi->i_size_lock);
1148 }
1149
1150 __setattr_copy(idmap, inode, attr);
1151
1152 if (attr->ia_valid & ATTR_MODE) {
1153 err = posix_acl_chmod(idmap, dentry, f2fs_get_inode_mode(inode));
1154
1155 if (is_inode_flag_set(inode, FI_ACL_MODE)) {
1156 if (!err)
1157 inode->i_mode = fi->i_acl_mode;
1158 clear_inode_flag(inode, FI_ACL_MODE);
1159 }
1160 }
1161
1162 /* file size may have changed here */
1163 f2fs_mark_inode_dirty_sync(inode, true);
1164
1165 /* inode change will produce dirty node pages flushed by checkpoint */
1166 f2fs_balance_fs(F2FS_I_SB(inode), true);
1167
1168 return err;
1169 }
1170
1171 const struct inode_operations f2fs_file_inode_operations = {
1172 .getattr = f2fs_getattr,
1173 .setattr = f2fs_setattr,
1174 .get_inode_acl = f2fs_get_acl,
1175 .set_acl = f2fs_set_acl,
1176 .listxattr = f2fs_listxattr,
1177 .fiemap = f2fs_fiemap,
1178 .fileattr_get = f2fs_fileattr_get,
1179 .fileattr_set = f2fs_fileattr_set,
1180 };
1181
1182 static int fill_zero(struct inode *inode, pgoff_t index,
1183 loff_t start, loff_t len)
1184 {
1185 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1186 struct folio *folio;
1187
1188 if (!len)
1189 return 0;
1190
1191 f2fs_balance_fs(sbi, true);
1192
1193 f2fs_lock_op(sbi);
1194 folio = f2fs_get_new_data_folio(inode, NULL, index, false);
1195 f2fs_unlock_op(sbi);
1196
1197 if (IS_ERR(folio))
1198 return PTR_ERR(folio);
1199
1200 f2fs_folio_wait_writeback(folio, DATA, true, true);
1201 folio_zero_range(folio, start, len);
1202 folio_mark_dirty(folio);
1203 f2fs_folio_put(folio, true);
1204 return 0;
1205 }
1206
1207 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1208 {
1209 int err;
1210
1211 while (pg_start < pg_end) {
1212 struct dnode_of_data dn;
1213 pgoff_t end_offset, count;
1214
1215 set_new_dnode(&dn, inode, NULL, NULL, 0);
1216 err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
1217 if (err) {
1218 if (err == -ENOENT) {
1219 pg_start = f2fs_get_next_page_offset(&dn,
1220 pg_start);
1221 continue;
1222 }
1223 return err;
1224 }
1225
1226 end_offset = ADDRS_PER_PAGE(&dn.node_folio->page, inode);
1227 count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1228
1229 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1230
1231 f2fs_truncate_data_blocks_range(&dn, count);
1232 f2fs_put_dnode(&dn);
1233
1234 pg_start += count;
1235 }
1236 return 0;
1237 }
1238
1239 static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
1240 {
1241 pgoff_t pg_start, pg_end;
1242 loff_t off_start, off_end;
1243 int ret;
1244
1245 ret = f2fs_convert_inline_inode(inode);
1246 if (ret)
1247 return ret;
1248
1249 filemap_invalidate_lock(inode->i_mapping);
1250 f2fs_zero_post_eof_page(inode, offset + len);
1251 filemap_invalidate_unlock(inode->i_mapping);
1252
1253 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1254 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1255
1256 off_start = offset & (PAGE_SIZE - 1);
1257 off_end = (offset + len) & (PAGE_SIZE - 1);
1258
1259 if (pg_start == pg_end) {
1260 ret = fill_zero(inode, pg_start, off_start,
1261 off_end - off_start);
1262 if (ret)
1263 return ret;
1264 } else {
1265 if (off_start) {
1266 ret = fill_zero(inode, pg_start++, off_start,
1267 PAGE_SIZE - off_start);
1268 if (ret)
1269 return ret;
1270 }
1271 if (off_end) {
1272 ret = fill_zero(inode, pg_end, 0, off_end);
1273 if (ret)
1274 return ret;
1275 }
1276
1277 if (pg_start < pg_end) {
1278 loff_t blk_start, blk_end;
1279 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1280
1281 f2fs_balance_fs(sbi, true);
1282
1283 blk_start = (loff_t)pg_start << PAGE_SHIFT;
1284 blk_end = (loff_t)pg_end << PAGE_SHIFT;
1285
1286 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1287 filemap_invalidate_lock(inode->i_mapping);
1288
1289 truncate_pagecache_range(inode, blk_start, blk_end - 1);
1290
1291 f2fs_lock_op(sbi);
1292 ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1293 f2fs_unlock_op(sbi);
1294
1295 filemap_invalidate_unlock(inode->i_mapping);
1296 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1297 }
1298 }
1299
1300 return ret;
1301 }
1302
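/*
 * Snapshot @len block addresses starting at @off into @blkaddr; blocks that
 * are not checkpointed yet are detached (NULL_ADDR) and flagged in
 * @do_replace so they can be moved rather than copied.
 */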
1303 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1304 int *do_replace, pgoff_t off, pgoff_t len)
1305 {
1306 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1307 struct dnode_of_data dn;
1308 int ret, done, i;
1309
1310 next_dnode:
1311 set_new_dnode(&dn, inode, NULL, NULL, 0);
1312 ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1313 if (ret && ret != -ENOENT) {
1314 return ret;
1315 } else if (ret == -ENOENT) {
1316 if (dn.max_level == 0)
1317 return -ENOENT;
1318 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1319 dn.ofs_in_node, len);
1320 blkaddr += done;
1321 do_replace += done;
1322 goto next;
1323 }
1324
1325 done = min((pgoff_t)ADDRS_PER_PAGE(&dn.node_folio->page, inode) -
1326 dn.ofs_in_node, len);
1327 for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1328 *blkaddr = f2fs_data_blkaddr(&dn);
1329
1330 if (__is_valid_data_blkaddr(*blkaddr) &&
1331 !f2fs_is_valid_blkaddr(sbi, *blkaddr,
1332 DATA_GENERIC_ENHANCE)) {
1333 f2fs_put_dnode(&dn);
1334 return -EFSCORRUPTED;
1335 }
1336
1337 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1338
1339 if (f2fs_lfs_mode(sbi)) {
1340 f2fs_put_dnode(&dn);
1341 return -EOPNOTSUPP;
1342 }
1343
1344 /* do not invalidate this block address */
1345 f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1346 *do_replace = 1;
1347 }
1348 }
1349 f2fs_put_dnode(&dn);
1350 next:
1351 len -= done;
1352 off += done;
1353 if (len)
1354 goto next_dnode;
1355 return 0;
1356 }
1357
1358 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1359 int *do_replace, pgoff_t off, int len)
1360 {
1361 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1362 struct dnode_of_data dn;
1363 int ret, i;
1364
1365 for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1366 if (*do_replace == 0)
1367 continue;
1368
1369 set_new_dnode(&dn, inode, NULL, NULL, 0);
1370 ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1371 if (ret) {
1372 dec_valid_block_count(sbi, inode, 1);
1373 f2fs_invalidate_blocks(sbi, *blkaddr, 1);
1374 } else {
1375 f2fs_update_data_blkaddr(&dn, *blkaddr);
1376 }
1377 f2fs_put_dnode(&dn);
1378 }
1379 return 0;
1380 }
1381
1382 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1383 block_t *blkaddr, int *do_replace,
1384 pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1385 {
1386 struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1387 pgoff_t i = 0;
1388 int ret;
1389
1390 while (i < len) {
1391 if (blkaddr[i] == NULL_ADDR && !full) {
1392 i++;
1393 continue;
1394 }
1395
1396 if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1397 struct dnode_of_data dn;
1398 struct node_info ni;
1399 size_t new_size;
1400 pgoff_t ilen;
1401
1402 set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1403 ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1404 if (ret)
1405 return ret;
1406
1407 ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
1408 if (ret) {
1409 f2fs_put_dnode(&dn);
1410 return ret;
1411 }
1412
1413 ilen = min((pgoff_t)
1414 ADDRS_PER_PAGE(&dn.node_folio->page, dst_inode) -
1415 dn.ofs_in_node, len - i);
1416 do {
1417 dn.data_blkaddr = f2fs_data_blkaddr(&dn);
1418 f2fs_truncate_data_blocks_range(&dn, 1);
1419
1420 if (do_replace[i]) {
1421 f2fs_i_blocks_write(src_inode,
1422 1, false, false);
1423 f2fs_i_blocks_write(dst_inode,
1424 1, true, false);
1425 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1426 blkaddr[i], ni.version, true, false);
1427
1428 do_replace[i] = 0;
1429 }
1430 dn.ofs_in_node++;
1431 i++;
1432 new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1433 if (dst_inode->i_size < new_size)
1434 f2fs_i_size_write(dst_inode, new_size);
1435 } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1436
1437 f2fs_put_dnode(&dn);
1438 } else {
1439 struct folio *fsrc, *fdst;
1440
1441 fsrc = f2fs_get_lock_data_folio(src_inode,
1442 src + i, true);
1443 if (IS_ERR(fsrc))
1444 return PTR_ERR(fsrc);
1445 fdst = f2fs_get_new_data_folio(dst_inode, NULL, dst + i,
1446 true);
1447 if (IS_ERR(fdst)) {
1448 f2fs_folio_put(fsrc, true);
1449 return PTR_ERR(fdst);
1450 }
1451
1452 f2fs_folio_wait_writeback(fdst, DATA, true, true);
1453
1454 memcpy_folio(fdst, 0, fsrc, 0, PAGE_SIZE);
1455 folio_mark_dirty(fdst);
1456 set_page_private_gcing(&fdst->page);
1457 f2fs_folio_put(fdst, true);
1458 f2fs_folio_put(fsrc, true);
1459
1460 ret = f2fs_truncate_hole(src_inode,
1461 src + i, src + i + 1);
1462 if (ret)
1463 return ret;
1464 i++;
1465 }
1466 }
1467 return 0;
1468 }
1469
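/*
 * Move or copy @len blocks from @src in @src_inode to @dst in @dst_inode,
 * working in bounded batches and rolling back on failure.
 */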
1470 static int __exchange_data_block(struct inode *src_inode,
1471 struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1472 pgoff_t len, bool full)
1473 {
1474 block_t *src_blkaddr;
1475 int *do_replace;
1476 pgoff_t olen;
1477 int ret;
1478
1479 while (len) {
1480 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1481
1482 src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1483 array_size(olen, sizeof(block_t)),
1484 GFP_NOFS);
1485 if (!src_blkaddr)
1486 return -ENOMEM;
1487
1488 do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1489 array_size(olen, sizeof(int)),
1490 GFP_NOFS);
1491 if (!do_replace) {
1492 kvfree(src_blkaddr);
1493 return -ENOMEM;
1494 }
1495
1496 ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1497 do_replace, src, olen);
1498 if (ret)
1499 goto roll_back;
1500
1501 ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1502 do_replace, src, dst, olen, full);
1503 if (ret)
1504 goto roll_back;
1505
1506 src += olen;
1507 dst += olen;
1508 len -= olen;
1509
1510 kvfree(src_blkaddr);
1511 kvfree(do_replace);
1512 }
1513 return 0;
1514
1515 roll_back:
1516 __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1517 kvfree(src_blkaddr);
1518 kvfree(do_replace);
1519 return ret;
1520 }
1521
1522 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1523 {
1524 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1525 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1526 pgoff_t start = offset >> PAGE_SHIFT;
1527 pgoff_t end = (offset + len) >> PAGE_SHIFT;
1528 int ret;
1529
1530 f2fs_balance_fs(sbi, true);
1531
1532 /* avoid gc operation during block exchange */
1533 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1534 filemap_invalidate_lock(inode->i_mapping);
1535
1536 f2fs_zero_post_eof_page(inode, offset + len);
1537
1538 f2fs_lock_op(sbi);
1539 f2fs_drop_extent_tree(inode);
1540 truncate_pagecache(inode, offset);
1541 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1542 f2fs_unlock_op(sbi);
1543
1544 filemap_invalidate_unlock(inode->i_mapping);
1545 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1546 return ret;
1547 }
1548
1549 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1550 {
1551 loff_t new_size;
1552 int ret;
1553
1554 if (offset + len >= i_size_read(inode))
1555 return -EINVAL;
1556
1557 /* collapse range should be aligned to block size of f2fs. */
1558 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1559 return -EINVAL;
1560
1561 ret = f2fs_convert_inline_inode(inode);
1562 if (ret)
1563 return ret;
1564
1565 /* write out all dirty pages from offset */
1566 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1567 if (ret)
1568 return ret;
1569
1570 ret = f2fs_do_collapse(inode, offset, len);
1571 if (ret)
1572 return ret;
1573
1574 /* write out all moved pages, if possible */
1575 filemap_invalidate_lock(inode->i_mapping);
1576 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1577 truncate_pagecache(inode, offset);
1578
1579 new_size = i_size_read(inode) - len;
1580 ret = f2fs_truncate_blocks(inode, new_size, true);
1581 filemap_invalidate_unlock(inode->i_mapping);
1582 if (!ret)
1583 f2fs_i_size_write(inode, new_size);
1584 return ret;
1585 }
1586
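/*
 * Reserve blocks for holes in [@start, @end) and convert existing block
 * addresses to NEW_ADDR so the range reads back as zeroes.
 */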
1587 static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1588 pgoff_t end)
1589 {
1590 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1591 pgoff_t index = start;
1592 unsigned int ofs_in_node = dn->ofs_in_node;
1593 blkcnt_t count = 0;
1594 int ret;
1595
1596 for (; index < end; index++, dn->ofs_in_node++) {
1597 if (f2fs_data_blkaddr(dn) == NULL_ADDR)
1598 count++;
1599 }
1600
1601 dn->ofs_in_node = ofs_in_node;
1602 ret = f2fs_reserve_new_blocks(dn, count);
1603 if (ret)
1604 return ret;
1605
1606 dn->ofs_in_node = ofs_in_node;
1607 for (index = start; index < end; index++, dn->ofs_in_node++) {
1608 dn->data_blkaddr = f2fs_data_blkaddr(dn);
1609 /*
1610 * f2fs_reserve_new_blocks will not guarantee entire block
1611 * allocation.
1612 */
1613 if (dn->data_blkaddr == NULL_ADDR) {
1614 ret = -ENOSPC;
1615 break;
1616 }
1617
1618 if (dn->data_blkaddr == NEW_ADDR)
1619 continue;
1620
1621 if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
1622 DATA_GENERIC_ENHANCE)) {
1623 ret = -EFSCORRUPTED;
1624 break;
1625 }
1626
1627 f2fs_invalidate_blocks(sbi, dn->data_blkaddr, 1);
1628 f2fs_set_data_blkaddr(dn, NEW_ADDR);
1629 }
1630
1631 f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
1632 f2fs_update_age_extent_cache_range(dn, start, index - start);
1633
1634 return ret;
1635 }
1636
1637 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1638 int mode)
1639 {
1640 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1641 struct address_space *mapping = inode->i_mapping;
1642 pgoff_t index, pg_start, pg_end;
1643 loff_t new_size = i_size_read(inode);
1644 loff_t off_start, off_end;
1645 int ret = 0;
1646
1647 ret = inode_newsize_ok(inode, (len + offset));
1648 if (ret)
1649 return ret;
1650
1651 ret = f2fs_convert_inline_inode(inode);
1652 if (ret)
1653 return ret;
1654
1655 ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1656 if (ret)
1657 return ret;
1658
1659 filemap_invalidate_lock(mapping);
1660 f2fs_zero_post_eof_page(inode, offset + len);
1661 filemap_invalidate_unlock(mapping);
1662
1663 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1664 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1665
1666 off_start = offset & (PAGE_SIZE - 1);
1667 off_end = (offset + len) & (PAGE_SIZE - 1);
1668
1669 if (pg_start == pg_end) {
1670 ret = fill_zero(inode, pg_start, off_start,
1671 off_end - off_start);
1672 if (ret)
1673 return ret;
1674
1675 new_size = max_t(loff_t, new_size, offset + len);
1676 } else {
1677 if (off_start) {
1678 ret = fill_zero(inode, pg_start++, off_start,
1679 PAGE_SIZE - off_start);
1680 if (ret)
1681 return ret;
1682
1683 new_size = max_t(loff_t, new_size,
1684 (loff_t)pg_start << PAGE_SHIFT);
1685 }
1686
1687 for (index = pg_start; index < pg_end;) {
1688 struct dnode_of_data dn;
1689 unsigned int end_offset;
1690 pgoff_t end;
1691
1692 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1693 filemap_invalidate_lock(mapping);
1694
1695 truncate_pagecache_range(inode,
1696 (loff_t)index << PAGE_SHIFT,
1697 ((loff_t)pg_end << PAGE_SHIFT) - 1);
1698
1699 f2fs_lock_op(sbi);
1700
1701 set_new_dnode(&dn, inode, NULL, NULL, 0);
1702 ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1703 if (ret) {
1704 f2fs_unlock_op(sbi);
1705 filemap_invalidate_unlock(mapping);
1706 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1707 goto out;
1708 }
1709
1710 end_offset = ADDRS_PER_PAGE(&dn.node_folio->page, inode);
1711 end = min(pg_end, end_offset - dn.ofs_in_node + index);
1712
1713 ret = f2fs_do_zero_range(&dn, index, end);
1714 f2fs_put_dnode(&dn);
1715
1716 f2fs_unlock_op(sbi);
1717 filemap_invalidate_unlock(mapping);
1718 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1719
1720 f2fs_balance_fs(sbi, dn.node_changed);
1721
1722 if (ret)
1723 goto out;
1724
1725 index = end;
1726 new_size = max_t(loff_t, new_size,
1727 (loff_t)index << PAGE_SHIFT);
1728 }
1729
1730 if (off_end) {
1731 ret = fill_zero(inode, pg_end, 0, off_end);
1732 if (ret)
1733 goto out;
1734
1735 new_size = max_t(loff_t, new_size, offset + len);
1736 }
1737 }
1738
1739 out:
1740 if (new_size > i_size_read(inode)) {
1741 if (mode & FALLOC_FL_KEEP_SIZE)
1742 file_set_keep_isize(inode);
1743 else
1744 f2fs_i_size_write(inode, new_size);
1745 }
1746 return ret;
1747 }
1748
1749 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1750 {
1751 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1752 struct address_space *mapping = inode->i_mapping;
1753 pgoff_t nr, pg_start, pg_end, delta, idx;
1754 loff_t new_size;
1755 int ret = 0;
1756
1757 new_size = i_size_read(inode) + len;
1758 ret = inode_newsize_ok(inode, new_size);
1759 if (ret)
1760 return ret;
1761
1762 if (offset >= i_size_read(inode))
1763 return -EINVAL;
1764
1765 /* insert range should be aligned to block size of f2fs. */
1766 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1767 return -EINVAL;
1768
1769 ret = f2fs_convert_inline_inode(inode);
1770 if (ret)
1771 return ret;
1772
1773 f2fs_balance_fs(sbi, true);
1774
1775 filemap_invalidate_lock(mapping);
1776 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1777 filemap_invalidate_unlock(mapping);
1778 if (ret)
1779 return ret;
1780
1781 /* write out all dirty pages from offset */
1782 ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
1783 if (ret)
1784 return ret;
1785
1786 pg_start = offset >> PAGE_SHIFT;
1787 pg_end = (offset + len) >> PAGE_SHIFT;
1788 delta = pg_end - pg_start;
1789 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1790
1791 /* avoid gc operation during block exchange */
1792 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1793 filemap_invalidate_lock(mapping);
1794
1795 f2fs_zero_post_eof_page(inode, offset + len);
1796 truncate_pagecache(inode, offset);
1797
1798 while (!ret && idx > pg_start) {
1799 nr = idx - pg_start;
1800 if (nr > delta)
1801 nr = delta;
1802 idx -= nr;
1803
1804 f2fs_lock_op(sbi);
1805 f2fs_drop_extent_tree(inode);
1806
1807 ret = __exchange_data_block(inode, inode, idx,
1808 idx + delta, nr, false);
1809 f2fs_unlock_op(sbi);
1810 }
1811 filemap_invalidate_unlock(mapping);
1812 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1813 if (ret)
1814 return ret;
1815
1816 /* write out all moved pages, if possible */
1817 filemap_invalidate_lock(mapping);
1818 ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
1819 truncate_pagecache(inode, offset);
1820 filemap_invalidate_unlock(mapping);
1821
1822 if (!ret)
1823 f2fs_i_size_write(inode, new_size);
1824 return ret;
1825 }
1826
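/*
 * Preallocate blocks for fallocate(): pinned files are expanded one pinned
 * section at a time (triggering foreground GC when free sections run low);
 * other files go through a single f2fs_map_blocks() call.
 */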
1827 static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
1828 loff_t len, int mode)
1829 {
1830 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1831 struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1832 .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1833 .m_may_create = true };
1834 struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
1835 .init_gc_type = FG_GC,
1836 .should_migrate_blocks = false,
1837 .err_gc_skipped = true,
1838 .nr_free_secs = 0 };
1839 pgoff_t pg_start, pg_end;
1840 loff_t new_size;
1841 loff_t off_end;
1842 block_t expanded = 0;
1843 int err;
1844
1845 err = inode_newsize_ok(inode, (len + offset));
1846 if (err)
1847 return err;
1848
1849 err = f2fs_convert_inline_inode(inode);
1850 if (err)
1851 return err;
1852
1853 filemap_invalidate_lock(inode->i_mapping);
1854 f2fs_zero_post_eof_page(inode, offset + len);
1855 filemap_invalidate_unlock(inode->i_mapping);
1856
1857 f2fs_balance_fs(sbi, true);
1858
1859 pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
1860 pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1861 off_end = (offset + len) & (PAGE_SIZE - 1);
1862
1863 map.m_lblk = pg_start;
1864 map.m_len = pg_end - pg_start;
1865 if (off_end)
1866 map.m_len++;
1867
1868 if (!map.m_len)
1869 return 0;
1870
1871 if (f2fs_is_pinned_file(inode)) {
1872 block_t sec_blks = CAP_BLKS_PER_SEC(sbi);
1873 block_t sec_len = roundup(map.m_len, sec_blks);
1874
1875 map.m_len = sec_blks;
1876 next_alloc:
1877 f2fs_down_write(&sbi->pin_sem);
1878
1879 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
1880 if (has_not_enough_free_secs(sbi, 0, 0)) {
1881 f2fs_up_write(&sbi->pin_sem);
1882 err = -ENOSPC;
1883 f2fs_warn_ratelimited(sbi,
1884 "ino:%lu, start:%lu, end:%lu, need to trigger GC to "
1885 "reclaim enough free segment when checkpoint is enabled",
1886 inode->i_ino, pg_start, pg_end);
1887 goto out_err;
1888 }
1889 }
1890
1891 if (has_not_enough_free_secs(sbi, 0, f2fs_sb_has_blkzoned(sbi) ?
1892 ZONED_PIN_SEC_REQUIRED_COUNT :
1893 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1894 f2fs_down_write(&sbi->gc_lock);
1895 stat_inc_gc_call_count(sbi, FOREGROUND);
1896 err = f2fs_gc(sbi, &gc_control);
1897 if (err && err != -ENODATA) {
1898 f2fs_up_write(&sbi->pin_sem);
1899 goto out_err;
1900 }
1901 }
1902
1903 err = f2fs_allocate_pinning_section(sbi);
1904 if (err) {
1905 f2fs_up_write(&sbi->pin_sem);
1906 goto out_err;
1907 }
1908
1909 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1910 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_DIO);
1911 file_dont_truncate(inode);
1912
1913 f2fs_up_write(&sbi->pin_sem);
1914
1915 expanded += map.m_len;
1916 sec_len -= map.m_len;
1917 map.m_lblk += map.m_len;
1918 if (!err && sec_len)
1919 goto next_alloc;
1920
1921 map.m_len = expanded;
1922 } else {
1923 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_AIO);
1924 expanded = map.m_len;
1925 }
1926 out_err:
1927 if (err) {
1928 pgoff_t last_off;
1929
1930 if (!expanded)
1931 return err;
1932
1933 last_off = pg_start + expanded - 1;
1934
1935 /* update new size to the failed position */
1936 new_size = (last_off == pg_end) ? offset + len :
1937 (loff_t)(last_off + 1) << PAGE_SHIFT;
1938 } else {
1939 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1940 }
1941
1942 if (new_size > i_size_read(inode)) {
1943 if (mode & FALLOC_FL_KEEP_SIZE)
1944 file_set_keep_isize(inode);
1945 else
1946 f2fs_i_size_write(inode, new_size);
1947 }
1948
1949 return err;
1950 }
1951
1952 static long f2fs_fallocate(struct file *file, int mode,
1953 loff_t offset, loff_t len)
1954 {
1955 struct inode *inode = file_inode(file);
1956 long ret = 0;
1957
1958 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1959 return -EIO;
1960 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1961 return -ENOSPC;
1962 if (!f2fs_is_compress_backend_ready(inode) || IS_DEVICE_ALIASING(inode))
1963 return -EOPNOTSUPP;
1964
1965 	/* f2fs only supports ->fallocate for regular files */
1966 if (!S_ISREG(inode->i_mode))
1967 return -EINVAL;
1968
1969 if (IS_ENCRYPTED(inode) &&
1970 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1971 return -EOPNOTSUPP;
1972
1973 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1974 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1975 FALLOC_FL_INSERT_RANGE))
1976 return -EOPNOTSUPP;
1977
1978 inode_lock(inode);
1979
1980 	/*
1981 	 * Pinned files should not support partial truncation since the blocks
1982 	 * can be in use by applications.
1983 	 */
1984 if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
1985 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1986 FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE))) {
1987 ret = -EOPNOTSUPP;
1988 goto out;
1989 }
1990
1991 ret = file_modified(file);
1992 if (ret)
1993 goto out;
1994
1995 	/*
1996 	 * Wait for in-flight DIO; blocks should be removed only after I/O
1997 	 * completion.
1998 	 */
1999 inode_dio_wait(inode);
2000
2001 if (mode & FALLOC_FL_PUNCH_HOLE) {
2002 if (offset >= inode->i_size)
2003 goto out;
2004
2005 ret = f2fs_punch_hole(inode, offset, len);
2006 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
2007 ret = f2fs_collapse_range(inode, offset, len);
2008 } else if (mode & FALLOC_FL_ZERO_RANGE) {
2009 ret = f2fs_zero_range(inode, offset, len, mode);
2010 } else if (mode & FALLOC_FL_INSERT_RANGE) {
2011 ret = f2fs_insert_range(inode, offset, len);
2012 } else {
2013 ret = f2fs_expand_inode_data(inode, offset, len, mode);
2014 }
2015
2016 if (!ret) {
2017 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
2018 f2fs_mark_inode_dirty_sync(inode, false);
2019 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2020 }
2021
2022 out:
2023 inode_unlock(inode);
2024
2025 trace_f2fs_fallocate(inode, mode, offset, len, ret);
2026 return ret;
2027 }
2028
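/*
 * ->release hook: abort an in-flight atomic write, but only when this file
 * was opened for write and it holds the inode's last write reference.
 */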
2029 static int f2fs_release_file(struct inode *inode, struct file *filp)
2030 {
2031 	/*
2032 	 * f2fs_release_file is called on every close() call, so we should not
2033 	 * drop any in-memory pages due to a close() issued by another process.
2034 	 */
2035 if (!(filp->f_mode & FMODE_WRITE) ||
2036 atomic_read(&inode->i_writecount) != 1)
2037 return 0;
2038
2039 inode_lock(inode);
2040 f2fs_abort_atomic_write(inode, true);
2041 inode_unlock(inode);
2042
2043 return 0;
2044 }
2045
2046 static int f2fs_file_flush(struct file *file, fl_owner_t id)
2047 {
2048 struct inode *inode = file_inode(file);
2049
2050 	/*
2051 	 * If the process doing a transaction crashes, we should roll back.
2052 	 * Otherwise, other readers/writers can see a corrupted database until
2053 	 * all the writers close their files. Since this should be done before
2054 	 * dropping the file lock, it needs to be done in ->flush.
2055 	 */
2056 if (F2FS_I(inode)->atomic_write_task == current &&
2057 (current->flags & PF_EXITING)) {
2058 inode_lock(inode);
2059 f2fs_abort_atomic_write(inode, true);
2060 inode_unlock(inode);
2061 }
2062
2063 return 0;
2064 }
2065
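/*
 * Common helper for FS_IOC_SETFLAGS and FS_IOC_FSSETXATTR: validate the
 * casefold and compression flag transitions, then update fi->i_flags and
 * the derived in-memory inode flags.
 */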
2066 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
2067 {
2068 struct f2fs_inode_info *fi = F2FS_I(inode);
2069 u32 masked_flags = fi->i_flags & mask;
2070
2071 /* mask can be shrunk by flags_valid selector */
2072 iflags &= mask;
2073
2074 	/* Is it a quota file? Do not allow the user to mess with it */
2075 if (IS_NOQUOTA(inode))
2076 return -EPERM;
2077
2078 if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
2079 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
2080 return -EOPNOTSUPP;
2081 if (!f2fs_empty_dir(inode))
2082 return -ENOTEMPTY;
2083 }
2084
2085 if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
2086 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
2087 return -EOPNOTSUPP;
2088 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
2089 return -EINVAL;
2090 }
2091
2092 if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
2093 if (masked_flags & F2FS_COMPR_FL) {
2094 if (!f2fs_disable_compressed_file(inode))
2095 return -EINVAL;
2096 } else {
2097 /* try to convert inline_data to support compression */
2098 int err = f2fs_convert_inline_inode(inode);
2099 if (err)
2100 return err;
2101
2102 f2fs_down_write(&fi->i_sem);
2103 if (!f2fs_may_compress(inode) ||
2104 (S_ISREG(inode->i_mode) &&
2105 F2FS_HAS_BLOCKS(inode))) {
2106 f2fs_up_write(&fi->i_sem);
2107 return -EINVAL;
2108 }
2109 err = set_compress_context(inode);
2110 f2fs_up_write(&fi->i_sem);
2111
2112 if (err)
2113 return err;
2114 }
2115 }
2116
2117 fi->i_flags = iflags | (fi->i_flags & ~mask);
2118 f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
2119 (fi->i_flags & F2FS_NOCOMP_FL));
2120
2121 if (fi->i_flags & F2FS_PROJINHERIT_FL)
2122 set_inode_flag(inode, FI_PROJ_INHERIT);
2123 else
2124 clear_inode_flag(inode, FI_PROJ_INHERIT);
2125
2126 inode_set_ctime_current(inode);
2127 f2fs_set_inode_flags(inode);
2128 f2fs_mark_inode_dirty_sync(inode, true);
2129 return 0;
2130 }
2131
2132 /* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */
2133
2134 /*
2135 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
2136 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
2137 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, add
2138 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL as well.
2139 *
2140 * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and
2141 * FS_IOC_FSSETXATTR is done by the VFS.
2142 */
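/*
 * For example, wiring up a hypothetical F2FS_FOO_FL would mean adding
 * { F2FS_FOO_FL, FS_FOO_FL } to f2fs_fsflags_map[] below, adding FS_FOO_FL
 * to F2FS_GETTABLE_FS_FL, and adding it to F2FS_SETTABLE_FS_FL as well if
 * it should be settable.
 */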
2143
2144 static const struct {
2145 u32 iflag;
2146 u32 fsflag;
2147 } f2fs_fsflags_map[] = {
2148 { F2FS_COMPR_FL, FS_COMPR_FL },
2149 { F2FS_SYNC_FL, FS_SYNC_FL },
2150 { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL },
2151 { F2FS_APPEND_FL, FS_APPEND_FL },
2152 { F2FS_NODUMP_FL, FS_NODUMP_FL },
2153 { F2FS_NOATIME_FL, FS_NOATIME_FL },
2154 { F2FS_NOCOMP_FL, FS_NOCOMP_FL },
2155 { F2FS_INDEX_FL, FS_INDEX_FL },
2156 { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL },
2157 { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL },
2158 { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL },
2159 };
2160
2161 #define F2FS_GETTABLE_FS_FL ( \
2162 FS_COMPR_FL | \
2163 FS_SYNC_FL | \
2164 FS_IMMUTABLE_FL | \
2165 FS_APPEND_FL | \
2166 FS_NODUMP_FL | \
2167 FS_NOATIME_FL | \
2168 FS_NOCOMP_FL | \
2169 FS_INDEX_FL | \
2170 FS_DIRSYNC_FL | \
2171 FS_PROJINHERIT_FL | \
2172 FS_ENCRYPT_FL | \
2173 FS_INLINE_DATA_FL | \
2174 FS_NOCOW_FL | \
2175 FS_VERITY_FL | \
2176 FS_CASEFOLD_FL)
2177
2178 #define F2FS_SETTABLE_FS_FL ( \
2179 FS_COMPR_FL | \
2180 FS_SYNC_FL | \
2181 FS_IMMUTABLE_FL | \
2182 FS_APPEND_FL | \
2183 FS_NODUMP_FL | \
2184 FS_NOATIME_FL | \
2185 FS_NOCOMP_FL | \
2186 FS_DIRSYNC_FL | \
2187 FS_PROJINHERIT_FL | \
2188 FS_CASEFOLD_FL)
2189
2190 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
2191 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
2192 {
2193 u32 fsflags = 0;
2194 int i;
2195
2196 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
2197 if (iflags & f2fs_fsflags_map[i].iflag)
2198 fsflags |= f2fs_fsflags_map[i].fsflag;
2199
2200 return fsflags;
2201 }
2202
2203 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
2204 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
2205 {
2206 u32 iflags = 0;
2207 int i;
2208
2209 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
2210 if (fsflags & f2fs_fsflags_map[i].fsflag)
2211 iflags |= f2fs_fsflags_map[i].iflag;
2212
2213 return iflags;
2214 }
2215
2216 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
2217 {
2218 struct inode *inode = file_inode(filp);
2219
2220 return put_user(inode->i_generation, (int __user *)arg);
2221 }
2222
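/*
 * Begin an atomic write session: flush dirty pages, create (or reuse and
 * truncate) the per-inode COW inode that stages the updates, and mark the
 * inode FI_ATOMIC_FILE. With @truncate, the file is emptied first
 * (FI_ATOMIC_REPLACE).
 */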
2223 static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
2224 {
2225 struct inode *inode = file_inode(filp);
2226 struct mnt_idmap *idmap = file_mnt_idmap(filp);
2227 struct f2fs_inode_info *fi = F2FS_I(inode);
2228 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2229 loff_t isize;
2230 int ret;
2231
2232 if (!(filp->f_mode & FMODE_WRITE))
2233 return -EBADF;
2234
2235 if (!inode_owner_or_capable(idmap, inode))
2236 return -EACCES;
2237
2238 if (!S_ISREG(inode->i_mode))
2239 return -EINVAL;
2240
2241 if (filp->f_flags & O_DIRECT)
2242 return -EINVAL;
2243
2244 ret = mnt_want_write_file(filp);
2245 if (ret)
2246 return ret;
2247
2248 inode_lock(inode);
2249
2250 if (!f2fs_disable_compressed_file(inode) ||
2251 f2fs_is_pinned_file(inode)) {
2252 ret = -EINVAL;
2253 goto out;
2254 }
2255
2256 if (f2fs_is_atomic_file(inode))
2257 goto out;
2258
2259 ret = f2fs_convert_inline_inode(inode);
2260 if (ret)
2261 goto out;
2262
2263 f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
2264 f2fs_down_write(&fi->i_gc_rwsem[READ]);
2265
2266 	/*
2267 	 * Should wait for end_io so that F2FS_WB_CP_DATA is counted correctly
2268 	 * by f2fs_is_atomic_file().
2269 	 */
2270 if (get_dirty_pages(inode))
2271 f2fs_warn(sbi, "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2272 inode->i_ino, get_dirty_pages(inode));
2273 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2274 if (ret)
2275 goto out_unlock;
2276
2277 /* Check if the inode already has a COW inode */
2278 if (fi->cow_inode == NULL) {
2279 /* Create a COW inode for atomic write */
2280 struct dentry *dentry = file_dentry(filp);
2281 struct inode *dir = d_inode(dentry->d_parent);
2282
2283 ret = f2fs_get_tmpfile(idmap, dir, &fi->cow_inode);
2284 if (ret)
2285 goto out_unlock;
2286
2287 set_inode_flag(fi->cow_inode, FI_COW_FILE);
2288 clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
2289
2290 /* Set the COW inode's atomic_inode to the atomic inode */
2291 F2FS_I(fi->cow_inode)->atomic_inode = inode;
2292 } else {
2293 /* Reuse the already created COW inode */
2294 f2fs_bug_on(sbi, get_dirty_pages(fi->cow_inode));
2295
2296 invalidate_mapping_pages(fi->cow_inode->i_mapping, 0, -1);
2297
2298 ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
2299 if (ret)
2300 goto out_unlock;
2301 }
2302
2303 f2fs_write_inode(inode, NULL);
2304
2305 stat_inc_atomic_inode(inode);
2306
2307 set_inode_flag(inode, FI_ATOMIC_FILE);
2308
2309 isize = i_size_read(inode);
2310 fi->original_i_size = isize;
2311 if (truncate) {
2312 set_inode_flag(inode, FI_ATOMIC_REPLACE);
2313 truncate_inode_pages_final(inode->i_mapping);
2314 f2fs_i_size_write(inode, 0);
2315 isize = 0;
2316 }
2317 f2fs_i_size_write(fi->cow_inode, isize);
2318
2319 out_unlock:
2320 f2fs_up_write(&fi->i_gc_rwsem[READ]);
2321 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2322 if (ret)
2323 goto out;
2324
2325 f2fs_update_time(sbi, REQ_TIME);
2326 fi->atomic_write_task = current;
2327 stat_update_max_atomic_write(inode);
2328 fi->atomic_write_cnt = 0;
2329 out:
2330 inode_unlock(inode);
2331 mnt_drop_write_file(filp);
2332 return ret;
2333 }
2334
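/*
 * Commit a pending atomic write: move the staged blocks from the COW inode
 * into place, sync the result, and tear down the atomic-write state. If no
 * atomic write is in progress, fall back to a plain sync of the file.
 */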
2335 static int f2fs_ioc_commit_atomic_write(struct file *filp)
2336 {
2337 struct inode *inode = file_inode(filp);
2338 struct mnt_idmap *idmap = file_mnt_idmap(filp);
2339 int ret;
2340
2341 if (!(filp->f_mode & FMODE_WRITE))
2342 return -EBADF;
2343
2344 if (!inode_owner_or_capable(idmap, inode))
2345 return -EACCES;
2346
2347 ret = mnt_want_write_file(filp);
2348 if (ret)
2349 return ret;
2350
2351 f2fs_balance_fs(F2FS_I_SB(inode), true);
2352
2353 inode_lock(inode);
2354
2355 if (f2fs_is_atomic_file(inode)) {
2356 ret = f2fs_commit_atomic_write(inode);
2357 if (!ret)
2358 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2359
2360 f2fs_abort_atomic_write(inode, ret);
2361 } else {
2362 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2363 }
2364
2365 inode_unlock(inode);
2366 mnt_drop_write_file(filp);
2367 return ret;
2368 }
2369
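/*
 * Abort a pending atomic write without committing it; any updates staged
 * in the COW inode are discarded.
 */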
2370 static int f2fs_ioc_abort_atomic_write(struct file *filp)
2371 {
2372 struct inode *inode = file_inode(filp);
2373 struct mnt_idmap *idmap = file_mnt_idmap(filp);
2374 int ret;
2375
2376 if (!(filp->f_mode & FMODE_WRITE))
2377 return -EBADF;
2378
2379 if (!inode_owner_or_capable(idmap, inode))
2380 return -EACCES;
2381
2382 ret = mnt_want_write_file(filp);
2383 if (ret)
2384 return ret;
2385
2386 inode_lock(inode);
2387
2388 f2fs_abort_atomic_write(inode, true);
2389
2390 inode_unlock(inode);
2391
2392 mnt_drop_write_file(filp);
2393 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2394 return ret;
2395 }
2396
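/*
 * Core of the shutdown ioctl: depending on @flag, freeze, checkpoint or
 * flush metadata before stopping checkpoints, then (unless the fs is
 * read-only) stop the GC and discard threads and drop pending discards.
 */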
2397 int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag,
2398 bool readonly, bool need_lock)
2399 {
2400 struct super_block *sb = sbi->sb;
2401 int ret = 0;
2402
2403 switch (flag) {
2404 case F2FS_GOING_DOWN_FULLSYNC:
2405 ret = bdev_freeze(sb->s_bdev);
2406 if (ret)
2407 goto out;
2408 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2409 bdev_thaw(sb->s_bdev);
2410 break;
2411 case F2FS_GOING_DOWN_METASYNC:
2412 /* do checkpoint only */
2413 ret = f2fs_sync_fs(sb, 1);
2414 if (ret) {
2415 if (ret == -EIO)
2416 ret = 0;
2417 goto out;
2418 }
2419 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2420 break;
2421 case F2FS_GOING_DOWN_NOSYNC:
2422 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2423 break;
2424 case F2FS_GOING_DOWN_METAFLUSH:
2425 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2426 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2427 break;
2428 case F2FS_GOING_DOWN_NEED_FSCK:
2429 set_sbi_flag(sbi, SBI_NEED_FSCK);
2430 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2431 set_sbi_flag(sbi, SBI_IS_DIRTY);
2432 /* do checkpoint only */
2433 ret = f2fs_sync_fs(sb, 1);
2434 if (ret == -EIO)
2435 ret = 0;
2436 goto out;
2437 default:
2438 ret = -EINVAL;
2439 goto out;
2440 }
2441
2442 if (readonly)
2443 goto out;
2444
2445 /*
2446 * grab sb->s_umount to avoid racing w/ remount() and other shutdown
2447 * paths.
2448 */
2449 if (need_lock)
2450 down_write(&sbi->sb->s_umount);
2451
2452 f2fs_stop_gc_thread(sbi);
2453 f2fs_stop_discard_thread(sbi);
2454
2455 f2fs_drop_discard_cmd(sbi);
2456 clear_opt(sbi, DISCARD);
2457
2458 if (need_lock)
2459 up_write(&sbi->sb->s_umount);
2460
2461 f2fs_update_time(sbi, REQ_TIME);
2462 out:
2463
2464 trace_f2fs_shutdown(sbi, flag, ret);
2465
2466 return ret;
2467 }
2468
2469 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2470 {
2471 struct inode *inode = file_inode(filp);
2472 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2473 __u32 in;
2474 int ret;
2475 bool need_drop = false, readonly = false;
2476
2477 if (!capable(CAP_SYS_ADMIN))
2478 return -EPERM;
2479
2480 if (get_user(in, (__u32 __user *)arg))
2481 return -EFAULT;
2482
2483 if (in != F2FS_GOING_DOWN_FULLSYNC) {
2484 ret = mnt_want_write_file(filp);
2485 if (ret) {
2486 if (ret != -EROFS)
2487 return ret;
2488
2489 			/* fall back to nosync shutdown for a read-only fs */
2490 in = F2FS_GOING_DOWN_NOSYNC;
2491 readonly = true;
2492 } else {
2493 need_drop = true;
2494 }
2495 }
2496
2497 ret = f2fs_do_shutdown(sbi, in, readonly, true);
2498
2499 if (need_drop)
2500 mnt_drop_write_file(filp);
2501
2502 return ret;
2503 }
2504
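/*
 * Record the page range covering [offset, offset + len) on the inode's
 * gdonate_list so that its page cache can be donated/reclaimed later; a
 * zero length removes the inode from the donation list instead.
 */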
2505 static int f2fs_keep_noreuse_range(struct inode *inode,
2506 loff_t offset, loff_t len)
2507 {
2508 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2509 u64 max_bytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
2510 u64 start, end;
2511 int ret = 0;
2512
2513 if (!S_ISREG(inode->i_mode))
2514 return 0;
2515
2516 if (offset >= max_bytes || len > max_bytes ||
2517 (offset + len) > max_bytes)
2518 return 0;
2519
2520 start = offset >> PAGE_SHIFT;
2521 end = DIV_ROUND_UP(offset + len, PAGE_SIZE);
2522
2523 inode_lock(inode);
2524 if (f2fs_is_atomic_file(inode)) {
2525 inode_unlock(inode);
2526 return 0;
2527 }
2528
2529 spin_lock(&sbi->inode_lock[DONATE_INODE]);
2530 	/* let's remove the range if len == 0 */
2531 if (!len) {
2532 if (!list_empty(&F2FS_I(inode)->gdonate_list)) {
2533 list_del_init(&F2FS_I(inode)->gdonate_list);
2534 sbi->donate_files--;
2535 if (is_inode_flag_set(inode, FI_DONATE_FINISHED))
2536 ret = -EALREADY;
2537 else
2538 set_inode_flag(inode, FI_DONATE_FINISHED);
2539 } else
2540 ret = -ENOENT;
2541 } else {
2542 if (list_empty(&F2FS_I(inode)->gdonate_list)) {
2543 list_add_tail(&F2FS_I(inode)->gdonate_list,
2544 &sbi->inode_list[DONATE_INODE]);
2545 sbi->donate_files++;
2546 } else {
2547 list_move_tail(&F2FS_I(inode)->gdonate_list,
2548 &sbi->inode_list[DONATE_INODE]);
2549 }
2550 F2FS_I(inode)->donate_start = start;
2551 F2FS_I(inode)->donate_end = end - 1;
2552 clear_inode_flag(inode, FI_DONATE_FINISHED);
2553 }
2554 spin_unlock(&sbi->inode_lock[DONATE_INODE]);
2555 inode_unlock(inode);
2556
2557 return ret;
2558 }
2559
2560 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2561 {
2562 struct inode *inode = file_inode(filp);
2563 struct super_block *sb = inode->i_sb;
2564 struct fstrim_range range;
2565 int ret;
2566
2567 if (!capable(CAP_SYS_ADMIN))
2568 return -EPERM;
2569
2570 if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2571 return -EOPNOTSUPP;
2572
2573 if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2574 sizeof(range)))
2575 return -EFAULT;
2576
2577 ret = mnt_want_write_file(filp);
2578 if (ret)
2579 return ret;
2580
2581 range.minlen = max((unsigned int)range.minlen,
2582 bdev_discard_granularity(sb->s_bdev));
2583 ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2584 mnt_drop_write_file(filp);
2585 if (ret < 0)
2586 return ret;
2587
2588 if (copy_to_user((struct fstrim_range __user *)arg, &range,
2589 sizeof(range)))
2590 return -EFAULT;
2591 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2592 return 0;
2593 }
2594
2595 static bool uuid_is_nonzero(__u8 u[16])
2596 {
2597 int i;
2598
2599 for (i = 0; i < 16; i++)
2600 if (u[i])
2601 return true;
2602 return false;
2603 }
2604
2605 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2606 {
2607 struct inode *inode = file_inode(filp);
2608 int ret;
2609
2610 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2611 return -EOPNOTSUPP;
2612
2613 ret = fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2614 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2615 return ret;
2616 }
2617
2618 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2619 {
2620 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2621 return -EOPNOTSUPP;
2622 return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2623 }
2624
2625 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2626 {
2627 struct inode *inode = file_inode(filp);
2628 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2629 u8 encrypt_pw_salt[16];
2630 int err;
2631
2632 if (!f2fs_sb_has_encrypt(sbi))
2633 return -EOPNOTSUPP;
2634
2635 err = mnt_want_write_file(filp);
2636 if (err)
2637 return err;
2638
2639 f2fs_down_write(&sbi->sb_lock);
2640
2641 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2642 goto got_it;
2643
2644 /* update superblock with uuid */
2645 generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2646
2647 err = f2fs_commit_super(sbi, false);
2648 if (err) {
2649 /* undo new data */
2650 memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2651 goto out_err;
2652 }
2653 got_it:
2654 memcpy(encrypt_pw_salt, sbi->raw_super->encrypt_pw_salt, 16);
2655 out_err:
2656 f2fs_up_write(&sbi->sb_lock);
2657 mnt_drop_write_file(filp);
2658
2659 if (!err && copy_to_user((__u8 __user *)arg, encrypt_pw_salt, 16))
2660 err = -EFAULT;
2661
2662 return err;
2663 }
2664
2665 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2666 unsigned long arg)
2667 {
2668 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2669 return -EOPNOTSUPP;
2670
2671 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2672 }
2673
2674 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2675 {
2676 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2677 return -EOPNOTSUPP;
2678
2679 return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2680 }
2681
2682 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2683 {
2684 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2685 return -EOPNOTSUPP;
2686
2687 return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2688 }
2689
2690 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2691 unsigned long arg)
2692 {
2693 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2694 return -EOPNOTSUPP;
2695
2696 return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2697 }
2698
2699 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2700 unsigned long arg)
2701 {
2702 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2703 return -EOPNOTSUPP;
2704
2705 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2706 }
2707
2708 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2709 {
2710 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2711 return -EOPNOTSUPP;
2712
2713 return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2714 }
2715
2716 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2717 {
2718 struct inode *inode = file_inode(filp);
2719 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2720 struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
2721 .no_bg_gc = false,
2722 .should_migrate_blocks = false,
2723 .nr_free_secs = 0 };
2724 __u32 sync;
2725 int ret;
2726
2727 if (!capable(CAP_SYS_ADMIN))
2728 return -EPERM;
2729
2730 if (get_user(sync, (__u32 __user *)arg))
2731 return -EFAULT;
2732
2733 if (f2fs_readonly(sbi->sb))
2734 return -EROFS;
2735
2736 ret = mnt_want_write_file(filp);
2737 if (ret)
2738 return ret;
2739
2740 if (!sync) {
2741 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2742 ret = -EBUSY;
2743 goto out;
2744 }
2745 } else {
2746 f2fs_down_write(&sbi->gc_lock);
2747 }
2748
2749 gc_control.init_gc_type = sync ? FG_GC : BG_GC;
2750 gc_control.err_gc_skipped = sync;
2751 stat_inc_gc_call_count(sbi, FOREGROUND);
2752 ret = f2fs_gc(sbi, &gc_control);
2753 out:
2754 mnt_drop_write_file(filp);
2755 return ret;
2756 }
2757
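/*
 * Run GC over a caller-supplied block range, one section at a time,
 * advancing range->start by the section size until the whole range has
 * been visited.
 */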
2758 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2759 {
2760 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2761 struct f2fs_gc_control gc_control = {
2762 .init_gc_type = range->sync ? FG_GC : BG_GC,
2763 .no_bg_gc = false,
2764 .should_migrate_blocks = false,
2765 .err_gc_skipped = range->sync,
2766 .nr_free_secs = 0 };
2767 u64 end;
2768 int ret;
2769
2770 if (!capable(CAP_SYS_ADMIN))
2771 return -EPERM;
2772 if (f2fs_readonly(sbi->sb))
2773 return -EROFS;
2774
2775 end = range->start + range->len;
2776 if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2777 end >= MAX_BLKADDR(sbi))
2778 return -EINVAL;
2779
2780 ret = mnt_want_write_file(filp);
2781 if (ret)
2782 return ret;
2783
2784 do_more:
2785 if (!range->sync) {
2786 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2787 ret = -EBUSY;
2788 goto out;
2789 }
2790 } else {
2791 f2fs_down_write(&sbi->gc_lock);
2792 }
2793
2794 gc_control.victim_segno = GET_SEGNO(sbi, range->start);
2795 stat_inc_gc_call_count(sbi, FOREGROUND);
2796 ret = f2fs_gc(sbi, &gc_control);
2797 if (ret) {
2798 if (ret == -EBUSY)
2799 ret = -EAGAIN;
2800 goto out;
2801 }
2802 range->start += CAP_BLKS_PER_SEC(sbi);
2803 if (range->start <= end)
2804 goto do_more;
2805 out:
2806 mnt_drop_write_file(filp);
2807 return ret;
2808 }
2809
2810 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2811 {
2812 struct f2fs_gc_range range;
2813
2814 if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2815 sizeof(range)))
2816 return -EFAULT;
2817 return __f2fs_ioc_gc_range(filp, &range);
2818 }
2819
2820 static int f2fs_ioc_write_checkpoint(struct file *filp)
2821 {
2822 struct inode *inode = file_inode(filp);
2823 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2824 int ret;
2825
2826 if (!capable(CAP_SYS_ADMIN))
2827 return -EPERM;
2828
2829 if (f2fs_readonly(sbi->sb))
2830 return -EROFS;
2831
2832 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2833 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2834 return -EINVAL;
2835 }
2836
2837 ret = mnt_want_write_file(filp);
2838 if (ret)
2839 return ret;
2840
2841 ret = f2fs_sync_fs(sbi->sb, 1);
2842
2843 mnt_drop_write_file(filp);
2844 return ret;
2845 }
2846
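/*
 * Defragment a file range: scan the block mapping to detect non-contiguous
 * extents, then redirty the affected pages with the GC flag set so that
 * writeback reallocates them contiguously via out-of-place writes.
 */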
2847 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2848 struct file *filp,
2849 struct f2fs_defragment *range)
2850 {
2851 struct inode *inode = file_inode(filp);
2852 struct f2fs_map_blocks map = { .m_next_extent = NULL,
2853 .m_seg_type = NO_CHECK_TYPE,
2854 .m_may_create = false };
2855 struct extent_info ei = {};
2856 pgoff_t pg_start, pg_end, next_pgofs;
2857 unsigned int total = 0, sec_num;
2858 block_t blk_end = 0;
2859 bool fragmented = false;
2860 int err;
2861
2862 f2fs_balance_fs(sbi, true);
2863
2864 inode_lock(inode);
2865 pg_start = range->start >> PAGE_SHIFT;
2866 pg_end = min_t(pgoff_t,
2867 (range->start + range->len) >> PAGE_SHIFT,
2868 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
2869
2870 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) ||
2871 f2fs_is_atomic_file(inode)) {
2872 err = -EINVAL;
2873 goto unlock_out;
2874 }
2875
2876 /* if in-place-update policy is enabled, don't waste time here */
2877 set_inode_flag(inode, FI_OPU_WRITE);
2878 if (f2fs_should_update_inplace(inode, NULL)) {
2879 err = -EINVAL;
2880 goto out;
2881 }
2882
2883 	/* write back all dirty pages in the range */
2884 err = filemap_write_and_wait_range(inode->i_mapping,
2885 pg_start << PAGE_SHIFT,
2886 (pg_end << PAGE_SHIFT) - 1);
2887 if (err)
2888 goto out;
2889
2890 	/*
2891 	 * Look up mapping info in the extent cache; skip defragmenting if the
2892 	 * physical block addresses are contiguous.
2893 	 */
2894 if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
2895 if ((pgoff_t)ei.fofs + ei.len >= pg_end)
2896 goto out;
2897 }
2898
2899 map.m_lblk = pg_start;
2900 map.m_next_pgofs = &next_pgofs;
2901
2902 	/*
2903 	 * Look up mapping info in the dnode page cache; skip defragmenting if
2904 	 * all physical block addresses are contiguous, even if there are
2905 	 * hole(s) in the logical blocks.
2906 	 */
2907 while (map.m_lblk < pg_end) {
2908 map.m_len = pg_end - map.m_lblk;
2909 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
2910 if (err)
2911 goto out;
2912
2913 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2914 map.m_lblk = next_pgofs;
2915 continue;
2916 }
2917
2918 if (blk_end && blk_end != map.m_pblk)
2919 fragmented = true;
2920
2921 		/* record the total count of blocks that we're going to move */
2922 total += map.m_len;
2923
2924 blk_end = map.m_pblk + map.m_len;
2925
2926 map.m_lblk += map.m_len;
2927 }
2928
2929 if (!fragmented) {
2930 total = 0;
2931 goto out;
2932 }
2933
2934 sec_num = DIV_ROUND_UP(total, CAP_BLKS_PER_SEC(sbi));
2935
2936 	/*
2937 	 * Make sure there are enough free sections for LFS allocation; this
2938 	 * avoids the defragment running in SSR mode when free sections are
2939 	 * being allocated intensively.
2940 	 */
2941 if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2942 err = -EAGAIN;
2943 goto out;
2944 }
2945
2946 map.m_lblk = pg_start;
2947 map.m_len = pg_end - pg_start;
2948 total = 0;
2949
2950 while (map.m_lblk < pg_end) {
2951 pgoff_t idx;
2952 int cnt = 0;
2953
2954 do_map:
2955 map.m_len = pg_end - map.m_lblk;
2956 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
2957 if (err)
2958 goto clear_out;
2959
2960 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2961 map.m_lblk = next_pgofs;
2962 goto check;
2963 }
2964
2965 set_inode_flag(inode, FI_SKIP_WRITES);
2966
2967 idx = map.m_lblk;
2968 while (idx < map.m_lblk + map.m_len &&
2969 cnt < BLKS_PER_SEG(sbi)) {
2970 struct folio *folio;
2971
2972 folio = f2fs_get_lock_data_folio(inode, idx, true);
2973 if (IS_ERR(folio)) {
2974 err = PTR_ERR(folio);
2975 goto clear_out;
2976 }
2977
2978 f2fs_folio_wait_writeback(folio, DATA, true, true);
2979
2980 folio_mark_dirty(folio);
2981 set_page_private_gcing(&folio->page);
2982 f2fs_folio_put(folio, true);
2983
2984 idx++;
2985 cnt++;
2986 total++;
2987 }
2988
2989 map.m_lblk = idx;
2990 check:
2991 if (map.m_lblk < pg_end && cnt < BLKS_PER_SEG(sbi))
2992 goto do_map;
2993
2994 clear_inode_flag(inode, FI_SKIP_WRITES);
2995
2996 err = filemap_fdatawrite(inode->i_mapping);
2997 if (err)
2998 goto out;
2999 }
3000 clear_out:
3001 clear_inode_flag(inode, FI_SKIP_WRITES);
3002 out:
3003 clear_inode_flag(inode, FI_OPU_WRITE);
3004 unlock_out:
3005 inode_unlock(inode);
3006 if (!err)
3007 range->len = (u64)total << PAGE_SHIFT;
3008 return err;
3009 }
3010
3011 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
3012 {
3013 struct inode *inode = file_inode(filp);
3014 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3015 struct f2fs_defragment range;
3016 int err;
3017
3018 if (!capable(CAP_SYS_ADMIN))
3019 return -EPERM;
3020
3021 if (!S_ISREG(inode->i_mode))
3022 return -EINVAL;
3023
3024 if (f2fs_readonly(sbi->sb))
3025 return -EROFS;
3026
3027 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
3028 sizeof(range)))
3029 return -EFAULT;
3030
3031 /* verify alignment of offset & size */
3032 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
3033 return -EINVAL;
3034
3035 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
3036 max_file_blocks(inode)))
3037 return -EINVAL;
3038
3039 err = mnt_want_write_file(filp);
3040 if (err)
3041 return err;
3042
3043 err = f2fs_defragment_range(sbi, filp, &range);
3044 mnt_drop_write_file(filp);
3045
3046 if (range.len)
3047 f2fs_update_time(sbi, REQ_TIME);
3048 if (err < 0)
3049 return err;
3050
3051 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
3052 sizeof(range)))
3053 return -EFAULT;
3054
3055 return 0;
3056 }
3057
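/*
 * Back end of F2FS_IOC_MOVE_RANGE: exchange block-aligned data between two
 * regular, unencrypted, uncompressed, unpinned files on the same filesystem
 * by swapping the underlying block mappings instead of copying data.
 */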
3058 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
3059 struct file *file_out, loff_t pos_out, size_t len)
3060 {
3061 struct inode *src = file_inode(file_in);
3062 struct inode *dst = file_inode(file_out);
3063 struct f2fs_sb_info *sbi = F2FS_I_SB(src);
3064 size_t olen = len, dst_max_i_size = 0;
3065 size_t dst_osize;
3066 int ret;
3067
3068 if (file_in->f_path.mnt != file_out->f_path.mnt ||
3069 src->i_sb != dst->i_sb)
3070 return -EXDEV;
3071
3072 if (unlikely(f2fs_readonly(src->i_sb)))
3073 return -EROFS;
3074
3075 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
3076 return -EINVAL;
3077
3078 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
3079 return -EOPNOTSUPP;
3080
3081 if (pos_out < 0 || pos_in < 0)
3082 return -EINVAL;
3083
3084 if (src == dst) {
3085 if (pos_in == pos_out)
3086 return 0;
3087 if (pos_out > pos_in && pos_out < pos_in + len)
3088 return -EINVAL;
3089 }
3090
3091 inode_lock(src);
3092 if (src != dst) {
3093 ret = -EBUSY;
3094 if (!inode_trylock(dst))
3095 goto out;
3096 }
3097
3098 if (f2fs_compressed_file(src) || f2fs_compressed_file(dst) ||
3099 f2fs_is_pinned_file(src) || f2fs_is_pinned_file(dst)) {
3100 ret = -EOPNOTSUPP;
3101 goto out_unlock;
3102 }
3103
3104 if (f2fs_is_atomic_file(src) || f2fs_is_atomic_file(dst)) {
3105 ret = -EINVAL;
3106 goto out_unlock;
3107 }
3108
3109 ret = -EINVAL;
3110 if (pos_in + len > src->i_size || pos_in + len < pos_in)
3111 goto out_unlock;
3112 if (len == 0)
3113 olen = len = src->i_size - pos_in;
3114 if (pos_in + len == src->i_size)
3115 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
3116 if (len == 0) {
3117 ret = 0;
3118 goto out_unlock;
3119 }
3120
3121 dst_osize = dst->i_size;
3122 if (pos_out + olen > dst->i_size)
3123 dst_max_i_size = pos_out + olen;
3124
3125 /* verify the end result is block aligned */
3126 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
3127 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
3128 !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
3129 goto out_unlock;
3130
3131 ret = f2fs_convert_inline_inode(src);
3132 if (ret)
3133 goto out_unlock;
3134
3135 ret = f2fs_convert_inline_inode(dst);
3136 if (ret)
3137 goto out_unlock;
3138
3139 /* write out all dirty pages from offset */
3140 ret = filemap_write_and_wait_range(src->i_mapping,
3141 pos_in, pos_in + len);
3142 if (ret)
3143 goto out_unlock;
3144
3145 ret = filemap_write_and_wait_range(dst->i_mapping,
3146 pos_out, pos_out + len);
3147 if (ret)
3148 goto out_unlock;
3149
3150 f2fs_balance_fs(sbi, true);
3151
3152 f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
3153 if (src != dst) {
3154 ret = -EBUSY;
3155 if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
3156 goto out_src;
3157 }
3158
3159 f2fs_lock_op(sbi);
3160 ret = __exchange_data_block(src, dst, F2FS_BYTES_TO_BLK(pos_in),
3161 F2FS_BYTES_TO_BLK(pos_out),
3162 F2FS_BYTES_TO_BLK(len), false);
3163
3164 if (!ret) {
3165 if (dst_max_i_size)
3166 f2fs_i_size_write(dst, dst_max_i_size);
3167 else if (dst_osize != dst->i_size)
3168 f2fs_i_size_write(dst, dst_osize);
3169 }
3170 f2fs_unlock_op(sbi);
3171
3172 if (src != dst)
3173 f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
3174 out_src:
3175 f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
3176 if (ret)
3177 goto out_unlock;
3178
3179 inode_set_mtime_to_ts(src, inode_set_ctime_current(src));
3180 f2fs_mark_inode_dirty_sync(src, false);
3181 if (src != dst) {
3182 inode_set_mtime_to_ts(dst, inode_set_ctime_current(dst));
3183 f2fs_mark_inode_dirty_sync(dst, false);
3184 }
3185 f2fs_update_time(sbi, REQ_TIME);
3186
3187 out_unlock:
3188 if (src != dst)
3189 inode_unlock(dst);
3190 out:
3191 inode_unlock(src);
3192 return ret;
3193 }
3194
3195 static int __f2fs_ioc_move_range(struct file *filp,
3196 struct f2fs_move_range *range)
3197 {
3198 int err;
3199
3200 if (!(filp->f_mode & FMODE_READ) ||
3201 !(filp->f_mode & FMODE_WRITE))
3202 return -EBADF;
3203
3204 CLASS(fd, dst)(range->dst_fd);
3205 if (fd_empty(dst))
3206 return -EBADF;
3207
3208 if (!(fd_file(dst)->f_mode & FMODE_WRITE))
3209 return -EBADF;
3210
3211 err = mnt_want_write_file(filp);
3212 if (err)
3213 return err;
3214
3215 err = f2fs_move_file_range(filp, range->pos_in, fd_file(dst),
3216 range->pos_out, range->len);
3217
3218 mnt_drop_write_file(filp);
3219 return err;
3220 }
3221
3222 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
3223 {
3224 struct f2fs_move_range range;
3225
3226 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
3227 sizeof(range)))
3228 return -EFAULT;
3229 return __f2fs_ioc_move_range(filp, &range);
3230 }
3231
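/*
 * F2FS_IOC_FLUSH_DEVICE: migrate data off one device of a multi-device
 * filesystem by running foreground GC, with block migration enabled, over
 * the requested number of segments.
 */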
3232 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
3233 {
3234 struct inode *inode = file_inode(filp);
3235 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3236 struct sit_info *sm = SIT_I(sbi);
3237 unsigned int start_segno = 0, end_segno = 0;
3238 unsigned int dev_start_segno = 0, dev_end_segno = 0;
3239 struct f2fs_flush_device range;
3240 struct f2fs_gc_control gc_control = {
3241 .init_gc_type = FG_GC,
3242 .should_migrate_blocks = true,
3243 .err_gc_skipped = true,
3244 .nr_free_secs = 0 };
3245 int ret;
3246
3247 if (!capable(CAP_SYS_ADMIN))
3248 return -EPERM;
3249
3250 if (f2fs_readonly(sbi->sb))
3251 return -EROFS;
3252
3253 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
3254 return -EINVAL;
3255
3256 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
3257 sizeof(range)))
3258 return -EFAULT;
3259
3260 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
3261 __is_large_section(sbi)) {
3262 f2fs_warn(sbi, "Can't flush %u in %d for SEGS_PER_SEC %u != 1",
3263 range.dev_num, sbi->s_ndevs, SEGS_PER_SEC(sbi));
3264 return -EINVAL;
3265 }
3266
3267 ret = mnt_want_write_file(filp);
3268 if (ret)
3269 return ret;
3270
3271 if (range.dev_num != 0)
3272 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
3273 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
3274
3275 start_segno = sm->last_victim[FLUSH_DEVICE];
3276 if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
3277 start_segno = dev_start_segno;
3278 end_segno = min(start_segno + range.segments, dev_end_segno);
3279
3280 while (start_segno < end_segno) {
3281 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
3282 ret = -EBUSY;
3283 goto out;
3284 }
3285 sm->last_victim[GC_CB] = end_segno + 1;
3286 sm->last_victim[GC_GREEDY] = end_segno + 1;
3287 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
3288
3289 gc_control.victim_segno = start_segno;
3290 stat_inc_gc_call_count(sbi, FOREGROUND);
3291 ret = f2fs_gc(sbi, &gc_control);
3292 if (ret == -EAGAIN)
3293 ret = 0;
3294 else if (ret < 0)
3295 break;
3296 start_segno++;
3297 }
3298 out:
3299 mnt_drop_write_file(filp);
3300 return ret;
3301 }
3302
3303 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
3304 {
3305 struct inode *inode = file_inode(filp);
3306 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
3307
3308 /* Must validate to set it with SQLite behavior in Android. */
3309 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
3310
3311 return put_user(sb_feature, (u32 __user *)arg);
3312 }
3313
3314 #ifdef CONFIG_QUOTA
3315 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3316 {
3317 struct dquot *transfer_to[MAXQUOTAS] = {};
3318 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3319 struct super_block *sb = sbi->sb;
3320 int err;
3321
3322 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3323 if (IS_ERR(transfer_to[PRJQUOTA]))
3324 return PTR_ERR(transfer_to[PRJQUOTA]);
3325
3326 err = __dquot_transfer(inode, transfer_to);
3327 if (err)
3328 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3329 dqput(transfer_to[PRJQUOTA]);
3330 return err;
3331 }
3332
3333 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3334 {
3335 struct f2fs_inode_info *fi = F2FS_I(inode);
3336 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3337 struct f2fs_inode *ri = NULL;
3338 kprojid_t kprojid;
3339 int err;
3340
3341 if (!f2fs_sb_has_project_quota(sbi)) {
3342 if (projid != F2FS_DEF_PROJID)
3343 return -EOPNOTSUPP;
3344 else
3345 return 0;
3346 }
3347
3348 if (!f2fs_has_extra_attr(inode))
3349 return -EOPNOTSUPP;
3350
3351 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3352
3353 if (projid_eq(kprojid, fi->i_projid))
3354 return 0;
3355
3356 err = -EPERM;
3357 	/* Is it a quota file? Do not allow the user to mess with it */
3358 if (IS_NOQUOTA(inode))
3359 return err;
3360
3361 if (!F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
3362 return -EOVERFLOW;
3363
3364 err = f2fs_dquot_initialize(inode);
3365 if (err)
3366 return err;
3367
3368 f2fs_lock_op(sbi);
3369 err = f2fs_transfer_project_quota(inode, kprojid);
3370 if (err)
3371 goto out_unlock;
3372
3373 fi->i_projid = kprojid;
3374 inode_set_ctime_current(inode);
3375 f2fs_mark_inode_dirty_sync(inode, true);
3376 out_unlock:
3377 f2fs_unlock_op(sbi);
3378 return err;
3379 }
3380 #else
3381 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3382 {
3383 return 0;
3384 }
3385
3386 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3387 {
3388 if (projid != F2FS_DEF_PROJID)
3389 return -EOPNOTSUPP;
3390 return 0;
3391 }
3392 #endif
3393
3394 int f2fs_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
3395 {
3396 struct inode *inode = d_inode(dentry);
3397 struct f2fs_inode_info *fi = F2FS_I(inode);
3398 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
3399
3400 if (IS_ENCRYPTED(inode))
3401 fsflags |= FS_ENCRYPT_FL;
3402 if (IS_VERITY(inode))
3403 fsflags |= FS_VERITY_FL;
3404 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3405 fsflags |= FS_INLINE_DATA_FL;
3406 if (is_inode_flag_set(inode, FI_PIN_FILE))
3407 fsflags |= FS_NOCOW_FL;
3408
3409 fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);
3410
3411 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3412 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3413
3414 return 0;
3415 }
3416
3417 int f2fs_fileattr_set(struct mnt_idmap *idmap,
3418 struct dentry *dentry, struct file_kattr *fa)
3419 {
3420 struct inode *inode = d_inode(dentry);
3421 u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
3422 u32 iflags;
3423 int err;
3424
3425 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3426 return -EIO;
3427 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
3428 return -ENOSPC;
3429 if (fsflags & ~F2FS_GETTABLE_FS_FL)
3430 return -EOPNOTSUPP;
3431 fsflags &= F2FS_SETTABLE_FS_FL;
3432 if (!fa->flags_valid)
3433 mask &= FS_COMMON_FL;
3434
3435 iflags = f2fs_fsflags_to_iflags(fsflags);
3436 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3437 return -EOPNOTSUPP;
3438
3439 err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
3440 if (!err)
3441 err = f2fs_ioc_setproject(inode, fa->fsx_projid);
3442
3443 return err;
3444 }
3445
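/*
 * Pinned-file bookkeeping: bump the GC failure counter when @inc is set,
 * and un-pin the file once the counter reaches gc_pin_file_threshold.
 */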
3446 int f2fs_pin_file_control(struct inode *inode, bool inc)
3447 {
3448 struct f2fs_inode_info *fi = F2FS_I(inode);
3449 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3450
3451 if (IS_DEVICE_ALIASING(inode))
3452 return -EINVAL;
3453
3454 if (fi->i_gc_failures >= sbi->gc_pin_file_threshold) {
3455 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3456 __func__, inode->i_ino, fi->i_gc_failures);
3457 clear_inode_flag(inode, FI_PIN_FILE);
3458 return -EAGAIN;
3459 }
3460
3461 /* Use i_gc_failures for normal file as a risk signal. */
3462 if (inc)
3463 f2fs_i_gc_failures_write(inode, fi->i_gc_failures + 1);
3464
3465 return 0;
3466 }
3467
3468 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3469 {
3470 struct inode *inode = file_inode(filp);
3471 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3472 __u32 pin;
3473 int ret = 0;
3474
3475 if (get_user(pin, (__u32 __user *)arg))
3476 return -EFAULT;
3477
3478 if (!S_ISREG(inode->i_mode))
3479 return -EINVAL;
3480
3481 if (f2fs_readonly(sbi->sb))
3482 return -EROFS;
3483
3484 if (!pin && IS_DEVICE_ALIASING(inode))
3485 return -EOPNOTSUPP;
3486
3487 ret = mnt_want_write_file(filp);
3488 if (ret)
3489 return ret;
3490
3491 inode_lock(inode);
3492
3493 if (f2fs_is_atomic_file(inode)) {
3494 ret = -EINVAL;
3495 goto out;
3496 }
3497
3498 if (!pin) {
3499 clear_inode_flag(inode, FI_PIN_FILE);
3500 f2fs_i_gc_failures_write(inode, 0);
3501 goto done;
3502 } else if (f2fs_is_pinned_file(inode)) {
3503 goto done;
3504 }
3505
3506 if (F2FS_HAS_BLOCKS(inode)) {
3507 ret = -EFBIG;
3508 goto out;
3509 }
3510
3511 /* Let's allow file pinning on zoned device. */
3512 if (!f2fs_sb_has_blkzoned(sbi) &&
3513 f2fs_should_update_outplace(inode, NULL)) {
3514 ret = -EINVAL;
3515 goto out;
3516 }
3517
3518 if (f2fs_pin_file_control(inode, false)) {
3519 ret = -EAGAIN;
3520 goto out;
3521 }
3522
3523 ret = f2fs_convert_inline_inode(inode);
3524 if (ret)
3525 goto out;
3526
3527 if (!f2fs_disable_compressed_file(inode)) {
3528 ret = -EOPNOTSUPP;
3529 goto out;
3530 }
3531
3532 set_inode_flag(inode, FI_PIN_FILE);
3533 ret = F2FS_I(inode)->i_gc_failures;
3534 done:
3535 f2fs_update_time(sbi, REQ_TIME);
3536 out:
3537 inode_unlock(inode);
3538 mnt_drop_write_file(filp);
3539 return ret;
3540 }
3541
3542 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3543 {
3544 struct inode *inode = file_inode(filp);
3545 __u32 pin = 0;
3546
3547 if (is_inode_flag_set(inode, FI_PIN_FILE))
3548 pin = F2FS_I(inode)->i_gc_failures;
3549 return put_user(pin, (u32 __user *)arg);
3550 }
3551
3552 static int f2fs_ioc_get_dev_alias_file(struct file *filp, unsigned long arg)
3553 {
3554 return put_user(IS_DEVICE_ALIASING(file_inode(filp)) ? 1 : 0,
3555 (u32 __user *)arg);
3556 }
3557
3558 static int f2fs_ioc_io_prio(struct file *filp, unsigned long arg)
3559 {
3560 struct inode *inode = file_inode(filp);
3561 __u32 level;
3562
3563 if (get_user(level, (__u32 __user *)arg))
3564 return -EFAULT;
3565
3566 if (!S_ISREG(inode->i_mode) || level >= F2FS_IOPRIO_MAX)
3567 return -EINVAL;
3568
3569 inode_lock(inode);
3570 F2FS_I(inode)->ioprio_hint = level;
3571 inode_unlock(inode);
3572 return 0;
3573 }
3574
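/*
 * Walk the whole file with F2FS_GET_BLOCK_PRECACHE so that its block
 * mappings are populated into the extent cache ahead of time.
 */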
3575 int f2fs_precache_extents(struct inode *inode)
3576 {
3577 struct f2fs_inode_info *fi = F2FS_I(inode);
3578 struct f2fs_map_blocks map;
3579 pgoff_t m_next_extent;
3580 loff_t end;
3581 int err;
3582
3583 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3584 return -EOPNOTSUPP;
3585
3586 map.m_lblk = 0;
3587 map.m_pblk = 0;
3588 map.m_next_pgofs = NULL;
3589 map.m_next_extent = &m_next_extent;
3590 map.m_seg_type = NO_CHECK_TYPE;
3591 map.m_may_create = false;
3592 end = F2FS_BLK_ALIGN(i_size_read(inode));
3593
3594 while (map.m_lblk < end) {
3595 map.m_len = end - map.m_lblk;
3596
3597 f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
3598 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRECACHE);
3599 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
3600 if (err || !map.m_len)
3601 return err;
3602
3603 map.m_lblk = m_next_extent;
3604 }
3605
3606 return 0;
3607 }
3608
3609 static int f2fs_ioc_precache_extents(struct file *filp)
3610 {
3611 return f2fs_precache_extents(file_inode(filp));
3612 }
3613
3614 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3615 {
3616 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3617 __u64 block_count;
3618
3619 if (!capable(CAP_SYS_ADMIN))
3620 return -EPERM;
3621
3622 if (f2fs_readonly(sbi->sb))
3623 return -EROFS;
3624
3625 if (copy_from_user(&block_count, (void __user *)arg,
3626 sizeof(block_count)))
3627 return -EFAULT;
3628
3629 return f2fs_resize_fs(filp, block_count);
3630 }
3631
3632 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3633 {
3634 struct inode *inode = file_inode(filp);
3635
3636 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3637
3638 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3639 f2fs_warn(F2FS_I_SB(inode),
3640 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3641 inode->i_ino);
3642 return -EOPNOTSUPP;
3643 }
3644
3645 return fsverity_ioctl_enable(filp, (const void __user *)arg);
3646 }
3647
3648 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3649 {
3650 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3651 return -EOPNOTSUPP;
3652
3653 return fsverity_ioctl_measure(filp, (void __user *)arg);
3654 }
3655
3656 static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3657 {
3658 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3659 return -EOPNOTSUPP;
3660
3661 return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3662 }
3663
3664 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3665 {
3666 struct inode *inode = file_inode(filp);
3667 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3668 char *vbuf;
3669 int count;
3670 int err = 0;
3671
3672 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3673 if (!vbuf)
3674 return -ENOMEM;
3675
3676 f2fs_down_read(&sbi->sb_lock);
3677 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3678 ARRAY_SIZE(sbi->raw_super->volume_name),
3679 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3680 f2fs_up_read(&sbi->sb_lock);
3681
3682 if (copy_to_user((char __user *)arg, vbuf,
3683 min(FSLABEL_MAX, count)))
3684 err = -EFAULT;
3685
3686 kfree(vbuf);
3687 return err;
3688 }
3689
3690 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3691 {
3692 struct inode *inode = file_inode(filp);
3693 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3694 char *vbuf;
3695 int err = 0;
3696
3697 if (!capable(CAP_SYS_ADMIN))
3698 return -EPERM;
3699
3700 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3701 if (IS_ERR(vbuf))
3702 return PTR_ERR(vbuf);
3703
3704 err = mnt_want_write_file(filp);
3705 if (err)
3706 goto out;
3707
3708 f2fs_down_write(&sbi->sb_lock);
3709
3710 memset(sbi->raw_super->volume_name, 0,
3711 sizeof(sbi->raw_super->volume_name));
3712 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3713 sbi->raw_super->volume_name,
3714 ARRAY_SIZE(sbi->raw_super->volume_name));
3715
3716 err = f2fs_commit_super(sbi, false);
3717
3718 f2fs_up_write(&sbi->sb_lock);
3719
3720 mnt_drop_write_file(filp);
3721 out:
3722 kfree(vbuf);
3723 return err;
3724 }
3725
3726 static int f2fs_get_compress_blocks(struct inode *inode, __u64 *blocks)
3727 {
3728 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3729 return -EOPNOTSUPP;
3730
3731 if (!f2fs_compressed_file(inode))
3732 return -EINVAL;
3733
3734 *blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3735
3736 return 0;
3737 }
3738
3739 static int f2fs_ioc_get_compress_blocks(struct file *filp, unsigned long arg)
3740 {
3741 struct inode *inode = file_inode(filp);
3742 __u64 blocks;
3743 int ret;
3744
3745 ret = f2fs_get_compress_blocks(inode, &blocks);
3746 if (ret < 0)
3747 return ret;
3748
3749 return put_user(blocks, (u64 __user *)arg);
3750 }
3751
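/*
 * For each compressed cluster in the dnode range, turn reserved NEW_ADDR
 * slots back into NULL_ADDR and return the freed blocks to the valid-block
 * accounting; returns the number of released blocks.
 */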
3752 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3753 {
3754 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3755 unsigned int released_blocks = 0;
3756 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3757 block_t blkaddr;
3758 int i;
3759
3760 for (i = 0; i < count; i++) {
3761 blkaddr = data_blkaddr(dn->inode, dn->node_folio,
3762 dn->ofs_in_node + i);
3763
3764 if (!__is_valid_data_blkaddr(blkaddr))
3765 continue;
3766 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3767 DATA_GENERIC_ENHANCE)))
3768 return -EFSCORRUPTED;
3769 }
3770
3771 while (count) {
3772 int compr_blocks = 0;
3773
3774 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3775 blkaddr = f2fs_data_blkaddr(dn);
3776
3777 if (i == 0) {
3778 if (blkaddr == COMPRESS_ADDR)
3779 continue;
3780 dn->ofs_in_node += cluster_size;
3781 goto next;
3782 }
3783
3784 if (__is_valid_data_blkaddr(blkaddr))
3785 compr_blocks++;
3786
3787 if (blkaddr != NEW_ADDR)
3788 continue;
3789
3790 f2fs_set_data_blkaddr(dn, NULL_ADDR);
3791 }
3792
3793 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3794 dec_valid_block_count(sbi, dn->inode,
3795 cluster_size - compr_blocks);
3796
3797 released_blocks += cluster_size - compr_blocks;
3798 next:
3799 count -= cluster_size;
3800 }
3801
3802 return released_blocks;
3803 }
3804
3805 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3806 {
3807 struct inode *inode = file_inode(filp);
3808 struct f2fs_inode_info *fi = F2FS_I(inode);
3809 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3810 pgoff_t page_idx = 0, last_idx;
3811 unsigned int released_blocks = 0;
3812 int ret;
3813 int writecount;
3814
3815 if (!f2fs_sb_has_compression(sbi))
3816 return -EOPNOTSUPP;
3817
3818 if (f2fs_readonly(sbi->sb))
3819 return -EROFS;
3820
3821 ret = mnt_want_write_file(filp);
3822 if (ret)
3823 return ret;
3824
3825 f2fs_balance_fs(sbi, true);
3826
3827 inode_lock(inode);
3828
3829 writecount = atomic_read(&inode->i_writecount);
3830 if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3831 (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3832 ret = -EBUSY;
3833 goto out;
3834 }
3835
3836 if (!f2fs_compressed_file(inode) ||
3837 is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3838 ret = -EINVAL;
3839 goto out;
3840 }
3841
3842 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3843 if (ret)
3844 goto out;
3845
3846 if (!atomic_read(&fi->i_compr_blocks)) {
3847 ret = -EPERM;
3848 goto out;
3849 }
3850
3851 set_inode_flag(inode, FI_COMPRESS_RELEASED);
3852 inode_set_ctime_current(inode);
3853 f2fs_mark_inode_dirty_sync(inode, true);
3854
3855 f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
3856 filemap_invalidate_lock(inode->i_mapping);
3857
3858 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3859
3860 while (page_idx < last_idx) {
3861 struct dnode_of_data dn;
3862 pgoff_t end_offset, count;
3863
3864 f2fs_lock_op(sbi);
3865
3866 set_new_dnode(&dn, inode, NULL, NULL, 0);
3867 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3868 if (ret) {
3869 f2fs_unlock_op(sbi);
3870 if (ret == -ENOENT) {
3871 page_idx = f2fs_get_next_page_offset(&dn,
3872 page_idx);
3873 ret = 0;
3874 continue;
3875 }
3876 break;
3877 }
3878
3879 end_offset = ADDRS_PER_PAGE(&dn.node_folio->page, inode);
3880 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3881 count = round_up(count, fi->i_cluster_size);
3882
3883 ret = release_compress_blocks(&dn, count);
3884
3885 f2fs_put_dnode(&dn);
3886
3887 f2fs_unlock_op(sbi);
3888
3889 if (ret < 0)
3890 break;
3891
3892 page_idx += count;
3893 released_blocks += ret;
3894 }
3895
3896 filemap_invalidate_unlock(inode->i_mapping);
3897 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
3898 out:
3899 if (released_blocks)
3900 f2fs_update_time(sbi, REQ_TIME);
3901 inode_unlock(inode);
3902
3903 mnt_drop_write_file(filp);
3904
3905 if (ret >= 0) {
3906 ret = put_user(released_blocks, (u64 __user *)arg);
3907 } else if (released_blocks &&
3908 atomic_read(&fi->i_compr_blocks)) {
3909 set_sbi_flag(sbi, SBI_NEED_FSCK);
3910 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3911 "iblocks=%llu, released=%u, compr_blocks=%u, "
3912 "run fsck to fix.",
3913 __func__, inode->i_ino, inode->i_blocks,
3914 released_blocks,
3915 atomic_read(&fi->i_compr_blocks));
3916 }
3917
3918 return ret;
3919 }
3920
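/*
 * Inverse of release_compress_blocks(): for each compressed cluster,
 * re-charge the valid-block count for the previously released slots and
 * mark them NEW_ADDR again, accumulating the total in *reserved_blocks.
 */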
3921 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
3922 unsigned int *reserved_blocks)
3923 {
3924 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3925 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3926 block_t blkaddr;
3927 int i;
3928
3929 for (i = 0; i < count; i++) {
3930 blkaddr = data_blkaddr(dn->inode, dn->node_folio,
3931 dn->ofs_in_node + i);
3932
3933 if (!__is_valid_data_blkaddr(blkaddr))
3934 continue;
3935 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3936 DATA_GENERIC_ENHANCE)))
3937 return -EFSCORRUPTED;
3938 }
3939
3940 while (count) {
3941 int compr_blocks = 0;
3942 blkcnt_t reserved = 0;
3943 blkcnt_t to_reserved;
3944 int ret;
3945
3946 for (i = 0; i < cluster_size; i++) {
3947 blkaddr = data_blkaddr(dn->inode, dn->node_folio,
3948 dn->ofs_in_node + i);
3949
3950 if (i == 0) {
3951 if (blkaddr != COMPRESS_ADDR) {
3952 dn->ofs_in_node += cluster_size;
3953 goto next;
3954 }
3955 continue;
3956 }
3957
3958 /*
3959 * The compressed cluster may not have been released if
3960 * release_compress_blocks() failed earlier, so NEW_ADDR
3961 * is a possible case here.
3962 */
3963 if (blkaddr == NEW_ADDR) {
3964 reserved++;
3965 continue;
3966 }
3967 if (__is_valid_data_blkaddr(blkaddr)) {
3968 compr_blocks++;
3969 continue;
3970 }
3971 }
3972
3973 to_reserved = cluster_size - compr_blocks - reserved;
3974
3975 /* for the case where all blocks in the cluster were already reserved */
3976 if (reserved && to_reserved == 1) {
3977 dn->ofs_in_node += cluster_size;
3978 goto next;
3979 }
3980
3981 ret = inc_valid_block_count(sbi, dn->inode,
3982 &to_reserved, false);
3983 if (unlikely(ret))
3984 return ret;
3985
3986 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3987 if (f2fs_data_blkaddr(dn) == NULL_ADDR)
3988 f2fs_set_data_blkaddr(dn, NEW_ADDR);
3989 }
3990
3991 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3992
3993 *reserved_blocks += to_reserved;
3994 next:
3995 count -= cluster_size;
3996 }
3997
3998 return 0;
3999 }
4000
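/*
 * F2FS_IOC_RESERVE_COMPRESS_BLOCKS: the inverse of the release ioctl above.
 * It re-reserves the on-disk blocks of a compressed file whose space was
 * previously released (FI_COMPRESS_RELEASED set), clears that flag on
 * success, and returns the number of reserved blocks through the u64 the
 * caller passes in @arg.  A minimal userspace sketch (the path is only an
 * example):
 *
 *	u64 blocks;
 *	int fd = open("/mnt/f2fs/file", O_RDWR);
 *	ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &blocks);
 */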
4001 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
4002 {
4003 struct inode *inode = file_inode(filp);
4004 struct f2fs_inode_info *fi = F2FS_I(inode);
4005 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4006 pgoff_t page_idx = 0, last_idx;
4007 unsigned int reserved_blocks = 0;
4008 int ret;
4009
4010 if (!f2fs_sb_has_compression(sbi))
4011 return -EOPNOTSUPP;
4012
4013 if (f2fs_readonly(sbi->sb))
4014 return -EROFS;
4015
4016 ret = mnt_want_write_file(filp);
4017 if (ret)
4018 return ret;
4019
4020 f2fs_balance_fs(sbi, true);
4021
4022 inode_lock(inode);
4023
4024 if (!f2fs_compressed_file(inode) ||
4025 !is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4026 ret = -EINVAL;
4027 goto unlock_inode;
4028 }
4029
4030 if (atomic_read(&fi->i_compr_blocks))
4031 goto unlock_inode;
4032
4033 f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
4034 filemap_invalidate_lock(inode->i_mapping);
4035
4036 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4037
4038 while (page_idx < last_idx) {
4039 struct dnode_of_data dn;
4040 pgoff_t end_offset, count;
4041
4042 f2fs_lock_op(sbi);
4043
4044 set_new_dnode(&dn, inode, NULL, NULL, 0);
4045 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
4046 if (ret) {
4047 f2fs_unlock_op(sbi);
4048 if (ret == -ENOENT) {
4049 page_idx = f2fs_get_next_page_offset(&dn,
4050 page_idx);
4051 ret = 0;
4052 continue;
4053 }
4054 break;
4055 }
4056
4057 end_offset = ADDRS_PER_PAGE(&dn.node_folio->page, inode);
4058 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
4059 count = round_up(count, fi->i_cluster_size);
4060
4061 ret = reserve_compress_blocks(&dn, count, &reserved_blocks);
4062
4063 f2fs_put_dnode(&dn);
4064
4065 f2fs_unlock_op(sbi);
4066
4067 if (ret < 0)
4068 break;
4069
4070 page_idx += count;
4071 }
4072
4073 filemap_invalidate_unlock(inode->i_mapping);
4074 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
4075
4076 if (!ret) {
4077 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
4078 inode_set_ctime_current(inode);
4079 f2fs_mark_inode_dirty_sync(inode, true);
4080 }
4081 unlock_inode:
4082 if (reserved_blocks)
4083 f2fs_update_time(sbi, REQ_TIME);
4084 inode_unlock(inode);
4085 mnt_drop_write_file(filp);
4086
4087 if (!ret) {
4088 ret = put_user(reserved_blocks, (u64 __user *)arg);
4089 } else if (reserved_blocks &&
4090 atomic_read(&fi->i_compr_blocks)) {
4091 set_sbi_flag(sbi, SBI_NEED_FSCK);
4092 f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
4093 "iblocks=%llu, reserved=%u, compr_blocks=%u, "
4094 "run fsck to fix.",
4095 __func__, inode->i_ino, inode->i_blocks,
4096 reserved_blocks,
4097 atomic_read(&fi->i_compr_blocks));
4098 }
4099
4100 return ret;
4101 }
4102
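/*
 * Erase one contiguous extent of @len blocks at @block on @bdev.  With
 * F2FS_TRIM_FILE_DISCARD the extent is secure-erased if the device supports
 * it, otherwise discarded; with F2FS_TRIM_FILE_ZEROOUT it is additionally
 * zeroed out (via fscrypt_zeroout_range() for encrypted inodes).
 */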
4103 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
4104 pgoff_t off, block_t block, block_t len, u32 flags)
4105 {
4106 sector_t sector = SECTOR_FROM_BLOCK(block);
4107 sector_t nr_sects = SECTOR_FROM_BLOCK(len);
4108 int ret = 0;
4109
4110 if (flags & F2FS_TRIM_FILE_DISCARD) {
4111 if (bdev_max_secure_erase_sectors(bdev))
4112 ret = blkdev_issue_secure_erase(bdev, sector, nr_sects,
4113 GFP_NOFS);
4114 else
4115 ret = blkdev_issue_discard(bdev, sector, nr_sects,
4116 GFP_NOFS);
4117 }
4118
4119 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
4120 if (IS_ENCRYPTED(inode))
4121 ret = fscrypt_zeroout_range(inode, off, block, len);
4122 else
4123 ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
4124 GFP_NOFS, 0);
4125 }
4126
4127 return ret;
4128 }
4129
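/*
 * F2FS_IOC_SEC_TRIM_FILE: discard/zero the on-disk blocks backing a byte
 * range of a regular file, walking the range block by block and batching
 * physically contiguous runs into single f2fs_secure_erase() calls.  The
 * range and mode come from struct f2fs_sectrim_range; a hypothetical
 * invocation could look like:
 *
 *	struct f2fs_sectrim_range r = {
 *		.start = 0,
 *		.len   = (u64)-1,	// to end of file
 *		.flags = F2FS_TRIM_FILE_ZEROOUT,
 *	};
 *	ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &r);
 */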
4130 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
4131 {
4132 struct inode *inode = file_inode(filp);
4133 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4134 struct address_space *mapping = inode->i_mapping;
4135 struct block_device *prev_bdev = NULL;
4136 struct f2fs_sectrim_range range;
4137 pgoff_t index, pg_end, prev_index = 0;
4138 block_t prev_block = 0, len = 0;
4139 loff_t end_addr;
4140 bool to_end = false;
4141 int ret = 0;
4142
4143 if (!(filp->f_mode & FMODE_WRITE))
4144 return -EBADF;
4145
4146 if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
4147 sizeof(range)))
4148 return -EFAULT;
4149
4150 if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
4151 !S_ISREG(inode->i_mode))
4152 return -EINVAL;
4153
4154 if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
4155 !f2fs_hw_support_discard(sbi)) ||
4156 ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
4157 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
4158 return -EOPNOTSUPP;
4159
4160 ret = mnt_want_write_file(filp);
4161 if (ret)
4162 return ret;
4163 inode_lock(inode);
4164
4165 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
4166 range.start >= inode->i_size) {
4167 ret = -EINVAL;
4168 goto err;
4169 }
4170
4171 if (range.len == 0)
4172 goto err;
4173
4174 if (inode->i_size - range.start > range.len) {
4175 end_addr = range.start + range.len;
4176 } else {
4177 end_addr = range.len == (u64)-1 ?
4178 sbi->sb->s_maxbytes : inode->i_size;
4179 to_end = true;
4180 }
4181
4182 if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
4183 (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
4184 ret = -EINVAL;
4185 goto err;
4186 }
4187
4188 index = F2FS_BYTES_TO_BLK(range.start);
4189 pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
4190
4191 ret = f2fs_convert_inline_inode(inode);
4192 if (ret)
4193 goto err;
4194
4195 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4196 filemap_invalidate_lock(mapping);
4197
4198 ret = filemap_write_and_wait_range(mapping, range.start,
4199 to_end ? LLONG_MAX : end_addr - 1);
4200 if (ret)
4201 goto out;
4202
4203 truncate_inode_pages_range(mapping, range.start,
4204 to_end ? -1 : end_addr - 1);
4205
4206 while (index < pg_end) {
4207 struct dnode_of_data dn;
4208 pgoff_t end_offset, count;
4209 int i;
4210
4211 set_new_dnode(&dn, inode, NULL, NULL, 0);
4212 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
4213 if (ret) {
4214 if (ret == -ENOENT) {
4215 index = f2fs_get_next_page_offset(&dn, index);
4216 continue;
4217 }
4218 goto out;
4219 }
4220
4221 end_offset = ADDRS_PER_PAGE(&dn.node_folio->page, inode);
4222 count = min(end_offset - dn.ofs_in_node, pg_end - index);
4223 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
4224 struct block_device *cur_bdev;
4225 block_t blkaddr = f2fs_data_blkaddr(&dn);
4226
4227 if (!__is_valid_data_blkaddr(blkaddr))
4228 continue;
4229
4230 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
4231 DATA_GENERIC_ENHANCE)) {
4232 ret = -EFSCORRUPTED;
4233 f2fs_put_dnode(&dn);
4234 goto out;
4235 }
4236
4237 cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
4238 if (f2fs_is_multi_device(sbi)) {
4239 int di = f2fs_target_device_index(sbi, blkaddr);
4240
4241 blkaddr -= FDEV(di).start_blk;
4242 }
4243
4244 if (len) {
4245 if (prev_bdev == cur_bdev &&
4246 index == prev_index + len &&
4247 blkaddr == prev_block + len) {
4248 len++;
4249 } else {
4250 ret = f2fs_secure_erase(prev_bdev,
4251 inode, prev_index, prev_block,
4252 len, range.flags);
4253 if (ret) {
4254 f2fs_put_dnode(&dn);
4255 goto out;
4256 }
4257
4258 len = 0;
4259 }
4260 }
4261
4262 if (!len) {
4263 prev_bdev = cur_bdev;
4264 prev_index = index;
4265 prev_block = blkaddr;
4266 len = 1;
4267 }
4268 }
4269
4270 f2fs_put_dnode(&dn);
4271
4272 if (fatal_signal_pending(current)) {
4273 ret = -EINTR;
4274 goto out;
4275 }
4276 cond_resched();
4277 }
4278
4279 if (len)
4280 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
4281 prev_block, len, range.flags);
4282 f2fs_update_time(sbi, REQ_TIME);
4283 out:
4284 filemap_invalidate_unlock(mapping);
4285 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4286 err:
4287 inode_unlock(inode);
4288 mnt_drop_write_file(filp);
4289
4290 return ret;
4291 }
4292
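/*
 * F2FS_IOC_GET_COMPRESS_OPTION / F2FS_IOC_SET_COMPRESS_OPTION operate on
 * struct f2fs_comp_option { algorithm, log_cluster_size }.  Get reports the
 * per-inode settings; set (below) rewrites them, but only for a compressed
 * file that has no allocated blocks, is not mmapped and has no dirty pages.
 */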
4293 static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
4294 {
4295 struct inode *inode = file_inode(filp);
4296 struct f2fs_comp_option option;
4297
4298 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
4299 return -EOPNOTSUPP;
4300
4301 inode_lock_shared(inode);
4302
4303 if (!f2fs_compressed_file(inode)) {
4304 inode_unlock_shared(inode);
4305 return -ENODATA;
4306 }
4307
4308 option.algorithm = F2FS_I(inode)->i_compress_algorithm;
4309 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
4310
4311 inode_unlock_shared(inode);
4312
4313 if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
4314 sizeof(option)))
4315 return -EFAULT;
4316
4317 return 0;
4318 }
4319
4320 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
4321 {
4322 struct inode *inode = file_inode(filp);
4323 struct f2fs_inode_info *fi = F2FS_I(inode);
4324 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4325 struct f2fs_comp_option option;
4326 int ret = 0;
4327
4328 if (!f2fs_sb_has_compression(sbi))
4329 return -EOPNOTSUPP;
4330
4331 if (!(filp->f_mode & FMODE_WRITE))
4332 return -EBADF;
4333
4334 if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
4335 sizeof(option)))
4336 return -EFAULT;
4337
4338 if (option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
4339 option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
4340 option.algorithm >= COMPRESS_MAX)
4341 return -EINVAL;
4342
4343 ret = mnt_want_write_file(filp);
4344 if (ret)
4345 return ret;
4346 inode_lock(inode);
4347
4348 f2fs_down_write(&F2FS_I(inode)->i_sem);
4349 if (!f2fs_compressed_file(inode)) {
4350 ret = -EINVAL;
4351 goto out;
4352 }
4353
4354 if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
4355 ret = -EBUSY;
4356 goto out;
4357 }
4358
4359 if (F2FS_HAS_BLOCKS(inode)) {
4360 ret = -EFBIG;
4361 goto out;
4362 }
4363
4364 fi->i_compress_algorithm = option.algorithm;
4365 fi->i_log_cluster_size = option.log_cluster_size;
4366 fi->i_cluster_size = BIT(option.log_cluster_size);
4367 /* Set default level */
4368 if (fi->i_compress_algorithm == COMPRESS_ZSTD)
4369 fi->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
4370 else
4371 fi->i_compress_level = 0;
4372 /* Adjust mount option level */
4373 if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
4374 F2FS_OPTION(sbi).compress_level)
4375 fi->i_compress_level = F2FS_OPTION(sbi).compress_level;
4376 f2fs_mark_inode_dirty_sync(inode, true);
4377
4378 if (!f2fs_is_compress_backend_ready(inode))
4379 f2fs_warn(sbi, "compression algorithm is successfully set, "
4380 "but current kernel doesn't support this algorithm.");
4381 out:
4382 f2fs_up_write(&fi->i_sem);
4383 inode_unlock(inode);
4384 mnt_drop_write_file(filp);
4385
4386 return ret;
4387 }
4388
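/*
 * Read @len pages starting at @page_idx into the page cache and dirty them
 * again so the next writeback rewrites the data; the decompress/compress
 * ioctls below use this to force clusters to be re-encoded in the desired
 * form.  folio_put_refs(folio, 2) drops the references taken by
 * read_cache_folio() and filemap_lock_folio().
 */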
4389 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
4390 {
4391 DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
4392 struct address_space *mapping = inode->i_mapping;
4393 struct folio *folio;
4394 pgoff_t redirty_idx = page_idx;
4395 int page_len = 0, ret = 0;
4396
4397 page_cache_ra_unbounded(&ractl, len, 0);
4398
4399 do {
4400 folio = read_cache_folio(mapping, page_idx, NULL, NULL);
4401 if (IS_ERR(folio)) {
4402 ret = PTR_ERR(folio);
4403 break;
4404 }
4405 page_len += folio_nr_pages(folio) - (page_idx - folio->index);
4406 page_idx = folio_next_index(folio);
4407 } while (page_len < len);
4408
4409 do {
4410 folio = filemap_lock_folio(mapping, redirty_idx);
4411
4412 /* This never fails, since the folio was pinned by read_cache_folio() above */
4413 f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(folio));
4414
4415 f2fs_folio_wait_writeback(folio, DATA, true, true);
4416
4417 folio_mark_dirty(folio);
4418 set_page_private_gcing(&folio->page);
4419 redirty_idx = folio_next_index(folio);
4420 folio_unlock(folio);
4421 folio_put_refs(folio, 2);
4422 } while (redirty_idx < page_idx);
4423
4424 return ret;
4425 }
4426
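/*
 * F2FS_IOC_DECOMPRESS_FILE: walk the file cluster by cluster, redirty every
 * compressed cluster and write the whole file back, so the data ends up
 * stored uncompressed.  Only available when compress_mode=user is in effect.
 */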
4427 static int f2fs_ioc_decompress_file(struct file *filp)
4428 {
4429 struct inode *inode = file_inode(filp);
4430 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4431 struct f2fs_inode_info *fi = F2FS_I(inode);
4432 pgoff_t page_idx = 0, last_idx, cluster_idx;
4433 int ret;
4434
4435 if (!f2fs_sb_has_compression(sbi) ||
4436 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4437 return -EOPNOTSUPP;
4438
4439 if (!(filp->f_mode & FMODE_WRITE))
4440 return -EBADF;
4441
4442 f2fs_balance_fs(sbi, true);
4443
4444 ret = mnt_want_write_file(filp);
4445 if (ret)
4446 return ret;
4447 inode_lock(inode);
4448
4449 if (!f2fs_is_compress_backend_ready(inode)) {
4450 ret = -EOPNOTSUPP;
4451 goto out;
4452 }
4453
4454 if (!f2fs_compressed_file(inode) ||
4455 is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4456 ret = -EINVAL;
4457 goto out;
4458 }
4459
4460 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4461 if (ret)
4462 goto out;
4463
4464 if (!atomic_read(&fi->i_compr_blocks))
4465 goto out;
4466
4467 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4468 last_idx >>= fi->i_log_cluster_size;
4469
4470 for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) {
4471 page_idx = cluster_idx << fi->i_log_cluster_size;
4472
4473 if (!f2fs_is_compressed_cluster(inode, page_idx))
4474 continue;
4475
4476 ret = redirty_blocks(inode, page_idx, fi->i_cluster_size);
4477 if (ret < 0)
4478 break;
4479
4480 if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) {
4481 ret = filemap_fdatawrite(inode->i_mapping);
4482 if (ret < 0)
4483 break;
4484 }
4485
4486 cond_resched();
4487 if (fatal_signal_pending(current)) {
4488 ret = -EINTR;
4489 break;
4490 }
4491 }
4492
4493 if (!ret)
4494 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4495 LLONG_MAX);
4496
4497 if (ret)
4498 f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
4499 __func__, ret);
4500 f2fs_update_time(sbi, REQ_TIME);
4501 out:
4502 inode_unlock(inode);
4503 mnt_drop_write_file(filp);
4504
4505 return ret;
4506 }
4507
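/*
 * F2FS_IOC_COMPRESS_FILE: the counterpart of the ioctl above.  With
 * FI_ENABLE_COMPRESS set for the duration of the pass, every non-sparse
 * cluster is redirtied and written back, storing it in compressed form
 * where the data is compressible.
 */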
4508 static int f2fs_ioc_compress_file(struct file *filp)
4509 {
4510 struct inode *inode = file_inode(filp);
4511 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4512 struct f2fs_inode_info *fi = F2FS_I(inode);
4513 pgoff_t page_idx = 0, last_idx, cluster_idx;
4514 int ret;
4515
4516 if (!f2fs_sb_has_compression(sbi) ||
4517 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4518 return -EOPNOTSUPP;
4519
4520 if (!(filp->f_mode & FMODE_WRITE))
4521 return -EBADF;
4522
4523 f2fs_balance_fs(sbi, true);
4524
4525 ret = mnt_want_write_file(filp);
4526 if (ret)
4527 return ret;
4528 inode_lock(inode);
4529
4530 if (!f2fs_is_compress_backend_ready(inode)) {
4531 ret = -EOPNOTSUPP;
4532 goto out;
4533 }
4534
4535 if (!f2fs_compressed_file(inode) ||
4536 is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4537 ret = -EINVAL;
4538 goto out;
4539 }
4540
4541 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4542 if (ret)
4543 goto out;
4544
4545 set_inode_flag(inode, FI_ENABLE_COMPRESS);
4546
4547 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4548 last_idx >>= fi->i_log_cluster_size;
4549
4550 for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) {
4551 page_idx = cluster_idx << fi->i_log_cluster_size;
4552
4553 if (f2fs_is_sparse_cluster(inode, page_idx))
4554 continue;
4555
4556 ret = redirty_blocks(inode, page_idx, fi->i_cluster_size);
4557 if (ret < 0)
4558 break;
4559
4560 if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) {
4561 ret = filemap_fdatawrite(inode->i_mapping);
4562 if (ret < 0)
4563 break;
4564 }
4565
4566 cond_resched();
4567 if (fatal_signal_pending(current)) {
4568 ret = -EINTR;
4569 break;
4570 }
4571 }
4572
4573 if (!ret)
4574 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4575 LLONG_MAX);
4576
4577 clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4578
4579 if (ret)
4580 f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
4581 __func__, ret);
4582 f2fs_update_time(sbi, REQ_TIME);
4583 out:
4584 inode_unlock(inode);
4585 mnt_drop_write_file(filp);
4586
4587 return ret;
4588 }
4589
4590 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4591 {
4592 switch (cmd) {
4593 case FS_IOC_GETVERSION:
4594 return f2fs_ioc_getversion(filp, arg);
4595 case F2FS_IOC_START_ATOMIC_WRITE:
4596 return f2fs_ioc_start_atomic_write(filp, false);
4597 case F2FS_IOC_START_ATOMIC_REPLACE:
4598 return f2fs_ioc_start_atomic_write(filp, true);
4599 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4600 return f2fs_ioc_commit_atomic_write(filp);
4601 case F2FS_IOC_ABORT_ATOMIC_WRITE:
4602 return f2fs_ioc_abort_atomic_write(filp);
4603 case F2FS_IOC_START_VOLATILE_WRITE:
4604 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4605 return -EOPNOTSUPP;
4606 case F2FS_IOC_SHUTDOWN:
4607 return f2fs_ioc_shutdown(filp, arg);
4608 case FITRIM:
4609 return f2fs_ioc_fitrim(filp, arg);
4610 case FS_IOC_SET_ENCRYPTION_POLICY:
4611 return f2fs_ioc_set_encryption_policy(filp, arg);
4612 case FS_IOC_GET_ENCRYPTION_POLICY:
4613 return f2fs_ioc_get_encryption_policy(filp, arg);
4614 case FS_IOC_GET_ENCRYPTION_PWSALT:
4615 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4616 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4617 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4618 case FS_IOC_ADD_ENCRYPTION_KEY:
4619 return f2fs_ioc_add_encryption_key(filp, arg);
4620 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4621 return f2fs_ioc_remove_encryption_key(filp, arg);
4622 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4623 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4624 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4625 return f2fs_ioc_get_encryption_key_status(filp, arg);
4626 case FS_IOC_GET_ENCRYPTION_NONCE:
4627 return f2fs_ioc_get_encryption_nonce(filp, arg);
4628 case F2FS_IOC_GARBAGE_COLLECT:
4629 return f2fs_ioc_gc(filp, arg);
4630 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4631 return f2fs_ioc_gc_range(filp, arg);
4632 case F2FS_IOC_WRITE_CHECKPOINT:
4633 return f2fs_ioc_write_checkpoint(filp);
4634 case F2FS_IOC_DEFRAGMENT:
4635 return f2fs_ioc_defragment(filp, arg);
4636 case F2FS_IOC_MOVE_RANGE:
4637 return f2fs_ioc_move_range(filp, arg);
4638 case F2FS_IOC_FLUSH_DEVICE:
4639 return f2fs_ioc_flush_device(filp, arg);
4640 case F2FS_IOC_GET_FEATURES:
4641 return f2fs_ioc_get_features(filp, arg);
4642 case F2FS_IOC_GET_PIN_FILE:
4643 return f2fs_ioc_get_pin_file(filp, arg);
4644 case F2FS_IOC_SET_PIN_FILE:
4645 return f2fs_ioc_set_pin_file(filp, arg);
4646 case F2FS_IOC_PRECACHE_EXTENTS:
4647 return f2fs_ioc_precache_extents(filp);
4648 case F2FS_IOC_RESIZE_FS:
4649 return f2fs_ioc_resize_fs(filp, arg);
4650 case FS_IOC_ENABLE_VERITY:
4651 return f2fs_ioc_enable_verity(filp, arg);
4652 case FS_IOC_MEASURE_VERITY:
4653 return f2fs_ioc_measure_verity(filp, arg);
4654 case FS_IOC_READ_VERITY_METADATA:
4655 return f2fs_ioc_read_verity_metadata(filp, arg);
4656 case FS_IOC_GETFSLABEL:
4657 return f2fs_ioc_getfslabel(filp, arg);
4658 case FS_IOC_SETFSLABEL:
4659 return f2fs_ioc_setfslabel(filp, arg);
4660 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4661 return f2fs_ioc_get_compress_blocks(filp, arg);
4662 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4663 return f2fs_release_compress_blocks(filp, arg);
4664 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4665 return f2fs_reserve_compress_blocks(filp, arg);
4666 case F2FS_IOC_SEC_TRIM_FILE:
4667 return f2fs_sec_trim_file(filp, arg);
4668 case F2FS_IOC_GET_COMPRESS_OPTION:
4669 return f2fs_ioc_get_compress_option(filp, arg);
4670 case F2FS_IOC_SET_COMPRESS_OPTION:
4671 return f2fs_ioc_set_compress_option(filp, arg);
4672 case F2FS_IOC_DECOMPRESS_FILE:
4673 return f2fs_ioc_decompress_file(filp);
4674 case F2FS_IOC_COMPRESS_FILE:
4675 return f2fs_ioc_compress_file(filp);
4676 case F2FS_IOC_GET_DEV_ALIAS_FILE:
4677 return f2fs_ioc_get_dev_alias_file(filp, arg);
4678 case F2FS_IOC_IO_PRIO:
4679 return f2fs_ioc_io_prio(filp, arg);
4680 default:
4681 return -ENOTTY;
4682 }
4683 }
4684
4685 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4686 {
4687 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4688 return -EIO;
4689 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4690 return -ENOSPC;
4691
4692 return __f2fs_ioctl(filp, cmd, arg);
4693 }
4694
4695 /*
4696 * Return %true if the given read or write request should use direct I/O, or
4697 * %false if it should use buffered I/O.
4698 */
4699 static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb,
4700 struct iov_iter *iter)
4701 {
4702 unsigned int align;
4703
4704 if (!(iocb->ki_flags & IOCB_DIRECT))
4705 return false;
4706
4707 if (f2fs_force_buffered_io(inode, iov_iter_rw(iter)))
4708 return false;
4709
4710 /*
4711 * Direct I/O not aligned to the disk's logical_block_size will be
4712 * attempted, but will fail with -EINVAL.
4713 *
4714 * f2fs additionally requires that direct I/O be aligned to the
4715 * filesystem block size, which is often a stricter requirement.
4716 * However, f2fs traditionally falls back to buffered I/O on requests
4717 * that are logical_block_size-aligned but not fs-block aligned.
4718 *
4719 * The below logic implements this behavior.
4720 */
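/*
 * For example (assuming 4K fs blocks on a 512-byte logical block device):
 * a 512-byte write at offset 512 is redirected to buffered I/O here, while
 * a write at offset 100 stays direct and later fails with -EINVAL in the
 * iomap layer.
 */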
4721 align = iocb->ki_pos | iov_iter_alignment(iter);
4722 if (!IS_ALIGNED(align, i_blocksize(inode)) &&
4723 IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev)))
4724 return false;
4725
4726 return true;
4727 }
4728
4729 static int f2fs_dio_read_end_io(struct kiocb *iocb, ssize_t size, int error,
4730 unsigned int flags)
4731 {
4732 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
4733
4734 dec_page_count(sbi, F2FS_DIO_READ);
4735 if (error)
4736 return error;
4737 f2fs_update_iostat(sbi, NULL, APP_DIRECT_READ_IO, size);
4738 return 0;
4739 }
4740
4741 static const struct iomap_dio_ops f2fs_iomap_dio_read_ops = {
4742 .end_io = f2fs_dio_read_end_io,
4743 };
4744
4745 static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
4746 {
4747 struct file *file = iocb->ki_filp;
4748 struct inode *inode = file_inode(file);
4749 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4750 struct f2fs_inode_info *fi = F2FS_I(inode);
4751 const loff_t pos = iocb->ki_pos;
4752 const size_t count = iov_iter_count(to);
4753 struct iomap_dio *dio;
4754 ssize_t ret;
4755
4756 if (count == 0)
4757 return 0; /* skip atime update */
4758
4759 trace_f2fs_direct_IO_enter(inode, iocb, count, READ);
4760
4761 if (iocb->ki_flags & IOCB_NOWAIT) {
4762 if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
4763 ret = -EAGAIN;
4764 goto out;
4765 }
4766 } else {
4767 f2fs_down_read(&fi->i_gc_rwsem[READ]);
4768 }
4769
4770 /* dio is not compatible w/ atomic file */
4771 if (f2fs_is_atomic_file(inode)) {
4772 f2fs_up_read(&fi->i_gc_rwsem[READ]);
4773 ret = -EOPNOTSUPP;
4774 goto out;
4775 }
4776
4777 /*
4778 * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
4779 * the higher-level function iomap_dio_rw() in order to ensure that the
4780 * F2FS_DIO_READ counter will be decremented correctly in all cases.
4781 */
4782 inc_page_count(sbi, F2FS_DIO_READ);
4783 dio = __iomap_dio_rw(iocb, to, &f2fs_iomap_ops,
4784 &f2fs_iomap_dio_read_ops, 0, NULL, 0);
4785 if (IS_ERR_OR_NULL(dio)) {
4786 ret = PTR_ERR_OR_ZERO(dio);
4787 if (ret != -EIOCBQUEUED)
4788 dec_page_count(sbi, F2FS_DIO_READ);
4789 } else {
4790 ret = iomap_dio_complete(dio);
4791 }
4792
4793 f2fs_up_read(&fi->i_gc_rwsem[READ]);
4794
4795 file_accessed(file);
4796 out:
4797 trace_f2fs_direct_IO_exit(inode, pos, count, READ, ret);
4798 return ret;
4799 }
4800
4801 static void f2fs_trace_rw_file_path(struct file *file, loff_t pos, size_t count,
4802 int rw)
4803 {
4804 struct inode *inode = file_inode(file);
4805 char *buf, *path;
4806
4807 buf = f2fs_getname(F2FS_I_SB(inode));
4808 if (!buf)
4809 return;
4810 path = dentry_path_raw(file_dentry(file), buf, PATH_MAX);
4811 if (IS_ERR(path))
4812 goto free_buf;
4813 if (rw == WRITE)
4814 trace_f2fs_datawrite_start(inode, pos, count,
4815 current->pid, path, current->comm);
4816 else
4817 trace_f2fs_dataread_start(inode, pos, count,
4818 current->pid, path, current->comm);
4819 free_buf:
4820 f2fs_putname(buf);
4821 }
4822
4823 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
4824 {
4825 struct inode *inode = file_inode(iocb->ki_filp);
4826 const loff_t pos = iocb->ki_pos;
4827 ssize_t ret;
4828
4829 if (!f2fs_is_compress_backend_ready(inode))
4830 return -EOPNOTSUPP;
4831
4832 if (trace_f2fs_dataread_start_enabled())
4833 f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
4834 iov_iter_count(to), READ);
4835
4836 /* In LFS mode, if there is inflight dio, wait for its completion */
4837 if (f2fs_lfs_mode(F2FS_I_SB(inode)) &&
4838 get_pages(F2FS_I_SB(inode), F2FS_DIO_WRITE))
4839 inode_dio_wait(inode);
4840
4841 if (f2fs_should_use_dio(inode, iocb, to)) {
4842 ret = f2fs_dio_read_iter(iocb, to);
4843 } else {
4844 ret = filemap_read(iocb, to, 0);
4845 if (ret > 0)
4846 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4847 APP_BUFFERED_READ_IO, ret);
4848 }
4849 if (trace_f2fs_dataread_end_enabled())
4850 trace_f2fs_dataread_end(inode, pos, ret);
4851 return ret;
4852 }
4853
4854 static ssize_t f2fs_file_splice_read(struct file *in, loff_t *ppos,
4855 struct pipe_inode_info *pipe,
4856 size_t len, unsigned int flags)
4857 {
4858 struct inode *inode = file_inode(in);
4859 const loff_t pos = *ppos;
4860 ssize_t ret;
4861
4862 if (!f2fs_is_compress_backend_ready(inode))
4863 return -EOPNOTSUPP;
4864
4865 if (trace_f2fs_dataread_start_enabled())
4866 f2fs_trace_rw_file_path(in, pos, len, READ);
4867
4868 ret = filemap_splice_read(in, ppos, pipe, len, flags);
4869 if (ret > 0)
4870 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4871 APP_BUFFERED_READ_IO, ret);
4872
4873 if (trace_f2fs_dataread_end_enabled())
4874 trace_f2fs_dataread_end(inode, pos, ret);
4875 return ret;
4876 }
4877
4878 static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
4879 {
4880 struct file *file = iocb->ki_filp;
4881 struct inode *inode = file_inode(file);
4882 ssize_t count;
4883 int err;
4884
4885 if (IS_IMMUTABLE(inode))
4886 return -EPERM;
4887
4888 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
4889 return -EPERM;
4890
4891 count = generic_write_checks(iocb, from);
4892 if (count <= 0)
4893 return count;
4894
4895 err = file_modified(file);
4896 if (err)
4897 return err;
4898
4899 filemap_invalidate_lock(inode->i_mapping);
4900 f2fs_zero_post_eof_page(inode, iocb->ki_pos + iov_iter_count(from));
4901 filemap_invalidate_unlock(inode->i_mapping);
4902 return count;
4903 }
4904
4905 /*
4906 * Preallocate blocks for a write request, if it is possible and helpful to do
4907 * so. Returns a positive number if blocks may have been preallocated, 0 if no
4908 * blocks were preallocated, or a negative errno value if something went
4909 * seriously wrong. Also sets FI_PREALLOCATED_ALL on the inode if *all* the
4910 * requested blocks (not just some of them) have been allocated.
4911 */
4912 static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter,
4913 bool dio)
4914 {
4915 struct inode *inode = file_inode(iocb->ki_filp);
4916 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4917 const loff_t pos = iocb->ki_pos;
4918 const size_t count = iov_iter_count(iter);
4919 struct f2fs_map_blocks map = {};
4920 int flag;
4921 int ret;
4922
4923 /* If it will be an out-of-place direct write, don't bother. */
4924 if (dio && f2fs_lfs_mode(sbi))
4925 return 0;
4926 /*
4927 * Don't preallocate holes for a DIO write within i_size: if the DIO meets
4928 * a hole it falls back to buffered IO (DIO_SKIP_HOLES), so don't fill holes here.
4929 */
4930 if (dio && i_size_read(inode) &&
4931 (F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode))))
4932 return 0;
4933
4934 /* No-wait I/O can't allocate blocks. */
4935 if (iocb->ki_flags & IOCB_NOWAIT)
4936 return 0;
4937
4938 /* If it will be a short write, don't bother. */
4939 if (fault_in_iov_iter_readable(iter, count))
4940 return 0;
4941
4942 if (f2fs_has_inline_data(inode)) {
4943 /* If the data will fit inline, don't bother. */
4944 if (pos + count <= MAX_INLINE_DATA(inode))
4945 return 0;
4946 ret = f2fs_convert_inline_inode(inode);
4947 if (ret)
4948 return ret;
4949 }
4950
4951 /* Only preallocate blocks that the write fully covers; skip partial head/tail blocks. */
4952 map.m_lblk = F2FS_BLK_ALIGN(pos);
4953 map.m_len = F2FS_BYTES_TO_BLK(pos + count);
4954 if (map.m_len > map.m_lblk)
4955 map.m_len -= map.m_lblk;
4956 else
4957 return 0;
4958
4959 if (!IS_DEVICE_ALIASING(inode))
4960 map.m_may_create = true;
4961 if (dio) {
4962 map.m_seg_type = f2fs_rw_hint_to_seg_type(sbi,
4963 inode->i_write_hint);
4964 flag = F2FS_GET_BLOCK_PRE_DIO;
4965 } else {
4966 map.m_seg_type = NO_CHECK_TYPE;
4967 flag = F2FS_GET_BLOCK_PRE_AIO;
4968 }
4969
4970 ret = f2fs_map_blocks(inode, &map, flag);
4971 /* On -ENOSPC or -EDQUOT, it is still fine to report the blocks that were allocated. */
4972 if (ret < 0 && !((ret == -ENOSPC || ret == -EDQUOT) && map.m_len > 0))
4973 return ret;
4974 if (ret == 0)
4975 set_inode_flag(inode, FI_PREALLOCATED_ALL);
4976 return map.m_len;
4977 }
4978
4979 static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
4980 struct iov_iter *from)
4981 {
4982 struct file *file = iocb->ki_filp;
4983 struct inode *inode = file_inode(file);
4984 ssize_t ret;
4985
4986 if (iocb->ki_flags & IOCB_NOWAIT)
4987 return -EOPNOTSUPP;
4988
4989 ret = generic_perform_write(iocb, from);
4990
4991 if (ret > 0) {
4992 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4993 APP_BUFFERED_IO, ret);
4994 }
4995 return ret;
4996 }
4997
4998 static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error,
4999 unsigned int flags)
5000 {
5001 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
5002
5003 dec_page_count(sbi, F2FS_DIO_WRITE);
5004 if (error)
5005 return error;
5006 f2fs_update_time(sbi, REQ_TIME);
5007 f2fs_update_iostat(sbi, NULL, APP_DIRECT_IO, size);
5008 return 0;
5009 }
5010
5011 static void f2fs_dio_write_submit_io(const struct iomap_iter *iter,
5012 struct bio *bio, loff_t file_offset)
5013 {
5014 struct inode *inode = iter->inode;
5015 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
5016 enum log_type type = f2fs_rw_hint_to_seg_type(sbi, inode->i_write_hint);
5017 enum temp_type temp = f2fs_get_segment_temp(sbi, type);
5018
5019 bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, DATA, temp);
5020 submit_bio(bio);
5021 }
5022
5023 static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {
5024 .end_io = f2fs_dio_write_end_io,
5025 .submit_io = f2fs_dio_write_submit_io,
5026 };
5027
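/*
 * After a buffered fallback from a direct write, write the range back and
 * drop it from the page cache so that subsequent direct I/O does not see
 * stale cached pages (keeps O_DIRECT semantics as close as possible).
 */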
5028 static void f2fs_flush_buffered_write(struct address_space *mapping,
5029 loff_t start_pos, loff_t end_pos)
5030 {
5031 int ret;
5032
5033 ret = filemap_write_and_wait_range(mapping, start_pos, end_pos);
5034 if (ret < 0)
5035 return;
5036 invalidate_mapping_pages(mapping,
5037 start_pos >> PAGE_SHIFT,
5038 end_pos >> PAGE_SHIFT);
5039 }
5040
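/*
 * Direct write path.  In LFS mode (do_opu) writes go out-of-place, which
 * additionally requires the READ side of i_gc_rwsem; IOCB_NOWAIT requests
 * bail out with -EAGAIN wherever blocking would be needed (inline
 * conversion, non-overwrite allocation, lock contention).  A short direct
 * write falls back to f2fs_buffered_write_iter() for the remainder.
 */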
5041 static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
5042 bool *may_need_sync)
5043 {
5044 struct file *file = iocb->ki_filp;
5045 struct inode *inode = file_inode(file);
5046 struct f2fs_inode_info *fi = F2FS_I(inode);
5047 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
5048 const bool do_opu = f2fs_lfs_mode(sbi);
5049 const loff_t pos = iocb->ki_pos;
5050 const ssize_t count = iov_iter_count(from);
5051 unsigned int dio_flags;
5052 struct iomap_dio *dio;
5053 ssize_t ret;
5054
5055 trace_f2fs_direct_IO_enter(inode, iocb, count, WRITE);
5056
5057 if (iocb->ki_flags & IOCB_NOWAIT) {
5058 /* f2fs_convert_inline_inode() and block allocation can block */
5059 if (f2fs_has_inline_data(inode) ||
5060 !f2fs_overwrite_io(inode, pos, count)) {
5061 ret = -EAGAIN;
5062 goto out;
5063 }
5064
5065 if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[WRITE])) {
5066 ret = -EAGAIN;
5067 goto out;
5068 }
5069 if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
5070 f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
5071 ret = -EAGAIN;
5072 goto out;
5073 }
5074 } else {
5075 ret = f2fs_convert_inline_inode(inode);
5076 if (ret)
5077 goto out;
5078
5079 f2fs_down_read(&fi->i_gc_rwsem[WRITE]);
5080 if (do_opu)
5081 f2fs_down_read(&fi->i_gc_rwsem[READ]);
5082 }
5083
5084 /*
5085 * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
5086 * the higher-level function iomap_dio_rw() in order to ensure that the
5087 * F2FS_DIO_WRITE counter will be decremented correctly in all cases.
5088 */
5089 inc_page_count(sbi, F2FS_DIO_WRITE);
5090 dio_flags = 0;
5091 if (pos + count > inode->i_size)
5092 dio_flags |= IOMAP_DIO_FORCE_WAIT;
5093 dio = __iomap_dio_rw(iocb, from, &f2fs_iomap_ops,
5094 &f2fs_iomap_dio_write_ops, dio_flags, NULL, 0);
5095 if (IS_ERR_OR_NULL(dio)) {
5096 ret = PTR_ERR_OR_ZERO(dio);
5097 if (ret == -ENOTBLK)
5098 ret = 0;
5099 if (ret != -EIOCBQUEUED)
5100 dec_page_count(sbi, F2FS_DIO_WRITE);
5101 } else {
5102 ret = iomap_dio_complete(dio);
5103 }
5104
5105 if (do_opu)
5106 f2fs_up_read(&fi->i_gc_rwsem[READ]);
5107 f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
5108
5109 if (ret < 0)
5110 goto out;
5111 if (pos + ret > inode->i_size)
5112 f2fs_i_size_write(inode, pos + ret);
5113 if (!do_opu)
5114 set_inode_flag(inode, FI_UPDATE_WRITE);
5115
5116 if (iov_iter_count(from)) {
5117 ssize_t ret2;
5118 loff_t bufio_start_pos = iocb->ki_pos;
5119
5120 /*
5121 * The direct write was partial, so we need to fall back to a
5122 * buffered write for the remainder.
5123 */
5124
5125 ret2 = f2fs_buffered_write_iter(iocb, from);
5126 if (iov_iter_count(from))
5127 f2fs_write_failed(inode, iocb->ki_pos);
5128 if (ret2 < 0)
5129 goto out;
5130
5131 /*
5132 * Ensure that the pagecache pages are written to disk and
5133 * invalidated to preserve the expected O_DIRECT semantics.
5134 */
5135 if (ret2 > 0) {
5136 loff_t bufio_end_pos = bufio_start_pos + ret2 - 1;
5137
5138 ret += ret2;
5139
5140 f2fs_flush_buffered_write(file->f_mapping,
5141 bufio_start_pos,
5142 bufio_end_pos);
5143 }
5144 } else {
5145 /* iomap_dio_rw() already handled the generic_write_sync(). */
5146 *may_need_sync = false;
5147 }
5148 out:
5149 trace_f2fs_direct_IO_exit(inode, pos, count, WRITE, ret);
5150 return ret;
5151 }
5152
5153 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
5154 {
5155 struct inode *inode = file_inode(iocb->ki_filp);
5156 const loff_t orig_pos = iocb->ki_pos;
5157 const size_t orig_count = iov_iter_count(from);
5158 loff_t target_size;
5159 bool dio;
5160 bool may_need_sync = true;
5161 int preallocated;
5162 const loff_t pos = iocb->ki_pos;
5163 const ssize_t count = iov_iter_count(from);
5164 ssize_t ret;
5165
5166 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
5167 ret = -EIO;
5168 goto out;
5169 }
5170
5171 if (!f2fs_is_compress_backend_ready(inode)) {
5172 ret = -EOPNOTSUPP;
5173 goto out;
5174 }
5175
5176 if (iocb->ki_flags & IOCB_NOWAIT) {
5177 if (!inode_trylock(inode)) {
5178 ret = -EAGAIN;
5179 goto out;
5180 }
5181 } else {
5182 inode_lock(inode);
5183 }
5184
5185 if (f2fs_is_pinned_file(inode) &&
5186 !f2fs_overwrite_io(inode, pos, count)) {
5187 ret = -EIO;
5188 goto out_unlock;
5189 }
5190
5191 ret = f2fs_write_checks(iocb, from);
5192 if (ret <= 0)
5193 goto out_unlock;
5194
5195 /* Determine whether we will do a direct write or a buffered write. */
5196 dio = f2fs_should_use_dio(inode, iocb, from);
5197
5198 /* dio is not compatible w/ atomic write */
5199 if (dio && f2fs_is_atomic_file(inode)) {
5200 ret = -EOPNOTSUPP;
5201 goto out_unlock;
5202 }
5203
5204 /* Possibly preallocate the blocks for the write. */
5205 target_size = iocb->ki_pos + iov_iter_count(from);
5206 preallocated = f2fs_preallocate_blocks(iocb, from, dio);
5207 if (preallocated < 0) {
5208 ret = preallocated;
5209 } else {
5210 if (trace_f2fs_datawrite_start_enabled())
5211 f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
5212 orig_count, WRITE);
5213
5214 /* Do the actual write. */
5215 ret = dio ?
5216 f2fs_dio_write_iter(iocb, from, &may_need_sync) :
5217 f2fs_buffered_write_iter(iocb, from);
5218
5219 if (trace_f2fs_datawrite_end_enabled())
5220 trace_f2fs_datawrite_end(inode, orig_pos, ret);
5221 }
5222
5223 /* Don't leave any preallocated blocks around past i_size. */
5224 if (preallocated && i_size_read(inode) < target_size) {
5225 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
5226 filemap_invalidate_lock(inode->i_mapping);
5227 if (!f2fs_truncate(inode))
5228 file_dont_truncate(inode);
5229 filemap_invalidate_unlock(inode->i_mapping);
5230 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
5231 } else {
5232 file_dont_truncate(inode);
5233 }
5234
5235 clear_inode_flag(inode, FI_PREALLOCATED_ALL);
5236 out_unlock:
5237 inode_unlock(inode);
5238 out:
5239 trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret);
5240
5241 if (ret > 0 && may_need_sync)
5242 ret = generic_write_sync(iocb, ret);
5243
5244 /* If buffered IO was forced, flush and drop the data from
5245 * the page cache to preserve O_DIRECT semantics
5246 */
5247 if (ret > 0 && !dio && (iocb->ki_flags & IOCB_DIRECT))
5248 f2fs_flush_buffered_write(iocb->ki_filp->f_mapping,
5249 orig_pos,
5250 orig_pos + ret - 1);
5251
5252 return ret;
5253 }
5254
5255 static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
5256 int advice)
5257 {
5258 struct address_space *mapping;
5259 struct backing_dev_info *bdi;
5260 struct inode *inode = file_inode(filp);
5261 int err;
5262
5263 if (advice == POSIX_FADV_SEQUENTIAL) {
5264 if (S_ISFIFO(inode->i_mode))
5265 return -ESPIPE;
5266
5267 mapping = filp->f_mapping;
5268 if (!mapping || len < 0)
5269 return -EINVAL;
5270
5271 bdi = inode_to_bdi(mapping->host);
5272 filp->f_ra.ra_pages = bdi->ra_pages *
5273 F2FS_I_SB(inode)->seq_file_ra_mul;
5274 spin_lock(&filp->f_lock);
5275 filp->f_mode &= ~FMODE_RANDOM;
5276 spin_unlock(&filp->f_lock);
5277 return 0;
5278 } else if (advice == POSIX_FADV_WILLNEED && offset == 0) {
5279 /* Load extent cache at the first readahead. */
5280 f2fs_precache_extents(inode);
5281 }
5282
5283 err = generic_fadvise(filp, offset, len, advice);
5284 if (err)
5285 return err;
5286
5287 if (advice == POSIX_FADV_DONTNEED &&
5288 (test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
5289 f2fs_compressed_file(inode)))
5290 f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);
5291 else if (advice == POSIX_FADV_NOREUSE)
5292 err = f2fs_keep_noreuse_range(inode, offset, len);
5293 return err;
5294 }
5295
5296 #ifdef CONFIG_COMPAT
5297 struct compat_f2fs_gc_range {
5298 u32 sync;
5299 compat_u64 start;
5300 compat_u64 len;
5301 };
5302 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\
5303 struct compat_f2fs_gc_range)
5304
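/*
 * The compat handlers below exist because the native structures contain u64
 * members: 32-bit ABIs may align them differently (compat_u64 is only
 * 4-byte aligned on e.g. x86-32), so the ioctl numbers and layouts differ
 * and the fields are copied in individually with get_user().
 */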
5305 static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
5306 {
5307 struct compat_f2fs_gc_range __user *urange;
5308 struct f2fs_gc_range range;
5309 int err;
5310
5311 urange = compat_ptr(arg);
5312 err = get_user(range.sync, &urange->sync);
5313 err |= get_user(range.start, &urange->start);
5314 err |= get_user(range.len, &urange->len);
5315 if (err)
5316 return -EFAULT;
5317
5318 return __f2fs_ioc_gc_range(file, &range);
5319 }
5320
5321 struct compat_f2fs_move_range {
5322 u32 dst_fd;
5323 compat_u64 pos_in;
5324 compat_u64 pos_out;
5325 compat_u64 len;
5326 };
5327 #define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
5328 struct compat_f2fs_move_range)
5329
5330 static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
5331 {
5332 struct compat_f2fs_move_range __user *urange;
5333 struct f2fs_move_range range;
5334 int err;
5335
5336 urange = compat_ptr(arg);
5337 err = get_user(range.dst_fd, &urange->dst_fd);
5338 err |= get_user(range.pos_in, &urange->pos_in);
5339 err |= get_user(range.pos_out, &urange->pos_out);
5340 err |= get_user(range.len, &urange->len);
5341 if (err)
5342 return -EFAULT;
5343
5344 return __f2fs_ioc_move_range(file, &range);
5345 }
5346
5347 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
5348 {
5349 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
5350 return -EIO;
5351 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
5352 return -ENOSPC;
5353
5354 switch (cmd) {
5355 case FS_IOC32_GETVERSION:
5356 cmd = FS_IOC_GETVERSION;
5357 break;
5358 case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
5359 return f2fs_compat_ioc_gc_range(file, arg);
5360 case F2FS_IOC32_MOVE_RANGE:
5361 return f2fs_compat_ioc_move_range(file, arg);
5362 case F2FS_IOC_START_ATOMIC_WRITE:
5363 case F2FS_IOC_START_ATOMIC_REPLACE:
5364 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
5365 case F2FS_IOC_START_VOLATILE_WRITE:
5366 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
5367 case F2FS_IOC_ABORT_ATOMIC_WRITE:
5368 case F2FS_IOC_SHUTDOWN:
5369 case FITRIM:
5370 case FS_IOC_SET_ENCRYPTION_POLICY:
5371 case FS_IOC_GET_ENCRYPTION_PWSALT:
5372 case FS_IOC_GET_ENCRYPTION_POLICY:
5373 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
5374 case FS_IOC_ADD_ENCRYPTION_KEY:
5375 case FS_IOC_REMOVE_ENCRYPTION_KEY:
5376 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
5377 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
5378 case FS_IOC_GET_ENCRYPTION_NONCE:
5379 case F2FS_IOC_GARBAGE_COLLECT:
5380 case F2FS_IOC_WRITE_CHECKPOINT:
5381 case F2FS_IOC_DEFRAGMENT:
5382 case F2FS_IOC_FLUSH_DEVICE:
5383 case F2FS_IOC_GET_FEATURES:
5384 case F2FS_IOC_GET_PIN_FILE:
5385 case F2FS_IOC_SET_PIN_FILE:
5386 case F2FS_IOC_PRECACHE_EXTENTS:
5387 case F2FS_IOC_RESIZE_FS:
5388 case FS_IOC_ENABLE_VERITY:
5389 case FS_IOC_MEASURE_VERITY:
5390 case FS_IOC_READ_VERITY_METADATA:
5391 case FS_IOC_GETFSLABEL:
5392 case FS_IOC_SETFSLABEL:
5393 case F2FS_IOC_GET_COMPRESS_BLOCKS:
5394 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
5395 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
5396 case F2FS_IOC_SEC_TRIM_FILE:
5397 case F2FS_IOC_GET_COMPRESS_OPTION:
5398 case F2FS_IOC_SET_COMPRESS_OPTION:
5399 case F2FS_IOC_DECOMPRESS_FILE:
5400 case F2FS_IOC_COMPRESS_FILE:
5401 case F2FS_IOC_GET_DEV_ALIAS_FILE:
5402 case F2FS_IOC_IO_PRIO:
5403 break;
5404 default:
5405 return -ENOIOCTLCMD;
5406 }
5407 return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
5408 }
5409 #endif
5410
5411 const struct file_operations f2fs_file_operations = {
5412 .llseek = f2fs_llseek,
5413 .read_iter = f2fs_file_read_iter,
5414 .write_iter = f2fs_file_write_iter,
5415 .iopoll = iocb_bio_iopoll,
5416 .open = f2fs_file_open,
5417 .release = f2fs_release_file,
5418 .mmap_prepare = f2fs_file_mmap_prepare,
5419 .flush = f2fs_file_flush,
5420 .fsync = f2fs_sync_file,
5421 .fallocate = f2fs_fallocate,
5422 .unlocked_ioctl = f2fs_ioctl,
5423 #ifdef CONFIG_COMPAT
5424 .compat_ioctl = f2fs_compat_ioctl,
5425 #endif
5426 .splice_read = f2fs_file_splice_read,
5427 .splice_write = iter_file_splice_write,
5428 .fadvise = f2fs_file_fadvise,
5429 .fop_flags = FOP_BUFFER_RASYNC,
5430 };
5431