xref: /linux/fs/f2fs/file.c (revision 6389a62ff798e781567645c0b0ca3dd7b8a4289d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/file.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/stat.h>
11 #include <linux/buffer_head.h>
12 #include <linux/writeback.h>
13 #include <linux/blkdev.h>
14 #include <linux/falloc.h>
15 #include <linux/types.h>
16 #include <linux/compat.h>
17 #include <linux/uaccess.h>
18 #include <linux/mount.h>
19 #include <linux/pagevec.h>
20 #include <linux/uio.h>
21 #include <linux/uuid.h>
22 #include <linux/file.h>
23 
24 #include "f2fs.h"
25 #include "node.h"
26 #include "segment.h"
27 #include "xattr.h"
28 #include "acl.h"
29 #include "gc.h"
30 #include "trace.h"
31 #include <trace/events/f2fs.h>
32 
33 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
34 {
35 	struct inode *inode = file_inode(vmf->vma->vm_file);
36 	vm_fault_t ret;
37 
38 	down_read(&F2FS_I(inode)->i_mmap_sem);
39 	ret = filemap_fault(vmf);
40 	up_read(&F2FS_I(inode)->i_mmap_sem);
41 
42 	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
43 
44 	return ret;
45 }
46 
47 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
48 {
49 	struct page *page = vmf->page;
50 	struct inode *inode = file_inode(vmf->vma->vm_file);
51 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
52 	struct dnode_of_data dn = { .node_changed = false };
53 	int err;
54 
55 	if (unlikely(f2fs_cp_error(sbi))) {
56 		err = -EIO;
57 		goto err;
58 	}
59 
60 	sb_start_pagefault(inode->i_sb);
61 
62 	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
63 
64 	file_update_time(vmf->vma->vm_file);
65 	down_read(&F2FS_I(inode)->i_mmap_sem);
66 	lock_page(page);
67 	if (unlikely(page->mapping != inode->i_mapping ||
68 			page_offset(page) > i_size_read(inode) ||
69 			!PageUptodate(page))) {
70 		unlock_page(page);
71 		err = -EFAULT;
72 		goto out_sem;
73 	}
74 
75 	/* block allocation */
76 	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
77 	set_new_dnode(&dn, inode, NULL, NULL, 0);
78 	err = f2fs_get_block(&dn, page->index);
79 	f2fs_put_dnode(&dn);
80 	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
81 	if (err) {
82 		unlock_page(page);
83 		goto out_sem;
84 	}
85 
86 	/* fill the page */
87 	f2fs_wait_on_page_writeback(page, DATA, false, true);
88 
89 	/* wait for GCed page writeback via META_MAPPING */
90 	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
91 
92 	/*
93 	 * check to see if the page is mapped already (no holes)
94 	 */
95 	if (PageMappedToDisk(page))
96 		goto out_sem;
97 
98 	/* the page may straddle EOF; zero the portion beyond i_size */
99 	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
100 						i_size_read(inode)) {
101 		loff_t offset;
102 
103 		offset = i_size_read(inode) & ~PAGE_MASK;
104 		zero_user_segment(page, offset, PAGE_SIZE);
105 	}
106 	set_page_dirty(page);
107 	if (!PageUptodate(page))
108 		SetPageUptodate(page);
109 
110 	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
111 	f2fs_update_time(sbi, REQ_TIME);
112 
113 	trace_f2fs_vm_page_mkwrite(page, DATA);
114 out_sem:
115 	up_read(&F2FS_I(inode)->i_mmap_sem);
116 
117 	f2fs_balance_fs(sbi, dn.node_changed);
118 
119 	sb_end_pagefault(inode->i_sb);
120 err:
121 	return block_page_mkwrite_return(err);
122 }
123 
124 static const struct vm_operations_struct f2fs_file_vm_ops = {
125 	.fault		= f2fs_filemap_fault,
126 	.map_pages	= filemap_map_pages,
127 	.page_mkwrite	= f2fs_vm_page_mkwrite,
128 };
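/*
 * A minimal userspace sketch (illustrative, not part of the kernel build) of
 * what exercises the two handlers above: a read access to a mapped page goes
 * through ->fault (f2fs_filemap_fault), and the first store to a clean
 * MAP_SHARED page goes through ->page_mkwrite, which allocates the block and
 * dirties the page:
 *
 *	int fd = open("/mnt/f2fs/file", O_RDWR);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	char c = p[0];	// read fault  -> f2fs_filemap_fault()
 *	p[0] = c + 1;	// write fault -> f2fs_vm_page_mkwrite()
 */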
129 
130 static int get_parent_ino(struct inode *inode, nid_t *pino)
131 {
132 	struct dentry *dentry;
133 
134 	inode = igrab(inode);
135 	dentry = d_find_any_alias(inode);
136 	iput(inode);
137 	if (!dentry)
138 		return 0;
139 
140 	*pino = parent_ino(dentry);
141 	dput(dentry);
142 	return 1;
143 }
144 
145 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
146 {
147 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
148 	enum cp_reason_type cp_reason = CP_NO_NEEDED;
149 
150 	if (!S_ISREG(inode->i_mode))
151 		cp_reason = CP_NON_REGULAR;
152 	else if (inode->i_nlink != 1)
153 		cp_reason = CP_HARDLINK;
154 	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
155 		cp_reason = CP_SB_NEED_CP;
156 	else if (file_wrong_pino(inode))
157 		cp_reason = CP_WRONG_PINO;
158 	else if (!f2fs_space_for_roll_forward(sbi))
159 		cp_reason = CP_NO_SPC_ROLL;
160 	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
161 		cp_reason = CP_NODE_NEED_CP;
162 	else if (test_opt(sbi, FASTBOOT))
163 		cp_reason = CP_FASTBOOT_MODE;
164 	else if (F2FS_OPTION(sbi).active_logs == 2)
165 		cp_reason = CP_SPEC_LOG_NUM;
166 	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
167 		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
168 		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
169 							TRANS_DIR_INO))
170 		cp_reason = CP_RECOVER_DIR;
171 
172 	return cp_reason;
173 }
174 
175 static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
176 {
177 	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
178 	bool ret = false;
179 	/* But we still need to catch any pending inode updates */
180 	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
181 		ret = true;
182 	f2fs_put_page(i, 0);
183 	return ret;
184 }
185 
186 static void try_to_fix_pino(struct inode *inode)
187 {
188 	struct f2fs_inode_info *fi = F2FS_I(inode);
189 	nid_t pino;
190 
191 	down_write(&fi->i_sem);
192 	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
193 			get_parent_ino(inode, &pino)) {
194 		f2fs_i_pino_write(inode, pino);
195 		file_got_pino(inode);
196 	}
197 	up_write(&fi->i_sem);
198 }
199 
200 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
201 						int datasync, bool atomic)
202 {
203 	struct inode *inode = file->f_mapping->host;
204 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
205 	nid_t ino = inode->i_ino;
206 	int ret = 0;
207 	enum cp_reason_type cp_reason = 0;
208 	struct writeback_control wbc = {
209 		.sync_mode = WB_SYNC_ALL,
210 		.nr_to_write = LONG_MAX,
211 		.for_reclaim = 0,
212 	};
213 	unsigned int seq_id = 0;
214 
215 	if (unlikely(f2fs_readonly(inode->i_sb) ||
216 				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
217 		return 0;
218 
219 	trace_f2fs_sync_file_enter(inode);
220 
221 	if (S_ISDIR(inode->i_mode))
222 		goto go_write;
223 
224 	/* if fdatasync is triggered, let's do in-place-update */
225 	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
226 		set_inode_flag(inode, FI_NEED_IPU);
227 	ret = file_write_and_wait_range(file, start, end);
228 	clear_inode_flag(inode, FI_NEED_IPU);
229 
230 	if (ret) {
231 		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
232 		return ret;
233 	}
234 
235 	/* if the inode itself is dirty, always make it recoverable */
236 	if (!f2fs_skip_inode_update(inode, datasync)) {
237 		f2fs_write_inode(inode, NULL);
238 		goto go_write;
239 	}
240 
241 	/*
242 	 * if there is no written data, don't waste time writing recovery info.
243 	 */
244 	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
245 			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
246 
247 	/* write_inode() may have been called just prior to fsync */
248 		if (need_inode_page_update(sbi, ino))
249 			goto go_write;
250 
251 		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
252 				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
253 			goto flush_out;
254 		goto out;
255 	}
256 go_write:
257 	/*
258 	 * Both fdatasync() and fsync() can be recovered from a sudden
259 	 * power-off.
260 	 */
261 	down_read(&F2FS_I(inode)->i_sem);
262 	cp_reason = need_do_checkpoint(inode);
263 	up_read(&F2FS_I(inode)->i_sem);
264 
265 	if (cp_reason) {
266 		/* all the dirty node pages should be flushed for POR */
267 		ret = f2fs_sync_fs(inode->i_sb, 1);
268 
269 		/*
270 		 * We've secured consistency through sync_fs. Following pino
271 		 * will be used only for fsynced inodes after checkpoint.
272 		 */
273 		try_to_fix_pino(inode);
274 		clear_inode_flag(inode, FI_APPEND_WRITE);
275 		clear_inode_flag(inode, FI_UPDATE_WRITE);
276 		goto out;
277 	}
278 sync_nodes:
279 	atomic_inc(&sbi->wb_sync_req[NODE]);
280 	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
281 	atomic_dec(&sbi->wb_sync_req[NODE]);
282 	if (ret)
283 		goto out;
284 
285 	/* if cp_error is set, we should avoid an infinite loop */
286 	if (unlikely(f2fs_cp_error(sbi))) {
287 		ret = -EIO;
288 		goto out;
289 	}
290 
291 	if (f2fs_need_inode_block_update(sbi, ino)) {
292 		f2fs_mark_inode_dirty_sync(inode, true);
293 		f2fs_write_inode(inode, NULL);
294 		goto sync_nodes;
295 	}
296 
297 	/*
298 	 * If it's an atomic write, keeping the write ordering is enough. So
299 	 * here we don't need to wait for node write completion, since we use
300 	 * a node chain which serializes node blocks. If any node write is
301 	 * reordered, we simply see a broken chain, which stops roll-forward
302 	 * recovery. It means we recover either all or none of the node blocks
303 	 * up to the fsync mark.
304 	 */
305 	if (!atomic) {
306 		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
307 		if (ret)
308 			goto out;
309 	}
310 
311 	/* once recovery info is written, we don't need to track this */
312 	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
313 	clear_inode_flag(inode, FI_APPEND_WRITE);
314 flush_out:
315 	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
316 		ret = f2fs_issue_flush(sbi, inode->i_ino);
317 	if (!ret) {
318 		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
319 		clear_inode_flag(inode, FI_UPDATE_WRITE);
320 		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
321 	}
322 	f2fs_update_time(sbi, REQ_TIME);
323 out:
324 	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
325 	f2fs_trace_ios(NULL, 1);
326 	return ret;
327 }
328 
329 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
330 {
331 	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
332 		return -EIO;
333 	return f2fs_do_sync_file(file, start, end, datasync, false);
334 }
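/*
 * Userspace view of the datasync parameter (illustrative): fsync(2) reaches
 * f2fs_do_sync_file() with datasync == 0, fdatasync(2) with datasync == 1,
 * which biases f2fs toward in-place updates via FI_NEED_IPU:
 *
 *	write(fd, buf, len);
 *	fdatasync(fd);	// data plus the minimal metadata needed for recovery
 *	fsync(fd);	// also persists inode metadata; may force a checkpoint
 */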
335 
336 static pgoff_t __get_first_dirty_index(struct address_space *mapping,
337 						pgoff_t pgofs, int whence)
338 {
339 	struct page *page;
340 	int nr_pages;
341 
342 	if (whence != SEEK_DATA)
343 		return 0;
344 
345 	/* find first dirty page index */
346 	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
347 				      1, &page);
348 	if (!nr_pages)
349 		return ULONG_MAX;
350 	pgofs = page->index;
351 	put_page(page);
352 	return pgofs;
353 }
354 
355 static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
356 				pgoff_t dirty, pgoff_t pgofs, int whence)
357 {
358 	switch (whence) {
359 	case SEEK_DATA:
360 		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
361 			__is_valid_data_blkaddr(blkaddr))
362 			return true;
363 		break;
364 	case SEEK_HOLE:
365 		if (blkaddr == NULL_ADDR)
366 			return true;
367 		break;
368 	}
369 	return false;
370 }
371 
372 static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
373 {
374 	struct inode *inode = file->f_mapping->host;
375 	loff_t maxbytes = inode->i_sb->s_maxbytes;
376 	struct dnode_of_data dn;
377 	pgoff_t pgofs, end_offset, dirty;
378 	loff_t data_ofs = offset;
379 	loff_t isize;
380 	int err = 0;
381 
382 	inode_lock(inode);
383 
384 	isize = i_size_read(inode);
385 	if (offset >= isize)
386 		goto fail;
387 
388 	/* handle inline data case */
389 	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
390 		if (whence == SEEK_HOLE)
391 			data_ofs = isize;
392 		goto found;
393 	}
394 
395 	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
396 
397 	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
398 
399 	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
400 		set_new_dnode(&dn, inode, NULL, NULL, 0);
401 		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
402 		if (err && err != -ENOENT) {
403 			goto fail;
404 		} else if (err == -ENOENT) {
405 			/* direct node does not exist */
406 			if (whence == SEEK_DATA) {
407 				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
408 				continue;
409 			} else {
410 				goto found;
411 			}
412 		}
413 
414 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
415 
416 		/* find data/hole in dnode block */
417 		for (; dn.ofs_in_node < end_offset;
418 				dn.ofs_in_node++, pgofs++,
419 				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
420 			block_t blkaddr;
421 
422 			blkaddr = datablock_addr(dn.inode,
423 					dn.node_page, dn.ofs_in_node);
424 
425 			if (__is_valid_data_blkaddr(blkaddr) &&
426 				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
427 					blkaddr, DATA_GENERIC_ENHANCE)) {
428 				f2fs_put_dnode(&dn);
429 				goto fail;
430 			}
431 
432 			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
433 							pgofs, whence)) {
434 				f2fs_put_dnode(&dn);
435 				goto found;
436 			}
437 		}
438 		f2fs_put_dnode(&dn);
439 	}
440 
441 	if (whence == SEEK_DATA)
442 		goto fail;
443 found:
444 	if (whence == SEEK_HOLE && data_ofs > isize)
445 		data_ofs = isize;
446 	inode_unlock(inode);
447 	return vfs_setpos(file, data_ofs, maxbytes);
448 fail:
449 	inode_unlock(inode);
450 	return -ENXIO;
451 }
452 
453 static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
454 {
455 	struct inode *inode = file->f_mapping->host;
456 	loff_t maxbytes = inode->i_sb->s_maxbytes;
457 
458 	switch (whence) {
459 	case SEEK_SET:
460 	case SEEK_CUR:
461 	case SEEK_END:
462 		return generic_file_llseek_size(file, offset, whence,
463 						maxbytes, i_size_read(inode));
464 	case SEEK_DATA:
465 	case SEEK_HOLE:
466 		if (offset < 0)
467 			return -ENXIO;
468 		return f2fs_seek_block(file, offset, whence);
469 	}
470 
471 	return -EINVAL;
472 }
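/*
 * SEEK_DATA/SEEK_HOLE sketch (illustrative): f2fs_seek_block() above walks
 * the dnode blocks for the next valid (or NULL_ADDR) block address, also
 * treating not-yet-allocated dirty pages as data:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);		// first data extent
 *	off_t hole = lseek(fd, data, SEEK_HOLE);	// end of that extent
 *	// past i_size, both fail with errno == ENXIO
 */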
473 
474 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
475 {
476 	struct inode *inode = file_inode(file);
477 	int err;
478 
479 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
480 		return -EIO;
481 
482 	/* we don't need to keep inline_data strictly here, so convert it */
483 	err = f2fs_convert_inline_inode(inode);
484 	if (err)
485 		return err;
486 
487 	file_accessed(file);
488 	vma->vm_ops = &f2fs_file_vm_ops;
489 	return 0;
490 }
491 
492 static int f2fs_file_open(struct inode *inode, struct file *filp)
493 {
494 	int err = fscrypt_file_open(inode, filp);
495 
496 	if (err)
497 		return err;
498 
499 	filp->f_mode |= FMODE_NOWAIT;
500 
501 	return dquot_file_open(inode, filp);
502 }
503 
504 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
505 {
506 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
507 	struct f2fs_node *raw_node;
508 	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
509 	__le32 *addr;
510 	int base = 0;
511 
512 	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
513 		base = get_extra_isize(dn->inode);
514 
515 	raw_node = F2FS_NODE(dn->node_page);
516 	addr = blkaddr_in_node(raw_node) + base + ofs;
517 
518 	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
519 		block_t blkaddr = le32_to_cpu(*addr);
520 
521 		if (blkaddr == NULL_ADDR)
522 			continue;
523 
524 		dn->data_blkaddr = NULL_ADDR;
525 		f2fs_set_data_blkaddr(dn);
526 
527 		if (__is_valid_data_blkaddr(blkaddr) &&
528 			!f2fs_is_valid_blkaddr(sbi, blkaddr,
529 					DATA_GENERIC_ENHANCE))
530 			continue;
531 
532 		f2fs_invalidate_blocks(sbi, blkaddr);
533 		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
534 			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
535 		nr_free++;
536 	}
537 
538 	if (nr_free) {
539 		pgoff_t fofs;
540 		/*
541 		 * once we invalidate a valid blkaddr in the range [ofs, ofs + count],
542 		 * we invalidate all blkaddrs in the whole range.
543 		 */
544 		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
545 							dn->inode) + ofs;
546 		f2fs_update_extent_cache_range(dn, fofs, 0, len);
547 		dec_valid_block_count(sbi, dn->inode, nr_free);
548 	}
549 	dn->ofs_in_node = ofs;
550 
551 	f2fs_update_time(sbi, REQ_TIME);
552 	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
553 					 dn->ofs_in_node, nr_free);
554 }
555 
556 void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
557 {
558 	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
559 }
560 
561 static int truncate_partial_data_page(struct inode *inode, u64 from,
562 								bool cache_only)
563 {
564 	loff_t offset = from & (PAGE_SIZE - 1);
565 	pgoff_t index = from >> PAGE_SHIFT;
566 	struct address_space *mapping = inode->i_mapping;
567 	struct page *page;
568 
569 	if (!offset && !cache_only)
570 		return 0;
571 
572 	if (cache_only) {
573 		page = find_lock_page(mapping, index);
574 		if (page && PageUptodate(page))
575 			goto truncate_out;
576 		f2fs_put_page(page, 1);
577 		return 0;
578 	}
579 
580 	page = f2fs_get_lock_data_page(inode, index, true);
581 	if (IS_ERR(page))
582 		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
583 truncate_out:
584 	f2fs_wait_on_page_writeback(page, DATA, true, true);
585 	zero_user(page, offset, PAGE_SIZE - offset);
586 
587 	/* An encrypted inode should have a key, so it must not take the cache-only path. */
588 	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
589 	if (!cache_only)
590 		set_page_dirty(page);
591 	f2fs_put_page(page, 1);
592 	return 0;
593 }
594 
595 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
596 {
597 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
598 	struct dnode_of_data dn;
599 	pgoff_t free_from;
600 	int count = 0, err = 0;
601 	struct page *ipage;
602 	bool truncate_page = false;
603 
604 	trace_f2fs_truncate_blocks_enter(inode, from);
605 
606 	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
607 
608 	if (free_from >= sbi->max_file_blocks)
609 		goto free_partial;
610 
611 	if (lock)
612 		f2fs_lock_op(sbi);
613 
614 	ipage = f2fs_get_node_page(sbi, inode->i_ino);
615 	if (IS_ERR(ipage)) {
616 		err = PTR_ERR(ipage);
617 		goto out;
618 	}
619 
620 	if (f2fs_has_inline_data(inode)) {
621 		f2fs_truncate_inline_inode(inode, ipage, from);
622 		f2fs_put_page(ipage, 1);
623 		truncate_page = true;
624 		goto out;
625 	}
626 
627 	set_new_dnode(&dn, inode, ipage, NULL, 0);
628 	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
629 	if (err) {
630 		if (err == -ENOENT)
631 			goto free_next;
632 		goto out;
633 	}
634 
635 	count = ADDRS_PER_PAGE(dn.node_page, inode);
636 
637 	count -= dn.ofs_in_node;
638 	f2fs_bug_on(sbi, count < 0);
639 
640 	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
641 		f2fs_truncate_data_blocks_range(&dn, count);
642 		free_from += count;
643 	}
644 
645 	f2fs_put_dnode(&dn);
646 free_next:
647 	err = f2fs_truncate_inode_blocks(inode, free_from);
648 out:
649 	if (lock)
650 		f2fs_unlock_op(sbi);
651 free_partial:
652 	/* lastly zero out the partial data page at the truncation point */
653 	if (!err)
654 		err = truncate_partial_data_page(inode, from, truncate_page);
655 
656 	trace_f2fs_truncate_blocks_exit(inode, err);
657 	return err;
658 }
659 
660 int f2fs_truncate(struct inode *inode)
661 {
662 	int err;
663 
664 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
665 		return -EIO;
666 
667 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
668 				S_ISLNK(inode->i_mode)))
669 		return 0;
670 
671 	trace_f2fs_truncate(inode);
672 
673 	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
674 		f2fs_show_injection_info(FAULT_TRUNCATE);
675 		return -EIO;
676 	}
677 
678 	/* we should check inline_data size */
679 	if (!f2fs_may_inline_data(inode)) {
680 		err = f2fs_convert_inline_inode(inode);
681 		if (err)
682 			return err;
683 	}
684 
685 	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
686 	if (err)
687 		return err;
688 
689 	inode->i_mtime = inode->i_ctime = current_time(inode);
690 	f2fs_mark_inode_dirty_sync(inode, false);
691 	return 0;
692 }
693 
694 int f2fs_getattr(const struct path *path, struct kstat *stat,
695 		 u32 request_mask, unsigned int query_flags)
696 {
697 	struct inode *inode = d_inode(path->dentry);
698 	struct f2fs_inode_info *fi = F2FS_I(inode);
699 	struct f2fs_inode *ri;
700 	unsigned int flags;
701 
702 	if (f2fs_has_extra_attr(inode) &&
703 			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
704 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
705 		stat->result_mask |= STATX_BTIME;
706 		stat->btime.tv_sec = fi->i_crtime.tv_sec;
707 		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
708 	}
709 
710 	flags = fi->i_flags;
711 	if (flags & F2FS_APPEND_FL)
712 		stat->attributes |= STATX_ATTR_APPEND;
713 	if (IS_ENCRYPTED(inode))
714 		stat->attributes |= STATX_ATTR_ENCRYPTED;
715 	if (flags & F2FS_IMMUTABLE_FL)
716 		stat->attributes |= STATX_ATTR_IMMUTABLE;
717 	if (flags & F2FS_NODUMP_FL)
718 		stat->attributes |= STATX_ATTR_NODUMP;
719 
720 	stat->attributes_mask |= (STATX_ATTR_APPEND |
721 				  STATX_ATTR_ENCRYPTED |
722 				  STATX_ATTR_IMMUTABLE |
723 				  STATX_ATTR_NODUMP);
724 
725 	generic_fillattr(inode, stat);
726 
727 	/* we need to show initial sectors used for inline_data/dentries */
728 	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
729 					f2fs_has_inline_dentry(inode))
730 		stat->blocks += (stat->size + 511) >> 9;
731 
732 	return 0;
733 }
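/*
 * The creation time filled in above is only visible through statx(2).
 * A hedged sketch, assuming the filesystem was formatted with the
 * inode_crtime feature:
 *
 *	struct statx stx;
 *	statx(AT_FDCWD, "/mnt/f2fs/file", 0, STATX_BTIME, &stx);
 *	if (stx.stx_mask & STATX_BTIME)
 *		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
 */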
734 
735 #ifdef CONFIG_F2FS_FS_POSIX_ACL
736 static void __setattr_copy(struct inode *inode, const struct iattr *attr)
737 {
738 	unsigned int ia_valid = attr->ia_valid;
739 
740 	if (ia_valid & ATTR_UID)
741 		inode->i_uid = attr->ia_uid;
742 	if (ia_valid & ATTR_GID)
743 		inode->i_gid = attr->ia_gid;
744 	if (ia_valid & ATTR_ATIME)
745 		inode->i_atime = timespec64_trunc(attr->ia_atime,
746 						  inode->i_sb->s_time_gran);
747 	if (ia_valid & ATTR_MTIME)
748 		inode->i_mtime = timespec64_trunc(attr->ia_mtime,
749 						  inode->i_sb->s_time_gran);
750 	if (ia_valid & ATTR_CTIME)
751 		inode->i_ctime = timespec64_trunc(attr->ia_ctime,
752 						  inode->i_sb->s_time_gran);
753 	if (ia_valid & ATTR_MODE) {
754 		umode_t mode = attr->ia_mode;
755 
756 		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
757 			mode &= ~S_ISGID;
758 		set_acl_inode(inode, mode);
759 	}
760 }
761 #else
762 #define __setattr_copy setattr_copy
763 #endif
764 
765 int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
766 {
767 	struct inode *inode = d_inode(dentry);
768 	int err;
769 
770 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
771 		return -EIO;
772 
773 	err = setattr_prepare(dentry, attr);
774 	if (err)
775 		return err;
776 
777 	err = fscrypt_prepare_setattr(dentry, attr);
778 	if (err)
779 		return err;
780 
781 	if (is_quota_modification(inode, attr)) {
782 		err = dquot_initialize(inode);
783 		if (err)
784 			return err;
785 	}
786 	if ((attr->ia_valid & ATTR_UID &&
787 		!uid_eq(attr->ia_uid, inode->i_uid)) ||
788 		(attr->ia_valid & ATTR_GID &&
789 		!gid_eq(attr->ia_gid, inode->i_gid))) {
790 		f2fs_lock_op(F2FS_I_SB(inode));
791 		err = dquot_transfer(inode, attr);
792 		if (err) {
793 			set_sbi_flag(F2FS_I_SB(inode),
794 					SBI_QUOTA_NEED_REPAIR);
795 			f2fs_unlock_op(F2FS_I_SB(inode));
796 			return err;
797 		}
798 		/*
799 		 * update uid/gid under lock_op(), so that dquot and inode can
800 		 * be updated atomically.
801 		 */
802 		if (attr->ia_valid & ATTR_UID)
803 			inode->i_uid = attr->ia_uid;
804 		if (attr->ia_valid & ATTR_GID)
805 			inode->i_gid = attr->ia_gid;
806 		f2fs_mark_inode_dirty_sync(inode, true);
807 		f2fs_unlock_op(F2FS_I_SB(inode));
808 	}
809 
810 	if (attr->ia_valid & ATTR_SIZE) {
811 		bool to_smaller = (attr->ia_size <= i_size_read(inode));
812 
813 		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
814 		down_write(&F2FS_I(inode)->i_mmap_sem);
815 
816 		truncate_setsize(inode, attr->ia_size);
817 
818 		if (to_smaller)
819 			err = f2fs_truncate(inode);
820 		/*
821 		 * do not trim all blocks after i_size if target size is
822 		 * larger than i_size.
823 		 */
824 		up_write(&F2FS_I(inode)->i_mmap_sem);
825 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
826 
827 		if (err)
828 			return err;
829 
830 		if (!to_smaller) {
831 			/* should convert inline inode here */
832 			if (!f2fs_may_inline_data(inode)) {
833 				err = f2fs_convert_inline_inode(inode);
834 				if (err)
835 					return err;
836 			}
837 			inode->i_mtime = inode->i_ctime = current_time(inode);
838 		}
839 
840 		down_write(&F2FS_I(inode)->i_sem);
841 		F2FS_I(inode)->last_disk_size = i_size_read(inode);
842 		up_write(&F2FS_I(inode)->i_sem);
843 	}
844 
845 	__setattr_copy(inode, attr);
846 
847 	if (attr->ia_valid & ATTR_MODE) {
848 		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
849 		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
850 			inode->i_mode = F2FS_I(inode)->i_acl_mode;
851 			clear_inode_flag(inode, FI_ACL_MODE);
852 		}
853 	}
854 
855 	/* file size may have changed here */
856 	f2fs_mark_inode_dirty_sync(inode, true);
857 
858 	/* inode change will produce dirty node pages flushed by checkpoint */
859 	f2fs_balance_fs(F2FS_I_SB(inode), true);
860 
861 	return err;
862 }
863 
864 const struct inode_operations f2fs_file_inode_operations = {
865 	.getattr	= f2fs_getattr,
866 	.setattr	= f2fs_setattr,
867 	.get_acl	= f2fs_get_acl,
868 	.set_acl	= f2fs_set_acl,
869 #ifdef CONFIG_F2FS_FS_XATTR
870 	.listxattr	= f2fs_listxattr,
871 #endif
872 	.fiemap		= f2fs_fiemap,
873 };
874 
875 static int fill_zero(struct inode *inode, pgoff_t index,
876 					loff_t start, loff_t len)
877 {
878 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
879 	struct page *page;
880 
881 	if (!len)
882 		return 0;
883 
884 	f2fs_balance_fs(sbi, true);
885 
886 	f2fs_lock_op(sbi);
887 	page = f2fs_get_new_data_page(inode, NULL, index, false);
888 	f2fs_unlock_op(sbi);
889 
890 	if (IS_ERR(page))
891 		return PTR_ERR(page);
892 
893 	f2fs_wait_on_page_writeback(page, DATA, true, true);
894 	zero_user(page, start, len);
895 	set_page_dirty(page);
896 	f2fs_put_page(page, 1);
897 	return 0;
898 }
899 
900 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
901 {
902 	int err;
903 
904 	while (pg_start < pg_end) {
905 		struct dnode_of_data dn;
906 		pgoff_t end_offset, count;
907 
908 		set_new_dnode(&dn, inode, NULL, NULL, 0);
909 		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
910 		if (err) {
911 			if (err == -ENOENT) {
912 				pg_start = f2fs_get_next_page_offset(&dn,
913 								pg_start);
914 				continue;
915 			}
916 			return err;
917 		}
918 
919 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
920 		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
921 
922 		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
923 
924 		f2fs_truncate_data_blocks_range(&dn, count);
925 		f2fs_put_dnode(&dn);
926 
927 		pg_start += count;
928 	}
929 	return 0;
930 }
931 
932 static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
933 {
934 	pgoff_t pg_start, pg_end;
935 	loff_t off_start, off_end;
936 	int ret;
937 
938 	ret = f2fs_convert_inline_inode(inode);
939 	if (ret)
940 		return ret;
941 
942 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
943 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
944 
945 	off_start = offset & (PAGE_SIZE - 1);
946 	off_end = (offset + len) & (PAGE_SIZE - 1);
947 
948 	if (pg_start == pg_end) {
949 		ret = fill_zero(inode, pg_start, off_start,
950 						off_end - off_start);
951 		if (ret)
952 			return ret;
953 	} else {
954 		if (off_start) {
955 			ret = fill_zero(inode, pg_start++, off_start,
956 						PAGE_SIZE - off_start);
957 			if (ret)
958 				return ret;
959 		}
960 		if (off_end) {
961 			ret = fill_zero(inode, pg_end, 0, off_end);
962 			if (ret)
963 				return ret;
964 		}
965 
966 		if (pg_start < pg_end) {
967 			struct address_space *mapping = inode->i_mapping;
968 			loff_t blk_start, blk_end;
969 			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
970 
971 			f2fs_balance_fs(sbi, true);
972 
973 			blk_start = (loff_t)pg_start << PAGE_SHIFT;
974 			blk_end = (loff_t)pg_end << PAGE_SHIFT;
975 
976 			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
977 			down_write(&F2FS_I(inode)->i_mmap_sem);
978 
979 			truncate_inode_pages_range(mapping, blk_start,
980 					blk_end - 1);
981 
982 			f2fs_lock_op(sbi);
983 			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
984 			f2fs_unlock_op(sbi);
985 
986 			up_write(&F2FS_I(inode)->i_mmap_sem);
987 			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
988 		}
989 	}
990 
991 	return ret;
992 }
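/*
 * punch_hole() is reached via fallocate(2), and the VFS requires KEEP_SIZE
 * to accompany PUNCH_HOLE (illustrative call below). Partial pages at either
 * end are zeroed in place by fill_zero(); only the fully covered pages in
 * between have their blocks truncated:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len);
 */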
993 
994 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
995 				int *do_replace, pgoff_t off, pgoff_t len)
996 {
997 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
998 	struct dnode_of_data dn;
999 	int ret, done, i;
1000 
1001 next_dnode:
1002 	set_new_dnode(&dn, inode, NULL, NULL, 0);
1003 	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1004 	if (ret && ret != -ENOENT) {
1005 		return ret;
1006 	} else if (ret == -ENOENT) {
1007 		if (dn.max_level == 0)
1008 			return -ENOENT;
1009 		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) - dn.ofs_in_node,
1010 									len);
1011 		blkaddr += done;
1012 		do_replace += done;
1013 		goto next;
1014 	}
1015 
1016 	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1017 							dn.ofs_in_node, len);
1018 	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1019 		*blkaddr = datablock_addr(dn.inode,
1020 					dn.node_page, dn.ofs_in_node);
1021 
1022 		if (__is_valid_data_blkaddr(*blkaddr) &&
1023 			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
1024 					DATA_GENERIC_ENHANCE)) {
1025 			f2fs_put_dnode(&dn);
1026 			return -EFSCORRUPTED;
1027 		}
1028 
1029 		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1030 
1031 			if (test_opt(sbi, LFS)) {
1032 				f2fs_put_dnode(&dn);
1033 				return -ENOTSUPP;
1034 			}
1035 
1036 			/* do not invalidate this block address */
1037 			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1038 			*do_replace = 1;
1039 		}
1040 	}
1041 	f2fs_put_dnode(&dn);
1042 next:
1043 	len -= done;
1044 	off += done;
1045 	if (len)
1046 		goto next_dnode;
1047 	return 0;
1048 }
1049 
1050 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1051 				int *do_replace, pgoff_t off, int len)
1052 {
1053 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1054 	struct dnode_of_data dn;
1055 	int ret, i;
1056 
1057 	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1058 		if (*do_replace == 0)
1059 			continue;
1060 
1061 		set_new_dnode(&dn, inode, NULL, NULL, 0);
1062 		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1063 		if (ret) {
1064 			dec_valid_block_count(sbi, inode, 1);
1065 			f2fs_invalidate_blocks(sbi, *blkaddr);
1066 		} else {
1067 			f2fs_update_data_blkaddr(&dn, *blkaddr);
1068 		}
1069 		f2fs_put_dnode(&dn);
1070 	}
1071 	return 0;
1072 }
1073 
1074 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1075 			block_t *blkaddr, int *do_replace,
1076 			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1077 {
1078 	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1079 	pgoff_t i = 0;
1080 	int ret;
1081 
1082 	while (i < len) {
1083 		if (blkaddr[i] == NULL_ADDR && !full) {
1084 			i++;
1085 			continue;
1086 		}
1087 
1088 		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1089 			struct dnode_of_data dn;
1090 			struct node_info ni;
1091 			size_t new_size;
1092 			pgoff_t ilen;
1093 
1094 			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1095 			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1096 			if (ret)
1097 				return ret;
1098 
1099 			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
1100 			if (ret) {
1101 				f2fs_put_dnode(&dn);
1102 				return ret;
1103 			}
1104 
1105 			ilen = min((pgoff_t)
1106 				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1107 						dn.ofs_in_node, len - i);
1108 			do {
1109 				dn.data_blkaddr = datablock_addr(dn.inode,
1110 						dn.node_page, dn.ofs_in_node);
1111 				f2fs_truncate_data_blocks_range(&dn, 1);
1112 
1113 				if (do_replace[i]) {
1114 					f2fs_i_blocks_write(src_inode,
1115 							1, false, false);
1116 					f2fs_i_blocks_write(dst_inode,
1117 							1, true, false);
1118 					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1119 					blkaddr[i], ni.version, true, false);
1120 
1121 					do_replace[i] = 0;
1122 				}
1123 				dn.ofs_in_node++;
1124 				i++;
1125 				new_size = (dst + i) << PAGE_SHIFT;
1126 				if (dst_inode->i_size < new_size)
1127 					f2fs_i_size_write(dst_inode, new_size);
1128 			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1129 
1130 			f2fs_put_dnode(&dn);
1131 		} else {
1132 			struct page *psrc, *pdst;
1133 
1134 			psrc = f2fs_get_lock_data_page(src_inode,
1135 							src + i, true);
1136 			if (IS_ERR(psrc))
1137 				return PTR_ERR(psrc);
1138 			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1139 								true);
1140 			if (IS_ERR(pdst)) {
1141 				f2fs_put_page(psrc, 1);
1142 				return PTR_ERR(pdst);
1143 			}
1144 			f2fs_copy_page(psrc, pdst);
1145 			set_page_dirty(pdst);
1146 			f2fs_put_page(pdst, 1);
1147 			f2fs_put_page(psrc, 1);
1148 
1149 			ret = f2fs_truncate_hole(src_inode,
1150 						src + i, src + i + 1);
1151 			if (ret)
1152 				return ret;
1153 			i++;
1154 		}
1155 	}
1156 	return 0;
1157 }
1158 
1159 static int __exchange_data_block(struct inode *src_inode,
1160 			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1161 			pgoff_t len, bool full)
1162 {
1163 	block_t *src_blkaddr;
1164 	int *do_replace;
1165 	pgoff_t olen;
1166 	int ret;
1167 
1168 	while (len) {
1169 		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1170 
1171 		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1172 					array_size(olen, sizeof(block_t)),
1173 					GFP_KERNEL);
1174 		if (!src_blkaddr)
1175 			return -ENOMEM;
1176 
1177 		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1178 					array_size(olen, sizeof(int)),
1179 					GFP_KERNEL);
1180 		if (!do_replace) {
1181 			kvfree(src_blkaddr);
1182 			return -ENOMEM;
1183 		}
1184 
1185 		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1186 					do_replace, src, olen);
1187 		if (ret)
1188 			goto roll_back;
1189 
1190 		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1191 					do_replace, src, dst, olen, full);
1192 		if (ret)
1193 			goto roll_back;
1194 
1195 		src += olen;
1196 		dst += olen;
1197 		len -= olen;
1198 
1199 		kvfree(src_blkaddr);
1200 		kvfree(do_replace);
1201 	}
1202 	return 0;
1203 
1204 roll_back:
1205 	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1206 	kvfree(src_blkaddr);
1207 	kvfree(do_replace);
1208 	return ret;
1209 }
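/*
 * Note on the loop above: the exchange proceeds in chunks of at most
 * 4 * ADDRS_PER_BLOCK() block addresses, so the two scratch arrays
 * (src_blkaddr[] and do_replace[]) stay bounded; if a chunk fails, the
 * addresses read out so far are rolled back before returning.
 */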
1210 
1211 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1212 {
1213 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1214 	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1215 	pgoff_t start = offset >> PAGE_SHIFT;
1216 	pgoff_t end = (offset + len) >> PAGE_SHIFT;
1217 	int ret;
1218 
1219 	f2fs_balance_fs(sbi, true);
1220 
1221 	/* avoid gc operation during block exchange */
1222 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1223 	down_write(&F2FS_I(inode)->i_mmap_sem);
1224 
1225 	f2fs_lock_op(sbi);
1226 	f2fs_drop_extent_tree(inode);
1227 	truncate_pagecache(inode, offset);
1228 	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1229 	f2fs_unlock_op(sbi);
1230 
1231 	up_write(&F2FS_I(inode)->i_mmap_sem);
1232 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1233 	return ret;
1234 }
1235 
1236 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1237 {
1238 	loff_t new_size;
1239 	int ret;
1240 
1241 	if (offset + len >= i_size_read(inode))
1242 		return -EINVAL;
1243 
1244 	/* collapse range should be aligned to block size of f2fs. */
1245 	/* collapse range should be aligned to the f2fs block size */
1246 		return -EINVAL;
1247 
1248 	ret = f2fs_convert_inline_inode(inode);
1249 	if (ret)
1250 		return ret;
1251 
1252 	/* write out all dirty pages from offset */
1253 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1254 	if (ret)
1255 		return ret;
1256 
1257 	ret = f2fs_do_collapse(inode, offset, len);
1258 	if (ret)
1259 		return ret;
1260 
1261 	/* write out all moved pages, if possible */
1262 	down_write(&F2FS_I(inode)->i_mmap_sem);
1263 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1264 	truncate_pagecache(inode, offset);
1265 
1266 	new_size = i_size_read(inode) - len;
1267 	truncate_pagecache(inode, new_size);
1268 
1269 	ret = f2fs_truncate_blocks(inode, new_size, true);
1270 	up_write(&F2FS_I(inode)->i_mmap_sem);
1271 	if (!ret)
1272 		f2fs_i_size_write(inode, new_size);
1273 	return ret;
1274 }
1275 
1276 static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1277 								pgoff_t end)
1278 {
1279 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1280 	pgoff_t index = start;
1281 	unsigned int ofs_in_node = dn->ofs_in_node;
1282 	blkcnt_t count = 0;
1283 	int ret;
1284 
1285 	for (; index < end; index++, dn->ofs_in_node++) {
1286 		if (datablock_addr(dn->inode, dn->node_page,
1287 					dn->ofs_in_node) == NULL_ADDR)
1288 			count++;
1289 	}
1290 
1291 	dn->ofs_in_node = ofs_in_node;
1292 	ret = f2fs_reserve_new_blocks(dn, count);
1293 	if (ret)
1294 		return ret;
1295 
1296 	dn->ofs_in_node = ofs_in_node;
1297 	for (index = start; index < end; index++, dn->ofs_in_node++) {
1298 		dn->data_blkaddr = datablock_addr(dn->inode,
1299 					dn->node_page, dn->ofs_in_node);
1300 		/*
1301 		 * f2fs_reserve_new_blocks() does not guarantee that the
1302 		 * entire range gets allocated.
1303 		 */
1304 		if (dn->data_blkaddr == NULL_ADDR) {
1305 			ret = -ENOSPC;
1306 			break;
1307 		}
1308 		if (dn->data_blkaddr != NEW_ADDR) {
1309 			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1310 			dn->data_blkaddr = NEW_ADDR;
1311 			f2fs_set_data_blkaddr(dn);
1312 		}
1313 	}
1314 
1315 	f2fs_update_extent_cache_range(dn, start, 0, index - start);
1316 
1317 	return ret;
1318 }
1319 
1320 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1321 								int mode)
1322 {
1323 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1324 	struct address_space *mapping = inode->i_mapping;
1325 	pgoff_t index, pg_start, pg_end;
1326 	loff_t new_size = i_size_read(inode);
1327 	loff_t off_start, off_end;
1328 	int ret = 0;
1329 
1330 	ret = inode_newsize_ok(inode, (len + offset));
1331 	if (ret)
1332 		return ret;
1333 
1334 	ret = f2fs_convert_inline_inode(inode);
1335 	if (ret)
1336 		return ret;
1337 
1338 	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1339 	if (ret)
1340 		return ret;
1341 
1342 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1343 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1344 
1345 	off_start = offset & (PAGE_SIZE - 1);
1346 	off_end = (offset + len) & (PAGE_SIZE - 1);
1347 
1348 	if (pg_start == pg_end) {
1349 		ret = fill_zero(inode, pg_start, off_start,
1350 						off_end - off_start);
1351 		if (ret)
1352 			return ret;
1353 
1354 		new_size = max_t(loff_t, new_size, offset + len);
1355 	} else {
1356 		if (off_start) {
1357 			ret = fill_zero(inode, pg_start++, off_start,
1358 						PAGE_SIZE - off_start);
1359 			if (ret)
1360 				return ret;
1361 
1362 			new_size = max_t(loff_t, new_size,
1363 					(loff_t)pg_start << PAGE_SHIFT);
1364 		}
1365 
1366 		for (index = pg_start; index < pg_end;) {
1367 			struct dnode_of_data dn;
1368 			unsigned int end_offset;
1369 			pgoff_t end;
1370 
1371 			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1372 			down_write(&F2FS_I(inode)->i_mmap_sem);
1373 
1374 			truncate_pagecache_range(inode,
1375 				(loff_t)index << PAGE_SHIFT,
1376 				((loff_t)pg_end << PAGE_SHIFT) - 1);
1377 
1378 			f2fs_lock_op(sbi);
1379 
1380 			set_new_dnode(&dn, inode, NULL, NULL, 0);
1381 			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1382 			if (ret) {
1383 				f2fs_unlock_op(sbi);
1384 				up_write(&F2FS_I(inode)->i_mmap_sem);
1385 				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1386 				goto out;
1387 			}
1388 
1389 			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1390 			end = min(pg_end, end_offset - dn.ofs_in_node + index);
1391 
1392 			ret = f2fs_do_zero_range(&dn, index, end);
1393 			f2fs_put_dnode(&dn);
1394 
1395 			f2fs_unlock_op(sbi);
1396 			up_write(&F2FS_I(inode)->i_mmap_sem);
1397 			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1398 
1399 			f2fs_balance_fs(sbi, dn.node_changed);
1400 
1401 			if (ret)
1402 				goto out;
1403 
1404 			index = end;
1405 			new_size = max_t(loff_t, new_size,
1406 					(loff_t)index << PAGE_SHIFT);
1407 		}
1408 
1409 		if (off_end) {
1410 			ret = fill_zero(inode, pg_end, 0, off_end);
1411 			if (ret)
1412 				goto out;
1413 
1414 			new_size = max_t(loff_t, new_size, offset + len);
1415 		}
1416 	}
1417 
1418 out:
1419 	if (new_size > i_size_read(inode)) {
1420 		if (mode & FALLOC_FL_KEEP_SIZE)
1421 			file_set_keep_isize(inode);
1422 		else
1423 			f2fs_i_size_write(inode, new_size);
1424 	}
1425 	return ret;
1426 }
1427 
1428 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1429 {
1430 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1431 	pgoff_t nr, pg_start, pg_end, delta, idx;
1432 	loff_t new_size;
1433 	int ret = 0;
1434 
1435 	new_size = i_size_read(inode) + len;
1436 	ret = inode_newsize_ok(inode, new_size);
1437 	if (ret)
1438 		return ret;
1439 
1440 	if (offset >= i_size_read(inode))
1441 		return -EINVAL;
1442 
1443 	/* insert range should be aligned to block size of f2fs. */
1444 	/* insert range should be aligned to the f2fs block size */
1445 		return -EINVAL;
1446 
1447 	ret = f2fs_convert_inline_inode(inode);
1448 	if (ret)
1449 		return ret;
1450 
1451 	f2fs_balance_fs(sbi, true);
1452 
1453 	down_write(&F2FS_I(inode)->i_mmap_sem);
1454 	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1455 	up_write(&F2FS_I(inode)->i_mmap_sem);
1456 	if (ret)
1457 		return ret;
1458 
1459 	/* write out all dirty pages from offset */
1460 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1461 	if (ret)
1462 		return ret;
1463 
1464 	pg_start = offset >> PAGE_SHIFT;
1465 	pg_end = (offset + len) >> PAGE_SHIFT;
1466 	delta = pg_end - pg_start;
1467 	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1468 
1469 	/* avoid gc operation during block exchange */
1470 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1471 	down_write(&F2FS_I(inode)->i_mmap_sem);
1472 	truncate_pagecache(inode, offset);
1473 
1474 	while (!ret && idx > pg_start) {
1475 		nr = idx - pg_start;
1476 		if (nr > delta)
1477 			nr = delta;
1478 		idx -= nr;
1479 
1480 		f2fs_lock_op(sbi);
1481 		f2fs_drop_extent_tree(inode);
1482 
1483 		ret = __exchange_data_block(inode, inode, idx,
1484 					idx + delta, nr, false);
1485 		f2fs_unlock_op(sbi);
1486 	}
1487 	up_write(&F2FS_I(inode)->i_mmap_sem);
1488 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1489 
1490 	/* write out all moved pages, if possible */
1491 	down_write(&F2FS_I(inode)->i_mmap_sem);
1492 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1493 	truncate_pagecache(inode, offset);
1494 	up_write(&F2FS_I(inode)->i_mmap_sem);
1495 
1496 	if (!ret)
1497 		f2fs_i_size_write(inode, new_size);
1498 	return ret;
1499 }
1500 
1501 static int expand_inode_data(struct inode *inode, loff_t offset,
1502 					loff_t len, int mode)
1503 {
1504 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1505 	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1506 			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1507 			.m_may_create = true };
1508 	pgoff_t pg_end;
1509 	loff_t new_size = i_size_read(inode);
1510 	loff_t off_end;
1511 	int err;
1512 
1513 	err = inode_newsize_ok(inode, (len + offset));
1514 	if (err)
1515 		return err;
1516 
1517 	err = f2fs_convert_inline_inode(inode);
1518 	if (err)
1519 		return err;
1520 
1521 	f2fs_balance_fs(sbi, true);
1522 
1523 	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1524 	off_end = (offset + len) & (PAGE_SIZE - 1);
1525 
1526 	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
1527 	map.m_len = pg_end - map.m_lblk;
1528 	if (off_end)
1529 		map.m_len++;
1530 
1531 	if (f2fs_is_pinned_file(inode))
1532 		map.m_seg_type = CURSEG_COLD_DATA;
1533 
1534 	err = f2fs_map_blocks(inode, &map, 1, (f2fs_is_pinned_file(inode) ?
1535 						F2FS_GET_BLOCK_PRE_DIO :
1536 						F2FS_GET_BLOCK_PRE_AIO));
1537 	if (err) {
1538 		pgoff_t last_off;
1539 
1540 		if (!map.m_len)
1541 			return err;
1542 
1543 		last_off = map.m_lblk + map.m_len - 1;
1544 
1545 		/* update new size to the failed position */
1546 		new_size = (last_off == pg_end) ? offset + len :
1547 					(loff_t)(last_off + 1) << PAGE_SHIFT;
1548 	} else {
1549 		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1550 	}
1551 
1552 	if (new_size > i_size_read(inode)) {
1553 		if (mode & FALLOC_FL_KEEP_SIZE)
1554 			file_set_keep_isize(inode);
1555 		else
1556 			f2fs_i_size_write(inode, new_size);
1557 	}
1558 
1559 	return err;
1560 }
1561 
1562 static long f2fs_fallocate(struct file *file, int mode,
1563 				loff_t offset, loff_t len)
1564 {
1565 	struct inode *inode = file_inode(file);
1566 	long ret = 0;
1567 
1568 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1569 		return -EIO;
1570 
1571 	/* f2fs only supports ->fallocate for regular files */
1572 	if (!S_ISREG(inode->i_mode))
1573 		return -EINVAL;
1574 
1575 	if (IS_ENCRYPTED(inode) &&
1576 		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1577 		return -EOPNOTSUPP;
1578 
1579 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1580 			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1581 			FALLOC_FL_INSERT_RANGE))
1582 		return -EOPNOTSUPP;
1583 
1584 	inode_lock(inode);
1585 
1586 	if (mode & FALLOC_FL_PUNCH_HOLE) {
1587 		if (offset >= inode->i_size)
1588 			goto out;
1589 
1590 		ret = punch_hole(inode, offset, len);
1591 	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1592 		ret = f2fs_collapse_range(inode, offset, len);
1593 	} else if (mode & FALLOC_FL_ZERO_RANGE) {
1594 		ret = f2fs_zero_range(inode, offset, len, mode);
1595 	} else if (mode & FALLOC_FL_INSERT_RANGE) {
1596 		ret = f2fs_insert_range(inode, offset, len);
1597 	} else {
1598 		ret = expand_inode_data(inode, offset, len, mode);
1599 	}
1600 
1601 	if (!ret) {
1602 		inode->i_mtime = inode->i_ctime = current_time(inode);
1603 		f2fs_mark_inode_dirty_sync(inode, false);
1604 		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1605 	}
1606 
1607 out:
1608 	inode_unlock(inode);
1609 
1610 	trace_f2fs_fallocate(inode, mode, offset, len, ret);
1611 	return ret;
1612 }
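/*
 * Layout-changing modes, sketched from userspace (illustrative). Collapse
 * and insert shift file contents and therefore must be block aligned:
 *
 *	// remove [off, off + len) and shift the tail down; i_size shrinks
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, off, len);
 *	// shift the tail up, opening a hole at off; i_size grows by len
 *	fallocate(fd, FALLOC_FL_INSERT_RANGE, off, len);
 *
 * Both fail with EINVAL if off or len is not a multiple of F2FS_BLKSIZE.
 */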
1613 
1614 static int f2fs_release_file(struct inode *inode, struct file *filp)
1615 {
1616 	/*
1617 	 * f2fs_release_file() is called on every close. So we should not
1618 	 * drop any in-memory pages on a close issued by another process.
1619 	 */
1620 	if (!(filp->f_mode & FMODE_WRITE) ||
1621 			atomic_read(&inode->i_writecount) != 1)
1622 		return 0;
1623 
1624 	/* any remaining atomic pages should be discarded */
1625 	if (f2fs_is_atomic_file(inode))
1626 		f2fs_drop_inmem_pages(inode);
1627 	if (f2fs_is_volatile_file(inode)) {
1628 		set_inode_flag(inode, FI_DROP_CACHE);
1629 		filemap_fdatawrite(inode->i_mapping);
1630 		clear_inode_flag(inode, FI_DROP_CACHE);
1631 		clear_inode_flag(inode, FI_VOLATILE_FILE);
1632 		stat_dec_volatile_write(inode);
1633 	}
1634 	return 0;
1635 }
1636 
1637 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1638 {
1639 	struct inode *inode = file_inode(file);
1640 
1641 	/*
1642 	 * If the process doing a transaction has crashed, we should roll
1643 	 * back. Otherwise, other readers/writers can see a corrupted database
1644 	 * until all the writers close the file. Since this must be done
1645 	 * before dropping the file lock, it has to happen in ->flush.
1646 	 */
1647 	if (f2fs_is_atomic_file(inode) &&
1648 			F2FS_I(inode)->inmem_task == current)
1649 		f2fs_drop_inmem_pages(inode);
1650 	return 0;
1651 }
1652 
1653 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1654 {
1655 	struct f2fs_inode_info *fi = F2FS_I(inode);
1656 	u32 oldflags;
1657 
1658 	/* Is it a quota file? Do not allow the user to mess with it */
1659 	if (IS_NOQUOTA(inode))
1660 		return -EPERM;
1661 
1662 	oldflags = fi->i_flags;
1663 
1664 	if ((iflags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL))
1665 		if (!capable(CAP_LINUX_IMMUTABLE))
1666 			return -EPERM;
1667 
1668 	fi->i_flags = iflags | (oldflags & ~mask);
1669 
1670 	if (fi->i_flags & F2FS_PROJINHERIT_FL)
1671 		set_inode_flag(inode, FI_PROJ_INHERIT);
1672 	else
1673 		clear_inode_flag(inode, FI_PROJ_INHERIT);
1674 
1675 	inode->i_ctime = current_time(inode);
1676 	f2fs_set_inode_flags(inode);
1677 	f2fs_mark_inode_dirty_sync(inode, true);
1678 	return 0;
1679 }
1680 
1681 /* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */
1682 
1683 /*
1684  * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1685  * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1686  * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
1687  * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1688  */
1689 
1690 static const struct {
1691 	u32 iflag;
1692 	u32 fsflag;
1693 } f2fs_fsflags_map[] = {
1694 	{ F2FS_SYNC_FL,		FS_SYNC_FL },
1695 	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
1696 	{ F2FS_APPEND_FL,	FS_APPEND_FL },
1697 	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
1698 	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
1699 	{ F2FS_INDEX_FL,	FS_INDEX_FL },
1700 	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
1701 	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
1702 };
1703 
1704 #define F2FS_GETTABLE_FS_FL (		\
1705 		FS_SYNC_FL |		\
1706 		FS_IMMUTABLE_FL |	\
1707 		FS_APPEND_FL |		\
1708 		FS_NODUMP_FL |		\
1709 		FS_NOATIME_FL |		\
1710 		FS_INDEX_FL |		\
1711 		FS_DIRSYNC_FL |		\
1712 		FS_PROJINHERIT_FL |	\
1713 		FS_ENCRYPT_FL |		\
1714 		FS_INLINE_DATA_FL |	\
1715 		FS_NOCOW_FL)
1716 
1717 #define F2FS_SETTABLE_FS_FL (		\
1718 		FS_SYNC_FL |		\
1719 		FS_IMMUTABLE_FL |	\
1720 		FS_APPEND_FL |		\
1721 		FS_NODUMP_FL |		\
1722 		FS_NOATIME_FL |		\
1723 		FS_DIRSYNC_FL |		\
1724 		FS_PROJINHERIT_FL)
1725 
1726 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1727 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1728 {
1729 	u32 fsflags = 0;
1730 	int i;
1731 
1732 	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1733 		if (iflags & f2fs_fsflags_map[i].iflag)
1734 			fsflags |= f2fs_fsflags_map[i].fsflag;
1735 
1736 	return fsflags;
1737 }
1738 
1739 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1740 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1741 {
1742 	u32 iflags = 0;
1743 	int i;
1744 
1745 	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1746 		if (fsflags & f2fs_fsflags_map[i].fsflag)
1747 			iflags |= f2fs_fsflags_map[i].iflag;
1748 
1749 	return iflags;
1750 }
1751 
1752 static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
1753 {
1754 	struct inode *inode = file_inode(filp);
1755 	struct f2fs_inode_info *fi = F2FS_I(inode);
1756 	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1757 
1758 	if (IS_ENCRYPTED(inode))
1759 		fsflags |= FS_ENCRYPT_FL;
1760 	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
1761 		fsflags |= FS_INLINE_DATA_FL;
1762 	if (is_inode_flag_set(inode, FI_PIN_FILE))
1763 		fsflags |= FS_NOCOW_FL;
1764 
1765 	fsflags &= F2FS_GETTABLE_FS_FL;
1766 
1767 	return put_user(fsflags, (int __user *)arg);
1768 }
1769 
1770 static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
1771 {
1772 	struct inode *inode = file_inode(filp);
1773 	u32 fsflags;
1774 	u32 iflags;
1775 	int ret;
1776 
1777 	if (!inode_owner_or_capable(inode))
1778 		return -EACCES;
1779 
1780 	if (get_user(fsflags, (int __user *)arg))
1781 		return -EFAULT;
1782 
1783 	if (fsflags & ~F2FS_GETTABLE_FS_FL)
1784 		return -EOPNOTSUPP;
1785 	fsflags &= F2FS_SETTABLE_FS_FL;
1786 
1787 	iflags = f2fs_fsflags_to_iflags(fsflags);
1788 	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
1789 		return -EOPNOTSUPP;
1790 
1791 	ret = mnt_want_write_file(filp);
1792 	if (ret)
1793 		return ret;
1794 
1795 	inode_lock(inode);
1796 
1797 	ret = f2fs_setflags_common(inode, iflags,
1798 			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
1799 	inode_unlock(inode);
1800 	mnt_drop_write_file(filp);
1801 	return ret;
1802 }
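/*
 * These two ioctls back chattr(1)/lsattr(1). A hedged sketch using the
 * generic FS_IOC_*FLAGS interface from <linux/fs.h>:
 *
 *	int flags;
 *	ioctl(fd, FS_IOC_GETFLAGS, &flags);
 *	flags |= FS_NODUMP_FL;		// must be in F2FS_SETTABLE_FS_FL
 *	ioctl(fd, FS_IOC_SETFLAGS, &flags);
 *
 * Flags outside F2FS_GETTABLE_FS_FL are rejected with EOPNOTSUPP.
 */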
1803 
1804 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
1805 {
1806 	struct inode *inode = file_inode(filp);
1807 
1808 	return put_user(inode->i_generation, (int __user *)arg);
1809 }
1810 
1811 static int f2fs_ioc_start_atomic_write(struct file *filp)
1812 {
1813 	struct inode *inode = file_inode(filp);
1814 	int ret;
1815 
1816 	if (!inode_owner_or_capable(inode))
1817 		return -EACCES;
1818 
1819 	if (!S_ISREG(inode->i_mode))
1820 		return -EINVAL;
1821 
1822 	ret = mnt_want_write_file(filp);
1823 	if (ret)
1824 		return ret;
1825 
1826 	inode_lock(inode);
1827 
1828 	if (f2fs_is_atomic_file(inode)) {
1829 		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
1830 			ret = -EINVAL;
1831 		goto out;
1832 	}
1833 
1834 	ret = f2fs_convert_inline_inode(inode);
1835 	if (ret)
1836 		goto out;
1837 
1838 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1839 
1840 	/*
1841 	 * We should wait for end_io so that F2FS_WB_CP_DATA is counted
1842 	 * correctly by f2fs_is_atomic_file().
1843 	 */
1844 	if (get_dirty_pages(inode))
1845 		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
1846 			  inode->i_ino, get_dirty_pages(inode));
1847 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
1848 	if (ret) {
1849 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1850 		goto out;
1851 	}
1852 
1853 	set_inode_flag(inode, FI_ATOMIC_FILE);
1854 	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
1855 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1856 
1857 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1858 	F2FS_I(inode)->inmem_task = current;
1859 	stat_inc_atomic_write(inode);
1860 	stat_update_max_atomic_write(inode);
1861 out:
1862 	inode_unlock(inode);
1863 	mnt_drop_write_file(filp);
1864 	return ret;
1865 }
1866 
1867 static int f2fs_ioc_commit_atomic_write(struct file *filp)
1868 {
1869 	struct inode *inode = file_inode(filp);
1870 	int ret;
1871 
1872 	if (!inode_owner_or_capable(inode))
1873 		return -EACCES;
1874 
1875 	ret = mnt_want_write_file(filp);
1876 	if (ret)
1877 		return ret;
1878 
1879 	f2fs_balance_fs(F2FS_I_SB(inode), true);
1880 
1881 	inode_lock(inode);
1882 
1883 	if (f2fs_is_volatile_file(inode)) {
1884 		ret = -EINVAL;
1885 		goto err_out;
1886 	}
1887 
1888 	if (f2fs_is_atomic_file(inode)) {
1889 		ret = f2fs_commit_inmem_pages(inode);
1890 		if (ret)
1891 			goto err_out;
1892 
1893 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
1894 		if (!ret) {
1895 			clear_inode_flag(inode, FI_ATOMIC_FILE);
1896 			F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
1897 			stat_dec_atomic_write(inode);
1898 		}
1899 	} else {
1900 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
1901 	}
1902 err_out:
1903 	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
1904 		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
1905 		ret = -EINVAL;
1906 	}
1907 	inode_unlock(inode);
1908 	mnt_drop_write_file(filp);
1909 	return ret;
1910 }
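/*
 * Typical atomic-write sequence (illustrative; F2FS_IOC_START_ATOMIC_WRITE
 * and F2FS_IOC_COMMIT_ATOMIC_WRITE are f2fs private ioctls, used by e.g.
 * SQLite on Android):
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	write(fd, buf1, len1);		// staged as in-memory pages
 *	write(fd, buf2, len2);
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);	// all-or-nothing flush
 */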
1911 
1912 static int f2fs_ioc_start_volatile_write(struct file *filp)
1913 {
1914 	struct inode *inode = file_inode(filp);
1915 	int ret;
1916 
1917 	if (!inode_owner_or_capable(inode))
1918 		return -EACCES;
1919 
1920 	if (!S_ISREG(inode->i_mode))
1921 		return -EINVAL;
1922 
1923 	ret = mnt_want_write_file(filp);
1924 	if (ret)
1925 		return ret;
1926 
1927 	inode_lock(inode);
1928 
1929 	if (f2fs_is_volatile_file(inode))
1930 		goto out;
1931 
1932 	ret = f2fs_convert_inline_inode(inode);
1933 	if (ret)
1934 		goto out;
1935 
1936 	stat_inc_volatile_write(inode);
1937 	stat_update_max_volatile_write(inode);
1938 
1939 	set_inode_flag(inode, FI_VOLATILE_FILE);
1940 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1941 out:
1942 	inode_unlock(inode);
1943 	mnt_drop_write_file(filp);
1944 	return ret;
1945 }
1946 
1947 static int f2fs_ioc_release_volatile_write(struct file *filp)
1948 {
1949 	struct inode *inode = file_inode(filp);
1950 	int ret;
1951 
1952 	if (!inode_owner_or_capable(inode))
1953 		return -EACCES;
1954 
1955 	ret = mnt_want_write_file(filp);
1956 	if (ret)
1957 		return ret;
1958 
1959 	inode_lock(inode);
1960 
1961 	if (!f2fs_is_volatile_file(inode))
1962 		goto out;
1963 
1964 	if (!f2fs_is_first_block_written(inode)) {
1965 		ret = truncate_partial_data_page(inode, 0, true);
1966 		goto out;
1967 	}
1968 
1969 	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
1970 out:
1971 	inode_unlock(inode);
1972 	mnt_drop_write_file(filp);
1973 	return ret;
1974 }
1975 
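/*
 * F2FS_IOC_ABORT_VOLATILE_WRITE: drop any in-memory atomic pages and, for a
 * volatile file, clear the volatile state and sync the remaining data.
 */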
1976 static int f2fs_ioc_abort_volatile_write(struct file *filp)
1977 {
1978 	struct inode *inode = file_inode(filp);
1979 	int ret;
1980 
1981 	if (!inode_owner_or_capable(inode))
1982 		return -EACCES;
1983 
1984 	ret = mnt_want_write_file(filp);
1985 	if (ret)
1986 		return ret;
1987 
1988 	inode_lock(inode);
1989 
1990 	if (f2fs_is_atomic_file(inode))
1991 		f2fs_drop_inmem_pages(inode);
1992 	if (f2fs_is_volatile_file(inode)) {
1993 		clear_inode_flag(inode, FI_VOLATILE_FILE);
1994 		stat_dec_volatile_write(inode);
1995 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
1996 	}
1997 
1998 	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
1999 
2000 	inode_unlock(inode);
2001 
2002 	mnt_drop_write_file(filp);
2003 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2004 	return ret;
2005 }
2006 
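/*
 * F2FS_IOC_SHUTDOWN: bring the filesystem down in one of several modes,
 * from a full freeze-and-sync to an immediate stop with no sync; on
 * success (except the NEED_FSCK mode, which returns after its checkpoint)
 * the GC and discard threads are stopped as well.
 *
 * Userspace sketch (hypothetical fd; flag values as defined for this
 * kernel):
 *	__u32 flags = F2FS_GOING_DOWN_METASYNC;
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &flags);
 */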
2007 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2008 {
2009 	struct inode *inode = file_inode(filp);
2010 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2011 	struct super_block *sb = sbi->sb;
2012 	__u32 in;
2013 	int ret = 0;
2014 
2015 	if (!capable(CAP_SYS_ADMIN))
2016 		return -EPERM;
2017 
2018 	if (get_user(in, (__u32 __user *)arg))
2019 		return -EFAULT;
2020 
2021 	if (in != F2FS_GOING_DOWN_FULLSYNC) {
2022 		ret = mnt_want_write_file(filp);
2023 		if (ret)
2024 			return ret;
2025 	}
2026 
2027 	switch (in) {
2028 	case F2FS_GOING_DOWN_FULLSYNC:
2029 		sb = freeze_bdev(sb->s_bdev);
2030 		if (IS_ERR(sb)) {
2031 			ret = PTR_ERR(sb);
2032 			goto out;
2033 		}
2034 		if (sb) {
2035 			f2fs_stop_checkpoint(sbi, false);
2036 			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2037 			thaw_bdev(sb->s_bdev, sb);
2038 		}
2039 		break;
2040 	case F2FS_GOING_DOWN_METASYNC:
2041 		/* do checkpoint only */
2042 		ret = f2fs_sync_fs(sb, 1);
2043 		if (ret)
2044 			goto out;
2045 		f2fs_stop_checkpoint(sbi, false);
2046 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2047 		break;
2048 	case F2FS_GOING_DOWN_NOSYNC:
2049 		f2fs_stop_checkpoint(sbi, false);
2050 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2051 		break;
2052 	case F2FS_GOING_DOWN_METAFLUSH:
2053 		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2054 		f2fs_stop_checkpoint(sbi, false);
2055 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2056 		break;
2057 	case F2FS_GOING_DOWN_NEED_FSCK:
2058 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2059 		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2060 		set_sbi_flag(sbi, SBI_IS_DIRTY);
2061 		/* do checkpoint only */
2062 		ret = f2fs_sync_fs(sb, 1);
2063 		goto out;
2064 	default:
2065 		ret = -EINVAL;
2066 		goto out;
2067 	}
2068 
2069 	f2fs_stop_gc_thread(sbi);
2070 	f2fs_stop_discard_thread(sbi);
2071 
2072 	f2fs_drop_discard_cmd(sbi);
2073 	clear_opt(sbi, DISCARD);
2074 
2075 	f2fs_update_time(sbi, REQ_TIME);
2076 out:
2077 	if (in != F2FS_GOING_DOWN_FULLSYNC)
2078 		mnt_drop_write_file(filp);
2079 
2080 	trace_f2fs_shutdown(sbi, in, ret);
2081 
2082 	return ret;
2083 }
2084 
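/*
 * FITRIM: discard unused blocks within the requested range, clamping the
 * minimum extent length to the device's discard granularity; the number of
 * trimmed bytes is reported back through the fstrim_range structure.
 *
 * Userspace sketch (hypothetical fd; struct fstrim_range from <linux/fs.h>):
 *	struct fstrim_range r = { .start = 0, .len = ULLONG_MAX, .minlen = 0 };
 *	ioctl(fd, FITRIM, &r);
 */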
2085 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2086 {
2087 	struct inode *inode = file_inode(filp);
2088 	struct super_block *sb = inode->i_sb;
2089 	struct request_queue *q = bdev_get_queue(sb->s_bdev);
2090 	struct fstrim_range range;
2091 	int ret;
2092 
2093 	if (!capable(CAP_SYS_ADMIN))
2094 		return -EPERM;
2095 
2096 	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2097 		return -EOPNOTSUPP;
2098 
2099 	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2100 				sizeof(range)))
2101 		return -EFAULT;
2102 
2103 	ret = mnt_want_write_file(filp);
2104 	if (ret)
2105 		return ret;
2106 
2107 	range.minlen = max((unsigned int)range.minlen,
2108 				q->limits.discard_granularity);
2109 	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2110 	mnt_drop_write_file(filp);
2111 	if (ret < 0)
2112 		return ret;
2113 
2114 	if (copy_to_user((struct fstrim_range __user *)arg, &range,
2115 				sizeof(range)))
2116 		return -EFAULT;
2117 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2118 	return 0;
2119 }
2120 
2121 static bool uuid_is_nonzero(__u8 u[16])
2122 {
2123 	int i;
2124 
2125 	for (i = 0; i < 16; i++)
2126 		if (u[i])
2127 			return true;
2128 	return false;
2129 }
2130 
2131 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2132 {
2133 	struct inode *inode = file_inode(filp);
2134 
2135 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2136 		return -EOPNOTSUPP;
2137 
2138 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2139 
2140 	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2141 }
2142 
2143 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2144 {
2145 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2146 		return -EOPNOTSUPP;
2147 	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2148 }
2149 
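/*
 * F2FS_IOC_GET_ENCRYPTION_PWSALT: return the filesystem's 16-byte password
 * salt, generating a random one and committing it to the superblock on
 * first use.
 */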
2150 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2151 {
2152 	struct inode *inode = file_inode(filp);
2153 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2154 	int err;
2155 
2156 	if (!f2fs_sb_has_encrypt(sbi))
2157 		return -EOPNOTSUPP;
2158 
2159 	err = mnt_want_write_file(filp);
2160 	if (err)
2161 		return err;
2162 
2163 	down_write(&sbi->sb_lock);
2164 
2165 	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2166 		goto got_it;
2167 
2168 	/* update superblock with uuid */
2169 	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2170 
2171 	err = f2fs_commit_super(sbi, false);
2172 	if (err) {
2173 		/* undo new data */
2174 		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2175 		goto out_err;
2176 	}
2177 got_it:
2178 	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2179 									16))
2180 		err = -EFAULT;
2181 out_err:
2182 	up_write(&sbi->sb_lock);
2183 	mnt_drop_write_file(filp);
2184 	return err;
2185 }
2186 
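/*
 * F2FS_IOC_GARBAGE_COLLECT: run one round of garbage collection. A non-zero
 * argument requests synchronous GC and blocks on gc_mutex; zero only tries
 * the lock and returns -EBUSY if GC is already running.
 *
 * Userspace sketch (hypothetical fd):
 *	__u32 sync = 1;
 *	ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
 */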
2187 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2188 {
2189 	struct inode *inode = file_inode(filp);
2190 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2191 	__u32 sync;
2192 	int ret;
2193 
2194 	if (!capable(CAP_SYS_ADMIN))
2195 		return -EPERM;
2196 
2197 	if (get_user(sync, (__u32 __user *)arg))
2198 		return -EFAULT;
2199 
2200 	if (f2fs_readonly(sbi->sb))
2201 		return -EROFS;
2202 
2203 	ret = mnt_want_write_file(filp);
2204 	if (ret)
2205 		return ret;
2206 
2207 	if (!sync) {
2208 		if (!mutex_trylock(&sbi->gc_mutex)) {
2209 			ret = -EBUSY;
2210 			goto out;
2211 		}
2212 	} else {
2213 		mutex_lock(&sbi->gc_mutex);
2214 	}
2215 
2216 	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
2217 out:
2218 	mnt_drop_write_file(filp);
2219 	return ret;
2220 }
2221 
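/*
 * F2FS_IOC_GARBAGE_COLLECT_RANGE: garbage-collect the sections that cover
 * the given block range, advancing one section per f2fs_gc() pass.
 */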
2222 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2223 {
2224 	struct inode *inode = file_inode(filp);
2225 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2226 	struct f2fs_gc_range range;
2227 	u64 end;
2228 	int ret;
2229 
2230 	if (!capable(CAP_SYS_ADMIN))
2231 		return -EPERM;
2232 
2233 	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2234 							sizeof(range)))
2235 		return -EFAULT;
2236 
2237 	if (f2fs_readonly(sbi->sb))
2238 		return -EROFS;
2239 
2240 	end = range.start + range.len;
2241 	if (range.start < MAIN_BLKADDR(sbi) ||
2242 			end >= MAX_BLKADDR(sbi))
2243 		return -EINVAL;
2244 
2245 	ret = mnt_want_write_file(filp);
2246 	if (ret)
2247 		return ret;
2248 
2249 do_more:
2250 	if (!range.sync) {
2251 		if (!mutex_trylock(&sbi->gc_mutex)) {
2252 			ret = -EBUSY;
2253 			goto out;
2254 		}
2255 	} else {
2256 		mutex_lock(&sbi->gc_mutex);
2257 	}
2258 
2259 	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
2260 	range.start += BLKS_PER_SEC(sbi);
2261 	if (range.start <= end)
2262 		goto do_more;
2263 out:
2264 	mnt_drop_write_file(filp);
2265 	return ret;
2266 }
2267 
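/*
 * F2FS_IOC_WRITE_CHECKPOINT: force a checkpoint through f2fs_sync_fs();
 * refused while checkpointing is disabled (SBI_CP_DISABLED).
 */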
2268 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2269 {
2270 	struct inode *inode = file_inode(filp);
2271 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2272 	int ret;
2273 
2274 	if (!capable(CAP_SYS_ADMIN))
2275 		return -EPERM;
2276 
2277 	if (f2fs_readonly(sbi->sb))
2278 		return -EROFS;
2279 
2280 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2281 		f2fs_info(sbi, "Skipping checkpoint: checkpoints are currently disabled.");
2282 		return -EINVAL;
2283 	}
2284 
2285 	ret = mnt_want_write_file(filp);
2286 	if (ret)
2287 		return ret;
2288 
2289 	ret = f2fs_sync_fs(sbi->sb, 1);
2290 
2291 	mnt_drop_write_file(filp);
2292 	return ret;
2293 }
2294 
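/*
 * Core of F2FS_IOC_DEFRAGMENT: write the range back, detect whether its
 * physical blocks are fragmented and, if so, redirty the pages one segment
 * at a time so that writeback reallocates them contiguously.
 */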
2295 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2296 					struct file *filp,
2297 					struct f2fs_defragment *range)
2298 {
2299 	struct inode *inode = file_inode(filp);
2300 	struct f2fs_map_blocks map = { .m_next_extent = NULL,
2301 					.m_seg_type = NO_CHECK_TYPE,
2302 					.m_may_create = false };
2303 	struct extent_info ei = {0, 0, 0};
2304 	pgoff_t pg_start, pg_end, next_pgofs;
2305 	unsigned int blk_per_seg = sbi->blocks_per_seg;
2306 	unsigned int total = 0, sec_num;
2307 	block_t blk_end = 0;
2308 	bool fragmented = false;
2309 	int err;
2310 
2311 	/* if in-place-update policy is enabled, don't waste time here */
2312 	if (f2fs_should_update_inplace(inode, NULL))
2313 		return -EINVAL;
2314 
2315 	pg_start = range->start >> PAGE_SHIFT;
2316 	pg_end = (range->start + range->len) >> PAGE_SHIFT;
2317 
2318 	f2fs_balance_fs(sbi, true);
2319 
2320 	inode_lock(inode);
2321 
2322 	/* write back all dirty pages in the range */
2323 	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2324 						range->start + range->len - 1);
2325 	if (err)
2326 		goto out;
2327 
2328 	/*
2329 	 * Look up mapping info in the extent cache; skip defragmenting if the
2330 	 * physical block addresses are already contiguous.
2331 	 */
2332 	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2333 		if (ei.fofs + ei.len >= pg_end)
2334 			goto out;
2335 	}
2336 
2337 	map.m_lblk = pg_start;
2338 	map.m_next_pgofs = &next_pgofs;
2339 
2340 	/*
2341 	 * Look up mapping info in the dnode page cache; skip defragmenting if
2342 	 * all physical block addresses are contiguous, even when there are
2343 	 * holes in the logical blocks.
2344 	 */
2345 	while (map.m_lblk < pg_end) {
2346 		map.m_len = pg_end - map.m_lblk;
2347 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2348 		if (err)
2349 			goto out;
2350 
2351 		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2352 			map.m_lblk = next_pgofs;
2353 			continue;
2354 		}
2355 
2356 		if (blk_end && blk_end != map.m_pblk)
2357 			fragmented = true;
2358 
2359 		/* record the total count of blocks that we're going to move */
2360 		total += map.m_len;
2361 
2362 		blk_end = map.m_pblk + map.m_len;
2363 
2364 		map.m_lblk += map.m_len;
2365 	}
2366 
2367 	if (!fragmented)
2368 		goto out;
2369 
2370 	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2371 
2372 	/*
2373 	 * Make sure there are enough free sections for LFS allocation; this
2374 	 * avoids running defragmentation in SSR mode when free sections are
2375 	 * being allocated intensively.
2376 	 */
2377 	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2378 		err = -EAGAIN;
2379 		goto out;
2380 	}
2381 
2382 	map.m_lblk = pg_start;
2383 	map.m_len = pg_end - pg_start;
2384 	total = 0;
2385 
2386 	while (map.m_lblk < pg_end) {
2387 		pgoff_t idx;
2388 		int cnt = 0;
2389 
2390 do_map:
2391 		map.m_len = pg_end - map.m_lblk;
2392 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2393 		if (err)
2394 			goto clear_out;
2395 
2396 		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2397 			map.m_lblk = next_pgofs;
2398 			continue;
2399 		}
2400 
2401 		set_inode_flag(inode, FI_DO_DEFRAG);
2402 
2403 		idx = map.m_lblk;
2404 		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2405 			struct page *page;
2406 
2407 			page = f2fs_get_lock_data_page(inode, idx, true);
2408 			if (IS_ERR(page)) {
2409 				err = PTR_ERR(page);
2410 				goto clear_out;
2411 			}
2412 
2413 			set_page_dirty(page);
2414 			f2fs_put_page(page, 1);
2415 
2416 			idx++;
2417 			cnt++;
2418 			total++;
2419 		}
2420 
2421 		map.m_lblk = idx;
2422 
2423 		if (idx < pg_end && cnt < blk_per_seg)
2424 			goto do_map;
2425 
2426 		clear_inode_flag(inode, FI_DO_DEFRAG);
2427 
2428 		err = filemap_fdatawrite(inode->i_mapping);
2429 		if (err)
2430 			goto out;
2431 	}
2432 clear_out:
2433 	clear_inode_flag(inode, FI_DO_DEFRAG);
2434 out:
2435 	inode_unlock(inode);
2436 	if (!err)
2437 		range->len = (u64)total << PAGE_SHIFT;
2438 	return err;
2439 }
2440 
2441 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2442 {
2443 	struct inode *inode = file_inode(filp);
2444 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2445 	struct f2fs_defragment range;
2446 	int err;
2447 
2448 	if (!capable(CAP_SYS_ADMIN))
2449 		return -EPERM;
2450 
2451 	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2452 		return -EINVAL;
2453 
2454 	if (f2fs_readonly(sbi->sb))
2455 		return -EROFS;
2456 
2457 	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2458 							sizeof(range)))
2459 		return -EFAULT;
2460 
2461 	/* verify alignment of offset & size */
2462 	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2463 		return -EINVAL;
2464 
2465 	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2466 					sbi->max_file_blocks))
2467 		return -EINVAL;
2468 
2469 	err = mnt_want_write_file(filp);
2470 	if (err)
2471 		return err;
2472 
2473 	err = f2fs_defragment_range(sbi, filp, &range);
2474 	mnt_drop_write_file(filp);
2475 
2476 	f2fs_update_time(sbi, REQ_TIME);
2477 	if (err < 0)
2478 		return err;
2479 
2480 	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2481 							sizeof(range)))
2482 		return -EFAULT;
2483 
2484 	return 0;
2485 }
2486 
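/*
 * Core of F2FS_IOC_MOVE_RANGE: move a block-aligned range of data from one
 * file to another on the same filesystem by exchanging the underlying
 * blocks, growing the destination's i_size when the move extends it.
 */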
2487 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2488 			struct file *file_out, loff_t pos_out, size_t len)
2489 {
2490 	struct inode *src = file_inode(file_in);
2491 	struct inode *dst = file_inode(file_out);
2492 	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2493 	size_t olen = len, dst_max_i_size = 0;
2494 	size_t dst_osize;
2495 	int ret;
2496 
2497 	if (file_in->f_path.mnt != file_out->f_path.mnt ||
2498 				src->i_sb != dst->i_sb)
2499 		return -EXDEV;
2500 
2501 	if (unlikely(f2fs_readonly(src->i_sb)))
2502 		return -EROFS;
2503 
2504 	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2505 		return -EINVAL;
2506 
2507 	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2508 		return -EOPNOTSUPP;
2509 
2510 	if (src == dst) {
2511 		if (pos_in == pos_out)
2512 			return 0;
2513 		if (pos_out > pos_in && pos_out < pos_in + len)
2514 			return -EINVAL;
2515 	}
2516 
2517 	inode_lock(src);
2518 	if (src != dst) {
2519 		ret = -EBUSY;
2520 		if (!inode_trylock(dst))
2521 			goto out;
2522 	}
2523 
2524 	ret = -EINVAL;
2525 	if (pos_in + len > src->i_size || pos_in + len < pos_in)
2526 		goto out_unlock;
2527 	if (len == 0)
2528 		olen = len = src->i_size - pos_in;
2529 	if (pos_in + len == src->i_size)
2530 		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2531 	if (len == 0) {
2532 		ret = 0;
2533 		goto out_unlock;
2534 	}
2535 
2536 	dst_osize = dst->i_size;
2537 	if (pos_out + olen > dst->i_size)
2538 		dst_max_i_size = pos_out + olen;
2539 
2540 	/* verify the end result is block aligned */
2541 	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2542 			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2543 			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2544 		goto out_unlock;
2545 
2546 	ret = f2fs_convert_inline_inode(src);
2547 	if (ret)
2548 		goto out_unlock;
2549 
2550 	ret = f2fs_convert_inline_inode(dst);
2551 	if (ret)
2552 		goto out_unlock;
2553 
2554 	/* write out all dirty pages from offset */
2555 	ret = filemap_write_and_wait_range(src->i_mapping,
2556 					pos_in, pos_in + len);
2557 	if (ret)
2558 		goto out_unlock;
2559 
2560 	ret = filemap_write_and_wait_range(dst->i_mapping,
2561 					pos_out, pos_out + len);
2562 	if (ret)
2563 		goto out_unlock;
2564 
2565 	f2fs_balance_fs(sbi, true);
2566 
2567 	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2568 	if (src != dst) {
2569 		ret = -EBUSY;
2570 		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2571 			goto out_src;
2572 	}
2573 
2574 	f2fs_lock_op(sbi);
2575 	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2576 				pos_out >> F2FS_BLKSIZE_BITS,
2577 				len >> F2FS_BLKSIZE_BITS, false);
2578 
2579 	if (!ret) {
2580 		if (dst_max_i_size)
2581 			f2fs_i_size_write(dst, dst_max_i_size);
2582 		else if (dst_osize != dst->i_size)
2583 			f2fs_i_size_write(dst, dst_osize);
2584 	}
2585 	f2fs_unlock_op(sbi);
2586 
2587 	if (src != dst)
2588 		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2589 out_src:
2590 	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2591 out_unlock:
2592 	if (src != dst)
2593 		inode_unlock(dst);
2594 out:
2595 	inode_unlock(src);
2596 	return ret;
2597 }
2598 
2599 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2600 {
2601 	struct f2fs_move_range range;
2602 	struct fd dst;
2603 	int err;
2604 
2605 	if (!(filp->f_mode & FMODE_READ) ||
2606 			!(filp->f_mode & FMODE_WRITE))
2607 		return -EBADF;
2608 
2609 	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2610 							sizeof(range)))
2611 		return -EFAULT;
2612 
2613 	dst = fdget(range.dst_fd);
2614 	if (!dst.file)
2615 		return -EBADF;
2616 
2617 	if (!(dst.file->f_mode & FMODE_WRITE)) {
2618 		err = -EBADF;
2619 		goto err_out;
2620 	}
2621 
2622 	err = mnt_want_write_file(filp);
2623 	if (err)
2624 		goto err_out;
2625 
2626 	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
2627 					range.pos_out, range.len);
2628 
2629 	mnt_drop_write_file(filp);
2630 	if (err)
2631 		goto err_out;
2632 
2633 	if (copy_to_user((struct f2fs_move_range __user *)arg,
2634 						&range, sizeof(range)))
2635 		err = -EFAULT;
2636 err_out:
2637 	fdput(dst);
2638 	return err;
2639 }
2640 
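/*
 * F2FS_IOC_FLUSH_DEVICE: migrate data off a span of segments on one device
 * of a multi-device filesystem by running foreground GC over each segment.
 */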
2641 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2642 {
2643 	struct inode *inode = file_inode(filp);
2644 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2645 	struct sit_info *sm = SIT_I(sbi);
2646 	unsigned int start_segno = 0, end_segno = 0;
2647 	unsigned int dev_start_segno = 0, dev_end_segno = 0;
2648 	struct f2fs_flush_device range;
2649 	int ret;
2650 
2651 	if (!capable(CAP_SYS_ADMIN))
2652 		return -EPERM;
2653 
2654 	if (f2fs_readonly(sbi->sb))
2655 		return -EROFS;
2656 
2657 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2658 		return -EINVAL;
2659 
2660 	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2661 							sizeof(range)))
2662 		return -EFAULT;
2663 
2664 	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2665 			__is_large_section(sbi)) {
2666 		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2667 			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2668 		return -EINVAL;
2669 	}
2670 
2671 	ret = mnt_want_write_file(filp);
2672 	if (ret)
2673 		return ret;
2674 
2675 	if (range.dev_num != 0)
2676 		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2677 	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2678 
2679 	start_segno = sm->last_victim[FLUSH_DEVICE];
2680 	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2681 		start_segno = dev_start_segno;
2682 	end_segno = min(start_segno + range.segments, dev_end_segno);
2683 
2684 	while (start_segno < end_segno) {
2685 		if (!mutex_trylock(&sbi->gc_mutex)) {
2686 			ret = -EBUSY;
2687 			goto out;
2688 		}
2689 		sm->last_victim[GC_CB] = end_segno + 1;
2690 		sm->last_victim[GC_GREEDY] = end_segno + 1;
2691 		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2692 		ret = f2fs_gc(sbi, true, true, start_segno);
2693 		if (ret == -EAGAIN)
2694 			ret = 0;
2695 		else if (ret < 0)
2696 			break;
2697 		start_segno++;
2698 	}
2699 out:
2700 	mnt_drop_write_file(filp);
2701 	return ret;
2702 }
2703 
2704 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2705 {
2706 	struct inode *inode = file_inode(filp);
2707 	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2708 
2709 	/* Always report atomic-write support; SQLite on Android probes for it. */
2710 	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2711 
2712 	return put_user(sb_feature, (u32 __user *)arg);
2713 }
2714 
2715 #ifdef CONFIG_QUOTA
2716 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2717 {
2718 	struct dquot *transfer_to[MAXQUOTAS] = {};
2719 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2720 	struct super_block *sb = sbi->sb;
2721 	int err = 0;
2722 
2723 	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2724 	if (!IS_ERR(transfer_to[PRJQUOTA])) {
2725 		err = __dquot_transfer(inode, transfer_to);
2726 		if (err)
2727 			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2728 		dqput(transfer_to[PRJQUOTA]);
2729 	}
2730 	return err;
2731 }
2732 
2733 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
2734 {
2735 	struct inode *inode = file_inode(filp);
2736 	struct f2fs_inode_info *fi = F2FS_I(inode);
2737 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2738 	struct page *ipage;
2739 	kprojid_t kprojid;
2740 	int err;
2741 
2742 	if (!f2fs_sb_has_project_quota(sbi)) {
2743 		if (projid != F2FS_DEF_PROJID)
2744 			return -EOPNOTSUPP;
2745 		else
2746 			return 0;
2747 	}
2748 
2749 	if (!f2fs_has_extra_attr(inode))
2750 		return -EOPNOTSUPP;
2751 
2752 	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
2753 
2754 	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
2755 		return 0;
2756 
2757 	err = -EPERM;
2758 	/* Is it a quota file? Do not allow users to mess with it. */
2759 	if (IS_NOQUOTA(inode))
2760 		return err;
2761 
2762 	ipage = f2fs_get_node_page(sbi, inode->i_ino);
2763 	if (IS_ERR(ipage))
2764 		return PTR_ERR(ipage);
2765 
2766 	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
2767 								i_projid)) {
2768 		err = -EOVERFLOW;
2769 		f2fs_put_page(ipage, 1);
2770 		return err;
2771 	}
2772 	f2fs_put_page(ipage, 1);
2773 
2774 	err = dquot_initialize(inode);
2775 	if (err)
2776 		return err;
2777 
2778 	f2fs_lock_op(sbi);
2779 	err = f2fs_transfer_project_quota(inode, kprojid);
2780 	if (err)
2781 		goto out_unlock;
2782 
2783 	F2FS_I(inode)->i_projid = kprojid;
2784 	inode->i_ctime = current_time(inode);
2785 	f2fs_mark_inode_dirty_sync(inode, true);
2786 out_unlock:
2787 	f2fs_unlock_op(sbi);
2788 	return err;
2789 }
2790 #else
2791 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2792 {
2793 	return 0;
2794 }
2795 
2796 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
2797 {
2798 	if (projid != F2FS_DEF_PROJID)
2799 		return -EOPNOTSUPP;
2800 	return 0;
2801 }
2802 #endif
2803 
2804 /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
2805 
2806 /*
2807  * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
2808  * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
2809  * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
2810  */
2811 
2812 static const struct {
2813 	u32 iflag;
2814 	u32 xflag;
2815 } f2fs_xflags_map[] = {
2816 	{ F2FS_SYNC_FL,		FS_XFLAG_SYNC },
2817 	{ F2FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
2818 	{ F2FS_APPEND_FL,	FS_XFLAG_APPEND },
2819 	{ F2FS_NODUMP_FL,	FS_XFLAG_NODUMP },
2820 	{ F2FS_NOATIME_FL,	FS_XFLAG_NOATIME },
2821 	{ F2FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
2822 };
2823 
2824 #define F2FS_SUPPORTED_XFLAGS (		\
2825 		FS_XFLAG_SYNC |		\
2826 		FS_XFLAG_IMMUTABLE |	\
2827 		FS_XFLAG_APPEND |	\
2828 		FS_XFLAG_NODUMP |	\
2829 		FS_XFLAG_NOATIME |	\
2830 		FS_XFLAG_PROJINHERIT)
2831 
2832 /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
2833 static inline u32 f2fs_iflags_to_xflags(u32 iflags)
2834 {
2835 	u32 xflags = 0;
2836 	int i;
2837 
2838 	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
2839 		if (iflags & f2fs_xflags_map[i].iflag)
2840 			xflags |= f2fs_xflags_map[i].xflag;
2841 
2842 	return xflags;
2843 }
2844 
2845 /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
2846 static inline u32 f2fs_xflags_to_iflags(u32 xflags)
2847 {
2848 	u32 iflags = 0;
2849 	int i;
2850 
2851 	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
2852 		if (xflags & f2fs_xflags_map[i].xflag)
2853 			iflags |= f2fs_xflags_map[i].iflag;
2854 
2855 	return iflags;
2856 }
2857 
2858 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
2859 {
2860 	struct inode *inode = file_inode(filp);
2861 	struct f2fs_inode_info *fi = F2FS_I(inode);
2862 	struct fsxattr fa;
2863 
2864 	memset(&fa, 0, sizeof(struct fsxattr));
2865 	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags);
2866 
2867 	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
2868 		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
2869 							fi->i_projid);
2870 
2871 	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
2872 		return -EFAULT;
2873 	return 0;
2874 }
2875 
2876 static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
2877 {
2878 	/*
2879 	 * Project Quota ID state is only allowed to change from within the init
2880 	 * namespace. Enforce that restriction only if we are trying to change
2881 	 * the quota ID state. Everything else is allowed in user namespaces.
2882 	 */
2883 	if (current_user_ns() == &init_user_ns)
2884 		return 0;
2885 
2886 	if (__kprojid_val(F2FS_I(inode)->i_projid) != fa->fsx_projid)
2887 		return -EINVAL;
2888 
2889 	if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL) {
2890 		if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
2891 			return -EINVAL;
2892 	} else {
2893 		if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
2894 			return -EINVAL;
2895 	}
2896 
2897 	return 0;
2898 }
2899 
2900 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
2901 {
2902 	struct inode *inode = file_inode(filp);
2903 	struct fsxattr fa;
2904 	u32 iflags;
2905 	int err;
2906 
2907 	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
2908 		return -EFAULT;
2909 
2910 	/* Make sure caller has proper permission */
2911 	if (!inode_owner_or_capable(inode))
2912 		return -EACCES;
2913 
2914 	if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
2915 		return -EOPNOTSUPP;
2916 
2917 	iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
2918 	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
2919 		return -EOPNOTSUPP;
2920 
2921 	err = mnt_want_write_file(filp);
2922 	if (err)
2923 		return err;
2924 
2925 	inode_lock(inode);
2926 	err = f2fs_ioctl_check_project(inode, &fa);
2927 	if (err)
2928 		goto out;
2929 	err = f2fs_setflags_common(inode, iflags,
2930 			f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
2931 	if (err)
2932 		goto out;
2933 
2934 	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
2935 out:
2936 	inode_unlock(inode);
2937 	mnt_drop_write_file(filp);
2938 	return err;
2939 }
2940 
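/*
 * Track GC failures on a pinned file; once the count exceeds
 * gc_pin_file_threshold, drop the pin and return -EAGAIN so GC may move
 * the file's blocks again.
 */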
2941 int f2fs_pin_file_control(struct inode *inode, bool inc)
2942 {
2943 	struct f2fs_inode_info *fi = F2FS_I(inode);
2944 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2945 
2946 	/* Use i_gc_failures of a regular file as a risk signal for pinning. */
2947 	if (inc)
2948 		f2fs_i_gc_failures_write(inode,
2949 				fi->i_gc_failures[GC_FAILURE_PIN] + 1);
2950 
2951 	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
2952 		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
2953 			  __func__, inode->i_ino,
2954 			  fi->i_gc_failures[GC_FAILURE_PIN]);
2955 		clear_inode_flag(inode, FI_PIN_FILE);
2956 		return -EAGAIN;
2957 	}
2958 	return 0;
2959 }
2960 
2961 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
2962 {
2963 	struct inode *inode = file_inode(filp);
2964 	__u32 pin;
2965 	int ret = 0;
2966 
2967 	if (get_user(pin, (__u32 __user *)arg))
2968 		return -EFAULT;
2969 
2970 	if (!S_ISREG(inode->i_mode))
2971 		return -EINVAL;
2972 
2973 	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
2974 		return -EROFS;
2975 
2976 	ret = mnt_want_write_file(filp);
2977 	if (ret)
2978 		return ret;
2979 
2980 	inode_lock(inode);
2981 
2982 	if (f2fs_should_update_outplace(inode, NULL)) {
2983 		ret = -EINVAL;
2984 		goto out;
2985 	}
2986 
2987 	if (!pin) {
2988 		clear_inode_flag(inode, FI_PIN_FILE);
2989 		f2fs_i_gc_failures_write(inode, 0);
2990 		goto done;
2991 	}
2992 
2993 	if (f2fs_pin_file_control(inode, false)) {
2994 		ret = -EAGAIN;
2995 		goto out;
2996 	}
2997 	ret = f2fs_convert_inline_inode(inode);
2998 	if (ret)
2999 		goto out;
3000 
3001 	set_inode_flag(inode, FI_PIN_FILE);
3002 	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3003 done:
3004 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3005 out:
3006 	inode_unlock(inode);
3007 	mnt_drop_write_file(filp);
3008 	return ret;
3009 }
3010 
3011 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3012 {
3013 	struct inode *inode = file_inode(filp);
3014 	__u32 pin = 0;
3015 
3016 	if (is_inode_flag_set(inode, FI_PIN_FILE))
3017 		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3018 	return put_user(pin, (u32 __user *)arg);
3019 }
3020 
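/*
 * Walk the whole file with F2FS_GET_BLOCK_PRECACHE to populate the extent
 * cache in advance; inodes flagged FI_NO_EXTENT are not supported.
 */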
3021 int f2fs_precache_extents(struct inode *inode)
3022 {
3023 	struct f2fs_inode_info *fi = F2FS_I(inode);
3024 	struct f2fs_map_blocks map;
3025 	pgoff_t m_next_extent;
3026 	loff_t end;
3027 	int err;
3028 
3029 	if (is_inode_flag_set(inode, FI_NO_EXTENT))
3030 		return -EOPNOTSUPP;
3031 
3032 	map.m_lblk = 0;
3033 	map.m_next_pgofs = NULL;
3034 	map.m_next_extent = &m_next_extent;
3035 	map.m_seg_type = NO_CHECK_TYPE;
3036 	map.m_may_create = false;
3037 	end = F2FS_I_SB(inode)->max_file_blocks;
3038 
3039 	while (map.m_lblk < end) {
3040 		map.m_len = end - map.m_lblk;
3041 
3042 		down_write(&fi->i_gc_rwsem[WRITE]);
3043 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3044 		up_write(&fi->i_gc_rwsem[WRITE]);
3045 		if (err)
3046 			return err;
3047 
3048 		map.m_lblk = m_next_extent;
3049 	}
3050 
3051 	return err;
3052 }
3053 
3054 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3055 {
3056 	return f2fs_precache_extents(file_inode(filp));
3057 }
3058 
3059 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3060 {
3061 	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3062 	__u64 block_count;
3063 	int ret;
3064 
3065 	if (!capable(CAP_SYS_ADMIN))
3066 		return -EPERM;
3067 
3068 	if (f2fs_readonly(sbi->sb))
3069 		return -EROFS;
3070 
3071 	if (copy_from_user(&block_count, (void __user *)arg,
3072 			   sizeof(block_count)))
3073 		return -EFAULT;
3074 
3075 	ret = f2fs_resize_fs(sbi, block_count);
3076 
3077 	return ret;
3078 }
3079 
3080 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3081 {
3082 	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
3083 		return -EIO;
3084 
3085 	switch (cmd) {
3086 	case F2FS_IOC_GETFLAGS:
3087 		return f2fs_ioc_getflags(filp, arg);
3088 	case F2FS_IOC_SETFLAGS:
3089 		return f2fs_ioc_setflags(filp, arg);
3090 	case F2FS_IOC_GETVERSION:
3091 		return f2fs_ioc_getversion(filp, arg);
3092 	case F2FS_IOC_START_ATOMIC_WRITE:
3093 		return f2fs_ioc_start_atomic_write(filp);
3094 	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
3095 		return f2fs_ioc_commit_atomic_write(filp);
3096 	case F2FS_IOC_START_VOLATILE_WRITE:
3097 		return f2fs_ioc_start_volatile_write(filp);
3098 	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
3099 		return f2fs_ioc_release_volatile_write(filp);
3100 	case F2FS_IOC_ABORT_VOLATILE_WRITE:
3101 		return f2fs_ioc_abort_volatile_write(filp);
3102 	case F2FS_IOC_SHUTDOWN:
3103 		return f2fs_ioc_shutdown(filp, arg);
3104 	case FITRIM:
3105 		return f2fs_ioc_fitrim(filp, arg);
3106 	case F2FS_IOC_SET_ENCRYPTION_POLICY:
3107 		return f2fs_ioc_set_encryption_policy(filp, arg);
3108 	case F2FS_IOC_GET_ENCRYPTION_POLICY:
3109 		return f2fs_ioc_get_encryption_policy(filp, arg);
3110 	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
3111 		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
3112 	case F2FS_IOC_GARBAGE_COLLECT:
3113 		return f2fs_ioc_gc(filp, arg);
3114 	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
3115 		return f2fs_ioc_gc_range(filp, arg);
3116 	case F2FS_IOC_WRITE_CHECKPOINT:
3117 		return f2fs_ioc_write_checkpoint(filp, arg);
3118 	case F2FS_IOC_DEFRAGMENT:
3119 		return f2fs_ioc_defragment(filp, arg);
3120 	case F2FS_IOC_MOVE_RANGE:
3121 		return f2fs_ioc_move_range(filp, arg);
3122 	case F2FS_IOC_FLUSH_DEVICE:
3123 		return f2fs_ioc_flush_device(filp, arg);
3124 	case F2FS_IOC_GET_FEATURES:
3125 		return f2fs_ioc_get_features(filp, arg);
3126 	case F2FS_IOC_FSGETXATTR:
3127 		return f2fs_ioc_fsgetxattr(filp, arg);
3128 	case F2FS_IOC_FSSETXATTR:
3129 		return f2fs_ioc_fssetxattr(filp, arg);
3130 	case F2FS_IOC_GET_PIN_FILE:
3131 		return f2fs_ioc_get_pin_file(filp, arg);
3132 	case F2FS_IOC_SET_PIN_FILE:
3133 		return f2fs_ioc_set_pin_file(filp, arg);
3134 	case F2FS_IOC_PRECACHE_EXTENTS:
3135 		return f2fs_ioc_precache_extents(filp, arg);
3136 	case F2FS_IOC_RESIZE_FS:
3137 		return f2fs_ioc_resize_fs(filp, arg);
3138 	default:
3139 		return -ENOTTY;
3140 	}
3141 }
3142 
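/*
 * Write path: IOCB_NOWAIT is honoured only for direct I/O and never blocks
 * on the inode lock; otherwise blocks are preallocated up front and any
 * preallocation left beyond i_size is truncated if the write falls short.
 */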
3143 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3144 {
3145 	struct file *file = iocb->ki_filp;
3146 	struct inode *inode = file_inode(file);
3147 	ssize_t ret;
3148 
3149 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
3150 		ret = -EIO;
3151 		goto out;
3152 	}
3153 
3154 	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) {
3155 		ret = -EINVAL;
3156 		goto out;
3157 	}
3158 
3159 	if (!inode_trylock(inode)) {
3160 		if (iocb->ki_flags & IOCB_NOWAIT) {
3161 			ret = -EAGAIN;
3162 			goto out;
3163 		}
3164 		inode_lock(inode);
3165 	}
3166 
3167 	ret = generic_write_checks(iocb, from);
3168 	if (ret > 0) {
3169 		bool preallocated = false;
3170 		size_t target_size = 0;
3171 		int err;
3172 
3173 		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
3174 			set_inode_flag(inode, FI_NO_PREALLOC);
3175 
3176 		if (iocb->ki_flags & IOCB_NOWAIT) {
3177 			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
3178 						iov_iter_count(from)) ||
3179 				f2fs_has_inline_data(inode) ||
3180 				f2fs_force_buffered_io(inode, iocb, from)) {
3181 				clear_inode_flag(inode, FI_NO_PREALLOC);
3182 				inode_unlock(inode);
3183 				ret = -EAGAIN;
3184 				goto out;
3185 			}
3186 		} else {
3187 			preallocated = true;
3188 			target_size = iocb->ki_pos + iov_iter_count(from);
3189 
3190 			err = f2fs_preallocate_blocks(iocb, from);
3191 			if (err) {
3192 				clear_inode_flag(inode, FI_NO_PREALLOC);
3193 				inode_unlock(inode);
3194 				ret = err;
3195 				goto out;
3196 			}
3197 		}
3198 		ret = __generic_file_write_iter(iocb, from);
3199 		clear_inode_flag(inode, FI_NO_PREALLOC);
3200 
3201 		/* if we couldn't write data, we should deallocate blocks. */
3202 		if (preallocated && i_size_read(inode) < target_size)
3203 			f2fs_truncate(inode);
3204 
3205 		if (ret > 0)
3206 			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
3207 	}
3208 	inode_unlock(inode);
3209 out:
3210 	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
3211 					iov_iter_count(from), ret);
3212 	if (ret > 0)
3213 		ret = generic_write_sync(iocb, ret);
3214 	return ret;
3215 }
3216 
3217 #ifdef CONFIG_COMPAT
3218 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3219 {
3220 	switch (cmd) {
3221 	case F2FS_IOC32_GETFLAGS:
3222 		cmd = F2FS_IOC_GETFLAGS;
3223 		break;
3224 	case F2FS_IOC32_SETFLAGS:
3225 		cmd = F2FS_IOC_SETFLAGS;
3226 		break;
3227 	case F2FS_IOC32_GETVERSION:
3228 		cmd = F2FS_IOC_GETVERSION;
3229 		break;
3230 	case F2FS_IOC_START_ATOMIC_WRITE:
3231 	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
3232 	case F2FS_IOC_START_VOLATILE_WRITE:
3233 	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
3234 	case F2FS_IOC_ABORT_VOLATILE_WRITE:
3235 	case F2FS_IOC_SHUTDOWN:
3236 	case F2FS_IOC_SET_ENCRYPTION_POLICY:
3237 	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
3238 	case F2FS_IOC_GET_ENCRYPTION_POLICY:
3239 	case F2FS_IOC_GARBAGE_COLLECT:
3240 	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
3241 	case F2FS_IOC_WRITE_CHECKPOINT:
3242 	case F2FS_IOC_DEFRAGMENT:
3243 	case F2FS_IOC_MOVE_RANGE:
3244 	case F2FS_IOC_FLUSH_DEVICE:
3245 	case F2FS_IOC_GET_FEATURES:
3246 	case F2FS_IOC_FSGETXATTR:
3247 	case F2FS_IOC_FSSETXATTR:
3248 	case F2FS_IOC_GET_PIN_FILE:
3249 	case F2FS_IOC_SET_PIN_FILE:
3250 	case F2FS_IOC_PRECACHE_EXTENTS:
3251 	case F2FS_IOC_RESIZE_FS:
3252 		break;
3253 	default:
3254 		return -ENOIOCTLCMD;
3255 	}
3256 	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
3257 }
3258 #endif
3259 
3260 const struct file_operations f2fs_file_operations = {
3261 	.llseek		= f2fs_llseek,
3262 	.read_iter	= generic_file_read_iter,
3263 	.write_iter	= f2fs_file_write_iter,
3264 	.open		= f2fs_file_open,
3265 	.release	= f2fs_release_file,
3266 	.mmap		= f2fs_file_mmap,
3267 	.flush		= f2fs_file_flush,
3268 	.fsync		= f2fs_sync_file,
3269 	.fallocate	= f2fs_fallocate,
3270 	.unlocked_ioctl	= f2fs_ioctl,
3271 #ifdef CONFIG_COMPAT
3272 	.compat_ioctl	= f2fs_compat_ioctl,
3273 #endif
3274 	.splice_read	= generic_file_splice_read,
3275 	.splice_write	= iter_file_splice_write,
3276 };
3277