// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_file.h"

#include <linux/dax.h>
#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>
#include <linux/fadvise.h>
#include <linux/mount.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Decide if the given file range is aligned to the size of the fundamental
 * allocation unit for the file.
 */
bool
xfs_is_falloc_aligned(
	struct xfs_inode	*ip,
	loff_t			pos,
	long long		len)
{
	unsigned int		alloc_unit = xfs_inode_alloc_unitsize(ip);

	if (!is_power_of_2(alloc_unit))
		return isaligned_64(pos, alloc_unit) &&
		       isaligned_64(len, alloc_unit);

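	/*
	 * Power-of-two fast path: e.g. with a 4k allocation unit,
	 * (pos | len) & 4095 is zero iff both pos and len are
	 * multiples of 4096.
	 */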
	return !((pos | len) & (alloc_unit - 1));
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush and thus no need for explicit cache
 * flush operations; nor are there any non-transaction metadata updates on
 * directories.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);

	trace_xfs_dir_fsync(ip);
	return xfs_log_force_inode(ip);
}

static xfs_csn_t
xfs_fsync_seq(
	struct xfs_inode	*ip,
	bool			datasync)
{
	if (!xfs_ipincount(ip))
		return 0;
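	/*
	 * For fdatasync we can skip the log force if nothing other than
	 * the timestamps is dirty in the log for this inode.
	 */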
	if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		return 0;
	return ip->i_itemp->ili_commit_seq;
}

/*
 * All metadata updates are logged, which means that we just have to flush the
 * log up to the latest LSN that touched the inode.
 *
 * If we have concurrent fsync/fdatasync() calls, we need them to all block on
 * the log force before we clear the ili_fsync_fields field. This ensures that
 * we don't get a racing sync operation that does not wait for the metadata to
 * hit the journal before returning. If we race with clearing ili_fsync_fields,
 * then all that will happen is the log force will do nothing as the lsn will
 * already be on disk. We can't race with setting ili_fsync_fields because that
 * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
 * shared until after the ili_fsync_fields is cleared.
 */
static int
xfs_fsync_flush_log(
	struct xfs_inode	*ip,
	bool			datasync,
	int			*log_flushed)
{
	int			error = 0;
	xfs_csn_t		seq;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	seq = xfs_fsync_seq(ip, datasync);
	if (seq) {
		error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC,
					  log_flushed);

		spin_lock(&ip->i_itemp->ili_lock);
		ip->i_itemp->ili_fsync_fields = 0;
		spin_unlock(&ip->i_itemp->ili_lock);
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	int			error, err2;
	int			log_flushed = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (xfs_is_shutdown(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first. This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		error = blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		error = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);

	/*
	 * Any inode that has dirty modifications in the log is pinned. The
	 * racy check here for a pinned inode will not catch modifications
	 * that happen concurrently to the fsync call, but fsync semantics
	 * only require us to sync previously completed I/O.
	 */
	if (xfs_ipincount(ip)) {
		err2 = xfs_fsync_flush_log(ip, datasync, &log_flushed);
		if (err2 && !error)
			error = err2;
	}

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp) {
		err2 = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
		if (err2 && !error)
			error = err2;
	}

	return error;
}

static int
xfs_ilock_iocb(
	struct kiocb		*iocb,
	unsigned int		lock_mode)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, lock_mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, lock_mode);
	}

	return 0;
}

static int
xfs_ilock_iocb_for_write(
	struct kiocb		*iocb,
	unsigned int		*lock_mode)
{
	ssize_t			ret;
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));

	ret = xfs_ilock_iocb(iocb, *lock_mode);
	if (ret)
		return ret;

	/*
	 * If a reflink remap is in progress we always need to take the iolock
	 * exclusively to wait for it to finish.
	 */
	if (*lock_mode == XFS_IOLOCK_SHARED &&
	    xfs_iflags_test(ip, XFS_IREMAPPING)) {
		xfs_iunlock(ip, *lock_mode);
		*lock_mode = XFS_IOLOCK_EXCL;
		return xfs_ilock_iocb(iocb, *lock_mode);
	}

	return 0;
}

STATIC ssize_t
xfs_file_dio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_direct_read(iocb, to);

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0, NULL, 0);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(iocb, to);

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(iocb, to);

	ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
	if (ret)
		return ret;
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_read(iocb, to);
	else
		ret = xfs_file_buffered_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

STATIC ssize_t
xfs_file_splice_read(
	struct file		*in,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			len,
	unsigned int		flags)
{
	struct inode		*inode = file_inode(in);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (xfs_is_shutdown(mp))
		return -EIO;

	trace_xfs_file_splice_read(ip, *ppos, len);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = filemap_splice_read(in, ppos, pipe, len, flags);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Take care of zeroing post-EOF blocks when they might exist.
 *
 * Returns 0 on success, a negative error for a failure, or 1 if this
 * function dropped the iolock and reacquired it exclusively and the caller
 * needs to restart the write sanity checks.
 */
static ssize_t
xfs_file_write_zero_eof(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	unsigned int		*iolock,
	size_t			count,
	bool			*drained_dio)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	loff_t			isize;
	int			error;

	/*
	 * We need to serialise against EOF updates that occur in IO completions
	 * here. We want to make sure that nobody is changing the size while
	 * we do this check until we have placed an IO barrier (i.e. hold
	 * XFS_IOLOCK_EXCL) that prevents new IO from being dispatched. The
	 * spinlock effectively forms a memory barrier once we have
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value and
	 * hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(VFS_I(ip));
	if (iocb->ki_pos <= isize) {
		spin_unlock(&ip->i_flags_lock);
		return 0;
	}
	spin_unlock(&ip->i_flags_lock);

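	/*
	 * Zeroing is needed here, which may mean cycling the iolock and
	 * waiting for in-flight direct I/O below; none of that can be done
	 * under IOCB_NOWAIT.
	 */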
	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EAGAIN;

	if (!*drained_dio) {
		/*
		 * If zeroing is needed and we are currently holding the iolock
		 * shared, we need to update it to exclusive, which implies
		 * having to redo all the checks done so far.
		 */
		if (*iolock == XFS_IOLOCK_SHARED) {
			xfs_iunlock(ip, *iolock);
			*iolock = XFS_IOLOCK_EXCL;
			xfs_ilock(ip, *iolock);
			iov_iter_reexpand(from, count);
		}

		/*
		 * We now have an IO submission barrier in place, but AIO can do
		 * EOF updates during IO completion and hence we now need to
		 * wait for all of them to drain. Non-AIO DIO will have drained
		 * before we are given the XFS_IOLOCK_EXCL, and so for most
		 * cases this wait is a no-op.
		 */
		inode_dio_wait(VFS_I(ip));
		*drained_dio = true;
		return 1;
	}

	trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL);
	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);

	return error;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held. Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	unsigned int		*iolock)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	ssize_t			error;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

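	/*
	 * Break any layout leases on the file before writing. In nowait
	 * context we cannot wait for the break, so use the non-blocking
	 * break_layout() and map -EWOULDBLOCK to -EAGAIN.
	 */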
	if (iocb->ki_flags & IOCB_NOWAIT) {
		error = break_layout(inode, false);
		if (error == -EWOULDBLOCK)
			error = -EAGAIN;
	} else {
		error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	}

	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(XFS_I(inode), *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		error = xfs_ilock_iocb(iocb, *iolock);
		if (error) {
			*iolock = 0;
			return error;
		}
		goto restart;
	}

	/*
	 * If the offset is beyond the size of the file, we need to zero all
	 * blocks that fall between the existing EOF and the start of this
	 * write.
	 *
	 * We can do an unlocked check for i_size here safely as I/O completion
	 * can only extend EOF. Truncate is locked out at this point, so the
	 * EOF cannot move backwards, only forwards. Hence we only need to take
	 * the slow path when we are at or beyond the current EOF.
	 */
	if (iocb->ki_pos > i_size_read(inode)) {
		error = xfs_file_write_zero_eof(iocb, from, iolock, count,
				&drained_dio);
		if (error == 1)
			goto restart;
		if (error)
			return error;
	}

	return kiocb_modified(iocb);
}

static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	int			error,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	unsigned int		nofs_flag;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	if (error)
		return error;
	if (!size)
		return 0;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim. To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			goto out;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, offset, size, true);
		goto out;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 *
	 * As IO completion only ever extends EOF, we can do an unlocked check
	 * here to avoid taking the spinlock. If we land within the current EOF,
	 * then we do not need to do an extending update at all, and we don't
	 * need to take the lock to check this. If we race with an update moving
	 * EOF, then we'll either still be beyond EOF and need to take the lock,
	 * or we'll be within EOF and we don't need to take it at all.
	 */
	if (offset + size <= i_size_read(inode))
		goto out;

	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

out:
	memalloc_nofs_restore(nofs_flag);
	return error;
}

static const struct iomap_dio_ops xfs_dio_write_ops = {
	.end_io		= xfs_dio_write_end_io,
};

/*
 * Handle block-aligned direct I/O writes.
 */
static noinline ssize_t
xfs_file_dio_write_aligned(
	struct xfs_inode	*ip,
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	unsigned int		iolock = XFS_IOLOCK_SHARED;
	ssize_t			ret;

	ret = xfs_ilock_iocb_for_write(iocb, &iolock);
	if (ret)
		return ret;
	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out_unlock;

	/*
	 * We don't need to hold the IOLOCK exclusively across the IO, so demote
	 * the iolock back to shared if we had to take the exclusive lock in
	 * xfs_file_write_checks() for other reasons.
	 */
	if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}
	trace_xfs_file_direct_write(iocb, from);
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops, 0, NULL, 0);
out_unlock:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

/*
 * Handle block-unaligned direct I/O writes.
 *
 * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
 * them to be done in parallel with reads and other direct I/O writes. However,
 * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
 * to do sub-block zeroing and that requires serialisation against other direct
 * I/O to the same block. In this case we need to serialise the submission of
 * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
 * In the case where sub-block zeroing is not required, we can do concurrent
 * sub-block dios to the same block successfully.
 *
 * Optimistically submit the I/O using the shared lock first, but use the
 * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
 * if block allocation or partial block zeroing would be required. In that case
 * we try again with the exclusive lock.
 */
static noinline ssize_t
xfs_file_dio_write_unaligned(
	struct xfs_inode	*ip,
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	size_t			isize = i_size_read(VFS_I(ip));
	size_t			count = iov_iter_count(from);
	unsigned int		iolock = XFS_IOLOCK_SHARED;
	unsigned int		flags = IOMAP_DIO_OVERWRITE_ONLY;
	ssize_t			ret;

	/*
	 * Extending writes need exclusivity because of the sub-block zeroing
	 * that the DIO code always does for partial tail blocks beyond EOF, so
	 * don't even bother trying the fast path in this case.
	 */
	if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
retry_exclusive:
		iolock = XFS_IOLOCK_EXCL;
		flags = IOMAP_DIO_FORCE_WAIT;
	}

	ret = xfs_ilock_iocb_for_write(iocb, &iolock);
	if (ret)
		return ret;

	/*
	 * We can't properly handle unaligned direct I/O to reflink files yet,
	 * as we can't unshare a partial block.
	 */
	if (xfs_is_cow_inode(ip)) {
		trace_xfs_reflink_bounce_dio_write(iocb, from);
		ret = -ENOTBLK;
		goto out_unlock;
	}

	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out_unlock;

	/*
	 * If we are doing exclusive unaligned I/O, this must be the only I/O
	 * in-flight. Otherwise we risk data corruption due to unwritten extent
	 * conversions from the AIO end_io handler. Wait for all other I/O to
	 * drain first.
	 */
	if (flags & IOMAP_DIO_FORCE_WAIT)
		inode_dio_wait(VFS_I(ip));

	trace_xfs_file_direct_write(iocb, from);
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops, flags, NULL, 0);

	/*
	 * Retry unaligned I/O with exclusive blocking semantics if the DIO
	 * layer rejected it for mapping or locking reasons. If we are doing
	 * nonblocking user I/O, propagate the error.
	 */
	if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) {
		ASSERT(flags & IOMAP_DIO_OVERWRITE_ONLY);
		xfs_iunlock(ip, iolock);
		goto retry_exclusive;
	}

out_unlock:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

static ssize_t
xfs_file_dio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
	size_t			count = iov_iter_count(from);

	/* direct I/O must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;
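	/*
	 * Example: with 512 byte sectors and 4k filesystem blocks, a write
	 * at pos 512 passes the sector check above but takes the unaligned
	 * path below because it covers only part of a filesystem block.
	 */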
	if ((iocb->ki_pos | count) & ip->i_mount->m_blockmask)
		return xfs_file_dio_write_unaligned(ip, iocb, from);
	return xfs_file_dio_write_aligned(ip, iocb, from);
}

static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	unsigned int		iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	loff_t			pos;

	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;
	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;

	trace_xfs_file_dax_write(iocb, from);
	ret = dax_iomap_rw(iocb, from, &xfs_dax_write_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	if (iolock)
		xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

STATIC ssize_t
xfs_file_buffered_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	bool			cleared_space = false;
	unsigned int		iolock;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	ret = xfs_ilock_iocb(iocb, iolock);
	if (ret)
		return ret;

	ret = xfs_file_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	trace_xfs_file_buffered_write(iocb, from);
	ret = iomap_file_buffered_write(iocb, from,
			&xfs_buffered_write_iomap_ops, NULL);

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time. Use a synchronous scan to increase the
	 * effectiveness of the scan.
	 */
	if (ret == -EDQUOT && !cleared_space) {
		xfs_iunlock(ip, iolock);
		xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
		cleared_space = true;
		goto write_retry;
	} else if (ret == -ENOSPC && !cleared_space) {
		struct xfs_icwalk	icw = {0};

		cleared_space = true;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		icw.icw_flags = XFS_ICWALK_FLAG_SYNC;
		xfs_blockgc_free_space(ip->i_mount, &icw);
		goto write_retry;
	}

out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW. In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_write(iocb, from);
		if (ret != -ENOTBLK)
			return ret;
	}

	return xfs_file_buffered_write(iocb, from);
}

/* Does this file, inode, or mount want synchronous writes? */
static inline bool xfs_file_sync_writes(struct file *filp)
{
	struct xfs_inode	*ip = XFS_I(file_inode(filp));

	if (xfs_has_wsync(ip->i_mount))
		return true;
	if (filp->f_flags & (__O_SYNC | O_DSYNC))
		return true;
	if (IS_SYNC(file_inode(filp)))
		return true;

	return false;
}

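/*
 * Decide whether this fallocate request extends i_size. If it does, validate
 * the new size and return it via *new_size so the caller can apply it once
 * the space operation has succeeded; otherwise *new_size is left untouched
 * (callers initialise it to zero, which xfs_falloc_setsize treats as a no-op).
 */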
static int
xfs_falloc_newsize(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len,
	loff_t			*new_size)
{
	struct inode		*inode = file_inode(file);

	if ((mode & FALLOC_FL_KEEP_SIZE) || offset + len <= i_size_read(inode))
		return 0;
	*new_size = offset + len;
	return inode_newsize_ok(inode, *new_size);
}

static int
xfs_falloc_setsize(
	struct file		*file,
	loff_t			new_size)
{
	struct iattr		iattr = {
		.ia_valid	= ATTR_SIZE,
		.ia_size	= new_size,
	};

	if (!new_size)
		return 0;
	return xfs_vn_setattr_size(file_mnt_idmap(file), file_dentry(file),
			&iattr);
}

static int
xfs_falloc_collapse_range(
	struct file		*file,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	loff_t			new_size = i_size_read(inode) - len;
	int			error;

	if (!xfs_is_falloc_aligned(XFS_I(inode), offset, len))
		return -EINVAL;

	/*
	 * The collapse range must not reach or cross EOF; collapsing up to
	 * EOF would effectively be a truncate operation.
	 */
	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	error = xfs_collapse_file_space(XFS_I(inode), offset, len);
	if (error)
		return error;
	return xfs_falloc_setsize(file, new_size);
}

static int
xfs_falloc_insert_range(
	struct file		*file,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	loff_t			isize = i_size_read(inode);
	int			error;

	if (!xfs_is_falloc_aligned(XFS_I(inode), offset, len))
		return -EINVAL;

	/*
	 * New inode size must not exceed ->s_maxbytes, accounting for
	 * possible signed overflow.
	 */
	if (inode->i_sb->s_maxbytes - isize < len)
		return -EFBIG;

	/* Offset should be less than i_size */
	if (offset >= isize)
		return -EINVAL;

	error = xfs_falloc_setsize(file, isize + len);
	if (error)
		return error;

	/*
	 * Perform hole insertion now that the file size has been updated so
	 * that if we crash during the operation we don't leave shifted extents
	 * past EOF and hence lose access to the data that is contained within
	 * them.
	 */
	return xfs_insert_file_space(XFS_I(inode), offset, len);
}

/*
 * Punch a hole and prealloc the range. We use a hole punch rather than
 * unwritten extent conversion for two reasons:
 *
 * 1.) Hole punch handles partial block zeroing for us.
 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued by
 *     virtue of the hole punch.
 */
static int
xfs_falloc_zero_range(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	unsigned int		blksize = i_blocksize(inode);
	loff_t			new_size = 0;
	int			error;

	trace_xfs_zero_file_space(XFS_I(inode));

	error = xfs_falloc_newsize(file, mode, offset, len, &new_size);
	if (error)
		return error;

	error = xfs_free_file_space(XFS_I(inode), offset, len);
	if (error)
		return error;

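	/*
	 * Round the range out to filesystem blocks for the preallocation,
	 * e.g. with 4k blocks, offset 1000 and len 3000 become offset 0 and
	 * len 4096. The hole punch above already zeroed the partial blocks.
	 */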
	len = round_up(offset + len, blksize) - round_down(offset, blksize);
	offset = round_down(offset, blksize);
	error = xfs_alloc_file_space(XFS_I(inode), offset, len);
	if (error)
		return error;
	return xfs_falloc_setsize(file, new_size);
}

static int
xfs_falloc_unshare_range(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	loff_t			new_size = 0;
	int			error;

	error = xfs_falloc_newsize(file, mode, offset, len, &new_size);
	if (error)
		return error;

	error = xfs_reflink_unshare(XFS_I(inode), offset, len);
	if (error)
		return error;

	error = xfs_alloc_file_space(XFS_I(inode), offset, len);
	if (error)
		return error;
	return xfs_falloc_setsize(file, new_size);
}

static int
xfs_falloc_allocate_range(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	loff_t			new_size = 0;
	int			error;

	/*
	 * In always_cow mode we can't use preallocations and thus should not
	 * create them.
	 */
	if (xfs_is_always_cow_inode(XFS_I(inode)))
		return -EOPNOTSUPP;

	error = xfs_falloc_newsize(file, mode, offset, len, &new_size);
	if (error)
		return error;

	error = xfs_alloc_file_space(XFS_I(inode), offset, len);
	if (error)
		return error;
	return xfs_falloc_setsize(file, new_size);
}

#define XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	/*
	 * Must wait for all AIO to complete before we continue as AIO can
	 * change the file size on completion without holding any locks we
	 * currently hold. We must do this first because AIO can update both
	 * the on-disk and in-memory inode sizes, and the operations that
	 * follow require the in-memory size to be fully up-to-date.
	 */
	inode_dio_wait(inode);

	error = file_modified(file);
	if (error)
		goto out_unlock;

	switch (mode & FALLOC_FL_MODE_MASK) {
	case FALLOC_FL_PUNCH_HOLE:
		error = xfs_free_file_space(ip, offset, len);
		break;
	case FALLOC_FL_COLLAPSE_RANGE:
		error = xfs_falloc_collapse_range(file, offset, len);
		break;
	case FALLOC_FL_INSERT_RANGE:
		error = xfs_falloc_insert_range(file, offset, len);
		break;
	case FALLOC_FL_ZERO_RANGE:
		error = xfs_falloc_zero_range(file, mode, offset, len);
		break;
	case FALLOC_FL_UNSHARE_RANGE:
		error = xfs_falloc_unshare_range(file, mode, offset, len);
		break;
	case FALLOC_FL_ALLOCATE_RANGE:
		error = xfs_falloc_allocate_range(file, mode, offset, len);
		break;
	default:
		error = -EOPNOTSUPP;
		break;
	}

	if (!error && xfs_file_sync_writes(file))
		error = xfs_log_force_inode(ip);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

STATIC int
xfs_file_fadvise(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			advice)
{
	struct xfs_inode	*ip = XFS_I(file_inode(file));
	int			ret;
	int			lockflags = 0;

	/*
	 * Operations creating pages in page cache need protection from hole
	 * punching and similar ops.
	 */
	if (advice == POSIX_FADV_WILLNEED) {
		lockflags = XFS_IOLOCK_SHARED;
		xfs_ilock(ip, lockflags);
	}
	ret = generic_fadvise(file, start, end, advice);
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	return ret;
}

STATIC loff_t
xfs_file_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	loff_t			remapped = 0;
	xfs_extlen_t		cowextsize;
	int			ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (!xfs_has_reflink(mp))
		return -EOPNOTSUPP;

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* Prepare and then clone file data. */
	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret || len == 0)
		return ret;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
			&remapped);
	if (ret)
		goto out_unlock;

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			remap_flags);
	if (ret)
		goto out_unlock;

	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
		xfs_log_force_inode(dest);
out_unlock:
	xfs_iunlock2_remapping(src, dest);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	return remapped > 0 ? remapped : ret;
}

STATIC int
xfs_file_open(
	struct inode		*inode,
	struct file		*file)
{
	if (xfs_is_shutdown(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT;
	return generic_file_open(inode, file);
}

STATIC int
xfs_dir_open(
	struct inode		*inode,
	struct file		*file)
{
	struct xfs_inode	*ip = XFS_I(inode);
	unsigned int		mode;
	int			error;

	if (xfs_is_shutdown(ip->i_mount))
		return -EIO;
	error = generic_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_df.if_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, 0);
	xfs_iunlock(ip, mode);
	return error;
}

/*
 * Don't bother propagating errors. We're just doing cleanup, and the caller
 * ignores the return value anyway.
 */
STATIC int
xfs_file_release(
	struct inode		*inode,
	struct file		*file)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	/*
	 * If this is a read-only mount or the file system has been shut down,
	 * don't generate I/O.
	 */
	if (xfs_is_readonly(mp) || xfs_is_shutdown(mp))
		return 0;

	/*
	 * If we previously truncated this file and removed old data in the
	 * process, we want to initiate "early" writeout on the last close.
	 * This is an attempt to combat the notorious NULL files problem which
	 * is particularly noticeable from a truncate down, buffered (re-)write
	 * (delalloc), followed by a crash. What we are effectively doing here
	 * is significantly reducing the time window where we'd otherwise be
	 * exposed to that problem.
	 */
	if (xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED)) {
		xfs_iflags_clear(ip, XFS_EOFBLOCKS_RELEASED);
		if (ip->i_delayed_blks > 0)
			filemap_flush(inode->i_mapping);
	}

	/*
	 * XFS aggressively preallocates post-EOF space to generate contiguous
	 * allocations for writers that append to the end of the file.
	 *
	 * To support workloads that close and reopen the file frequently, these
	 * preallocations usually persist after a close unless it is the first
	 * close for the inode. This is a tradeoff to generate tightly packed
	 * data layouts for unpacking tarballs or similar archives that write
	 * one file after another without going back to it while keeping the
	 * preallocation for files that have recurring open/write/close cycles.
	 *
	 * This heuristic is skipped for inodes with the append-only flag as
	 * that flag is rather pointless for inodes written only once.
	 *
	 * There is no point in freeing blocks here for open but unlinked files
	 * as they will be taken care of by the inactivation path soon.
	 *
	 * When releasing a read-only context, don't flush data or trim post-EOF
	 * blocks. This avoids open/read/close workloads from removing EOF
	 * blocks that other writers depend upon to reduce fragmentation.
	 *
	 * If we can't get the iolock just skip truncating the blocks past EOF
	 * because we could deadlock with the mmap_lock otherwise. We'll get
	 * another chance to drop them once the last reference to the inode is
	 * dropped, so we'll never leak blocks permanently.
	 */
	if (inode->i_nlink &&
	    (file->f_mode & FMODE_WRITE) &&
	    !(ip->i_diflags & XFS_DIFLAG_APPEND) &&
	    !xfs_iflags_test(ip, XFS_EOFBLOCKS_RELEASED) &&
	    xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (xfs_can_free_eofblocks(ip) &&
		    !xfs_iflags_test_and_set(ip, XFS_EOFBLOCKS_RELEASED))
			xfs_free_eofblocks(ip);
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	}

	return 0;
}

STATIC int
xfs_file_readdir(
	struct file		*file,
	struct dir_context	*ctx)
{
	struct inode		*inode = file_inode(file);
	xfs_inode_t		*ip = XFS_I(inode);
	size_t			bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem. With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size. For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

STATIC loff_t
xfs_file_llseek(
	struct file		*file,
	loff_t			offset,
	int			whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (xfs_is_shutdown(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

static inline vm_fault_t
xfs_dax_fault_locked(
	struct vm_fault		*vmf,
	unsigned int		order,
	bool			write_fault)
{
	vm_fault_t		ret;
	pfn_t			pfn;

	if (!IS_ENABLED(CONFIG_FS_DAX)) {
		ASSERT(0);
		return VM_FAULT_SIGBUS;
	}
	ret = dax_iomap_fault(vmf, order, &pfn, NULL,
			(write_fault && !vmf->cow_page) ?
				&xfs_dax_write_iomap_ops :
				&xfs_read_iomap_ops);
	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, order, pfn);
	return ret;
}

static vm_fault_t
xfs_dax_read_fault(
	struct vm_fault		*vmf,
	unsigned int		order)
{
	struct xfs_inode	*ip = XFS_I(file_inode(vmf->vma->vm_file));
	vm_fault_t		ret;

	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
	ret = xfs_dax_fault_locked(vmf, order, false);
	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);

	return ret;
}

static vm_fault_t
xfs_write_fault(
	struct vm_fault		*vmf,
	unsigned int		order)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	unsigned int		lock_mode = XFS_MMAPLOCK_SHARED;
	vm_fault_t		ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);

	/*
	 * Normally we only need the shared mmaplock, but if a reflink remap is
	 * in progress we take the exclusive lock to wait for the remap to
	 * finish before taking a write fault.
	 */
	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
	if (xfs_iflags_test(ip, XFS_IREMAPPING)) {
		xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
		xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
		lock_mode = XFS_MMAPLOCK_EXCL;
	}

	if (IS_DAX(inode))
		ret = xfs_dax_fault_locked(vmf, order, true);
	else
		ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
	xfs_iunlock(ip, lock_mode);

	sb_end_pagefault(inode->i_sb);
	return ret;
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_lock (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static vm_fault_t
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	unsigned int		order,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);

	trace_xfs_filemap_fault(XFS_I(inode), order, write_fault);

	if (write_fault)
		return xfs_write_fault(vmf, order);
	if (IS_DAX(inode))
		return xfs_dax_read_fault(vmf, order);
	return filemap_fault(vmf);
}

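/*
 * Only faults on shared, writable mappings need the write fault path; a
 * write to a MAP_PRIVATE mapping is COWed into anonymous memory and never
 * written back to the file.
 */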
static inline bool
xfs_is_write_fault(
	struct vm_fault		*vmf)
{
	return (vmf->flags & FAULT_FLAG_WRITE) &&
	       (vmf->vma->vm_flags & VM_SHARED);
}

static vm_fault_t
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, 0,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			xfs_is_write_fault(vmf));
}

static vm_fault_t
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	unsigned int		order)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, order,
			xfs_is_write_fault(vmf));
}

static vm_fault_t
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, 0, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, 0, true);
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file		*file,
	struct vm_area_struct	*vma)
{
	struct inode		*inode = file_inode(file);
	struct xfs_buftarg	*target = xfs_inode_buftarg(XFS_I(inode));

	/*
	 * We don't support synchronous mappings for non-DAX files, nor for
	 * DAX files if the underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, target->bt_daxdev))
		return -EOPNOTSUPP;

	file_accessed(file);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(inode))
		vm_flags_set(vma, VM_HUGEPAGE);
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.iopoll		= iocb_bio_iopoll,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.fadvise	= xfs_file_fadvise,
	.remap_file_range = xfs_file_remap_range,
	.fop_flags	= FOP_MMAP_SYNC | FOP_BUFFER_RASYNC |
			  FOP_BUFFER_WASYNC | FOP_DIO_PARALLEL_WRITE,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};