Lines Matching +full:ip +full:- +full:block
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
31 #include <linux/backing-dev.h>
44 struct xfs_inode *ip, in xfs_is_falloc_aligned() argument
48 unsigned int alloc_unit = xfs_inode_alloc_unitsize(ip); in xfs_is_falloc_aligned()
54 return !((pos | len) & (alloc_unit - 1)); in xfs_is_falloc_aligned()
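
The return above uses the standard power-of-two alignment test: OR-ing pos and len and masking with alloc_unit - 1 is zero only when both are multiples of alloc_unit. A minimal userspace sketch of the same trick, assuming the unit is a power of two (names and values here are illustrative, not from xfs_file.c):

	#include <assert.h>
	#include <stdbool.h>

	/* Zero low bits in both pos and len <=> both are multiples of unit. */
	static bool is_aligned(unsigned long long pos, unsigned long long len,
			       unsigned int unit)
	{
		return !((pos | len) & (unit - 1));
	}

	int main(void)
	{
		assert(is_aligned(8192, 4096, 4096));   /* both multiples of 4096 */
		assert(!is_aligned(8192, 4100, 4096));  /* len has low bits set */
		assert(!is_aligned(4097, 4096, 4096));  /* pos has low bits set */
		return 0;
	}
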
60 * cache flush operations, and there are no non-transaction metadata updates
70 struct xfs_inode *ip = XFS_I(file->f_mapping->host); in xfs_dir_fsync() local
72 trace_xfs_dir_fsync(ip); in xfs_dir_fsync()
73 return xfs_log_force_inode(ip); in xfs_dir_fsync()
78 struct xfs_inode *ip, in xfs_fsync_seq() argument
81 if (!xfs_ipincount(ip)) in xfs_fsync_seq()
83 if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP)) in xfs_fsync_seq()
85 return ip->i_itemp->ili_commit_seq; in xfs_fsync_seq()
92 * If we have concurrent fsync/fdatasync() calls, we need them to all block on
103 struct xfs_inode *ip, in xfs_fsync_flush_log() argument
110 xfs_ilock(ip, XFS_ILOCK_SHARED); in xfs_fsync_flush_log()
111 seq = xfs_fsync_seq(ip, datasync); in xfs_fsync_flush_log()
113 error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, in xfs_fsync_flush_log()
116 spin_lock(&ip->i_itemp->ili_lock); in xfs_fsync_flush_log()
117 ip->i_itemp->ili_fsync_fields = 0; in xfs_fsync_flush_log()
118 spin_unlock(&ip->i_itemp->ili_lock); in xfs_fsync_flush_log()
120 xfs_iunlock(ip, XFS_ILOCK_SHARED); in xfs_fsync_flush_log()
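
The xfs_fsync_seq()/xfs_fsync_flush_log() fragments above sample the inode's last commit sequence under the shared ILOCK and force the log only up to that sequence, so concurrent fsync/fdatasync callers block on the same flush rather than each issuing one. A toy single-threaded model of that piggybacking (all names illustrative, not the kernel API):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t stable_seq;        /* highest sequence already on stable storage */

	/* Force the log up to seq; a no-op if an earlier caller already covered it. */
	static int log_force_seq(uint64_t seq)
	{
		if (seq <= stable_seq)
			return 0;          /* piggyback on a prior force */
		/* ...the real code would issue the device flush here... */
		stable_seq = seq;
		return 0;
	}

	int main(void)
	{
		log_force_seq(42);         /* first fsync does the work */
		log_force_seq(40);         /* a later fsync with an older target is free */
		printf("stable up to %llu\n", (unsigned long long)stable_seq);
		return 0;
	}
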
131 struct xfs_inode *ip = XFS_I(file->f_mapping->host); in xfs_file_fsync() local
132 struct xfs_mount *mp = ip->i_mount; in xfs_file_fsync()
136 trace_xfs_file_fsync(ip); in xfs_file_fsync()
143 return -EIO; in xfs_file_fsync()
145 xfs_iflags_clear(ip, XFS_ITRUNCATED); in xfs_file_fsync()
153 if (XFS_IS_REALTIME_INODE(ip)) in xfs_file_fsync()
154 error = blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev); in xfs_file_fsync()
155 else if (mp->m_logdev_targp != mp->m_ddev_targp) in xfs_file_fsync()
156 error = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev); in xfs_file_fsync()
164 if (xfs_ipincount(ip)) { in xfs_file_fsync()
165 err2 = xfs_fsync_flush_log(ip, datasync, &log_flushed); in xfs_file_fsync()
172 * a no-op we might have to flush the data device cache here. in xfs_file_fsync()
177 if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) && in xfs_file_fsync()
178 mp->m_logdev_targp == mp->m_ddev_targp) { in xfs_file_fsync()
179 err2 = blkdev_issue_flush(mp->m_ddev_targp->bt_bdev); in xfs_file_fsync()
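
The xfs_file_fsync() fragments show the flush target depending on device topology: realtime file data lives on the RT device, and with an external log device the data device must be flushed explicitly, because the subsequent log force only covers the log device. A hedged restatement of that decision with stand-in types and names (not the kernel's):

	#include <stdbool.h>
	#include <stdio.h>

	struct devs {
		int data_dev;              /* illustrative stand-ins for the buftargs */
		int rt_dev;
		int log_dev;
	};

	static int flush(int dev) { printf("flush dev %d\n", dev); return 0; }

	static int flush_data_cache(const struct devs *d, bool realtime)
	{
		if (realtime)
			return flush(d->rt_dev);    /* file data is on the RT device */
		if (d->log_dev != d->data_dev)
			return flush(d->data_dev);  /* external log: the force won't cover data */
		return 0;                           /* single device: the log force flushes it */
	}

	int main(void)
	{
		struct devs d = { .data_dev = 1, .rt_dev = 2, .log_dev = 3 };

		return flush_data_cache(&d, false);
	}
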
192 struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp)); in xfs_ilock_iocb() local
194 if (iocb->ki_flags & IOCB_NOWAIT) { in xfs_ilock_iocb()
195 if (!xfs_ilock_nowait(ip, lock_mode)) in xfs_ilock_iocb()
196 return -EAGAIN; in xfs_ilock_iocb()
198 xfs_ilock(ip, lock_mode); in xfs_ilock_iocb()
210 struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp)); in xfs_ilock_iocb_for_write() local
221 xfs_iflags_test(ip, XFS_IREMAPPING)) { in xfs_ilock_iocb_for_write()
222 xfs_iunlock(ip, *lock_mode); in xfs_ilock_iocb_for_write()
235 struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp)); in xfs_file_dio_read() local
243 file_accessed(iocb->ki_filp); in xfs_file_dio_read()
249 xfs_iunlock(ip, XFS_IOLOCK_SHARED); in xfs_file_dio_read()
259 struct xfs_inode *ip = XFS_I(iocb->ki_filp->f_mapping->host); in xfs_file_dax_read() local
271 xfs_iunlock(ip, XFS_IOLOCK_SHARED); in xfs_file_dax_read()
273 file_accessed(iocb->ki_filp); in xfs_file_dax_read()
282 struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp)); in xfs_file_buffered_read() local
291 xfs_iunlock(ip, XFS_IOLOCK_SHARED); in xfs_file_buffered_read()
301 struct inode *inode = file_inode(iocb->ki_filp); in xfs_file_read_iter()
302 struct xfs_mount *mp = XFS_I(inode)->i_mount; in xfs_file_read_iter()
308 return -EIO; in xfs_file_read_iter()
312 else if (iocb->ki_flags & IOCB_DIRECT) in xfs_file_read_iter()
331 struct xfs_inode *ip = XFS_I(inode); in xfs_file_splice_read() local
332 struct xfs_mount *mp = ip->i_mount; in xfs_file_splice_read()
338 return -EIO; in xfs_file_splice_read()
340 trace_xfs_file_splice_read(ip, *ppos, len); in xfs_file_splice_read()
342 xfs_ilock(ip, XFS_IOLOCK_SHARED); in xfs_file_splice_read()
344 xfs_iunlock(ip, XFS_IOLOCK_SHARED); in xfs_file_splice_read()
351 * Take care of zeroing post-EOF blocks when they might exist.
365 struct xfs_inode *ip = XFS_I(iocb->ki_filp->f_mapping->host); in xfs_file_write_zero_eof() local
378 spin_lock(&ip->i_flags_lock); in xfs_file_write_zero_eof()
379 isize = i_size_read(VFS_I(ip)); in xfs_file_write_zero_eof()
380 if (iocb->ki_pos <= isize) { in xfs_file_write_zero_eof()
381 spin_unlock(&ip->i_flags_lock); in xfs_file_write_zero_eof()
384 spin_unlock(&ip->i_flags_lock); in xfs_file_write_zero_eof()
386 if (iocb->ki_flags & IOCB_NOWAIT) in xfs_file_write_zero_eof()
387 return -EAGAIN; in xfs_file_write_zero_eof()
396 xfs_iunlock(ip, *iolock); in xfs_file_write_zero_eof()
398 xfs_ilock(ip, *iolock); in xfs_file_write_zero_eof()
405 * wait for all of them to drain. Non-AIO DIO will have drained in xfs_file_write_zero_eof()
407 * cases this wait is a no-op. in xfs_file_write_zero_eof()
409 inode_dio_wait(VFS_I(ip)); in xfs_file_write_zero_eof()
414 trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize); in xfs_file_write_zero_eof()
416 xfs_ilock(ip, XFS_MMAPLOCK_EXCL); in xfs_file_write_zero_eof()
417 error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL); in xfs_file_write_zero_eof()
418 xfs_iunlock(ip, XFS_MMAPLOCK_EXCL); in xfs_file_write_zero_eof()
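
The xfs_file_write_zero_eof() fragments trace a classic drop-and-recheck sequence: sample isize under the spinlock; if post-EOF zeroing is needed, drop and retake the I/O lock, re-sample (another writer may have extended the file meanwhile), drain in-flight direct I/O, and only then zero from the old EOF to the write position under the exclusive MMAPLOCK. A schematic single-threaded sketch of that control flow (all helpers are illustrative stubs):

	#include <stdbool.h>
	#include <stdio.h>

	static long long isize = 4096;                 /* pretend current EOF */

	static long long read_isize(void)   { return isize; }
	static void relock_exclusive(void)  { }        /* drop + retake the I/O lock */
	static void wait_inflight_dio(void) { }        /* inode_dio_wait() in the real code */
	static void zero_range(long long from, long long len)
	{
		printf("zeroing [%lld, %lld)\n", from, from + len);
	}

	static int zero_eof(long long pos)
	{
		bool relocked = false;
		long long size;

	retry:
		size = read_isize();
		if (pos <= size)
			return 0;                      /* write starts at or before EOF */
		if (!relocked) {
			relock_exclusive();
			relocked = true;
			goto retry;                    /* must re-sample isize after relocking */
		}
		wait_inflight_dio();
		zero_range(size, pos - size);
		return 0;
	}

	int main(void) { return zero_eof(10000); }
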
424 * Common pre-write limit and setup checks.
436 struct inode *inode = iocb->ki_filp->f_mapping->host; in xfs_file_write_checks()
446 if (iocb->ki_flags & IOCB_NOWAIT) { in xfs_file_write_checks()
448 if (error == -EWOULDBLOCK) in xfs_file_write_checks()
449 error = -EAGAIN; in xfs_file_write_checks()
482 if (iocb->ki_pos > i_size_read(inode)) { in xfs_file_write_checks()
501 struct inode *inode = file_inode(iocb->ki_filp); in xfs_dio_write_end_io()
502 struct xfs_inode *ip = XFS_I(inode); in xfs_dio_write_end_io() local
503 loff_t offset = iocb->ki_pos; in xfs_dio_write_end_io()
506 trace_xfs_end_io_direct_write(ip, offset, size); in xfs_dio_write_end_io()
508 if (xfs_is_shutdown(ip->i_mount)) in xfs_dio_write_end_io()
509 return -EIO; in xfs_dio_write_end_io()
520 XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size); in xfs_dio_write_end_io()
525 * task-wide nofs context for the following operations. in xfs_dio_write_end_io()
530 error = xfs_reflink_end_cow(ip, offset, size); in xfs_dio_write_end_io()
536 * Unwritten conversion updates the in-core isize after extent in xfs_dio_write_end_io()
537 * conversion but before updating the on-disk size. Updating isize any in xfs_dio_write_end_io()
542 error = xfs_iomap_write_unwritten(ip, offset, size, true); in xfs_dio_write_end_io()
547 * We need to update the in-core inode size here so that we don't end up in xfs_dio_write_end_io()
548 * with the on-disk inode size being outside the in-core inode size. We in xfs_dio_write_end_io()
567 spin_lock(&ip->i_flags_lock); in xfs_dio_write_end_io()
570 spin_unlock(&ip->i_flags_lock); in xfs_dio_write_end_io()
571 error = xfs_setfilesize(ip, offset, size); in xfs_dio_write_end_io()
573 spin_unlock(&ip->i_flags_lock); in xfs_dio_write_end_io()
586 * Handle block aligned direct I/O writes
590 struct xfs_inode *ip, in xfs_file_dio_write_aligned() argument
610 xfs_ilock_demote(ip, XFS_IOLOCK_EXCL); in xfs_file_dio_write_aligned()
618 xfs_iunlock(ip, iolock); in xfs_file_dio_write_aligned()
623 * Handle block unaligned direct I/O writes
628 * to do sub-block zeroing and that requires serialisation against other direct
629 * I/O to the same block. In this case we need to serialise the submission of
630 * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
631 * In the case where sub-block zeroing is not required, we can do concurrent
632 * sub-block dios to the same block successfully.
635 * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
636 * if block allocation or partial block zeroing would be required. In that case
641 struct xfs_inode *ip, in xfs_file_dio_write_unaligned() argument
645 size_t isize = i_size_read(VFS_I(ip)); in xfs_file_dio_write_unaligned()
652 * Extending writes need exclusivity because of the sub-block zeroing in xfs_file_dio_write_unaligned()
656 if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) { in xfs_file_dio_write_unaligned()
657 if (iocb->ki_flags & IOCB_NOWAIT) in xfs_file_dio_write_unaligned()
658 return -EAGAIN; in xfs_file_dio_write_unaligned()
670 * as we can't unshare a partial block. in xfs_file_dio_write_unaligned()
672 if (xfs_is_cow_inode(ip)) { in xfs_file_dio_write_unaligned()
674 ret = -ENOTBLK; in xfs_file_dio_write_unaligned()
684 * in-flight. Otherwise we risk data corruption due to unwritten extent in xfs_file_dio_write_unaligned()
689 inode_dio_wait(VFS_I(ip)); in xfs_file_dio_write_unaligned()
700 if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) { in xfs_file_dio_write_unaligned()
702 xfs_iunlock(ip, iolock); in xfs_file_dio_write_unaligned()
708 xfs_iunlock(ip, iolock); in xfs_file_dio_write_unaligned()
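
The fragments above are the tail of the two-phase scheme the comment block describes: first attempt the write with IOMAP_DIO_OVERWRITE_ONLY under the shared lock, and if the lower layers return -EAGAIN (block allocation or sub-block zeroing would be required) and the caller did not set IOCB_NOWAIT, retry under the exclusive IOLOCK after draining in-flight DIO. A runnable schematic of the retry ladder (stub helpers, not the kernel API):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Stubs standing in for the iomap and locking machinery; names illustrative. */
	static long try_overwrite_shared(void) { return -EAGAIN; } /* pretend zeroing is needed */
	static long write_exclusive(void)      { return 4096; }
	static void lock_exclusive(void)       { }
	static void unlock_exclusive(void)     { }
	static void wait_inflight_dio(void)    { }

	static long unaligned_dio_write(bool nowait)
	{
		long ret = try_overwrite_shared();  /* IOMAP_DIO_OVERWRITE_ONLY attempt */

		if (ret == -EAGAIN && !nowait) {
			lock_exclusive();
			wait_inflight_dio();        /* no racing sub-block zeroing allowed */
			ret = write_exclusive();    /* may allocate or zero sub-block ranges */
			unlock_exclusive();
		}
		return ret;
	}

	int main(void)
	{
		printf("wrote %ld bytes\n", unaligned_dio_write(false));
		return 0;
	}
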
717 struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp)); in xfs_file_dio_write() local
718 struct xfs_buftarg *target = xfs_inode_buftarg(ip); in xfs_file_dio_write()
722 if ((iocb->ki_pos | count) & target->bt_logical_sectormask) in xfs_file_dio_write()
723 return -EINVAL; in xfs_file_dio_write()
724 if ((iocb->ki_pos | count) & ip->i_mount->m_blockmask) in xfs_file_dio_write()
725 return xfs_file_dio_write_unaligned(ip, iocb, from); in xfs_file_dio_write()
726 return xfs_file_dio_write_aligned(ip, iocb, from); in xfs_file_dio_write()
734 struct inode *inode = iocb->ki_filp->f_mapping->host; in xfs_file_dax_write()
735 struct xfs_inode *ip = XFS_I(inode); in xfs_file_dax_write() local
747 pos = iocb->ki_pos; in xfs_file_dax_write()
751 if (ret > 0 && iocb->ki_pos > i_size_read(inode)) { in xfs_file_dax_write()
752 i_size_write(inode, iocb->ki_pos); in xfs_file_dax_write()
753 error = xfs_setfilesize(ip, pos, ret); in xfs_file_dax_write()
757 xfs_iunlock(ip, iolock); in xfs_file_dax_write()
762 XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret); in xfs_file_dax_write()
764 /* Handle various SYNC-type writes */ in xfs_file_dax_write()
775 struct inode *inode = iocb->ki_filp->f_mapping->host; in xfs_file_buffered_write()
776 struct xfs_inode *ip = XFS_I(inode); in xfs_file_buffered_write() local
805 if (ret == -EDQUOT && !cleared_space) { in xfs_file_buffered_write()
806 xfs_iunlock(ip, iolock); in xfs_file_buffered_write()
807 xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC); in xfs_file_buffered_write()
810 } else if (ret == -ENOSPC && !cleared_space) { in xfs_file_buffered_write()
814 xfs_flush_inodes(ip->i_mount); in xfs_file_buffered_write()
816 xfs_iunlock(ip, iolock); in xfs_file_buffered_write()
818 xfs_blockgc_free_space(ip->i_mount, &icw); in xfs_file_buffered_write()
824 xfs_iunlock(ip, iolock); in xfs_file_buffered_write()
827 XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret); in xfs_file_buffered_write()
828 /* Handle various SYNC-type writes */ in xfs_file_buffered_write()
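
The xfs_file_buffered_write() fragments above are the out-of-space fallback: on -EDQUOT, quota-owned speculative blocks are garbage-collected; on -ENOSPC, dirty data is flushed and filesystem-wide preallocations reclaimed; then the write is retried exactly once, with cleared_space guarding against looping. A minimal model of that ladder (stub reclaim hooks, illustrative names):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	static void free_quota_blocks(void)       { }  /* xfs_blockgc_free_quota() */
	static void flush_dirty_inodes(void)      { }  /* xfs_flush_inodes() */
	static void reclaim_preallocations(void)  { }  /* xfs_blockgc_free_space() */
	static long try_buffered_write(int attempt) { return attempt ? 4096 : -ENOSPC; }

	/* One-shot retry after reclaiming space, mirroring the pattern above. */
	static long buffered_write(void)
	{
		bool cleared_space = false;
		int attempt = 0;
		long ret;

	retry:
		ret = try_buffered_write(attempt++);
		if (ret == -EDQUOT && !cleared_space) {
			free_quota_blocks();
			cleared_space = true;
			goto retry;
		} else if (ret == -ENOSPC && !cleared_space) {
			cleared_space = true;           /* only retry once */
			flush_dirty_inodes();
			reclaim_preallocations();
			goto retry;
		}
		return ret;
	}

	int main(void)
	{
		printf("ret = %ld\n", buffered_write());
		return 0;
	}
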
839 struct inode *inode = iocb->ki_filp->f_mapping->host; in xfs_file_write_iter()
840 struct xfs_inode *ip = XFS_I(inode); in xfs_file_write_iter() local
844 XFS_STATS_INC(ip->i_mount, xs_write_calls); in xfs_file_write_iter()
849 if (xfs_is_shutdown(ip->i_mount)) in xfs_file_write_iter()
850 return -EIO; in xfs_file_write_iter()
855 if (iocb->ki_flags & IOCB_DIRECT) { in xfs_file_write_iter()
863 if (ret != -ENOTBLK) in xfs_file_write_iter()
873 struct xfs_inode *ip = XFS_I(file_inode(filp)); in xfs_file_sync_writes() local
875 if (xfs_has_wsync(ip->i_mount)) in xfs_file_sync_writes()
877 if (filp->f_flags & (__O_SYNC | O_DSYNC)) in xfs_file_sync_writes()
924 loff_t new_size = i_size_read(inode) - len; in xfs_falloc_collapse_range()
928 return -EINVAL; in xfs_falloc_collapse_range()
935 return -EINVAL; in xfs_falloc_collapse_range()
954 return -EINVAL; in xfs_falloc_insert_range()
957 * New inode size must not exceed ->s_maxbytes, accounting for in xfs_falloc_insert_range()
960 if (inode->i_sb->s_maxbytes - isize < len) in xfs_falloc_insert_range()
961 return -EFBIG; in xfs_falloc_insert_range()
965 return -EINVAL; in xfs_falloc_insert_range()
984 * 1.) Hole punch handles partial block zeroing for us.
985 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued by
1010 len = round_up(offset + len, blksize) - round_down(offset, blksize); in xfs_falloc_zero_range()
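
The len computation above widens the range handed to the hole punch/preallocation pair out to whole blocks: the start is rounded down and the end rounded up. A worked example of the same arithmetic, assuming a power-of-two block size (round_down_p2/round_up_p2 are local reimplementations, not the kernel macros):

	#include <stdio.h>

	static unsigned long long round_down_p2(unsigned long long x, unsigned long long a)
	{
		return x & ~(a - 1);
	}

	static unsigned long long round_up_p2(unsigned long long x, unsigned long long a)
	{
		return (x + a - 1) & ~(a - 1);
	}

	int main(void)
	{
		unsigned long long offset = 3000, len = 5000, blksize = 4096;
		unsigned long long start = round_down_p2(offset, blksize);
		unsigned long long end = round_up_p2(offset + len, blksize);

		/* Bytes 3000..8000 expand to 0..8192: two whole 4 KiB blocks. */
		printf("zeroing [%llu, %llu), %llu bytes\n", start, end, end - start);
		return 0;
	}
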
1059 return -EOPNOTSUPP; in xfs_falloc_allocate_range()
1084 struct xfs_inode *ip = XFS_I(inode); in xfs_file_fallocate() local
1088 if (!S_ISREG(inode->i_mode)) in xfs_file_fallocate()
1089 return -EINVAL; in xfs_file_fallocate()
1091 return -EOPNOTSUPP; in xfs_file_fallocate()
1093 xfs_ilock(ip, iolock); in xfs_file_fallocate()
1103 * require the in-memory size to be fully up-to-date. in xfs_file_fallocate()
1113 error = xfs_free_file_space(ip, offset, len); in xfs_file_fallocate()
1131 error = -EOPNOTSUPP; in xfs_file_fallocate()
1136 error = xfs_log_force_inode(ip); in xfs_file_fallocate()
1139 xfs_iunlock(ip, iolock); in xfs_file_fallocate()
1150 struct xfs_inode *ip = XFS_I(file_inode(file)); in xfs_file_fadvise() local
1160 xfs_ilock(ip, lockflags); in xfs_file_fadvise()
1164 xfs_iunlock(ip, lockflags); in xfs_file_fadvise()
1181 struct xfs_mount *mp = src->i_mount; in xfs_file_remap_range()
1187 return -EINVAL; in xfs_file_remap_range()
1190 return -EOPNOTSUPP; in xfs_file_remap_range()
1193 return -EIO; in xfs_file_remap_range()
1215 (src->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) && in xfs_file_remap_range()
1217 !(dest->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)) in xfs_file_remap_range()
1218 cowextsize = src->i_cowextsize; in xfs_file_remap_range()
1239 if (xfs_is_shutdown(XFS_M(inode->i_sb))) in xfs_file_open()
1240 return -EIO; in xfs_file_open()
1241 file->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT; in xfs_file_open()
1250 struct xfs_inode *ip = XFS_I(inode); in xfs_dir_open() local
1254 if (xfs_is_shutdown(ip->i_mount)) in xfs_dir_open()
1255 return -EIO; in xfs_dir_open()
1261 * If there are any blocks, read-ahead block 0 as we're almost in xfs_dir_open()
1264 mode = xfs_ilock_data_map_shared(ip); in xfs_dir_open()
1265 if (ip->i_df.if_nextents > 0) in xfs_dir_open()
1266 error = xfs_dir3_data_readahead(ip, 0, 0); in xfs_dir_open()
1267 xfs_iunlock(ip, mode); in xfs_dir_open()
1280 struct xfs_inode *ip = XFS_I(inode); in xfs_file_release() local
1281 struct xfs_mount *mp = ip->i_mount; in xfs_file_release()
1284 * If this is a read-only mount or the file system has been shut down, in xfs_file_release()
1294 * is particularly noticeable from a truncate down, buffered (re-)write in xfs_file_release()
1299 if (xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED)) { in xfs_file_release()
1300 xfs_iflags_clear(ip, XFS_EOFBLOCKS_RELEASED); in xfs_file_release()
1301 if (ip->i_delayed_blks > 0) in xfs_file_release()
1302 filemap_flush(inode->i_mapping); in xfs_file_release()
1306 * XFS aggressively preallocates post-EOF space to generate contiguous in xfs_file_release()
1316 * This heuristic is skipped for inodes with the append-only flag as in xfs_file_release()
1322 * When releasing a read-only context, don't flush data or trim post-EOF in xfs_file_release()
1331 if (inode->i_nlink && in xfs_file_release()
1332 (file->f_mode & FMODE_WRITE) && in xfs_file_release()
1333 !(ip->i_diflags & XFS_DIFLAG_APPEND) && in xfs_file_release()
1334 !xfs_iflags_test(ip, XFS_EOFBLOCKS_RELEASED) && in xfs_file_release()
1335 xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { in xfs_file_release()
1336 if (xfs_can_free_eofblocks(ip) && in xfs_file_release()
1337 !xfs_iflags_test_and_set(ip, XFS_EOFBLOCKS_RELEASED)) in xfs_file_release()
1338 xfs_free_eofblocks(ip); in xfs_file_release()
1339 xfs_iunlock(ip, XFS_IOLOCK_EXCL); in xfs_file_release()
1351 xfs_inode_t *ip = XFS_I(inode); in xfs_file_readdir() local
1363 * point we can change the ->readdir prototype to include the in xfs_file_readdir()
1366 bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_disk_size); in xfs_file_readdir()
1368 return xfs_readdir(NULL, ip, ctx, bufsize); in xfs_file_readdir()
1377 struct inode *inode = file->f_mapping->host; in xfs_file_llseek()
1379 if (xfs_is_shutdown(XFS_I(inode)->i_mount)) in xfs_file_llseek()
1380 return -EIO; in xfs_file_llseek()
1395 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes); in xfs_file_llseek()
1412 (write_fault && !vmf->cow_page) ? in xfs_dax_fault_locked()
1425 struct xfs_inode *ip = XFS_I(file_inode(vmf->vma->vm_file)); in xfs_dax_read_fault() local
1428 xfs_ilock(ip, XFS_MMAPLOCK_SHARED); in xfs_dax_read_fault()
1430 xfs_iunlock(ip, XFS_MMAPLOCK_SHARED); in xfs_dax_read_fault()
1440 struct inode *inode = file_inode(vmf->vma->vm_file); in xfs_write_fault()
1441 struct xfs_inode *ip = XFS_I(inode); in xfs_write_fault() local
1445 sb_start_pagefault(inode->i_sb); in xfs_write_fault()
1446 file_update_time(vmf->vma->vm_file); in xfs_write_fault()
1453 xfs_ilock(ip, XFS_MMAPLOCK_SHARED); in xfs_write_fault()
1454 if (xfs_iflags_test(ip, XFS_IREMAPPING)) { in xfs_write_fault()
1455 xfs_iunlock(ip, XFS_MMAPLOCK_SHARED); in xfs_write_fault()
1456 xfs_ilock(ip, XFS_MMAPLOCK_EXCL); in xfs_write_fault()
1464 xfs_iunlock(ip, lock_mode); in xfs_write_fault()
1466 sb_end_pagefault(inode->i_sb); in xfs_write_fault()
1476 * invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
1478 * i_lock (XFS - extent map serialisation)
1486 struct inode *inode = file_inode(vmf->vma->vm_file); in __xfs_filemap_fault()
1501 return (vmf->flags & FAULT_FLAG_WRITE) && in xfs_is_write_fault()
1502 (vmf->vma->vm_flags & VM_SHARED); in xfs_is_write_fault()
1511 IS_DAX(file_inode(vmf->vma->vm_file)) && in xfs_filemap_fault()
1520 if (!IS_DAX(file_inode(vmf->vma->vm_file))) in xfs_filemap_huge_fault()
1565 * We don't support synchronous mappings for non-DAX files and in xfs_file_mmap()
1568 if (!daxdev_mapping_supported(vma, target->bt_daxdev)) in xfs_file_mmap()
1569 return -EOPNOTSUPP; in xfs_file_mmap()
1572 vma->vm_ops = &xfs_file_vm_ops; in xfs_file_mmap()