/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		inode_lock(VFS_I(ip));
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		inode_unlock(VFS_I(ip));
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		inode_unlock(VFS_I(ip));
}

/*
 * xfs_iozero clears the specified range supplied via the page cache (except in
 * the DAX case). Writes through the page cache will allocate blocks over holes,
 * though the callers usually map the holes first and avoid them. If a block is
 * not completely zeroed, then it will be read from disk before being partially
 * zeroed.
 *
 * In the DAX case, we can just directly write to the underlying pages. This
 * will not allocate blocks, but will avoid holes and unwritten extents and so
 * not do unnecessary work.
 */
int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status = 0;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_SIZE - 1)); /* Within page */
		bytes = PAGE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		if (IS_DAX(VFS_I(ip))) {
			status = dax_zero_page_range(VFS_I(ip), pos, bytes,
						     xfs_get_blocks_direct);
			if (status)
				break;
		} else {
			status = pagecache_write_begin(NULL, mapping, pos, bytes,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
			if (status)
				break;

			zero_user(page, offset, bytes);

			status = pagecache_write_end(NULL, mapping, pos, bytes,
						bytes, page, fsdata);
			WARN_ON(status <= 0); /* can't return less than zero! */
			status = 0;
		}
		pos += bytes;
		count -= bytes;
	} while (count);

	return status;
}

int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

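/*
 * Write back all dirty data in the given range and force the log up to the
 * last LSN that modified the inode.  If the data and/or log devices have
 * volatile write caches (XFS_MOUNT_BARRIER), issue the cache flushes needed
 * so both the file data and the journalled metadata end up on stable storage.
 */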
STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache of the device used for file data
		 * first.  This is to ensure newly written file data makes
		 * it to disk before logging the new inode size in case of
		 * an extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

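/*
 * Direct and DAX read path.  Reads are normally issued under IOLOCK_SHARED;
 * the lock is only cycled up to exclusive to write back and invalidate any
 * cached pages over the file before the direct IO is submitted.
 */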
STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct address_space	*mapping = iocb->ki_filp->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			isize = i_size_read(inode);
	size_t			count = iov_iter_count(to);
	struct iov_iter		data;
	struct xfs_buftarg	*target;
	ssize_t			ret = 0;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (XFS_IS_REALTIME_INODE(ip))
		target = ip->i_mount->m_rtdev_targp;
	else
		target = ip->i_mount->m_ddev_targp;

	if (!IS_DAX(inode)) {
		/* DIO must be aligned to device logical sector size */
		if ((iocb->ki_pos | count) & target->bt_logical_sectormask) {
			if (iocb->ki_pos == isize)
				return 0;
			return -EINVAL;
		}
	}

	/*
	 * Locking is a bit tricky here. If we take an exclusive lock for direct
	 * IO, we effectively serialise all new concurrent read IO to this file
	 * and block it behind IO that is currently in progress because IO in
	 * progress holds the IO lock shared. We only need to hold the lock
	 * exclusive to blow away the page cache, so only take the lock
	 * exclusively if the page cache needs invalidation. This allows the
	 * normal direct IO case of no page cache pages to proceed concurrently
	 * without serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if (mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		/*
		 * The generic dio code only flushes the range of the particular
		 * I/O. Because we take an exclusive lock here, this whole
		 * sequence is considerably more expensive for us. This has a
		 * noticeable performance impact for any file with cached pages,
		 * even when outside of the range of the particular I/O.
		 *
		 * Hence, amortize the cost of the lock against a full file
		 * flush and reduce the chances of repeated iolock cycles going
		 * forward.
		 */
		if (mapping->nrpages) {
			ret = filemap_write_and_wait(mapping);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}

			/*
			 * Invalidate whole pages. This can return an error if
			 * we fail to invalidate a page, but this should never
			 * happen on XFS. Warn if it does fail.
			 */
			ret = invalidate_inode_pages2(mapping);
			WARN_ON_ONCE(ret);
			ret = 0;
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	data = *to;
	if (IS_DAX(inode)) {
		ret = dax_do_io(iocb, inode, &data, xfs_get_blocks_direct,
				NULL, 0);
	} else {
		ret = __blockdev_direct_IO(iocb, inode, target->bt_bdev, &data,
				xfs_get_blocks_direct, NULL, NULL, 0);
	}
	if (ret > 0) {
		iocb->ki_pos += ret;
		iov_iter_advance(to, ret);
	}
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	ret = generic_file_read_iter(iocb, to);
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_mount	*mp = XFS_I(file_inode(iocb->ki_filp))->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

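/*
 * Splice data from the file into a pipe.  The generic splice path is wrapped
 * with IOLOCK_SHARED; DAX inodes are handed to default_file_splice_read(),
 * which goes through ->read_iter and takes the lock itself.
 */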
STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	ssize_t			ret;

	XFS_STATS_INC(ip->i_mount, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	trace_xfs_file_splice_read(ip, count, *ppos);

	/*
	 * DAX inodes cannot use the page cache for splice, so we have to push
	 * them through the VFS IO path. This means it goes through
	 * ->read_iter, which for us takes the XFS_IOLOCK_SHARED. Hence we
	 * cannot lock the splice operation at this level for DAX inodes.
	 */
	if (IS_DAX(VFS_I(ip))) {
		ret = default_file_splice_read(infilp, ppos, pipe, count,
					       flags);
		goto out;
	}

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
out:
	if (ret > 0)
		XFS_STATS_ADD(ip->i_mount, xs_read_bytes, ret);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last block of the
 * file that is beyond the EOF.  We do this since the size is being increased
 * without writing anything to that block and we don't want to read the
 * garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	struct xfs_inode	*ip,
	xfs_fsize_t		offset,
	xfs_fsize_t		isize,
	bool			*did_zeroing)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		last_fsb = XFS_B_TO_FSBT(mp, isize);
	int			zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	int			zero_len;
	int			nimaps = 1;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	ASSERT(nimaps > 0);

	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK)
		return 0;

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	*did_zeroing = true;
	return xfs_iozero(ip, isize, zero_len);
}

/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int					/* error (positive) */
xfs_zero_eof(
	struct xfs_inode	*ip,
	xfs_off_t		offset,		/* starting I/O offset */
	xfs_fsize_t		isize,		/* current inode size */
	bool			*did_zeroing)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_zero_fsb;
	xfs_fileoff_t		end_zero_fsb;
	xfs_fileoff_t		zero_count_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_fileoff_t		zero_off;
	xfs_fsize_t		zero_len;
	int			nimaps;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	trace_xfs_zero_eof(ip, isize, offset - isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 *
	 * We only zero a part of that block so it is handled specially.
	 */
	if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
		error = xfs_zero_last_block(ip, offset, isize, did_zeroing);
		if (error)
			return error;
	}

	/*
	 * Calculate the range between the new size and the old where blocks
	 * needing to be zeroed may exist.
	 *
	 * To get the block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back to a block
	 * boundary.  We subtract 1 in case the size is exactly on a block
	 * boundary.
	 */
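	/*
	 * Illustration (assuming 4096 byte blocks): if the old size is 10000
	 * and the new size is 20000, xfs_zero_last_block() above has already
	 * zeroed the rest of block 2 (bytes 10000-12287), so last_fsb = 2,
	 * start_zero_fsb = 3 and end_zero_fsb = 4.  The loop below then zeroes
	 * any written blocks in the range 12288-19999, skipping holes and
	 * unwritten extents.
	 */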
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
					  &imap, &nimaps, 0);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 */
		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error)
			return error;

		*did_zeroing = true;
		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
	}

	return 0;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock, true);
	if (error)
		return error;

	/* For changing security info in file_remove_privs() we need i_mutex */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_rw_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	if (iocb->ki_pos > i_size_read(inode)) {
		bool	zero = false;

		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_rw_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_rw_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}
		error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	if (!IS_NOSEC(inode))
		return file_remove_privs(file);
	return 0;
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	loff_t			end;
	struct iov_iter		data;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if (!IS_DAX(inode) &&
	    ((iocb->ki_pos | count) & target->bt_logical_sectormask))
		return -EINVAL;

	/* "unaligned" here means not aligned to a filesystem block */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	/*
	 * We don't need to take an exclusive lock unless the page cache needs
	 * to be invalidated or unaligned IO is being executed. We don't need to
	 * consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);

	/*
	 * Recheck if there are cached pages that need invalidating after we
	 * got the iolock to protect against other threads adding new pages
	 * while we were waiting for the iolock.
	 */
	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);
	end = iocb->ki_pos + count - 1;

	/*
	 * See xfs_file_dio_aio_read() for why we do a full-file flush here.
	 */
	if (mapping->nrpages) {
		ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
		if (ret)
			goto out;
		/*
		 * Invalidate whole pages. This can return an error if we fail
		 * to invalidate a page, but this should never happen on XFS.
		 * Warn if it does fail.
		 */
		ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
		WARN_ON_ONCE(ret);
		ret = 0;
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);

	data = *from;
	if (IS_DAX(inode)) {
		ret = dax_do_io(iocb, inode, &data, xfs_get_blocks_direct,
				xfs_end_io_direct_write, 0);
	} else {
		ret = __blockdev_direct_IO(iocb, inode, target->bt_bdev, &data,
				xfs_get_blocks_direct, xfs_end_io_direct_write,
				NULL, DIO_ASYNC_EXTEND);
	}

	/* see generic_file_direct_write() for why this is necessary */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      iocb->ki_pos >> PAGE_SHIFT,
					      end >> PAGE_SHIFT);
	}

	if (ret > 0) {
		iocb->ki_pos += ret;
		iov_iter_advance(from, ret);
	}
out:
	xfs_rw_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS. DAX can result in
	 * partial writes, but direct IO will either complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count || IS_DAX(VFS_I(ip)));
	return ret;
}

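/*
 * Buffered write path.  This always runs under IOLOCK_EXCL.  If the write
 * hits a quota or space limit, try to free up preallocated and speculative
 * EOF blocks and retry the write once before giving up.
 */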
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock = XFS_IOLOCK_EXCL;

	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

write_retry:
	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = generic_perform_write(file, from, iocb->ki_pos);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);
		eofb.eof_scan_owner = ip->i_ino; /* for locking */
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if ((iocb->ki_flags & IOCB_DIRECT) || IS_DAX(inode))
		ret = xfs_file_dio_aio_write(iocb, from);
	else
		ret = xfs_file_buffered_aio_write(iocb, from);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE)

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = 0;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, false);
	if (error)
		goto out_unlock;

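	/*
	 * Take the exclusive mmap lock as well so the extent manipulations
	 * below cannot race with page faults, which take XFS_MMAPLOCK_SHARED.
	 */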
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		new_size = i_size_read(inode) + len;
		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/* check the new inode size does not wrap through zero */
		if (new_size > inode->i_sb->s_maxbytes) {
			error = -EFBIG;
			goto out_unlock;
		}

		/* Offset should be less than i_size */
		if (offset >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = 1;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_setattr_size(ip, &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

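/*
 * Don't allow files that need O_LARGEFILE to be opened without it, and fail
 * new opens on a filesystem that has been shut down.
 */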
STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	return xfs_readdir(ip, ctx, bufsize);
}

/*
 * This type is designed to indicate the type of offset we would like
 * to search from page cache for xfs_seek_hole_data().
 */
enum {
	HOLE_OFF = 0,
	DATA_OFF,
};

/*
 * Lookup the desired type of offset from the given page.
 *
 * On success, return true and the offset argument will point to the
 * start of the region that was found. Otherwise this function will
 * return false and keep the offset argument unchanged.
 */
STATIC bool
xfs_lookup_buffer_offset(
	struct page		*page,
	loff_t			*offset,
	unsigned int		type)
{
	loff_t			lastoff = page_offset(page);
	bool			found = false;
	struct buffer_head	*bh, *head;

	bh = head = page_buffers(page);
	do {
		/*
		 * Unwritten extents that have data in the page
		 * cache covering them can be identified by the
		 * BH_Unwritten state flag.  Pages with multiple
		 * buffers might have a mix of holes, data and
		 * unwritten extents - any buffer with valid
		 * data in it should have BH_Uptodate flag set
		 * on it.
		 */
		if (buffer_unwritten(bh) ||
		    buffer_uptodate(bh)) {
			if (type == DATA_OFF)
				found = true;
		} else {
			if (type == HOLE_OFF)
				found = true;
		}

		if (found) {
			*offset = lastoff;
			break;
		}
		lastoff += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	return found;
}

/*
 * This routine is called to find out and return a data or hole offset
 * from the page cache for unwritten extents according to the desired
 * type for xfs_seek_hole_data().
 *
 * The argument offset is used to tell where we start to search from the
 * page cache.  Map is used to figure out the end points of the range to
 * lookup pages.
 *
 * Return true if the desired type of offset was found, and the argument
 * offset is filled with that address.  Otherwise, return false and keep
 * offset unchanged.
 */
STATIC bool
xfs_find_get_desired_pgoff(
	struct inode		*inode,
	struct xfs_bmbt_irec	*map,
	unsigned int		type,
	loff_t			*offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct pagevec		pvec;
	pgoff_t			index;
	pgoff_t			end;
	loff_t			endoff;
	loff_t			startoff = *offset;
	loff_t			lastoff = startoff;
	bool			found = false;

	pagevec_init(&pvec, 0);

	index = startoff >> PAGE_SHIFT;
	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
	end = endoff >> PAGE_SHIFT;
	do {
		int		want;
		unsigned	nr_pages;
		unsigned int	i;

		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  want);
		/*
		 * No page was mapped into the given range.  If we are
		 * searching for holes and this is the first pass through the
		 * loop, the given offset lands in a hole, so return it.
		 *
		 * If we have already stepped through some block buffers
		 * looking for holes but they all contained data, lastoff
		 * points to the end of the last mapped page.  If it has not
		 * reached the end of the search range, there must be a hole
		 * between them.
		 */
		if (nr_pages == 0) {
			/* Data search found nothing */
			if (type == DATA_OFF)
				break;

			ASSERT(type == HOLE_OFF);
			if (lastoff == startoff || lastoff < endoff) {
				found = true;
				*offset = lastoff;
			}
			break;
		}

		/*
		 * At least one page was found.  If this is the first pass
		 * through the loop and the offset of the first page is
		 * greater than the given search offset, a hole was found.
		 */
		if (type == HOLE_OFF && lastoff == startoff &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = true;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page	*page = pvec.pages[i];
			loff_t		b_offset;

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to tmpfs
			 * file mapping. However, page->index will not change
			 * because we have a reference on the page.
			 *
			 * The search is done if the page index is out of
			 * range.  If the current offset has not reached the
			 * end of the specified search range, there must be a
			 * hole between them.
			 */
			if (page->index > end) {
				if (type == HOLE_OFF && lastoff < endoff) {
					*offset = lastoff;
					found = true;
				}
				goto out;
			}

			lock_page(page);
			/*
			 * Page truncated or invalidated (page->mapping ==
			 * NULL).  We can freely skip it and proceed to check
			 * the next page.
			 */
			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			found = xfs_lookup_buffer_offset(page, &b_offset, type);
			if (found) {
				/*
				 * The offset we found may be less than the
				 * start of the search range if this is the
				 * first page we looked at.
				 */
				*offset = max_t(loff_t, startoff, b_offset);
				unlock_page(page);
				goto out;
			}

			/*
			 * We were either searching for data and found nothing,
			 * or searching for a hole and found a data buffer.  In
			 * either case the next page probably contains what we
			 * are looking for, so update the last offset to point
			 * at it.
			 */
			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were returned than we asked for, so the search
		 * is done.  A data search found nothing, but a hole search
		 * found one behind the last offset.
		 */
		if (nr_pages < want) {
			if (type == HOLE_OFF) {
				*offset = lastoff;
				found = true;
			}
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

/*
 * The caller must lock the inode with xfs_ilock_data_map_shared(); can we
 * craft an appropriate ASSERT?
 *
 * The end argument exists because the VFS-level lseek interface is defined
 * such that any offset past i_size shall return -ENXIO, but we use this for
 * the quota code, which does not maintain i_size, and we want to SEEK_DATA
 * past i_size.
 */
loff_t
__xfs_seek_hole_data(
	struct inode		*inode,
	loff_t			start,
	loff_t			end,
	int			whence)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		lastbno;
	int			error;

	if (start >= end) {
		error = -ENXIO;
		goto out_error;
	}

	/*
	 * Try to read extents from the first block indicated
	 * by fsbno to the end block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	lastbno = XFS_B_TO_FSB(mp, end);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, lastbno - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_error;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = -ENXIO;
			goto out_error;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in the hole we wanted? */
			if (whence == SEEK_HOLE &&
			    map[i].br_startblock == HOLESTARTBLOCK)
				goto out;

			/* Landed in the data extent we wanted? */
			if (whence == SEEK_DATA &&
			    (map[i].br_startblock == DELAYSTARTBLOCK ||
			     (map[i].br_state == XFS_EXT_NORM &&
			      !isnullstartblock(map[i].br_startblock))))
				goto out;

			/*
			 * Landed in an unwritten extent, try to search
			 * for hole or data from page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
							&offset))
					goto out;
			}
		}

		/*
		 * We only received one extent out of the two requested. This
		 * means we've hit EOF and didn't find what we are looking for.
		 */
		if (nmap == 1) {
			/*
			 * If we were looking for a hole, set offset to
			 * the end of the file (i.e., there is an implicit
			 * hole at the end of any file).
			 */
			if (whence == SEEK_HOLE) {
				offset = end;
				break;
			}
			/*
			 * If we were looking for data, it's nowhere to be found
			 */
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_error;
		}

		ASSERT(i > 1);

		/*
		 * Nothing was found, proceed to the next round of search
		 * if the next reading offset is not at or beyond EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= end) {
			if (whence == SEEK_HOLE) {
				offset = end;
				break;
			}
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_error;
		}
	}

out:
	/*
	 * If at this point we have found the hole we wanted, the returned
	 * offset may be bigger than the file size as it may be aligned to
	 * page boundary for unwritten extents.  We need to deal with this
	 * situation in particular.
	 */
	if (whence == SEEK_HOLE)
		offset = min_t(loff_t, offset, end);

	return offset;

out_error:
	return error;
}

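/*
 * SEEK_HOLE/SEEK_DATA entry point for regular files: limit the search to the
 * current file size and update the file position via vfs_setpos().
 */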
STATIC loff_t
xfs_seek_hole_data(
	struct file		*file,
	loff_t			start,
	int			whence)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	uint			lock;
	loff_t			offset, end;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lock = xfs_ilock_data_map_shared(ip);

	end = i_size_read(inode);
	offset = __xfs_seek_hole_data(inode, start, end, whence);
	if (offset < 0) {
		error = offset;
		goto out_unlock;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
	xfs_iunlock(ip, lock);

	if (error)
		return error;
	return offset;
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
	case SEEK_SET:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
	case SEEK_DATA:
		return xfs_seek_hole_data(file, offset, whence);
	default:
		return -EINVAL;
	}
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */

/*
 * An mmap()ed file has taken a write protection fault and is being made
 * writable. We can set the page state up correctly for a writable page, which
 * means we can do correct delalloc accounting (ENOSPC checking!) and
 * unwritten extent mapping.
 */
STATIC int
xfs_filemap_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	int			ret;

	trace_xfs_filemap_page_mkwrite(XFS_I(inode));

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (IS_DAX(inode)) {
		ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault);
	} else {
		ret = block_page_mkwrite(vma, vmf, xfs_get_blocks);
		ret = block_page_mkwrite_return(ret);
	}

	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);

	return ret;
}

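/*
 * Read faults and non-DAX write faults come through here.  DAX write faults
 * are redirected to xfs_filemap_page_mkwrite() above so they get the same
 * timestamp update and freeze protection as ->page_mkwrite.
 */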
STATIC int
xfs_filemap_fault(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	int			ret;

	trace_xfs_filemap_fault(XFS_I(inode));

	/* DAX can shortcut the normal fault path on write faults! */
	if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
		return xfs_filemap_page_mkwrite(vma, vmf);

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		/*
		 * We do not want to trigger unwritten extent conversion on
		 * read faults - that is unnecessary overhead and would also
		 * require changes to xfs_get_blocks_direct() to map unwritten
		 * extent ioend for conversion on read-only mappings.
		 */
		ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault);
	} else
		ret = filemap_fault(vma, vmf);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	return ret;
}

/*
 * Similar to xfs_filemap_fault(), the DAX fault path can call into here on
 * both read and write faults. There is no ->pmd_mkwrite callout for huge
 * pages, so we have a single function to handle both cases. @flags carries
 * the information on the type of fault occurring.
 */
STATIC int
xfs_filemap_pmd_fault(
	struct vm_area_struct	*vma,
	unsigned long		addr,
	pmd_t			*pmd,
	unsigned int		flags)
{
	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret;

	if (!IS_DAX(inode))
		return VM_FAULT_FALLBACK;

	trace_xfs_filemap_pmd_fault(ip);

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(inode->i_sb);

	return ret;
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp
 * updates on write faults. In reality, it is needed to serialise against
 * truncate, similar to page_mkwrite. Hence we cycle the XFS_MMAPLOCK_SHARED
 * to ensure we serialise the fault barrier in place.
 */
static int
xfs_filemap_pfn_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret = VM_FAULT_NOPAGE;
	loff_t			size;

	trace_xfs_filemap_pfn_mkwrite(ip);

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	/* check if the faulting page hasn't raced with truncate */
	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else if (IS_DAX(inode))
		ret = dax_pfn_mkwrite(vma, vmf);
	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

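/*
 * VM operations for XFS file mappings, installed by xfs_file_mmap() below.
 */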
static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.pmd_fault	= xfs_filemap_pmd_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};