/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		inode_lock(VFS_I(ip));
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		inode_unlock(VFS_I(ip));
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		inode_unlock(VFS_I(ip));
}

/*
 * xfs_iozero clears the specified range supplied via the page cache (except in
 * the DAX case). Writes through the page cache will allocate blocks over
 * holes, though the callers usually map the holes first and avoid them. If a
 * block is not completely zeroed, then it will be read from disk before being
 * partially zeroed.
 *
 * In the DAX case, we can just directly write to the underlying pages. This
 * will not allocate blocks, but will avoid holes and unwritten extents and so
 * not do unnecessary work.
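 *
 * Returns zero on success, or the negative error code returned by the
 * page cache or DAX zeroing path on failure.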
 */
int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status = 0;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_SIZE - 1));	/* Within page */
		bytes = PAGE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		if (IS_DAX(VFS_I(ip))) {
			status = dax_zero_page_range(VFS_I(ip), pos, bytes,
						     xfs_get_blocks_direct);
			if (status)
				break;
		} else {
			status = pagecache_write_begin(NULL, mapping, pos, bytes,
						       AOP_FLAG_UNINTERRUPTIBLE,
						       &page, &fsdata);
			if (status)
				break;

			zero_user(page, offset, bytes);

			status = pagecache_write_end(NULL, mapping, pos, bytes,
						     bytes, page, fsdata);
			WARN_ON(status <= 0); /* can't return less than zero! */
			status = 0;
		}
		pos += bytes;
		count -= bytes;
	} while (count);

	return status;
}

int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache of the device used for file data
		 * first.  This is to ensure newly written file data makes
		 * it to disk before logging the new inode size in case of
		 * an extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct address_space	*mapping = iocb->ki_filp->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			isize = i_size_read(inode);
	size_t			count = iov_iter_count(to);
	struct iov_iter		data;
	struct xfs_buftarg	*target;
	ssize_t			ret = 0;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (XFS_IS_REALTIME_INODE(ip))
		target = ip->i_mount->m_rtdev_targp;
	else
		target = ip->i_mount->m_ddev_targp;

	if (!IS_DAX(inode)) {
		/* DIO must be aligned to device logical sector size */
		if ((iocb->ki_pos | count) & target->bt_logical_sectormask) {
			if (iocb->ki_pos == isize)
				return 0;
			return -EINVAL;
		}
	}

	/*
	 * Locking is a bit tricky here. If we take an exclusive lock for direct
	 * IO, we effectively serialise all new concurrent read IO to this file
	 * and block it behind IO that is currently in progress because IO in
	 * progress holds the IO lock shared. We only need to hold the lock
	 * exclusive to blow away the page cache, so only take lock exclusively
	 * if the page cache needs invalidation. This allows the normal direct
	 * IO case of no page cache pages to proceed concurrently without
	 * serialisation.
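	 *
	 * Note that even when we take the lock exclusive to invalidate the
	 * page cache, it is demoted back to shared before the direct IO is
	 * issued below.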
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if (mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		/*
		 * The generic dio code only flushes the range of the particular
		 * I/O. Because we take an exclusive lock here, this whole
		 * sequence is considerably more expensive for us. This has a
		 * noticeable performance impact for any file with cached pages,
		 * even when outside of the range of the particular I/O.
		 *
		 * Hence, amortize the cost of the lock against a full file
		 * flush and reduce the chances of repeated iolock cycles going
		 * forward.
		 */
		if (mapping->nrpages) {
			ret = filemap_write_and_wait(mapping);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}

			/*
			 * Invalidate whole pages. This can return an error if
			 * we fail to invalidate a page, but this should never
			 * happen on XFS. Warn if it does fail.
			 */
			ret = invalidate_inode_pages2(mapping);
			WARN_ON_ONCE(ret);
			ret = 0;
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	data = *to;
	ret = mapping->a_ops->direct_IO(iocb, &data);
	if (ret > 0) {
		iocb->ki_pos += ret;
		iov_iter_advance(to, ret);
	}
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	ret = generic_file_read_iter(iocb, to);
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_mount	*mp = XFS_I(file_inode(iocb->ki_filp))->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	ssize_t			ret;

	XFS_STATS_INC(ip->i_mount, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	trace_xfs_file_splice_read(ip, count, *ppos);

	/*
	 * DAX inodes cannot use the page cache for splice, so we have to push
	 * them through the VFS IO path. This means it goes through
	 * ->read_iter, which for us takes the XFS_IOLOCK_SHARED. Hence we
	 * cannot lock the splice operation at this level for DAX inodes.
	 */
	if (IS_DAX(VFS_I(ip))) {
		ret = default_file_splice_read(infilp, ppos, pipe, count,
					       flags);
		goto out;
	}

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
out:
	if (ret > 0)
		XFS_STATS_ADD(ip->i_mount, xs_read_bytes, ret);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last block of the
 * file that is beyond the EOF.  We do this since the size is being increased
 * without writing anything to that block and we don't want to read the
 * garbage on the disk.
 */
STATIC int				/* error (negative) */
xfs_zero_last_block(
	struct xfs_inode	*ip,
	xfs_fsize_t		offset,
	xfs_fsize_t		isize,
	bool			*did_zeroing)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		last_fsb = XFS_B_TO_FSBT(mp, isize);
	int			zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	int			zero_len;
	int			nimaps = 1;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	ASSERT(nimaps > 0);

	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK)
		return 0;

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	*did_zeroing = true;
	return xfs_iozero(ip, isize, zero_len);
}

/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int					/* error (negative) */
xfs_zero_eof(
	struct xfs_inode	*ip,
	xfs_off_t		offset,		/* starting I/O offset */
	xfs_fsize_t		isize,		/* current inode size */
	bool			*did_zeroing)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_zero_fsb;
	xfs_fileoff_t		end_zero_fsb;
	xfs_fileoff_t		zero_count_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_fileoff_t		zero_off;
	xfs_fsize_t		zero_len;
	int			nimaps;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	trace_xfs_zero_eof(ip, isize, offset - isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 *
	 * We only zero a part of that block so it is handled specially.
	 */
	if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
		error = xfs_zero_last_block(ip, offset, isize, did_zeroing);
		if (error)
			return error;
	}

	/*
	 * Calculate the range between the new size and the old where blocks
	 * needing to be zeroed may exist.
	 *
	 * To get the block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back to a block
	 * boundary.  We subtract 1 in case the size is exactly on a block
	 * boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
				       &imap, &nimaps, 0);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 */
		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error)
			return error;

		*did_zeroing = true;
		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
	}

	return 0;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock, true);
	if (error)
		return error;

	/* For changing security info in file_remove_privs() we need i_mutex */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_rw_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	if (iocb->ki_pos > i_size_read(inode)) {
		bool	zero = false;

		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_rw_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_rw_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}
		error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	if (!IS_NOSEC(inode))
		return file_remove_privs(file);
	return 0;
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	loff_t			end;
	struct iov_iter		data;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if (!IS_DAX(inode) &&
	    ((iocb->ki_pos | count) & target->bt_logical_sectormask))
		return -EINVAL;

	/* "unaligned" here means not aligned to a filesystem block */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	/*
	 * We don't need to take an exclusive lock unless the page cache needs
	 * to be invalidated or unaligned IO is being executed. We don't need to
	 * consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);

	/*
	 * Recheck if there are cached pages that need invalidate after we got
	 * the iolock to protect against other threads adding new pages while
	 * we were waiting for the iolock.
	 */
	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);
	end = iocb->ki_pos + count - 1;

	/*
	 * See xfs_file_dio_aio_read() for why we do a full-file flush here.
	 */
	if (mapping->nrpages) {
		ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
		if (ret)
			goto out;
		/*
		 * Invalidate whole pages. This can return an error if we fail
		 * to invalidate a page, but this should never happen on XFS.
		 * Warn if it does fail.
		 */
		ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
		WARN_ON_ONCE(ret);
		ret = 0;
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);

	data = *from;
	ret = mapping->a_ops->direct_IO(iocb, &data);

	/* see generic_file_direct_write() for why this is necessary */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      iocb->ki_pos >> PAGE_SHIFT,
					      end >> PAGE_SHIFT);
	}

	if (ret > 0) {
		iocb->ki_pos += ret;
		iov_iter_advance(from, ret);
	}
out:
	xfs_rw_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS. DAX can result in
	 * partial writes, but direct IO will either complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count || IS_DAX(VFS_I(ip)));
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock = XFS_IOLOCK_EXCL;

	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

write_retry:
	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = generic_perform_write(file, from, iocb->ki_pos);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);
		eofb.eof_scan_owner = ip->i_ino; /* for locking */
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if ((iocb->ki_flags & IOCB_DIRECT) || IS_DAX(inode))
		ret = xfs_file_dio_aio_write(iocb, from);
	else
		ret = xfs_file_buffered_aio_write(iocb, from);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE)

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, false);
	if (error)
		goto out_unlock;

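	/*
	 * Take the mmap lock as well so that the extent manipulations below
	 * cannot race with page faults, which take XFS_MMAPLOCK_SHARED.
	 */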
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * A collapse range that reaches or extends past EOF is not
		 * allowed; that would effectively be a truncate operation.
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		new_size = i_size_read(inode) + len;
		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/* check the new inode size does not wrap through zero */
		if (new_size > inode->i_sb->s_maxbytes) {
			error = -EFBIG;
			goto out_unlock;
		}

		/* Offset should be less than i_size */
		if (offset >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_setattr_size(ip, &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	return xfs_readdir(ip, ctx, bufsize);
}

/*
 * This type is designed to indicate the type of offset we would like
 * to search from page cache for xfs_seek_hole_data().
 */
enum {
	HOLE_OFF = 0,
	DATA_OFF,
};

/*
 * Lookup the desired type of offset from the given page.
 *
 * On success, return true and the offset argument will point to the
 * start of the region that was found.  Otherwise this function will
 * return false and keep the offset argument unchanged.
 */
STATIC bool
xfs_lookup_buffer_offset(
	struct page		*page,
	loff_t			*offset,
	unsigned int		type)
{
	loff_t			lastoff = page_offset(page);
	bool			found = false;
	struct buffer_head	*bh, *head;

	bh = head = page_buffers(page);
	do {
		/*
		 * Unwritten extents that have data in the page
		 * cache covering them can be identified by the
		 * BH_Unwritten state flag.  Pages with multiple
		 * buffers might have a mix of holes, data and
		 * unwritten extents - any buffer with valid
		 * data in it should have BH_Uptodate flag set
		 * on it.
		 */
		if (buffer_unwritten(bh) ||
		    buffer_uptodate(bh)) {
			if (type == DATA_OFF)
				found = true;
		} else {
			if (type == HOLE_OFF)
				found = true;
		}

		if (found) {
			*offset = lastoff;
			break;
		}
		lastoff += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	return found;
}

/*
 * This routine is called to find out and return a data or hole offset
 * from the page cache for unwritten extents according to the desired
 * type for xfs_seek_hole_data().
 *
 * The argument offset is used to tell where we start to search from the
 * page cache.  Map is used to figure out the end points of the range to
 * look up pages.
 *
 * Return true if the desired type of offset was found, and the argument
 * offset is filled with that address.  Otherwise, return false and keep
 * offset unchanged.
 */
STATIC bool
xfs_find_get_desired_pgoff(
	struct inode		*inode,
	struct xfs_bmbt_irec	*map,
	unsigned int		type,
	loff_t			*offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct pagevec		pvec;
	pgoff_t			index;
	pgoff_t			end;
	loff_t			endoff;
	loff_t			startoff = *offset;
	loff_t			lastoff = startoff;
	bool			found = false;

	pagevec_init(&pvec, 0);

	index = startoff >> PAGE_SHIFT;
	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
	end = endoff >> PAGE_SHIFT;
	do {
		int		want;
		unsigned	nr_pages;
		unsigned int	i;

		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  want);
		/*
		 * No page was mapped into the given range. If we are searching
		 * for holes and this is the first pass through the loop, the
		 * given offset lands in a hole, so return it.
		 *
		 * If we have already stepped through some pages looking for
		 * holes but they all contained data, then lastoff points to
		 * the end of the last mapped page. If that does not reach the
		 * end of the search range, there must be a hole between them.
		 */
		if (nr_pages == 0) {
			/* Data search found nothing */
			if (type == DATA_OFF)
				break;

			ASSERT(type == HOLE_OFF);
			if (lastoff == startoff || lastoff < endoff) {
				found = true;
				*offset = lastoff;
			}
			break;
		}

		/*
		 * We found at least one page.  If this is the first time we
		 * step into the loop, and if the first page index offset is
		 * greater than the given search offset, a hole was found.
		 */
		if (type == HOLE_OFF && lastoff == startoff &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = true;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page	*page = pvec.pages[i];
			loff_t		b_offset;

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to tmpfs
			 * file mapping. However, page->index will not change
			 * because we have a reference on the page.
			 *
			 * The search is done if the page index is out of
			 * range. If the current offset does not reach the
			 * end of the specified search range, there should be
			 * a hole between them.
			 */
			if (page->index > end) {
				if (type == HOLE_OFF && lastoff < endoff) {
					*offset = lastoff;
					found = true;
				}
				goto out;
			}

			lock_page(page);
			/*
			 * Page truncated or invalidated (page->mapping == NULL).
			 * We can freely skip it and proceed to check the next
			 * page.
			 */
			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			found = xfs_lookup_buffer_offset(page, &b_offset, type);
			if (found) {
				/*
				 * The found offset may be less than the offset
				 * we started searching from if this is the
				 * first page, hence the clamp to startoff.
				 */
				*offset = max_t(loff_t, startoff, b_offset);
				unlock_page(page);
				goto out;
			}

			/*
			 * Either we were searching for data and found nothing,
			 * or we were searching for a hole and found a data
			 * buffer.  In either case the next page probably
			 * contains what we want, so update the last offset to
			 * point at it.
			 */
			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were returned than we asked for, so the search
		 * is done.  Nothing was found if we were searching for data,
		 * but if we were searching for a hole, it lies behind the
		 * last offset.
		 */
		if (nr_pages < want) {
			if (type == HOLE_OFF) {
				*offset = lastoff;
				found = true;
			}
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

/*
 * caller must lock inode with xfs_ilock_data_map_shared,
 * can we craft an appropriate ASSERT?
 *
 * end is because the VFS-level lseek interface is defined such that any
 * offset past i_size shall return -ENXIO, but we use this for quota code
 * which does not maintain i_size, and we want to SEEK_DATA past i_size.
 */
loff_t
__xfs_seek_hole_data(
	struct inode		*inode,
	loff_t			start,
	loff_t			end,
	int			whence)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		lastbno;
	int			error;

	if (start >= end) {
		error = -ENXIO;
		goto out_error;
	}

	/*
	 * Try to read extents from the first block indicated
	 * by fsbno to the end block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	lastbno = XFS_B_TO_FSB(mp, end);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, lastbno - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_error;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = -ENXIO;
			goto out_error;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in the hole we wanted? */
			if (whence == SEEK_HOLE &&
			    map[i].br_startblock == HOLESTARTBLOCK)
				goto out;

			/* Landed in the data extent we wanted? */
			if (whence == SEEK_DATA &&
			    (map[i].br_startblock == DELAYSTARTBLOCK ||
			     (map[i].br_state == XFS_EXT_NORM &&
			      !isnullstartblock(map[i].br_startblock))))
				goto out;

			/*
			 * Landed in an unwritten extent, try to search
			 * for hole or data from page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
				      &offset))
					goto out;
			}
		}

		/*
		 * We only received one extent out of the two requested. This
		 * means we've hit EOF and didn't find what we are looking for.
		 */
		if (nmap == 1) {
			/*
			 * If we were looking for a hole, set offset to
			 * the end of the file (i.e., there is an implicit
			 * hole at the end of any file).
			 */
			if (whence == SEEK_HOLE) {
				offset = end;
				break;
			}
			/*
			 * If we were looking for data, it's nowhere to be found
			 */
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_error;
		}

		ASSERT(i > 1);

		/*
		 * Nothing was found, proceed to the next round of search
		 * if the next reading offset is not at or beyond EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= end) {
			if (whence == SEEK_HOLE) {
				offset = end;
				break;
			}
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_error;
		}
	}

out:
	/*
	 * If at this point we have found the hole we wanted, the returned
	 * offset may be bigger than the file size as it may be aligned to
	 * page boundary for unwritten extents.  We need to deal with this
	 * situation in particular.
	 */
	if (whence == SEEK_HOLE)
		offset = min_t(loff_t, offset, end);

	return offset;

out_error:
	return error;
}

STATIC loff_t
xfs_seek_hole_data(
	struct file		*file,
	loff_t			start,
	int			whence)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	uint			lock;
	loff_t			offset, end;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lock = xfs_ilock_data_map_shared(ip);

	end = i_size_read(inode);
	offset = __xfs_seek_hole_data(inode, start, end, whence);
	if (offset < 0) {
		error = offset;
		goto out_unlock;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
	xfs_iunlock(ip, lock);

	if (error)
		return error;
	return offset;
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
	case SEEK_SET:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
	case SEEK_DATA:
		return xfs_seek_hole_data(file, offset, whence);
	default:
		return -EINVAL;
	}
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */

/*
 * An mmap()d file has taken a write protection fault and is being made
 * writable. We can set the page state up correctly for a writable page, which
 * means we can do correct delalloc accounting (ENOSPC checking!) and
 * unwritten extent mapping.
 */
STATIC int
xfs_filemap_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	int			ret;

	trace_xfs_filemap_page_mkwrite(XFS_I(inode));

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (IS_DAX(inode)) {
		ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault);
	} else {
		ret = block_page_mkwrite(vma, vmf, xfs_get_blocks);
		ret = block_page_mkwrite_return(ret);
	}

	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);

	return ret;
}

STATIC int
xfs_filemap_fault(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	int			ret;

	trace_xfs_filemap_fault(XFS_I(inode));

	/* DAX can shortcut the normal fault path on write faults! */
	if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
		return xfs_filemap_page_mkwrite(vma, vmf);

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		/*
		 * we do not want to trigger unwritten extent conversion on read
		 * faults - that is unnecessary overhead and would also require
		 * changes to xfs_get_blocks_direct() to map unwritten extent
		 * ioend for conversion on read-only mappings.
		 */
		ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault);
	} else
		ret = filemap_fault(vma, vmf);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	return ret;
}

/*
 * Similar to xfs_filemap_fault(), the DAX fault path can call into here on
 * both read and write faults. There is no ->pmd_mkwrite callout for huge
 * pages, so we have a single function here to handle both cases. @flags
 * carries the information on the type of fault occurring.
 */
STATIC int
xfs_filemap_pmd_fault(
	struct vm_area_struct	*vma,
	unsigned long		addr,
	pmd_t			*pmd,
	unsigned int		flags)
{
	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret;

	if (!IS_DAX(inode))
		return VM_FAULT_FALLBACK;

	trace_xfs_filemap_pmd_fault(ip);

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(inode->i_sb);

	return ret;
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp
 * updates on write faults. In reality, it's needed to serialise against
 * truncate similar to page_mkwrite. Hence we cycle the XFS_MMAPLOCK_SHARED
 * to ensure we serialise the fault barrier in place.
 */
static int
xfs_filemap_pfn_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret = VM_FAULT_NOPAGE;
	loff_t			size;

	trace_xfs_filemap_pfn_mkwrite(ip);

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	/* check if the faulting page hasn't raced with truncate */
	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else if (IS_DAX(inode))
		ret = dax_pfn_mkwrite(vma, vmf);
	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.pmd_fault	= xfs_filemap_pmd_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};