1 /* 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. 3 * All Rights Reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License as 7 * published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it would be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 */ 18 #include "xfs.h" 19 #include "xfs_fs.h" 20 #include "xfs_bit.h" 21 #include "xfs_log.h" 22 #include "xfs_inum.h" 23 #include "xfs_sb.h" 24 #include "xfs_ag.h" 25 #include "xfs_trans.h" 26 #include "xfs_mount.h" 27 #include "xfs_bmap_btree.h" 28 #include "xfs_alloc.h" 29 #include "xfs_dinode.h" 30 #include "xfs_inode.h" 31 #include "xfs_inode_item.h" 32 #include "xfs_bmap.h" 33 #include "xfs_error.h" 34 #include "xfs_vnodeops.h" 35 #include "xfs_da_btree.h" 36 #include "xfs_ioctl.h" 37 #include "xfs_trace.h" 38 39 #include <linux/dcache.h> 40 #include <linux/falloc.h> 41 42 static const struct vm_operations_struct xfs_file_vm_ops; 43 44 /* 45 * Locking primitives for read and write IO paths to ensure we consistently use 46 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock. 47 */ 48 static inline void 49 xfs_rw_ilock( 50 struct xfs_inode *ip, 51 int type) 52 { 53 if (type & XFS_IOLOCK_EXCL) 54 mutex_lock(&VFS_I(ip)->i_mutex); 55 xfs_ilock(ip, type); 56 } 57 58 static inline void 59 xfs_rw_iunlock( 60 struct xfs_inode *ip, 61 int type) 62 { 63 xfs_iunlock(ip, type); 64 if (type & XFS_IOLOCK_EXCL) 65 mutex_unlock(&VFS_I(ip)->i_mutex); 66 } 67 68 static inline void 69 xfs_rw_ilock_demote( 70 struct xfs_inode *ip, 71 int type) 72 { 73 xfs_ilock_demote(ip, type); 74 if (type & XFS_IOLOCK_EXCL) 75 mutex_unlock(&VFS_I(ip)->i_mutex); 76 } 77 78 /* 79 * xfs_iozero 80 * 81 * xfs_iozero clears the specified range of buffer supplied, 82 * and marks all the affected blocks as valid and modified. If 83 * an affected block is not allocated, it will be allocated. If 84 * an affected block is not completely overwritten, and is not 85 * valid before the operation, it will be read from disk before 86 * being partially zeroed. 87 */ 88 STATIC int 89 xfs_iozero( 90 struct xfs_inode *ip, /* inode */ 91 loff_t pos, /* offset in file */ 92 size_t count) /* size of data to zero */ 93 { 94 struct page *page; 95 struct address_space *mapping; 96 int status; 97 98 mapping = VFS_I(ip)->i_mapping; 99 do { 100 unsigned offset, bytes; 101 void *fsdata; 102 103 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ 104 bytes = PAGE_CACHE_SIZE - offset; 105 if (bytes > count) 106 bytes = count; 107 108 status = pagecache_write_begin(NULL, mapping, pos, bytes, 109 AOP_FLAG_UNINTERRUPTIBLE, 110 &page, &fsdata); 111 if (status) 112 break; 113 114 zero_user(page, offset, bytes); 115 116 status = pagecache_write_end(NULL, mapping, pos, bytes, bytes, 117 page, fsdata); 118 WARN_ON(status <= 0); /* can't return less than zero! */ 119 pos += bytes; 120 count -= bytes; 121 status = 0; 122 } while (count); 123 124 return (-status); 125 } 126 127 /* 128 * Fsync operations on directories are much simpler than on regular files, 129 * as there is no file data to flush, and thus also no need for explicit 130 * cache flush operations, and there are no non-transaction metadata updates 131 * on directories either. 132 */ 133 STATIC int 134 xfs_dir_fsync( 135 struct file *file, 136 loff_t start, 137 loff_t end, 138 int datasync) 139 { 140 struct xfs_inode *ip = XFS_I(file->f_mapping->host); 141 struct xfs_mount *mp = ip->i_mount; 142 xfs_lsn_t lsn = 0; 143 144 trace_xfs_dir_fsync(ip); 145 146 xfs_ilock(ip, XFS_ILOCK_SHARED); 147 if (xfs_ipincount(ip)) 148 lsn = ip->i_itemp->ili_last_lsn; 149 xfs_iunlock(ip, XFS_ILOCK_SHARED); 150 151 if (!lsn) 152 return 0; 153 return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); 154 } 155 156 STATIC int 157 xfs_file_fsync( 158 struct file *file, 159 loff_t start, 160 loff_t end, 161 int datasync) 162 { 163 struct inode *inode = file->f_mapping->host; 164 struct xfs_inode *ip = XFS_I(inode); 165 struct xfs_mount *mp = ip->i_mount; 166 struct xfs_trans *tp; 167 int error = 0; 168 int log_flushed = 0; 169 xfs_lsn_t lsn = 0; 170 171 trace_xfs_file_fsync(ip); 172 173 error = filemap_write_and_wait_range(inode->i_mapping, start, end); 174 if (error) 175 return error; 176 177 if (XFS_FORCED_SHUTDOWN(mp)) 178 return -XFS_ERROR(EIO); 179 180 xfs_iflags_clear(ip, XFS_ITRUNCATED); 181 182 if (mp->m_flags & XFS_MOUNT_BARRIER) { 183 /* 184 * If we have an RT and/or log subvolume we need to make sure 185 * to flush the write cache the device used for file data 186 * first. This is to ensure newly written file data make 187 * it to disk before logging the new inode size in case of 188 * an extending write. 189 */ 190 if (XFS_IS_REALTIME_INODE(ip)) 191 xfs_blkdev_issue_flush(mp->m_rtdev_targp); 192 else if (mp->m_logdev_targp != mp->m_ddev_targp) 193 xfs_blkdev_issue_flush(mp->m_ddev_targp); 194 } 195 196 /* 197 * We always need to make sure that the required inode state is safe on 198 * disk. The inode might be clean but we still might need to force the 199 * log because of committed transactions that haven't hit the disk yet. 200 * Likewise, there could be unflushed non-transactional changes to the 201 * inode core that have to go to disk and this requires us to issue 202 * a synchronous transaction to capture these changes correctly. 203 * 204 * This code relies on the assumption that if the i_update_core field 205 * of the inode is clear and the inode is unpinned then it is clean 206 * and no action is required. 207 */ 208 xfs_ilock(ip, XFS_ILOCK_SHARED); 209 210 /* 211 * First check if the VFS inode is marked dirty. All the dirtying 212 * of non-transactional updates do not go through mark_inode_dirty*, 213 * which allows us to distinguish between pure timestamp updates 214 * and i_size updates which need to be caught for fdatasync. 215 * After that also check for the dirty state in the XFS inode, which 216 * might gets cleared when the inode gets written out via the AIL 217 * or xfs_iflush_cluster. 218 */ 219 if (((inode->i_state & I_DIRTY_DATASYNC) || 220 ((inode->i_state & I_DIRTY_SYNC) && !datasync)) && 221 ip->i_update_core) { 222 /* 223 * Kick off a transaction to log the inode core to get the 224 * updates. The sync transaction will also force the log. 225 */ 226 xfs_iunlock(ip, XFS_ILOCK_SHARED); 227 tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); 228 error = xfs_trans_reserve(tp, 0, 229 XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); 230 if (error) { 231 xfs_trans_cancel(tp, 0); 232 return -error; 233 } 234 xfs_ilock(ip, XFS_ILOCK_EXCL); 235 236 /* 237 * Note - it's possible that we might have pushed ourselves out 238 * of the way during trans_reserve which would flush the inode. 239 * But there's no guarantee that the inode buffer has actually 240 * gone out yet (it's delwri). Plus the buffer could be pinned 241 * anyway if it's part of an inode in another recent 242 * transaction. So we play it safe and fire off the 243 * transaction anyway. 244 */ 245 xfs_trans_ijoin(tp, ip, 0); 246 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 247 error = xfs_trans_commit(tp, 0); 248 249 lsn = ip->i_itemp->ili_last_lsn; 250 xfs_iunlock(ip, XFS_ILOCK_EXCL); 251 } else { 252 /* 253 * Timestamps/size haven't changed since last inode flush or 254 * inode transaction commit. That means either nothing got 255 * written or a transaction committed which caught the updates. 256 * If the latter happened and the transaction hasn't hit the 257 * disk yet, the inode will be still be pinned. If it is, 258 * force the log. 259 */ 260 if (xfs_ipincount(ip)) 261 lsn = ip->i_itemp->ili_last_lsn; 262 xfs_iunlock(ip, XFS_ILOCK_SHARED); 263 } 264 265 if (!error && lsn) 266 error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed); 267 268 /* 269 * If we only have a single device, and the log force about was 270 * a no-op we might have to flush the data device cache here. 271 * This can only happen for fdatasync/O_DSYNC if we were overwriting 272 * an already allocated file and thus do not have any metadata to 273 * commit. 274 */ 275 if ((mp->m_flags & XFS_MOUNT_BARRIER) && 276 mp->m_logdev_targp == mp->m_ddev_targp && 277 !XFS_IS_REALTIME_INODE(ip) && 278 !log_flushed) 279 xfs_blkdev_issue_flush(mp->m_ddev_targp); 280 281 return -error; 282 } 283 284 STATIC ssize_t 285 xfs_file_aio_read( 286 struct kiocb *iocb, 287 const struct iovec *iovp, 288 unsigned long nr_segs, 289 loff_t pos) 290 { 291 struct file *file = iocb->ki_filp; 292 struct inode *inode = file->f_mapping->host; 293 struct xfs_inode *ip = XFS_I(inode); 294 struct xfs_mount *mp = ip->i_mount; 295 size_t size = 0; 296 ssize_t ret = 0; 297 int ioflags = 0; 298 xfs_fsize_t n; 299 unsigned long seg; 300 301 XFS_STATS_INC(xs_read_calls); 302 303 BUG_ON(iocb->ki_pos != pos); 304 305 if (unlikely(file->f_flags & O_DIRECT)) 306 ioflags |= IO_ISDIRECT; 307 if (file->f_mode & FMODE_NOCMTIME) 308 ioflags |= IO_INVIS; 309 310 /* START copy & waste from filemap.c */ 311 for (seg = 0; seg < nr_segs; seg++) { 312 const struct iovec *iv = &iovp[seg]; 313 314 /* 315 * If any segment has a negative length, or the cumulative 316 * length ever wraps negative then return -EINVAL. 317 */ 318 size += iv->iov_len; 319 if (unlikely((ssize_t)(size|iv->iov_len) < 0)) 320 return XFS_ERROR(-EINVAL); 321 } 322 /* END copy & waste from filemap.c */ 323 324 if (unlikely(ioflags & IO_ISDIRECT)) { 325 xfs_buftarg_t *target = 326 XFS_IS_REALTIME_INODE(ip) ? 327 mp->m_rtdev_targp : mp->m_ddev_targp; 328 if ((iocb->ki_pos & target->bt_smask) || 329 (size & target->bt_smask)) { 330 if (iocb->ki_pos == i_size_read(inode)) 331 return 0; 332 return -XFS_ERROR(EINVAL); 333 } 334 } 335 336 n = XFS_MAXIOFFSET(mp) - iocb->ki_pos; 337 if (n <= 0 || size == 0) 338 return 0; 339 340 if (n < size) 341 size = n; 342 343 if (XFS_FORCED_SHUTDOWN(mp)) 344 return -EIO; 345 346 /* 347 * Locking is a bit tricky here. If we take an exclusive lock 348 * for direct IO, we effectively serialise all new concurrent 349 * read IO to this file and block it behind IO that is currently in 350 * progress because IO in progress holds the IO lock shared. We only 351 * need to hold the lock exclusive to blow away the page cache, so 352 * only take lock exclusively if the page cache needs invalidation. 353 * This allows the normal direct IO case of no page cache pages to 354 * proceeed concurrently without serialisation. 355 */ 356 xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); 357 if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) { 358 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); 359 xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); 360 361 if (inode->i_mapping->nrpages) { 362 ret = -xfs_flushinval_pages(ip, 363 (iocb->ki_pos & PAGE_CACHE_MASK), 364 -1, FI_REMAPF_LOCKED); 365 if (ret) { 366 xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL); 367 return ret; 368 } 369 } 370 xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); 371 } 372 373 trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags); 374 375 ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos); 376 if (ret > 0) 377 XFS_STATS_ADD(xs_read_bytes, ret); 378 379 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); 380 return ret; 381 } 382 383 STATIC ssize_t 384 xfs_file_splice_read( 385 struct file *infilp, 386 loff_t *ppos, 387 struct pipe_inode_info *pipe, 388 size_t count, 389 unsigned int flags) 390 { 391 struct xfs_inode *ip = XFS_I(infilp->f_mapping->host); 392 int ioflags = 0; 393 ssize_t ret; 394 395 XFS_STATS_INC(xs_read_calls); 396 397 if (infilp->f_mode & FMODE_NOCMTIME) 398 ioflags |= IO_INVIS; 399 400 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 401 return -EIO; 402 403 xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); 404 405 trace_xfs_file_splice_read(ip, count, *ppos, ioflags); 406 407 ret = generic_file_splice_read(infilp, ppos, pipe, count, flags); 408 if (ret > 0) 409 XFS_STATS_ADD(xs_read_bytes, ret); 410 411 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); 412 return ret; 413 } 414 415 /* 416 * xfs_file_splice_write() does not use xfs_rw_ilock() because 417 * generic_file_splice_write() takes the i_mutex itself. This, in theory, 418 * couuld cause lock inversions between the aio_write path and the splice path 419 * if someone is doing concurrent splice(2) based writes and write(2) based 420 * writes to the same inode. The only real way to fix this is to re-implement 421 * the generic code here with correct locking orders. 422 */ 423 STATIC ssize_t 424 xfs_file_splice_write( 425 struct pipe_inode_info *pipe, 426 struct file *outfilp, 427 loff_t *ppos, 428 size_t count, 429 unsigned int flags) 430 { 431 struct inode *inode = outfilp->f_mapping->host; 432 struct xfs_inode *ip = XFS_I(inode); 433 int ioflags = 0; 434 ssize_t ret; 435 436 XFS_STATS_INC(xs_write_calls); 437 438 if (outfilp->f_mode & FMODE_NOCMTIME) 439 ioflags |= IO_INVIS; 440 441 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 442 return -EIO; 443 444 xfs_ilock(ip, XFS_IOLOCK_EXCL); 445 446 trace_xfs_file_splice_write(ip, count, *ppos, ioflags); 447 448 ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags); 449 if (ret > 0) 450 XFS_STATS_ADD(xs_write_bytes, ret); 451 452 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 453 return ret; 454 } 455 456 /* 457 * This routine is called to handle zeroing any space in the last 458 * block of the file that is beyond the EOF. We do this since the 459 * size is being increased without writing anything to that block 460 * and we don't want anyone to read the garbage on the disk. 461 */ 462 STATIC int /* error (positive) */ 463 xfs_zero_last_block( 464 xfs_inode_t *ip, 465 xfs_fsize_t offset, 466 xfs_fsize_t isize) 467 { 468 xfs_fileoff_t last_fsb; 469 xfs_mount_t *mp = ip->i_mount; 470 int nimaps; 471 int zero_offset; 472 int zero_len; 473 int error = 0; 474 xfs_bmbt_irec_t imap; 475 476 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 477 478 zero_offset = XFS_B_FSB_OFFSET(mp, isize); 479 if (zero_offset == 0) { 480 /* 481 * There are no extra bytes in the last block on disk to 482 * zero, so return. 483 */ 484 return 0; 485 } 486 487 last_fsb = XFS_B_TO_FSBT(mp, isize); 488 nimaps = 1; 489 error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0); 490 if (error) 491 return error; 492 ASSERT(nimaps > 0); 493 /* 494 * If the block underlying isize is just a hole, then there 495 * is nothing to zero. 496 */ 497 if (imap.br_startblock == HOLESTARTBLOCK) { 498 return 0; 499 } 500 /* 501 * Zero the part of the last block beyond the EOF, and write it 502 * out sync. We need to drop the ilock while we do this so we 503 * don't deadlock when the buffer cache calls back to us. 504 */ 505 xfs_iunlock(ip, XFS_ILOCK_EXCL); 506 507 zero_len = mp->m_sb.sb_blocksize - zero_offset; 508 if (isize + zero_len > offset) 509 zero_len = offset - isize; 510 error = xfs_iozero(ip, isize, zero_len); 511 512 xfs_ilock(ip, XFS_ILOCK_EXCL); 513 ASSERT(error >= 0); 514 return error; 515 } 516 517 /* 518 * Zero any on disk space between the current EOF and the new, 519 * larger EOF. This handles the normal case of zeroing the remainder 520 * of the last block in the file and the unusual case of zeroing blocks 521 * out beyond the size of the file. This second case only happens 522 * with fixed size extents and when the system crashes before the inode 523 * size was updated but after blocks were allocated. If fill is set, 524 * then any holes in the range are filled and zeroed. If not, the holes 525 * are left alone as holes. 526 */ 527 528 int /* error (positive) */ 529 xfs_zero_eof( 530 xfs_inode_t *ip, 531 xfs_off_t offset, /* starting I/O offset */ 532 xfs_fsize_t isize) /* current inode size */ 533 { 534 xfs_mount_t *mp = ip->i_mount; 535 xfs_fileoff_t start_zero_fsb; 536 xfs_fileoff_t end_zero_fsb; 537 xfs_fileoff_t zero_count_fsb; 538 xfs_fileoff_t last_fsb; 539 xfs_fileoff_t zero_off; 540 xfs_fsize_t zero_len; 541 int nimaps; 542 int error = 0; 543 xfs_bmbt_irec_t imap; 544 545 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); 546 ASSERT(offset > isize); 547 548 /* 549 * First handle zeroing the block on which isize resides. 550 * We only zero a part of that block so it is handled specially. 551 */ 552 error = xfs_zero_last_block(ip, offset, isize); 553 if (error) { 554 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); 555 return error; 556 } 557 558 /* 559 * Calculate the range between the new size and the old 560 * where blocks needing to be zeroed may exist. To get the 561 * block where the last byte in the file currently resides, 562 * we need to subtract one from the size and truncate back 563 * to a block boundary. We subtract 1 in case the size is 564 * exactly on a block boundary. 565 */ 566 last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1; 567 start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize); 568 end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1); 569 ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb); 570 if (last_fsb == end_zero_fsb) { 571 /* 572 * The size was only incremented on its last block. 573 * We took care of that above, so just return. 574 */ 575 return 0; 576 } 577 578 ASSERT(start_zero_fsb <= end_zero_fsb); 579 while (start_zero_fsb <= end_zero_fsb) { 580 nimaps = 1; 581 zero_count_fsb = end_zero_fsb - start_zero_fsb + 1; 582 error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb, 583 &imap, &nimaps, 0); 584 if (error) { 585 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); 586 return error; 587 } 588 ASSERT(nimaps > 0); 589 590 if (imap.br_state == XFS_EXT_UNWRITTEN || 591 imap.br_startblock == HOLESTARTBLOCK) { 592 /* 593 * This loop handles initializing pages that were 594 * partially initialized by the code below this 595 * loop. It basically zeroes the part of the page 596 * that sits on a hole and sets the page as P_HOLE 597 * and calls remapf if it is a mapped file. 598 */ 599 start_zero_fsb = imap.br_startoff + imap.br_blockcount; 600 ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); 601 continue; 602 } 603 604 /* 605 * There are blocks we need to zero. 606 * Drop the inode lock while we're doing the I/O. 607 * We'll still have the iolock to protect us. 608 */ 609 xfs_iunlock(ip, XFS_ILOCK_EXCL); 610 611 zero_off = XFS_FSB_TO_B(mp, start_zero_fsb); 612 zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount); 613 614 if ((zero_off + zero_len) > offset) 615 zero_len = offset - zero_off; 616 617 error = xfs_iozero(ip, zero_off, zero_len); 618 if (error) { 619 goto out_lock; 620 } 621 622 start_zero_fsb = imap.br_startoff + imap.br_blockcount; 623 ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); 624 625 xfs_ilock(ip, XFS_ILOCK_EXCL); 626 } 627 628 return 0; 629 630 out_lock: 631 xfs_ilock(ip, XFS_ILOCK_EXCL); 632 ASSERT(error >= 0); 633 return error; 634 } 635 636 /* 637 * Common pre-write limit and setup checks. 638 * 639 * Called with the iolocked held either shared and exclusive according to 640 * @iolock, and returns with it held. Might upgrade the iolock to exclusive 641 * if called for a direct write beyond i_size. 642 */ 643 STATIC ssize_t 644 xfs_file_aio_write_checks( 645 struct file *file, 646 loff_t *pos, 647 size_t *count, 648 int *iolock) 649 { 650 struct inode *inode = file->f_mapping->host; 651 struct xfs_inode *ip = XFS_I(inode); 652 int error = 0; 653 654 xfs_rw_ilock(ip, XFS_ILOCK_EXCL); 655 restart: 656 error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode)); 657 if (error) { 658 xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); 659 return error; 660 } 661 662 if (likely(!(file->f_mode & FMODE_NOCMTIME))) 663 file_update_time(file); 664 665 /* 666 * If the offset is beyond the size of the file, we need to zero any 667 * blocks that fall between the existing EOF and the start of this 668 * write. If zeroing is needed and we are currently holding the 669 * iolock shared, we need to update it to exclusive which involves 670 * dropping all locks and relocking to maintain correct locking order. 671 * If we do this, restart the function to ensure all checks and values 672 * are still valid. 673 */ 674 if (*pos > i_size_read(inode)) { 675 if (*iolock == XFS_IOLOCK_SHARED) { 676 xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock); 677 *iolock = XFS_IOLOCK_EXCL; 678 xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); 679 goto restart; 680 } 681 error = -xfs_zero_eof(ip, *pos, i_size_read(inode)); 682 } 683 xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); 684 if (error) 685 return error; 686 687 /* 688 * If we're writing the file then make sure to clear the setuid and 689 * setgid bits if the process is not being run by root. This keeps 690 * people from modifying setuid and setgid binaries. 691 */ 692 return file_remove_suid(file); 693 694 } 695 696 /* 697 * xfs_file_dio_aio_write - handle direct IO writes 698 * 699 * Lock the inode appropriately to prepare for and issue a direct IO write. 700 * By separating it from the buffered write path we remove all the tricky to 701 * follow locking changes and looping. 702 * 703 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL 704 * until we're sure the bytes at the new EOF have been zeroed and/or the cached 705 * pages are flushed out. 706 * 707 * In most cases the direct IO writes will be done holding IOLOCK_SHARED 708 * allowing them to be done in parallel with reads and other direct IO writes. 709 * However, if the IO is not aligned to filesystem blocks, the direct IO layer 710 * needs to do sub-block zeroing and that requires serialisation against other 711 * direct IOs to the same block. In this case we need to serialise the 712 * submission of the unaligned IOs so that we don't get racing block zeroing in 713 * the dio layer. To avoid the problem with aio, we also need to wait for 714 * outstanding IOs to complete so that unwritten extent conversion is completed 715 * before we try to map the overlapping block. This is currently implemented by 716 * hitting it with a big hammer (i.e. inode_dio_wait()). 717 * 718 * Returns with locks held indicated by @iolock and errors indicated by 719 * negative return values. 720 */ 721 STATIC ssize_t 722 xfs_file_dio_aio_write( 723 struct kiocb *iocb, 724 const struct iovec *iovp, 725 unsigned long nr_segs, 726 loff_t pos, 727 size_t ocount) 728 { 729 struct file *file = iocb->ki_filp; 730 struct address_space *mapping = file->f_mapping; 731 struct inode *inode = mapping->host; 732 struct xfs_inode *ip = XFS_I(inode); 733 struct xfs_mount *mp = ip->i_mount; 734 ssize_t ret = 0; 735 size_t count = ocount; 736 int unaligned_io = 0; 737 int iolock; 738 struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ? 739 mp->m_rtdev_targp : mp->m_ddev_targp; 740 741 if ((pos & target->bt_smask) || (count & target->bt_smask)) 742 return -XFS_ERROR(EINVAL); 743 744 if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask)) 745 unaligned_io = 1; 746 747 /* 748 * We don't need to take an exclusive lock unless there page cache needs 749 * to be invalidated or unaligned IO is being executed. We don't need to 750 * consider the EOF extension case here because 751 * xfs_file_aio_write_checks() will relock the inode as necessary for 752 * EOF zeroing cases and fill out the new inode size as appropriate. 753 */ 754 if (unaligned_io || mapping->nrpages) 755 iolock = XFS_IOLOCK_EXCL; 756 else 757 iolock = XFS_IOLOCK_SHARED; 758 xfs_rw_ilock(ip, iolock); 759 760 /* 761 * Recheck if there are cached pages that need invalidate after we got 762 * the iolock to protect against other threads adding new pages while 763 * we were waiting for the iolock. 764 */ 765 if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) { 766 xfs_rw_iunlock(ip, iolock); 767 iolock = XFS_IOLOCK_EXCL; 768 xfs_rw_ilock(ip, iolock); 769 } 770 771 ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock); 772 if (ret) 773 goto out; 774 775 if (mapping->nrpages) { 776 ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1, 777 FI_REMAPF_LOCKED); 778 if (ret) 779 goto out; 780 } 781 782 /* 783 * If we are doing unaligned IO, wait for all other IO to drain, 784 * otherwise demote the lock if we had to flush cached pages 785 */ 786 if (unaligned_io) 787 inode_dio_wait(inode); 788 else if (iolock == XFS_IOLOCK_EXCL) { 789 xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); 790 iolock = XFS_IOLOCK_SHARED; 791 } 792 793 trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0); 794 ret = generic_file_direct_write(iocb, iovp, 795 &nr_segs, pos, &iocb->ki_pos, count, ocount); 796 797 out: 798 xfs_rw_iunlock(ip, iolock); 799 800 /* No fallback to buffered IO on errors for XFS. */ 801 ASSERT(ret < 0 || ret == count); 802 return ret; 803 } 804 805 STATIC ssize_t 806 xfs_file_buffered_aio_write( 807 struct kiocb *iocb, 808 const struct iovec *iovp, 809 unsigned long nr_segs, 810 loff_t pos, 811 size_t ocount) 812 { 813 struct file *file = iocb->ki_filp; 814 struct address_space *mapping = file->f_mapping; 815 struct inode *inode = mapping->host; 816 struct xfs_inode *ip = XFS_I(inode); 817 ssize_t ret; 818 int enospc = 0; 819 int iolock = XFS_IOLOCK_EXCL; 820 size_t count = ocount; 821 822 xfs_rw_ilock(ip, iolock); 823 824 ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock); 825 if (ret) 826 goto out; 827 828 /* We can write back this queue in page reclaim */ 829 current->backing_dev_info = mapping->backing_dev_info; 830 831 write_retry: 832 trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0); 833 ret = generic_file_buffered_write(iocb, iovp, nr_segs, 834 pos, &iocb->ki_pos, count, ret); 835 /* 836 * if we just got an ENOSPC, flush the inode now we aren't holding any 837 * page locks and retry *once* 838 */ 839 if (ret == -ENOSPC && !enospc) { 840 enospc = 1; 841 ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE); 842 if (!ret) 843 goto write_retry; 844 } 845 846 current->backing_dev_info = NULL; 847 out: 848 xfs_rw_iunlock(ip, iolock); 849 return ret; 850 } 851 852 STATIC ssize_t 853 xfs_file_aio_write( 854 struct kiocb *iocb, 855 const struct iovec *iovp, 856 unsigned long nr_segs, 857 loff_t pos) 858 { 859 struct file *file = iocb->ki_filp; 860 struct address_space *mapping = file->f_mapping; 861 struct inode *inode = mapping->host; 862 struct xfs_inode *ip = XFS_I(inode); 863 ssize_t ret; 864 size_t ocount = 0; 865 866 XFS_STATS_INC(xs_write_calls); 867 868 BUG_ON(iocb->ki_pos != pos); 869 870 ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ); 871 if (ret) 872 return ret; 873 874 if (ocount == 0) 875 return 0; 876 877 xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE); 878 879 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 880 return -EIO; 881 882 if (unlikely(file->f_flags & O_DIRECT)) 883 ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount); 884 else 885 ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos, 886 ocount); 887 888 if (ret > 0) { 889 ssize_t err; 890 891 XFS_STATS_ADD(xs_write_bytes, ret); 892 893 /* Handle various SYNC-type writes */ 894 err = generic_write_sync(file, pos, ret); 895 if (err < 0) 896 ret = err; 897 } 898 899 return ret; 900 } 901 902 STATIC long 903 xfs_file_fallocate( 904 struct file *file, 905 int mode, 906 loff_t offset, 907 loff_t len) 908 { 909 struct inode *inode = file->f_path.dentry->d_inode; 910 long error; 911 loff_t new_size = 0; 912 xfs_flock64_t bf; 913 xfs_inode_t *ip = XFS_I(inode); 914 int cmd = XFS_IOC_RESVSP; 915 int attr_flags = XFS_ATTR_NOLOCK; 916 917 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 918 return -EOPNOTSUPP; 919 920 bf.l_whence = 0; 921 bf.l_start = offset; 922 bf.l_len = len; 923 924 xfs_ilock(ip, XFS_IOLOCK_EXCL); 925 926 if (mode & FALLOC_FL_PUNCH_HOLE) 927 cmd = XFS_IOC_UNRESVSP; 928 929 /* check the new inode size is valid before allocating */ 930 if (!(mode & FALLOC_FL_KEEP_SIZE) && 931 offset + len > i_size_read(inode)) { 932 new_size = offset + len; 933 error = inode_newsize_ok(inode, new_size); 934 if (error) 935 goto out_unlock; 936 } 937 938 if (file->f_flags & O_DSYNC) 939 attr_flags |= XFS_ATTR_SYNC; 940 941 error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags); 942 if (error) 943 goto out_unlock; 944 945 /* Change file size if needed */ 946 if (new_size) { 947 struct iattr iattr; 948 949 iattr.ia_valid = ATTR_SIZE; 950 iattr.ia_size = new_size; 951 error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK); 952 } 953 954 out_unlock: 955 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 956 return error; 957 } 958 959 960 STATIC int 961 xfs_file_open( 962 struct inode *inode, 963 struct file *file) 964 { 965 if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) 966 return -EFBIG; 967 if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb))) 968 return -EIO; 969 return 0; 970 } 971 972 STATIC int 973 xfs_dir_open( 974 struct inode *inode, 975 struct file *file) 976 { 977 struct xfs_inode *ip = XFS_I(inode); 978 int mode; 979 int error; 980 981 error = xfs_file_open(inode, file); 982 if (error) 983 return error; 984 985 /* 986 * If there are any blocks, read-ahead block 0 as we're almost 987 * certain to have the next operation be a read there. 988 */ 989 mode = xfs_ilock_map_shared(ip); 990 if (ip->i_d.di_nextents > 0) 991 xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK); 992 xfs_iunlock(ip, mode); 993 return 0; 994 } 995 996 STATIC int 997 xfs_file_release( 998 struct inode *inode, 999 struct file *filp) 1000 { 1001 return -xfs_release(XFS_I(inode)); 1002 } 1003 1004 STATIC int 1005 xfs_file_readdir( 1006 struct file *filp, 1007 void *dirent, 1008 filldir_t filldir) 1009 { 1010 struct inode *inode = filp->f_path.dentry->d_inode; 1011 xfs_inode_t *ip = XFS_I(inode); 1012 int error; 1013 size_t bufsize; 1014 1015 /* 1016 * The Linux API doesn't pass down the total size of the buffer 1017 * we read into down to the filesystem. With the filldir concept 1018 * it's not needed for correct information, but the XFS dir2 leaf 1019 * code wants an estimate of the buffer size to calculate it's 1020 * readahead window and size the buffers used for mapping to 1021 * physical blocks. 1022 * 1023 * Try to give it an estimate that's good enough, maybe at some 1024 * point we can change the ->readdir prototype to include the 1025 * buffer size. For now we use the current glibc buffer size. 1026 */ 1027 bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size); 1028 1029 error = xfs_readdir(ip, dirent, bufsize, 1030 (xfs_off_t *)&filp->f_pos, filldir); 1031 if (error) 1032 return -error; 1033 return 0; 1034 } 1035 1036 STATIC int 1037 xfs_file_mmap( 1038 struct file *filp, 1039 struct vm_area_struct *vma) 1040 { 1041 vma->vm_ops = &xfs_file_vm_ops; 1042 vma->vm_flags |= VM_CAN_NONLINEAR; 1043 1044 file_accessed(filp); 1045 return 0; 1046 } 1047 1048 /* 1049 * mmap()d file has taken write protection fault and is being made 1050 * writable. We can set the page state up correctly for a writable 1051 * page, which means we can do correct delalloc accounting (ENOSPC 1052 * checking!) and unwritten extent mapping. 1053 */ 1054 STATIC int 1055 xfs_vm_page_mkwrite( 1056 struct vm_area_struct *vma, 1057 struct vm_fault *vmf) 1058 { 1059 return block_page_mkwrite(vma, vmf, xfs_get_blocks); 1060 } 1061 1062 const struct file_operations xfs_file_operations = { 1063 .llseek = generic_file_llseek, 1064 .read = do_sync_read, 1065 .write = do_sync_write, 1066 .aio_read = xfs_file_aio_read, 1067 .aio_write = xfs_file_aio_write, 1068 .splice_read = xfs_file_splice_read, 1069 .splice_write = xfs_file_splice_write, 1070 .unlocked_ioctl = xfs_file_ioctl, 1071 #ifdef CONFIG_COMPAT 1072 .compat_ioctl = xfs_file_compat_ioctl, 1073 #endif 1074 .mmap = xfs_file_mmap, 1075 .open = xfs_file_open, 1076 .release = xfs_file_release, 1077 .fsync = xfs_file_fsync, 1078 .fallocate = xfs_file_fallocate, 1079 }; 1080 1081 const struct file_operations xfs_dir_file_operations = { 1082 .open = xfs_dir_open, 1083 .read = generic_read_dir, 1084 .readdir = xfs_file_readdir, 1085 .llseek = generic_file_llseek, 1086 .unlocked_ioctl = xfs_file_ioctl, 1087 #ifdef CONFIG_COMPAT 1088 .compat_ioctl = xfs_file_compat_ioctl, 1089 #endif 1090 .fsync = xfs_dir_fsync, 1091 }; 1092 1093 static const struct vm_operations_struct xfs_file_vm_ops = { 1094 .fault = filemap_fault, 1095 .page_mkwrite = xfs_vm_page_mkwrite, 1096 }; 1097