/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		mutex_lock(&VFS_I(ip)->i_mutex);
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of the buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void	*fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
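		/*
		 * Advance past the chunk we just zeroed; each pass through
		 * this loop covers at most one page.
		 */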
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return status;
}

int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
	error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		ip->i_d.di_mode &= ~S_ISUID;
		if (ip->i_d.di_mode & S_IXGRP)
			ip->i_d.di_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp, 0);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache of the device used for file data
		 * first.  This is to ensure newly written file data makes
		 * it to disk before logging the new inode size in case of
		 * an extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * All metadata updates are logged, which means that we just have
	 * to flush the log up to the latest LSN that touched the inode.
	 */
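	/*
	 * The inode is only pinned while it carries logged changes that have
	 * not yet reached the on-disk log, so an unpinned inode needs no log
	 * force at all.
	 */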
237 */ 238 xfs_ilock(ip, XFS_ILOCK_SHARED); 239 if (xfs_ipincount(ip)) { 240 if (!datasync || 241 (ip->i_itemp->ili_fields & ~XFS_ILOG_TIMESTAMP)) 242 lsn = ip->i_itemp->ili_last_lsn; 243 } 244 xfs_iunlock(ip, XFS_ILOCK_SHARED); 245 246 if (lsn) 247 error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed); 248 249 /* 250 * If we only have a single device, and the log force about was 251 * a no-op we might have to flush the data device cache here. 252 * This can only happen for fdatasync/O_DSYNC if we were overwriting 253 * an already allocated file and thus do not have any metadata to 254 * commit. 255 */ 256 if ((mp->m_flags & XFS_MOUNT_BARRIER) && 257 mp->m_logdev_targp == mp->m_ddev_targp && 258 !XFS_IS_REALTIME_INODE(ip) && 259 !log_flushed) 260 xfs_blkdev_issue_flush(mp->m_ddev_targp); 261 262 return error; 263 } 264 265 STATIC ssize_t 266 xfs_file_read_iter( 267 struct kiocb *iocb, 268 struct iov_iter *to) 269 { 270 struct file *file = iocb->ki_filp; 271 struct inode *inode = file->f_mapping->host; 272 struct xfs_inode *ip = XFS_I(inode); 273 struct xfs_mount *mp = ip->i_mount; 274 size_t size = iov_iter_count(to); 275 ssize_t ret = 0; 276 int ioflags = 0; 277 xfs_fsize_t n; 278 loff_t pos = iocb->ki_pos; 279 280 XFS_STATS_INC(xs_read_calls); 281 282 if (unlikely(file->f_flags & O_DIRECT)) 283 ioflags |= XFS_IO_ISDIRECT; 284 if (file->f_mode & FMODE_NOCMTIME) 285 ioflags |= XFS_IO_INVIS; 286 287 if (unlikely(ioflags & XFS_IO_ISDIRECT)) { 288 xfs_buftarg_t *target = 289 XFS_IS_REALTIME_INODE(ip) ? 290 mp->m_rtdev_targp : mp->m_ddev_targp; 291 /* DIO must be aligned to device logical sector size */ 292 if ((pos | size) & target->bt_logical_sectormask) { 293 if (pos == i_size_read(inode)) 294 return 0; 295 return -EINVAL; 296 } 297 } 298 299 n = mp->m_super->s_maxbytes - pos; 300 if (n <= 0 || size == 0) 301 return 0; 302 303 if (n < size) 304 size = n; 305 306 if (XFS_FORCED_SHUTDOWN(mp)) 307 return -EIO; 308 309 /* 310 * Locking is a bit tricky here. If we take an exclusive lock 311 * for direct IO, we effectively serialise all new concurrent 312 * read IO to this file and block it behind IO that is currently in 313 * progress because IO in progress holds the IO lock shared. We only 314 * need to hold the lock exclusive to blow away the page cache, so 315 * only take lock exclusively if the page cache needs invalidation. 316 * This allows the normal direct IO case of no page cache pages to 317 * proceeed concurrently without serialisation. 318 */ 319 xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); 320 if ((ioflags & XFS_IO_ISDIRECT) && inode->i_mapping->nrpages) { 321 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); 322 xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); 323 324 if (inode->i_mapping->nrpages) { 325 ret = filemap_write_and_wait_range( 326 VFS_I(ip)->i_mapping, 327 pos, pos + size - 1); 328 if (ret) { 329 xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL); 330 return ret; 331 } 332 333 /* 334 * Invalidate whole pages. This can return an error if 335 * we fail to invalidate a page, but this should never 336 * happen on XFS. Warn if it does fail. 
337 */ 338 ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping, 339 pos >> PAGE_CACHE_SHIFT, 340 (pos + size - 1) >> PAGE_CACHE_SHIFT); 341 WARN_ON_ONCE(ret); 342 ret = 0; 343 } 344 xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); 345 } 346 347 trace_xfs_file_read(ip, size, pos, ioflags); 348 349 ret = generic_file_read_iter(iocb, to); 350 if (ret > 0) 351 XFS_STATS_ADD(xs_read_bytes, ret); 352 353 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); 354 return ret; 355 } 356 357 STATIC ssize_t 358 xfs_file_splice_read( 359 struct file *infilp, 360 loff_t *ppos, 361 struct pipe_inode_info *pipe, 362 size_t count, 363 unsigned int flags) 364 { 365 struct xfs_inode *ip = XFS_I(infilp->f_mapping->host); 366 int ioflags = 0; 367 ssize_t ret; 368 369 XFS_STATS_INC(xs_read_calls); 370 371 if (infilp->f_mode & FMODE_NOCMTIME) 372 ioflags |= XFS_IO_INVIS; 373 374 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 375 return -EIO; 376 377 xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); 378 379 trace_xfs_file_splice_read(ip, count, *ppos, ioflags); 380 381 ret = generic_file_splice_read(infilp, ppos, pipe, count, flags); 382 if (ret > 0) 383 XFS_STATS_ADD(xs_read_bytes, ret); 384 385 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); 386 return ret; 387 } 388 389 /* 390 * This routine is called to handle zeroing any space in the last block of the 391 * file that is beyond the EOF. We do this since the size is being increased 392 * without writing anything to that block and we don't want to read the 393 * garbage on the disk. 394 */ 395 STATIC int /* error (positive) */ 396 xfs_zero_last_block( 397 struct xfs_inode *ip, 398 xfs_fsize_t offset, 399 xfs_fsize_t isize, 400 bool *did_zeroing) 401 { 402 struct xfs_mount *mp = ip->i_mount; 403 xfs_fileoff_t last_fsb = XFS_B_TO_FSBT(mp, isize); 404 int zero_offset = XFS_B_FSB_OFFSET(mp, isize); 405 int zero_len; 406 int nimaps = 1; 407 int error = 0; 408 struct xfs_bmbt_irec imap; 409 410 xfs_ilock(ip, XFS_ILOCK_EXCL); 411 error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0); 412 xfs_iunlock(ip, XFS_ILOCK_EXCL); 413 if (error) 414 return error; 415 416 ASSERT(nimaps > 0); 417 418 /* 419 * If the block underlying isize is just a hole, then there 420 * is nothing to zero. 421 */ 422 if (imap.br_startblock == HOLESTARTBLOCK) 423 return 0; 424 425 zero_len = mp->m_sb.sb_blocksize - zero_offset; 426 if (isize + zero_len > offset) 427 zero_len = offset - isize; 428 *did_zeroing = true; 429 return xfs_iozero(ip, isize, zero_len); 430 } 431 432 /* 433 * Zero any on disk space between the current EOF and the new, larger EOF. 434 * 435 * This handles the normal case of zeroing the remainder of the last block in 436 * the file and the unusual case of zeroing blocks out beyond the size of the 437 * file. This second case only happens with fixed size extents and when the 438 * system crashes before the inode size was updated but after blocks were 439 * allocated. 440 * 441 * Expects the iolock to be held exclusive, and will take the ilock internally. 
442 */ 443 int /* error (positive) */ 444 xfs_zero_eof( 445 struct xfs_inode *ip, 446 xfs_off_t offset, /* starting I/O offset */ 447 xfs_fsize_t isize, /* current inode size */ 448 bool *did_zeroing) 449 { 450 struct xfs_mount *mp = ip->i_mount; 451 xfs_fileoff_t start_zero_fsb; 452 xfs_fileoff_t end_zero_fsb; 453 xfs_fileoff_t zero_count_fsb; 454 xfs_fileoff_t last_fsb; 455 xfs_fileoff_t zero_off; 456 xfs_fsize_t zero_len; 457 int nimaps; 458 int error = 0; 459 struct xfs_bmbt_irec imap; 460 461 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 462 ASSERT(offset > isize); 463 464 /* 465 * First handle zeroing the block on which isize resides. 466 * 467 * We only zero a part of that block so it is handled specially. 468 */ 469 if (XFS_B_FSB_OFFSET(mp, isize) != 0) { 470 error = xfs_zero_last_block(ip, offset, isize, did_zeroing); 471 if (error) 472 return error; 473 } 474 475 /* 476 * Calculate the range between the new size and the old where blocks 477 * needing to be zeroed may exist. 478 * 479 * To get the block where the last byte in the file currently resides, 480 * we need to subtract one from the size and truncate back to a block 481 * boundary. We subtract 1 in case the size is exactly on a block 482 * boundary. 483 */ 484 last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1; 485 start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize); 486 end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1); 487 ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb); 488 if (last_fsb == end_zero_fsb) { 489 /* 490 * The size was only incremented on its last block. 491 * We took care of that above, so just return. 492 */ 493 return 0; 494 } 495 496 ASSERT(start_zero_fsb <= end_zero_fsb); 497 while (start_zero_fsb <= end_zero_fsb) { 498 nimaps = 1; 499 zero_count_fsb = end_zero_fsb - start_zero_fsb + 1; 500 501 xfs_ilock(ip, XFS_ILOCK_EXCL); 502 error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb, 503 &imap, &nimaps, 0); 504 xfs_iunlock(ip, XFS_ILOCK_EXCL); 505 if (error) 506 return error; 507 508 ASSERT(nimaps > 0); 509 510 if (imap.br_state == XFS_EXT_UNWRITTEN || 511 imap.br_startblock == HOLESTARTBLOCK) { 512 start_zero_fsb = imap.br_startoff + imap.br_blockcount; 513 ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); 514 continue; 515 } 516 517 /* 518 * There are blocks we need to zero. 519 */ 520 zero_off = XFS_FSB_TO_B(mp, start_zero_fsb); 521 zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount); 522 523 if ((zero_off + zero_len) > offset) 524 zero_len = offset - zero_off; 525 526 error = xfs_iozero(ip, zero_off, zero_len); 527 if (error) 528 return error; 529 530 *did_zeroing = true; 531 start_zero_fsb = imap.br_startoff + imap.br_blockcount; 532 ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); 533 } 534 535 return 0; 536 } 537 538 /* 539 * Common pre-write limit and setup checks. 540 * 541 * Called with the iolocked held either shared and exclusive according to 542 * @iolock, and returns with it held. Might upgrade the iolock to exclusive 543 * if called for a direct write beyond i_size. 
544 */ 545 STATIC ssize_t 546 xfs_file_aio_write_checks( 547 struct file *file, 548 loff_t *pos, 549 size_t *count, 550 int *iolock) 551 { 552 struct inode *inode = file->f_mapping->host; 553 struct xfs_inode *ip = XFS_I(inode); 554 int error = 0; 555 556 restart: 557 error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode)); 558 if (error) 559 return error; 560 561 error = xfs_break_layouts(inode, iolock); 562 if (error) 563 return error; 564 565 /* 566 * If the offset is beyond the size of the file, we need to zero any 567 * blocks that fall between the existing EOF and the start of this 568 * write. If zeroing is needed and we are currently holding the 569 * iolock shared, we need to update it to exclusive which implies 570 * having to redo all checks before. 571 */ 572 if (*pos > i_size_read(inode)) { 573 bool zero = false; 574 575 if (*iolock == XFS_IOLOCK_SHARED) { 576 xfs_rw_iunlock(ip, *iolock); 577 *iolock = XFS_IOLOCK_EXCL; 578 xfs_rw_ilock(ip, *iolock); 579 goto restart; 580 } 581 error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero); 582 if (error) 583 return error; 584 } 585 586 /* 587 * Updating the timestamps will grab the ilock again from 588 * xfs_fs_dirty_inode, so we have to call it after dropping the 589 * lock above. Eventually we should look into a way to avoid 590 * the pointless lock roundtrip. 591 */ 592 if (likely(!(file->f_mode & FMODE_NOCMTIME))) { 593 error = file_update_time(file); 594 if (error) 595 return error; 596 } 597 598 /* 599 * If we're writing the file then make sure to clear the setuid and 600 * setgid bits if the process is not being run by root. This keeps 601 * people from modifying setuid and setgid binaries. 602 */ 603 return file_remove_suid(file); 604 } 605 606 /* 607 * xfs_file_dio_aio_write - handle direct IO writes 608 * 609 * Lock the inode appropriately to prepare for and issue a direct IO write. 610 * By separating it from the buffered write path we remove all the tricky to 611 * follow locking changes and looping. 612 * 613 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL 614 * until we're sure the bytes at the new EOF have been zeroed and/or the cached 615 * pages are flushed out. 616 * 617 * In most cases the direct IO writes will be done holding IOLOCK_SHARED 618 * allowing them to be done in parallel with reads and other direct IO writes. 619 * However, if the IO is not aligned to filesystem blocks, the direct IO layer 620 * needs to do sub-block zeroing and that requires serialisation against other 621 * direct IOs to the same block. In this case we need to serialise the 622 * submission of the unaligned IOs so that we don't get racing block zeroing in 623 * the dio layer. To avoid the problem with aio, we also need to wait for 624 * outstanding IOs to complete so that unwritten extent conversion is completed 625 * before we try to map the overlapping block. This is currently implemented by 626 * hitting it with a big hammer (i.e. inode_dio_wait()). 627 * 628 * Returns with locks held indicated by @iolock and errors indicated by 629 * negative return values. 
630 */ 631 STATIC ssize_t 632 xfs_file_dio_aio_write( 633 struct kiocb *iocb, 634 struct iov_iter *from) 635 { 636 struct file *file = iocb->ki_filp; 637 struct address_space *mapping = file->f_mapping; 638 struct inode *inode = mapping->host; 639 struct xfs_inode *ip = XFS_I(inode); 640 struct xfs_mount *mp = ip->i_mount; 641 ssize_t ret = 0; 642 int unaligned_io = 0; 643 int iolock; 644 size_t count = iov_iter_count(from); 645 loff_t pos = iocb->ki_pos; 646 struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ? 647 mp->m_rtdev_targp : mp->m_ddev_targp; 648 649 /* DIO must be aligned to device logical sector size */ 650 if ((pos | count) & target->bt_logical_sectormask) 651 return -EINVAL; 652 653 /* "unaligned" here means not aligned to a filesystem block */ 654 if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask)) 655 unaligned_io = 1; 656 657 /* 658 * We don't need to take an exclusive lock unless there page cache needs 659 * to be invalidated or unaligned IO is being executed. We don't need to 660 * consider the EOF extension case here because 661 * xfs_file_aio_write_checks() will relock the inode as necessary for 662 * EOF zeroing cases and fill out the new inode size as appropriate. 663 */ 664 if (unaligned_io || mapping->nrpages) 665 iolock = XFS_IOLOCK_EXCL; 666 else 667 iolock = XFS_IOLOCK_SHARED; 668 xfs_rw_ilock(ip, iolock); 669 670 /* 671 * Recheck if there are cached pages that need invalidate after we got 672 * the iolock to protect against other threads adding new pages while 673 * we were waiting for the iolock. 674 */ 675 if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) { 676 xfs_rw_iunlock(ip, iolock); 677 iolock = XFS_IOLOCK_EXCL; 678 xfs_rw_ilock(ip, iolock); 679 } 680 681 ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock); 682 if (ret) 683 goto out; 684 iov_iter_truncate(from, count); 685 686 if (mapping->nrpages) { 687 ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, 688 pos, pos + count - 1); 689 if (ret) 690 goto out; 691 /* 692 * Invalidate whole pages. This can return an error if 693 * we fail to invalidate a page, but this should never 694 * happen on XFS. Warn if it does fail. 695 */ 696 ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping, 697 pos >> PAGE_CACHE_SHIFT, 698 (pos + count - 1) >> PAGE_CACHE_SHIFT); 699 WARN_ON_ONCE(ret); 700 ret = 0; 701 } 702 703 /* 704 * If we are doing unaligned IO, wait for all other IO to drain, 705 * otherwise demote the lock if we had to flush cached pages 706 */ 707 if (unaligned_io) 708 inode_dio_wait(inode); 709 else if (iolock == XFS_IOLOCK_EXCL) { 710 xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); 711 iolock = XFS_IOLOCK_SHARED; 712 } 713 714 trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0); 715 ret = generic_file_direct_write(iocb, from, pos); 716 717 out: 718 xfs_rw_iunlock(ip, iolock); 719 720 /* No fallback to buffered IO on errors for XFS. 
	ASSERT(ret < 0 || ret == count);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock = XFS_IOLOCK_EXCL;
	loff_t			pos = iocb->ki_pos;
	size_t			count = iov_iter_count(from);

	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	iov_iter_truncate(from, count);
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

write_retry:
	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
	ret = generic_perform_write(file, from, pos);
	if (likely(ret >= 0))
		iocb->ki_pos = pos + ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);
		eofb.eof_scan_owner = ip->i_ino; /* for locking */
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, from);
	else
		ret = xfs_file_buffered_aio_write(iocb, from);

	if (ret > 0) {
		ssize_t		err;

		XFS_STATS_ADD(xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	loff_t			new_size = 0;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock);
	if (error)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
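		/* Free the blocks backing the range without changing the size. */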
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_setattr_size(ip, &iattr);
	}

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}


STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
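	/*
	 * 32k is the glibc buffer size referred to above; never ask for more
	 * than the directory size itself.
	 */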
981 */ 982 bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size); 983 984 return xfs_readdir(ip, ctx, bufsize); 985 } 986 987 STATIC int 988 xfs_file_mmap( 989 struct file *filp, 990 struct vm_area_struct *vma) 991 { 992 vma->vm_ops = &xfs_file_vm_ops; 993 994 file_accessed(filp); 995 return 0; 996 } 997 998 /* 999 * mmap()d file has taken write protection fault and is being made 1000 * writable. We can set the page state up correctly for a writable 1001 * page, which means we can do correct delalloc accounting (ENOSPC 1002 * checking!) and unwritten extent mapping. 1003 */ 1004 STATIC int 1005 xfs_vm_page_mkwrite( 1006 struct vm_area_struct *vma, 1007 struct vm_fault *vmf) 1008 { 1009 return block_page_mkwrite(vma, vmf, xfs_get_blocks); 1010 } 1011 1012 /* 1013 * This type is designed to indicate the type of offset we would like 1014 * to search from page cache for xfs_seek_hole_data(). 1015 */ 1016 enum { 1017 HOLE_OFF = 0, 1018 DATA_OFF, 1019 }; 1020 1021 /* 1022 * Lookup the desired type of offset from the given page. 1023 * 1024 * On success, return true and the offset argument will point to the 1025 * start of the region that was found. Otherwise this function will 1026 * return false and keep the offset argument unchanged. 1027 */ 1028 STATIC bool 1029 xfs_lookup_buffer_offset( 1030 struct page *page, 1031 loff_t *offset, 1032 unsigned int type) 1033 { 1034 loff_t lastoff = page_offset(page); 1035 bool found = false; 1036 struct buffer_head *bh, *head; 1037 1038 bh = head = page_buffers(page); 1039 do { 1040 /* 1041 * Unwritten extents that have data in the page 1042 * cache covering them can be identified by the 1043 * BH_Unwritten state flag. Pages with multiple 1044 * buffers might have a mix of holes, data and 1045 * unwritten extents - any buffer with valid 1046 * data in it should have BH_Uptodate flag set 1047 * on it. 1048 */ 1049 if (buffer_unwritten(bh) || 1050 buffer_uptodate(bh)) { 1051 if (type == DATA_OFF) 1052 found = true; 1053 } else { 1054 if (type == HOLE_OFF) 1055 found = true; 1056 } 1057 1058 if (found) { 1059 *offset = lastoff; 1060 break; 1061 } 1062 lastoff += bh->b_size; 1063 } while ((bh = bh->b_this_page) != head); 1064 1065 return found; 1066 } 1067 1068 /* 1069 * This routine is called to find out and return a data or hole offset 1070 * from the page cache for unwritten extents according to the desired 1071 * type for xfs_seek_hole_data(). 1072 * 1073 * The argument offset is used to tell where we start to search from the 1074 * page cache. Map is used to figure out the end points of the range to 1075 * lookup pages. 1076 * 1077 * Return true if the desired type of offset was found, and the argument 1078 * offset is filled with that address. Otherwise, return false and keep 1079 * offset unchanged. 
1080 */ 1081 STATIC bool 1082 xfs_find_get_desired_pgoff( 1083 struct inode *inode, 1084 struct xfs_bmbt_irec *map, 1085 unsigned int type, 1086 loff_t *offset) 1087 { 1088 struct xfs_inode *ip = XFS_I(inode); 1089 struct xfs_mount *mp = ip->i_mount; 1090 struct pagevec pvec; 1091 pgoff_t index; 1092 pgoff_t end; 1093 loff_t endoff; 1094 loff_t startoff = *offset; 1095 loff_t lastoff = startoff; 1096 bool found = false; 1097 1098 pagevec_init(&pvec, 0); 1099 1100 index = startoff >> PAGE_CACHE_SHIFT; 1101 endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount); 1102 end = endoff >> PAGE_CACHE_SHIFT; 1103 do { 1104 int want; 1105 unsigned nr_pages; 1106 unsigned int i; 1107 1108 want = min_t(pgoff_t, end - index, PAGEVEC_SIZE); 1109 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, 1110 want); 1111 /* 1112 * No page mapped into given range. If we are searching holes 1113 * and if this is the first time we got into the loop, it means 1114 * that the given offset is landed in a hole, return it. 1115 * 1116 * If we have already stepped through some block buffers to find 1117 * holes but they all contains data. In this case, the last 1118 * offset is already updated and pointed to the end of the last 1119 * mapped page, if it does not reach the endpoint to search, 1120 * that means there should be a hole between them. 1121 */ 1122 if (nr_pages == 0) { 1123 /* Data search found nothing */ 1124 if (type == DATA_OFF) 1125 break; 1126 1127 ASSERT(type == HOLE_OFF); 1128 if (lastoff == startoff || lastoff < endoff) { 1129 found = true; 1130 *offset = lastoff; 1131 } 1132 break; 1133 } 1134 1135 /* 1136 * At lease we found one page. If this is the first time we 1137 * step into the loop, and if the first page index offset is 1138 * greater than the given search offset, a hole was found. 1139 */ 1140 if (type == HOLE_OFF && lastoff == startoff && 1141 lastoff < page_offset(pvec.pages[0])) { 1142 found = true; 1143 break; 1144 } 1145 1146 for (i = 0; i < nr_pages; i++) { 1147 struct page *page = pvec.pages[i]; 1148 loff_t b_offset; 1149 1150 /* 1151 * At this point, the page may be truncated or 1152 * invalidated (changing page->mapping to NULL), 1153 * or even swizzled back from swapper_space to tmpfs 1154 * file mapping. However, page->index will not change 1155 * because we have a reference on the page. 1156 * 1157 * Searching done if the page index is out of range. 1158 * If the current offset is not reaches the end of 1159 * the specified search range, there should be a hole 1160 * between them. 1161 */ 1162 if (page->index > end) { 1163 if (type == HOLE_OFF && lastoff < endoff) { 1164 *offset = lastoff; 1165 found = true; 1166 } 1167 goto out; 1168 } 1169 1170 lock_page(page); 1171 /* 1172 * Page truncated or invalidated(page->mapping == NULL). 1173 * We can freely skip it and proceed to check the next 1174 * page. 1175 */ 1176 if (unlikely(page->mapping != inode->i_mapping)) { 1177 unlock_page(page); 1178 continue; 1179 } 1180 1181 if (!page_has_buffers(page)) { 1182 unlock_page(page); 1183 continue; 1184 } 1185 1186 found = xfs_lookup_buffer_offset(page, &b_offset, type); 1187 if (found) { 1188 /* 1189 * The found offset may be less than the start 1190 * point to search if this is the first time to 1191 * come here. 1192 */ 1193 *offset = max_t(loff_t, startoff, b_offset); 1194 unlock_page(page); 1195 goto out; 1196 } 1197 1198 /* 1199 * We either searching data but nothing was found, or 1200 * searching hole but found a data buffer. 
			 * In either case the next page probably contains what
			 * we are looking for, so update the last offset to it.
			 */
			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * The number of returned pages is less than we wanted, so
		 * the search is done.  For a data search nothing was found;
		 * for a hole search we found a hole behind the last offset.
		 */
		if (nr_pages < want) {
			if (type == HOLE_OFF) {
				*offset = lastoff;
				found = true;
			}
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

STATIC loff_t
xfs_seek_hole_data(
	struct file		*file,
	loff_t			start,
	int			whence)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fsize_t		isize;
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		end;
	uint			lock;
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lock = xfs_ilock_data_map_shared(ip);

	isize = i_size_read(inode);
	if (start >= isize) {
		error = -ENXIO;
		goto out_unlock;
	}

	/*
	 * Try to read extents from the first block indicated
	 * by fsbno to the end block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	end = XFS_B_TO_FSB(mp, isize);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_unlock;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = -ENXIO;
			goto out_unlock;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in the hole we wanted? */
			if (whence == SEEK_HOLE &&
			    map[i].br_startblock == HOLESTARTBLOCK)
				goto out;

			/* Landed in the data extent we wanted? */
			if (whence == SEEK_DATA &&
			    (map[i].br_startblock == DELAYSTARTBLOCK ||
			     (map[i].br_state == XFS_EXT_NORM &&
			      !isnullstartblock(map[i].br_startblock))))
				goto out;

			/*
			 * Landed in an unwritten extent, try to search
			 * for hole or data from page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
							&offset))
					goto out;
			}
		}

		/*
		 * We only received one extent out of the two requested. This
		 * means we've hit EOF and didn't find what we are looking for.
		 */
		if (nmap == 1) {
			/*
			 * If we were looking for a hole, set offset to
			 * the end of the file (i.e., there is an implicit
			 * hole at the end of any file).
			 */
			if (whence == SEEK_HOLE) {
				offset = isize;
				break;
			}
			/*
			 * If we were looking for data, it's nowhere to be found
			 */
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_unlock;
		}

		ASSERT(i > 1);

		/*
		 * Nothing was found, proceed to the next round of search
		 * if the next reading offset is not at or beyond EOF.
		 */
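		/* Continue the scan right after the last extent we examined. */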
1335 */ 1336 fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount; 1337 start = XFS_FSB_TO_B(mp, fsbno); 1338 if (start >= isize) { 1339 if (whence == SEEK_HOLE) { 1340 offset = isize; 1341 break; 1342 } 1343 ASSERT(whence == SEEK_DATA); 1344 error = -ENXIO; 1345 goto out_unlock; 1346 } 1347 } 1348 1349 out: 1350 /* 1351 * If at this point we have found the hole we wanted, the returned 1352 * offset may be bigger than the file size as it may be aligned to 1353 * page boundary for unwritten extents. We need to deal with this 1354 * situation in particular. 1355 */ 1356 if (whence == SEEK_HOLE) 1357 offset = min_t(loff_t, offset, isize); 1358 offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); 1359 1360 out_unlock: 1361 xfs_iunlock(ip, lock); 1362 1363 if (error) 1364 return error; 1365 return offset; 1366 } 1367 1368 STATIC loff_t 1369 xfs_file_llseek( 1370 struct file *file, 1371 loff_t offset, 1372 int whence) 1373 { 1374 switch (whence) { 1375 case SEEK_END: 1376 case SEEK_CUR: 1377 case SEEK_SET: 1378 return generic_file_llseek(file, offset, whence); 1379 case SEEK_HOLE: 1380 case SEEK_DATA: 1381 return xfs_seek_hole_data(file, offset, whence); 1382 default: 1383 return -EINVAL; 1384 } 1385 } 1386 1387 const struct file_operations xfs_file_operations = { 1388 .llseek = xfs_file_llseek, 1389 .read_iter = xfs_file_read_iter, 1390 .write_iter = xfs_file_write_iter, 1391 .splice_read = xfs_file_splice_read, 1392 .splice_write = iter_file_splice_write, 1393 .unlocked_ioctl = xfs_file_ioctl, 1394 #ifdef CONFIG_COMPAT 1395 .compat_ioctl = xfs_file_compat_ioctl, 1396 #endif 1397 .mmap = xfs_file_mmap, 1398 .open = xfs_file_open, 1399 .release = xfs_file_release, 1400 .fsync = xfs_file_fsync, 1401 .fallocate = xfs_file_fallocate, 1402 }; 1403 1404 const struct file_operations xfs_dir_file_operations = { 1405 .open = xfs_dir_open, 1406 .read = generic_read_dir, 1407 .iterate = xfs_file_readdir, 1408 .llseek = generic_file_llseek, 1409 .unlocked_ioctl = xfs_file_ioctl, 1410 #ifdef CONFIG_COMPAT 1411 .compat_ioctl = xfs_file_compat_ioctl, 1412 #endif 1413 .fsync = xfs_dir_fsync, 1414 }; 1415 1416 static const struct vm_operations_struct xfs_file_vm_ops = { 1417 .fault = filemap_fault, 1418 .map_pages = filemap_map_pages, 1419 .page_mkwrite = xfs_vm_page_mkwrite, 1420 }; 1421