/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
	    (atomic_read(&inode->i_writecount) == 1) &&
	    !EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}
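/*
 * Worked example (editor's illustration, not part of the original code):
 * with a 4096-byte block size, blockmask is 0xfff.  A write at pos = 8192
 * from a 4096-byte, 4096-byte-aligned buffer is aligned, since
 * (8192 | iov_iter_alignment(from)) & 0xfff == 0.  A write at pos = 4608
 * is unaligned (4608 & 0xfff == 512), so two such AIOs landing in the
 * same unwritten block need the serialization described above.
 */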
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct mutex *aio_mutex = NULL;
	struct blk_plug plug;
	int o_direct = io_is_direct(file);
	int overwrite = 0;
	size_t length = iov_iter_count(from);
	ssize_t ret;
	loff_t pos = iocb->ki_pos;

	/*
	 * Unaligned direct AIO must be serialized; see comment above.
	 * In the case of O_APPEND, assume that we must always serialize.
	 */
	if (o_direct &&
	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    (file->f_flags & O_APPEND ||
	     ext4_unaligned_aio(inode, from, pos))) {
		aio_mutex = ext4_aio_mutex(inode);
		mutex_lock(aio_mutex);
		ext4_unwritten_wait(inode);
	}

	mutex_lock(&inode->i_mutex);
	if (file->f_flags & O_APPEND)
		iocb->ki_pos = pos = i_size_read(inode);

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if ((pos > sbi->s_bitmap_maxbytes) ||
		    (pos == sbi->s_bitmap_maxbytes && length > 0)) {
			mutex_unlock(&inode->i_mutex);
			ret = -EFBIG;
			goto errout;
		}

		if (pos + length > sbi->s_bitmap_maxbytes)
			iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos);
	}

	iocb->private = &overwrite;
	if (o_direct) {
		blk_start_plug(&plug);

		/* check whether we do a DIO overwrite or not */
		if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
		    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
			struct ext4_map_blocks map;
			unsigned int blkbits = inode->i_blkbits;
			int err, len;

			map.m_lblk = pos >> blkbits;
			map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
				- map.m_lblk;
			len = map.m_len;

			err = ext4_map_blocks(NULL, inode, &map, 0);
			/*
			 * 'err == len' means that all of the blocks have
			 * been preallocated, whether or not they are
			 * initialized.  To exclude unwritten extents we
			 * must also check m_flags: initialized extents
			 * are reported with EXT4_MAP_MAPPED set, both
			 * when we hit the extent cache and when we do a
			 * real lookup.  Only when both conditions hold
			 * is this a pure overwrite.
			 */
			if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
				overwrite = 1;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	if (o_direct)
		blk_finish_plug(&plug);

errout:
	if (aio_mutex)
		mutex_unlock(aio_mutex);
	return ret;
}
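/*
 * Usage sketch (illustrative, not from the original source): the DIO
 * overwrite check above is what allows an aligned O_DIRECT rewrite of
 * already-initialized blocks to proceed as an "overwrite", assuming the
 * filesystem is mounted with dioread_nolock.  From userspace this is
 * simply:
 *
 *	int fd = open("data", O_RDWR | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 4096);
 *	pwrite(fd, buf, 4096, 0);	// entirely inside i_size,
 *					// over initialized extents
 *
 * All of the conditions (no cached pages, no aio_mutex held, range
 * within i_size, fully mapped) must hold for overwrite to be set.
 */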
#ifdef CONFIG_FS_DAX
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, ext4_get_block);
					/* Is this the right get_block? */
}

static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_mkwrite(vma, vmf, ext4_get_block);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.page_mkwrite	= ext4_dax_mkwrite,
	.pfn_mkwrite	= dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct path path;
	char buf[64], *cp;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		int ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}
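/*
 * Editor's note: the mount point sampled in ext4_file_open() is stored
 * in the s_last_mounted field of the on-disk superblock; it can be read
 * back with e.g. "dumpe2fs -h /dev/sdXN" (the "Last mounted on:" line;
 * exact formatting depends on the e2fsprogs version).
 */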
/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space() because we can implement
 * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files in the same
 * function.  When the extent status tree has been fully implemented, it
 * will track all extent status for a file and we can directly use it to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look in
 * the page cache to check whether there is any data in the range
 * [startoff, endoff]: if this range contains an unwritten extent, we treat
 * the extent as data or as a hole according to whether the page cache has
 * data for it or not.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     struct ext4_map_blocks *map,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

	index = startoff >> PAGE_CACHE_SHIFT;
	end = endoff >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * If this is the first pass through the loop, or
			 * the offset is not beyond the end offset, there
			 * is a hole at this offset.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * If this is the first pass through the loop and the
		 * offset is smaller than the first page offset, there is
		 * a hole at this offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is not beyond the end of
			 * the given range, it is a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			lastoff = page_offset(page);
			bh = head = page_buffers(page);
			do {
				if (buffer_uptodate(bh) ||
				    buffer_unwritten(bh)) {
					if (whence == SEEK_DATA)
						found = 1;
				} else {
					if (whence == SEEK_HOLE)
						found = 1;
				}
				if (found) {
					*offset = max_t(loff_t,
							startoff, lastoff);
					unlock_page(page);
					goto out;
				}
				lastoff += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were returned than we asked for, so there
		 * must be a hole in the remainder of the range.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}
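/*
 * Worked example (editor's illustration): suppose a file has a 1 MiB
 * fallocate()d (unwritten) extent starting at offset 0, and userspace
 * has written one page at offset 64 KiB that is still in the page cache.
 * For SEEK_DATA at 0, the scan above finds the uptodate buffers of that
 * page and reports 64 KiB; for SEEK_HOLE at 0, the gap before the first
 * cached page means offset 0 itself is reported, since unwritten blocks
 * with no cached data read back as zeroes.
 */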
/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is a delayed extent at this offset, treat it
		 * as data.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole according to whether the
		 * page cache has data for it or not.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
							      &map, &dataoff);
			if (unwritten)
				break;
		}

		last++;
		dataoff = (loff_t)last << blkbits;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}
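/*
 * Userspace view (illustrative): ext4_seek_data() above and
 * ext4_seek_hole() below are reached through lseek(2):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *
 * Both return -1 with errno == ENXIO when the starting offset is at or
 * beyond the end of file; a fully-written file reports a single data
 * region with an implicit hole at i_size.
 */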
/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			last += ret;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is a delayed extent at this offset, skip over
		 * it; delayed-allocated blocks count as data.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			last = es.es_lblk + es.es_len;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole according to whether the
		 * page cache has data for it or not.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
							      &map, &holeoff);
			if (!unwritten) {
				last += ret;
				holeoff = (loff_t)last << blkbits;
				continue;
			}
		}

		/* found a hole */
		break;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl	= ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};
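/*
 * Editor's note: for regular files these two operation tables are
 * installed by ext4_iget() in fs/ext4/inode.c, so the handlers above
 * are reached through the generic VFS entry points (lseek, read, write,
 * mmap, open, fsync, ...).
 */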