/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and
 * they are converted to written only after the IO is complete.  Until they
 * are mapped, these blocks appear as holes, so dio_zero_block() will assume
 * that it needs to zero out portions of the start and/or end block.  If two
 * AIO threads are at work on the same unwritten block, they must be
 * synchronized or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;
	size_t count = iov_length(iov, nr_segs);
	loff_t final_size = pos + count;

	if (pos >= inode->i_size)
		return 0;

	if ((pos & blockmask) || (final_size & blockmask))
		return 1;

	return 0;
}
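/*
 * Illustrative example (not part of the original source): with a 4096-byte
 * block size, blockmask == 0xfff.  An AIO write at pos == 6144 of 1024
 * bytes inside i_size gives final_size == 7168; since 6144 & 0xfff ==
 * 0x800 != 0, the IO is reported as unaligned and will be serialized by
 * the caller.
 */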
static ssize_t
ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
		    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct blk_plug plug;
	int unaligned_aio = 0;
	ssize_t ret;
	int overwrite = 0;
	size_t length = iov_length(iov, nr_segs);

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb))
		unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);

	/* Unaligned direct AIO must be serialized; see comment above */
	if (unaligned_aio) {
		static unsigned long unaligned_warn_time;

		/* Warn about this once per day */
		if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ))
			ext4_msg(inode->i_sb, KERN_WARNING,
				 "Unaligned AIO/DIO on inode %ld by %s; "
				 "performance will be poor.",
				 inode->i_ino, current->comm);
		mutex_lock(ext4_aio_mutex(inode));
		ext4_unwritten_wait(inode);
	}

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	blk_start_plug(&plug);

	iocb->private = &overwrite;

	/* check whether we do a DIO overwrite or not */
	if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
	    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
		struct ext4_map_blocks map;
		unsigned int blkbits = inode->i_blkbits;
		int err, len;

		map.m_lblk = pos >> blkbits;
		map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
			- map.m_lblk;
		len = map.m_len;

		err = ext4_map_blocks(NULL, inode, &map, 0);
		/*
		 * 'err == len' means that all of the blocks have been
		 * preallocated, whether or not they are initialized.  To
		 * exclude uninitialized extents we still need to check
		 * m_flags.  There are two ways an initialized extent is
		 * indicated:
		 * 1) if we hit the extent cache, the EXT4_MAP_MAPPED flag
		 *    is returned;
		 * 2) if we do a real lookup, uninitialized extents are
		 *    returned without flags set.
		 * So we check both conditions.
		 */
		if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
			overwrite = 1;
	}

	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 || ret == -EIOCBQUEUED) {
		ssize_t err;

		err = generic_write_sync(file, pos, ret);
		if (err < 0 && ret > 0)
			ret = err;
	}
	blk_finish_plug(&plug);

	if (unaligned_aio)
		mutex_unlock(ext4_aio_mutex(inode));

	return ret;
}
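/*
 * Illustrative example (not part of the original source): for a DIO write
 * at pos == 5000 of length == 3000 on a 4k-block inode (blkbits == 12),
 * the overwrite check in ext4_file_dio_write() above maps
 * m_lblk == 5000 >> 12 == 1 and m_len ==
 * (EXT4_BLOCK_ALIGN(8000, 12) >> 12) - 1 == 2 - 1 == 1, i.e. the single
 * block the write touches.  If that block is already mapped and
 * initialized, the write proceeds as an overwrite without allocation.
 */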
static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	ssize_t ret;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		size_t length = iov_length(iov, nr_segs);

		if ((pos > sbi->s_bitmap_maxbytes ||
		    (pos == sbi->s_bitmap_maxbytes && length > 0)))
			return -EFBIG;

		if (pos + length > sbi->s_bitmap_maxbytes) {
			nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
					      sbi->s_bitmap_maxbytes - pos);
		}
	}

	if (unlikely(iocb->ki_filp->f_flags & O_DIRECT))
		ret = ext4_file_dio_write(iocb, iov, nr_segs, pos);
	else
		ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

	return ret;
}

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= ext4_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ext4_file_vm_ops;
	return 0;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct path path;
	char buf[64], *cp;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
		struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);

		spin_lock(&inode->i_lock);
		if (!ei->jinode) {
			if (!jinode) {
				spin_unlock(&inode->i_lock);
				return -ENOMEM;
			}
			ei->jinode = jinode;
			jbd2_journal_init_jbd_inode(ei->jinode, inode);
			jinode = NULL;
		}
		spin_unlock(&inode->i_lock);
		if (unlikely(jinode != NULL))
			jbd2_free_inode(jinode);
	}
	return dquot_file_open(inode, filp);
}
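/*
 * Illustrative note (not part of the original source): the jbd2_inode in
 * ext4_file_open() above is allocated with GFP_KERNEL *before* i_lock is
 * taken, because the allocation may sleep and sleeping is not allowed
 * under a spinlock.  If another opener raced in and installed its own
 * jinode first, ours is left non-NULL and freed after the lock is dropped.
 */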
/*
 * Here we use ext4_map_blocks() to get a block mapping for an
 * extent-based file rather than ext4_ext_walk_space(), because this
 * lets us implement SEEK_DATA/SEEK_HOLE for block-mapped and
 * extent-mapped files in the same function.  Once the extent status
 * tree has been fully implemented, it will track all extent status
 * for a file and we will be able to use it directly to retrieve the
 * offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look
 * up the page cache to check whether there is any data in the range
 * [startoff, endoff], because if this range contains an unwritten
 * extent, we treat the extent as data or as a hole according to
 * whether the page cache has data for it or not.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     struct ext4_map_blocks *map,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

	index = startoff >> PAGE_CACHE_SHIFT;
	end = endoff >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * If this is the first time through the loop and
			 * the offset is not beyond the end offset, it is
			 * a hole at this offset.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * If this is the first time through the loop and the
		 * offset is smaller than the first page offset, it is a
		 * hole at this offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is not beyond the end of
			 * the given range, it is a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			lastoff = page_offset(page);
			bh = head = page_buffers(page);
			do {
				if (buffer_uptodate(bh) ||
				    buffer_unwritten(bh)) {
					if (whence == SEEK_DATA)
						found = 1;
				} else {
					if (whence == SEEK_HOLE)
						found = 1;
				}
				if (found) {
					*offset = max_t(loff_t,
							startoff, lastoff);
					unlock_page(page);
					goto out;
				}
				lastoff += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * We got fewer pages than we asked for, so there must be
		 * a hole in the remainder of the range.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}
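/*
 * Illustrative example (not part of the original source): suppose an
 * unwritten extent covers blocks 8..15 of a 4k-block file (bytes
 * 32768..65535) and only the page at offset 40960 is in the page cache
 * with uptodate buffers.  A SEEK_DATA scan starting at 32768 finds that
 * page and reports 40960 as the data offset; a SEEK_HOLE scan notices
 * that the first page of the range is absent from the cache and reports
 * a hole at the starting offset instead.
 */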
/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is a delayed extent at this offset,
		 * it is treated as data.
		 */
		es.start = last;
		(void)ext4_es_find_extent(inode, &es);
		if (last >= es.start &&
		    last < es.start + es.len) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is an unwritten extent at this offset,
		 * it is treated as data or as a hole according to
		 * whether the page cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
							      &map, &dataoff);
			if (unwritten)
				break;
		}

		last++;
		dataoff = (loff_t)last << blkbits;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (dataoff > isize)
		return -ENXIO;

	if (dataoff < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
		return -EINVAL;
	if (dataoff > maxsize)
		return -EINVAL;

	if (dataoff != file->f_pos) {
		file->f_pos = dataoff;
		file->f_version = 0;
	}

	return dataoff;
}
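/*
 * Illustrative example (not part of the original source): for a sparse
 * 4k-block file whose first written block is block 4, lseek(fd, 0,
 * SEEK_DATA) enters the loop above with last == 0; ext4_map_blocks()
 * finds no mapping until last == 4, so the call returns 4 << 12 == 16384.
 */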
/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			last += ret;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is a delayed extent at this offset,
		 * we will skip this extent.
		 */
		es.start = last;
		(void)ext4_es_find_extent(inode, &es);
		if (last >= es.start &&
		    last < es.start + es.len) {
			last = es.start + es.len;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is an unwritten extent at this offset,
		 * it is treated as data or as a hole according to
		 * whether the page cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
							      &map, &holeoff);
			if (!unwritten) {
				last += ret;
				holeoff = (loff_t)last << blkbits;
				continue;
			}
		}

		/* found a hole */
		break;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (holeoff > isize)
		holeoff = isize;

	if (holeoff < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
		return -EINVAL;
	if (holeoff > maxsize)
		return -EINVAL;

	if (holeoff != file->f_pos) {
		file->f_pos = holeoff;
		file->f_version = 0;
	}

	return holeoff;
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= ext4_file_write,
	.unlocked_ioctl	= ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.fiemap		= ext4_fiemap,
};
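/*
 * Illustrative userspace sketch (not part of the original file): walking
 * the data extents of a sparse file via the SEEK_DATA/SEEK_HOLE support
 * above.  process_range() is a hypothetical callback supplied by the
 * application.
 *
 *	off_t data = 0, hole;
 *
 *	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
 *		hole = lseek(fd, data, SEEK_HOLE);
 *		if (hole < 0)
 *			break;
 *		process_range(fd, data, hole - data);
 *		data = hole;
 *	}
 *
 * lseek() fails with ENXIO once no further data exists past the offset,
 * which terminates the loop.
 */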