/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>

#define MLOG_MASK_PREFIX ML_FILE_IO
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "super.h"
#include "symlink.h"

#include "buffer_head_io.h"

static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int status;
	struct ocfs2_dinode *fe = NULL;
	struct buffer_head *bh = NULL;
	struct buffer_head *buffer_cache_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	void *kaddr;

	mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
		   (unsigned long long)iblock, bh_result, create);

	BUG_ON(ocfs2_inode_is_fast_symlink(inode));

	if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
		mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
		     (unsigned long long)iblock);
		goto bail;
	}

	status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
				  OCFS2_I(inode)->ip_blkno,
				  &bh, OCFS2_BH_CACHED, inode);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

	if (!OCFS2_IS_VALID_DINODE(fe)) {
		mlog(ML_ERROR, "Invalid dinode #%llu: signature = %.*s\n",
		     (unsigned long long)fe->i_blkno, 7, fe->i_signature);
		goto bail;
	}

	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
						    le32_to_cpu(fe->i_clusters))) {
		mlog(ML_ERROR, "block offset is outside the allocated size: "
		     "%llu\n", (unsigned long long)iblock);
		goto bail;
	}

	/* We don't use the page cache to create symlink data, so if
	 * need be, copy it over from the buffer cache. */
	if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
		u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
			    iblock;
		buffer_cache_bh = sb_getblk(osb->sb, blkno);
		if (!buffer_cache_bh) {
			mlog(ML_ERROR, "couldn't getblock for symlink!\n");
			goto bail;
		}

		/* we haven't locked out transactions, so a commit
		 * could've happened. Since we've got a reference on
		 * the bh, even if it commits while we're doing the
		 * copy, the data is still good. */
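		/* bh_result->b_size here is the file system block
		 * size, so the destination offset of the copy below
		 * is simply b_size * iblock: with 4K blocks, for
		 * example, iblock 1 fills bytes 4096..8191 of the
		 * page. */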
		if (buffer_jbd(buffer_cache_bh)
		    && ocfs2_inode_is_new(inode)) {
			kaddr = kmap_atomic(bh_result->b_page, KM_USER0);
			if (!kaddr) {
				mlog(ML_ERROR, "couldn't kmap!\n");
				goto bail;
			}
			memcpy(kaddr + (bh_result->b_size * iblock),
			       buffer_cache_bh->b_data,
			       bh_result->b_size);
			kunmap_atomic(kaddr, KM_USER0);
			set_buffer_uptodate(bh_result);
		}
		brelse(buffer_cache_bh);
	}

	map_bh(bh_result, inode->i_sb,
	       le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

	err = 0;

bail:
	if (bh)
		brelse(bh);

	mlog_exit(err);
	return err;
}

static int ocfs2_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh_result, int create)
{
	int err = 0;
	u64 p_blkno, past_eof;

	mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
		   (unsigned long long)iblock, bh_result, create);

	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
		mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
		     inode, inode->i_ino);

	if (S_ISLNK(inode->i_mode)) {
		/* this always does I/O for some reason. */
		err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
		goto bail;
	}

	/* this can happen if another node truncates after our extend! */
	spin_lock(&OCFS2_I(inode)->ip_lock);
	if (iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
					       OCFS2_I(inode)->ip_clusters))
		err = -EIO;
	spin_unlock(&OCFS2_I(inode)->ip_lock);
	if (err)
		goto bail;

	err = ocfs2_extent_map_get_blocks(inode, iblock, 1, &p_blkno,
					  NULL);
	if (err) {
		mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
		     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
		     (unsigned long long)p_blkno);
		goto bail;
	}

	map_bh(bh_result, inode->i_sb, p_blkno);

	if (bh_result->b_blocknr == 0) {
		err = -EIO;
		mlog(ML_ERROR, "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
		     (unsigned long long)iblock,
		     (unsigned long long)p_blkno,
		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
	}

	past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
	mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
	     (unsigned long long)past_eof);

	if (create && (iblock >= past_eof))
		set_buffer_new(bh_result);

bail:
	if (err < 0)
		err = -EIO;

	mlog_exit(err);
	return err;
}

static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int ret, unlock = 1;

	mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));

	ret = ocfs2_meta_lock_with_page(inode, NULL, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	down_read(&OCFS2_I(inode)->ip_alloc_sem);

	/*
	 * i_size might have just been updated as we grabbed the meta lock.  We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_page->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here.  Callers
	 * (generic_file_read, fault->nopage) are clever enough to check i_size
	 * and notice that the page they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
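	/* 'start' is this page's byte offset in the file (with 4K
	 * pages, page index 3 covers bytes 12288..16383), so a page
	 * that starts at or past i_size can be zero-filled and marked
	 * uptodate without any disk I/O. */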
	if (start >= i_size_read(inode)) {
		char *addr = kmap(page);
		memset(addr, 0, PAGE_SIZE);
		flush_dcache_page(page);
		kunmap(page);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	ret = ocfs2_data_lock_with_page(inode, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out_alloc;
	}

	ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

	ocfs2_data_unlock(inode, 0);
out_alloc:
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
	ocfs2_meta_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	mlog_exit(ret);
	return ret;
}

/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing.  It can't block on any cluster locks
 * during block mapping.  It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	mlog_entry("(0x%p)\n", page);

	ret = block_write_full_page(page, ocfs2_get_block, wbc);

	mlog_exit(ret);

	return ret;
}

/*
 * ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called
 * from loopback.  It must be able to perform its own locking around
 * ocfs2_get_block().
 */
int ocfs2_prepare_write(struct file *file, struct page *page,
			unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	int ret;

	mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to);

	ret = ocfs2_meta_lock_with_page(inode, NULL, NULL, 0, page);
	if (ret != 0) {
		mlog_errno(ret);
		goto out;
	}

	down_read(&OCFS2_I(inode)->ip_alloc_sem);

	ret = block_prepare_write(page, from, to, ocfs2_get_block);

	up_read(&OCFS2_I(inode)->ip_alloc_sem);

	ocfs2_meta_unlock(inode, 0);
out:
	mlog_exit(ret);
	return ret;
}
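/* A note on lock ordering in the paths above and below, implicit in
 * the code rather than spelled out anywhere: the cluster metadata
 * lock is always taken first, then ip_alloc_sem (where it is taken
 * at all), and the cluster data lock last.  They are dropped in the
 * reverse order. */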
/* Taken from ext3.  We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode,
							 struct page *page,
							 unsigned from,
							 unsigned to)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_journal_handle *handle = NULL;
	int ret = 0;

	handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS);
	if (!handle) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	if (ocfs2_should_order_data(inode)) {
		ret = walk_page_buffers(handle->k_handle,
					page_buffers(page),
					from, to, NULL,
					ocfs2_journal_dirty_data);
		if (ret < 0)
			mlog_errno(ret);
	}
out:
	if (ret) {
		if (handle)
			ocfs2_commit_trans(handle);
		handle = ERR_PTR(ret);
	}
	return handle;
}

static int ocfs2_commit_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	int ret, extending = 0, locklevel = 0;
	loff_t new_i_size;
	struct buffer_head *di_bh = NULL;
	struct inode *inode = page->mapping->host;
	struct ocfs2_journal_handle *handle = NULL;

	mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to);

	/* NOTE: ocfs2_file_aio_write has ensured that it's safe for
	 * us to sample inode->i_size here without the metadata lock:
	 *
	 * 1) We're currently holding the inode alloc lock, so no
	 *    other nodes can change it underneath us.
	 *
	 * 2) We've had to take the metadata lock at least once
	 *    already to check for extending writes, hence ensuring
	 *    that our current copy is also up to date.
	 */
	new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	if (new_i_size > i_size_read(inode)) {
		extending = 1;
		locklevel = 1;
	}

	ret = ocfs2_meta_lock_with_page(inode, NULL, &di_bh, locklevel, page);
	if (ret != 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_data_lock_with_page(inode, 1, page);
	if (ret != 0) {
		mlog_errno(ret);
		goto out_unlock_meta;
	}

	if (extending) {
		handle = ocfs2_start_walk_page_trans(inode, page, from, to);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			handle = NULL;
			goto out_unlock_data;
		}

		/* Mark our buffer early.  We'd rather catch this error up here
		 * as opposed to after a successful commit_write which would
		 * require us to set back inode->i_size. */
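		/* ocfs2_journal_access() has to come before any
		 * modification of di_bh so that jbd can take its
		 * before-image of the buffer; failing here, before
		 * generic_commit_write() runs, means i_size has not
		 * yet been touched. */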
		ret = ocfs2_journal_access(handle, inode, di_bh,
					   OCFS2_JOURNAL_ACCESS_WRITE);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_commit;
		}
	}

	/* might update i_size */
	ret = generic_commit_write(file, page, from, to);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

	if (extending) {
		loff_t size = (u64) i_size_read(inode);
		struct ocfs2_dinode *di =
			(struct ocfs2_dinode *)di_bh->b_data;

		/* ocfs2_mark_inode_dirty is too heavy to use here. */
		inode->i_blocks = ocfs2_align_bytes_to_sectors(size);
		inode->i_ctime = inode->i_mtime = CURRENT_TIME;

		di->i_size = cpu_to_le64(size);
		di->i_ctime = di->i_mtime =
			cpu_to_le64(inode->i_mtime.tv_sec);
		di->i_ctime_nsec = di->i_mtime_nsec =
			cpu_to_le32(inode->i_mtime.tv_nsec);

		ret = ocfs2_journal_dirty(handle, di_bh);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_commit;
		}
	}

	BUG_ON(extending && (i_size_read(inode) != new_i_size));

out_commit:
	if (handle)
		ocfs2_commit_trans(handle);
out_unlock_data:
	ocfs2_data_unlock(inode, 1);
out_unlock_meta:
	ocfs2_meta_unlock(inode, locklevel);
out:
	if (di_bh)
		brelse(di_bh);

	mlog_exit(ret);
	return ret;
}

static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
	sector_t status;
	u64 p_blkno = 0;
	int err = 0;
	struct inode *inode = mapping->host;

	mlog_entry("(block = %llu)\n", (unsigned long long)block);

	/* We don't need to lock journal system files, since they aren't
	 * accessed concurrently from multiple nodes.
	 */
	if (!INODE_JOURNAL(inode)) {
		err = ocfs2_meta_lock(inode, NULL, NULL, 0);
		if (err) {
			if (err != -ENOENT)
				mlog_errno(err);
			goto bail;
		}
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
	}

	err = ocfs2_extent_map_get_blocks(inode, block, 1, &p_blkno,
					  NULL);

	if (!INODE_JOURNAL(inode)) {
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ocfs2_meta_unlock(inode, 0);
	}

	if (err) {
		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
		     (unsigned long long)block);
		mlog_errno(err);
		goto bail;
	}

bail:
	status = err ? 0 : p_blkno;

	mlog_exit((int)status);

	return status;
}

/*
 * TODO: Make this into a generic get_blocks function.
 *
 * From do_direct_io in direct-io.c:
 *  "So what we do is to permit the ->get_blocks function to populate
 *   bh.b_size with the size of IO which is permitted at this offset and
 *   this i_blkbits."
 *
 * This function is called directly from get_more_blocks in direct-io.c.
 *
 * called like this: dio->get_blocks(dio->inode, fs_startblk,
 *					fs_count, map_bh, dio->rw == WRITE);
 */
static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
				      struct buffer_head *bh_result, int create)
{
	int ret;
	u64 vbo_max; /* file offset, max_blocks from iblock */
	u64 p_blkno;
	int contig_blocks;
	unsigned char blocksize_bits;
	unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;

	if (!inode || !bh_result) {
		mlog(ML_ERROR, "inode or bh_result is null\n");
		return -EIO;
	}

	blocksize_bits = inode->i_sb->s_blocksize_bits;

	/* This function won't even be called if the request isn't all
	 * nicely aligned and of the right size, so there's no need
	 * for us to check any of that. */
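	/* max_blocks above is the request size in file system blocks:
	 * with 4K blocks (blocksize_bits == 12), a 64K b_size gives
	 * max_blocks == 16, and vbo_max below is then the byte offset
	 * just past the end of the request. */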
	vbo_max = ((u64)iblock + max_blocks) << blocksize_bits;

	spin_lock(&OCFS2_I(inode)->ip_lock);
	if ((iblock + max_blocks) >
	    ocfs2_clusters_to_blocks(inode->i_sb,
				     OCFS2_I(inode)->ip_clusters)) {
		spin_unlock(&OCFS2_I(inode)->ip_lock);
		ret = -EIO;
		goto bail;
	}
	spin_unlock(&OCFS2_I(inode)->ip_lock);

	/* This figures out the size of the next contiguous block, and
	 * our logical offset */
	ret = ocfs2_extent_map_get_blocks(inode, iblock, 1, &p_blkno,
					  &contig_blocks);
	if (ret) {
		mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
		     (unsigned long long)iblock);
		ret = -EIO;
		goto bail;
	}

	map_bh(bh_result, inode->i_sb, p_blkno);

	/* make sure we don't map more than max_blocks blocks here as
	 * that's all the kernel will handle at this point. */
	if (max_blocks < contig_blocks)
		contig_blocks = max_blocks;
	bh_result->b_size = contig_blocks << blocksize_bits;
bail:
	return ret;
}

/*
 * ocfs2_dio_end_io is called by the dio core when a dio is finished.  We're
 * particularly interested in the aio/dio case.  Just as the core uses
 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
 * truncation on another.
 */
static void ocfs2_dio_end_io(struct kiocb *iocb,
			     loff_t offset,
			     ssize_t bytes,
			     void *private)
{
	struct inode *inode = iocb->ki_filp->f_dentry->d_inode;

	/* this io's submitter should not have unlocked this before we could */
	BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
	ocfs2_iocb_clear_rw_locked(iocb);
	up_read(&inode->i_alloc_sem);
	ocfs2_rw_unlock(inode, 0);
}

static ssize_t ocfs2_direct_IO(int rw,
			       struct kiocb *iocb,
			       const struct iovec *iov,
			       loff_t offset,
			       unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
	int ret;

	mlog_entry_void();
	ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
					    inode->i_sb->s_bdev, iov, offset,
					    nr_segs,
					    ocfs2_direct_IO_get_blocks,
					    ocfs2_dio_end_io);
	mlog_exit(ret);
	return ret;
}

struct address_space_operations ocfs2_aops = {
	.readpage	= ocfs2_readpage,
	.writepage	= ocfs2_writepage,
	.prepare_write	= ocfs2_prepare_write,
	.commit_write	= ocfs2_commit_write,
	.bmap		= ocfs2_bmap,
	.sync_page	= block_sync_page,
	.direct_IO	= ocfs2_direct_IO
};
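
/* These operations get attached during inode initialization, outside
 * this file, via inode->i_mapping->a_ops = &ocfs2_aops. */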