/*
 * Copyright (C) International Business Machines Corp., 2000-2005
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint	pagealloc;	/* # of page allocations */
	uint	pagefree;	/* # of page frees */
	uint	lockwait;	/* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit(META_locked, &mp->flag);
	wake_up(&mp->wait);
}

/*
 * Wait for a locked metapage to become available.  The caller holds
 * mp->page locked; the page lock is dropped while sleeping and reacquired
 * before retrying, so the current holder of the metapage can make progress.
 */
static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);
	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			unlock_page(mp->page);
			schedule();
			lock_page(mp->page);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->page locked
 */
static inline void lock_metapage(struct metapage *mp)
{
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}

#define METAPOOL_MIN_PAGES 32
static kmem_cache_t *metapage_cache;
static mempool_t *metapage_mempool;

#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)

#if MPS_PER_PAGE > 1

struct meta_anchor {
	int mp_count;
	atomic_t io_count;
	struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

static inline struct metapage *page_to_mp(struct page *page, uint offset)
{
	if (!PagePrivate(page))
		return NULL;
	return mp_anchor(page)->mp[offset >> L2PSIZE];
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a;
	int index;
	int l2mp_blocks;	/* log2 blocks per metapage */

	if (PagePrivate(page))
		a = mp_anchor(page);
	else {
		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
		if (!a)
			return -ENOMEM;
		set_page_private(page, (unsigned long)a);
		SetPagePrivate(page);
		kmap(page);
	}

	if (mp) {
		l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
		a->mp_count++;
		a->mp[index] = mp;
	}

	return 0;
}

static inline void
remove_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a = mp_anchor(page);
	int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		kunmap(page);
	}
}

static inline void inc_io(struct page *page)
{
	atomic_inc(&mp_anchor(page)->io_count);
}

static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
	if (atomic_dec_and_test(&mp_anchor(page)->io_count))
		handler(page);
}

#else
static inline struct metapage *page_to_mp(struct page *page, uint offset)
{
	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	if (mp) {
		set_page_private(page, (unsigned long)mp);
		SetPagePrivate(page);
		kmap(page);
	}
	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	set_page_private(page, 0);
	ClearPagePrivate(page);
	kunmap(page);
}

#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)

#endif

static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
	struct metapage *mp = (struct metapage *)foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		mp->lid = 0;
		mp->lsn = 0;
		mp->flag = 0;
		mp->data = NULL;
		mp->clsn = 0;
		mp->log = NULL;
		set_bit(META_free, &mp->flag);
		init_waitqueue_head(&mp->wait);
	}
}

static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
	return mempool_alloc(metapage_mempool, gfp_mask);
}

static inline void free_metapage(struct metapage *mp)
{
	mp->flag = 0;
	set_bit(META_free, &mp->flag);

	mempool_free(mp, metapage_mempool);
}

int __init metapage_init(void)
{
	/*
	 * Allocate the metapage structures
	 */
	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
					   0, 0, init_once, NULL);
	if (metapage_cache == NULL)
		return -ENOMEM;

	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
						    metapage_cache);

	if (metapage_mempool == NULL) {
		kmem_cache_destroy(metapage_cache);
		return -ENOMEM;
	}

	return 0;
}

void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}

static inline void drop_metapage(struct page *page, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(page, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}

/*
 * Metapage address space operations
 */

static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    unsigned int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_blksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr,
			      len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}

static void last_read_complete(struct page *page)
{
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
}

static int metapage_read_end_io(struct bio *bio, unsigned int bytes_done,
				int err)
{
	struct page *page = bio->bi_private;

	if (bio->bi_size)
		return 1;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
		SetPageError(page);
	}

	dec_io(page, last_read_complete);
	bio_put(bio);

	return 0;
}

static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;
	/*
	 * This can race.  Recheck that log hasn't been set to null, and after
	 * acquiring logsync lock, recheck lsn
	 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}

static void last_write_complete(struct page *page)
{
	struct metapage *mp;
	unsigned int offset;

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * I'd like to call drop_metapage here, but I don't think it's
		 * safe unless I have the page locked
		 */
	}
	end_page_writeback(page);
}

static int metapage_write_end_io(struct bio *bio, unsigned int bytes_done,
				 int err)
{
	struct page *page = bio->bi_private;

	BUG_ON(!PagePrivate(page));

	if (bio->bi_size)
		return 1;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		SetPageError(page);
	}
	dec_io(page, last_write_complete);
	bio_put(bio);
	return 0;
}

/*
 * Write back the dirty metapages within a page, merging ranges that are
 * contiguous on disk into a single bio.  Metapages held with nohomeok are
 * skipped (left dirty) and the page is redirtied for a later pass.
 */
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio = NULL;
	unsigned int block_offset;	/* block offset of mp within page */
	struct inode *inode = page->mapping->host;
	unsigned int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	unsigned int len;
	unsigned int xlen;
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	sector_t pblock;
	sector_t next_block = 0;
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	unsigned int offset;

	page_start = (sector_t)page->index <<
		     (PAGE_CACHE_SHIFT - inode->i_blkbits);
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this page isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				set_bit(META_io, &mp->flag);
				continue;
			}
			/* Not contiguous */
			if (bio_add_page(bio, page, bio_bytes, bio_offset) <
			    bio_bytes)
				goto add_failed;
			/*
			 * Increment counter before submitting i/o to keep
			 * count from hitting zero before we're through
			 */
			inc_io(page);
			if (!bio->bi_size)
				goto dump_bio;
			submit_bio(WRITE, bio);
			bio = NULL;
		} else {
			set_page_writeback(page);
			inc_io(page);
		}
		xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			/* Need better error handling */
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			dec_io(page, last_write_complete);
			continue;
		}
		set_bit(META_io, &mp->flag);
		len = min(xlen, (uint) JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(GFP_NOFS, 1);
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = page;

		/* Don't call bio_add_page yet, we may add to this vec */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
			goto add_failed;
		if (!bio->bi_size)
			goto dump_bio;

		submit_bio(WRITE, bio);
	}
	if (redirty)
		redirty_page_for_writepage(wbc, page);

	unlock_page(page);

	return 0;
add_failed:
	/* We should never reach here, since we're only adding one vec */
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	goto skip;
dump_bio:
	dump_mem("bio", bio, sizeof(*bio));
skip:
	bio_put(bio);
	unlock_page(page);
	dec_io(page, last_write_complete);

	return -EIO;
}

static int metapage_readpage(struct file *fp, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct bio *bio = NULL;
	unsigned int block_offset;
	unsigned int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
	sector_t page_start;	/* address of page in fs blocks */
	sector_t pblock;
	unsigned int xlen;
	unsigned int len;
	unsigned int offset;

	BUG_ON(!PageLocked(page));
	page_start = (sector_t)page->index <<
		     (PAGE_CACHE_SHIFT - inode->i_blkbits);

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			if (!PagePrivate(page))
				insert_metapage(page, NULL);
			inc_io(page);
			if (bio)
				submit_bio(READ, bio);

			bio = bio_alloc(GFP_NOFS, 1);
			bio->bi_bdev = inode->i_sb->s_bdev;
			bio->bi_sector = pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = page;
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			if (bio_add_page(bio, page, len, offset) < len)
				goto add_failed;
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(READ, bio);
	else
		unlock_page(page);

	return 0;

add_failed:
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	bio_put(bio);
	dec_io(page, last_read_complete);
	return -EIO;
}

static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct metapage *mp;
	int busy = 0;
	unsigned int offset;

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp)
			continue;

		jfs_info("metapage_releasepage: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			busy = 1;
			continue;
		}
		wait_on_page_writeback(page);
		//WARN_ON(test_bit(META_dirty, &mp->flag));
		if (test_bit(META_dirty, &mp->flag)) {
			dump_mem("dirty mp in metapage_releasepage", mp,
				 sizeof(struct metapage));
			dump_mem("page", page, sizeof(struct page));
			dump_stack();
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(page, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	if (busy)
		return -1;

	return 0;
}

static void metapage_invalidatepage(struct page *page, unsigned long offset)
{
	BUG_ON(offset);

	BUG_ON(PageWriteback(page));

	metapage_releasepage(page, 0);
}

struct address_space_operations jfs_metapage_aops = {
	.readpage	= metapage_readpage,
	.writepage	= metapage_writepage,
	.sync_page	= block_sync_page,
	.releasepage	= metapage_releasepage,
	.invalidatepage	= metapage_invalidatepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
};

struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
				unsigned int size, int absolute,
				unsigned long new)
{
	int l2BlocksPerPage;
	int l2bsize;
	struct address_space *mapping;
	struct metapage *mp = NULL;
	struct page *page;
	unsigned long page_index;
	unsigned long page_offset;

	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	l2bsize = inode->i_blkbits;
	l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
	if ((page_offset + size) > PAGE_CACHE_SIZE) {
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size = %d", lblock, size);
		dump_stack();
		return NULL;
	}
	if (absolute)
		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
	else {
		/*
		 * If an nfs client tries to read an inode that is larger
		 * than any existing inodes, we may try to read past the
		 * end of the inode map
		 */
		if ((lblock << inode->i_blkbits) >= inode->i_size)
			return NULL;
		mapping = inode->i_mapping;
	}

	if (new && (PSIZE == PAGE_CACHE_SIZE)) {
		page = grab_cache_page(mapping, page_index);
		if (!page) {
			jfs_err("grab_cache_page failed!");
			return NULL;
		}
		SetPageUptodate(page);
	} else {
		page = read_cache_page(mapping, page_index,
				(filler_t *)mapping->a_ops->readpage, NULL);
		if (IS_ERR(page) || !PageUptodate(page)) {
			jfs_err("read_cache_page failed!");
			return NULL;
		}
		lock_page(page);
	}

	mp = page_to_mp(page, page_offset);
	if (mp) {
		if (mp->logical_size != size) {
			jfs_error(inode->i_sb,
				  "__get_metapage: mp->logical_size != size");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			dump_stack();
			goto unlock;
		}
		mp->count++;
		lock_metapage(mp);
		if (test_bit(META_discard, &mp->flag)) {
			if (!new) {
				jfs_error(inode->i_sb,
					  "__get_metapage: using a "
					  "discarded metapage");
				discard_metapage(mp);
				goto unlock;
			}
			clear_bit(META_discard, &mp->flag);
		}
	} else {
		INCREMENT(mpStat.pagealloc);
		mp = alloc_metapage(GFP_NOFS);
		mp->page = page;
		mp->flag = 0;
		mp->xflag = COMMIT_PAGE;
		mp->count = 1;
		mp->nohomeok = 0;
		mp->logical_size = size;
		mp->data = page_address(page) + page_offset;
		mp->index = lblock;
		if (unlikely(insert_metapage(page, mp))) {
			free_metapage(mp);
			goto unlock;
		}
		lock_metapage(mp);
	}

	if (new) {
		jfs_info("zeroing mp = 0x%p", mp);
		memset(mp->data, 0, PSIZE);
	}

	unlock_page(page);
	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
	return mp;

unlock:
	unlock_page(page);
	return NULL;
}

void grab_metapage(struct metapage * mp)
{
	jfs_info("grab_metapage: mp = 0x%p", mp);
	page_cache_get(mp->page);
	lock_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
}

void force_metapage(struct metapage *mp)
{
	struct page *page = mp->page;
	jfs_info("force_metapage: mp = 0x%p", mp);
	set_bit(META_forcewrite, &mp->flag);
	clear_bit(META_sync, &mp->flag);
	page_cache_get(page);
	lock_page(page);
	set_page_dirty(page);
	write_one_page(page, 1);
	clear_bit(META_forcewrite, &mp->flag);
	page_cache_release(page);
}

void hold_metapage(struct metapage *mp)
{
	lock_page(mp->page);
}

void put_metapage(struct metapage *mp)
{
	if (mp->count || mp->nohomeok) {
		/* Someone else will release this */
		unlock_page(mp->page);
		return;
	}
	page_cache_get(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
	release_metapage(mp);
}

void release_metapage(struct metapage * mp)
{
	struct page *page = mp->page;
	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

	BUG_ON(!page);

	lock_page(page);
	unlock_metapage(mp);

	assert(mp->count);
	if (--mp->count || mp->nohomeok) {
		unlock_page(page);
		page_cache_release(page);
		return;
	}

	if (test_bit(META_dirty, &mp->flag)) {
		set_page_dirty(page);
		if (test_bit(META_sync, &mp->flag)) {
			clear_bit(META_sync, &mp->flag);
			write_one_page(page, 1);
			lock_page(page); /* write_one_page unlocks the page */
		}
	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
		remove_from_logsync(mp);

#if MPS_PER_PAGE == 1
	/*
	 * If we know this is the only thing in the page, we can throw
	 * the page out of the page cache.  If pages are larger, we
	 * don't want to do this.
	 */

	/* Retest mp->count since we may have released page lock */
	if (test_bit(META_discard, &mp->flag) && !mp->count) {
		clear_page_dirty(page);
		ClearPageUptodate(page);
	}
#else
	/* Try to keep metapages from using up too much memory */
	drop_metapage(page, mp);
#endif
	unlock_page(page);
	page_cache_release(page);
}

void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
	sector_t lblock;
	int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits;
	int BlocksPerPage = 1 << l2BlocksPerPage;
	/* All callers are interested in block device's mapping */
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
	struct metapage *mp;
	struct page *page;
	unsigned int offset;

	/*
	 * Mark metapages to discard.  They will eventually be
	 * released, but should not be written.
	 */
	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {
		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
		if (!page)
			continue;
		for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
			mp = page_to_mp(page, offset);
			if (!mp)
				continue;
			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

			clear_bit(META_dirty, &mp->flag);
			set_bit(META_discard, &mp->flag);
			if (mp->lsn)
				remove_from_logsync(mp);
		}
		unlock_page(page);
		page_cache_release(page);
	}
}

#ifdef CONFIG_JFS_STATISTICS
int jfs_mpstat_read(char *buffer, char **start, off_t offset, int length,
		    int *eof, void *data)
{
	int len = 0;
	off_t begin;

	len += sprintf(buffer,
		       "JFS Metapage statistics\n"
		       "=======================\n"
		       "page allocations = %d\n"
		       "page frees = %d\n"
		       "lock waits = %d\n",
		       mpStat.pagealloc,
		       mpStat.pagefree,
		       mpStat.lockwait);

	begin = offset;
	*start = buffer + begin;
	len -= begin;

	if (len > length)
		len = length;
	else
		*eof = 1;

	if (len < 0)
		len = 0;

	return len;
}
#endif