// SPDX-License-Identifier: GPL-2.0+
/*
 * page.c - buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi and Seiji Kihara.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"


#define NILFS_BUFFER_INHERENT_BITS \
	(BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) | \
	 BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked))

static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
		       int blkbits, unsigned long b_state)
{
	unsigned long first_block;
	struct buffer_head *bh;

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << blkbits, b_state);

	first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
	bh = nilfs_page_get_nth_block(page, block - first_block);

	touch_buffer(bh);
	wait_on_buffer(bh);
	return bh;
}

struct buffer_head *nilfs_grab_buffer(struct inode *inode,
				      struct address_space *mapping,
				      unsigned long blkoff,
				      unsigned long b_state)
{
	int blkbits = inode->i_blkbits;
	pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
	struct page *page;
	struct buffer_head *bh;

	page = grab_cache_page(mapping, index);
	if (unlikely(!page))
		return NULL;

	bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
	if (unlikely(!bh)) {
		unlock_page(page);
		put_page(page);
		return NULL;
	}
	return bh;
}
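/*
 * Illustrative sketch (not part of the original file): a typical caller
 * of nilfs_grab_buffer() gets back the buffer head with an elevated
 * refcount and with the underlying page locked and referenced, so all
 * three must be released.  The error handling shown is an assumption
 * for the example:
 *
 *	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
 *	if (unlikely(!bh))
 *		return -ENOMEM;		// page/buffer allocation failed
 *	// ... read or initialize the block through bh ...
 *	unlock_page(bh->b_page);
 *	put_page(bh->b_page);
 *	brelse(bh);
 */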
/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	const unsigned long clear_bits =
		(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
		 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
		 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

	lock_buffer(bh);
	set_mask_bits(&bh->b_state, clear_bits, 0);
	if (nilfs_page_buffers_clean(page))
		__nilfs_clear_page_dirty(page);

	bh->b_blocknr = -1;
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	unlock_buffer(bh);
	brelse(bh);
}

/**
 * nilfs_copy_buffer - copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *kaddr0, *kaddr1;
	unsigned long bits;
	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
	struct buffer_head *bh;

	kaddr0 = kmap_atomic(spage);
	kaddr1 = kmap_atomic(dpage);
	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
	kunmap_atomic(kaddr1);
	kunmap_atomic(kaddr0);

	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

	bh = dbh;
	bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & BIT(BH_Uptodate))
		SetPageUptodate(dpage);
	else
		ClearPageUptodate(dpage);
	if (bits & BIT(BH_Mapped))
		SetPageMappedToDisk(dpage);
	else
		ClearPageMappedToDisk(dpage);
}

/**
 * nilfs_page_buffers_clean - check if a page has dirty buffers or not.
 * @page: page to be checked
 *
 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
 * Otherwise, it returns a non-zero value.
 */
int nilfs_page_buffers_clean(struct page *page)
{
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);
	do {
		if (buffer_dirty(bh))
			return 0;
		bh = bh->b_this_page;
	} while (bh != head);
	return 1;
}

void nilfs_page_bug(struct page *page)
{
	struct address_space *m;
	unsigned long ino;

	if (unlikely(!page)) {
		printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
		return;
	}

	m = page->mapping;
	ino = m ? m->host->i_ino : 0;

	printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
	       "mapping=%p ino=%lu\n",
	       page, page_ref_count(page),
	       (unsigned long long)page->index, page->flags, m, ino);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int i = 0;

		bh = head = page_buffers(page);
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}

/**
 * nilfs_copy_page - copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
 *
 * This function is for both data pages and btnode pages.  The dirty flag
 * must be handled by the caller.  The page must not be under I/O, and
 * both the source and destination pages must be locked.
 */
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh, *sbufs;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(PageWriteback(dst));

	sbh = sbufs = page_buffers(src);
	if (!page_has_buffers(dst))
		create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= BIT(BH_Dirty);

	dbh = dbufs = page_buffers(dst);
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	copy_highpage(dst, src);

	if (PageUptodate(src) && !PageUptodate(dst))
		SetPageUptodate(dst);
	else if (!PageUptodate(src) && PageUptodate(dst))
		ClearPageUptodate(dst);
	if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
		SetPageMappedToDisk(dst);
	else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
		ClearPageMappedToDisk(dst);

	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}

int nilfs_copy_dirty_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;
	int err = 0;

	pagevec_init(&pvec);
repeat:
	if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY))
		return 0;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;

		lock_page(page);
		if (unlikely(!PageDirty(page)))
			NILFS_PAGE_BUG(page, "inconsistent dirty state");

		dpage = grab_cache_page(dmap, page->index);
		if (unlikely(!dpage)) {
			/* No empty page is added to the page cache */
			err = -ENOMEM;
			unlock_page(page);
			break;
		}
		if (unlikely(!page_has_buffers(page)))
			NILFS_PAGE_BUG(page,
				       "found empty page in dat page cache");

		nilfs_copy_page(dpage, page, 1);
		__set_page_dirty_nobuffers(dpage);

		unlock_page(dpage);
		put_page(dpage);
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	if (likely(!err))
		goto repeat;
	return err;
}
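/*
 * Illustrative sketch (not part of the original file): the metadata file
 * shadow map code uses nilfs_copy_dirty_pages() to checkpoint a page
 * cache into its shadow before operations that may have to be rolled
 * back.  The name shadow->frozen_data follows fs/nilfs2/mdt.c but is an
 * assumption in this example:
 *
 *	err = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
 *	if (err)
 *		return err;	// -ENOMEM while growing the shadow cache
 */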
/**
 * nilfs_copy_back_pages - copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i, n;
	pgoff_t index = 0;

	pagevec_init(&pvec);
repeat:
	n = pagevec_lookup(&pvec, smap, &index);
	if (!n)
		return;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;
		pgoff_t offset = page->index;

		lock_page(page);
		dpage = find_lock_page(dmap, offset);
		if (dpage) {
			/* overwrite existing page in the destination cache */
			WARN_ON(PageDirty(dpage));
			nilfs_copy_page(dpage, page, 0);
			unlock_page(dpage);
			put_page(dpage);
			/* Do we not need to remove page from smap here? */
		} else {
			struct page *p;

			/* move the page to the destination cache */
			xa_lock_irq(&smap->i_pages);
			p = __xa_erase(&smap->i_pages, offset);
			WARN_ON(page != p);
			smap->nrpages--;
			xa_unlock_irq(&smap->i_pages);

			xa_lock_irq(&dmap->i_pages);
			p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
			if (unlikely(p)) {
				/* Probably -ENOMEM */
				page->mapping = NULL;
				put_page(page);
			} else {
				page->mapping = dmap;
				dmap->nrpages++;
				if (PageDirty(page))
					__xa_set_mark(&dmap->i_pages, offset,
						      PAGECACHE_TAG_DIRTY);
			}
			xa_unlock_irq(&dmap->i_pages);
		}
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	goto repeat;
}

/**
 * nilfs_clear_dirty_pages - discard dirty pages in address space
 * @mapping: address space with dirty pages for discarding
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec);

	while (pagevec_lookup_tag(&pvec, mapping, &index,
				  PAGECACHE_TAG_DIRTY)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			nilfs_clear_dirty_page(page, silent);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

/**
 * nilfs_clear_dirty_page - discard dirty page
 * @page: dirty page that will be discarded
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_page(struct page *page, bool silent)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;

	BUG_ON(!PageLocked(page));

	if (!silent)
		nilfs_warn(sb, "discard dirty page: offset=%lld, ino=%lu",
			   page_offset(page), inode->i_ino);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		const unsigned long clear_bits =
			(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
			 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
			 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

		bh = head = page_buffers(page);
		do {
			lock_buffer(bh);
			if (!silent)
				nilfs_warn(sb,
					   "discard dirty block: blocknr=%llu, size=%zu",
					   (u64)bh->b_blocknr, bh->b_size);

			set_mask_bits(&bh->b_state, clear_bits, 0);
			unlock_buffer(bh);
		} while (bh = bh->b_this_page, bh != head);
	}

	__nilfs_clear_page_dirty(page);
}
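/*
 * Illustrative sketch (not part of the original file): on rollback, the
 * shadow map code discards whatever was dirtied after the checkpoint and
 * then pulls the saved pages back in; again, shadow->frozen_data follows
 * fs/nilfs2/mdt.c but is an assumed name here:
 *
 *	nilfs_clear_dirty_pages(inode->i_mapping, true);
 *	nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);
 */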
unsigned int nilfs_page_count_clean_buffers(struct page *page,
					    unsigned int from, unsigned int to)
{
	unsigned int block_start, block_end;
	struct buffer_head *bh, *head;
	unsigned int nc = 0;

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + bh->b_size;
		if (block_end > from && block_start < to && !buffer_dirty(bh))
			nc++;
	}
	return nc;
}
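/*
 * Illustrative sketch (not part of the original file): a write_end path
 * can use this counter to learn how many buffers a write newly dirties,
 * by counting the still-clean buffers in the copied range before
 * block_write_end() marks them dirty.  Variable names are assumptions
 * for the example:
 *
 *	start = pos & (PAGE_SIZE - 1);
 *	nr_dirty = nilfs_page_count_clean_buffers(page, start,
 *						  start + copied);
 *	copied = generic_write_end(file, mapping, pos, len, copied, page,
 *				   fsdata);
 *	nilfs_set_file_dirty(inode, nr_dirty);
 */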
void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
{
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	mapping->private_data = NULL;
	mapping->a_ops = &empty_aops;
}

/*
 * NILFS2 needs clear_page_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
 *    page dirty flags when it copies back pages from the shadow cache
 *    (gcdat->{i_mapping,i_btnode_cache}) to its original cache
 *    (dat->{i_mapping,i_btnode_cache}).
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their
 *    pages.
 */
int __nilfs_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping) {
		xa_lock_irq(&mapping->i_pages);
		if (test_bit(PG_dirty, &page->flags)) {
			__xa_clear_mark(&mapping->i_pages, page_index(page),
					PAGECACHE_TAG_DIRTY);
			xa_unlock_irq(&mapping->i_pages);
			return clear_page_dirty_for_io(page);
		}
		xa_unlock_irq(&mapping->i_pages);
		return 0;
	}
	return TestClearPageDirty(page);
}

/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches for an extent of buffers marked "delayed" which
 * starts from a block offset equal to or larger than @start_blk.  If
 * such an extent was found, this will store the start offset in @blkoff
 * and return its length in blocks.  Otherwise, zero is returned.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
					    sector_t start_blk,
					    sector_t *blkoff)
{
	unsigned int i;
	pgoff_t index;
	unsigned int nblocks_in_page;
	unsigned long length = 0;
	sector_t b;
	struct pagevec pvec;
	struct page *page;

	if (inode->i_mapping->nrpages == 0)
		return 0;

	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
	nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec);

repeat:
	pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
					pvec.pages);
	if (pvec.nr == 0)
		return length;

	if (length > 0 && pvec.pages[0]->index > index)
		goto out;

	b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits);
	i = 0;
	do {
		page = pvec.pages[i];

		lock_page(page);
		if (page_has_buffers(page)) {
			struct buffer_head *bh, *head;

			bh = head = page_buffers(page);
			do {
				if (b < start_blk)
					continue;
				if (buffer_delay(bh)) {
					if (length == 0)
						*blkoff = b;
					length++;
				} else if (length > 0) {
					goto out_locked;
				}
			} while (++b, bh = bh->b_this_page, bh != head);
		} else {
			if (length > 0)
				goto out_locked;

			b += nblocks_in_page;
		}
		unlock_page(page);

	} while (++i < pagevec_count(&pvec));

	index = page->index + 1;
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;

out_locked:
	unlock_page(page);
out:
	pagevec_release(&pvec);
	return length;
}
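/*
 * Illustrative sketch (not part of the original file): fiemap-style
 * callers can use nilfs_find_uncommitted_extent() to report
 * delayed-allocation extents (dirty in memory, not yet assigned disk
 * blocks).  Variable names are assumptions for the example:
 *
 *	sector_t delalloc_blkoff;
 *	unsigned long delalloc_blklen;
 *
 *	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
 *							&delalloc_blkoff);
 *	if (delalloc_blklen && delalloc_blkoff == blkoff) {
 *		// an uncommitted extent starts exactly at blkoff;
 *		// report it as delalloc with an unknown physical address
 *	}
 */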