xref: /linux/fs/nilfs2/page.c (revision 0526b56cbc3c489642bd6a5fe4b718dea7ef0ee8)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi and Seiji Kihara.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"


#define NILFS_BUFFER_INHERENT_BITS					\
	(BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) |	\
	 BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked))

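/*
 * __nilfs_get_page_block() - return the buffer head covering @block,
 * attaching empty buffers to the locked page first if it has none.
 * @block is an absolute file block number and @index is the page index,
 * so @block - first_block gives the buffer's position within the page.
 * The buffer head appears to come back with an elevated reference count
 * (taken in nilfs_page_get_nth_block(), see page.h), so callers
 * eventually drop it with brelse().
 */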
static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
		       int blkbits, unsigned long b_state)
{
	unsigned long first_block;
	struct buffer_head *bh;

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << blkbits, b_state);

	first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
	bh = nilfs_page_get_nth_block(page, block - first_block);

	touch_buffer(bh);
	wait_on_buffer(bh);
	return bh;
}

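/**
 * nilfs_grab_buffer - get the buffer head for a block, creating it if needed
 * @inode: inode whose block size determines the buffer size
 * @mapping: page cache to look the block up in
 * @blkoff: file block number
 * @b_state: initial buffer state bits for newly created buffers
 *
 * nilfs_grab_buffer() returns the buffer head covering @blkoff with its
 * page locked, or NULL if the page could not be obtained.  The caller
 * must release the buffer head with brelse() and unlock and release the
 * underlying page.
 */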
struct buffer_head *nilfs_grab_buffer(struct inode *inode,
				      struct address_space *mapping,
				      unsigned long blkoff,
				      unsigned long b_state)
{
	int blkbits = inode->i_blkbits;
	pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
	struct page *page;
	struct buffer_head *bh;

	page = grab_cache_page(mapping, index);
	if (unlikely(!page))
		return NULL;

	bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
	if (unlikely(!bh)) {
		unlock_page(page);
		put_page(page);
		return NULL;
	}
	return bh;
}

/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	const unsigned long clear_bits =
		(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
		 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
		 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

	lock_buffer(bh);
	set_mask_bits(&bh->b_state, clear_bits, 0);
	if (nilfs_page_buffers_clean(page))
		__nilfs_clear_page_dirty(page);

	bh->b_blocknr = -1;
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	unlock_buffer(bh);
	brelse(bh);
}

/**
 * nilfs_copy_buffer - copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *kaddr0, *kaddr1;
	unsigned long bits;
	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
	struct buffer_head *bh;

	kaddr0 = kmap_atomic(spage);
	kaddr1 = kmap_atomic(dpage);
	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
	kunmap_atomic(kaddr1);
	kunmap_atomic(kaddr0);

	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

	bh = dbh;
	bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & BIT(BH_Uptodate))
		SetPageUptodate(dpage);
	else
		ClearPageUptodate(dpage);
	if (bits & BIT(BH_Mapped))
		SetPageMappedToDisk(dpage);
	else
		ClearPageMappedToDisk(dpage);
}

/**
 * nilfs_page_buffers_clean - check whether a page has dirty buffers
 * @page: page to be checked
 *
 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
 * Otherwise, it returns a non-zero value.
 */
int nilfs_page_buffers_clean(struct page *page)
{
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);
	do {
		if (buffer_dirty(bh))
			return 0;
		bh = bh->b_this_page;
	} while (bh != head);
	return 1;
}

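/*
 * nilfs_page_bug() - dump diagnostic information about a broken page to
 * the kernel log: its reference count, index, flags, owner mapping and
 * inode number, followed by the state of every attached buffer head.
 * This backs the NILFS_PAGE_BUG() consistency-check macro (see page.h).
 */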
void nilfs_page_bug(struct page *page)
{
	struct address_space *m;
	unsigned long ino;

	if (unlikely(!page)) {
		printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
		return;
	}

	m = page->mapping;
	ino = m ? m->host->i_ino : 0;

	printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
	       "mapping=%p ino=%lu\n",
	       page, page_ref_count(page),
	       (unsigned long long)page->index, page->flags, m, ino);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int i = 0;

		bh = head = page_buffers(page);
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}

/**
 * nilfs_copy_page - copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: whether to copy the dirty states of the page's buffer heads
 *
 * This function is for both data pages and btnode pages.  The page-level
 * dirty flag must be handled by the caller.  The page must not be under
 * I/O.  Both @src and @dst must be locked.
 */
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(PageWriteback(dst));

	sbh = page_buffers(src);
	if (!page_has_buffers(dst))
		create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= BIT(BH_Dirty);

	dbh = dbufs = page_buffers(dst);
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	copy_highpage(dst, src);

	if (PageUptodate(src) && !PageUptodate(dst))
		SetPageUptodate(dst);
	else if (!PageUptodate(src) && PageUptodate(dst))
		ClearPageUptodate(dst);
	if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
		SetPageMappedToDisk(dst);
	else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
		ClearPageMappedToDisk(dst);

	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}

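/**
 * nilfs_copy_dirty_pages - copy dirty pages from one page cache to another
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * All pages tagged dirty in @smap are copied, buffer heads included,
 * into @dmap, and the copies are marked dirty as well.  Zero is
 * returned on success; otherwise, a negative error code (e.g. from
 * filemap_grab_folio()) is returned.
 */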
int nilfs_copy_dirty_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct folio_batch fbatch;
	unsigned int i;
	pgoff_t index = 0;
	int err = 0;

	folio_batch_init(&fbatch);
repeat:
	if (!filemap_get_folios_tag(smap, &index, (pgoff_t)-1,
				PAGECACHE_TAG_DIRTY, &fbatch))
		return 0;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct folio *folio = fbatch.folios[i], *dfolio;

		folio_lock(folio);
		if (unlikely(!folio_test_dirty(folio)))
			NILFS_PAGE_BUG(&folio->page, "inconsistent dirty state");

		dfolio = filemap_grab_folio(dmap, folio->index);
		if (unlikely(IS_ERR(dfolio))) {
			/* No empty page is added to the page cache */
			folio_unlock(folio);
			err = PTR_ERR(dfolio);
			break;
		}
		if (unlikely(!folio_buffers(folio)))
			NILFS_PAGE_BUG(&folio->page,
				       "found empty page in dat page cache");

		nilfs_copy_page(&dfolio->page, &folio->page, 1);
		filemap_dirty_folio(folio_mapping(dfolio), dfolio);

		folio_unlock(dfolio);
		folio_put(dfolio);
		folio_unlock(folio);
	}
	folio_batch_release(&fbatch);
	cond_resched();

	if (likely(!err))
		goto repeat;
	return err;
}

/**
 * nilfs_copy_back_pages - copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct folio_batch fbatch;
	unsigned int i, n;
	pgoff_t start = 0;

	folio_batch_init(&fbatch);
repeat:
	n = filemap_get_folios(smap, &start, ~0UL, &fbatch);
	if (!n)
		return;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct folio *folio = fbatch.folios[i], *dfolio;
		pgoff_t index = folio->index;

		folio_lock(folio);
		dfolio = filemap_lock_folio(dmap, index);
		if (!IS_ERR(dfolio)) {
			/* overwrite existing folio in the destination cache */
			WARN_ON(folio_test_dirty(dfolio));
			nilfs_copy_page(&dfolio->page, &folio->page, 0);
			folio_unlock(dfolio);
			folio_put(dfolio);
			/* Do we not need to remove folio from smap here? */
		} else {
			struct folio *f;

			/* move the folio to the destination cache */
			xa_lock_irq(&smap->i_pages);
			f = __xa_erase(&smap->i_pages, index);
			WARN_ON(folio != f);
			smap->nrpages--;
			xa_unlock_irq(&smap->i_pages);

			xa_lock_irq(&dmap->i_pages);
			f = __xa_store(&dmap->i_pages, index, folio, GFP_NOFS);
			if (unlikely(f)) {
				/* Probably -ENOMEM */
				folio->mapping = NULL;
				folio_put(folio);
			} else {
				folio->mapping = dmap;
				dmap->nrpages++;
				if (folio_test_dirty(folio))
					__xa_set_mark(&dmap->i_pages, index,
							PAGECACHE_TAG_DIRTY);
			}
			xa_unlock_irq(&dmap->i_pages);
		}
		folio_unlock(folio);
	}
	folio_batch_release(&fbatch);
	cond_resched();

	goto repeat;
}

/**
 * nilfs_clear_dirty_pages - discard dirty pages in address space
 * @mapping: address space with dirty pages for discarding
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
{
	struct folio_batch fbatch;
	unsigned int i;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);

	while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
				PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			nilfs_clear_dirty_page(&folio->page, silent);
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

/**
 * nilfs_clear_dirty_page - discard dirty page
 * @page: dirty page that will be discarded
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_page(struct page *page, bool silent)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;

	BUG_ON(!PageLocked(page));

	if (!silent)
		nilfs_warn(sb, "discard dirty page: offset=%lld, ino=%lu",
			   page_offset(page), inode->i_ino);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		const unsigned long clear_bits =
			(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
			 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
			 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

		bh = head = page_buffers(page);
		do {
			lock_buffer(bh);
			if (!silent)
				nilfs_warn(sb,
					   "discard dirty block: blocknr=%llu, size=%zu",
					   (u64)bh->b_blocknr, bh->b_size);

			set_mask_bits(&bh->b_state, clear_bits, 0);
			unlock_buffer(bh);
		} while (bh = bh->b_this_page, bh != head);
	}

	__nilfs_clear_page_dirty(page);
}

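/**
 * nilfs_page_count_clean_buffers - count clean buffers in a byte range
 * @page: page whose buffers are examined
 * @from: start offset within the page, in bytes
 * @to: end offset within the page, in bytes
 *
 * nilfs_page_count_clean_buffers() returns the number of buffer heads
 * that overlap the byte range [@from, @to) and are not dirty.
 */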
unsigned int nilfs_page_count_clean_buffers(struct page *page,
					    unsigned int from, unsigned int to)
{
	unsigned int block_start, block_end;
	struct buffer_head *bh, *head;
	unsigned int nc = 0;

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + bh->b_size;
		if (block_end > from && block_start < to && !buffer_dirty(bh))
			nc++;
	}
	return nc;
}

/*
 * NILFS2 needs __nilfs_clear_page_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of the DAT file, NILFS2 clears
 *    the dirty flag of pages when it copies back pages from the shadow
 *    cache to the original cache.
 *
 * 2) Some B-tree operations like insertion or deletion may dispose of
 *    buffers in dirty state, and this needs to cancel the dirty state of
 *    their pages.
 */
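/**
 * __nilfs_clear_page_dirty - clear the dirty flag of a page and its tag
 * @page: page to be cleaned
 *
 * In addition to clearing the page's dirty flag, this clears the
 * PAGECACHE_TAG_DIRTY tag for the page in the owning mapping's XArray,
 * so that the page no longer shows up in dirty-tagged lookups.
 * Returns non-zero if the dirty flag was actually cleared.
 */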
int __nilfs_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping) {
		xa_lock_irq(&mapping->i_pages);
		if (test_bit(PG_dirty, &page->flags)) {
			__xa_clear_mark(&mapping->i_pages, page_index(page),
					PAGECACHE_TAG_DIRTY);
			xa_unlock_irq(&mapping->i_pages);
			return clear_page_dirty_for_io(page);
		}
		xa_unlock_irq(&mapping->i_pages);
		return 0;
	}
	return TestClearPageDirty(page);
}

/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches for an extent of buffers marked "delayed" that
 * starts at a block offset equal to or larger than @start_blk.  If such
 * an extent is found, its start offset is stored in @blkoff and its
 * length in blocks is returned.  Otherwise, zero is returned.  For
 * example, if the first delayed buffer at or after @start_blk covers
 * block 10 and blocks 10 through 13 are all delayed but block 14 is
 * not, @blkoff is set to 10 and 4 is returned.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
					    sector_t start_blk,
					    sector_t *blkoff)
{
	unsigned int i, nr_folios;
	pgoff_t index;
	unsigned long length = 0;
	struct folio_batch fbatch;
	struct folio *folio;

	if (inode->i_mapping->nrpages == 0)
		return 0;

	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);

	folio_batch_init(&fbatch);

repeat:
	nr_folios = filemap_get_folios_contig(inode->i_mapping, &index, ULONG_MAX,
			&fbatch);
	if (nr_folios == 0)
		return length;

	i = 0;
	do {
		folio = fbatch.folios[i];

		folio_lock(folio);
		if (folio_buffers(folio)) {
			struct buffer_head *bh, *head;
			sector_t b;

			b = folio->index << (PAGE_SHIFT - inode->i_blkbits);
			bh = head = folio_buffers(folio);
			do {
				if (b < start_blk)
					continue;
				if (buffer_delay(bh)) {
					if (length == 0)
						*blkoff = b;
					length++;
				} else if (length > 0) {
					goto out_locked;
				}
			} while (++b, bh = bh->b_this_page, bh != head);
		} else {
			if (length > 0)
				goto out_locked;
		}
		folio_unlock(folio);

	} while (++i < nr_folios);

	folio_batch_release(&fbatch);
	cond_resched();
	goto repeat;

out_locked:
	folio_unlock(folio);
	folio_batch_release(&fbatch);
	return length;
}
542