/*
 * page.c - buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>,
 *            Seiji Kihara <kihara@osrg.net>.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"


#define NILFS_BUFFER_INHERENT_BITS  \
	((1UL << BH_Uptodate) | (1UL << BH_Mapped) | (1UL << BH_NILFS_Node) | \
	 (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Allocated))

static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
		       int blkbits, unsigned long b_state)
{
	unsigned long first_block;
	struct buffer_head *bh;

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << blkbits, b_state);

	first_block = (unsigned long)index << (PAGE_CACHE_SHIFT - blkbits);
	bh = nilfs_page_get_nth_block(page, block - first_block);

	touch_buffer(bh);
	wait_on_buffer(bh);
	return bh;
}
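
/*
 * A worked example of the index arithmetic above (illustrative numbers,
 * not taken from this file): with 4 KiB pages (PAGE_CACHE_SHIFT == 12)
 * and 1 KiB blocks (blkbits == 10), each page holds 1 << (12 - 10) == 4
 * buffers.  For block 10, the page index is 10 >> 2 == 2, first_block is
 * 2 << 2 == 8, and the buffer picked is the (10 - 8) == 2nd one on the
 * page.
 */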

/*
 * Since the page cache of B-tree node pages or data page cache of pseudo
 * inodes does not have a valid mapping->host pointer, calling
 * mark_buffer_dirty() for their buffers causes a NULL pointer dereference;
 * it calls __mark_inode_dirty(NULL) through __set_page_dirty().
 * To avoid this problem, the old style mark_buffer_dirty() is used instead.
 */
void nilfs_mark_buffer_dirty(struct buffer_head *bh)
{
	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
		__set_page_dirty_nobuffers(bh->b_page);
}
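
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * after modifying a buffer of a B-tree node page or of a pseudo inode,
 * dirty it through this helper rather than through mark_buffer_dirty():
 *
 *	memcpy(bh->b_data, data, bh->b_size);
 *	nilfs_mark_buffer_dirty(bh);
 *
 * This dirties the buffer and its page without ever dereferencing the
 * (possibly NULL) mapping->host of these special page caches.
 */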

struct buffer_head *nilfs_grab_buffer(struct inode *inode,
				      struct address_space *mapping,
				      unsigned long blkoff,
				      unsigned long b_state)
{
	int blkbits = inode->i_blkbits;
	pgoff_t index = blkoff >> (PAGE_CACHE_SHIFT - blkbits);
	struct page *page, *opage;
	struct buffer_head *bh, *obh;

	page = grab_cache_page(mapping, index);
	if (unlikely(!page))
		return NULL;

	bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
	if (unlikely(!bh)) {
		unlock_page(page);
		page_cache_release(page);
		return NULL;
	}
	if (!buffer_uptodate(bh) && mapping->assoc_mapping != NULL) {
		/*
		 * A shadow page cache uses assoc_mapping to point to its
		 * original page cache.  The following code tries the
		 * original cache if the given cache is a shadow and the
		 * lookup did not hit there.
		 */
		opage = find_lock_page(mapping->assoc_mapping, index);
		if (!opage)
			return bh;

		obh = __nilfs_get_page_block(opage, blkoff, index, blkbits,
					     b_state);
		if (buffer_uptodate(obh)) {
			nilfs_copy_buffer(bh, obh);
			if (buffer_dirty(obh)) {
				nilfs_mark_buffer_dirty(bh);
				if (!buffer_nilfs_node(bh) && NILFS_MDT(inode))
					nilfs_mdt_mark_dirty(inode);
			}
		}
		brelse(obh);
		unlock_page(opage);
		page_cache_release(opage);
	}
	return bh;
}
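
/*
 * A minimal usage sketch, under the assumption that the caller passes the
 * inode's own page cache: on success the buffer comes back with its page
 * locked and referenced, so a hypothetical caller drops all three
 * references when done:
 *
 *	struct buffer_head *bh;
 *
 *	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
 *	if (bh) {
 *		... read or fill bh->b_data ...
 *		unlock_page(bh->b_page);
 *		page_cache_release(bh->b_page);
 *		brelse(bh);
 *	}
 */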

/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
	struct page *page = bh->b_page;

	lock_buffer(bh);
	clear_buffer_nilfs_volatile(bh);
	clear_buffer_dirty(bh);
	if (nilfs_page_buffers_clean(page))
		__nilfs_clear_page_dirty(page);

	clear_buffer_uptodate(bh);
	clear_buffer_mapped(bh);
	bh->b_blocknr = -1;
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	unlock_buffer(bh);
	brelse(bh);
}

/**
 * nilfs_copy_buffer -- copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *kaddr0, *kaddr1;
	unsigned long bits;
	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
	struct buffer_head *bh;

	kaddr0 = kmap_atomic(spage, KM_USER0);
	kaddr1 = kmap_atomic(dpage, KM_USER1);
	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
	kunmap_atomic(kaddr1, KM_USER1);
	kunmap_atomic(kaddr0, KM_USER0);

	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

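	/*
	 * Page-level flags must reflect all buffers on the destination
	 * page: compute the AND of the Uptodate and Mapped bits over the
	 * whole buffer ring, seeded from the just-copied state of @sbh.
	 */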
	bh = dbh;
	bits = sbh->b_state & ((1UL << BH_Uptodate) | (1UL << BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & (1UL << BH_Uptodate))
		SetPageUptodate(dpage);
	else
		ClearPageUptodate(dpage);
	if (bits & (1UL << BH_Mapped))
		SetPageMappedToDisk(dpage);
	else
		ClearPageMappedToDisk(dpage);
}

/**
 * nilfs_page_buffers_clean - check if a page has dirty buffers or not.
 * @page: page to be checked
 *
 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
 * Otherwise, it returns a non-zero value.
 */
int nilfs_page_buffers_clean(struct page *page)
{
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);
	do {
		if (buffer_dirty(bh))
			return 0;
		bh = bh->b_this_page;
	} while (bh != head);
	return 1;
}

void nilfs_page_bug(struct page *page)
{
	struct address_space *m;
	unsigned long ino = 0;

	if (unlikely(!page)) {
		printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
		return;
	}

	m = page->mapping;
	if (m) {
		struct inode *inode = NILFS_AS_I(m);
		if (inode != NULL)
			ino = inode->i_ino;
	}
	printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
	       "mapping=%p ino=%lu\n",
	       page, atomic_read(&page->_count),
	       (unsigned long long)page->index, page->flags, m, ino);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int i = 0;

		bh = head = page_buffers(page);
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}

/**
 * nilfs_alloc_private_page - allocate a private page with buffer heads
 * @bdev: block device to which the buffers belong
 * @size: size of each buffer in bytes
 * @state: initial state bits for the buffer heads
 *
 * Return Value: On success, a pointer to the allocated page is returned.
 * On error, NULL is returned.
 */
struct page *nilfs_alloc_private_page(struct block_device *bdev, int size,
				      unsigned long state)
{
	struct buffer_head *bh, *head, *tail;
	struct page *page;

	page = alloc_page(GFP_NOFS); /* page_count of the returned page is 1 */
	if (unlikely(!page))
		return NULL;

	lock_page(page);
	head = alloc_page_buffers(page, size, 0);
	if (unlikely(!head)) {
		unlock_page(page);
		__free_page(page);
		return NULL;
	}

	bh = head;
	do {
		bh->b_state = (1UL << BH_NILFS_Allocated) | state;
		tail = bh;
		bh->b_bdev = bdev;
		bh = bh->b_this_page;
	} while (bh);

	tail->b_this_page = head;
	attach_page_buffers(page, head);

	return page;
}
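
/*
 * A minimal usage sketch (assumed values, not a caller from this file):
 * allocate a page carrying 4 KiB buffers, use it, then hand it back to
 * nilfs_free_private_page() below, which expects the page still locked
 * and detached from any mapping -- exactly the state it is returned in:
 *
 *	struct page *page;
 *
 *	page = nilfs_alloc_private_page(bdev, 4096, 0);
 *	if (page) {
 *		... fill the attached buffers ...
 *		nilfs_free_private_page(page);	// page is still locked
 *	}
 */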

void nilfs_free_private_page(struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping);

	if (page_has_buffers(page) && !try_to_free_buffers(page))
		NILFS_PAGE_BUG(page, "failed to free page");

	unlock_page(page);
	__free_page(page);
}

/**
 * nilfs_copy_page -- copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
 *
 * This function is for both data pages and btnode pages.  The dirty flag
 * must be handled by the caller.  The page must not be under i/o.
 * Both src and dst pages must be locked.
 */
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh, *sbufs;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(PageWriteback(dst));

	sbh = sbufs = page_buffers(src);
	if (!page_has_buffers(dst))
		create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= (1UL << BH_Dirty);

	dbh = dbufs = page_buffers(dst);
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	copy_highpage(dst, src);

	if (PageUptodate(src) && !PageUptodate(dst))
		SetPageUptodate(dst);
	else if (!PageUptodate(src) && PageUptodate(dst))
		ClearPageUptodate(dst);
	if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
		SetPageMappedToDisk(dst);
	else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
		ClearPageMappedToDisk(dst);

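	/*
	 * Second pass: every buffer on both rings was locked pairwise in
	 * the first pass above; walk the rings again in the same order to
	 * drop those locks.
	 */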
	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}

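/**
 * nilfs_copy_dirty_pages - copy dirty pages to another page cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * Copy every page tagged dirty in @smap, buffers included, to the page
 * with the same index in @dmap, and mark the copy dirty there.
 *
 * Return Value: 0 on success, or -ENOMEM if a destination page cannot
 * be grabbed.
 */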
int nilfs_copy_dirty_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;
	int err = 0;

	pagevec_init(&pvec, 0);
repeat:
	if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY,
				PAGEVEC_SIZE))
		return 0;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;

		lock_page(page);
		if (unlikely(!PageDirty(page)))
			NILFS_PAGE_BUG(page, "inconsistent dirty state");

		dpage = grab_cache_page(dmap, page->index);
		if (unlikely(!dpage)) {
			/* No empty page is added to the page cache */
			err = -ENOMEM;
			unlock_page(page);
			break;
		}
		if (unlikely(!page_has_buffers(page)))
			NILFS_PAGE_BUG(page,
				       "found empty page in dat page cache");

		nilfs_copy_page(dpage, page, 1);
		__set_page_dirty_nobuffers(dpage);

		unlock_page(dpage);
		page_cache_release(dpage);
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	if (likely(!err))
		goto repeat;
	return err;
}

/**
 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No page must be added to the cache while this function runs.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i, n;
	pgoff_t index = 0;
	int err;

	pagevec_init(&pvec, 0);
repeat:
	n = pagevec_lookup(&pvec, smap, index, PAGEVEC_SIZE);
	if (!n)
		return;
	index = pvec.pages[n - 1]->index + 1;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;
		pgoff_t offset = page->index;

		lock_page(page);
		dpage = find_lock_page(dmap, offset);
		if (dpage) {
			/* override existing page on the destination cache */
			WARN_ON(PageDirty(dpage));
			nilfs_copy_page(dpage, page, 0);
			unlock_page(dpage);
			page_cache_release(dpage);
		} else {
			struct page *page2;

			/* move the page to the destination cache */
			spin_lock_irq(&smap->tree_lock);
			page2 = radix_tree_delete(&smap->page_tree, offset);
			WARN_ON(page2 != page);

			smap->nrpages--;
			spin_unlock_irq(&smap->tree_lock);

			spin_lock_irq(&dmap->tree_lock);
			err = radix_tree_insert(&dmap->page_tree, offset, page);
			if (unlikely(err < 0)) {
				WARN_ON(err == -EEXIST);
				page->mapping = NULL;
				/* drop the page cache's reference */
				page_cache_release(page);
			} else {
				page->mapping = dmap;
				dmap->nrpages++;
				if (PageDirty(page))
					radix_tree_tag_set(&dmap->page_tree,
							   offset,
							   PAGECACHE_TAG_DIRTY);
			}
			spin_unlock_irq(&dmap->tree_lock);
		}
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	goto repeat;
}

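/**
 * nilfs_clear_dirty_pages - discard dirty pages in a page cache
 * @mapping: page cache whose dirty pages are discarded
 *
 * Walk the pages tagged dirty in @mapping and clear their dirty,
 * uptodate, and mapped states, buffer by buffer, so that none of them
 * gets written back.
 */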
void nilfs_clear_dirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			ClearPageUptodate(page);
			ClearPageMappedToDisk(page);
			bh = head = page_buffers(page);
			do {
				lock_buffer(bh);
				clear_buffer_dirty(bh);
				clear_buffer_nilfs_volatile(bh);
				clear_buffer_uptodate(bh);
				clear_buffer_mapped(bh);
				unlock_buffer(bh);
				bh = bh->b_this_page;
			} while (bh != head);

			__nilfs_clear_page_dirty(page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

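/**
 * nilfs_page_count_clean_buffers - count clean buffers in a byte range
 * @page: page to be scanned
 * @from: start offset within the page, in bytes
 * @to: end offset within the page, in bytes
 *
 * Count the buffers that overlap the range between @from and @to and
 * are not dirty.  A worked example (illustrative numbers, not from this
 * file): on a 4096-byte page carrying four 1024-byte buffers, from=1024
 * and to=3072 cover the second and third buffers, so the result is 2 if
 * both are clean.
 */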
unsigned nilfs_page_count_clean_buffers(struct page *page,
					unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	struct buffer_head *bh, *head;
	unsigned nc = 0;

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + bh->b_size;
		if (block_end > from && block_start < to && !buffer_dirty(bh))
			nc++;
	}
	return nc;
}

/*
 * NILFS2 needs __nilfs_clear_page_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
 *    page dirty flags when it copies back pages from the shadow cache
 *    (gcdat->{i_mapping,i_btnode_cache}) to its original cache
 *    (dat->{i_mapping,i_btnode_cache}).
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their pages.
 */
int __nilfs_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping) {
		spin_lock_irq(&mapping->tree_lock);
		if (test_bit(PG_dirty, &page->flags)) {
			radix_tree_tag_clear(&mapping->page_tree,
					     page_index(page),
					     PAGECACHE_TAG_DIRTY);
			spin_unlock_irq(&mapping->tree_lock);
			return clear_page_dirty_for_io(page);
		}
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	return TestClearPageDirty(page);
}
543