/*
 * btnode.c - NILFS B-tree node cache
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * This file was originally written by Seiji Kihara <kihara@osrg.net>
 * and fully revised by Ryusuke Konishi <ryusuke@osrg.net> for
 * stabilization and simplification.
 *
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include "nilfs.h"
#include "mdt.h"
#include "dat.h"
#include "page.h"
#include "btnode.h"

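/**
 * nilfs_btnode_cache_init_once - initialize a B-tree node cache structure
 * @btnc: address space to be used as a B-tree node cache
 *
 * Clears the structure and sets up its radix tree, locks, and lists so
 * that it can serve as a private page cache for B-tree node buffers.
 */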
void nilfs_btnode_cache_init_once(struct address_space *btnc)
{
	memset(btnc, 0, sizeof(*btnc));
	INIT_RADIX_TREE(&btnc->page_tree, GFP_ATOMIC);
	spin_lock_init(&btnc->tree_lock);
	INIT_LIST_HEAD(&btnc->private_list);
	spin_lock_init(&btnc->private_lock);

	spin_lock_init(&btnc->i_mmap_lock);
	INIT_RAW_PRIO_TREE_ROOT(&btnc->i_mmap);
	INIT_LIST_HEAD(&btnc->i_mmap_nonlinear);
}

static const struct address_space_operations def_btnode_aops = {
	.sync_page		= block_sync_page,
};

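/**
 * nilfs_btnode_cache_init - associate a B-tree node cache with a device
 * @btnc: B-tree node cache
 * @bdi: backing device info of the device holding the B-tree node blocks
 *
 * Sets the GFP mask to GFP_NOFS and installs the default address space
 * operations for B-tree node buffers.
 */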
void nilfs_btnode_cache_init(struct address_space *btnc,
			     struct backing_dev_info *bdi)
{
	btnc->host = NULL;  /* could this safely be set to the host inode? */
	btnc->flags = 0;
	mapping_set_gfp_mask(btnc, GFP_NOFS);
	btnc->assoc_mapping = NULL;
	btnc->backing_dev_info = bdi;
	btnc->a_ops = &def_btnode_aops;
}

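/**
 * nilfs_btnode_cache_clear - discard all pages held in a B-tree node cache
 * @btnc: B-tree node cache
 */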
void nilfs_btnode_cache_clear(struct address_space *btnc)
{
	invalidate_mapping_pages(btnc, 0, -1);
	truncate_inode_pages(btnc, 0);
}

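/**
 * nilfs_btnode_submit_block - prepare or start reading a B-tree node buffer
 * @btnc: B-tree node cache
 * @blocknr: block number (a virtual block number unless @btnc belongs to
 *	the DAT file)
 * @pblocknr: physical block number to read from, or 0 to have it looked up
 *	through the DAT
 * @pbh: place to store the resulting buffer head
 * @newblk: nonzero if the block is newly allocated and need not be read
 *
 * Returns 0 if a read was submitted, -EEXIST (used as an internal code) if
 * the buffer was already available in the cache, -ENOMEM if the buffer
 * could not be created, or an error returned by the DAT translation.
 */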
int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
			      sector_t pblocknr, struct buffer_head **pbh,
			      int newblk)
{
	struct buffer_head *bh;
	struct inode *inode = NILFS_BTNC_I(btnc);
	int err;

	bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
	if (unlikely(!bh))
		return -ENOMEM;

	err = -EEXIST; /* internal code */
	if (newblk) {
		if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
			     buffer_dirty(bh))) {
			brelse(bh);
			BUG();
		}
		bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
		bh->b_blocknr = blocknr;
		set_buffer_mapped(bh);
		set_buffer_uptodate(bh);
		goto found;
	}

	if (buffer_uptodate(bh) || buffer_dirty(bh))
		goto found;

	if (pblocknr == 0) {
		pblocknr = blocknr;
		if (inode->i_ino != NILFS_DAT_INO) {
			struct inode *dat =
				nilfs_dat_inode(NILFS_I_NILFS(inode));

			/* blocknr is a virtual block number */
			err = nilfs_dat_translate(dat, blocknr, &pblocknr);
			if (unlikely(err)) {
				brelse(bh);
				goto out_locked;
			}
		}
	}
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		err = -EEXIST; /* internal code */
		goto found;
	}
	set_buffer_mapped(bh);
	bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
	bh->b_blocknr = pblocknr; /* set block address for read */
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(READ, bh);
	bh->b_blocknr = blocknr; /* set back to the given block address */
	err = 0;
found:
	*pbh = bh;

out_locked:
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	return err;
}

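/**
 * nilfs_btnode_get - get a B-tree node buffer, reading it in if necessary
 * @btnc: B-tree node cache
 * @blocknr: block number
 * @pblocknr: physical block number, or 0 to have it looked up through the DAT
 * @pbh: place to store the resulting buffer head
 * @newblk: nonzero if the block is newly allocated
 *
 * Calls nilfs_btnode_submit_block() and, on a cache miss, waits for the
 * submitted read to complete.  Returns 0 on success, -EIO if the read
 * fails, or an error code from the submission path.
 */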
int nilfs_btnode_get(struct address_space *btnc, __u64 blocknr,
		     sector_t pblocknr, struct buffer_head **pbh, int newblk)
{
	struct buffer_head *bh;
	int err;

	err = nilfs_btnode_submit_block(btnc, blocknr, pblocknr, pbh, newblk);
	if (err == -EEXIST) /* internal code (cache hit) */
		return 0;
	if (unlikely(err))
		return err;

	bh = *pbh;
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		brelse(bh);
		return -EIO;
	}
	return 0;
}

/**
 * nilfs_btnode_delete - delete B-tree node buffer
 * @bh: buffer to be deleted
 *
 * nilfs_btnode_delete() invalidates the specified buffer and deletes the
 * page containing it once the page is no longer busy.
 */
void nilfs_btnode_delete(struct buffer_head *bh)
{
	struct address_space *mapping;
	struct page *page = bh->b_page;
	pgoff_t index = page_index(page);
	int still_dirty;

	page_cache_get(page);
	lock_page(page);
	wait_on_page_writeback(page);

	nilfs_forget_buffer(bh);
	still_dirty = PageDirty(page);
	mapping = page->mapping;
	unlock_page(page);
	page_cache_release(page);

	if (!still_dirty && mapping)
		invalidate_inode_pages2_range(mapping, index, index);
}

/**
 * nilfs_btnode_prepare_change_key
 *  Prepare to move the contents of the block at the old key to the new key.
 *  The old buffer is not removed, but it may be reused as the new buffer.
 *  This may return -ENOMEM on memory allocation failure or -EIO on disk
 *  read errors.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh, *nbh;
	struct inode *inode = NILFS_BTNC_I(btnc);
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	int err;

	if (oldkey == newkey)
		return 0;

	obh = ctxt->bh;
	ctxt->newbh = NULL;

	if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
		lock_page(obh->b_page);
		/*
		 * We cannot call radix_tree_preload on kernels older than
		 * 2.6.23 because it is not exported to modules.
		 */
retry:
		err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
		if (err)
			goto failed_unlock;
		/* BUG_ON(oldkey != obh->b_page->index); */
		if (unlikely(oldkey != obh->b_page->index))
			NILFS_PAGE_BUG(obh->b_page,
				       "invalid oldkey %lld (newkey=%lld)",
				       (unsigned long long)oldkey,
				       (unsigned long long)newkey);

		spin_lock_irq(&btnc->tree_lock);
		err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
		spin_unlock_irq(&btnc->tree_lock);
		/*
		 * Note: page->index does not change to newkey until
		 * nilfs_btnode_commit_change_key() is called.  The page lock
		 * is held to protect the page in this intermediate state.
		 */
		radix_tree_preload_end();
		if (!err)
			return 0;
		else if (err != -EEXIST)
			goto failed_unlock;

		err = invalidate_inode_pages2_range(btnc, newkey, newkey);
		if (!err)
			goto retry;
		/* fallback to copy mode */
		unlock_page(obh->b_page);
	}

	err = nilfs_btnode_get(btnc, newkey, 0, &nbh, 1);
	if (likely(!err)) {
		BUG_ON(nbh == obh);
		ctxt->newbh = nbh;
	}
	return err;

 failed_unlock:
	unlock_page(obh->b_page);
	return err;
}

/**
 * nilfs_btnode_commit_change_key
 *  Commit the change_key operation prepared by nilfs_btnode_prepare_change_key().
 */
void nilfs_btnode_commit_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	struct page *opage;

	if (oldkey == newkey)
		return;

	if (nbh == NULL) {	/* blocksize == pagesize */
		opage = obh->b_page;
		if (unlikely(oldkey != opage->index))
			NILFS_PAGE_BUG(opage,
				       "invalid oldkey %lld (newkey=%lld)",
				       (unsigned long long)oldkey,
				       (unsigned long long)newkey);
		if (!test_set_buffer_dirty(obh) && TestSetPageDirty(opage))
			BUG();

		spin_lock_irq(&btnc->tree_lock);
		radix_tree_delete(&btnc->page_tree, oldkey);
		radix_tree_tag_set(&btnc->page_tree, newkey,
				   PAGECACHE_TAG_DIRTY);
		spin_unlock_irq(&btnc->tree_lock);

		opage->index = obh->b_blocknr = newkey;
		unlock_page(opage);
	} else {
		nilfs_copy_buffer(nbh, obh);
		nilfs_btnode_mark_dirty(nbh);

		nbh->b_blocknr = newkey;
		ctxt->bh = nbh;
		nilfs_btnode_delete(obh); /* will decrement bh->b_count */
	}
}

/**
 * nilfs_btnode_abort_change_key
 *  Abort the change_key operation prepared by nilfs_btnode_prepare_change_key().
 */
void nilfs_btnode_abort_change_key(struct address_space *btnc,
				   struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *nbh = ctxt->newbh;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;

	if (oldkey == newkey)
		return;

	if (nbh == NULL) {	/* blocksize == pagesize */
		spin_lock_irq(&btnc->tree_lock);
		radix_tree_delete(&btnc->page_tree, newkey);
		spin_unlock_irq(&btnc->tree_lock);
		unlock_page(ctxt->bh->b_page);
	} else
		brelse(nbh);
}
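
/*
 * Illustrative sketch (not taken from the b-tree code itself) of how a
 * caller might drive the change-key helpers above when relocating a node
 * block from oldkey to newkey.  The ctxt fields are the ones this file
 * reads and writes; bh is the buffer currently cached under oldkey, and
 * "commit" stands in for the caller's own decision to keep the move.
 *
 *	struct nilfs_btnode_chkey_ctxt ctxt = {
 *		.oldkey = oldkey,
 *		.newkey = newkey,
 *		.bh = bh,
 *	};
 *	int err;
 *
 *	err = nilfs_btnode_prepare_change_key(btnc, &ctxt);
 *	if (err)
 *		return err;
 *
 *	if (commit)
 *		nilfs_btnode_commit_change_key(btnc, &ctxt);
 *	else
 *		nilfs_btnode_abort_change_key(btnc, &ctxt);
 *
 * After a commit, ctxt.bh refers to the buffer now cached under newkey;
 * in the copy fallback path it differs from the original bh.
 */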