// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS B-tree node cache
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Originally written by Seiji Kihara.
 * Fully revised by Ryusuke Konishi for stabilization and simplification.
 *
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "mdt.h"
#include "dat.h"
#include "page.h"
#include "btnode.h"


/**
 * nilfs_init_btnc_inode - initialize B-tree node cache inode
 * @btnc_inode: inode to be initialized
 *
 * nilfs_init_btnc_inode() sets up an inode for a B-tree node cache.
 */
void nilfs_init_btnc_inode(struct inode *btnc_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(btnc_inode);

	btnc_inode->i_mode = S_IFREG;
	ii->i_flags = 0;
	memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
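	/*
	 * Use GFP_NOFS so that page allocations for this mapping cannot
	 * recurse into the filesystem during memory reclaim.
	 */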
	mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
	btnc_inode->i_mapping->a_ops = &nilfs_buffer_cache_aops;
}

void nilfs_btnode_cache_clear(struct address_space *btnc)
{
	invalidate_mapping_pages(btnc, 0, -1);
	truncate_inode_pages(btnc, 0);
}

struct buffer_head *
nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
{
	struct inode *inode = btnc->host;
	struct buffer_head *bh;

	bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
		     buffer_dirty(bh))) {
		/*
		 * The block buffer at the specified new address was already
		 * in use.  This can happen if it is a virtual block number
		 * and has been reallocated due to corruption of the bitmap
		 * used to manage its allocation state (if not, the buffer
		 * clearing of an abandoned b-tree node is missing somewhere).
		 */
		nilfs_error(inode->i_sb,
			    "state inconsistency probably due to duplicate use of b-tree node block address %llu (ino=%lu)",
			    (unsigned long long)blocknr, inode->i_ino);
		goto failed;
	}
	memset(bh->b_data, 0, i_blocksize(inode));
	bh->b_blocknr = blocknr;
	set_buffer_mapped(bh);
	set_buffer_uptodate(bh);

	folio_unlock(bh->b_folio);
	folio_put(bh->b_folio);
	return bh;

failed:
	folio_unlock(bh->b_folio);
	folio_put(bh->b_folio);
	brelse(bh);
	return ERR_PTR(-EIO);
}
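
/*
 * Usage sketch (illustrative): callers must check the return value with
 * IS_ERR() rather than for NULL, and eventually release the returned
 * buffer with brelse():
 *
 *	bh = nilfs_btnode_create_block(btnc, newkey);
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);
 *	...
 *	brelse(bh);
 */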

int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
			      sector_t pblocknr, blk_opf_t opf,
			      struct buffer_head **pbh, sector_t *submit_ptr)
{
	struct buffer_head *bh;
	struct inode *inode = btnc->host;
	struct folio *folio;
	int err;

	bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
	if (unlikely(!bh))
		return -ENOMEM;

	err = -EEXIST; /* internal code */
	folio = bh->b_folio;

	if (buffer_uptodate(bh) || buffer_dirty(bh))
		goto found;

	if (pblocknr == 0) {
		pblocknr = blocknr;
		if (inode->i_ino != NILFS_DAT_INO) {
			struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

			/* blocknr is a virtual block number */
			err = nilfs_dat_translate(nilfs->ns_dat, blocknr,
						  &pblocknr);
			if (unlikely(err)) {
				brelse(bh);
				goto out_locked;
			}
		}
	}

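	/*
	 * For readahead requests, give up unless the target block
	 * immediately follows the previously submitted one (*submit_ptr)
	 * and the buffer lock can be taken without blocking.
	 */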
	if (opf & REQ_RAHEAD) {
		if (pblocknr != *submit_ptr + 1 || !trylock_buffer(bh)) {
			err = -EBUSY; /* internal code */
			brelse(bh);
			goto out_locked;
		}
	} else { /* opf == REQ_OP_READ */
		lock_buffer(bh);
	}
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		err = -EEXIST; /* internal code */
		goto found;
	}
	set_buffer_mapped(bh);
	bh->b_blocknr = pblocknr; /* set block address for read */
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(opf, bh);
	bh->b_blocknr = blocknr; /* set back to the given block address */
	*submit_ptr = pblocknr;
	err = 0;
found:
	*pbh = bh;

out_locked:
	folio_unlock(folio);
	folio_put(folio);
	return err;
}
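
/*
 * Usage sketch (illustrative): -EEXIST and -EBUSY above are internal
 * codes, not errors to be propagated.  A synchronous reader typically
 * maps -EEXIST to success and otherwise waits for the I/O to finish:
 *
 *	sector_t submit_ptr = 0;
 *	int err;
 *
 *	err = nilfs_btnode_submit_block(btnc, blocknr, 0, REQ_OP_READ,
 *					&bh, &submit_ptr);
 *	if (err == -EEXIST) {
 *		err = 0;
 *	} else if (!err) {
 *		wait_on_buffer(bh);
 *		if (!buffer_uptodate(bh))
 *			err = -EIO;
 *	}
 */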

/**
 * nilfs_btnode_delete - delete B-tree node buffer
 * @bh: buffer to be deleted
 *
 * nilfs_btnode_delete() invalidates the specified buffer and deletes the
 * folio containing it once the folio is no longer in use.
 */
void nilfs_btnode_delete(struct buffer_head *bh)
{
	struct address_space *mapping;
	struct folio *folio = bh->b_folio;
	pgoff_t index = folio->index;
	int still_dirty;

	folio_get(folio);
	folio_lock(folio);
	folio_wait_writeback(folio);

	nilfs_forget_buffer(bh);
	still_dirty = folio_test_dirty(folio);
	mapping = folio->mapping;
	folio_unlock(folio);
	folio_put(folio);

	if (!still_dirty && mapping)
		invalidate_inode_pages2_range(mapping, index, index);
}

/**
 * nilfs_btnode_prepare_change_key - prepare to change the search key of a
 * b-tree node block
 * @btnc: page cache in which the b-tree node block is buffered
 * @ctxt: structure for exchanging context information for key change
 *
 * nilfs_btnode_prepare_change_key() prepares to move the contents of the
 * b-tree node block of the old key given in the "oldkey" member of @ctxt to
 * the position of the new key given in the "newkey" member of @ctxt in the
 * page cache @btnc.  Here, the key of the block is an index in units of
 * blocks, and if the page and block sizes match, it matches the page index
 * in the page cache.
 *
 * If the page size and block size match, this function attempts to move the
 * entire folio, and in preparation for this, inserts the original folio into
 * the new index of the cache.  If this insertion fails or if the page size
 * and block size are different, it falls back to a copy preparation using
 * nilfs_btnode_create_block(), inserts a new block at the position
 * corresponding to "newkey", and stores the buffer head pointer in the
 * "newbh" member of @ctxt.
 *
 * Note that the current implementation does not support folio sizes larger
 * than the page size.
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EIO	- I/O error (metadata corruption).
 * * %-ENOMEM	- Insufficient memory available.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh, *nbh;
	struct inode *inode = btnc->host;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	int err;

	if (oldkey == newkey)
		return 0;

	obh = ctxt->bh;
	ctxt->newbh = NULL;

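	/*
	 * A whole-folio move is attempted only when the block size equals
	 * the page size, so that the folio holds exactly one node block.
	 */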
	if (inode->i_blkbits == PAGE_SHIFT) {
		struct folio *ofolio = obh->b_folio;

		folio_lock(ofolio);
retry:
		/* BUG_ON(oldkey != obh->b_folio->index); */
		if (unlikely(oldkey != ofolio->index))
			NILFS_FOLIO_BUG(ofolio,
					"invalid oldkey %lld (newkey=%lld)",
					(unsigned long long)oldkey,
					(unsigned long long)newkey);

		xa_lock_irq(&btnc->i_pages);
		err = __xa_insert(&btnc->i_pages, newkey, ofolio, GFP_NOFS);
		xa_unlock_irq(&btnc->i_pages);
		/*
		 * Note: folio->index will not change to newkey until
		 * nilfs_btnode_commit_change_key() is called.  The folio
		 * lock is held to protect the folio in this intermediate
		 * state.
		 */
		if (!err)
			return 0;
		else if (err != -EBUSY)
			goto failed_unlock;

		err = invalidate_inode_pages2_range(btnc, newkey, newkey);
		if (!err)
			goto retry;
		/* fallback to copy mode */
		folio_unlock(ofolio);
	}

	nbh = nilfs_btnode_create_block(btnc, newkey);
	if (IS_ERR(nbh))
		return PTR_ERR(nbh);

	BUG_ON(nbh == obh);
	ctxt->newbh = nbh;
	return 0;

failed_unlock:
	folio_unlock(obh->b_folio);
	return err;
}

/**
 * nilfs_btnode_commit_change_key - commit the change of the search key of
 * a b-tree node block
 * @btnc: page cache in which the b-tree node block is buffered
 * @ctxt: structure for exchanging context information for key change
 *
 * nilfs_btnode_commit_change_key() executes the key change based on the
 * context @ctxt prepared by nilfs_btnode_prepare_change_key().  If no valid
 * block buffer is prepared in "newbh" of @ctxt (i.e., a full folio move),
 * this function removes the folio from the old index and completes the move.
 * Otherwise, it copies the block data and inherited flag states of "oldbh"
 * to "newbh" and clears the "oldbh" from the cache.  In either case, the
 * relocated buffer is marked as dirty.
 *
 * As with nilfs_btnode_prepare_change_key(), the current implementation does
 * not support folio sizes larger than the page size.
 */
void nilfs_btnode_commit_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	struct folio *ofolio;

	if (oldkey == newkey)
		return;

	if (nbh == NULL) {	/* blocksize == pagesize */
		ofolio = obh->b_folio;
		if (unlikely(oldkey != ofolio->index))
			NILFS_FOLIO_BUG(ofolio,
					"invalid oldkey %lld (newkey=%lld)",
					(unsigned long long)oldkey,
					(unsigned long long)newkey);
		mark_buffer_dirty(obh);

		xa_lock_irq(&btnc->i_pages);
		__xa_erase(&btnc->i_pages, oldkey);
		__xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY);
		xa_unlock_irq(&btnc->i_pages);

		ofolio->index = obh->b_blocknr = newkey;
		folio_unlock(ofolio);
	} else {
		nilfs_copy_buffer(nbh, obh);
		mark_buffer_dirty(nbh);

		nbh->b_blocknr = newkey;
		ctxt->bh = nbh;
		nilfs_btnode_delete(obh); /* will decrement bh->b_count */
	}
}

/**
 * nilfs_btnode_abort_change_key - abort the change of the search key of a
 * b-tree node block
 * @btnc: page cache in which the b-tree node block is buffered
 * @ctxt: structure for exchanging context information for key change
 *
 * nilfs_btnode_abort_change_key() cancels the key change associated with the
 * context @ctxt prepared via nilfs_btnode_prepare_change_key() and performs
 * any necessary cleanup.  If no valid block buffer is prepared in "newbh" of
 * @ctxt, this function removes the folio from the destination index and aborts
 * the move.  Otherwise, it clears "newbh" from the cache.
 *
 * As with nilfs_btnode_prepare_change_key(), the current implementation does
 * not support folio sizes larger than the page size.
 */
void nilfs_btnode_abort_change_key(struct address_space *btnc,
				   struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *nbh = ctxt->newbh;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;

	if (oldkey == newkey)
		return;

	if (nbh == NULL) {	/* blocksize == pagesize */
		xa_erase_irq(&btnc->i_pages, newkey);
		folio_unlock(ctxt->bh->b_folio);
	} else {
		/*
		 * When canceling a buffer that a prepare operation has
		 * allocated to copy a node block to another location, use
		 * nilfs_btnode_delete() to initialize and release the buffer
		 * so that the buffer flags will not be in an inconsistent
		 * state when it is reallocated.
		 */
		nilfs_btnode_delete(nbh);
	}
}
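
/*
 * Usage sketch (illustrative): the three change-key helpers above form
 * a prepare/commit/abort sequence.  A caller moving a node block from
 * oldkey to newkey would do roughly the following, where
 * make_change_durable() stands in for whatever work must succeed
 * between the two phases (a hypothetical placeholder, not a real
 * function).  After commit, ctxt.bh points at the relocated buffer.
 *
 *	struct nilfs_btnode_chkey_ctxt ctxt;
 *	int err;
 *
 *	ctxt.oldkey = oldkey;
 *	ctxt.newkey = newkey;
 *	ctxt.bh = bh;
 *	err = nilfs_btnode_prepare_change_key(btnc, &ctxt);
 *	if (err)
 *		return err;
 *	err = make_change_durable();
 *	if (err) {
 *		nilfs_btnode_abort_change_key(btnc, &ctxt);
 *		return err;
 *	}
 *	nilfs_btnode_commit_change_key(btnc, &ctxt);
 *	bh = ctxt.bh;
 */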