// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS B-tree node cache
 *
 * Copyright (C) 2005-2008, Nippon Telegraph and Telephone Corporation.
 *
 * Originally written by Seiji Kihara.
 * Fully revised by Ryusuke Konishi for stabilization and simplification.
 *
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "mdt.h"
#include "dat.h"
#include "page.h"
#include "btnode.h"


/**
 * nilfs_init_btnc_inode - initialize B-tree node cache inode
 * @btnc_inode: inode to be initialized
 *
 * nilfs_init_btnc_inode() sets up an inode for B-tree node cache.
 */
void nilfs_init_btnc_inode(struct inode *btnc_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(btnc_inode);

	btnc_inode->i_mode = S_IFREG;
	ii->i_flags = 0;
	memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
	mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
}

void nilfs_btnode_cache_clear(struct address_space *btnc)
{
	invalidate_mapping_pages(btnc, 0, -1);
	truncate_inode_pages(btnc, 0);
}

struct buffer_head *
nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
{
	struct inode *inode = btnc->host;
	struct buffer_head *bh;

	bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
		     buffer_dirty(bh))) {
		/*
		 * The block buffer at the specified new address was already
		 * in use.  This can happen if it is a virtual block number
		 * and has been reallocated due to corruption of the bitmap
		 * used to manage its allocation state (if not, the buffer
		 * clearing of an abandoned b-tree node is missing somewhere).
		 */
		nilfs_error(inode->i_sb,
			    "state inconsistency probably due to duplicate use of b-tree node block address %llu (ino=%lu)",
			    (unsigned long long)blocknr, inode->i_ino);
		goto failed;
	}
	memset(bh->b_data, 0, i_blocksize(inode));
	bh->b_blocknr = blocknr;
	set_buffer_mapped(bh);
	set_buffer_uptodate(bh);

	folio_unlock(bh->b_folio);
	folio_put(bh->b_folio);
	return bh;

failed:
	folio_unlock(bh->b_folio);
	folio_put(bh->b_folio);
	brelse(bh);
	return ERR_PTR(-EIO);
}

int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
			      sector_t pblocknr, blk_opf_t opf,
			      struct buffer_head **pbh, sector_t *submit_ptr)
{
	struct buffer_head *bh;
	struct inode *inode = btnc->host;
	struct folio *folio;
	int err;

	bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
	if (unlikely(!bh))
		return -ENOMEM;

	err = -EEXIST; /* internal code */
	folio = bh->b_folio;

	if (buffer_uptodate(bh) || buffer_dirty(bh))
		goto found;

	if (pblocknr == 0) {
		pblocknr = blocknr;
		if (inode->i_ino != NILFS_DAT_INO) {
			struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

			/* blocknr is a virtual block number */
			err = nilfs_dat_translate(nilfs->ns_dat, blocknr,
						  &pblocknr);
			if (unlikely(err)) {
				brelse(bh);
				goto out_locked;
			}
		}
	}

	if (opf & REQ_RAHEAD) {
		if (pblocknr != *submit_ptr + 1 || !trylock_buffer(bh)) {
			err = -EBUSY; /* internal code */
			brelse(bh);
			goto out_locked;
		}
	} else { /* opf == REQ_OP_READ */
		lock_buffer(bh);
	}
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		err = -EEXIST; /* internal code */
		goto found;
	}
	set_buffer_mapped(bh);
	bh->b_blocknr = pblocknr; /* set block address for read */
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(opf, bh);
	bh->b_blocknr = blocknr; /* set back to the given block address */
	*submit_ptr = pblocknr;
	err = 0;
found:
	*pbh = bh;

out_locked:
	folio_unlock(folio);
	folio_put(folio);
	return err;
}
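/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * synchronous read of a single b-tree node block through
 * nilfs_btnode_submit_block().  The helper name below is hypothetical;
 * in-tree callers layer readahead on top of this pattern.  Passing 0 as
 * the pblocknr argument lets the function translate @blocknr through the
 * DAT when the cache does not belong to the DAT inode itself.
 */
static int __maybe_unused nilfs_btnode_read_block_sketch(
		struct address_space *btnc, __u64 blocknr,
		struct buffer_head **pbh)
{
	sector_t submit_ptr = 0;	/* tracks the last submitted disk block */
	struct buffer_head *bh;
	int err;

	err = nilfs_btnode_submit_block(btnc, blocknr, 0, REQ_OP_READ, &bh,
					&submit_ptr);
	if (err == -EEXIST) {
		/* internal code: buffer already up to date, no I/O issued */
		*pbh = bh;
		return 0;
	}
	if (err)
		return err;	/* -ENOMEM or a DAT translation failure */

	/* a read was submitted; wait for completion and check the result */
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		brelse(bh);
		return -EIO;
	}
	*pbh = bh;
	return 0;
}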
/**
 * nilfs_btnode_delete - delete B-tree node buffer
 * @bh: buffer to be deleted
 *
 * nilfs_btnode_delete() invalidates the specified buffer and deletes the
 * page including the buffer if the page becomes unbusy.
 */
void nilfs_btnode_delete(struct buffer_head *bh)
{
	struct address_space *mapping;
	struct folio *folio = bh->b_folio;
	pgoff_t index = folio->index;
	int still_dirty;

	folio_get(folio);
	folio_lock(folio);
	folio_wait_writeback(folio);

	nilfs_forget_buffer(bh);
	still_dirty = folio_test_dirty(folio);
	mapping = folio->mapping;
	folio_unlock(folio);
	folio_put(folio);

	if (!still_dirty && mapping)
		invalidate_inode_pages2_range(mapping, index, index);
}

/**
 * nilfs_btnode_prepare_change_key - prepare to change the search key of a
 *                                   b-tree node block
 * @btnc: page cache in which the b-tree node block is buffered
 * @ctxt: structure for exchanging context information for key change
 *
 * nilfs_btnode_prepare_change_key() prepares to move the contents of the
 * b-tree node block of the old key given in the "oldkey" member of @ctxt to
 * the position of the new key given in the "newkey" member of @ctxt in the
 * page cache @btnc.  Here, the key of the block is an index in units of
 * blocks, and if the page and block sizes match, it matches the page index
 * in the page cache.
 *
 * If the page size and block size match, this function attempts to move the
 * entire folio, and in preparation for this, inserts the original folio into
 * the new index of the cache.  If this insertion fails or if the page size
 * and block size are different, it falls back to a copy preparation using
 * nilfs_btnode_create_block(), inserts a new block at the position
 * corresponding to "newkey", and stores the buffer head pointer in the
 * "newbh" member of @ctxt.
 *
 * Note that the current implementation does not support folio sizes larger
 * than the page size.
 *
 * Return: 0 on success, or the following negative error code on failure.
 * * %-EIO	- I/O error (metadata corruption).
 * * %-ENOMEM	- Insufficient memory available.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh, *nbh;
	struct inode *inode = btnc->host;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	int err;

	if (oldkey == newkey)
		return 0;

	obh = ctxt->bh;
	ctxt->newbh = NULL;

	if (inode->i_blkbits == PAGE_SHIFT) {
		struct folio *ofolio = obh->b_folio;

		folio_lock(ofolio);
retry:
		/* BUG_ON(oldkey != obh->b_folio->index); */
		if (unlikely(oldkey != ofolio->index))
			NILFS_FOLIO_BUG(ofolio,
					"invalid oldkey %lld (newkey=%lld)",
					(unsigned long long)oldkey,
					(unsigned long long)newkey);

		xa_lock_irq(&btnc->i_pages);
		err = __xa_insert(&btnc->i_pages, newkey, ofolio, GFP_NOFS);
		xa_unlock_irq(&btnc->i_pages);
		/*
		 * Note: folio->index will not change to newkey until
		 * nilfs_btnode_commit_change_key() is called.  To protect
		 * the folio in this intermediate state, the folio lock
		 * is held.
		 */
		if (!err)
			return 0;
		else if (err != -EBUSY)
			goto failed_unlock;

		err = invalidate_inode_pages2_range(btnc, newkey, newkey);
		if (!err)
			goto retry;
		/* fallback to copy mode */
		folio_unlock(ofolio);
	}

	nbh = nilfs_btnode_create_block(btnc, newkey);
	if (IS_ERR(nbh))
		return PTR_ERR(nbh);

	BUG_ON(nbh == obh);
	ctxt->newbh = nbh;
	return 0;

failed_unlock:
	folio_unlock(obh->b_folio);
	return err;
}
/**
 * nilfs_btnode_commit_change_key - commit the change of the search key of
 *                                  a b-tree node block
 * @btnc: page cache in which the b-tree node block is buffered
 * @ctxt: structure for exchanging context information for key change
 *
 * nilfs_btnode_commit_change_key() executes the key change based on the
 * context @ctxt prepared by nilfs_btnode_prepare_change_key().  If no valid
 * block buffer is prepared in "newbh" of @ctxt (i.e., a full folio move),
 * this function removes the folio from the old index and completes the move.
 * Otherwise, it copies the block data and inherited flag states of "oldbh"
 * to "newbh" and clears the "oldbh" from the cache.  In either case, the
 * relocated buffer is marked as dirty.
 *
 * As with nilfs_btnode_prepare_change_key(), the current implementation does
 * not support folio sizes larger than the page size.
 */
void nilfs_btnode_commit_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	struct folio *ofolio;

	if (oldkey == newkey)
		return;

	if (nbh == NULL) {	/* blocksize == pagesize */
		ofolio = obh->b_folio;
		if (unlikely(oldkey != ofolio->index))
			NILFS_FOLIO_BUG(ofolio,
					"invalid oldkey %lld (newkey=%lld)",
					(unsigned long long)oldkey,
					(unsigned long long)newkey);
		mark_buffer_dirty(obh);

		xa_lock_irq(&btnc->i_pages);
		__xa_erase(&btnc->i_pages, oldkey);
		__xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY);
		xa_unlock_irq(&btnc->i_pages);

		ofolio->index = obh->b_blocknr = newkey;
		folio_unlock(ofolio);
	} else {
		nilfs_copy_buffer(nbh, obh);
		mark_buffer_dirty(nbh);

		nbh->b_blocknr = newkey;
		ctxt->bh = nbh;
		nilfs_btnode_delete(obh); /* will decrement bh->b_count */
	}
}

/**
 * nilfs_btnode_abort_change_key - abort the change of the search key of a
 *                                 b-tree node block
 * @btnc: page cache in which the b-tree node block is buffered
 * @ctxt: structure for exchanging context information for key change
 *
 * nilfs_btnode_abort_change_key() cancels the key change associated with the
 * context @ctxt prepared via nilfs_btnode_prepare_change_key() and performs
 * any necessary cleanup.  If no valid block buffer is prepared in "newbh" of
 * @ctxt, this function removes the folio from the destination index and aborts
 * the move.  Otherwise, it clears "newbh" from the cache.
 *
 * As with nilfs_btnode_prepare_change_key(), the current implementation does
 * not support folio sizes larger than the page size.
 */
void nilfs_btnode_abort_change_key(struct address_space *btnc,
				   struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *nbh = ctxt->newbh;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;

	if (oldkey == newkey)
		return;

	if (nbh == NULL) {	/* blocksize == pagesize */
		xa_erase_irq(&btnc->i_pages, newkey);
		folio_unlock(ctxt->bh->b_folio);
	} else {
		/*
		 * When canceling a buffer that a prepare operation has
		 * allocated to copy a node block to another location, use
		 * nilfs_btnode_delete() to initialize and release the buffer
		 * so that the buffer flags will not be in an inconsistent
		 * state when it is reallocated.
		 */
		nilfs_btnode_delete(nbh);
	}
}
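/*
 * Example (illustrative sketch, not part of the original file): how the
 * prepare/commit/abort functions above are meant to compose when relocating
 * a b-tree node block from @oldkey to @newkey within the same node cache.
 * The helper name is hypothetical; the real caller is the b-tree update
 * path, which interleaves other fallible preparations between the prepare
 * and commit steps.
 */
static int __maybe_unused nilfs_btnode_move_block_sketch(
		struct address_space *btnc, struct buffer_head **pbh,
		__u64 oldkey, __u64 newkey)
{
	struct nilfs_btnode_chkey_ctxt ctxt = {
		.oldkey = oldkey,
		.newkey = newkey,
		.bh = *pbh,
	};
	int err;

	err = nilfs_btnode_prepare_change_key(btnc, &ctxt);
	if (err)
		return err;	/* nothing to undo on prepare failure */

	/*
	 * Other preparations that may fail would go here; if one of them
	 * fails, the caller must invoke
	 * nilfs_btnode_abort_change_key(btnc, &ctxt) instead of committing.
	 */

	nilfs_btnode_commit_change_key(btnc, &ctxt);
	*pbh = ctxt.bh;		/* commit may substitute the copied buffer */
	return 0;
}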