// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/hfs/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/swap.h>

#include "btree.h"

void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page *page;
	int pagenum;
	int bytes_read;
	int bytes_to_read;

	off += node->page_offset;
	pagenum = off >> PAGE_SHIFT;
	off &= ~PAGE_MASK; /* compute page offset for the first page */

	for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
		if (pagenum >= node->tree->pages_per_bnode)
			break;
		page = node->page[pagenum];
		bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);

		memcpy_from_page(buf + bytes_read, page, off, bytes_to_read);

		pagenum++;
		off = 0; /* page offset only applies to the first page */
	}
}

u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 1);
	return data;
}

void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS)
		key_len = hfs_bnode_read_u8(node, off) + 1;
	else
		key_len = tree->max_key_len + 1;

	if (key_len > sizeof(hfs_btree_key) || key_len < 1) {
		memset(key, 0, sizeof(hfs_btree_key));
		pr_err("hfs: Invalid key length: %d\n", key_len);
		return;
	}

	hfs_bnode_read(node, key, off, key_len);
}

void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memcpy_to_page(page, off, buf, len);
	set_page_dirty(page);
}

void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	// optimize later...
	hfs_bnode_write(node, &v, off, 2);
}

void hfs_bnode_write_u8(struct hfs_bnode *node, int off, u8 data)
{
	// optimize later...
	hfs_bnode_write(node, &data, off, 1);
}

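/*
 * Note: unlike hfs_bnode_read(), the modifying helpers below
 * (hfs_bnode_write(), hfs_bnode_clear(), hfs_bnode_copy() and
 * hfs_bnode_move()) only touch page[0]; in practice a classic HFS
 * B*-tree node fits entirely within its first page.
 */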
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memzero_page(page, off, len);
	set_page_dirty(page);
}

void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		    struct hfs_bnode *src_node, int src, int len)
{
	struct page *src_page, *dst_page;

	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page[0];
	dst_page = dst_node->page[0];

	memcpy_page(dst_page, dst, src_page, src, len);
	set_page_dirty(dst_page);
}

void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page *page;
	void *ptr;

	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	page = node->page[0];
	ptr = kmap_local_page(page);
	memmove(ptr + dst, ptr + src, len);
	kunmap_local(ptr);
	set_page_dirty(page);
}

void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg_cont(BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
				tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1;
			else
				tmp = node->tree->max_key_len + 1;
			hfs_dbg_cont(BNODE_MOD, " (%d,%d",
				     tmp, hfs_bnode_read_u8(node, key_off));
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u8(node, key_off);
			hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
		}
	}
	hfs_dbg_cont(BNODE_MOD, "\n");
}

void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	// move down?
	if (!node->prev && !node->next) {
		printk(KERN_DEBUG "hfs_btree_del_level\n");
	}
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}

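/*
 * In-memory nodes live in a small per-tree hash table indexed by node
 * number and protected by tree->hash_lock; hfs_bnode_hash() folds the
 * node number down to a NODE_HASH_SIZE bucket index.
 */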
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
	     node; node = node->next_hash) {
		if (node->this == cnid) {
			return node;
		}
	}
	return NULL;
}

static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		hfs_bnode_get(node2);
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid * tree->node_size;
	block = off >> PAGE_SHIFT;
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; i++) {
		page = read_mapping_page(mapping, block++, NULL);
		if (IS_ERR(page))
			goto fail;
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}

void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

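	/*
	 * First time this node is brought into memory: pull the node
	 * descriptor out of the first page and sanity-check the type,
	 * height and record offset table before clearing HFS_BNODE_NEW.
	 */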
	desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
					 node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap_local(desc);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u8(node, off) + 1;
		if (key_size >= entry_size /*|| key_size & 1*/)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}

void hfs_bnode_free(struct hfs_bnode *node)
{
	int i;

	for (i = 0; i < node->tree->pages_per_bnode; i++)
		if (node->page[i])
			put_page(node->page[i]);
	kfree(node);
}

struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memzero_page(*pagep, node->page_offset,
		     min((int)PAGE_SIZE, (int)tree->node_size));
	set_page_dirty(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memzero_page(*++pagep, 0, PAGE_SIZE);
		set_page_dirty(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}

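/*
 * Nodes are reference counted: hfs_bnode_find() and hfs_bnode_create()
 * return a node with an elevated refcnt, which callers drop again with
 * hfs_bnode_put().  The final put releases the node if it was marked
 * HFS_BNODE_DELETED.
 */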
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}