// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

static inline
bool is_bnode_offset_valid(struct hfs_bnode *node, int off)
{
	bool is_valid = off < node->tree->node_size;

	if (!is_valid) {
		pr_err("requested invalid offset: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %d\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off);
	}

	return is_valid;
}

static inline
int check_and_correct_requested_length(struct hfs_bnode *node, int off, int len)
{
	unsigned int node_size;

	if (!is_bnode_offset_valid(node, off))
		return 0;

	node_size = node->tree->node_size;

	if ((off + len) > node_size) {
		int new_len = (int)node_size - off;

		pr_err("requested length has been corrected: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %d, "
		       "requested_len %d, corrected_len %d\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len, new_len);

		return new_len;
	}

	return len;
}

/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %d, len %d\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	len = check_and_correct_requested_length(node, off, len);

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy_from_page(buf, *pagep, off, l);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy_from_page(buf, *++pagep, 0, l);
	}
}

u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 1);
	return data;
}

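/*
 * Keys are stored as length-prefixed records: leaf nodes, and index
 * nodes of trees with variable-sized index keys (including the
 * attributes tree), carry a 16-bit key length in front of the key
 * data, while other index nodes always use tree->max_key_len. The
 * helper below reads the length word plus the key itself into the
 * caller's buffer, zeroing it if the stored length looks bogus.
 */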
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS ||
	    node->tree->cnid == HFSPLUS_ATTR_CNID)
		key_len = hfs_bnode_read_u16(node, off) + 2;
	else
		key_len = tree->max_key_len + 2;

	if (key_len > sizeof(hfsplus_btree_key) || key_len < 1) {
		memset(key, 0, sizeof(hfsplus_btree_key));
		pr_err("hfsplus: Invalid key length: %d\n", key_len);
		return;
	}

	hfs_bnode_read(node, key, off, key_len);
}

void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %d, len %d\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	len = check_and_correct_requested_length(node, off, len);

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy_to_page(*pagep, off, buf, l);
	set_page_dirty(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy_to_page(*++pagep, 0, buf, l);
		set_page_dirty(*pagep);
	}
}

void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	/* TODO: optimize later... */
	hfs_bnode_write(node, &v, off, 2);
}

void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page **pagep;
	int l;

	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %d, len %d\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	len = check_and_correct_requested_length(node, off, len);

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memzero_page(*pagep, off, l);
	set_page_dirty(*pagep);

	while ((len -= l) != 0) {
		l = min_t(int, len, PAGE_SIZE);
		memzero_page(*++pagep, 0, l);
		set_page_dirty(*pagep);
	}
}

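/*
 * The copy/move helpers below operate directly on the node's backing
 * pages. When source and destination share the same offset within a
 * page, data is transferred a full page at a time; otherwise the
 * transfer is split at whichever page boundary is reached first.
 */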
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		    struct hfs_bnode *src_node, int src, int len)
{
	struct page **src_page, **dst_page;
	int l;

	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;

	len = check_and_correct_requested_length(src_node, src, len);
	len = check_and_correct_requested_length(dst_node, dst, len);

	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page + (src >> PAGE_SHIFT);
	src &= ~PAGE_MASK;
	dst_page = dst_node->page + (dst >> PAGE_SHIFT);
	dst &= ~PAGE_MASK;

	if (src == dst) {
		l = min_t(int, len, PAGE_SIZE - src);
		memcpy_page(*dst_page, src, *src_page, src, l);
		set_page_dirty(*dst_page);

		while ((len -= l) != 0) {
			l = min_t(int, len, PAGE_SIZE);
			memcpy_page(*++dst_page, 0, *++src_page, 0, l);
			set_page_dirty(*dst_page);
		}
	} else {
		void *src_ptr, *dst_ptr;

		do {
			dst_ptr = kmap_local_page(*dst_page) + dst;
			src_ptr = kmap_local_page(*src_page) + src;
			if (PAGE_SIZE - src < PAGE_SIZE - dst) {
				l = PAGE_SIZE - src;
				src = 0;
				dst += l;
			} else {
				l = PAGE_SIZE - dst;
				src += l;
				dst = 0;
			}
			l = min(len, l);
			memcpy(dst_ptr, src_ptr, l);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);
			if (!dst)
				dst_page++;
			else
				src_page++;
		} while ((len -= l));
	}
}

void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page **src_page, **dst_page;
	void *src_ptr, *dst_ptr;
	int l;

	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;

	len = check_and_correct_requested_length(node, src, len);
	len = check_and_correct_requested_length(node, dst, len);

	src += node->page_offset;
	dst += node->page_offset;
	if (dst > src) {
		src += len - 1;
		src_page = node->page + (src >> PAGE_SHIFT);
		src = (src & ~PAGE_MASK) + 1;
		dst += len - 1;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst = (dst & ~PAGE_MASK) + 1;

		if (src == dst) {
			while (src < len) {
				dst_ptr = kmap_local_page(*dst_page);
				src_ptr = kmap_local_page(*src_page);
				memmove(dst_ptr, src_ptr, src);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				len -= src;
				src = PAGE_SIZE;
				src_page--;
				dst_page--;
			}
			src -= len;
			dst_ptr = kmap_local_page(*dst_page);
			src_ptr = kmap_local_page(*src_page);
			memmove(dst_ptr + src, src_ptr + src, len);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);
		} else {
			do {
				dst_ptr = kmap_local_page(*dst_page) + dst;
				src_ptr = kmap_local_page(*src_page) + src;
				if (src < dst) {
					l = src;
					src = PAGE_SIZE;
					dst -= l;
				} else {
					l = dst;
					src -= l;
					dst = PAGE_SIZE;
				}
				l = min(len, l);
				memmove(dst_ptr - l, src_ptr - l, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				if (dst == PAGE_SIZE)
					dst_page--;
				else
					src_page--;
			} while ((len -= l));
		}
	} else {
		src_page = node->page + (src >> PAGE_SHIFT);
		src &= ~PAGE_MASK;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst &= ~PAGE_MASK;

		if (src == dst) {
			l = min_t(int, len, PAGE_SIZE - src);

			dst_ptr = kmap_local_page(*dst_page) + src;
			src_ptr = kmap_local_page(*src_page) + src;
			memmove(dst_ptr, src_ptr, l);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);

			while ((len -= l) != 0) {
				l = min_t(int, len, PAGE_SIZE);
				dst_ptr = kmap_local_page(*++dst_page);
				src_ptr = kmap_local_page(*++src_page);
				memmove(dst_ptr, src_ptr, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
			}
		} else {
			do {
				dst_ptr = kmap_local_page(*dst_page) + dst;
				src_ptr = kmap_local_page(*src_page) + src;
				if (PAGE_SIZE - src <
				    PAGE_SIZE - dst) {
					l = PAGE_SIZE - src;
					src = 0;
					dst += l;
				} else {
					l = PAGE_SIZE - dst;
					src += l;
					dst = 0;
				}
				l = min(len, l);
				memmove(dst_ptr, src_ptr, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				if (!dst)
					dst_page++;
				else
					src_page++;
			} while ((len -= l));
		}
	}
}

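/*
 * Debug dump of a node: the descriptor fields are printed first,
 * followed by the record offset table that grows backwards from the
 * end of the node (the last 16-bit word is the offset of record 0,
 * and the final value printed is the start of free space).
 */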
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg(BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
			    node->tree->cnid == HFSPLUS_ATTR_CNID)
				tmp = hfs_bnode_read_u16(node, key_off) + 2;
			else
				tmp = node->tree->max_key_len + 2;
			hfs_dbg_cont(BNODE_MOD, " (%d", tmp);
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u16(node, key_off);
			hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
		}
	}
	hfs_dbg_cont(BNODE_MOD, "\n");
}

void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid,
				offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid,
				offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	/* move down? */
	if (!node->prev && !node->next)
		hfs_dbg(BNODE_MOD, "hfs_btree_del_level\n");
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}

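/*
 * In-memory nodes are cached per tree in a small hash table protected
 * by tree->hash_lock; hfs_bnode_hash() folds the 32-bit node number
 * down to NODE_HASH_SIZE buckets.
 */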
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n",
		       cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
	     node; node = node->next_hash)
		if (node->this == cnid)
			return node;
	return NULL;
}

static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n",
		       cnid);
		return NULL;
	}

	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq,
			   !test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid << tree->node_size_shift;
	block = off >> PAGE_SHIFT;
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
		page = read_mapping_page(mapping, block, NULL);
		if (IS_ERR(page))
			goto fail;
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}

void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

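/*
 * Before a freshly read node is handed out, hfs_bnode_find() checks
 * that its type matches the height in the descriptor and that every
 * record offset at the end of the node is increasing, 16-bit aligned
 * and within the node, so later accesses can trust the offset table.
 */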
/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq,
			   !test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
					 node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap_local(desc);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u16(node, off) + 2;
		if (key_size >= entry_size || key_size & 1)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}

void hfs_bnode_free(struct hfs_bnode *node)
{
	int i;

	for (i = 0; i < node->tree->pages_per_bnode; i++)
		if (node->page[i])
			put_page(node->page[i]);
	kfree(node);
}

struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memzero_page(*pagep, node->page_offset,
		     min_t(int, PAGE_SIZE, tree->node_size));
	set_page_dirty(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memzero_page(*++pagep, 0, PAGE_SIZE);
		set_page_dirty(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}

void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
	}
}

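/*
 * Node lifetime is reference counted; hfs_bnode_get()/hfs_bnode_put()
 * pair around each use. Dropping the last reference to a node marked
 * HFS_BNODE_DELETED unhashes it, returns its block to the tree's
 * allocation map and, when required, zeroes it on disk first.
 */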
/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			if (hfs_bnode_need_zeroout(tree))
				hfs_bnode_clear(node, 0, tree->node_size);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}

/*
 * Unused nodes have to be zeroed if this is the catalog tree and
 * a corresponding flag in the volume header is set.
 */
bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
{
	struct super_block *sb = tree->inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);

	return tree->cnid == HFSPLUS_CAT_CNID &&
	       volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
}