xref: /linux/fs/hfsplus/btree.c (revision 63584d76765bb3e212f70c4c3951ea785fabef1b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/hfsplus/btree.c
4  *
5  * Copyright (C) 2001
6  * Brad Boyer (flar@allandria.com)
7  * (C) 2003 Ardis Technologies <roman@ardistech.com>
8  *
9  * Handle opening/closing btree
10  */
11 
12 #include <linux/slab.h>
13 #include <linux/pagemap.h>
14 #include <linux/log2.h>
15 
16 #include "hfsplus_fs.h"
17 #include "hfsplus_raw.h"
18 
/*
 * The initial clump size calculation was taken from
 * http://opensource.apple.com/tarballs/diskdev_cmds/
 */
#define CLUMP_ENTRIES	15

/*
 * Clump sizes (in MB) indexed by volume size, one row per size class.
 * The table is read-only: mark it const so it lives in .rodata and
 * cannot be modified by accident.
 */
static const short clumptbl[CLUMP_ENTRIES * 3] = {
/*
 *	    Volume	Attributes	 Catalog	 Extents
 *	     Size	Clump (MB)	Clump (MB)	Clump (MB)
 */
	/*   1GB */	  4,		  4,		 4,
	/*   2GB */	  6,		  6,		 4,
	/*   4GB */	  8,		  8,		 4,
	/*   8GB */	 11,		 11,		 5,
	/*
	 * For volumes 16GB and larger, we want to make sure that a full OS
	 * install won't require fragmentation of the Catalog or Attributes
	 * B-trees.  We do this by making the clump sizes sufficiently large,
	 * and by leaving a gap after the B-trees for them to grow into.
	 *
	 * For SnowLeopard 10A298, a FullNetInstall with all packages selected
	 * results in:
	 * Catalog B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       31616
	 *	freeNodes:         1978
	 * (used = 231.55 MB)
	 * Attributes B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       63232
	 *	freeNodes:          958
	 * (used = 486.52 MB)
	 *
	 * We also want Time Machine backup volumes to have a sufficiently
	 * large clump size to reduce fragmentation.
	 *
	 * The series of numbers for Catalog and Attribute form a geometric
	 * series. For Catalog (16GB to 512GB), each term is 8**(1/5) times
	 * the previous term.  For Attributes (16GB to 512GB), each term is
	 * 4**(1/5) times the previous term.  For 1TB to 16TB, each term is
	 * 2**(1/5) times the previous term.
	 */
	/*  16GB */	 64,		 32,		 5,
	/*  32GB */	 84,		 49,		 6,
	/*  64GB */	111,		 74,		 7,
	/* 128GB */	147,		111,		 8,
	/* 256GB */	194,		169,		 9,
	/* 512GB */	256,		256,		11,
	/*   1TB */	294,		294,		14,
	/*   2TB */	338,		338,		16,
	/*   4TB */	388,		388,		20,
	/*   8TB */	446,		446,		25,
	/*  16TB */	512,		512,		32
};
74 
75 u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
76 					u64 sectors, int file_id)
77 {
78 	u32 mod = max(node_size, block_size);
79 	u32 clump_size;
80 	int column;
81 	int i;
82 
83 	/* Figure out which column of the above table to use for this file. */
84 	switch (file_id) {
85 	case HFSPLUS_ATTR_CNID:
86 		column = 0;
87 		break;
88 	case HFSPLUS_CAT_CNID:
89 		column = 1;
90 		break;
91 	default:
92 		column = 2;
93 		break;
94 	}
95 
96 	/*
97 	 * The default clump size is 0.8% of the volume size. And
98 	 * it must also be a multiple of the node and block size.
99 	 */
100 	if (sectors < 0x200000) {
101 		clump_size = sectors << 2;	/*  0.8 %  */
102 		if (clump_size < (8 * node_size))
103 			clump_size = 8 * node_size;
104 	} else {
105 		/* turn exponent into table index... */
106 		for (i = 0, sectors = sectors >> 22;
107 		     sectors && (i < CLUMP_ENTRIES - 1);
108 		     ++i, sectors = sectors >> 1) {
109 			/* empty body */
110 		}
111 
112 		clump_size = clumptbl[column + (i) * 3] * 1024 * 1024;
113 	}
114 
115 	/*
116 	 * Round the clump size to a multiple of node and block size.
117 	 * NOTE: This rounds down.
118 	 */
119 	clump_size /= mod;
120 	clump_size *= mod;
121 
122 	/*
123 	 * Rounding down could have rounded down to 0 if the block size was
124 	 * greater than the clump size.  If so, just use one block or node.
125 	 */
126 	if (clump_size == 0)
127 		clump_size = mod;
128 
129 	return clump_size;
130 }
131 
/* Context for iterating b-tree map pages
 * @page_idx: The index of the page within the b-node's page array
 * @off: The byte offset within the mapped page
 * @len: The remaining length of the map record
 */
struct hfs_bmap_ctx {
	unsigned int page_idx;	/* index into node->page[] */
	unsigned int off;	/* byte offset inside that page */
	u16 len;		/* bytes of the map record not yet consumed */
};
142 
143 /*
144  * Finds the specific page containing the requested byte offset within the map
145  * record. Automatically handles the difference between header and map nodes.
146  * Returns the struct page pointer, or an ERR_PTR on failure.
147  * Note: The caller is responsible for mapping/unmapping the returned page.
148  */
149 static struct page *hfs_bmap_get_map_page(struct hfs_bnode *node,
150 					  struct hfs_bmap_ctx *ctx,
151 					  u32 byte_offset)
152 {
153 	u16 rec_idx, off16;
154 	unsigned int page_off;
155 
156 	if (node->this == HFSPLUS_TREE_HEAD) {
157 		if (node->type != HFS_NODE_HEADER) {
158 			pr_err("hfsplus: invalid btree header node\n");
159 			return ERR_PTR(-EIO);
160 		}
161 		rec_idx = HFSPLUS_BTREE_HDR_MAP_REC_INDEX;
162 	} else {
163 		if (node->type != HFS_NODE_MAP) {
164 			pr_err("hfsplus: invalid btree map node\n");
165 			return ERR_PTR(-EIO);
166 		}
167 		rec_idx = HFSPLUS_BTREE_MAP_NODE_REC_INDEX;
168 	}
169 
170 	ctx->len = hfs_brec_lenoff(node, rec_idx, &off16);
171 	if (!ctx->len)
172 		return ERR_PTR(-ENOENT);
173 
174 	if (!is_bnode_offset_valid(node, off16))
175 		return ERR_PTR(-EIO);
176 
177 	ctx->len = check_and_correct_requested_length(node, off16, ctx->len);
178 
179 	if (byte_offset >= ctx->len)
180 		return ERR_PTR(-EINVAL);
181 
182 	page_off = (u32)off16 + node->page_offset + byte_offset;
183 	ctx->page_idx = page_off >> PAGE_SHIFT;
184 	ctx->off = page_off & ~PAGE_MASK;
185 
186 	return node->page[ctx->page_idx];
187 }
188 
189 /**
190  * hfs_bmap_test_bit - test a bit in the b-tree map
191  * @node: the b-tree node containing the map record
192  * @node_bit_idx: the relative bit index within the node's map record
193  *
194  * Returns true if set, false if clear or on failure.
195  */
196 static bool hfs_bmap_test_bit(struct hfs_bnode *node, u32 node_bit_idx)
197 {
198 	struct hfs_bmap_ctx ctx;
199 	struct page *page;
200 	u8 *bmap, byte, mask;
201 
202 	page = hfs_bmap_get_map_page(node, &ctx, node_bit_idx / BITS_PER_BYTE);
203 	if (IS_ERR(page))
204 		return false;
205 
206 	bmap = kmap_local_page(page);
207 	byte = bmap[ctx.off];
208 	kunmap_local(bmap);
209 
210 	mask = 1 << (7 - (node_bit_idx % BITS_PER_BYTE));
211 	return (byte & mask) != 0;
212 }
213 
214 
215 /**
216  * hfs_bmap_clear_bit - clear a bit in the b-tree map
217  * @node: the b-tree node containing the map record
218  * @node_bit_idx: the relative bit index within the node's map record
219  *
220  * Returns 0 on success, -EINVAL if already clear, or negative error code.
221  */
222 static int hfs_bmap_clear_bit(struct hfs_bnode *node, u32 node_bit_idx)
223 {
224 	struct hfs_bmap_ctx ctx;
225 	struct page *page;
226 	u8 *bmap, mask;
227 
228 	page = hfs_bmap_get_map_page(node, &ctx, node_bit_idx / BITS_PER_BYTE);
229 	if (IS_ERR(page))
230 		return PTR_ERR(page);
231 
232 	bmap = kmap_local_page(page);
233 
234 	mask = 1 << (7 - (node_bit_idx % BITS_PER_BYTE));
235 
236 	if (!(bmap[ctx.off] & mask)) {
237 		kunmap_local(bmap);
238 		return -EINVAL;
239 	}
240 
241 	bmap[ctx.off] &= ~mask;
242 	set_page_dirty(page);
243 	kunmap_local(bmap);
244 
245 	return 0;
246 }
247 
248 #define HFS_EXTENT_TREE_NAME  "Extents Overflow File"
249 #define HFS_CATALOG_TREE_NAME "Catalog File"
250 #define HFS_ATTR_TREE_NAME    "Attributes File"
251 #define HFS_UNKNOWN_TREE_NAME "Unknown B-tree"
252 
253 static const char *hfs_btree_name(u32 cnid)
254 {
255 	switch (cnid) {
256 	case HFSPLUS_EXT_CNID:
257 		return HFS_EXTENT_TREE_NAME;
258 	case HFSPLUS_CAT_CNID:
259 		return HFS_CATALOG_TREE_NAME;
260 	case HFSPLUS_ATTR_CNID:
261 		return HFS_ATTR_TREE_NAME;
262 	default:
263 		return HFS_UNKNOWN_TREE_NAME;
264 	}
265 }
266 
/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
	struct hfs_btree *tree;
	struct hfs_btree_header_rec *head;
	struct address_space *mapping;
	struct hfs_bnode *node;
	struct inode *inode;
	struct page *page;
	unsigned int size;

	tree = kzalloc_obj(*tree);
	if (!tree)
		return NULL;

	mutex_init(&tree->tree_lock);
	spin_lock_init(&tree->hash_lock);
	tree->sb = sb;
	tree->cnid = id;
	/* The b-tree lives in a special file whose CNID is @id. */
	inode = hfsplus_iget(sb, id);
	if (IS_ERR(inode))
		goto free_tree;
	tree->inode = inode;

	/* A tree file with no extents at all is unusable. */
	if (!HFSPLUS_I(tree->inode)->first_blocks) {
		pr_err("invalid btree extent records (0 size)\n");
		goto free_inode;
	}

	mapping = tree->inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		goto free_inode;

	/*
	 * Load the header: the header record immediately follows the
	 * node descriptor at the start of the first page.
	 */
	head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
		sizeof(struct hfs_bnode_desc));
	tree->root = be32_to_cpu(head->root);
	tree->leaf_count = be32_to_cpu(head->leaf_count);
	tree->leaf_head = be32_to_cpu(head->leaf_head);
	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
	tree->node_count = be32_to_cpu(head->node_count);
	tree->free_nodes = be32_to_cpu(head->free_nodes);
	tree->attributes = be32_to_cpu(head->attributes);
	tree->node_size = be16_to_cpu(head->node_size);
	tree->max_key_len = be16_to_cpu(head->max_key_len);
	tree->depth = be16_to_cpu(head->depth);

	/* Verify the tree and set the correct compare function */
	switch (id) {
	case HFSPLUS_EXT_CNID:
		/* Extents tree: fixed-size keys, no variable-index flag. */
		if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
			pr_err("invalid extent max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (tree->attributes & HFS_TREE_VARIDXKEYS) {
			pr_err("invalid extent btree flag\n");
			goto fail_page;
		}

		tree->keycmp = hfsplus_ext_cmp_key;
		break;
	case HFSPLUS_CAT_CNID:
		/* Catalog tree: variable-length keys are mandatory. */
		if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
			pr_err("invalid catalog max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
			pr_err("invalid catalog btree flag\n");
			goto fail_page;
		}

		/*
		 * HFSX volumes may use binary (case-sensitive) key
		 * comparison; everything else is case-folded.
		 */
		if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
		    (head->key_type == HFSPLUS_KEY_BINARY))
			tree->keycmp = hfsplus_cat_bin_cmp_key;
		else {
			tree->keycmp = hfsplus_cat_case_cmp_key;
			set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
		}
		break;
	case HFSPLUS_ATTR_CNID:
		if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
			pr_err("invalid attributes max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		tree->keycmp = hfsplus_attr_bin_cmp_key;
		break;
	default:
		pr_err("unknown B*Tree requested\n");
		goto fail_page;
	}

	if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
		pr_err("invalid btree flag\n");
		goto fail_page;
	}

	/* Node size must be a power of two and the tree non-empty. */
	size = tree->node_size;
	if (!is_power_of_2(size))
		goto fail_page;
	if (!tree->node_count)
		goto fail_page;

	tree->node_size_shift = ffs(size) - 1;

	/* Number of pages needed to back one b-node (rounded up). */
	tree->pages_per_bnode =
		(tree->node_size + PAGE_SIZE - 1) >>
		PAGE_SHIFT;

	kunmap_local(head);
	put_page(page);

	/*
	 * Sanity-check the allocation bitmap: bit 0 covers the header
	 * node itself and must always be set on a consistent volume.
	 */
	node = hfs_bnode_find(tree, HFSPLUS_TREE_HEAD);
	if (IS_ERR(node))
		goto free_inode;

	if (!hfs_bmap_test_bit(node, 0)) {
		pr_warn("(%s): %s (cnid 0x%x) map record invalid or bitmap corruption detected, forcing read-only.\n",
				sb->s_id, hfs_btree_name(id), id);
		pr_warn("Run fsck.hfsplus to repair.\n");
		sb->s_flags |= SB_RDONLY;
	}

	hfs_bnode_put(node);

	return tree;

 fail_page:
	kunmap_local(head);
	put_page(page);
 free_inode:
	/*
	 * NOTE(review): presumably restores the default aops so the
	 * failed tree inode's pages are released normally — confirm.
	 */
	tree->inode->i_mapping->a_ops = &hfsplus_aops;
	iput(tree->inode);
 free_tree:
	kfree(tree);
	return NULL;
}
407 
408 /* Release resources used by a btree */
409 void hfs_btree_close(struct hfs_btree *tree)
410 {
411 	struct hfs_bnode *node;
412 	int i;
413 
414 	if (!tree)
415 		return;
416 
417 	for (i = 0; i < NODE_HASH_SIZE; i++) {
418 		while ((node = tree->node_hash[i])) {
419 			tree->node_hash[i] = node->next_hash;
420 			if (atomic_read(&node->refcnt))
421 				pr_crit("node %d:%d "
422 						"still has %d user(s)!\n",
423 					node->tree->cnid, node->this,
424 					atomic_read(&node->refcnt));
425 			hfs_bnode_free(node);
426 			tree->node_hash_cnt--;
427 		}
428 	}
429 	iput(tree->inode);
430 	kfree(tree);
431 }
432 
433 int hfs_btree_write(struct hfs_btree *tree)
434 {
435 	struct hfs_btree_header_rec *head;
436 	struct hfs_bnode *node;
437 	struct page *page;
438 
439 	node = hfs_bnode_find(tree, 0);
440 	if (IS_ERR(node))
441 		/* panic? */
442 		return -EIO;
443 	/* Load the header */
444 	page = node->page[0];
445 	head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
446 		sizeof(struct hfs_bnode_desc));
447 
448 	head->root = cpu_to_be32(tree->root);
449 	head->leaf_count = cpu_to_be32(tree->leaf_count);
450 	head->leaf_head = cpu_to_be32(tree->leaf_head);
451 	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
452 	head->node_count = cpu_to_be32(tree->node_count);
453 	head->free_nodes = cpu_to_be32(tree->free_nodes);
454 	head->attributes = cpu_to_be32(tree->attributes);
455 	head->depth = cpu_to_be16(tree->depth);
456 
457 	kunmap_local(head);
458 	set_page_dirty(page);
459 	hfs_bnode_put(node);
460 	return 0;
461 }
462 
/*
 * Allocate a fresh map node at index @idx and chain it after @prev.
 * Returns the new bnode, or an ERR_PTR from hfs_bnode_create().
 */
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	/* The map node itself consumes one of the free nodes. */
	tree->free_nodes--;
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	/* Persist the forward link in the previous node's descriptor. */
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
	/*
	 * The map record starts at byte 14, right after the descriptor.
	 * 0x8000 sets the first (MSB) bit — presumably marking this new
	 * map node's own slot as allocated; confirm against the on-disk
	 * format spec (TN1150).
	 */
	hfs_bnode_write_u16(node, 14, 0x8000);
	/* Record-offset table at the node's end: record 0 starts at 14... */
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	/* ...and free space starts at node_size - 6 (last bytes reserved). */
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}
495 
496 /* Make sure @tree has enough space for the @rsvd_nodes */
497 int hfs_bmap_reserve(struct hfs_btree *tree, u32 rsvd_nodes)
498 {
499 	struct inode *inode = tree->inode;
500 	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
501 	u32 count;
502 	int res;
503 
504 	lockdep_assert_held(&tree->tree_lock);
505 
506 	if (rsvd_nodes <= 0)
507 		return 0;
508 
509 	while (tree->free_nodes < rsvd_nodes) {
510 		res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
511 		if (res)
512 			return res;
513 		hip->phys_size = inode->i_size =
514 			(loff_t)hip->alloc_blocks <<
515 				HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
516 		hip->fs_blocks =
517 			hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
518 		inode_set_bytes(inode, inode->i_size);
519 		count = inode->i_size >> tree->node_size_shift;
520 		tree->free_nodes += count - tree->node_count;
521 		tree->node_count = count;
522 	}
523 	return 0;
524 }
525 
/*
 * Allocate one node from the b-tree allocation bitmap and return a
 * freshly created bnode for it, or an ERR_PTR on failure.  Walks the
 * chain of map records (header node first, then linked map nodes),
 * growing the chain with a new map node when all existing bits are used.
 */
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct hfs_bmap_ctx ctx;
	struct page *page;
	u32 nidx, idx;
	u8 *data, byte, m;
	int i, res;

	lockdep_assert_held(&tree->tree_lock);

	/* Guarantee at least one free node before scanning the bitmap. */
	res = hfs_bmap_reserve(tree, 1);
	if (res)
		return ERR_PTR(res);

	/* Start at the header node; its map record covers the first nodes. */
	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;

	page = hfs_bmap_get_map_page(node, &ctx, 0);
	if (IS_ERR(page)) {
		res = PTR_ERR(page);
		hfs_bnode_put(node);
		return ERR_PTR(res);
	}

	data = kmap_local_page(page);
	idx = 0;	/* running node index = number of bits scanned */

	for (;;) {
		/* Scan the current map record byte by byte. */
		while (ctx.len) {
			byte = data[ctx.off];
			if (byte != 0xff) {
				/* Find the first clear bit (MSB-first). */
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[ctx.off] |= m;
						set_page_dirty(page);
						kunmap_local(data);
						tree->free_nodes--;
						/* Persist header counters. */
						hfs_btree_write(tree);
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree,
							idx);
					}
				}
			}
			/* Record may straddle pages; hop to the next one. */
			if (++ctx.off >= PAGE_SIZE) {
				kunmap_local(data);
				page = node->page[++ctx.page_idx];
				data = kmap_local_page(page);
				ctx.off = 0;
			}
			idx += 8;
			ctx.len--;
		}
		kunmap_local(data);
		/* This map record is full; follow the chain or extend it. */
		nidx = node->next;
		if (!nidx) {
			hfs_dbg("create new bmap node\n");
			next_node = hfs_bmap_new_bmap(node, idx);
			hfs_btree_write(tree);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		page = hfs_bmap_get_map_page(node, &ctx, 0);
		if (IS_ERR(page)) {
			res = PTR_ERR(page);
			hfs_bnode_put(node);
			return ERR_PTR(res);
		}
		data = kmap_local_page(page);
	}
}
606 
/*
 * Return @node's slot to the b-tree allocation bitmap.  Walks the map
 * node chain to find the map record covering the node's index, then
 * clears the corresponding bit and updates the header counters.
 */
void hfs_bmap_free(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	u16 off, len;
	u32 nidx;
	int res;

	hfs_dbg("node %u\n", node->this);
	BUG_ON(!node->this);
	tree = node->tree;
	lockdep_assert_held(&tree->tree_lock);
	nidx = node->this;
	/* Re-find the header node; the map chain starts there. */
	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	/* Record 2 holds the map record on the header node. */
	len = hfs_brec_lenoff(node, 2, &off);
	/* Skip whole map records until nidx falls inside the current one. */
	while (nidx >= len * 8) {
		u32 i;

		nidx -= len * 8;
		i = node->next;
		if (!i) {
			/* panic */;
			pr_crit("unable to free bnode %u. "
					"bmap not found!\n",
				node->this);
			hfs_bnode_put(node);
			return;
		}
		hfs_bnode_put(node);
		node = hfs_bnode_find(tree, i);
		if (IS_ERR(node))
			return;
		if (node->type != HFS_NODE_MAP) {
			/* panic */;
			pr_crit("invalid bmap found! "
					"(%u,%d)\n",
				node->this, node->type);
			hfs_bnode_put(node);
			return;
		}
		/* On dedicated map nodes the map record is record 0. */
		len = hfs_brec_lenoff(node, 0, &off);
	}

	res = hfs_bmap_clear_bit(node, nidx);
	if (res == -EINVAL) {
		/* Double free: the bit was already clear. */
		pr_crit("trying to free the freed bnode %u(%d)\n",
			nidx, node->type);
	} else if (res) {
		pr_crit("fail to free bnode %u(%d)\n",
			nidx, node->type);
	} else {
		tree->free_nodes++;
		/* Persist the updated free-node count. */
		hfs_btree_write(tree);
		mark_inode_dirty(tree->inode);
	}

	hfs_bnode_put(node);
}
666