// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"


/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %d, len %d\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	len = check_and_correct_requested_length(node, off, len);

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy_from_page(buf, *pagep, off, l);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy_from_page(buf, *++pagep, 0, l);
	}
}

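/* Read a 16-bit big-endian value from a node and return it in CPU byte order */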
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

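/* Read a single byte from the raw data of a node */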
u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 1);
	return data;
}

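/* Read a record key from a node; the key length depends on node type and tree attributes */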
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS ||
	    node->tree->cnid == HFSPLUS_ATTR_CNID)
		key_len = hfs_bnode_read_u16(node, off) + 2;
	else
		key_len = tree->max_key_len + 2;

	if (key_len > sizeof(hfsplus_btree_key) || key_len < 1) {
		memset(key, 0, sizeof(hfsplus_btree_key));
		pr_err("hfsplus: Invalid key length: %d\n", key_len);
		return;
	}

	hfs_bnode_read(node, key, off, key_len);
}

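/* Copy a range of bytes into the raw data of a node and mark the touched pages dirty */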
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %d, len %d\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	len = check_and_correct_requested_length(node, off, len);

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy_to_page(*pagep, off, buf, l);
	set_page_dirty(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy_to_page(*++pagep, 0, buf, l);
		set_page_dirty(*pagep);
	}
}

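/* Write a 16-bit value to a node in big-endian (on-disk) byte order */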
void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	/* TODO: optimize later... */
	hfs_bnode_write(node, &v, off, 2);
}

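/* Zero a range of bytes in the raw data of a node */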
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page **pagep;
	int l;

	if (!is_bnode_offset_valid(node, off))
		return;

	if (len == 0) {
		pr_err("requested zero length: "
		       "NODE: id %u, type %#x, height %u, "
		       "node_size %u, offset %d, len %d\n",
		       node->this, node->type, node->height,
		       node->tree->node_size, off, len);
		return;
	}

	len = check_and_correct_requested_length(node, off, len);

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memzero_page(*pagep, off, l);
	set_page_dirty(*pagep);

	while ((len -= l) != 0) {
		l = min_t(int, len, PAGE_SIZE);
		memzero_page(*++pagep, 0, l);
		set_page_dirty(*pagep);
	}
}

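/* Copy a range of bytes from one node to another */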
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		    struct hfs_bnode *src_node, int src, int len)
{
	struct page **src_page, **dst_page;
	int l;

	hfs_dbg("dst %u, src %u, len %u\n", dst, src, len);
	if (!len)
		return;

	len = check_and_correct_requested_length(src_node, src, len);
	len = check_and_correct_requested_length(dst_node, dst, len);

	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page + (src >> PAGE_SHIFT);
	src &= ~PAGE_MASK;
	dst_page = dst_node->page + (dst >> PAGE_SHIFT);
	dst &= ~PAGE_MASK;

	if (src == dst) {
		l = min_t(int, len, PAGE_SIZE - src);
		memcpy_page(*dst_page, src, *src_page, src, l);
		set_page_dirty(*dst_page);

		while ((len -= l) != 0) {
			l = min_t(int, len, PAGE_SIZE);
			memcpy_page(*++dst_page, 0, *++src_page, 0, l);
			set_page_dirty(*dst_page);
		}
	} else {
		void *src_ptr, *dst_ptr;

		do {
			dst_ptr = kmap_local_page(*dst_page) + dst;
			src_ptr = kmap_local_page(*src_page) + src;
			if (PAGE_SIZE - src < PAGE_SIZE - dst) {
				l = PAGE_SIZE - src;
				src = 0;
				dst += l;
			} else {
				l = PAGE_SIZE - dst;
				src += l;
				dst = 0;
			}
			l = min(len, l);
			memcpy(dst_ptr, src_ptr, l);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);
			if (!dst)
				dst_page++;
			else
				src_page++;
		} while ((len -= l));
	}
}

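/* Move a range of bytes within a node, handling overlapping ranges like memmove() */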
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page **src_page, **dst_page;
	void *src_ptr, *dst_ptr;
	int l;

	hfs_dbg("dst %u, src %u, len %u\n", dst, src, len);
	if (!len)
		return;

	len = check_and_correct_requested_length(node, src, len);
	len = check_and_correct_requested_length(node, dst, len);

	src += node->page_offset;
	dst += node->page_offset;
	if (dst > src) {
		src += len - 1;
		src_page = node->page + (src >> PAGE_SHIFT);
		src = (src & ~PAGE_MASK) + 1;
		dst += len - 1;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst = (dst & ~PAGE_MASK) + 1;

		if (src == dst) {
			while (src < len) {
				dst_ptr = kmap_local_page(*dst_page);
				src_ptr = kmap_local_page(*src_page);
				memmove(dst_ptr, src_ptr, src);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				len -= src;
				src = PAGE_SIZE;
				src_page--;
				dst_page--;
			}
			src -= len;
			dst_ptr = kmap_local_page(*dst_page);
			src_ptr = kmap_local_page(*src_page);
			memmove(dst_ptr + src, src_ptr + src, len);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);
		} else {
			do {
				dst_ptr = kmap_local_page(*dst_page) + dst;
				src_ptr = kmap_local_page(*src_page) + src;
				if (src < dst) {
					l = src;
					src = PAGE_SIZE;
					dst -= l;
				} else {
					l = dst;
					src -= l;
					dst = PAGE_SIZE;
				}
				l = min(len, l);
				memmove(dst_ptr - l, src_ptr - l, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				if (dst == PAGE_SIZE)
					dst_page--;
				else
					src_page--;
			} while ((len -= l));
		}
	} else {
		src_page = node->page + (src >> PAGE_SHIFT);
		src &= ~PAGE_MASK;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst &= ~PAGE_MASK;

		if (src == dst) {
			l = min_t(int, len, PAGE_SIZE - src);

			dst_ptr = kmap_local_page(*dst_page) + src;
			src_ptr = kmap_local_page(*src_page) + src;
			memmove(dst_ptr, src_ptr, l);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);

			while ((len -= l) != 0) {
				l = min_t(int, len, PAGE_SIZE);
				dst_ptr = kmap_local_page(*++dst_page);
				src_ptr = kmap_local_page(*++src_page);
				memmove(dst_ptr, src_ptr, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
			}
		} else {
			do {
				dst_ptr = kmap_local_page(*dst_page) + dst;
				src_ptr = kmap_local_page(*src_page) + src;
				if (PAGE_SIZE - src <
						PAGE_SIZE - dst) {
					l = PAGE_SIZE - src;
					src = 0;
					dst += l;
				} else {
					l = PAGE_SIZE - dst;
					src += l;
					dst = 0;
				}
				l = min(len, l);
				memmove(dst_ptr, src_ptr, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				if (!dst)
					dst_page++;
				else
					src_page++;
			} while ((len -= l));
		}
	}
}

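/* Dump the record offsets and key sizes of a node for debugging */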
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg("node %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg("next %d, prev %d, type %d, height %d, num_recs %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg(" key_off %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
					node->tree->cnid == HFSPLUS_ATTR_CNID)
				tmp = hfs_bnode_read_u16(node, key_off) + 2;
			else
				tmp = node->tree->max_key_len + 2;
			hfs_dbg(" (%d", tmp);
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg(", cnid %d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u16(node, key_off);
			hfs_dbg(" (%d)", tmp);
		}
	}
	hfs_dbg("\n");
}

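/* Unlink a node from the sibling chain on its level and mark it deleted */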
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	/* move down? */
	if (!node->prev && !node->next)
		hfs_dbg("btree delete level\n");
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}

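/* Hash a node number into an index into the tree's node hash table */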
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

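/* Look up an already loaded node in the in-memory hash table */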
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n",
		       cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
			node; node = node->next_hash)
		if (node->this == cnid)
			return node;
	return NULL;
}

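/* Allocate an in-memory node and read its backing pages from the tree's inode */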
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n",
		       cnid);
		return NULL;
	}

	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg("cnid %d, node %d, refcnt 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq,
			!test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid << tree->node_size_shift;
	block = off >> PAGE_SHIFT;
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
		page = read_mapping_page(mapping, block, NULL);
		if (IS_ERR(page))
			goto fail;
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}

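/* Remove a node from the in-memory hash table */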
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg("cnid %d, node %d, refcnt %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq,
			!test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
							 node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap_local(desc);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u16(node, off) + 2;
		if (key_size >= entry_size || key_size & 1)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}

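/* Drop the page references held by a node and free the in-memory object */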
void hfs_bnode_free(struct hfs_bnode *node)
{
	int i;

	for (i = 0; i < node->tree->pages_per_bnode; i++)
		if (node->page[i])
			put_page(node->page[i]);
	kfree(node);
}

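/* Create a new, zero-filled node with the given node number */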
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memzero_page(*pagep, node->page_offset,
		     min_t(int, PAGE_SIZE, tree->node_size));
	set_page_dirty(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memzero_page(*++pagep, 0, PAGE_SIZE);
		set_page_dirty(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}

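/* Grab a reference to a node */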
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		hfs_dbg("cnid %d, node %d, refcnt %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg("cnid %d, node %d, refcnt %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			if (hfs_bnode_need_zeroout(tree))
				hfs_bnode_clear(node, 0, tree->node_size);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}

/*
 * Unused nodes have to be zeroed if this is the catalog tree and
 * a corresponding flag in the volume header is set.
 */
bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
{
	struct super_block *sb = tree->inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);

	return tree->cnid == HFSPLUS_CAT_CNID &&
		volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
}