// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer ([email protected])
 * (C) 2003 Ardis Technologies <[email protected]>
 *
 * Handle basic btree node operations
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy_from_page(buf, *pagep, off, l);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy_from_page(buf, *++pagep, 0, l);
	}
}

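/* Read a big-endian 16-bit value from a node, converted to CPU order */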
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

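/* Read a single byte from the raw data of a node */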
u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 1);
	return data;
}

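/*
 * Copy the key of the record at the given offset into @key; leaf keys
 * and variable-width index keys carry their length in the first two
 * bytes, fixed-width index keys use the tree's max_key_len.
 */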
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS ||
	    node->tree->cnid == HFSPLUS_ATTR_CNID)
		key_len = hfs_bnode_read_u16(node, off) + 2;
	else
		key_len = tree->max_key_len + 2;

	if (key_len > sizeof(hfsplus_btree_key) || key_len < 1) {
		memset(key, 0, sizeof(hfsplus_btree_key));
		pr_err("hfsplus: Invalid key length: %d\n", key_len);
		return;
	}

	hfs_bnode_read(node, key, off, key_len);
}

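/* Copy a range of bytes into the raw data of a node, dirtying the pages */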
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy_to_page(*pagep, off, buf, l);
	set_page_dirty(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy_to_page(*++pagep, 0, buf, l);
		set_page_dirty(*pagep);
	}
}

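/* Write a 16-bit value to a node in big-endian (on-disk) byte order */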
void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	/* TODO: optimize later... */
	hfs_bnode_write(node, &v, off, 2);
}

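/* Zero a range of bytes in the raw data of a node */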
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memzero_page(*pagep, off, l);
	set_page_dirty(*pagep);

	while ((len -= l) != 0) {
		l = min_t(int, len, PAGE_SIZE);
		memzero_page(*++pagep, 0, l);
		set_page_dirty(*pagep);
	}
}

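/* Copy a range of bytes between two (possibly different) nodes */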
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		    struct hfs_bnode *src_node, int src, int len)
{
	struct page **src_page, **dst_page;
	int l;

	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page + (src >> PAGE_SHIFT);
	src &= ~PAGE_MASK;
	dst_page = dst_node->page + (dst >> PAGE_SHIFT);
	dst &= ~PAGE_MASK;

	if (src == dst) {
		/* source and destination share the same in-page offset,
		 * so the copy can proceed whole page by whole page */
		l = min_t(int, len, PAGE_SIZE - src);
		memcpy_page(*dst_page, src, *src_page, src, l);
		set_page_dirty(*dst_page);

		while ((len -= l) != 0) {
			l = min_t(int, len, PAGE_SIZE);
			memcpy_page(*++dst_page, 0, *++src_page, 0, l);
			set_page_dirty(*dst_page);
		}
	} else {
		void *src_ptr, *dst_ptr;

		/* offsets are staggered: copy in chunks bounded by
		 * whichever page boundary comes first */
		do {
			dst_ptr = kmap_local_page(*dst_page) + dst;
			src_ptr = kmap_local_page(*src_page) + src;
			if (PAGE_SIZE - src < PAGE_SIZE - dst) {
				l = PAGE_SIZE - src;
				src = 0;
				dst += l;
			} else {
				l = PAGE_SIZE - dst;
				src += l;
				dst = 0;
			}
			l = min(len, l);
			memcpy(dst_ptr, src_ptr, l);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);
			if (!dst)
				dst_page++;
			else
				src_page++;
		} while ((len -= l));
	}
}

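/*
 * Move a range of bytes within a node; overlapping ranges are handled
 * safely, as with memmove()
 */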
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page **src_page, **dst_page;
	void *src_ptr, *dst_ptr;
	int l;

	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	if (dst > src) {
		/* the ranges may overlap with dst above src, so walk
		 * backwards from the end of the range */
		src += len - 1;
		src_page = node->page + (src >> PAGE_SHIFT);
		src = (src & ~PAGE_MASK) + 1;
		dst += len - 1;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst = (dst & ~PAGE_MASK) + 1;

		if (src == dst) {
			while (src < len) {
				dst_ptr = kmap_local_page(*dst_page);
				src_ptr = kmap_local_page(*src_page);
				memmove(dst_ptr, src_ptr, src);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				len -= src;
				src = PAGE_SIZE;
				src_page--;
				dst_page--;
			}
			src -= len;
			dst_ptr = kmap_local_page(*dst_page);
			src_ptr = kmap_local_page(*src_page);
			memmove(dst_ptr + src, src_ptr + src, len);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);
		} else {
			do {
				dst_ptr = kmap_local_page(*dst_page) + dst;
				src_ptr = kmap_local_page(*src_page) + src;
				if (src < dst) {
					l = src;
					src = PAGE_SIZE;
					dst -= l;
				} else {
					l = dst;
					src -= l;
					dst = PAGE_SIZE;
				}
				l = min(len, l);
				memmove(dst_ptr - l, src_ptr - l, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				if (dst == PAGE_SIZE)
					dst_page--;
				else
					src_page--;
			} while ((len -= l));
		}
	} else {
		/* dst is at or below src, so a forward copy is safe */
		src_page = node->page + (src >> PAGE_SHIFT);
		src &= ~PAGE_MASK;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst &= ~PAGE_MASK;

		if (src == dst) {
			l = min_t(int, len, PAGE_SIZE - src);

			dst_ptr = kmap_local_page(*dst_page) + src;
			src_ptr = kmap_local_page(*src_page) + src;
			memmove(dst_ptr, src_ptr, l);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);

			while ((len -= l) != 0) {
				l = min_t(int, len, PAGE_SIZE);
				dst_ptr = kmap_local_page(*++dst_page);
				src_ptr = kmap_local_page(*++src_page);
				memmove(dst_ptr, src_ptr, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
			}
		} else {
			do {
				dst_ptr = kmap_local_page(*dst_page) + dst;
				src_ptr = kmap_local_page(*src_page) + src;
				if (PAGE_SIZE - src < PAGE_SIZE - dst) {
					l = PAGE_SIZE - src;
					src = 0;
					dst += l;
				} else {
					l = PAGE_SIZE - dst;
					src += l;
					dst = 0;
				}
				l = min(len, l);
				memmove(dst_ptr, src_ptr, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				if (!dst)
					dst_page++;
				else
					src_page++;
			} while ((len -= l));
		}
	}
}

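/* Dump a node's descriptor and record offset table for debugging */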
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg(BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
					node->tree->cnid == HFSPLUS_ATTR_CNID)
				tmp = hfs_bnode_read_u16(node, key_off) + 2;
			else
				tmp = node->tree->max_key_len + 2;
			hfs_dbg_cont(BNODE_MOD, " (%d", tmp);
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u16(node, key_off);
			hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
		}
	}
	hfs_dbg_cont(BNODE_MOD, "\n");
}

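/*
 * Unlink a node from the sibling chain of its level and mark it deleted
 */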
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	/* move down? */
	if (!node->prev && !node->next)
		hfs_dbg(BNODE_MOD, "hfs_btree_del_level\n");
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}

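/* Hash a node number into a bucket of the tree's node hash table */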
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

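/* Look up a node in the tree's hash table; returns NULL if not cached */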
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n",
		       cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
			node; node = node->next_hash)
		if (node->this == cnid)
			return node;
	return NULL;
}

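/*
 * Allocate an in-memory node for @cnid and read its pages from the
 * tree's inode; if another task caches the same node first, wait for
 * it and return that node instead.
 */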
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n",
		       cnid);
		return NULL;
	}

	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		/* lost the race: wait for the other task's copy to
		 * finish being read in and return that one instead */
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq,
			!test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid << tree->node_size_shift;
	block = off >> PAGE_SHIFT;
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
		page = read_mapping_page(mapping, block, NULL);
		if (IS_ERR(page))
			goto fail;
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}

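/* Remove a node from the tree's hash table; called with hash_lock held */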
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq,
			!test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
							 node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap_local(desc);

	/* validate the node's type against its height in the tree */
	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	/* sanity-check the record offset table at the end of the node */
	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u16(node, off) + 2;
		if (key_size >= entry_size || key_size & 1)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}

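/* Release the pages backing a node and free the in-memory node */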
void hfs_bnode_free(struct hfs_bnode *node)
{
	int i;

	for (i = 0; i < node->tree->pages_per_bnode; i++)
		if (node->page[i])
			put_page(node->page[i]);
	kfree(node);
}

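/* Create a new, zeroed node; it must not already be in the hash table */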
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memzero_page(*pagep, node->page_offset,
		     min_t(int, PAGE_SIZE, tree->node_size));
	set_page_dirty(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memzero_page(*++pagep, 0, PAGE_SIZE);
		set_page_dirty(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}

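/* Take an extra reference on a node */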
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			/* last reference to a deleted node: unhash it,
			 * return it to the bitmap and free it */
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			if (hfs_bnode_need_zeroout(tree))
				hfs_bnode_clear(node, 0, tree->node_size);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}

/*
 * Unused nodes have to be zeroed if this is the catalog tree and
 * a corresponding flag in the volume header is set.
 */
bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
{
	struct super_block *sb = tree->inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);

	return tree->cnid == HFSPLUS_CAT_CNID &&
		volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
}