// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-btree-internal.h"
#include "dm-transaction-manager.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "btree spine"

/*----------------------------------------------------------------*/

#define BTREE_CSUM_XOR 121107

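/*
 * Before a node block is written, stamp it with its own location and a
 * checksum covering everything after the csum field (the rest of the
 * header plus the keys and values).
 */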
static void node_prepare_for_write(const struct dm_block_validator *v,
				   struct dm_block *b,
				   size_t block_size)
{
	struct btree_node *n = dm_block_data(b);
	struct node_header *h = &n->header;

	h->blocknr = cpu_to_le64(dm_block_location(b));
	h->csum = cpu_to_le32(dm_bm_checksum(&h->flags,
					     block_size - sizeof(__le32),
					     BTREE_CSUM_XOR));
}

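/*
 * On read, reject the block if it isn't where it claims to be, if the
 * checksum doesn't match, or if the entry counts, value size or flags in
 * the header are inconsistent with the block size.
 */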
static int node_check(const struct dm_block_validator *v,
		      struct dm_block *b,
		      size_t block_size)
{
	struct btree_node *n = dm_block_data(b);
	struct node_header *h = &n->header;
	size_t value_size;
	__le32 csum_disk;
	uint32_t flags, nr_entries, max_entries;

	if (dm_block_location(b) != le64_to_cpu(h->blocknr)) {
		DMERR_LIMIT("%s failed: blocknr %llu != wanted %llu", __func__,
			    le64_to_cpu(h->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&h->flags,
					       block_size - sizeof(__le32),
					       BTREE_CSUM_XOR));
	if (csum_disk != h->csum) {
		DMERR_LIMIT("%s failed: csum %u != wanted %u", __func__,
			    le32_to_cpu(csum_disk), le32_to_cpu(h->csum));
		return -EILSEQ;
	}

	nr_entries = le32_to_cpu(h->nr_entries);
	max_entries = le32_to_cpu(h->max_entries);
	value_size = le32_to_cpu(h->value_size);

	if (sizeof(struct node_header) +
	    (sizeof(__le64) + value_size) * max_entries > block_size) {
		DMERR_LIMIT("%s failed: max_entries too large", __func__);
		return -EILSEQ;
	}

	if (nr_entries > max_entries) {
		DMERR_LIMIT("%s failed: too many entries", __func__);
		return -EILSEQ;
	}

	/*
	 * The node must be either INTERNAL or LEAF.
	 */
	flags = le32_to_cpu(h->flags);
	if (!(flags & INTERNAL_NODE) && !(flags & LEAF_NODE)) {
		DMERR_LIMIT("%s failed: node is neither INTERNAL nor LEAF", __func__);
		return -EILSEQ;
	}

	return 0;
}

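/*
 * A single validator is shared by every on-disk btree node, whichever
 * btree the node belongs to.
 */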
const struct dm_block_validator btree_node_validator = {
	.name = "btree_node",
	.prepare_for_write = node_prepare_for_write,
	.check = node_check
};

/*----------------------------------------------------------------*/

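/*
 * Thin wrappers around the transaction manager for reading, shadowing,
 * allocating and unlocking btree node blocks, always with the validator
 * above attached.
 */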
int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
		 struct dm_block **result)
{
	return dm_tm_read_lock(info->tm, b, &btree_node_validator, result);
}

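/*
 * Shadowing returns a block we're allowed to modify: the original if it has
 * already been shadowed within this transaction, otherwise a fresh copy.
 * When a copy is made (inc is set) the blocks it references pick up an
 * extra reference, which inc_children() accounts for via the value type.
 */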
static int bn_shadow(struct dm_btree_info *info, dm_block_t orig,
		     struct dm_btree_value_type *vt,
		     struct dm_block **result)
{
	int r, inc;

	r = dm_tm_shadow_block(info->tm, orig, &btree_node_validator,
			       result, &inc);
	if (!r && inc)
		inc_children(info->tm, dm_block_data(*result), vt);

	return r;
}

int new_block(struct dm_btree_info *info, struct dm_block **result)
{
	return dm_tm_new_block(info->tm, &btree_node_validator, result);
}

void unlock_block(struct dm_btree_info *info, struct dm_block *b)
{
	dm_tm_unlock(info->tm, b);
}

/*----------------------------------------------------------------*/

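/*
 * An ro_spine walks down a btree taking read locks, keeping at most the
 * current node and its parent locked at any one time.
 *
 * Roughly how a caller uses it (a sketch, not lifted from a real caller):
 *
 *	struct ro_spine spine;
 *
 *	init_ro_spine(&spine, info);
 *	do {
 *		r = ro_step(&spine, block);
 *		if (r)
 *			break;
 *		n = ro_node(&spine);
 *		... inspect n and choose the next child block ...
 *	} while (!at_leaf);
 *	exit_ro_spine(&spine);
 */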
void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info)
{
	s->info = info;
	s->count = 0;
	s->nodes[0] = NULL;
	s->nodes[1] = NULL;
}

void exit_ro_spine(struct ro_spine *s)
{
	int i;

	for (i = 0; i < s->count; i++)
		unlock_block(s->info, s->nodes[i]);
}

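/*
 * Step down to new_child.  Only two nodes are ever held, so if both slots
 * are occupied the grandparent is unlocked before the child is read locked.
 */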
int ro_step(struct ro_spine *s, dm_block_t new_child)
{
	int r;

	if (s->count == 2) {
		unlock_block(s->info, s->nodes[0]);
		s->nodes[0] = s->nodes[1];
		s->count--;
	}

	r = bn_read_lock(s->info, new_child, s->nodes + s->count);
	if (!r)
		s->count++;

	return r;
}

void ro_pop(struct ro_spine *s)
{
	BUG_ON(!s->count);
	--s->count;
	unlock_block(s->info, s->nodes[s->count]);
}

struct btree_node *ro_node(struct ro_spine *s)
{
	struct dm_block *block;

	BUG_ON(!s->count);
	block = s->nodes[s->count - 1];

	return dm_block_data(block);
}

/*----------------------------------------------------------------*/

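/*
 * A shadow_spine is the read/write counterpart of an ro_spine: each step
 * shadows the node it visits so the node may be modified, again keeping at
 * most two nodes (current and parent) locked.  The location of the first
 * node shadowed becomes the new root of the tree.
 *
 * A rough sketch of a caller (illustrative only):
 *
 *	struct shadow_spine spine;
 *
 *	init_shadow_spine(&spine, info);
 *	do {
 *		r = shadow_step(&spine, block, vt);
 *		if (r)
 *			break;
 *		n = dm_block_data(shadow_current(&spine));
 *		... modify n and choose the next child block ...
 *	} while (!at_leaf);
 *	new_root = shadow_root(&spine);
 *	exit_shadow_spine(&spine);
 */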
void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info)
{
	s->info = info;
	s->count = 0;
}

void exit_shadow_spine(struct shadow_spine *s)
{
	int i;

	for (i = 0; i < s->count; i++)
		unlock_block(s->info, s->nodes[i]);
}

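/*
 * As with ro_step(), the grandparent is dropped if two nodes are already
 * held, then b is shadowed.  The first successful step records the shadow's
 * location as the new root.
 */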
int shadow_step(struct shadow_spine *s, dm_block_t b,
		struct dm_btree_value_type *vt)
{
	int r;

	if (s->count == 2) {
		unlock_block(s->info, s->nodes[0]);
		s->nodes[0] = s->nodes[1];
		s->count--;
	}

	r = bn_shadow(s->info, b, vt, s->nodes + s->count);
	if (!r) {
		if (!s->count)
			s->root = dm_block_location(s->nodes[0]);

		s->count++;
	}

	return r;
}

struct dm_block *shadow_current(struct shadow_spine *s)
{
	BUG_ON(!s->count);

	return s->nodes[s->count - 1];
}

struct dm_block *shadow_parent(struct shadow_spine *s)
{
	BUG_ON(s->count != 2);

	return s->count == 2 ? s->nodes[0] : NULL;
}

int shadow_has_parent(struct shadow_spine *s)
{
	return s->count >= 2;
}

dm_block_t shadow_root(struct shadow_spine *s)
{
	return s->root;
}

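/*
 * Value type for btrees whose values are themselves block addresses
 * (__le64), such as the internal levels of a multi-level btree.  inc and
 * dec forward to the transaction manager so the reference counts of the
 * referenced blocks stay correct; dm_tm_with_runs() batches runs of
 * adjacent blocks into range operations.
 */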
static void le64_inc(void *context, const void *value_le, unsigned int count)
{
	dm_tm_with_runs(context, value_le, count, dm_tm_inc_range);
}

static void le64_dec(void *context, const void *value_le, unsigned int count)
{
	dm_tm_with_runs(context, value_le, count, dm_tm_dec_range);
}

static int le64_equal(void *context, const void *value1_le, const void *value2_le)
{
	__le64 v1_le, v2_le;

	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));
	return v1_le == v2_le;
}

void init_le64_type(struct dm_transaction_manager *tm,
		    struct dm_btree_value_type *vt)
{
	vt->context = tm;
	vt->size = sizeof(__le64);
	vt->inc = le64_inc;
	vt->dec = le64_dec;
	vt->equal = le64_equal;
}