xref: /linux/drivers/net/wireguard/selftest/allowedips.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
4  *
5  * This contains some basic static unit tests for the allowedips data structure.
6  * It also has two additional modes that are disabled and meant to be used by
7  * folks directly playing with this file. If you define the macro
8  * DEBUG_PRINT_TRIE_GRAPHVIZ to be 1, then every time there's a full tree in
9  * memory, it will be printed out as KERN_DEBUG in a format that can be passed
10  * to graphviz (the dot command) to visualize it. If you define the macro
11  * DEBUG_RANDOM_TRIE to be 1, then there will be an extremely costly set of
12  * randomized tests done against a trivial implementation, which may take
13  * upwards of a half-hour to complete. There's no set of users who should be
14  * enabling these, and the only developers that should go anywhere near these
15  * nobs are the ones who are reading this comment.
16  */
17 
18 #ifdef DEBUG
19 
20 #include <linux/siphash.h>
21 
22 static __init void print_node(struct allowedips_node *node, u8 bits)
23 {
24 	char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n";
25 	char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
26 	u8 ip1[16], ip2[16], cidr1, cidr2;
27 	char *style = "dotted";
28 	u32 color = 0;
29 
30 	if (node == NULL)
31 		return;
32 	if (bits == 32) {
33 		fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n";
34 		fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
35 	} else if (bits == 128) {
36 		fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n";
37 		fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
38 	}
39 	if (node->peer) {
40 		hsiphash_key_t key = { { 0 } };
41 
42 		memcpy(&key, &node->peer, sizeof(node->peer));
43 		color = hsiphash_1u32(0xdeadbeef, &key) % 200 << 16 |
44 			hsiphash_1u32(0xbabecafe, &key) % 200 << 8 |
45 			hsiphash_1u32(0xabad1dea, &key) % 200;
46 		style = "bold";
47 	}
48 	wg_allowedips_read_node(node, ip1, &cidr1);
49 	printk(fmt_declaration, ip1, cidr1, style, color);
50 	if (node->bit[0]) {
51 		wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2);
52 		printk(fmt_connection, ip1, cidr1, ip2, cidr2);
53 	}
54 	if (node->bit[1]) {
55 		wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2);
56 		printk(fmt_connection, ip1, cidr1, ip2, cidr2);
57 	}
58 	if (node->bit[0])
59 		print_node(rcu_dereference_raw(node->bit[0]), bits);
60 	if (node->bit[1])
61 		print_node(rcu_dereference_raw(node->bit[1]), bits);
62 }
63 
/* Dumps the whole trie rooted at @top as a complete graphviz digraph
 * to the kernel log; feed the output to `dot` to visualize it.
 */
static __init void print_tree(struct allowedips_node __rcu *top, u8 bits)
{
	printk(KERN_DEBUG "digraph trie {\n");
	print_node(rcu_dereference_raw(top), bits);
	printk(KERN_DEBUG "}\n");
}
70 
/* Sizing knobs for the randomized stress test below; NUM_QUERIES is the
 * number of lookups performed per removal round.
 */
enum {
	NUM_PEERS = 2000,
	NUM_RAND_ROUTES = 400,
	NUM_MUTATED_ROUTES = 100,
	NUM_QUERIES = NUM_RAND_ROUTES * NUM_MUTATED_ROUTES * 30
};
77 
/* A deliberately naive reference implementation: a single linked list of
 * routes, kept sorted by decreasing prefix length so that a linear scan
 * finds the longest match first. Used to cross-check the real trie.
 */
struct horrible_allowedips {
	struct hlist_head head;
};
81 
/* One route entry in the naive list: a masked address, its netmask,
 * the address family (4 or 6), and an opaque value (the peer pointer).
 */
struct horrible_allowedips_node {
	struct hlist_node table;
	union nf_inet_addr ip;
	union nf_inet_addr mask;
	u8 ip_version;
	void *value;
};
89 
/* Initializes an empty naive routing table. */
static __init void horrible_allowedips_init(struct horrible_allowedips *table)
{
	INIT_HLIST_HEAD(&table->head);
}
94 
/* Frees every entry in @table; the _safe iterator allows deletion
 * while walking the list.
 */
static __init void horrible_allowedips_free(struct horrible_allowedips *table)
{
	struct horrible_allowedips_node *node;
	struct hlist_node *h;

	hlist_for_each_entry_safe(node, h, &table->head, table) {
		hlist_del(&node->table);
		kfree(node);
	}
}
105 
106 static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
107 {
108 	union nf_inet_addr mask;
109 
110 	memset(&mask, 0, sizeof(mask));
111 	memset(&mask.all, 0xff, cidr / 8);
112 	if (cidr % 32)
113 		mask.all[cidr / 32] = (__force u32)htonl(
114 			(0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL);
115 	return mask;
116 }
117 
118 static __init inline u8 horrible_mask_to_cidr(union nf_inet_addr subnet)
119 {
120 	return hweight32(subnet.all[0]) + hweight32(subnet.all[1]) +
121 	       hweight32(subnet.all[2]) + hweight32(subnet.all[3]);
122 }
123 
124 static __init inline void
125 horrible_mask_self(struct horrible_allowedips_node *node)
126 {
127 	if (node->ip_version == 4) {
128 		node->ip.ip &= node->mask.ip;
129 	} else if (node->ip_version == 6) {
130 		node->ip.ip6[0] &= node->mask.ip6[0];
131 		node->ip.ip6[1] &= node->mask.ip6[1];
132 		node->ip.ip6[2] &= node->mask.ip6[2];
133 		node->ip.ip6[3] &= node->mask.ip6[3];
134 	}
135 }
136 
/* Returns true when the IPv4 address @ip falls inside @node's prefix;
 * @node->ip is already masked (see horrible_mask_self).
 */
static __init inline bool
horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip)
{
	return (ip->s_addr & node->mask.ip) == node->ip.ip;
}
142 
143 static __init inline bool
144 horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip)
145 {
146 	return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] &&
147 	       (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] &&
148 	       (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] &&
149 	       (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
150 }
151 
/*
 * Inserts @node into @table, keeping the list sorted by decreasing
 * prefix length so that a front-to-back scan yields the longest match
 * first. If an entry with identical mask and (masked) address already
 * exists, only its value is replaced and @node is freed — mirroring
 * the trie's replace-on-duplicate behavior. Takes ownership of @node.
 */
static __init void
horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node)
{
	struct horrible_allowedips_node *other = NULL, *where = NULL;
	u8 my_cidr = horrible_mask_to_cidr(node->mask);

	/* First pass: look for an exact duplicate to replace. */
	hlist_for_each_entry(other, &table->head, table) {
		if (other->ip_version == node->ip_version &&
		    !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) &&
		    !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) {
			other->value = node->value;
			kfree(node);
			return;
		}
	}
	/* Second pass: find the first entry whose prefix is no longer
	 * than ours. Note hlist_for_each_entry() leaves @other == NULL
	 * when the loop runs to completion, while @where remembers the
	 * last entry visited.
	 */
	hlist_for_each_entry(other, &table->head, table) {
		where = other;
		if (horrible_mask_to_cidr(other->mask) <= my_cidr)
			break;
	}
	if (!other && !where)
		/* Empty list. */
		hlist_add_head(&node->table, &table->head);
	else if (!other)
		/* Ran off the end: every entry is more specific; append. */
		hlist_add_behind(&node->table, &where->table);
	else
		/* Found the first less-specific entry; insert before it. */
		hlist_add_before(&node->table, &where->table);
}
179 
/* Allocates and inserts an IPv4 route mapping @ip/@cidr to @value.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static __init int
horrible_allowedips_insert_v4(struct horrible_allowedips *table,
			      struct in_addr *ip, u8 cidr, void *value)
{
	struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (unlikely(!node))
		return -ENOMEM;
	node->ip.in = *ip;
	node->mask = horrible_cidr_to_mask(cidr);
	node->ip_version = 4;
	node->value = value;
	/* Canonicalize before inserting so duplicate detection works. */
	horrible_mask_self(node);
	horrible_insert_ordered(table, node);
	return 0;
}
196 
/* Allocates and inserts an IPv6 route mapping @ip/@cidr to @value.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static __init int
horrible_allowedips_insert_v6(struct horrible_allowedips *table,
			      struct in6_addr *ip, u8 cidr, void *value)
{
	struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (unlikely(!node))
		return -ENOMEM;
	node->ip.in6 = *ip;
	node->mask = horrible_cidr_to_mask(cidr);
	node->ip_version = 6;
	node->value = value;
	/* Canonicalize before inserting so duplicate detection works. */
	horrible_mask_self(node);
	horrible_insert_ordered(table, node);
	return 0;
}
213 
/* Longest-prefix lookup of an IPv4 address: because the list is sorted
 * by decreasing prefix length, the first match is the longest one.
 * Returns the stored value or NULL if no route covers @ip.
 */
static __init void *
horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip)
{
	struct horrible_allowedips_node *node;

	hlist_for_each_entry(node, &table->head, table) {
		if (node->ip_version == 4 && horrible_match_v4(node, ip))
			return node->value;
	}
	return NULL;
}
225 
/* Longest-prefix lookup of an IPv6 address; see the v4 variant above.
 * Returns the stored value or NULL if no route covers @ip.
 */
static __init void *
horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip)
{
	struct horrible_allowedips_node *node;

	hlist_for_each_entry(node, &table->head, table) {
		if (node->ip_version == 6 && horrible_match_v6(node, ip))
			return node->value;
	}
	return NULL;
}
237 
238 
239 static __init void
240 horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value)
241 {
242 	struct horrible_allowedips_node *node;
243 	struct hlist_node *h;
244 
245 	hlist_for_each_entry_safe(node, h, &table->head, table) {
246 		if (node->value != value)
247 			continue;
248 		hlist_del(&node->table);
249 		kfree(node);
250 	}
251 
252 }
253 
254 static __init bool randomized_test(void)
255 {
256 	unsigned int i, j, k, mutate_amount, cidr;
257 	u8 ip[16], mutate_mask[16], mutated[16];
258 	struct wg_peer **peers, *peer;
259 	struct horrible_allowedips h;
260 	DEFINE_MUTEX(mutex);
261 	struct allowedips t;
262 	bool ret = false;
263 
264 	mutex_init(&mutex);
265 
266 	wg_allowedips_init(&t);
267 	horrible_allowedips_init(&h);
268 
269 	peers = kcalloc(NUM_PEERS, sizeof(*peers), GFP_KERNEL);
270 	if (unlikely(!peers)) {
271 		pr_err("allowedips random self-test malloc: FAIL\n");
272 		goto free;
273 	}
274 	for (i = 0; i < NUM_PEERS; ++i) {
275 		peers[i] = kzalloc(sizeof(*peers[i]), GFP_KERNEL);
276 		if (unlikely(!peers[i])) {
277 			pr_err("allowedips random self-test malloc: FAIL\n");
278 			goto free;
279 		}
280 		kref_init(&peers[i]->refcount);
281 		INIT_LIST_HEAD(&peers[i]->allowedips_list);
282 	}
283 
284 	mutex_lock(&mutex);
285 
286 	for (i = 0; i < NUM_RAND_ROUTES; ++i) {
287 		get_random_bytes(ip, 4);
288 		cidr = get_random_u32_inclusive(1, 32);
289 		peer = peers[get_random_u32_below(NUM_PEERS)];
290 		if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr,
291 					    peer, &mutex) < 0) {
292 			pr_err("allowedips random self-test malloc: FAIL\n");
293 			goto free_locked;
294 		}
295 		if (horrible_allowedips_insert_v4(&h, (struct in_addr *)ip,
296 						  cidr, peer) < 0) {
297 			pr_err("allowedips random self-test malloc: FAIL\n");
298 			goto free_locked;
299 		}
300 		for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
301 			memcpy(mutated, ip, 4);
302 			get_random_bytes(mutate_mask, 4);
303 			mutate_amount = get_random_u32_below(32);
304 			for (k = 0; k < mutate_amount / 8; ++k)
305 				mutate_mask[k] = 0xff;
306 			mutate_mask[k] = 0xff
307 					 << ((8 - (mutate_amount % 8)) % 8);
308 			for (; k < 4; ++k)
309 				mutate_mask[k] = 0;
310 			for (k = 0; k < 4; ++k)
311 				mutated[k] = (mutated[k] & mutate_mask[k]) |
312 					     (~mutate_mask[k] &
313 					      get_random_u8());
314 			cidr = get_random_u32_inclusive(1, 32);
315 			peer = peers[get_random_u32_below(NUM_PEERS)];
316 			if (wg_allowedips_insert_v4(&t,
317 						    (struct in_addr *)mutated,
318 						    cidr, peer, &mutex) < 0) {
319 				pr_err("allowedips random self-test malloc: FAIL\n");
320 				goto free_locked;
321 			}
322 			if (horrible_allowedips_insert_v4(&h,
323 				(struct in_addr *)mutated, cidr, peer)) {
324 				pr_err("allowedips random self-test malloc: FAIL\n");
325 				goto free_locked;
326 			}
327 		}
328 	}
329 
330 	for (i = 0; i < NUM_RAND_ROUTES; ++i) {
331 		get_random_bytes(ip, 16);
332 		cidr = get_random_u32_inclusive(1, 128);
333 		peer = peers[get_random_u32_below(NUM_PEERS)];
334 		if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr,
335 					    peer, &mutex) < 0) {
336 			pr_err("allowedips random self-test malloc: FAIL\n");
337 			goto free_locked;
338 		}
339 		if (horrible_allowedips_insert_v6(&h, (struct in6_addr *)ip,
340 						  cidr, peer) < 0) {
341 			pr_err("allowedips random self-test malloc: FAIL\n");
342 			goto free_locked;
343 		}
344 		for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
345 			memcpy(mutated, ip, 16);
346 			get_random_bytes(mutate_mask, 16);
347 			mutate_amount = get_random_u32_below(128);
348 			for (k = 0; k < mutate_amount / 8; ++k)
349 				mutate_mask[k] = 0xff;
350 			mutate_mask[k] = 0xff
351 					 << ((8 - (mutate_amount % 8)) % 8);
352 			for (; k < 4; ++k)
353 				mutate_mask[k] = 0;
354 			for (k = 0; k < 4; ++k)
355 				mutated[k] = (mutated[k] & mutate_mask[k]) |
356 					     (~mutate_mask[k] &
357 					      get_random_u8());
358 			cidr = get_random_u32_inclusive(1, 128);
359 			peer = peers[get_random_u32_below(NUM_PEERS)];
360 			if (wg_allowedips_insert_v6(&t,
361 						    (struct in6_addr *)mutated,
362 						    cidr, peer, &mutex) < 0) {
363 				pr_err("allowedips random self-test malloc: FAIL\n");
364 				goto free_locked;
365 			}
366 			if (horrible_allowedips_insert_v6(
367 				    &h, (struct in6_addr *)mutated, cidr,
368 				    peer)) {
369 				pr_err("allowedips random self-test malloc: FAIL\n");
370 				goto free_locked;
371 			}
372 		}
373 	}
374 
375 	mutex_unlock(&mutex);
376 
377 	if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) {
378 		print_tree(t.root4, 32);
379 		print_tree(t.root6, 128);
380 	}
381 
382 	for (j = 0;; ++j) {
383 		for (i = 0; i < NUM_QUERIES; ++i) {
384 			get_random_bytes(ip, 4);
385 			if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
386 				pr_err("allowedips random v4 self-test: FAIL\n");
387 				goto free;
388 			}
389 			get_random_bytes(ip, 16);
390 			if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
391 				pr_err("allowedips random v6 self-test: FAIL\n");
392 				goto free;
393 			}
394 		}
395 		if (j >= NUM_PEERS)
396 			break;
397 		mutex_lock(&mutex);
398 		wg_allowedips_remove_by_peer(&t, peers[j], &mutex);
399 		mutex_unlock(&mutex);
400 		horrible_allowedips_remove_by_value(&h, peers[j]);
401 	}
402 
403 	if (t.root4 || t.root6) {
404 		pr_err("allowedips random self-test removal: FAIL\n");
405 		goto free;
406 	}
407 
408 	ret = true;
409 
410 free:
411 	mutex_lock(&mutex);
412 free_locked:
413 	wg_allowedips_free(&t, &mutex);
414 	mutex_unlock(&mutex);
415 	horrible_allowedips_free(&h);
416 	if (peers) {
417 		for (i = 0; i < NUM_PEERS; ++i)
418 			kfree(peers[i]);
419 	}
420 	kfree(peers);
421 	return ret;
422 }
423 
424 static __init inline struct in_addr *ip4(u8 a, u8 b, u8 c, u8 d)
425 {
426 	static struct in_addr ip;
427 	u8 *split = (u8 *)&ip;
428 
429 	split[0] = a;
430 	split[1] = b;
431 	split[2] = c;
432 	split[3] = d;
433 	return &ip;
434 }
435 
436 static __init inline struct in6_addr *ip6(u32 a, u32 b, u32 c, u32 d)
437 {
438 	static struct in6_addr ip;
439 	__be32 *split = (__be32 *)&ip;
440 
441 	split[0] = cpu_to_be32(a);
442 	split[1] = cpu_to_be32(b);
443 	split[2] = cpu_to_be32(c);
444 	split[3] = cpu_to_be32(d);
445 	return &ip;
446 }
447 
/* Allocates a minimal wg_peer sufficient for allowedips bookkeeping
 * (refcount and allowedips_list only). Returns NULL on OOM; the caller
 * frees it with plain kfree().
 */
static __init struct wg_peer *init_peer(void)
{
	struct wg_peer *peer = kzalloc(sizeof(*peer), GFP_KERNEL);

	if (!peer)
		return NULL;
	kref_init(&peer->refcount);
	INIT_LIST_HEAD(&peer->allowedips_list);
	return peer;
}
458 
/* Inserts a v4/v6 route for peer @mem into the local trie `t`; expects
 * `t` and `mutex` in the expanding scope.
 */
#define insert(version, mem, ipa, ipb, ipc, ipd, cidr)                       \
	wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \
					cidr, mem, &mutex)

/* Bumps the test counter `i` and reports a numbered failure when the
 * local `_s` (set by the test macros below) is false.
 */
#define maybe_fail() do {                                               \
		++i;                                                    \
		if (!_s) {                                              \
			pr_info("allowedips self-test %zu: FAIL\n", i); \
			success = false;                                \
		}                                                       \
	} while (0)

/* Asserts that looking up the given address yields peer @mem. */
#define test(version, mem, ipa, ipb, ipc, ipd) do {                          \
		bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
				 ip##version(ipa, ipb, ipc, ipd)) == (mem);  \
		maybe_fail();                                                \
	} while (0)

/* Asserts that looking up the given address does NOT yield peer @mem. */
#define test_negative(version, mem, ipa, ipb, ipc, ipd) do {                 \
		bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
				 ip##version(ipa, ipb, ipc, ipd)) != (mem);  \
		maybe_fail();                                                \
	} while (0)

/* Asserts an arbitrary boolean condition with the same accounting. */
#define test_boolean(cond) do {   \
		bool _s = (cond); \
		maybe_fail();     \
	} while (0)
487 
/*
 * Static allowedips self-test: inserts a fixed set of v4/v6 routes for
 * peers a-h, then checks longest-prefix lookups, replacement on
 * duplicate insert, self-masking of non-canonical addresses, removal by
 * peer, deep-trie freeing, and per-peer route enumeration. Returns true
 * when every check passed.
 */
bool __init wg_allowedips_selftest(void)
{
	bool found_a = false, found_b = false, found_c = false, found_d = false,
	     found_e = false, found_other = false;
	struct wg_peer *a = init_peer(), *b = init_peer(), *c = init_peer(),
		       *d = init_peer(), *e = init_peer(), *f = init_peer(),
		       *g = init_peer(), *h = init_peer();
	struct allowedips_node *iter_node;
	bool success = false;
	struct allowedips t;
	DEFINE_MUTEX(mutex);
	struct in6_addr ip;
	size_t i = 0, count = 0;
	__be64 part;

	mutex_init(&mutex);
	mutex_lock(&mutex);
	wg_allowedips_init(&t);

	if (!a || !b || !c || !d || !e || !f || !g || !h) {
		pr_err("allowedips self-test malloc: FAIL\n");
		goto free;
	}

	/* Fixed route set covering nested prefixes, duplicates, and
	 * addresses with host bits set (which insertion must mask off).
	 */
	insert(4, a, 192, 168, 4, 0, 24);
	insert(4, b, 192, 168, 4, 4, 32);
	insert(4, c, 192, 168, 0, 0, 16);
	insert(4, d, 192, 95, 5, 64, 27);
	/* replaces previous entry, and maskself is required */
	insert(4, c, 192, 95, 5, 65, 27);
	insert(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
	insert(6, c, 0x26075300, 0x60006b00, 0, 0, 64);
	insert(4, e, 0, 0, 0, 0, 0);
	insert(6, e, 0, 0, 0, 0, 0);
	/* replaces previous entry */
	insert(6, f, 0, 0, 0, 0, 0);
	insert(6, g, 0x24046800, 0, 0, 0, 32);
	/* maskself is required */
	insert(6, h, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 64);
	insert(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 128);
	insert(6, c, 0x24446800, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128);
	insert(6, b, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
	insert(4, g, 64, 15, 112, 0, 20);
	/* maskself is required */
	insert(4, h, 64, 15, 123, 211, 25);
	insert(4, a, 10, 0, 0, 0, 25);
	insert(4, b, 10, 0, 0, 128, 25);
	insert(4, a, 10, 1, 0, 0, 30);
	insert(4, b, 10, 1, 0, 4, 30);
	insert(4, c, 10, 1, 0, 8, 29);
	insert(4, d, 10, 1, 0, 16, 29);

	if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) {
		print_tree(t.root4, 32);
		print_tree(t.root6, 128);
	}

	success = true;

	/* Longest-prefix-match expectations against the routes above. */
	test(4, a, 192, 168, 4, 20);
	test(4, a, 192, 168, 4, 0);
	test(4, b, 192, 168, 4, 4);
	test(4, c, 192, 168, 200, 182);
	test(4, c, 192, 95, 5, 68);
	test(4, e, 192, 95, 5, 96);
	test(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543);
	test(6, c, 0x26075300, 0x60006b00, 0, 0xc02e01ee);
	test(6, f, 0x26075300, 0x60006b01, 0, 0);
	test(6, g, 0x24046800, 0x40040806, 0, 0x1006);
	test(6, g, 0x24046800, 0x40040806, 0x1234, 0x5678);
	test(6, f, 0x240467ff, 0x40040806, 0x1234, 0x5678);
	test(6, f, 0x24046801, 0x40040806, 0x1234, 0x5678);
	test(6, h, 0x24046800, 0x40040800, 0x1234, 0x5678);
	test(6, h, 0x24046800, 0x40040800, 0, 0);
	test(6, h, 0x24046800, 0x40040800, 0x10101010, 0x10101010);
	test(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef);
	test(4, g, 64, 15, 116, 26);
	test(4, g, 64, 15, 127, 3);
	test(4, g, 64, 15, 123, 1);
	test(4, h, 64, 15, 123, 128);
	test(4, h, 64, 15, 123, 129);
	test(4, a, 10, 0, 0, 52);
	test(4, b, 10, 0, 0, 220);
	test(4, a, 10, 1, 0, 2);
	test(4, b, 10, 1, 0, 6);
	test(4, c, 10, 1, 0, 10);
	test(4, d, 10, 1, 0, 20);

	/* Removal by peer must delete all of that peer's routes. */
	insert(4, a, 1, 0, 0, 0, 32);
	insert(4, a, 64, 0, 0, 0, 32);
	insert(4, a, 128, 0, 0, 0, 32);
	insert(4, a, 192, 0, 0, 0, 32);
	insert(4, a, 255, 0, 0, 0, 32);
	wg_allowedips_remove_by_peer(&t, a, &mutex);
	test_negative(4, a, 1, 0, 0, 0);
	test_negative(4, a, 64, 0, 0, 0);
	test_negative(4, a, 128, 0, 0, 0);
	test_negative(4, a, 192, 0, 0, 0);
	test_negative(4, a, 255, 0, 0, 0);

	wg_allowedips_free(&t, &mutex);
	wg_allowedips_init(&t);
	insert(4, a, 192, 168, 0, 0, 16);
	insert(4, a, 192, 168, 0, 0, 24);
	wg_allowedips_remove_by_peer(&t, a, &mutex);
	test_negative(4, a, 192, 168, 0, 1);

	/* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_DEPTH) in free_node
	 * if something goes wrong.
	 */
	for (i = 0; i < 64; ++i) {
		part = cpu_to_be64(~0LLU << i);
		memset(&ip, 0xff, 8);
		memcpy((u8 *)&ip + 8, &part, 8);
		wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
		memcpy(&ip, &part, 8);
		memset((u8 *)&ip + 8, 0, 8);
		wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
	}
	memset(&ip, 0, 16);
	wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
	wg_allowedips_free(&t, &mutex);

	/* Enumerating peer a's routes must yield exactly the five
	 * canonical (self-masked) entries inserted below.
	 */
	wg_allowedips_init(&t);
	insert(4, a, 192, 95, 5, 93, 27);
	insert(6, a, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
	insert(4, a, 10, 1, 0, 20, 29);
	insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 83);
	insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 21);
	list_for_each_entry(iter_node, &a->allowedips_list, peer_list) {
		u8 cidr, ip[16] __aligned(__alignof(u64));
		int family = wg_allowedips_read_node(iter_node, ip, &cidr);

		count++;

		if (cidr == 27 && family == AF_INET &&
		    !memcmp(ip, ip4(192, 95, 5, 64), sizeof(struct in_addr)))
			found_a = true;
		else if (cidr == 128 && family == AF_INET6 &&
			 !memcmp(ip, ip6(0x26075300, 0x60006b00, 0, 0xc05f0543),
				 sizeof(struct in6_addr)))
			found_b = true;
		else if (cidr == 29 && family == AF_INET &&
			 !memcmp(ip, ip4(10, 1, 0, 16), sizeof(struct in_addr)))
			found_c = true;
		else if (cidr == 83 && family == AF_INET6 &&
			 !memcmp(ip, ip6(0x26075300, 0x6d8a6bf8, 0xdab1e000, 0),
				 sizeof(struct in6_addr)))
			found_d = true;
		else if (cidr == 21 && family == AF_INET6 &&
			 !memcmp(ip, ip6(0x26075000, 0, 0, 0),
				 sizeof(struct in6_addr)))
			found_e = true;
		else
			found_other = true;
	}
	test_boolean(count == 5);
	test_boolean(found_a);
	test_boolean(found_b);
	test_boolean(found_c);
	test_boolean(found_d);
	test_boolean(found_e);
	test_boolean(!found_other);

	if (IS_ENABLED(DEBUG_RANDOM_TRIE) && success)
		success = randomized_test();

	if (success)
		pr_info("allowedips self-tests: pass\n");

free:
	wg_allowedips_free(&t, &mutex);
	kfree(a);
	kfree(b);
	kfree(c);
	kfree(d);
	kfree(e);
	kfree(f);
	kfree(g);
	kfree(h);
	mutex_unlock(&mutex);

	return success;
}
672 
673 #undef test_negative
674 #undef test
675 #undef remove
676 #undef insert
677 #undef init_peer
678 
679 #endif
680