// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 */

/**************************************************************************
 * Self Test
 **************************************************************************/

#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_wait.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#define MAX_ENTRIES	1000000
#define TEST_INSERT_FAIL INT_MAX

static int parm_entries = 50000;
module_param(parm_entries, int, 0);
MODULE_PARM_DESC(parm_entries, "Number of entries to add (default: 50000)");

static int runs = 4;
module_param(runs, int, 0);
MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)");

static int max_size = 0;
module_param(max_size, int, 0);
MODULE_PARM_DESC(max_size, "Maximum table size (default: calculated)");

static bool shrinking = false;
module_param(shrinking, bool, 0);
MODULE_PARM_DESC(shrinking, "Enable automatic shrinking (default: off)");

static int size = 8;
module_param(size, int, 0);
MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)");

static int tcount = 10;
module_param(tcount, int, 0);
MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)");

static bool enomem_retry = false;
module_param(enomem_retry, bool, 0);
MODULE_PARM_DESC(enomem_retry, "Retry insert even if -ENOMEM was returned (default: off)");

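/*
 * Test objects: test_obj is used by the rhashtable tests, test_obj_rhl
 * by the rhltable (duplicate key) tests, and thread_data carries the
 * per-thread state for the concurrency test.
 */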
struct test_obj_val {
	int	id;
	int	tid;
};

struct test_obj {
	struct test_obj_val	value;
	struct rhash_head	node;
};

struct test_obj_rhl {
	struct test_obj_val	value;
	struct rhlist_head	list_node;
};

struct thread_data {
	unsigned int entries;
	int id;
	struct task_struct *task;
	struct test_obj *objs;
};

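/*
 * Deliberately poor hash function for the duplicate-insertion tests:
 * mapping ids into only ten buckets forces chains of colliding entries.
 */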
static u32 my_hashfn(const void *data, u32 len, u32 seed)
{
	const struct test_obj_rhl *obj = data;

	return (obj->value.id % 10);
}

static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct test_obj_rhl *test_obj = obj;
	const struct test_obj_val *val = arg->key;

	return test_obj->value.id - val->id;
}

static struct rhashtable_params test_rht_params = {
	.head_offset = offsetof(struct test_obj, node),
	.key_offset = offsetof(struct test_obj, value),
	.key_len = sizeof(struct test_obj_val),
	.hashfn = jhash,
};

static struct rhashtable_params test_rht_params_dup = {
	.head_offset = offsetof(struct test_obj_rhl, list_node),
	.key_offset = offsetof(struct test_obj_rhl, value),
	.key_len = sizeof(struct test_obj_val),
	.hashfn = jhash,
	.obj_hashfn = my_hashfn,
	.obj_cmpfn = my_cmpfn,
	.nelem_hint = 128,
	.automatic_shrinking = false,
};

static atomic_t startup_count;
static DECLARE_WAIT_QUEUE_HEAD(startup_wait);

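/*
 * Insert one object, retrying for as long as the insert returns -EBUSY.
 * With the enomem_retry parameter set, -ENOMEM is retried the same way.
 * Returns the number of retries on success, or the final error code.
 */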
static int insert_retry(struct rhashtable *ht, struct test_obj *obj,
                        const struct rhashtable_params params)
{
	int err, retries = -1, enomem_retries = 0;

	do {
		retries++;
		cond_resched();
		err = rhashtable_insert_fast(ht, &obj->node, params);
		if (err == -ENOMEM && enomem_retry) {
			enomem_retries++;
			err = -EBUSY;
		}
	} while (err == -EBUSY);

	if (enomem_retries)
		pr_info(" %u insertions retried after -ENOMEM\n",
			enomem_retries);

	return err ? : retries;
}

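/*
 * Look up keys 0..entries-1: even keys were inserted (unless their
 * insertion failed) and must be found, odd keys must be absent.
 * Called under rcu_read_lock().
 */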
static int __init test_rht_lookup(struct rhashtable *ht, struct test_obj *array,
				  unsigned int entries)
{
	unsigned int i;

	for (i = 0; i < entries; i++) {
		struct test_obj *obj;
		bool expected = !(i % 2);
		struct test_obj_val key = {
			.id = i,
		};

		if (array[i / 2].value.id == TEST_INSERT_FAIL)
			expected = false;

		obj = rhashtable_lookup_fast(ht, &key, test_rht_params);

		if (expected && !obj) {
			pr_warn("Test failed: Could not find key %u\n", key.id);
			return -ENOENT;
		} else if (!expected && obj) {
			pr_warn("Test failed: Unexpected entry found for key %u\n",
				key.id);
			return -EEXIST;
		} else if (expected && obj) {
			if (obj->value.id != i) {
				pr_warn("Test failed: Lookup value mismatch %u!=%u\n",
					obj->value.id, i);
				return -EINVAL;
			}
		}

		cond_resched_rcu();
	}

	return 0;
}

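/*
 * Walk the whole table and compare the number of entries seen with both
 * ht->nelems and the expected count.  -EAGAIN from the walker merely
 * signals a concurrent resize and is counted as a table jump.
 */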
static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)
{
	unsigned int total = 0, chain_len = 0;
	struct rhashtable_iter hti;
	struct rhash_head *pos;

	rhashtable_walk_enter(ht, &hti);
	rhashtable_walk_start(&hti);

	while ((pos = rhashtable_walk_next(&hti))) {
		if (PTR_ERR(pos) == -EAGAIN) {
			pr_info("Info: encountered resize\n");
			chain_len++;
			continue;
		} else if (IS_ERR(pos)) {
			pr_warn("Test failed: rhashtable_walk_next() error: %ld\n",
				PTR_ERR(pos));
			break;
		}

		total++;
	}

	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	pr_info("  Traversal complete: counted=%u, nelems=%u, entries=%d, table-jumps=%u\n",
		total, atomic_read(&ht->nelems), entries, chain_len);

	if (total != atomic_read(&ht->nelems) || total != entries)
		pr_warn("Test failed: Total count mismatch ^^^\n");
}

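/*
 * One timed run: insert 'entries' objects with even keys, verify the
 * walker statistics and the lookups, then delete everything again.
 * Returns the elapsed time in nanoseconds, or a negative error code.
 */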
static s64 __init test_rhashtable(struct rhashtable *ht, struct test_obj *array,
				  unsigned int entries)
{
	struct test_obj *obj;
	int err;
	unsigned int i, insert_retries = 0;
	s64 start, end;

	/*
	 * Insertion Test:
	 * Insert entries into table with all keys even numbers
	 */
	pr_info("  Adding %d keys\n", entries);
	start = ktime_get_ns();
	for (i = 0; i < entries; i++) {
		struct test_obj *obj = &array[i];

		obj->value.id = i * 2;
		err = insert_retry(ht, obj, test_rht_params);
		if (err > 0)
			insert_retries += err;
		else if (err)
			return err;
	}

	if (insert_retries)
		pr_info("  %u insertions retried due to memory pressure\n",
			insert_retries);

	test_bucket_stats(ht, entries);
	rcu_read_lock();
	test_rht_lookup(ht, array, entries);
	rcu_read_unlock();

	test_bucket_stats(ht, entries);

	pr_info("  Deleting %d keys\n", entries);
	for (i = 0; i < entries; i++) {
		struct test_obj_val key = {
			.id = i * 2,
		};

		if (array[i].value.id != TEST_INSERT_FAIL) {
			obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
			BUG_ON(!obj);

			rhashtable_remove_fast(ht, &obj->node, test_rht_params);
		}

		cond_resched();
	}

	end = ktime_get_ns();
	pr_info("  Duration of test: %lld ns\n", end - start);

	return end - start;
}

static struct rhashtable ht;
static struct rhltable rhlt;

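/*
 * rhltable test: every object gets the same key, so they all hang off a
 * single rhlist.  Exercise ordered add/delete pairs, random add/delete
 * operations and a final teardown, tracking which objects should
 * currently be present in the obj_in_table bitmap.
 */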
static int __init test_rhltable(unsigned int entries)
{
	struct test_obj_rhl *rhl_test_objects;
	unsigned long *obj_in_table;
	unsigned int i, j, k;
	int ret, err;

	if (entries == 0)
		entries = 1;

	rhl_test_objects = vzalloc(array_size(entries,
					      sizeof(*rhl_test_objects)));
	if (!rhl_test_objects)
		return -ENOMEM;

	ret = -ENOMEM;
	obj_in_table = vzalloc(array_size(sizeof(unsigned long),
					  BITS_TO_LONGS(entries)));
	if (!obj_in_table)
		goto out_free;

	err = rhltable_init(&rhlt, &test_rht_params);
	if (WARN_ON(err))
		goto out_free;

	k = get_random_u32();
	ret = 0;
	for (i = 0; i < entries; i++) {
		rhl_test_objects[i].value.id = k;
		err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node,
				      test_rht_params);
		if (WARN(err, "error %d on element %d\n", err, i))
			break;
		if (err == 0)
			set_bit(i, obj_in_table);
	}

	if (err)
		ret = err;

	pr_info("test %d add/delete pairs into rhlist\n", entries);
	for (i = 0; i < entries; i++) {
		struct rhlist_head *h, *pos;
		struct test_obj_rhl *obj;
		struct test_obj_val key = {
			.id = k,
		};
		bool found;

		rcu_read_lock();
		h = rhltable_lookup(&rhlt, &key, test_rht_params);
		if (WARN(!h, "key not found during iteration %d of %d", i, entries)) {
			rcu_read_unlock();
			break;
		}

		if (i) {
			j = i - 1;
			rhl_for_each_entry_rcu(obj, pos, h, list_node) {
				if (WARN(pos == &rhl_test_objects[j].list_node, "old element found, should be gone"))
					break;
			}
		}

		cond_resched_rcu();

		found = false;

		rhl_for_each_entry_rcu(obj, pos, h, list_node) {
			if (pos == &rhl_test_objects[i].list_node) {
				found = true;
				break;
			}
		}

		rcu_read_unlock();

		if (WARN(!found, "element %d not found", i))
			break;

		err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
		WARN(err, "rhltable_remove: err %d for iteration %d\n", err, i);
		if (err == 0)
			clear_bit(i, obj_in_table);
	}

	if (ret == 0 && err)
		ret = err;

	for (i = 0; i < entries; i++) {
		WARN(test_bit(i, obj_in_table), "elem %d allegedly still present", i);

		err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node,
				      test_rht_params);
		if (WARN(err, "error %d on element %d\n", err, i))
			break;
		if (err == 0)
			set_bit(i, obj_in_table);
	}

	pr_info("test %d random rhlist add/delete operations\n", entries);
	for (j = 0; j < entries; j++) {
		u32 i = get_random_u32_below(entries);
		u32 prand = get_random_u32_below(4);

		cond_resched();

		err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
		if (test_bit(i, obj_in_table)) {
			clear_bit(i, obj_in_table);
			if (WARN(err, "cannot remove element at slot %d", i))
				continue;
		} else {
			if (WARN(err != -ENOENT, "removed non-existent element %d, error %d not %d",
			     i, err, -ENOENT))
				continue;
		}

		if (prand & 1) {
			err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
			if (err == 0) {
				if (WARN(test_and_set_bit(i, obj_in_table), "succeeded to insert same object %d", i))
					continue;
			} else {
				if (WARN(!test_bit(i, obj_in_table), "failed to insert object %d", i))
					continue;
			}
		}

		if (prand & 2) {
			i = get_random_u32_below(entries);
			if (test_bit(i, obj_in_table)) {
				err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
				WARN(err, "cannot remove element at slot %d", i);
				if (err == 0)
					clear_bit(i, obj_in_table);
			} else {
				err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
				WARN(err, "failed to insert object %d", i);
				if (err == 0)
					set_bit(i, obj_in_table);
			}
		}
	}

	for (i = 0; i < entries; i++) {
		cond_resched();
		err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
		if (test_bit(i, obj_in_table)) {
			if (WARN(err, "cannot remove element at slot %d", i))
				continue;
		} else {
			if (WARN(err != -ENOENT, "removed non-existent element, error %d not %d",
				 err, -ENOENT))
				continue;
		}
	}

	rhltable_destroy(&rhlt);
out_free:
	vfree(rhl_test_objects);
	vfree(obj_in_table);
	return ret;
}

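/*
 * With max_size set, filling the table up to ht.max_elems must succeed,
 * and the very next insertion is expected to fail with -E2BIG.
 */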
static int __init test_rhashtable_max(struct test_obj *array,
				      unsigned int entries)
{
	unsigned int i;
	int err;

	test_rht_params.max_size = roundup_pow_of_two(entries / 8);
	err = rhashtable_init(&ht, &test_rht_params);
	if (err)
		return err;

	for (i = 0; i < ht.max_elems; i++) {
		struct test_obj *obj = &array[i];

		obj->value.id = i * 2;
		err = insert_retry(&ht, obj, test_rht_params);
		if (err < 0)
			return err;
	}

	err = insert_retry(&ht, &array[ht.max_elems], test_rht_params);
	if (err == -E2BIG) {
		err = 0;
	} else {
		pr_info("insert element %u should have failed with %d, got %d\n",
				ht.max_elems, -E2BIG, err);
		if (err == 0)
			err = -1;
	}

	rhashtable_destroy(&ht);

	return err;
}

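/*
 * Dump every bucket chain of the rhltable into one buffer and print it,
 * returning the number of objects seen.  ht->mutex is taken so that
 * rht_dereference() does not trigger RCU warnings.
 */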
static unsigned int __init print_ht(struct rhltable *rhlt)
{
	struct rhashtable *ht;
	const struct bucket_table *tbl;
	char buff[512] = "";
	int offset = 0;
	unsigned int i, cnt = 0;

	ht = &rhlt->ht;
	/* Take the mutex to avoid RCU warning */
	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++) {
		struct rhash_head *pos, *next;
		struct test_obj_rhl *p;

		pos = rht_ptr_exclusive(tbl->buckets + i);
		next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;

		if (!rht_is_a_nulls(pos)) {
			offset += sprintf(buff + offset, "\nbucket[%d] -> ", i);
		}

		while (!rht_is_a_nulls(pos)) {
			struct rhlist_head *list = container_of(pos, struct rhlist_head, rhead);
			offset += sprintf(buff + offset, "[[");
			do {
				pos = &list->rhead;
				list = rht_dereference(list->next, ht);
				p = rht_obj(ht, pos);

				offset += sprintf(buff + offset, " val %d (tid=%d)%s", p->value.id, p->value.tid,
					list ? ", " : " ");
				cnt++;
			} while (list);

			pos = next;
			next = !rht_is_a_nulls(pos) ?
				rht_dereference(pos->next, ht) : NULL;

			offset += sprintf(buff + offset, "]]%s", !rht_is_a_nulls(pos) ? " -> " : "");
		}
	}
	printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
	mutex_unlock(&ht->mutex);

	return cnt;
}

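/*
 * Insert 'cnt' objects that deliberately collide (see my_hashfn), via
 * either rhltable_insert() or rhashtable_insert_slow(), then dump the
 * table and check that every object shows up.
 */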
static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
				  int cnt, bool slow)
{
	struct rhltable *rhlt;
	unsigned int i, ret;
	const char *key;
	int err = 0;

	rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL);
	if (WARN_ON(!rhlt))
		return -EINVAL;

	err = rhltable_init(rhlt, &test_rht_params_dup);
	if (WARN_ON(err)) {
		kfree(rhlt);
		return err;
	}

	for (i = 0; i < cnt; i++) {
		rhl_test_objects[i].value.tid = i;
		key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead);
		key += test_rht_params_dup.key_offset;

		if (slow) {
			err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key,
							     &rhl_test_objects[i].list_node.rhead));
			if (err == -EAGAIN)
				err = 0;
		} else
			err = rhltable_insert(rhlt,
					      &rhl_test_objects[i].list_node,
					      test_rht_params_dup);
		if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow ? "slow" : "fast"))
			goto skip_print;
	}

	ret = print_ht(rhlt);
	WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow ? "slow" : "fast");

skip_print:
	rhltable_destroy(rhlt);
	kfree(rhlt);

	return 0;
}

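/* Run the duplicate-key scenarios through both insertion paths. */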
static int __init test_insert_duplicates_run(void)
{
	struct test_obj_rhl rhl_test_objects[3] = {};

	pr_info("test inserting duplicates\n");

	/* two different values that map to the same bucket */
	rhl_test_objects[0].value.id = 1;
	rhl_test_objects[1].value.id = 21;

	/* and another duplicate with the same value as [0],
	 * which will be second on the bucket list */
	rhl_test_objects[2].value.id = rhl_test_objects[0].value.id;

	test_insert_dup(rhl_test_objects, 2, false);
	test_insert_dup(rhl_test_objects, 3, false);
	test_insert_dup(rhl_test_objects, 2, true);
	test_insert_dup(rhl_test_objects, 3, true);

	return 0;
}

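/*
 * Verify this thread's view of the shared table: objects marked
 * TEST_INSERT_FAIL (never inserted or already removed) must not be
 * found, all others must be found with the expected value.
 */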
static int thread_lookup_test(struct thread_data *tdata)
{
	unsigned int entries = tdata->entries;
	int i, err = 0;

	for (i = 0; i < entries; i++) {
		struct test_obj *obj;
		struct test_obj_val key = {
			.id = i,
			.tid = tdata->id,
		};

		obj = rhashtable_lookup_fast(&ht, &key, test_rht_params);
		if (obj && (tdata->objs[i].value.id == TEST_INSERT_FAIL)) {
			pr_err("  found unexpected object %d-%d\n", key.tid, key.id);
			err++;
		} else if (!obj && (tdata->objs[i].value.id != TEST_INSERT_FAIL)) {
			pr_err("  object %d-%d not found!\n", key.tid, key.id);
			err++;
		} else if (obj && memcmp(&obj->value, &key, sizeof(key))) {
			pr_err("  wrong object returned (got %d-%d, expected %d-%d)\n",
			       obj->value.tid, obj->value.id, key.tid, key.id);
			err++;
		}

		cond_resched();
	}
	return err;
}

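/*
 * Thread body for the concurrency test: wait until all test threads
 * have started, insert this thread's objects, then repeatedly delete
 * them in strides of decreasing step size, verifying lookups after
 * each pass.
 */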
static int threadfunc(void *data)
{
	int i, step, err = 0, insert_retries = 0;
	struct thread_data *tdata = data;

	if (atomic_dec_and_test(&startup_count))
		wake_up(&startup_wait);
	if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == -1)) {
		pr_err("  thread[%d]: interrupted\n", tdata->id);
		goto out;
	}

	for (i = 0; i < tdata->entries; i++) {
		tdata->objs[i].value.id = i;
		tdata->objs[i].value.tid = tdata->id;
		err = insert_retry(&ht, &tdata->objs[i], test_rht_params);
		if (err > 0) {
			insert_retries += err;
		} else if (err) {
			pr_err("  thread[%d]: rhashtable_insert_fast failed\n",
			       tdata->id);
			goto out;
		}
	}
	if (insert_retries)
		pr_info("  thread[%d]: %u insertions retried due to memory pressure\n",
			tdata->id, insert_retries);

	err = thread_lookup_test(tdata);
	if (err) {
		pr_err("  thread[%d]: rhashtable_lookup_test failed\n",
		       tdata->id);
		goto out;
	}

	for (step = 10; step > 0; step--) {
		for (i = 0; i < tdata->entries; i += step) {
			if (tdata->objs[i].value.id == TEST_INSERT_FAIL)
				continue;
			err = rhashtable_remove_fast(&ht, &tdata->objs[i].node,
			                             test_rht_params);
			if (err) {
				pr_err("  thread[%d]: rhashtable_remove_fast failed\n",
				       tdata->id);
				goto out;
			}
			tdata->objs[i].value.id = TEST_INSERT_FAIL;

			cond_resched();
		}
		err = thread_lookup_test(tdata);
		if (err) {
			pr_err("  thread[%d]: rhashtable_lookup_test (2) failed\n",
			       tdata->id);
			goto out;
		}
	}
out:
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return err;
}

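/*
 * Module init: run the timed single-threaded tests, the max_size check,
 * the duplicate-insertion tests, the multi-threaded test and finally
 * the rhltable test, reporting results via the kernel log.
 */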
static int __init test_rht_init(void)
{
	unsigned int entries;
	int i, err, started_threads = 0, failed_threads = 0;
	u64 total_time = 0;
	struct thread_data *tdata;
	struct test_obj *objs;

	if (parm_entries < 0)
		parm_entries = 1;

	entries = min(parm_entries, MAX_ENTRIES);

	test_rht_params.automatic_shrinking = shrinking;
	test_rht_params.max_size = max_size ? : roundup_pow_of_two(entries);
	test_rht_params.nelem_hint = size;

	objs = vzalloc(array_size(sizeof(struct test_obj),
				  test_rht_params.max_size + 1));
	if (!objs)
		return -ENOMEM;

	pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n",
		size, max_size, shrinking);

	for (i = 0; i < runs; i++) {
		s64 time;

		pr_info("Test %02d:\n", i);
		memset(objs, 0, test_rht_params.max_size * sizeof(struct test_obj));

		err = rhashtable_init(&ht, &test_rht_params);
		if (err < 0) {
			pr_warn("Test failed: Unable to initialize hashtable: %d\n",
				err);
			continue;
		}

		time = test_rhashtable(&ht, objs, entries);
		rhashtable_destroy(&ht);
		if (time < 0) {
			vfree(objs);
			pr_warn("Test failed: return code %lld\n", time);
			return -EINVAL;
		}

		total_time += time;
	}

	pr_info("test if it's possible to exceed max_size %d: %s\n",
			test_rht_params.max_size, test_rhashtable_max(objs, entries) == 0 ?
			"no, ok" : "YES, failed");
	vfree(objs);

	do_div(total_time, runs);
	pr_info("Average test time: %llu\n", total_time);

	test_insert_duplicates_run();

	if (!tcount)
		return 0;

	pr_info("Testing concurrent rhashtable access from %d threads\n",
	        tcount);
	atomic_set(&startup_count, tcount);
	tdata = vzalloc(array_size(tcount, sizeof(struct thread_data)));
	if (!tdata)
		return -ENOMEM;
	objs = vzalloc(array3_size(sizeof(struct test_obj), tcount, entries));
	if (!objs) {
		vfree(tdata);
		return -ENOMEM;
	}

	test_rht_params.max_size = max_size ? :
	                           roundup_pow_of_two(tcount * entries);
	err = rhashtable_init(&ht, &test_rht_params);
	if (err < 0) {
		pr_warn("Test failed: Unable to initialize hashtable: %d\n",
			err);
		vfree(tdata);
		vfree(objs);
		return -EINVAL;
	}
	for (i = 0; i < tcount; i++) {
		tdata[i].id = i;
		tdata[i].entries = entries;
		tdata[i].objs = objs + i * entries;
		tdata[i].task = kthread_run(threadfunc, &tdata[i],
		                            "rhashtable_thread[%d]", i);
		if (IS_ERR(tdata[i].task)) {
			pr_err(" kthread_run failed for thread %d\n", i);
			atomic_dec(&startup_count);
		} else {
			started_threads++;
		}
	}
	if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == 0))
		pr_err("  wait_event interruptible failed\n");
	/* count is 0 now, set it to -1 and wake up all threads together */
	atomic_dec(&startup_count);
	wake_up_all(&startup_wait);
	for (i = 0; i < tcount; i++) {
		if (IS_ERR(tdata[i].task))
			continue;
		if ((err = kthread_stop(tdata[i].task))) {
			pr_warn("Test failed: thread %d returned: %d\n",
			        i, err);
			failed_threads++;
		}
	}
	rhashtable_destroy(&ht);
	vfree(tdata);
	vfree(objs);

	/*
	 * rhltable_remove is very expensive; the default values can cause
	 * this test to run for 2 minutes or more, so use a smaller number
	 * instead.
	 */
	err = test_rhltable(entries / 16);
	pr_info("Started %d threads, %d failed, rhltable test returns %d\n",
	        started_threads, failed_threads, err);
	return 0;
}

static void __exit test_rht_exit(void)
{
}

module_init(test_rht_init);
module_exit(test_rht_exit);

MODULE_DESCRIPTION("Resizable, Scalable, Concurrent Hash Table test module");
MODULE_LICENSE("GPL v2");