// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/jhash.h>
#include <linux/netlink.h>
#include <linux/workqueue.h>
#include <linux/rhashtable.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>

/* We target a hash table size of 4, element hint is 75% of final size */
#define NFT_RHASH_ELEMENT_HINT 3
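/* Illustration (rhashtable sizing, not part of this file): rhashtable sizes
 * its initial table to roughly nelem_hint * 4 / 3 rounded up to a power of
 * two, so a hint of 3 yields the 4-bucket table mentioned above.
 */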

struct nft_rhash {
	struct rhashtable		ht;
	struct delayed_work		gc_work;
	u32				wq_gc_seq;
};

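/* walk_node links the element into the temporary snapshot list used by
 * nft_rhash_walk_update(); wq_gc_seq records the last GC worker run that
 * queued this element, so the unstable rhashtable walk does not queue it
 * twice within one run.
 */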
struct nft_rhash_elem {
	struct nft_elem_priv		priv;
	struct rhash_head		node;
	struct llist_node		walk_node;
	u32				wq_gc_seq;
	struct nft_set_ext		ext;
};

struct nft_rhash_cmp_arg {
	const struct nft_set		*set;
	const u32			*key;
	u8				genmask;
	u64				tstamp;
};

static inline u32 nft_rhash_key(const void *data, u32 len, u32 seed)
{
	const struct nft_rhash_cmp_arg *arg = data;

	return jhash(arg->key, len, seed);
}

static inline u32 nft_rhash_obj(const void *data, u32 len, u32 seed)
{
	const struct nft_rhash_elem *he = data;

	return jhash(nft_set_ext_key(&he->ext), len, seed);
}

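/* Treat dead, expired and generation-inactive elements as non-matching, so
 * rhashtable lookups transparently skip entries that are pending garbage
 * collection or not visible in the requested generation.
 */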
static inline int nft_rhash_cmp(struct rhashtable_compare_arg *arg,
				const void *ptr)
{
	const struct nft_rhash_cmp_arg *x = arg->key;
	const struct nft_rhash_elem *he = ptr;

	if (memcmp(nft_set_ext_key(&he->ext), x->key, x->set->klen))
		return 1;
	if (nft_set_elem_is_dead(&he->ext))
		return 1;
	if (__nft_set_elem_expired(&he->ext, x->tstamp))
		return 1;
	if (!nft_set_elem_active(&he->ext, x->genmask))
		return 1;
	return 0;
}

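/* key_len is not known at compile time; nft_rhash_init() copies these
 * parameters and fills it in from set->klen.
 */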
static const struct rhashtable_params nft_rhash_params = {
	.head_offset		= offsetof(struct nft_rhash_elem, node),
	.hashfn			= nft_rhash_key,
	.obj_hashfn		= nft_rhash_obj,
	.obj_cmpfn		= nft_rhash_cmp,
	.automatic_shrinking	= true,
};

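/* Data path lookup: the comparison argument carries the current generation
 * mask and the current jiffies timestamp, so elements that have expired but
 * have not been garbage collected yet are not returned.
 */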
INDIRECT_CALLABLE_SCOPE
const struct nft_set_ext *
nft_rhash_lookup(const struct net *net, const struct nft_set *set,
		 const u32 *key)
{
	struct nft_rhash *priv = nft_set_priv(set);
	const struct nft_rhash_elem *he;
	struct nft_rhash_cmp_arg arg = {
		.genmask = nft_genmask_cur(net),
		.set	 = set,
		.key	 = key,
		.tstamp  = get_jiffies_64(),
	};

	he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
	if (he != NULL)
		return &he->ext;

	return NULL;
}

static struct nft_elem_priv *
nft_rhash_get(const struct net *net, const struct nft_set *set,
	      const struct nft_set_elem *elem, unsigned int flags)
{
	struct nft_rhash *priv = nft_set_priv(set);
	struct nft_rhash_elem *he;
	struct nft_rhash_cmp_arg arg = {
		.genmask = nft_genmask_cur(net),
		.set	 = set,
		.key	 = elem->key.val.data,
		.tstamp  = get_jiffies_64(),
	};

	he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
	if (he != NULL)
		return &he->priv;

	return ERR_PTR(-ENOENT);
}

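/* Called from the packet path by dynamic sets: look the key up across all
 * generations and, if it is missing, create a new element via
 * nft_dynset_new(). Another CPU may insert the same key concurrently, in
 * which case the freshly allocated element is discarded and the winner is
 * returned instead.
 */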
static const struct nft_set_ext *
nft_rhash_update(struct nft_set *set, const u32 *key,
		 const struct nft_expr *expr, struct nft_regs *regs)
{
	struct nft_rhash *priv = nft_set_priv(set);
	struct nft_rhash_elem *he, *prev;
	struct nft_elem_priv *elem_priv;
	struct nft_rhash_cmp_arg arg = {
		.genmask = NFT_GENMASK_ANY,
		.set	 = set,
		.key	 = key,
		.tstamp  = get_jiffies_64(),
	};

	he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
	if (he != NULL)
		goto out;

	elem_priv = nft_dynset_new(set, expr, regs);
	if (!elem_priv)
		goto err1;

	he = nft_elem_priv_cast(elem_priv);
	init_llist_node(&he->walk_node);
	prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
						nft_rhash_params);
	if (IS_ERR(prev))
		goto err2;

	/* Another cpu may race to insert the element with the same key */
	if (prev) {
		nft_set_elem_destroy(set, &he->priv, true);
		atomic_dec(&set->nelems);
		he = prev;
	}

out:
	return &he->ext;

err2:
	nft_set_elem_destroy(set, &he->priv, true);
	atomic_dec(&set->nelems);
err1:
	return NULL;
}

static int nft_rhash_insert(const struct net *net, const struct nft_set *set,
			    const struct nft_set_elem *elem,
			    struct nft_elem_priv **elem_priv)
{
	struct nft_rhash_elem *he = nft_elem_priv_cast(elem->priv);
	struct nft_rhash *priv = nft_set_priv(set);
	struct nft_rhash_cmp_arg arg = {
		.genmask = nft_genmask_next(net),
		.set	 = set,
		.key	 = elem->key.val.data,
		.tstamp	 = nft_net_tstamp(net),
	};
	struct nft_rhash_elem *prev;

	init_llist_node(&he->walk_node);
	prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
						nft_rhash_params);
	if (IS_ERR(prev))
		return PTR_ERR(prev);
	if (prev) {
		*elem_priv = &prev->priv;
		return -EEXIST;
	}
	return 0;
}

static void nft_rhash_activate(const struct net *net, const struct nft_set *set,
			       struct nft_elem_priv *elem_priv)
{
	struct nft_rhash_elem *he = nft_elem_priv_cast(elem_priv);

	nft_clear(net, &he->ext);
}

static void nft_rhash_flush(const struct net *net,
			    const struct nft_set *set,
			    struct nft_elem_priv *elem_priv)
{
	struct nft_rhash_elem *he = nft_elem_priv_cast(elem_priv);

	nft_set_elem_change_active(net, set, &he->ext);
}

static struct nft_elem_priv *
nft_rhash_deactivate(const struct net *net, const struct nft_set *set,
		     const struct nft_set_elem *elem)
{
	struct nft_rhash *priv = nft_set_priv(set);
	struct nft_rhash_elem *he;
	struct nft_rhash_cmp_arg arg = {
		.genmask = nft_genmask_next(net),
		.set	 = set,
		.key	 = elem->key.val.data,
		.tstamp	 = nft_net_tstamp(net),
	};

	rcu_read_lock();
	he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
	if (he)
		nft_set_elem_change_active(net, set, &he->ext);

	rcu_read_unlock();

	/* priv is the first member of struct nft_rhash_elem (see the
	 * BUILD_BUG_ON() in nft_rhash_init()), so a failed lookup yields a
	 * NULL return here.
	 */
	return &he->priv;
}

static void nft_rhash_remove(const struct net *net,
			     const struct nft_set *set,
			     struct nft_elem_priv *elem_priv)
{
	struct nft_rhash_elem *he = nft_elem_priv_cast(elem_priv);
	struct nft_rhash *priv = nft_set_priv(set);

	rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params);
}

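/* Data path deletion only marks the element as dead; the asynchronous GC
 * worker later queues it for removal and release.
 */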
static bool nft_rhash_delete(const struct nft_set *set,
			     const u32 *key)
{
	struct nft_rhash *priv = nft_set_priv(set);
	struct nft_rhash_cmp_arg arg = {
		.genmask = NFT_GENMASK_ANY,
		.set = set,
		.key = key,
	};
	struct nft_rhash_elem *he;

	he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
	if (he == NULL)
		return false;

	nft_set_elem_dead(&he->ext);

	return true;
}

static void nft_rhash_walk_ro(const struct nft_ctx *ctx, struct nft_set *set,
			      struct nft_set_iter *iter)
{
	struct nft_rhash *priv = nft_set_priv(set);
	struct rhashtable_iter hti;
	struct nft_rhash_elem *he;

	rhashtable_walk_enter(&priv->ht, &hti);
	rhashtable_walk_start(&hti);

	while ((he = rhashtable_walk_next(&hti))) {
		if (IS_ERR(he)) {
			if (PTR_ERR(he) != -EAGAIN) {
				iter->err = PTR_ERR(he);
				break;
			}

			continue;
		}

		if (iter->count < iter->skip)
			goto cont;

		iter->err = iter->fn(ctx, set, iter, &he->priv);
		if (iter->err < 0)
			break;

cont:
		iter->count++;
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);
}

static void nft_rhash_walk_update(const struct nft_ctx *ctx,
				  struct nft_set *set,
				  struct nft_set_iter *iter)
{
	struct nft_rhash *priv = nft_set_priv(set);
	struct nft_rhash_elem *he, *tmp;
	struct llist_node *first_node;
	struct rhashtable_iter hti;
	LLIST_HEAD(walk_list);

	lockdep_assert_held(&nft_pernet(ctx->net)->commit_mutex);

	if (set->in_update_walk) {
		/* This can happen with bogus rulesets during ruleset validation
		 * when a verdict map causes a jump back to the same map.
		 *
		 * Without this extra check the walk_next loop below will see
		 * elems on the caller's walk_list and skip (not validate) them.
		 */
		iter->err = -EMLINK;
		return;
	}

	/* walk happens under RCU.
	 *
	 * We create a snapshot list so ->iter callback can sleep.
	 * commit_mutex is held, elements can ...
	 * .. be added in parallel from dataplane (dynset)
	 * .. be marked as dead in parallel from dataplane (dynset).
	 * .. be queued for removal in parallel (gc timeout).
	 * .. not be freed: transaction mutex is held.
	 */
	rhashtable_walk_enter(&priv->ht, &hti);
	rhashtable_walk_start(&hti);

	while ((he = rhashtable_walk_next(&hti))) {
		if (IS_ERR(he)) {
			if (PTR_ERR(he) != -EAGAIN) {
				iter->err = PTR_ERR(he);
				break;
			}

			continue;
		}

		/* rhashtable resized during the walk: this element is already
		 * on walk_list, skip the duplicate.
		 */
		if (llist_on_list(&he->walk_node))
			continue;

		llist_add(&he->walk_node, &walk_list);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	first_node = __llist_del_all(&walk_list);
	set->in_update_walk = true;
	llist_for_each_entry_safe(he, tmp, first_node, walk_node) {
		if (iter->err == 0) {
			iter->err = iter->fn(ctx, set, iter, &he->priv);
			if (iter->err == 0)
				iter->count++;
		}

		/* all entries must be cleared again, else next ->walk iteration
		 * will skip entries.
		 */
		init_llist_node(&he->walk_node);
	}
	set->in_update_walk = false;
}

static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
			   struct nft_set_iter *iter)
{
	switch (iter->type) {
	case NFT_ITER_UPDATE:
		/* only relevant for netlink dumps which use READ type */
		WARN_ON_ONCE(iter->skip != 0);

		nft_rhash_walk_update(ctx, set, iter);
		break;
	case NFT_ITER_READ:
		nft_rhash_walk_ro(ctx, set, iter);
		break;
	default:
		iter->err = -EINVAL;
		WARN_ON_ONCE(1);
		break;
	}
}

static bool nft_rhash_expr_needs_gc_run(const struct nft_set *set,
					struct nft_set_ext *ext)
{
	struct nft_set_elem_expr *elem_expr = nft_set_ext_expr(ext);
	struct nft_expr *expr;
	u32 size;

	nft_setelem_expr_foreach(expr, elem_expr, size) {
		if (expr->ops->gc &&
		    expr->ops->gc(read_pnet(&set->net), expr) &&
		    set->flags & NFT_SET_EVAL)
			return true;
	}

	return false;
}

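/* Periodic GC worker: walk the table, mark expired elements (and elements
 * whose expressions request collection) as dead and queue them for
 * asynchronous removal. The run is abandoned and retried later if the walk
 * fails or the ruleset changed underneath us (gc_seq mismatch).
 */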
static void nft_rhash_gc(struct work_struct *work)
{
	struct nftables_pernet *nft_net;
	struct nft_set *set;
	struct nft_rhash_elem *he;
	struct nft_rhash *priv;
	struct rhashtable_iter hti;
	struct nft_trans_gc *gc;
	struct net *net;
	u32 gc_seq;

	priv = container_of(work, struct nft_rhash, gc_work.work);
	set  = nft_set_container_of(priv);
	net  = read_pnet(&set->net);
	nft_net = nft_pernet(net);
	gc_seq = READ_ONCE(nft_net->gc_seq);

	if (nft_set_gc_is_pending(set))
		goto done;

	gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
	if (!gc)
		goto done;

	/* Elements never collected use a zero gc worker sequence number. */
	if (unlikely(++priv->wq_gc_seq == 0))
		priv->wq_gc_seq++;

	rhashtable_walk_enter(&priv->ht, &hti);
	rhashtable_walk_start(&hti);

	while ((he = rhashtable_walk_next(&hti))) {
		if (IS_ERR(he)) {
			nft_trans_gc_destroy(gc);
			gc = NULL;
			goto try_later;
		}

		/* Ruleset has been updated, try later. */
		if (READ_ONCE(nft_net->gc_seq) != gc_seq) {
			nft_trans_gc_destroy(gc);
			gc = NULL;
			goto try_later;
		}

		/* rhashtable walk is unstable, already seen in this gc run?
		 * Then, skip this element. In case of (unlikely) sequence
		 * wraparound and stale element wq_gc_seq, next gc run will
		 * just find this expired element.
		 */
		if (he->wq_gc_seq == priv->wq_gc_seq)
			continue;

		if (nft_set_elem_is_dead(&he->ext))
			goto dead_elem;

		if (nft_set_ext_exists(&he->ext, NFT_SET_EXT_EXPRESSIONS) &&
		    nft_rhash_expr_needs_gc_run(set, &he->ext))
			goto needs_gc_run;

		if (!nft_set_elem_expired(&he->ext))
			continue;
needs_gc_run:
		nft_set_elem_dead(&he->ext);
dead_elem:
		gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
		if (!gc)
			goto try_later;

		/* annotate gc sequence for this attempt. */
		he->wq_gc_seq = priv->wq_gc_seq;
		nft_trans_gc_elem_add(gc, he);
	}

	gc = nft_trans_gc_catchall_async(gc, gc_seq);

try_later:
	/* catchall list iteration requires rcu read side lock. */
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	if (gc)
		nft_trans_gc_queue_async_done(gc);

done:
	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
			   nft_set_gc_interval(set));
}

static u64 nft_rhash_privsize(const struct nlattr * const nla[],
			      const struct nft_set_desc *desc)
{
	return sizeof(struct nft_rhash);
}

static void nft_rhash_gc_init(const struct nft_set *set)
{
	struct nft_rhash *priv = nft_set_priv(set);

	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
			   nft_set_gc_interval(set));
}

static int nft_rhash_init(const struct nft_set *set,
			  const struct nft_set_desc *desc,
			  const struct nlattr * const tb[])
{
	struct nft_rhash *priv = nft_set_priv(set);
	struct rhashtable_params params = nft_rhash_params;
	int err;

	BUILD_BUG_ON(offsetof(struct nft_rhash_elem, priv) != 0);

	params.nelem_hint = desc->size ?: NFT_RHASH_ELEMENT_HINT;
	params.key_len	  = set->klen;

	err = rhashtable_init(&priv->ht, &params);
	if (err < 0)
		return err;

	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rhash_gc);
	if (set->flags & (NFT_SET_TIMEOUT | NFT_SET_EVAL))
		nft_rhash_gc_init(set);

	return 0;
}

struct nft_rhash_ctx {
	const struct nft_ctx	ctx;
	const struct nft_set	*set;
};

static void nft_rhash_elem_destroy(void *ptr, void *arg)
{
	struct nft_rhash_ctx *rhash_ctx = arg;
	struct nft_rhash_elem *he = ptr;

	nf_tables_set_elem_destroy(&rhash_ctx->ctx, rhash_ctx->set, &he->priv);
}

static void nft_rhash_destroy(const struct nft_ctx *ctx,
			      const struct nft_set *set)
{
	struct nft_rhash *priv = nft_set_priv(set);
	struct nft_rhash_ctx rhash_ctx = {
		.ctx	= *ctx,
		.set	= set,
	};

	cancel_delayed_work_sync(&priv->gc_work);
	rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy,
				    (void *)&rhash_ctx);
}

/* Number of buckets is stored in u32, so cap our result to 1U<<31 */
#define NFT_MAX_BUCKETS (1U << 31)

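/* Size the fixed hash table for a ~75% load factor. For example, a set
 * sized for 1000 elements gives 1000 * 4 / 3 = 1333, rounded up to 2048
 * buckets.
 */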
static u32 nft_hash_buckets(u32 size)
{
	u64 val = div_u64((u64)size * 4, 3);

	if (val >= NFT_MAX_BUCKETS)
		return NFT_MAX_BUCKETS;

	return roundup_pow_of_two(val);
}

static bool nft_rhash_estimate(const struct nft_set_desc *desc, u32 features,
			       struct nft_set_estimate *est)
{
	est->size   = ~0;
	est->lookup = NFT_SET_CLASS_O_1;
	est->space  = NFT_SET_CLASS_O_N;

	return true;
}

struct nft_hash {
	u32				seed;
	u32				buckets;
	struct hlist_head		table[];
};

struct nft_hash_elem {
	struct nft_elem_priv		priv;
	struct hlist_node		node;
	struct nft_set_ext		ext;
};

INDIRECT_CALLABLE_SCOPE
const struct nft_set_ext *
nft_hash_lookup(const struct net *net, const struct nft_set *set,
		const u32 *key)
{
	struct nft_hash *priv = nft_set_priv(set);
	u8 genmask = nft_genmask_cur(net);
	const struct nft_hash_elem *he;
	u32 hash;

	hash = jhash(key, set->klen, priv->seed);
	hash = reciprocal_scale(hash, priv->buckets);
	hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
		if (!memcmp(nft_set_ext_key(&he->ext), key, set->klen) &&
		    nft_set_elem_active(&he->ext, genmask))
			return &he->ext;
	}
	return NULL;
}

static struct nft_elem_priv *
nft_hash_get(const struct net *net, const struct nft_set *set,
	     const struct nft_set_elem *elem, unsigned int flags)
{
	struct nft_hash *priv = nft_set_priv(set);
	u8 genmask = nft_genmask_cur(net);
	struct nft_hash_elem *he;
	u32 hash;

	hash = jhash(elem->key.val.data, set->klen, priv->seed);
	hash = reciprocal_scale(hash, priv->buckets);
	hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
		if (!memcmp(nft_set_ext_key(&he->ext), elem->key.val.data, set->klen) &&
		    nft_set_elem_active(&he->ext, genmask))
			return &he->priv;
	}
	return ERR_PTR(-ENOENT);
}

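/* Fast-path lookup for 4-byte keys. nft_hash_fast_estimate() only selects
 * this backend when desc->klen == 4, so reading a single u32 from the key
 * is safe here.
 */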
INDIRECT_CALLABLE_SCOPE
const struct nft_set_ext *
nft_hash_lookup_fast(const struct net *net, const struct nft_set *set,
		     const u32 *key)
{
	struct nft_hash *priv = nft_set_priv(set);
	u8 genmask = nft_genmask_cur(net);
	const struct nft_hash_elem *he;
	u32 hash, k1, k2;

	k1 = *key;
	hash = jhash_1word(k1, priv->seed);
	hash = reciprocal_scale(hash, priv->buckets);
	hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
		k2 = *(u32 *)nft_set_ext_key(&he->ext)->data;
		if (k1 == k2 &&
		    nft_set_elem_active(&he->ext, genmask))
			return &he->ext;
	}
	return NULL;
}

static u32 nft_jhash(const struct nft_set *set, const struct nft_hash *priv,
		     const struct nft_set_ext *ext)
{
	const struct nft_data *key = nft_set_ext_key(ext);
	u32 hash, k1;

	if (set->klen == 4) {
		k1 = *(u32 *)key;
		hash = jhash_1word(k1, priv->seed);
	} else {
		hash = jhash(key, set->klen, priv->seed);
	}
	hash = reciprocal_scale(hash, priv->buckets);

	return hash;
}

static int nft_hash_insert(const struct net *net, const struct nft_set *set,
			   const struct nft_set_elem *elem,
			   struct nft_elem_priv **elem_priv)
{
	struct nft_hash_elem *this = nft_elem_priv_cast(elem->priv), *he;
	struct nft_hash *priv = nft_set_priv(set);
	u8 genmask = nft_genmask_next(net);
	u32 hash;

	hash = nft_jhash(set, priv, &this->ext);
	hlist_for_each_entry(he, &priv->table[hash], node) {
		if (!memcmp(nft_set_ext_key(&this->ext),
			    nft_set_ext_key(&he->ext), set->klen) &&
		    nft_set_elem_active(&he->ext, genmask)) {
			*elem_priv = &he->priv;
			return -EEXIST;
		}
	}
	hlist_add_head_rcu(&this->node, &priv->table[hash]);
	return 0;
}

static void nft_hash_activate(const struct net *net, const struct nft_set *set,
			      struct nft_elem_priv *elem_priv)
{
	struct nft_hash_elem *he = nft_elem_priv_cast(elem_priv);

	nft_clear(net, &he->ext);
}

static void nft_hash_flush(const struct net *net,
			   const struct nft_set *set,
			   struct nft_elem_priv *elem_priv)
{
	struct nft_hash_elem *he = nft_elem_priv_cast(elem_priv);

	nft_set_elem_change_active(net, set, &he->ext);
}

static struct nft_elem_priv *
nft_hash_deactivate(const struct net *net, const struct nft_set *set,
		    const struct nft_set_elem *elem)
{
	struct nft_hash_elem *this = nft_elem_priv_cast(elem->priv), *he;
	struct nft_hash *priv = nft_set_priv(set);
	u8 genmask = nft_genmask_next(net);
	u32 hash;

	hash = nft_jhash(set, priv, &this->ext);
	hlist_for_each_entry(he, &priv->table[hash], node) {
		if (!memcmp(nft_set_ext_key(&he->ext), &elem->key.val,
			    set->klen) &&
		    nft_set_elem_active(&he->ext, genmask)) {
			nft_set_elem_change_active(net, set, &he->ext);
			return &he->priv;
		}
	}
	return NULL;
}

static void nft_hash_remove(const struct net *net,
			    const struct nft_set *set,
			    struct nft_elem_priv *elem_priv)
{
	struct nft_hash_elem *he = nft_elem_priv_cast(elem_priv);

	hlist_del_rcu(&he->node);
}

static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
			  struct nft_set_iter *iter)
{
	struct nft_hash *priv = nft_set_priv(set);
	struct nft_hash_elem *he;
	int i;

	for (i = 0; i < priv->buckets; i++) {
		hlist_for_each_entry_rcu(he, &priv->table[i], node,
					 lockdep_is_held(&nft_pernet(ctx->net)->commit_mutex)) {
			if (iter->count < iter->skip)
				goto cont;

			iter->err = iter->fn(ctx, set, iter, &he->priv);
			if (iter->err < 0)
				return;
cont:
			iter->count++;
		}
	}
}

static u64 nft_hash_privsize(const struct nlattr * const nla[],
			     const struct nft_set_desc *desc)
{
	return sizeof(struct nft_hash) +
	       (u64)nft_hash_buckets(desc->size) * sizeof(struct hlist_head);
}

static int nft_hash_init(const struct nft_set *set,
			 const struct nft_set_desc *desc,
			 const struct nlattr * const tb[])
{
	struct nft_hash *priv = nft_set_priv(set);

	priv->buckets = nft_hash_buckets(desc->size);
	get_random_bytes(&priv->seed, sizeof(priv->seed));

	return 0;
}

static void nft_hash_destroy(const struct nft_ctx *ctx,
			     const struct nft_set *set)
{
	struct nft_hash *priv = nft_set_priv(set);
	struct nft_hash_elem *he;
	struct hlist_node *next;
	int i;

	for (i = 0; i < priv->buckets; i++) {
		hlist_for_each_entry_safe(he, next, &priv->table[i], node) {
			hlist_del_rcu(&he->node);
			nf_tables_set_elem_destroy(ctx, set, &he->priv);
		}
	}
}

static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
			      struct nft_set_estimate *est)
{
	if (!desc->size)
		return false;

	if (desc->klen == 4)
		return false;

	est->size   = sizeof(struct nft_hash) +
		      (u64)nft_hash_buckets(desc->size) * sizeof(struct hlist_head) +
		      (u64)desc->size * sizeof(struct nft_hash_elem);
	est->lookup = NFT_SET_CLASS_O_1;
	est->space  = NFT_SET_CLASS_O_N;

	return true;
}

static bool nft_hash_fast_estimate(const struct nft_set_desc *desc, u32 features,
				   struct nft_set_estimate *est)
{
	if (!desc->size)
		return false;

	if (desc->klen != 4)
		return false;

	est->size   = sizeof(struct nft_hash) +
		      (u64)nft_hash_buckets(desc->size) * sizeof(struct hlist_head) +
		      (u64)desc->size * sizeof(struct nft_hash_elem);
	est->lookup = NFT_SET_CLASS_O_1;
	est->space  = NFT_SET_CLASS_O_N;

	return true;
}

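/* Three hash backends are registered: the resizable rhashtable flavour,
 * which also supports timeouts and dynamic updates from the packet path,
 * and two fixed-size flavours selected by their estimate callbacks, one
 * generic and one specialised for 4-byte keys.
 */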
const struct nft_set_type nft_set_rhash_type = {
	.features	= NFT_SET_MAP | NFT_SET_OBJECT |
			  NFT_SET_TIMEOUT | NFT_SET_EVAL,
	.ops		= {
		.privsize       = nft_rhash_privsize,
		.elemsize	= offsetof(struct nft_rhash_elem, ext),
		.estimate	= nft_rhash_estimate,
		.init		= nft_rhash_init,
		.gc_init	= nft_rhash_gc_init,
		.destroy	= nft_rhash_destroy,
		.insert		= nft_rhash_insert,
		.activate	= nft_rhash_activate,
		.deactivate	= nft_rhash_deactivate,
		.flush		= nft_rhash_flush,
		.remove		= nft_rhash_remove,
		.lookup		= nft_rhash_lookup,
		.update		= nft_rhash_update,
		.delete		= nft_rhash_delete,
		.walk		= nft_rhash_walk,
		.get		= nft_rhash_get,
	},
};

const struct nft_set_type nft_set_hash_type = {
	.features	= NFT_SET_MAP | NFT_SET_OBJECT,
	.ops		= {
		.privsize       = nft_hash_privsize,
		.elemsize	= offsetof(struct nft_hash_elem, ext),
		.estimate	= nft_hash_estimate,
		.init		= nft_hash_init,
		.destroy	= nft_hash_destroy,
		.insert		= nft_hash_insert,
		.activate	= nft_hash_activate,
		.deactivate	= nft_hash_deactivate,
		.flush		= nft_hash_flush,
		.remove		= nft_hash_remove,
		.lookup		= nft_hash_lookup,
		.walk		= nft_hash_walk,
		.get		= nft_hash_get,
	},
};

const struct nft_set_type nft_set_hash_fast_type = {
	.features	= NFT_SET_MAP | NFT_SET_OBJECT,
	.ops		= {
		.privsize       = nft_hash_privsize,
		.elemsize	= offsetof(struct nft_hash_elem, ext),
		.estimate	= nft_hash_fast_estimate,
		.init		= nft_hash_init,
		.destroy	= nft_hash_destroy,
		.insert		= nft_hash_insert,
		.activate	= nft_hash_activate,
		.deactivate	= nft_hash_deactivate,
		.flush		= nft_hash_flush,
		.remove		= nft_hash_remove,
		.lookup		= nft_hash_lookup_fast,
		.walk		= nft_hash_walk,
		.get		= nft_hash_get,
	},
};