xref: /linux/net/netfilter/nft_set_rbtree.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
4  *
5  * Development of this code funded by Astaro AG (http://www.astaro.com/)
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/list.h>
12 #include <linux/rbtree.h>
13 #include <linux/netlink.h>
14 #include <linux/netfilter.h>
15 #include <linux/netfilter/nf_tables.h>
16 #include <net/netfilter/nf_tables_core.h>
17 
18 struct nft_rbtree {
19 	struct rb_root		root;
20 	rwlock_t		lock;
21 	seqcount_rwlock_t	count;
22 	unsigned long		last_gc;
23 };
24 
25 struct nft_rbtree_elem {
26 	struct nft_elem_priv	priv;
27 	struct rb_node		node;
28 	struct nft_set_ext	ext;
29 };
30 
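/* A range is stored as two separate elements: a start element keyed on the
 * lower bound, and an end element carrying the NFT_SET_ELEM_INTERVAL_END
 * flag. For example, with a one-byte key and the usual userspace encoding
 * (the end key being the first value past the range, an assumption of this
 * sketch), the range 10-20 would be stored as:
 *
 *	start element: key = 10
 *	end element:   key = 21, NFT_SET_ELEM_INTERVAL_END set
 *
 * The tree is kept in reversed order, from highest to lowest key, so an
 * rb_first()/rb_next() walk visits an end element before its start element.
 */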
31 static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
32 {
33 	return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
34 	       (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
35 }
36 
37 static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
38 {
39 	return !nft_rbtree_interval_end(rbe);
40 }
41 
42 static int nft_rbtree_cmp(const struct nft_set *set,
43 			  const struct nft_rbtree_elem *e1,
44 			  const struct nft_rbtree_elem *e2)
45 {
46 	return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
47 		      set->klen);
48 }
49 
50 static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe)
51 {
52 	return nft_set_elem_expired(&rbe->ext);
53 }
54 
55 static const struct nft_set_ext *
56 __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
57 		    const u32 *key, unsigned int seq)
58 {
59 	struct nft_rbtree *priv = nft_set_priv(set);
60 	const struct nft_rbtree_elem *rbe, *interval = NULL;
61 	u8 genmask = nft_genmask_cur(net);
62 	const struct rb_node *parent;
63 	int d;
64 
65 	parent = rcu_dereference_raw(priv->root.rb_node);
66 	while (parent != NULL) {
67 		if (read_seqcount_retry(&priv->count, seq))
68 			return NULL;
69 
70 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
71 
72 		d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
73 		if (d < 0) {
74 			parent = rcu_dereference_raw(parent->rb_left);
75 			if (interval &&
76 			    !nft_rbtree_cmp(set, rbe, interval) &&
77 			    nft_rbtree_interval_end(rbe) &&
78 			    nft_rbtree_interval_start(interval))
79 				continue;
80 			interval = rbe;
81 		} else if (d > 0)
82 			parent = rcu_dereference_raw(parent->rb_right);
83 		else {
84 			if (!nft_set_elem_active(&rbe->ext, genmask)) {
85 				parent = rcu_dereference_raw(parent->rb_left);
86 				continue;
87 			}
88 
89 			if (nft_rbtree_elem_expired(rbe))
90 				return NULL;
91 
92 			if (nft_rbtree_interval_end(rbe)) {
93 				if (nft_set_is_anonymous(set))
94 					return NULL;
95 				parent = rcu_dereference_raw(parent->rb_left);
96 				interval = NULL;
97 				continue;
98 			}
99 
100 			return &rbe->ext;
101 		}
102 	}
103 
104 	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
105 	    nft_set_elem_active(&interval->ext, genmask) &&
106 	    !nft_rbtree_elem_expired(interval) &&
107 	    nft_rbtree_interval_start(interval))
108 		return &interval->ext;
109 
110 	return NULL;
111 }
112 
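/* Lookups run locklessly on the packet path: __nft_rbtree_lookup() walks
 * the tree under priv->count and bails out (returns NULL) as soon as
 * read_seqcount_retry() reports a concurrent writer. Only then is the
 * walk repeated under priv->lock, which excludes insertions and removals.
 */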
113 INDIRECT_CALLABLE_SCOPE
114 const struct nft_set_ext *
115 nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
116 		  const u32 *key)
117 {
118 	struct nft_rbtree *priv = nft_set_priv(set);
119 	unsigned int seq = read_seqcount_begin(&priv->count);
120 	const struct nft_set_ext *ext;
121 
122 	ext = __nft_rbtree_lookup(net, set, key, seq);
123 	if (ext || !read_seqcount_retry(&priv->count, seq))
124 		return ext;
125 
126 	read_lock_bh(&priv->lock);
127 	seq = read_seqcount_begin(&priv->count);
128 	ext = __nft_rbtree_lookup(net, set, key, seq);
129 	read_unlock_bh(&priv->lock);
130 
131 	return ext;
132 }
133 
134 static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
135 			     const u32 *key, struct nft_rbtree_elem **elem,
136 			     unsigned int seq, unsigned int flags, u8 genmask)
137 {
138 	struct nft_rbtree_elem *rbe, *interval = NULL;
139 	struct nft_rbtree *priv = nft_set_priv(set);
140 	const struct rb_node *parent;
141 	const void *this;
142 	int d;
143 
144 	parent = rcu_dereference_raw(priv->root.rb_node);
145 	while (parent != NULL) {
146 		if (read_seqcount_retry(&priv->count, seq))
147 			return false;
148 
149 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
150 
151 		this = nft_set_ext_key(&rbe->ext);
152 		d = memcmp(this, key, set->klen);
153 		if (d < 0) {
154 			parent = rcu_dereference_raw(parent->rb_left);
155 			if (!(flags & NFT_SET_ELEM_INTERVAL_END))
156 				interval = rbe;
157 		} else if (d > 0) {
158 			parent = rcu_dereference_raw(parent->rb_right);
159 			if (flags & NFT_SET_ELEM_INTERVAL_END)
160 				interval = rbe;
161 		} else {
162 			if (!nft_set_elem_active(&rbe->ext, genmask)) {
163 				parent = rcu_dereference_raw(parent->rb_left);
164 				continue;
165 			}
166 
167 			if (nft_set_elem_expired(&rbe->ext))
168 				return false;
169 
170 			if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
171 			    (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
172 			    (flags & NFT_SET_ELEM_INTERVAL_END)) {
173 				*elem = rbe;
174 				return true;
175 			}
176 
177 			if (nft_rbtree_interval_end(rbe))
178 				interval = NULL;
179 
180 			parent = rcu_dereference_raw(parent->rb_left);
181 		}
182 	}
183 
184 	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
185 	    nft_set_elem_active(&interval->ext, genmask) &&
186 	    !nft_set_elem_expired(&interval->ext) &&
187 	    ((!nft_rbtree_interval_end(interval) &&
188 	      !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
189 	     (nft_rbtree_interval_end(interval) &&
190 	      (flags & NFT_SET_ELEM_INTERVAL_END)))) {
191 		*elem = interval;
192 		return true;
193 	}
194 
195 	return false;
196 }
197 
198 static struct nft_elem_priv *
199 nft_rbtree_get(const struct net *net, const struct nft_set *set,
200 	       const struct nft_set_elem *elem, unsigned int flags)
201 {
202 	struct nft_rbtree *priv = nft_set_priv(set);
203 	unsigned int seq = read_seqcount_begin(&priv->count);
204 	struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
205 	const u32 *key = (const u32 *)&elem->key.val;
206 	u8 genmask = nft_genmask_cur(net);
207 	bool ret;
208 
209 	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
210 	if (ret || !read_seqcount_retry(&priv->count, seq))
211 		return &rbe->priv;
212 
213 	read_lock_bh(&priv->lock);
214 	seq = read_seqcount_begin(&priv->count);
215 	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
216 	read_unlock_bh(&priv->lock);
217 
218 	if (!ret)
219 		return ERR_PTR(-ENOENT);
220 
221 	return &rbe->priv;
222 }
223 
224 static void nft_rbtree_gc_elem_remove(struct net *net, struct nft_set *set,
225 				      struct nft_rbtree *priv,
226 				      struct nft_rbtree_elem *rbe)
227 {
228 	lockdep_assert_held_write(&priv->lock);
229 	nft_setelem_data_deactivate(net, set, &rbe->priv);
230 	rb_erase(&rbe->node, &priv->root);
231 }
232 
233 static const struct nft_rbtree_elem *
234 nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
235 		   struct nft_rbtree_elem *rbe)
236 {
237 	struct nft_set *set = (struct nft_set *)__set;
238 	struct rb_node *prev = rb_prev(&rbe->node);
239 	struct net *net = read_pnet(&set->net);
240 	struct nft_rbtree_elem *rbe_prev;
241 	struct nft_trans_gc *gc;
242 
243 	gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC);
244 	if (!gc)
245 		return ERR_PTR(-ENOMEM);
246 
247 	/* search for end interval coming before this element.
248 	 * end intervals don't carry a timeout extension, they
249 	 * are coupled with the interval start element.
250 	 */
251 	while (prev) {
252 		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
253 		if (nft_rbtree_interval_end(rbe_prev) &&
254 		    nft_set_elem_active(&rbe_prev->ext, NFT_GENMASK_ANY))
255 			break;
256 
257 		prev = rb_prev(prev);
258 	}
259 
260 	rbe_prev = NULL;
261 	if (prev) {
262 		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
263 		nft_rbtree_gc_elem_remove(net, set, priv, rbe_prev);
264 
265 		/* There is always room in this trans gc for this element;
266 		 * memory allocation never actually happens, hence the warning
267 		 * splat if it ever does. No need to set NFT_SET_ELEM_DEAD_BIT,
268 		 * this is synchronous gc which never fails.
269 		 */
270 		gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
271 		if (WARN_ON_ONCE(!gc))
272 			return ERR_PTR(-ENOMEM);
273 
274 		nft_trans_gc_elem_add(gc, rbe_prev);
275 	}
276 
277 	nft_rbtree_gc_elem_remove(net, set, priv, rbe);
278 	gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
279 	if (WARN_ON_ONCE(!gc))
280 		return ERR_PTR(-ENOMEM);
281 
282 	nft_trans_gc_elem_add(gc, rbe);
283 
284 	nft_trans_gc_queue_sync_done(gc);
285 
286 	return rbe_prev;
287 }
288 
289 static bool nft_rbtree_update_first(const struct nft_set *set,
290 				    struct nft_rbtree_elem *rbe,
291 				    struct rb_node *first)
292 {
293 	struct nft_rbtree_elem *first_elem;
294 
295 	first_elem = rb_entry(first, struct nft_rbtree_elem, node);
296 	/* this element is closest to where the new element is to be inserted:
297 	 * update the first element for the node list path.
298 	 */
299 	if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
300 		return true;
301 
302 	return false;
303 }
304 
305 static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
306 			       struct nft_rbtree_elem *new,
307 			       struct nft_elem_priv **elem_priv)
308 {
309 	struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
310 	struct rb_node *node, *next, *parent, **p, *first = NULL;
311 	struct nft_rbtree *priv = nft_set_priv(set);
312 	u8 cur_genmask = nft_genmask_cur(net);
313 	u8 genmask = nft_genmask_next(net);
314 	u64 tstamp = nft_net_tstamp(net);
315 	int d;
316 
317 	/* Descend the tree to search for an existing element whose key value
318 	 * is greater than that of the new element. This is the first element
319 	 * from which to walk the ordered elements to find possible overlaps.
320 	 */
321 	parent = NULL;
322 	p = &priv->root.rb_node;
323 	while (*p != NULL) {
324 		parent = *p;
325 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
326 		d = nft_rbtree_cmp(set, rbe, new);
327 
328 		if (d < 0) {
329 			p = &parent->rb_left;
330 		} else if (d > 0) {
331 			if (!first ||
332 			    nft_rbtree_update_first(set, rbe, first))
333 				first = &rbe->node;
334 
335 			p = &parent->rb_right;
336 		} else {
337 			if (nft_rbtree_interval_end(rbe))
338 				p = &parent->rb_left;
339 			else
340 				p = &parent->rb_right;
341 		}
342 	}
343 
344 	if (!first)
345 		first = rb_first(&priv->root);
346 
347 	/* Detect overlap by going through the list of valid tree nodes.
348 	 * Values stored in the tree are in reversed order, starting from
349 	 * highest to lowest value.
350 	 */
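	/* Only the closest less-or-equal (rbe_le) and greater-or-equal
	 * (rbe_ge) elements relative to the new one are tracked here; the
	 * checks after this loop classify the result: exact duplicate is
	 * reported as -EEXIST, partial overlap as -ENOTEMPTY, and anything
	 * else falls through to the actual insertion.
	 */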
351 	for (node = first; node != NULL; node = next) {
352 		next = rb_next(node);
353 
354 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
355 
356 		if (!nft_set_elem_active(&rbe->ext, genmask))
357 			continue;
358 
359 		/* perform garbage collection to avoid bogus overlap reports
360 		 * but skip new elements in this transaction.
361 		 */
362 		if (__nft_set_elem_expired(&rbe->ext, tstamp) &&
363 		    nft_set_elem_active(&rbe->ext, cur_genmask)) {
364 			const struct nft_rbtree_elem *removed_end;
365 
366 			removed_end = nft_rbtree_gc_elem(set, priv, rbe);
367 			if (IS_ERR(removed_end))
368 				return PTR_ERR(removed_end);
369 
370 			if (removed_end == rbe_le || removed_end == rbe_ge)
371 				return -EAGAIN;
372 
373 			continue;
374 		}
375 
376 		d = nft_rbtree_cmp(set, rbe, new);
377 		if (d == 0) {
378 			/* Matching end element: no need to look for an
379 			 * overlapping greater or equal element.
380 			 */
381 			if (nft_rbtree_interval_end(rbe)) {
382 				rbe_le = rbe;
383 				break;
384 			}
385 
386 			/* first element that is greater or equal to key value. */
387 			if (!rbe_ge) {
388 				rbe_ge = rbe;
389 				continue;
390 			}
391 
392 			/* this is a closer greater-or-equal element, update it. */
393 			if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
394 				rbe_ge = rbe;
395 				continue;
396 			}
397 
398 			/* element is equal to the key value, make sure the flags
399 			 * are the same: an existing greater-or-equal start element
400 			 * must not be replaced by a greater-or-equal end element.
401 			 */
402 			if ((nft_rbtree_interval_start(new) &&
403 			     nft_rbtree_interval_start(rbe_ge)) ||
404 			    (nft_rbtree_interval_end(new) &&
405 			     nft_rbtree_interval_end(rbe_ge))) {
406 				rbe_ge = rbe;
407 				continue;
408 			}
409 		} else if (d > 0) {
410 			/* annotate element greater than the new element. */
411 			rbe_ge = rbe;
412 			continue;
413 		} else if (d < 0) {
414 			/* annotate element less than the new element. */
415 			rbe_le = rbe;
416 			break;
417 		}
418 	}
419 
420 	/* - new start element matching existing start element: full overlap
421 	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
422 	 */
423 	if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
424 	    nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
425 		*elem_priv = &rbe_ge->priv;
426 		return -EEXIST;
427 	}
428 
429 	/* - new end element matching existing end element: full overlap
430 	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
431 	 */
432 	if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
433 	    nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
434 		*elem_priv = &rbe_le->priv;
435 		return -EEXIST;
436 	}
437 
438 	/* - new start element with existing closest, less or equal key value
439 	 *   being a start element: partial overlap, reported as -ENOTEMPTY.
440 	 *   Anonymous sets allow two consecutive start elements since they
441 	 *   are constant; skip them to avoid bogus overlap reports.
442 	 */
443 	if (!nft_set_is_anonymous(set) && rbe_le &&
444 	    nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
445 		return -ENOTEMPTY;
446 
447 	/* - new end element with existing closest, less or equal key value
448 	 *   being an end element: partial overlap, reported as -ENOTEMPTY.
449 	 */
450 	if (rbe_le &&
451 	    nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
452 		return -ENOTEMPTY;
453 
454 	/* - new end element with existing closest, greater or equal key value
455 	 *   being an end element: partial overlap, reported as -ENOTEMPTY
456 	 */
457 	if (rbe_ge &&
458 	    nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
459 		return -ENOTEMPTY;
460 
461 	/* Accepted element: pick insertion point depending on key value */
462 	parent = NULL;
463 	p = &priv->root.rb_node;
464 	while (*p != NULL) {
465 		parent = *p;
466 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
467 		d = nft_rbtree_cmp(set, rbe, new);
468 
469 		if (d < 0)
470 			p = &parent->rb_left;
471 		else if (d > 0)
472 			p = &parent->rb_right;
473 		else if (nft_rbtree_interval_end(rbe))
474 			p = &parent->rb_left;
475 		else
476 			p = &parent->rb_right;
477 	}
478 
479 	rb_link_node_rcu(&new->node, parent, p);
480 	rb_insert_color(&new->node, &priv->root);
481 	return 0;
482 }
483 
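/* Insertions serialise against lookups via priv->lock and priv->count.
 * __nft_rbtree_insert() returns -EAGAIN when its synchronous garbage
 * collection removed one of the boundary elements it was tracking
 * (rbe_le/rbe_ge); in that case the whole insertion is simply retried.
 */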
484 static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
485 			     const struct nft_set_elem *elem,
486 			     struct nft_elem_priv **elem_priv)
487 {
488 	struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem->priv);
489 	struct nft_rbtree *priv = nft_set_priv(set);
490 	int err;
491 
492 	do {
493 		if (fatal_signal_pending(current))
494 			return -EINTR;
495 
496 		cond_resched();
497 
498 		write_lock_bh(&priv->lock);
499 		write_seqcount_begin(&priv->count);
500 		err = __nft_rbtree_insert(net, set, rbe, elem_priv);
501 		write_seqcount_end(&priv->count);
502 		write_unlock_bh(&priv->lock);
503 	} while (err == -EAGAIN);
504 
505 	return err;
506 }
507 
508 static void nft_rbtree_erase(struct nft_rbtree *priv, struct nft_rbtree_elem *rbe)
509 {
510 	write_lock_bh(&priv->lock);
511 	write_seqcount_begin(&priv->count);
512 	rb_erase(&rbe->node, &priv->root);
513 	write_seqcount_end(&priv->count);
514 	write_unlock_bh(&priv->lock);
515 }
516 
517 static void nft_rbtree_remove(const struct net *net,
518 			      const struct nft_set *set,
519 			      struct nft_elem_priv *elem_priv)
520 {
521 	struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem_priv);
522 	struct nft_rbtree *priv = nft_set_priv(set);
523 
524 	nft_rbtree_erase(priv, rbe);
525 }
526 
527 static void nft_rbtree_activate(const struct net *net,
528 				const struct nft_set *set,
529 				struct nft_elem_priv *elem_priv)
530 {
531 	struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem_priv);
532 
533 	nft_clear(net, &rbe->ext);
534 }
535 
536 static void nft_rbtree_flush(const struct net *net,
537 			     const struct nft_set *set,
538 			     struct nft_elem_priv *elem_priv)
539 {
540 	struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem_priv);
541 
542 	nft_set_elem_change_active(net, set, &rbe->ext);
543 }
544 
545 static struct nft_elem_priv *
546 nft_rbtree_deactivate(const struct net *net, const struct nft_set *set,
547 		      const struct nft_set_elem *elem)
548 {
549 	struct nft_rbtree_elem *rbe, *this = nft_elem_priv_cast(elem->priv);
550 	const struct nft_rbtree *priv = nft_set_priv(set);
551 	const struct rb_node *parent = priv->root.rb_node;
552 	u8 genmask = nft_genmask_next(net);
553 	u64 tstamp = nft_net_tstamp(net);
554 	int d;
555 
556 	while (parent != NULL) {
557 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
558 
559 		d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
560 					   set->klen);
561 		if (d < 0)
562 			parent = parent->rb_left;
563 		else if (d > 0)
564 			parent = parent->rb_right;
565 		else {
566 			if (nft_rbtree_interval_end(rbe) &&
567 			    nft_rbtree_interval_start(this)) {
568 				parent = parent->rb_left;
569 				continue;
570 			} else if (nft_rbtree_interval_start(rbe) &&
571 				   nft_rbtree_interval_end(this)) {
572 				parent = parent->rb_right;
573 				continue;
574 			} else if (__nft_set_elem_expired(&rbe->ext, tstamp)) {
575 				break;
576 			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
577 				parent = parent->rb_left;
578 				continue;
579 			}
580 			nft_rbtree_flush(net, set, &rbe->priv);
581 			return &rbe->priv;
582 		}
583 	}
584 	return NULL;
585 }
586 
587 static void nft_rbtree_walk(const struct nft_ctx *ctx,
588 			    struct nft_set *set,
589 			    struct nft_set_iter *iter)
590 {
591 	struct nft_rbtree *priv = nft_set_priv(set);
592 	struct nft_rbtree_elem *rbe;
593 	struct rb_node *node;
594 
595 	read_lock_bh(&priv->lock);
596 	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
597 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
598 
599 		if (iter->count < iter->skip)
600 			goto cont;
601 
602 		iter->err = iter->fn(ctx, set, iter, &rbe->priv);
603 		if (iter->err < 0) {
604 			read_unlock_bh(&priv->lock);
605 			return;
606 		}
607 cont:
608 		iter->count++;
609 	}
610 	read_unlock_bh(&priv->lock);
611 }
612 
613 static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
614 				 struct nft_rbtree *priv,
615 				 struct nft_rbtree_elem *rbe)
616 {
617 	nft_setelem_data_deactivate(net, set, &rbe->priv);
618 	nft_rbtree_erase(priv, rbe);
619 }
620 
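/* Synchronous garbage collection, run from the commit path: an expired
 * start element is collected together with the end element seen just
 * before it in the walk, since end elements carry no timeout extension
 * of their own. If queueing to the gc transaction fails, collection
 * stops and is retried on a later commit.
 */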
621 static void nft_rbtree_gc(struct nft_set *set)
622 {
623 	struct nft_rbtree *priv = nft_set_priv(set);
624 	struct nft_rbtree_elem *rbe, *rbe_end = NULL;
625 	struct net *net = read_pnet(&set->net);
626 	u64 tstamp = nft_net_tstamp(net);
627 	struct rb_node *node, *next;
628 	struct nft_trans_gc *gc;
629 
630 	set  = nft_set_container_of(priv);
631 	net  = read_pnet(&set->net);
632 
633 	gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
634 	if (!gc)
635 		return;
636 
637 	for (node = rb_first(&priv->root); node ; node = next) {
638 		next = rb_next(node);
639 
640 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
641 
642 		/* elements are reversed in the rbtree for historical reasons,
643 		 * from highest to lowest value, that is why end element is
644 		 * always visited before the start element.
645 		 */
646 		if (nft_rbtree_interval_end(rbe)) {
647 			rbe_end = rbe;
648 			continue;
649 		}
650 		if (!__nft_set_elem_expired(&rbe->ext, tstamp))
651 			continue;
652 
653 		gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
654 		if (!gc)
655 			goto try_later;
656 
657 		/* end element needs to be removed first, it has
658 		 * no timeout extension.
659 		 */
660 		if (rbe_end) {
661 			nft_rbtree_gc_remove(net, set, priv, rbe_end);
662 			nft_trans_gc_elem_add(gc, rbe_end);
663 			rbe_end = NULL;
664 		}
665 
666 		gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
667 		if (!gc)
668 			goto try_later;
669 
670 		nft_rbtree_gc_remove(net, set, priv, rbe);
671 		nft_trans_gc_elem_add(gc, rbe);
672 	}
673 
674 try_later:
675 
676 	if (gc) {
677 		gc = nft_trans_gc_catchall_sync(gc);
678 		nft_trans_gc_queue_sync_done(gc);
679 		priv->last_gc = jiffies;
680 	}
681 }
682 
683 static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
684 			       const struct nft_set_desc *desc)
685 {
686 	return sizeof(struct nft_rbtree);
687 }
688 
689 static int nft_rbtree_init(const struct nft_set *set,
690 			   const struct nft_set_desc *desc,
691 			   const struct nlattr * const nla[])
692 {
693 	struct nft_rbtree *priv = nft_set_priv(set);
694 
695 	BUILD_BUG_ON(offsetof(struct nft_rbtree_elem, priv) != 0);
696 
697 	rwlock_init(&priv->lock);
698 	seqcount_rwlock_init(&priv->count, &priv->lock);
699 	priv->root = RB_ROOT;
700 
701 	return 0;
702 }
703 
704 static void nft_rbtree_destroy(const struct nft_ctx *ctx,
705 			       const struct nft_set *set)
706 {
707 	struct nft_rbtree *priv = nft_set_priv(set);
708 	struct nft_rbtree_elem *rbe;
709 	struct rb_node *node;
710 
711 	while ((node = priv->root.rb_node) != NULL) {
712 		rb_erase(node, &priv->root);
713 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
714 		nf_tables_set_elem_destroy(ctx, set, &rbe->priv);
715 	}
716 }
717 
718 static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
719 				struct nft_set_estimate *est)
720 {
721 	if (desc->field_count > 1)
722 		return false;
723 
724 	if (desc->size)
725 		est->size = sizeof(struct nft_rbtree) +
726 			    desc->size * sizeof(struct nft_rbtree_elem);
727 	else
728 		est->size = ~0;
729 
730 	est->lookup = NFT_SET_CLASS_O_LOG_N;
731 	est->space  = NFT_SET_CLASS_O_N;
732 
733 	return true;
734 }
735 
736 static void nft_rbtree_commit(struct nft_set *set)
737 {
738 	struct nft_rbtree *priv = nft_set_priv(set);
739 
740 	if (time_after_eq(jiffies, priv->last_gc + nft_set_gc_interval(set)))
741 		nft_rbtree_gc(set);
742 }
743 
744 static void nft_rbtree_gc_init(const struct nft_set *set)
745 {
746 	struct nft_rbtree *priv = nft_set_priv(set);
747 
748 	priv->last_gc = jiffies;
749 }
750 
751 /* rbtree stores ranges as singleton elements, each range is composed of two
752  * elements ...
753  */
754 static u32 nft_rbtree_ksize(u32 size)
755 {
756 	return size * 2;
757 }
758 
759 /* ... hide this detail from userspace. */
760 static u32 nft_rbtree_usize(u32 size)
761 {
762 	if (!size)
763 		return 0;
764 
765 	return size / 2;
766 }
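/* For example, a set holding 4 user-visible ranges occupies 8 rbtree
 * elements: nft_rbtree_ksize(4) == 8 converts the userspace size to the
 * kernel one, and nft_rbtree_usize(8) == 4 converts it back.
 */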
767 
768 static u32 nft_rbtree_adjust_maxsize(const struct nft_set *set)
769 {
770 	struct nft_rbtree *priv = nft_set_priv(set);
771 	struct nft_rbtree_elem *rbe;
772 	struct rb_node *node;
773 	const void *key;
774 
775 	node = rb_last(&priv->root);
776 	if (!node)
777 		return 0;
778 
779 	rbe = rb_entry(node, struct nft_rbtree_elem, node);
780 	if (!nft_rbtree_interval_end(rbe))
781 		return 0;
782 
783 	key = nft_set_ext_key(&rbe->ext);
784 	if (memchr(key, 1, set->klen))
785 		return 0;
786 
787 	/* this is the all-zero no-match element. */
788 	return 1;
789 }
790 
791 const struct nft_set_type nft_set_rbtree_type = {
792 	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
793 	.ops		= {
794 		.privsize	= nft_rbtree_privsize,
795 		.elemsize	= offsetof(struct nft_rbtree_elem, ext),
796 		.estimate	= nft_rbtree_estimate,
797 		.init		= nft_rbtree_init,
798 		.destroy	= nft_rbtree_destroy,
799 		.insert		= nft_rbtree_insert,
800 		.remove		= nft_rbtree_remove,
801 		.deactivate	= nft_rbtree_deactivate,
802 		.flush		= nft_rbtree_flush,
803 		.activate	= nft_rbtree_activate,
804 		.commit		= nft_rbtree_commit,
805 		.gc_init	= nft_rbtree_gc_init,
806 		.lookup		= nft_rbtree_lookup,
807 		.walk		= nft_rbtree_walk,
808 		.get		= nft_rbtree_get,
809 		.ksize		= nft_rbtree_ksize,
810 		.usize		= nft_rbtree_usize,
811 		.adjust_maxsize = nft_rbtree_adjust_maxsize,
812 	},
813 };
814