// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>

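/* Per-set private data: @root is the tree of interval elements, writers
 * serialize on @lock and bump @count so that lockless readers can detect a
 * concurrent update, and @last_gc records (in jiffies) when garbage
 * collection last ran.
 */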
struct nft_rbtree {
	struct rb_root		root;
	rwlock_t		lock;
	seqcount_rwlock_t	count;
	unsigned long		last_gc;
};

struct nft_rbtree_elem {
	struct nft_elem_priv	priv;
	struct rb_node		node;
	struct nft_set_ext	ext;
};

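/* A range is stored as a pair of elements; the end element is marked by
 * NFT_SET_ELEM_INTERVAL_END in its flags extension.
 */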
static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
{
	return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
	       (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}

static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
	return !nft_rbtree_interval_end(rbe);
}

static int nft_rbtree_cmp(const struct nft_set *set,
			  const struct nft_rbtree_elem *e1,
			  const struct nft_rbtree_elem *e2)
{
	return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
		      set->klen);
}

static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe)
{
	return nft_set_elem_expired(&rbe->ext);
}

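/* Lockless lookup walk: runs under RCU and rechecks the seqcount at every
 * step, bailing out with NULL if a concurrent writer modified the tree so
 * that the caller can retry.
 */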
static const struct nft_set_ext *
__nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
		    const u32 *key, unsigned int seq)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct nft_rbtree_elem *rbe, *interval = NULL;
	u8 genmask = nft_genmask_cur(net);
	const struct rb_node *parent;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return NULL;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (interval &&
			    !nft_rbtree_cmp(set, rbe, interval) &&
			    nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(interval))
				continue;
			if (nft_set_elem_active(&rbe->ext, genmask) &&
			    !nft_rbtree_elem_expired(rbe))
				interval = rbe;
		} else if (d > 0)
			parent = rcu_dereference_raw(parent->rb_right);
		else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_rbtree_elem_expired(rbe))
				return NULL;

			if (nft_rbtree_interval_end(rbe)) {
				if (nft_set_is_anonymous(set))
					return NULL;
				parent = rcu_dereference_raw(parent->rb_left);
				interval = NULL;
				continue;
			}

			return &rbe->ext;
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_rbtree_interval_start(interval))
		return &interval->ext;

	return NULL;
}

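/* Datapath lookup: try the lockless walk first; if it found nothing and the
 * seqcount indicates a writer interfered, repeat the walk under the read
 * lock to get a stable result.
 */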
INDIRECT_CALLABLE_SCOPE
const struct nft_set_ext *
nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
		  const u32 *key)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	const struct nft_set_ext *ext;

	ext = __nft_rbtree_lookup(net, set, key, seq);
	if (ext || !read_seqcount_retry(&priv->count, seq))
		return ext;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ext = __nft_rbtree_lookup(net, set, key, seq);
	read_unlock_bh(&priv->lock);

	return ext;
}

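/* Control-plane counterpart of __nft_rbtree_lookup(): returns the matching
 * element itself, with @flags (NFT_SET_ELEM_INTERVAL_END) selecting whether
 * a start or an end element is wanted.
 */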
static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
			     const u32 *key, struct nft_rbtree_elem **elem,
			     unsigned int seq, unsigned int flags, u8 genmask)
{
	struct nft_rbtree_elem *rbe, *interval = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (!(flags & NFT_SET_ELEM_INTERVAL_END))
				interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			if (flags & NFT_SET_ELEM_INTERVAL_END)
				interval = rbe;
		} else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_set_elem_expired(&rbe->ext))
				return false;

			if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
			    (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
			    (flags & NFT_SET_ELEM_INTERVAL_END)) {
				*elem = rbe;
				return true;
			}

			if (nft_rbtree_interval_end(rbe))
				interval = NULL;

			parent = rcu_dereference_raw(parent->rb_left);
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    ((!nft_rbtree_interval_end(interval) &&
	      !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
	     (nft_rbtree_interval_end(interval) &&
	      (flags & NFT_SET_ELEM_INTERVAL_END)))) {
		*elem = interval;
		return true;
	}

	return false;
}

static struct nft_elem_priv *
nft_rbtree_get(const struct net *net, const struct nft_set *set,
	       const struct nft_set_elem *elem, unsigned int flags)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
	const u32 *key = (const u32 *)&elem->key.val;
	u8 genmask = nft_genmask_cur(net);
	bool ret;

	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return &rbe->priv;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	read_unlock_bh(&priv->lock);

	if (!ret)
		return ERR_PTR(-ENOENT);

	return &rbe->priv;
}

static void nft_rbtree_gc_elem_remove(struct net *net, struct nft_set *set,
				      struct nft_rbtree *priv,
				      struct nft_rbtree_elem *rbe)
{
	lockdep_assert_held_write(&priv->lock);
	nft_setelem_data_deactivate(net, set, &rbe->priv);
	rb_erase(&rbe->node, &priv->root);
}

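/* Synchronous garbage collection of one expired interval from the insert
 * path: removes the expired start element together with the end element
 * preceding it and returns the removed end element (or NULL), so the caller
 * can notice when an element it already annotated has gone away.
 */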
static const struct nft_rbtree_elem *
nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
		   struct nft_rbtree_elem *rbe)
{
	struct nft_set *set = (struct nft_set *)__set;
	struct rb_node *prev = rb_prev(&rbe->node);
	struct net *net = read_pnet(&set->net);
	struct nft_rbtree_elem *rbe_prev;
	struct nft_trans_gc *gc;

	gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC);
	if (!gc)
		return ERR_PTR(-ENOMEM);

	/* search for end interval coming before this element.
	 * end intervals don't carry a timeout extension, they
	 * are coupled with the interval start element.
	 */
	while (prev) {
		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
		if (nft_rbtree_interval_end(rbe_prev) &&
		    nft_set_elem_active(&rbe_prev->ext, NFT_GENMASK_ANY))
			break;

		prev = rb_prev(prev);
	}

	rbe_prev = NULL;
	if (prev) {
		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
		nft_rbtree_gc_elem_remove(net, set, priv, rbe_prev);

		/* There is always room in this trans gc for this element,
		 * memory allocation never actually happens, hence the warning
		 * splat in such a case. No need to set NFT_SET_ELEM_DEAD_BIT,
		 * this is synchronous gc which never fails.
		 */
		gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
		if (WARN_ON_ONCE(!gc))
			return ERR_PTR(-ENOMEM);

		nft_trans_gc_elem_add(gc, rbe_prev);
	}

	nft_rbtree_gc_elem_remove(net, set, priv, rbe);
	gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
	if (WARN_ON_ONCE(!gc))
		return ERR_PTR(-ENOMEM);

	nft_trans_gc_elem_add(gc, rbe);

	nft_trans_gc_queue_sync_done(gc);

	return rbe_prev;
}

static bool nft_rbtree_update_first(const struct nft_set *set,
				    struct nft_rbtree_elem *rbe,
				    struct rb_node *first)
{
	struct nft_rbtree_elem *first_elem;

	first_elem = rb_entry(first, struct nft_rbtree_elem, node);
	/* this element is closest to where the new element is to be inserted:
	 * update the first element for the node list path.
	 */
	if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
		return true;

	return false;
}

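/* Insert @new while checking for overlaps with existing intervals: a full
 * overlap is reported as -EEXIST (with the clashing element in @elem_priv),
 * a partial overlap as -ENOTEMPTY, and -EAGAIN asks the caller to restart
 * after synchronous GC removed an element that had already been annotated.
 */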
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			       struct nft_rbtree_elem *new,
			       struct nft_elem_priv **elem_priv)
{
	struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
	struct rb_node *node, *next, *parent, **p, *first = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	u8 cur_genmask = nft_genmask_cur(net);
	u8 genmask = nft_genmask_next(net);
	u64 tstamp = nft_net_tstamp(net);
	int d;

	/* Descend the tree to search for an existing element greater than the
	 * key value of the new element to insert. This is the first element
	 * from which the ordered list of elements is walked to find a
	 * possible overlap.
	 */
	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = nft_rbtree_cmp(set, rbe, new);

		if (d < 0) {
			p = &parent->rb_left;
		} else if (d > 0) {
			if (!first ||
			    nft_rbtree_update_first(set, rbe, first))
				first = &rbe->node;

			p = &parent->rb_right;
		} else {
			if (nft_rbtree_interval_end(rbe))
				p = &parent->rb_left;
			else
				p = &parent->rb_right;
		}
	}

	if (!first)
		first = rb_first(&priv->root);

	/* Detect overlap by going through the list of valid tree nodes.
	 * Values stored in the tree are in reversed order, starting from
	 * highest to lowest value.
	 */
	for (node = first; node != NULL; node = next) {
		next = rb_next(node);

		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (!nft_set_elem_active(&rbe->ext, genmask))
			continue;

		/* perform garbage collection to avoid bogus overlap reports
		 * but skip new elements in this transaction.
		 */
		if (__nft_set_elem_expired(&rbe->ext, tstamp) &&
		    nft_set_elem_active(&rbe->ext, cur_genmask)) {
			const struct nft_rbtree_elem *removed_end;

			removed_end = nft_rbtree_gc_elem(set, priv, rbe);
			if (IS_ERR(removed_end))
				return PTR_ERR(removed_end);

			if (removed_end == rbe_le || removed_end == rbe_ge)
				return -EAGAIN;

			continue;
		}

		d = nft_rbtree_cmp(set, rbe, new);
		if (d == 0) {
			/* Matching end element: no need to look for an
			 * overlapping greater or equal element.
			 */
			if (nft_rbtree_interval_end(rbe)) {
				rbe_le = rbe;
				break;
			}

			/* first element that is greater or equal to the key value. */
			if (!rbe_ge) {
				rbe_ge = rbe;
				continue;
			}

			/* this is a closer more or equal element, update it. */
			if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
				rbe_ge = rbe;
				continue;
			}

			/* element is equal to key value, make sure flags are
			 * the same, an existing more or equal start element
			 * must not be replaced by a more or equal end element.
			 */
			if ((nft_rbtree_interval_start(new) &&
			     nft_rbtree_interval_start(rbe_ge)) ||
			    (nft_rbtree_interval_end(new) &&
			     nft_rbtree_interval_end(rbe_ge))) {
				rbe_ge = rbe;
				continue;
			}
		} else if (d > 0) {
			/* annotate element greater than the new element. */
			rbe_ge = rbe;
			continue;
		} else if (d < 0) {
			/* annotate element less than the new element. */
			rbe_le = rbe;
			break;
		}
	}

	/* - new start element matching existing start element: full overlap
	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
	 */
	if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
	    nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
		*elem_priv = &rbe_ge->priv;
		return -EEXIST;
	}

	/* - new end element matching existing end element: full overlap
	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
	 */
	if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
	    nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
		*elem_priv = &rbe_le->priv;
		return -EEXIST;
	}

	/* - new start element with existing closest, less or equal key value
	 *   being a start element: partial overlap, reported as -ENOTEMPTY.
	 *   Anonymous sets allow for two consecutive start elements since they
	 *   are constant; skip them to avoid bogus overlap reports.
	 */
	if (!nft_set_is_anonymous(set) && rbe_le &&
	    nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
		return -ENOTEMPTY;

	/* - new end element with existing closest, less or equal key value
	 *   being an end element: partial overlap, reported as -ENOTEMPTY.
	 */
	if (rbe_le &&
	    nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
		return -ENOTEMPTY;

	/* - new end element with existing closest, greater or equal key value
	 *   being an end element: partial overlap, reported as -ENOTEMPTY
	 */
	if (rbe_ge &&
	    nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
		return -ENOTEMPTY;

	/* Accepted element: pick insertion point depending on key value */
	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = nft_rbtree_cmp(set, rbe, new);

		if (d < 0)
			p = &parent->rb_left;
		else if (d > 0)
			p = &parent->rb_right;
		else if (nft_rbtree_interval_end(rbe))
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node_rcu(&new->node, parent, p);
	rb_insert_color(&new->node, &priv->root);
	return 0;
}

static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			     const struct nft_set_elem *elem,
			     struct nft_elem_priv **elem_priv)
{
	struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem->priv);
	struct nft_rbtree *priv = nft_set_priv(set);
	int err;

	do {
		if (fatal_signal_pending(current))
			return -EINTR;

		cond_resched();

		write_lock_bh(&priv->lock);
		write_seqcount_begin(&priv->count);
		err = __nft_rbtree_insert(net, set, rbe, elem_priv);
		write_seqcount_end(&priv->count);
		write_unlock_bh(&priv->lock);
	} while (err == -EAGAIN);

	return err;
}

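/* Erase an element under the write lock, bumping the seqcount so that
 * lockless readers notice the modification and retry.
 */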
static void nft_rbtree_erase(struct nft_rbtree *priv, struct nft_rbtree_elem *rbe)
{
	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	rb_erase(&rbe->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);
}

static void nft_rbtree_remove(const struct net *net,
			      const struct nft_set *set,
			      struct nft_elem_priv *elem_priv)
{
	struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem_priv);
	struct nft_rbtree *priv = nft_set_priv(set);

	nft_rbtree_erase(priv, rbe);
}

static void nft_rbtree_activate(const struct net *net,
				const struct nft_set *set,
				struct nft_elem_priv *elem_priv)
{
	struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem_priv);

	nft_clear(net, &rbe->ext);
}

static void nft_rbtree_flush(const struct net *net,
			     const struct nft_set *set,
			     struct nft_elem_priv *elem_priv)
{
	struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem_priv);

	nft_set_elem_change_active(net, set, &rbe->ext);
}

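/* Look up the element matching @elem in the next generation and mark it
 * inactive; this is the removal path run from the transaction.
 */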
static struct nft_elem_priv *
nft_rbtree_deactivate(const struct net *net, const struct nft_set *set,
		      const struct nft_set_elem *elem)
{
	struct nft_rbtree_elem *rbe, *this = nft_elem_priv_cast(elem->priv);
	const struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent = priv->root.rb_node;
	u8 genmask = nft_genmask_next(net);
	u64 tstamp = nft_net_tstamp(net);
	int d;

	while (parent != NULL) {
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
			   set->klen);
		if (d < 0)
			parent = parent->rb_left;
		else if (d > 0)
			parent = parent->rb_right;
		else {
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(this)) {
				parent = parent->rb_left;
				continue;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(this)) {
				parent = parent->rb_right;
				continue;
			} else if (__nft_set_elem_expired(&rbe->ext, tstamp)) {
				break;
			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = parent->rb_left;
				continue;
			}
			nft_rbtree_flush(net, set, &rbe->priv);
			return &rbe->priv;
		}
	}
	return NULL;
}

static void nft_rbtree_do_walk(const struct nft_ctx *ctx,
			       struct nft_set *set,
			       struct nft_set_iter *iter)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (iter->count < iter->skip)
			goto cont;

		iter->err = iter->fn(ctx, set, iter, &rbe->priv);
		if (iter->err < 0)
			return;
cont:
		iter->count++;
	}
}

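/* Iteration from the update path runs under the commit mutex, read-only
 * dumps take the read lock; both exclude concurrent writers.
 */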
static void nft_rbtree_walk(const struct nft_ctx *ctx,
			    struct nft_set *set,
			    struct nft_set_iter *iter)
{
	struct nft_rbtree *priv = nft_set_priv(set);

	switch (iter->type) {
	case NFT_ITER_UPDATE:
		lockdep_assert_held(&nft_pernet(ctx->net)->commit_mutex);
		nft_rbtree_do_walk(ctx, set, iter);
		break;
	case NFT_ITER_READ:
		read_lock_bh(&priv->lock);
		nft_rbtree_do_walk(ctx, set, iter);
		read_unlock_bh(&priv->lock);
		break;
	default:
		iter->err = -EINVAL;
		WARN_ON_ONCE(1);
		break;
	}
}

static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
				 struct nft_rbtree *priv,
				 struct nft_rbtree_elem *rbe)
{
	nft_setelem_data_deactivate(net, set, &rbe->priv);
	nft_rbtree_erase(priv, rbe);
}

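/* Periodic garbage collection of expired intervals, run from the commit
 * path (see nft_rbtree_commit()) once the GC interval has elapsed.
 */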
static void nft_rbtree_gc(struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe, *rbe_end = NULL;
	struct net *net = read_pnet(&set->net);
	u64 tstamp = nft_net_tstamp(net);
	struct rb_node *node, *next;
	struct nft_trans_gc *gc;

	set = nft_set_container_of(priv);
	net = read_pnet(&set->net);

	gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
	if (!gc)
		return;

	for (node = rb_first(&priv->root); node ; node = next) {
		next = rb_next(node);

		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		/* elements are reversed in the rbtree for historical reasons,
		 * from highest to lowest value, that is why end element is
		 * always visited before the start element.
		 */
		if (nft_rbtree_interval_end(rbe)) {
			rbe_end = rbe;
			continue;
		}
		if (!__nft_set_elem_expired(&rbe->ext, tstamp))
			continue;

		gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
		if (!gc)
			goto try_later;

		/* end element needs to be removed first, it has
		 * no timeout extension.
		 */
		if (rbe_end) {
			nft_rbtree_gc_remove(net, set, priv, rbe_end);
			nft_trans_gc_elem_add(gc, rbe_end);
			rbe_end = NULL;
		}

		gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
		if (!gc)
			goto try_later;

		nft_rbtree_gc_remove(net, set, priv, rbe);
		nft_trans_gc_elem_add(gc, rbe);
	}

try_later:

	if (gc) {
		gc = nft_trans_gc_catchall_sync(gc);
		nft_trans_gc_queue_sync_done(gc);
		priv->last_gc = jiffies;
	}
}

static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
			       const struct nft_set_desc *desc)
{
	return sizeof(struct nft_rbtree);
}

static int nft_rbtree_init(const struct nft_set *set,
			   const struct nft_set_desc *desc,
			   const struct nlattr * const nla[])
{
	struct nft_rbtree *priv = nft_set_priv(set);

	BUILD_BUG_ON(offsetof(struct nft_rbtree_elem, priv) != 0);

	rwlock_init(&priv->lock);
	seqcount_rwlock_init(&priv->count, &priv->lock);
	priv->root = RB_ROOT;

	return 0;
}

static void nft_rbtree_destroy(const struct nft_ctx *ctx,
			       const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	while ((node = priv->root.rb_node) != NULL) {
		rb_erase(node, &priv->root);
		rbe = rb_entry(node, struct nft_rbtree_elem, node);
		nf_tables_set_elem_destroy(ctx, set, &rbe->priv);
	}
}

static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
				struct nft_set_estimate *est)
{
	if (desc->field_count > 1)
		return false;

	if (desc->size)
		est->size = sizeof(struct nft_rbtree) +
			    desc->size * sizeof(struct nft_rbtree_elem);
	else
		est->size = ~0;

	est->lookup = NFT_SET_CLASS_O_LOG_N;
	est->space  = NFT_SET_CLASS_O_N;

	return true;
}

static void nft_rbtree_commit(struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);

	if (time_after_eq(jiffies, priv->last_gc + nft_set_gc_interval(set)))
		nft_rbtree_gc(set);
}

static void nft_rbtree_gc_init(const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);

	priv->last_gc = jiffies;
}

/* rbtree stores ranges as singleton elements, each range is composed of two
 * elements ...
 */
static u32 nft_rbtree_ksize(u32 size)
{
	return size * 2;
}

/* ... hide this detail from userspace. */
static u32 nft_rbtree_usize(u32 size)
{
	if (!size)
		return 0;

	return size / 2;
}

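/* If the rightmost node is the internal all-zero "no match" end element,
 * report one extra available slot, since that element does not correspond
 * to a user-visible entry.
 */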
static u32 nft_rbtree_adjust_maxsize(const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;
	const void *key;

	node = rb_last(&priv->root);
	if (!node)
		return 0;

	rbe = rb_entry(node, struct nft_rbtree_elem, node);
	if (!nft_rbtree_interval_end(rbe))
		return 0;

	key = nft_set_ext_key(&rbe->ext);
	if (memchr(key, 1, set->klen))
		return 0;

	/* this is the all-zero no-match element. */
	return 1;
}

const struct nft_set_type nft_set_rbtree_type = {
	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
	.ops		= {
		.privsize	= nft_rbtree_privsize,
		.elemsize	= offsetof(struct nft_rbtree_elem, ext),
		.estimate	= nft_rbtree_estimate,
		.init		= nft_rbtree_init,
		.destroy	= nft_rbtree_destroy,
		.insert		= nft_rbtree_insert,
		.remove		= nft_rbtree_remove,
		.deactivate	= nft_rbtree_deactivate,
		.flush		= nft_rbtree_flush,
		.activate	= nft_rbtree_activate,
		.commit		= nft_rbtree_commit,
		.gc_init	= nft_rbtree_gc_init,
		.lookup		= nft_rbtree_lookup,
		.walk		= nft_rbtree_walk,
		.get		= nft_rbtree_get,
		.ksize		= nft_rbtree_ksize,
		.usize		= nft_rbtree_usize,
		.adjust_maxsize = nft_rbtree_adjust_maxsize,
	},
};