// SPDX-License-Identifier: GPL-2.0-only
/*
 * count the number of connections matching an arbitrary key.
 *
 * (C) 2017 Red Hat GmbH
 * Author: Florian Westphal <fw@strlen.de>
 *
 * split from xt_connlimit.c:
 *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
 *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
 *		only ignore TIME_WAIT or gone connections
 *   (C) CC Computer Consultants GmbH, 2007
 */
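/* Usage sketch (hypothetical caller, not part of this file; error
 * handling elided).  pick_key_for() stands in for whatever keying the
 * caller wants, e.g. a source network prefix:
 *
 *	struct nf_conncount_data *data;
 *	u32 key[1];
 *	unsigned int count;
 *
 *	data = nf_conncount_init(net, sizeof(key));
 *	key[0] = pick_key_for(skb);
 *	count = nf_conncount_count_skb(net, skb, NFPROTO_IPV4, data, key);
 *	if (count > limit)
 *		// drop or reject the packet
 *	...
 *	nf_conncount_destroy(net, data);
 */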
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

#define CONNCOUNT_SLOTS		256U

#define CONNCOUNT_GC_MAX_NODES		8
#define CONNCOUNT_GC_MAX_COLLECT	64
#define MAX_KEYLEN			5

/* we will save the tuples of all connections we care about */
struct nf_conncount_tuple {
	struct list_head		node;
	struct nf_conntrack_tuple	tuple;
	struct nf_conntrack_zone	zone;
	int				cpu;
	u32				jiffies32;
};

struct nf_conncount_rb {
	struct rb_node node;
	struct nf_conncount_list list;
	u32 key[MAX_KEYLEN];
	struct rcu_head rcu_head;
};

static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp;

struct nf_conncount_data {
	unsigned int keylen;
	struct rb_root root[CONNCOUNT_SLOTS];
	struct net *net;
	struct work_struct gc_work;
	unsigned long pending_trees[BITS_TO_LONGS(CONNCOUNT_SLOTS)];
	unsigned int gc_tree;
};

static u_int32_t conncount_rnd __read_mostly;
static struct kmem_cache *conncount_rb_cachep __read_mostly;
static struct kmem_cache *conncount_conn_cachep __read_mostly;

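/* TCP conntracks in TIME_WAIT or CLOSE state no longer occupy a slot;
 * treat them as gone.  Other protocols have no comparable state, so
 * they are never considered closed here.
 */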
static inline bool already_closed(const struct nf_conn *conn)
{
	if (nf_ct_protonum(conn) == IPPROTO_TCP)
		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
	else
		return false;
}

static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
{
	return memcmp(a, b, klen * sizeof(u32));
}

static void conn_free(struct nf_conncount_list *list,
		      struct nf_conncount_tuple *conn)
{
	lockdep_assert_held(&list->list_lock);

	list->count--;
	list_del(&conn->node);

	kmem_cache_free(conncount_conn_cachep, conn);
}

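/* Look up @conn's tuple in the main conntrack table.
 *
 * Returns the conntrack hash (with a reference taken) if the entry
 * still exists.  Otherwise returns ERR_PTR(-ENOENT) after evicting
 * @conn from @list, or ERR_PTR(-EAGAIN) if the entry might still be
 * unconfirmed and should be retried later.
 */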
static const struct nf_conntrack_tuple_hash *
find_or_evict(struct net *net, struct nf_conncount_list *list,
	      struct nf_conncount_tuple *conn)
{
	const struct nf_conntrack_tuple_hash *found;
	unsigned long a, b;
	int cpu = raw_smp_processor_id();
	u32 age;

	found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
	if (found)
		return found;
	b = conn->jiffies32;
	a = (u32)jiffies;

	/* conn might have been added just before by another cpu and
	 * might still be unconfirmed.  In this case, nf_conntrack_find_get()
	 * returns no result.  Thus only evict if this cpu added the
	 * stale entry or if the entry is older than two jiffies.
	 */
	age = a - b;
	if (conn->cpu == cpu || age >= 2) {
		conn_free(list, conn);
		return ERR_PTR(-ENOENT);
	}

	return ERR_PTR(-EAGAIN);
}

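/* Extract the original-direction tuple and conntrack zone for @skb.
 *
 * Prefer the conntrack entry already attached to the skb; fall back to
 * parsing the packet headers and doing a conntrack lookup.  If that
 * lookup finds an entry, *@refcounted is set and the caller must drop
 * the reference with nf_ct_put().  Returns false if no tuple could be
 * extracted.
 */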
static bool get_ct_or_tuple_from_skb(struct net *net,
				     const struct sk_buff *skb,
				     u16 l3num,
				     struct nf_conn **ct,
				     struct nf_conntrack_tuple *tuple,
				     const struct nf_conntrack_zone **zone,
				     bool *refcounted)
{
	const struct nf_conntrack_tuple_hash *h;
	enum ip_conntrack_info ctinfo;
	struct nf_conn *found_ct;

	found_ct = nf_ct_get(skb, &ctinfo);
	if (found_ct && !nf_ct_is_template(found_ct)) {
		*tuple = found_ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
		*zone = nf_ct_zone(found_ct);
		*ct = found_ct;
		return true;
	}

	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), l3num, net, tuple))
		return false;

	if (found_ct)
		*zone = nf_ct_zone(found_ct);

	h = nf_conntrack_find_get(net, *zone, tuple);
	if (!h)
		return true;

	found_ct = nf_ct_tuplehash_to_ctrack(h);
	*refcounted = true;
	*ct = found_ct;

	return true;
}

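/* Add the connection behind @skb to @list.  Stale entries found while
 * scanning the list are garbage-collected.  Caller must hold
 * @list->list_lock.
 *
 * Returns 0 if the tuple was added (or is already present), -EEXIST if
 * the skb's conntrack is already confirmed and thus was counted before,
 * -ENOENT if no tuple could be extracted, -ENOMEM on allocation failure
 * and -EOVERFLOW if the list is implausibly large.
 */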
static int __nf_conncount_add(struct net *net,
			      const struct sk_buff *skb,
			      u16 l3num,
			      struct nf_conncount_list *list)
{
	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn, *conn_n;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct = NULL;
	struct nf_conn *found_ct;
	unsigned int collect = 0;
	bool refcounted = false;
	int err = 0;

	if (!get_ct_or_tuple_from_skb(net, skb, l3num, &ct, &tuple, &zone, &refcounted))
		return -ENOENT;

	if (ct && nf_ct_is_confirmed(ct)) {
		/* local connections are confirmed in postrouting so confirmation
		 * might have happened before hitting connlimit
		 */
		if (skb->skb_iif != LOOPBACK_IFINDEX) {
			err = -EEXIST;
			goto out_put;
		}

		/* this is likely a local connection, skip optimization to avoid
		 * adding duplicates from a 'packet train'
		 */
		goto check_connections;
	}

	if ((u32)jiffies == list->last_gc &&
	    (list->count - list->last_gc_count) < CONNCOUNT_GC_MAX_COLLECT)
		goto add_new_node;

check_connections:
	/* check the saved connections */
	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
		if (collect > CONNCOUNT_GC_MAX_COLLECT)
			break;

		found = find_or_evict(net, list, conn);
		if (IS_ERR(found)) {
			/* Not found, but might be about to be confirmed */
			if (PTR_ERR(found) == -EAGAIN) {
				if (nf_ct_tuple_equal(&conn->tuple, &tuple) &&
				    nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
				    nf_ct_zone_id(zone, zone->dir))
					goto out_put; /* already exists */
			} else {
				collect++;
			}
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);

		if (nf_ct_tuple_equal(&conn->tuple, &tuple) &&
		    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
			/*
			 * We should not see tuples twice unless someone hooks
			 * this into a table without "-p tcp --syn".
			 *
			 * Attempt to avoid a re-add in this case.
			 */
			nf_ct_put(found_ct);
			goto out_put;
		} else if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			conn_free(list, conn);
			collect++;
			continue;
		}

		nf_ct_put(found_ct);
	}
	list->last_gc = (u32)jiffies;
	list->last_gc_count = list->count;

add_new_node:
	if (WARN_ON_ONCE(list->count > INT_MAX)) {
		err = -EOVERFLOW;
		goto out_put;
	}

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL) {
		err = -ENOMEM;
		goto out_put;
	}

	conn->tuple = tuple;
	conn->zone = *zone;
	conn->cpu = raw_smp_processor_id();
	conn->jiffies32 = (u32)jiffies;
	list_add_tail(&conn->node, &list->head);
	list->count++;

out_put:
	if (refcounted)
		nf_ct_put(ct);
	return err;
}

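/* Locked wrapper around __nf_conncount_add(); see above for the return
 * values.  Takes @list->list_lock with BH disabled.
 */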
int nf_conncount_add_skb(struct net *net,
			 const struct sk_buff *skb,
			 u16 l3num,
			 struct nf_conncount_list *list)
{
	int ret;

	/* check the saved connections */
	spin_lock_bh(&list->list_lock);
	ret = __nf_conncount_add(net, skb, l3num, list);
	spin_unlock_bh(&list->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conncount_add_skb);

void nf_conncount_list_init(struct nf_conncount_list *list)
{
	spin_lock_init(&list->list_lock);
	INIT_LIST_HEAD(&list->head);
	list->count = 0;
	list->last_gc_count = 0;
	list->last_gc = (u32)jiffies;
}
EXPORT_SYMBOL_GPL(nf_conncount_list_init);

/* Prune stale entries and return true if the list ends up empty.
 * Caller must hold list->list_lock with BH disabled.
 */
static bool __nf_conncount_gc_list(struct net *net,
				   struct nf_conncount_list *list)
{
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn, *conn_n;
	struct nf_conn *found_ct;
	unsigned int collected = 0;
	bool ret = false;

	/* don't bother if we just did GC */
	if ((u32)jiffies == READ_ONCE(list->last_gc))
		return false;

	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
		found = find_or_evict(net, list, conn);
		if (IS_ERR(found)) {
			if (PTR_ERR(found) == -ENOENT)
				collected++;
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);
		if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			conn_free(list, conn);
			collected++;
			continue;
		}

		nf_ct_put(found_ct);
		if (collected > CONNCOUNT_GC_MAX_COLLECT)
			break;
	}

	if (!list->count)
		ret = true;
	list->last_gc = (u32)jiffies;
	list->last_gc_count = list->count;

	return ret;
}

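/* Opportunistic GC: skips the work entirely if another CPU already
 * holds the list lock.  Returns true if @list ended up empty, i.e. the
 * owning rbtree node is a candidate for removal.
 */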
bool nf_conncount_gc_list(struct net *net,
			  struct nf_conncount_list *list)
{
	bool ret;

	/* don't bother if other cpu is already doing GC */
	if (!spin_trylock_bh(&list->list_lock))
		return false;

	ret = __nf_conncount_gc_list(net, list);
	spin_unlock_bh(&list->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conncount_gc_list);

static void __tree_nodes_free(struct rcu_head *h)
{
	struct nf_conncount_rb *rbconn;

	rbconn = container_of(h, struct nf_conncount_rb, rcu_head);
	kmem_cache_free(conncount_rb_cachep, rbconn);
}

/* Caller must hold the nf_conncount_locks[] slot lock for @root.
 * Each candidate is re-checked under its list lock: a racing insert may
 * have repopulated the list since it was found empty, in which case the
 * node must stay in the tree.
 */
static void tree_nodes_free(struct rb_root *root,
			    struct nf_conncount_rb *gc_nodes[],
			    unsigned int gc_count)
{
	struct nf_conncount_rb *rbconn;

	while (gc_count) {
		rbconn = gc_nodes[--gc_count];
		spin_lock(&rbconn->list.list_lock);
		if (!rbconn->list.count) {
			rb_erase(&rbconn->node, root);
			call_rcu(&rbconn->rcu_head, __tree_nodes_free);
		}
		spin_unlock(&rbconn->list.list_lock);
	}
}

static void schedule_gc_worker(struct nf_conncount_data *data, int tree)
{
	set_bit(tree, data->pending_trees);
	schedule_work(&data->gc_work);
}

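/* Slow path: walk the tree under the per-slot spinlock and either add
 * to an existing node's list or insert a fresh node for @key.  Up to
 * CONNCOUNT_GC_MAX_NODES empty lists found during the walk are freed
 * and the walk is restarted once, with further GC left to the worker.
 * Returns the resulting connection count, or 0 on failure ("hotdrop").
 */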
static unsigned int
insert_tree(struct net *net,
	    const struct sk_buff *skb,
	    u16 l3num,
	    struct nf_conncount_data *data,
	    struct rb_root *root,
	    unsigned int hash,
	    const u32 *key)
{
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
	bool do_gc = true, refcounted = false;
	unsigned int count = 0, gc_count = 0;
	struct rb_node **rbnode, *parent;
	struct nf_conntrack_tuple tuple;
	struct nf_conncount_tuple *conn;
	struct nf_conncount_rb *rbconn;
	struct nf_conn *ct = NULL;

	spin_lock_bh(&nf_conncount_locks[hash]);
restart:
	parent = NULL;
	rbnode = &(root->rb_node);
	while (*rbnode) {
		int diff;
		rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);

		parent = *rbnode;
		diff = key_diff(key, rbconn->key, data->keylen);
		if (diff < 0) {
			rbnode = &((*rbnode)->rb_left);
		} else if (diff > 0) {
			rbnode = &((*rbnode)->rb_right);
		} else {
			int ret;

			ret = nf_conncount_add_skb(net, skb, l3num, &rbconn->list);
			if (ret && ret != -EEXIST)
				count = 0; /* hotdrop */
			else
				count = rbconn->list.count;
			tree_nodes_free(root, gc_nodes, gc_count);
			goto out_unlock;
		}

		if (gc_count >= ARRAY_SIZE(gc_nodes))
			continue;

		if (do_gc && nf_conncount_gc_list(net, &rbconn->list))
			gc_nodes[gc_count++] = rbconn;
	}

	if (gc_count) {
		tree_nodes_free(root, gc_nodes, gc_count);
		schedule_gc_worker(data, hash);
		gc_count = 0;
		do_gc = false;
		goto restart;
	}

	if (get_ct_or_tuple_from_skb(net, skb, l3num, &ct, &tuple, &zone, &refcounted)) {
		/* expected case: tuple extracted, insert a new node for this key */
		rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
		if (rbconn == NULL)
			goto out_unlock;

		conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
		if (conn == NULL) {
			kmem_cache_free(conncount_rb_cachep, rbconn);
			goto out_unlock;
		}

		conn->tuple = tuple;
		conn->zone = *zone;
		conn->cpu = raw_smp_processor_id();
		conn->jiffies32 = (u32)jiffies;
		memcpy(rbconn->key, key, sizeof(u32) * data->keylen);

		nf_conncount_list_init(&rbconn->list);
		list_add(&conn->node, &rbconn->list.head);
		count = 1;
		rbconn->list.count = count;

		rb_link_node_rcu(&rbconn->node, parent, rbnode);
		rb_insert_color(&rbconn->node, root);
	}
out_unlock:
	if (refcounted)
		nf_ct_put(ct);
	spin_unlock_bh(&nf_conncount_locks[hash]);
	return count;
}

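/* Fast path: walk the tree locklessly under RCU.  On a key match the
 * packet is added to that node's list; if the node is empty (about to
 * be erased) or no node exists, fall back to insert_tree().  With a
 * NULL @skb this only garbage-collects and reports the current count.
 */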
static unsigned int
count_tree(struct net *net,
	   const struct sk_buff *skb,
	   u16 l3num,
	   struct nf_conncount_data *data,
	   const u32 *key)
{
	struct rb_root *root;
	struct rb_node *parent;
	struct nf_conncount_rb *rbconn;
	unsigned int hash;

	hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
	root = &data->root[hash];

	parent = rcu_dereference_raw(root->rb_node);
	while (parent) {
		int diff;

		rbconn = rb_entry(parent, struct nf_conncount_rb, node);

		diff = key_diff(key, rbconn->key, data->keylen);
		if (diff < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
		} else if (diff > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
		} else {
			int ret;

			if (!skb) {
				nf_conncount_gc_list(net, &rbconn->list);
				return rbconn->list.count;
			}

			spin_lock_bh(&rbconn->list.list_lock);
			/* Node might be about to be freed.  We need to
			 * defer to insert_tree() in this case.
			 */
			if (rbconn->list.count == 0) {
				spin_unlock_bh(&rbconn->list.list_lock);
				break;
			}

			/* same source network -> be counted! */
			ret = __nf_conncount_add(net, skb, l3num, &rbconn->list);
			spin_unlock_bh(&rbconn->list.list_lock);
			if (ret && ret != -EEXIST) {
				return 0; /* hotdrop */
			} else {
				/* -EEXIST means add was skipped, update the list */
				if (ret == -EEXIST)
					nf_conncount_gc_list(net, &rbconn->list);
				return rbconn->list.count;
			}
		}
	}

	if (!skb)
		return 0;

	return insert_tree(net, skb, l3num, data, root, hash, key);
}

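/* Deferred GC, one tree slot per invocation.  The first pass prunes
 * every list under RCU and counts how many ended up empty; the tree
 * lock is taken for the destructive second pass only if enough empty
 * nodes accumulated.  Reschedules itself while other slots are pending.
 */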
static void tree_gc_worker(struct work_struct *work)
{
	struct nf_conncount_data *data = container_of(work, struct nf_conncount_data, gc_work);
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES], *rbconn;
	struct rb_root *root;
	struct rb_node *node;
	unsigned int tree, next_tree, gc_count = 0;

	tree = data->gc_tree % CONNCOUNT_SLOTS;
	root = &data->root[tree];

	local_bh_disable();
	rcu_read_lock();
	for (node = rb_first(root); node != NULL; node = rb_next(node)) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);
		if (nf_conncount_gc_list(data->net, &rbconn->list))
			gc_count++;
	}
	rcu_read_unlock();
	local_bh_enable();

	cond_resched();

	spin_lock_bh(&nf_conncount_locks[tree]);
	if (gc_count < ARRAY_SIZE(gc_nodes))
		goto next; /* do not bother */

	gc_count = 0;
	node = rb_first(root);
	while (node != NULL) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);
		node = rb_next(node);

		if (rbconn->list.count > 0)
			continue;

		gc_nodes[gc_count++] = rbconn;
		if (gc_count >= ARRAY_SIZE(gc_nodes)) {
			tree_nodes_free(root, gc_nodes, gc_count);
			gc_count = 0;
		}
	}

	tree_nodes_free(root, gc_nodes, gc_count);
next:
	clear_bit(tree, data->pending_trees);

	next_tree = (tree + 1) % CONNCOUNT_SLOTS;
	next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);

	if (next_tree < CONNCOUNT_SLOTS) {
		data->gc_tree = next_tree;
		schedule_work(work);
	}

	spin_unlock_bh(&nf_conncount_locks[tree]);
}

/* Count and return the number of conntrack entries in 'net' matching
 * 'key'.  If 'skb' is not NULL, also insert the corresponding tuple
 * into the accounting data structure.  Call with the RCU read lock held.
 */
unsigned int nf_conncount_count_skb(struct net *net,
				    const struct sk_buff *skb,
				    u16 l3num,
				    struct nf_conncount_data *data,
				    const u32 *key)
{
	return count_tree(net, skb, l3num, data, key);
}
EXPORT_SYMBOL_GPL(nf_conncount_count_skb);

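/* Allocate an accounting object.  @keylen is in bytes and must be a
 * non-zero multiple of sizeof(u32), at most MAX_KEYLEN * sizeof(u32)
 * (currently 20 bytes); it is stored internally in u32 units.
 */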
struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int keylen)
{
	struct nf_conncount_data *data;
	int i;

	if (keylen % sizeof(u32) ||
	    keylen / sizeof(u32) > MAX_KEYLEN ||
	    keylen == 0)
		return ERR_PTR(-EINVAL);

	net_get_random_once(&conncount_rnd, sizeof(conncount_rnd));

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		data->root[i] = RB_ROOT;

	data->keylen = keylen / sizeof(u32);
	data->net = net;
	INIT_WORK(&data->gc_work, tree_gc_worker);

	return data;
}
EXPORT_SYMBOL_GPL(nf_conncount_init);

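/* Free all list entries without taking the list lock; only safe once
 * the list is no longer reachable by other CPUs.
 */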
void nf_conncount_cache_free(struct nf_conncount_list *list)
{
	struct nf_conncount_tuple *conn, *conn_n;

	list_for_each_entry_safe(conn, conn_n, &list->head, node)
		kmem_cache_free(conncount_conn_cachep, conn);
}
EXPORT_SYMBOL_GPL(nf_conncount_cache_free);

static void destroy_tree(struct rb_root *r)
{
	struct nf_conncount_rb *rbconn;
	struct rb_node *node;

	while ((node = rb_first(r)) != NULL) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);

		rb_erase(node, r);

		nf_conncount_cache_free(&rbconn->list);

		kmem_cache_free(conncount_rb_cachep, rbconn);
	}
}

void nf_conncount_destroy(struct net *net, struct nf_conncount_data *data)
{
	unsigned int i;

	cancel_work_sync(&data->gc_work);

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		destroy_tree(&data->root[i]);

	kfree(data);
}
EXPORT_SYMBOL_GPL(nf_conncount_destroy);

static int __init nf_conncount_modinit(void)
{
	int i;

	for (i = 0; i < CONNCOUNT_SLOTS; ++i)
		spin_lock_init(&nf_conncount_locks[i]);

	conncount_conn_cachep = KMEM_CACHE(nf_conncount_tuple, 0);
	if (!conncount_conn_cachep)
		return -ENOMEM;

	conncount_rb_cachep = KMEM_CACHE(nf_conncount_rb, 0);
	if (!conncount_rb_cachep) {
		kmem_cache_destroy(conncount_conn_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void __exit nf_conncount_modexit(void)
{
	kmem_cache_destroy(conncount_conn_cachep);
	kmem_cache_destroy(conncount_rb_cachep);
}

module_init(nf_conncount_modinit);
module_exit(nf_conncount_modexit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_DESCRIPTION("netfilter: count number of connections matching a key");
MODULE_LICENSE("GPL");