xref: /linux/net/netfilter/nf_conncount.c (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * count the number of connections matching an arbitrary key.
 *
 * (C) 2017 Red Hat GmbH
 * Author: Florian Westphal <fw@strlen.de>
 *
 * split from xt_connlimit.c:
 *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
 *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
 *		only ignore TIME_WAIT or gone connections
 *   (C) CC Computer Consultants GmbH, 2007
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

#define CONNCOUNT_SLOTS		256U

#define CONNCOUNT_GC_MAX_NODES	8
#define MAX_KEYLEN		5

/* we will save the tuples of all connections we care about */
struct nf_conncount_tuple {
	struct list_head		node;
	struct nf_conntrack_tuple	tuple;
	struct nf_conntrack_zone	zone;
	int				cpu;
	u32				jiffies32;
};

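/* Tracked connections that share the same key are strung on one
 * nf_conncount_list inside an nf_conncount_rb node.  The nodes live in
 * one of the CONNCOUNT_SLOTS rb-trees of struct nf_conncount_data,
 * selected by jhash of the key (see count_tree()).
 */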
struct nf_conncount_rb {
	struct rb_node node;
	struct nf_conncount_list list;
	u32 key[MAX_KEYLEN];
	struct rcu_head rcu_head;
};

static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp;

struct nf_conncount_data {
	unsigned int keylen;
	struct rb_root root[CONNCOUNT_SLOTS];
	struct net *net;
	struct work_struct gc_work;
	unsigned long pending_trees[BITS_TO_LONGS(CONNCOUNT_SLOTS)];
	unsigned int gc_tree;
};

static u_int32_t conncount_rnd __read_mostly;
static struct kmem_cache *conncount_rb_cachep __read_mostly;
static struct kmem_cache *conncount_conn_cachep __read_mostly;

static inline bool already_closed(const struct nf_conn *conn)
{
	if (nf_ct_protonum(conn) == IPPROTO_TCP)
		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
	else
		return false;
}

static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
{
	return memcmp(a, b, klen * sizeof(u32));
}

static void conn_free(struct nf_conncount_list *list,
		      struct nf_conncount_tuple *conn)
{
	lockdep_assert_held(&list->list_lock);

	list->count--;
	list_del(&conn->node);

	kmem_cache_free(conncount_conn_cachep, conn);
}

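/* Look up the conntrack entry cached in @conn.  If it still exists,
 * return it with a reference held.  If it is gone, free the cached tuple
 * and return -ENOENT, unless it might be an unconfirmed entry freshly
 * added by another CPU (see comment below), in which case the entry is
 * kept and -EAGAIN is returned.
 */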
static const struct nf_conntrack_tuple_hash *
find_or_evict(struct net *net, struct nf_conncount_list *list,
	      struct nf_conncount_tuple *conn)
{
	const struct nf_conntrack_tuple_hash *found;
	unsigned long a, b;
	int cpu = raw_smp_processor_id();
	u32 age;

	found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
	if (found)
		return found;
	b = conn->jiffies32;
	a = (u32)jiffies;

	/* conn might have been added just before by another cpu and
	 * might still be unconfirmed.  In this case, nf_conntrack_find()
	 * returns no result.  Thus only evict if this cpu added the
	 * stale entry or if the entry is older than two jiffies.
	 */
	age = a - b;
	if (conn->cpu == cpu || age >= 2) {
		conn_free(list, conn);
		return ERR_PTR(-ENOENT);
	}

	return ERR_PTR(-EAGAIN);
}

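/* Extract the original-direction tuple and zone for @skb.  If the skb
 * already carries a (non-template) conntrack entry, use it directly;
 * otherwise parse the tuple from the packet and look the entry up,
 * setting *refcounted when a reference was taken that the caller must
 * drop with nf_ct_put().  Returns false only if no tuple could be
 * extracted from the packet.
 */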
static bool get_ct_or_tuple_from_skb(struct net *net,
				     const struct sk_buff *skb,
				     u16 l3num,
				     struct nf_conn **ct,
				     struct nf_conntrack_tuple *tuple,
				     const struct nf_conntrack_zone **zone,
				     bool *refcounted)
{
	const struct nf_conntrack_tuple_hash *h;
	enum ip_conntrack_info ctinfo;
	struct nf_conn *found_ct;

	found_ct = nf_ct_get(skb, &ctinfo);
	if (found_ct && !nf_ct_is_template(found_ct)) {
		*tuple = found_ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
		*zone = nf_ct_zone(found_ct);
		*ct = found_ct;
		return true;
	}

	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), l3num, net, tuple))
		return false;

	if (found_ct)
		*zone = nf_ct_zone(found_ct);

	h = nf_conntrack_find_get(net, *zone, tuple);
	if (!h)
		return true;

	found_ct = nf_ct_tuplehash_to_ctrack(h);
	*refcounted = true;
	*ct = found_ct;

	return true;
}

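/* Add the connection carried by @skb to @list unless it is already
 * accounted for.  A confirmed conntrack entry yields -EEXIST, which
 * callers treat as "already counted", not as an error.  While walking the
 * list, up to CONNCOUNT_GC_MAX_NODES stale entries are reclaimed
 * opportunistically.  Called with list->list_lock held.
 */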
static int __nf_conncount_add(struct net *net,
			      const struct sk_buff *skb,
			      u16 l3num,
			      struct nf_conncount_list *list)
{
	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn, *conn_n;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct = NULL;
	struct nf_conn *found_ct;
	unsigned int collect = 0;
	bool refcounted = false;

	if (!get_ct_or_tuple_from_skb(net, skb, l3num, &ct, &tuple, &zone, &refcounted))
		return -ENOENT;

	if (ct && nf_ct_is_confirmed(ct)) {
		if (refcounted)
			nf_ct_put(ct);
		return -EEXIST;
	}

	if ((u32)jiffies == list->last_gc)
		goto add_new_node;

	/* check the saved connections */
	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
		if (collect > CONNCOUNT_GC_MAX_NODES)
			break;

		found = find_or_evict(net, list, conn);
		if (IS_ERR(found)) {
			/* Not found, but might be about to be confirmed */
			if (PTR_ERR(found) == -EAGAIN) {
				if (nf_ct_tuple_equal(&conn->tuple, &tuple) &&
				    nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
				    nf_ct_zone_id(zone, zone->dir))
					goto out_put; /* already exists */
			} else {
				collect++;
			}
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);

		if (nf_ct_tuple_equal(&conn->tuple, &tuple) &&
		    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
			/*
			 * We should not see tuples twice unless someone hooks
			 * this into a table without "-p tcp --syn".
			 *
			 * Attempt to avoid a re-add in this case.
			 */
			nf_ct_put(found_ct);
			goto out_put;
		} else if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			conn_free(list, conn);
			collect++;
			continue;
		}

		nf_ct_put(found_ct);
	}

add_new_node:
	if (WARN_ON_ONCE(list->count > INT_MAX))
		return -EOVERFLOW;

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL)
		return -ENOMEM;

	conn->tuple = tuple;
	conn->zone = *zone;
	conn->cpu = raw_smp_processor_id();
	conn->jiffies32 = (u32)jiffies;
	list_add_tail(&conn->node, &list->head);
	list->count++;
	list->last_gc = (u32)jiffies;

out_put:
	if (refcounted)
		nf_ct_put(ct);
	return 0;
}

int nf_conncount_add_skb(struct net *net,
			 const struct sk_buff *skb,
			 u16 l3num,
			 struct nf_conncount_list *list)
{
	int ret;

	/* check the saved connections */
	spin_lock_bh(&list->list_lock);
	ret = __nf_conncount_add(net, skb, l3num, list);
	spin_unlock_bh(&list->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conncount_add_skb);

void nf_conncount_list_init(struct nf_conncount_list *list)
{
	spin_lock_init(&list->list_lock);
	INIT_LIST_HEAD(&list->head);
	list->count = 0;
	list->last_gc = (u32)jiffies;
}
EXPORT_SYMBOL_GPL(nf_conncount_list_init);

/* Return true if the list is empty. Must be called with BH disabled. */
static bool __nf_conncount_gc_list(struct net *net,
				   struct nf_conncount_list *list)
{
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn, *conn_n;
	struct nf_conn *found_ct;
	unsigned int collected = 0;
	bool ret = false;

	/* don't bother if we just did GC */
	if ((u32)jiffies == READ_ONCE(list->last_gc))
		return false;

	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
		found = find_or_evict(net, list, conn);
		if (IS_ERR(found)) {
			if (PTR_ERR(found) == -ENOENT)
				collected++;
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);
		if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			conn_free(list, conn);
			collected++;
			continue;
		}

		nf_ct_put(found_ct);
		if (collected > CONNCOUNT_GC_MAX_NODES)
			break;
	}

	if (!list->count)
		ret = true;
	list->last_gc = (u32)jiffies;

	return ret;
}

bool nf_conncount_gc_list(struct net *net,
			  struct nf_conncount_list *list)
{
	bool ret;

	/* don't bother if other cpu is already doing GC */
	if (!spin_trylock_bh(&list->list_lock))
		return false;

	ret = __nf_conncount_gc_list(net, list);
	spin_unlock_bh(&list->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conncount_gc_list);

static void __tree_nodes_free(struct rcu_head *h)
{
	struct nf_conncount_rb *rbconn;

	rbconn = container_of(h, struct nf_conncount_rb, rcu_head);
	kmem_cache_free(conncount_rb_cachep, rbconn);
}

/* caller must hold tree nf_conncount_locks[] lock */
static void tree_nodes_free(struct rb_root *root,
			    struct nf_conncount_rb *gc_nodes[],
			    unsigned int gc_count)
{
	struct nf_conncount_rb *rbconn;

	while (gc_count) {
		rbconn = gc_nodes[--gc_count];
		spin_lock(&rbconn->list.list_lock);
		if (!rbconn->list.count) {
			rb_erase(&rbconn->node, root);
			call_rcu(&rbconn->rcu_head, __tree_nodes_free);
		}
		spin_unlock(&rbconn->list.list_lock);
	}
}

static void schedule_gc_worker(struct nf_conncount_data *data, int tree)
{
	set_bit(tree, data->pending_trees);
	schedule_work(&data->gc_work);
}

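/* Slow path of count_tree(): no usable node was found during the lockless
 * walk.  Re-walk the tree under the per-slot lock and either add to a node
 * that raced in meanwhile, or insert a fresh node for @key.  Nodes whose
 * lists turn out to be empty are removed on the spot (at most
 * CONNCOUNT_GC_MAX_NODES of them) and the GC worker is scheduled to sweep
 * the rest of this tree.
 */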
static unsigned int
insert_tree(struct net *net,
	    const struct sk_buff *skb,
	    u16 l3num,
	    struct nf_conncount_data *data,
	    struct rb_root *root,
	    unsigned int hash,
	    const u32 *key)
{
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
	bool do_gc = true, refcounted = false;
	unsigned int count = 0, gc_count = 0;
	struct rb_node **rbnode, *parent;
	struct nf_conntrack_tuple tuple;
	struct nf_conncount_tuple *conn;
	struct nf_conncount_rb *rbconn;
	struct nf_conn *ct = NULL;

	spin_lock_bh(&nf_conncount_locks[hash]);
restart:
	parent = NULL;
	rbnode = &(root->rb_node);
	while (*rbnode) {
		int diff;
		rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);

		parent = *rbnode;
		diff = key_diff(key, rbconn->key, data->keylen);
		if (diff < 0) {
			rbnode = &((*rbnode)->rb_left);
		} else if (diff > 0) {
			rbnode = &((*rbnode)->rb_right);
		} else {
			int ret;

			ret = nf_conncount_add_skb(net, skb, l3num, &rbconn->list);
			if (ret && ret != -EEXIST)
				count = 0; /* hotdrop */
			else
				count = rbconn->list.count;
			tree_nodes_free(root, gc_nodes, gc_count);
			goto out_unlock;
		}

		if (gc_count >= ARRAY_SIZE(gc_nodes))
			continue;

		if (do_gc && nf_conncount_gc_list(net, &rbconn->list))
			gc_nodes[gc_count++] = rbconn;
	}

	if (gc_count) {
		tree_nodes_free(root, gc_nodes, gc_count);
		schedule_gc_worker(data, hash);
		gc_count = 0;
		do_gc = false;
		goto restart;
	}

	if (get_ct_or_tuple_from_skb(net, skb, l3num, &ct, &tuple, &zone, &refcounted)) {
		/* expected case: match, insert new node */
		rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
		if (rbconn == NULL)
			goto out_unlock;

		conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
		if (conn == NULL) {
			kmem_cache_free(conncount_rb_cachep, rbconn);
			goto out_unlock;
		}

		conn->tuple = tuple;
		conn->zone = *zone;
		conn->cpu = raw_smp_processor_id();
		conn->jiffies32 = (u32)jiffies;
		memcpy(rbconn->key, key, sizeof(u32) * data->keylen);

		nf_conncount_list_init(&rbconn->list);
		list_add(&conn->node, &rbconn->list.head);
		count = 1;
		rbconn->list.count = count;

		rb_link_node_rcu(&rbconn->node, parent, rbnode);
		rb_insert_color(&rbconn->node, root);

		if (refcounted)
			nf_ct_put(ct);
	}
out_unlock:
	spin_unlock_bh(&nf_conncount_locks[hash]);
	return count;
}

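/* Fast path: walk the rb-tree for @key's hash slot under RCU only.  On a
 * key match, the connection is added (or merely counted when @skb is
 * NULL) under the list lock; if no usable node is found, fall back to
 * insert_tree(), which takes the per-slot spinlock.
 */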
static unsigned int
count_tree(struct net *net,
	   const struct sk_buff *skb,
	   u16 l3num,
	   struct nf_conncount_data *data,
	   const u32 *key)
{
	struct rb_root *root;
	struct rb_node *parent;
	struct nf_conncount_rb *rbconn;
	unsigned int hash;

	hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
	root = &data->root[hash];

	parent = rcu_dereference_raw(root->rb_node);
	while (parent) {
		int diff;

		rbconn = rb_entry(parent, struct nf_conncount_rb, node);

		diff = key_diff(key, rbconn->key, data->keylen);
		if (diff < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
		} else if (diff > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
		} else {
			int ret;

			if (!skb) {
				nf_conncount_gc_list(net, &rbconn->list);
				return rbconn->list.count;
			}

			spin_lock_bh(&rbconn->list.list_lock);
			/* Node might be about to be free'd.
			 * We need to defer to insert_tree() in this case.
			 */
			if (rbconn->list.count == 0) {
				spin_unlock_bh(&rbconn->list.list_lock);
				break;
			}

			/* same source network -> be counted! */
			ret = __nf_conncount_add(net, skb, l3num, &rbconn->list);
			spin_unlock_bh(&rbconn->list.list_lock);
			if (ret && ret != -EEXIST) {
				return 0; /* hotdrop */
			} else {
				/* -EEXIST means add was skipped, update the list */
				if (ret == -EEXIST)
					nf_conncount_gc_list(net, &rbconn->list);
				return rbconn->list.count;
			}
		}
	}

	if (!skb)
		return 0;

	return insert_tree(net, skb, l3num, data, root, hash, key);
}

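/* Deferred garbage collection, scheduled by insert_tree() whenever it ran
 * into empty list nodes.  Scan one pending tree: first reap the per-node
 * lists under RCU, then, if enough of them went empty, take the slot lock
 * and unlink the empty nodes.  If further trees are pending, reschedule
 * ourselves for the next one.
 */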
static void tree_gc_worker(struct work_struct *work)
{
	struct nf_conncount_data *data = container_of(work, struct nf_conncount_data, gc_work);
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES], *rbconn;
	struct rb_root *root;
	struct rb_node *node;
	unsigned int tree, next_tree, gc_count = 0;

	tree = data->gc_tree % CONNCOUNT_SLOTS;
	root = &data->root[tree];

	local_bh_disable();
	rcu_read_lock();
	for (node = rb_first(root); node != NULL; node = rb_next(node)) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);
		if (nf_conncount_gc_list(data->net, &rbconn->list))
			gc_count++;
	}
	rcu_read_unlock();
	local_bh_enable();

	cond_resched();

	spin_lock_bh(&nf_conncount_locks[tree]);
	if (gc_count < ARRAY_SIZE(gc_nodes))
		goto next; /* do not bother */

	gc_count = 0;
	node = rb_first(root);
	while (node != NULL) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);
		node = rb_next(node);

		if (rbconn->list.count > 0)
			continue;

		gc_nodes[gc_count++] = rbconn;
		if (gc_count >= ARRAY_SIZE(gc_nodes)) {
			tree_nodes_free(root, gc_nodes, gc_count);
			gc_count = 0;
		}
	}

	tree_nodes_free(root, gc_nodes, gc_count);
next:
	clear_bit(tree, data->pending_trees);

	next_tree = (tree + 1) % CONNCOUNT_SLOTS;
	next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);

	if (next_tree < CONNCOUNT_SLOTS) {
		data->gc_tree = next_tree;
		schedule_work(work);
	}

	spin_unlock_bh(&nf_conncount_locks[tree]);
}

/* Count and return the number of conntrack entries in 'net' that match
 * 'key'.  If 'skb' is not NULL, also insert the corresponding tuple into
 * the accounting data structure.  Must be called with the RCU read lock
 * held.
 */
unsigned int nf_conncount_count_skb(struct net *net,
				    const struct sk_buff *skb,
				    u16 l3num,
				    struct nf_conncount_data *data,
				    const u32 *key)
{
	return count_tree(net, skb, l3num, data, key);
}
EXPORT_SYMBOL_GPL(nf_conncount_count_skb);

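/* Hedged usage sketch (not part of this file): a caller along the lines
 * of the connlimit match this code was split out of would do roughly the
 * following; 'keylen', 'key' and 'limit' are the caller's own,
 * illustrative names:
 *
 *	data = nf_conncount_init(net, keylen);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	...
 *	// per packet, with the RCU read lock held:
 *	count = nf_conncount_count_skb(net, skb, l3num, data, key);
 *	if (count > limit)
 *		...	reject or drop the packet	...
 *	...
 *	nf_conncount_destroy(net, data);
 *
 * keylen must be a non-zero multiple of sizeof(u32) and at most
 * MAX_KEYLEN u32 words, as checked by nf_conncount_init() below.
 */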
struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int keylen)
{
	struct nf_conncount_data *data;
	int i;

	if (keylen % sizeof(u32) ||
	    keylen / sizeof(u32) > MAX_KEYLEN ||
	    keylen == 0)
		return ERR_PTR(-EINVAL);

	net_get_random_once(&conncount_rnd, sizeof(conncount_rnd));

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		data->root[i] = RB_ROOT;

	data->keylen = keylen / sizeof(u32);
	data->net = net;
	INIT_WORK(&data->gc_work, tree_gc_worker);

	return data;
}
EXPORT_SYMBOL_GPL(nf_conncount_init);

void nf_conncount_cache_free(struct nf_conncount_list *list)
{
	struct nf_conncount_tuple *conn, *conn_n;

	list_for_each_entry_safe(conn, conn_n, &list->head, node)
		kmem_cache_free(conncount_conn_cachep, conn);
}
EXPORT_SYMBOL_GPL(nf_conncount_cache_free);

static void destroy_tree(struct rb_root *r)
{
	struct nf_conncount_rb *rbconn;
	struct rb_node *node;

	while ((node = rb_first(r)) != NULL) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);

		rb_erase(node, r);

		nf_conncount_cache_free(&rbconn->list);

		kmem_cache_free(conncount_rb_cachep, rbconn);
	}
}

void nf_conncount_destroy(struct net *net, struct nf_conncount_data *data)
{
	unsigned int i;

	cancel_work_sync(&data->gc_work);

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		destroy_tree(&data->root[i]);

	kfree(data);
}
EXPORT_SYMBOL_GPL(nf_conncount_destroy);

static int __init nf_conncount_modinit(void)
{
	int i;

	for (i = 0; i < CONNCOUNT_SLOTS; ++i)
		spin_lock_init(&nf_conncount_locks[i]);

	conncount_conn_cachep = KMEM_CACHE(nf_conncount_tuple, 0);
	if (!conncount_conn_cachep)
		return -ENOMEM;

	conncount_rb_cachep = KMEM_CACHE(nf_conncount_rb, 0);
	if (!conncount_rb_cachep) {
		kmem_cache_destroy(conncount_conn_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void __exit nf_conncount_modexit(void)
{
	kmem_cache_destroy(conncount_conn_cachep);
	kmem_cache_destroy(conncount_rb_cachep);
}

module_init(nf_conncount_modinit);
module_exit(nf_conncount_modexit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_DESCRIPTION("netfilter: count number of connections matching a key");
MODULE_LICENSE("GPL");