xref: /freebsd/sys/netgraph/netflow/netflow.c (revision 884a2a699669ec61e2366e3e358342dbc94be24a)
/*-
 * Copyright (c) 2010-2011 Alexander V. Chernikov <melifaro@ipfw.ru>
 * Copyright (c) 2004-2005 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2001-2003 Roman V. Palagin <romanp@unshadow.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $SourceForge: netflow.c,v 1.41 2004/09/05 11:41:10 glebius Exp $
 */

static const char rcs_id[] =
    "@(#) $FreeBSD$";

#include "opt_inet6.h"
#include "opt_route.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/endian.h>

#include <machine/atomic.h>
#include <machine/stdarg.h>

#include <net/if.h>
#include <net/route.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>

#include <netgraph/netflow/netflow.h>
#include <netgraph/netflow/netflow_v9.h>
#include <netgraph/netflow/ng_netflow.h>

#define	NBUCKETS	(65536)		/* must be power of 2 */

/* This hash is for TCP or UDP packets. */
#define FULL_HASH(addr1, addr2, port1, port2)	\
	(((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) ^		\
	port1 ^ htons(port2)) &			\
	(NBUCKETS - 1))

/* This hash is for all other IP packets. */
#define ADDR_HASH(addr1, addr2)			\
	((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) &		\
	(NBUCKETS - 1))

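/*
 * Illustrative sketch, not compiled in: how FULL_HASH() maps a flow onto
 * one of the NBUCKETS buckets.  The function name and parameters below are
 * hypothetical; real callers pass the fields of a flow_rec, as ip_hash()
 * further down does.
 */
#if 0
static __inline uint32_t
example_full_hash(struct in_addr src, struct in_addr dst,
    uint16_t sport, uint16_t dport)
{
	/*
	 * Each 32-bit address is folded onto its low 16 bits with
	 * addr ^ (addr >> 16), the two ports (network byte order) are
	 * XORed in, and the result is masked with NBUCKETS - 1 to
	 * index the 65536-bucket hash.
	 */
	return (FULL_HASH(src.s_addr, dst.s_addr, sport, dport));
}
#endif
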
/* Macros to shorten logical constructions */
/* XXX: priv must exist in namespace */
#define	INACTIVE(fle)	(time_uptime - fle->f.last > priv->info.nfinfo_inact_t)
#define	AGED(fle)	(time_uptime - fle->f.first > priv->info.nfinfo_act_t)
#define	ISFREE(fle)	(fle->f.packets == 0)

/*
 * 4 is a magical number: statistically, the number of 4-packet flows is
 * bigger than that of 5-, 6-, 7-...-packet flows by an order of magnitude.
 * Most UDP/ICMP scans are 1 packet (~90% of the flow cache).  TCP scans
 * are 2 packets for a reachable host and 4 packets otherwise.
 */
#define	SMALL(fle)	(fle->f.packets <= 4)

MALLOC_DECLARE(M_NETFLOW_HASH);
MALLOC_DEFINE(M_NETFLOW_HASH, "netflow_hash", "NetFlow hash");

static int export_add(item_p, struct flow_entry *);
static int export_send(priv_p, fib_export_p, item_p, int);

static int hash_insert(priv_p, struct flow_hash_entry *, struct flow_rec *,
    int, uint8_t);
#ifdef INET6
static int hash6_insert(priv_p, struct flow6_hash_entry *, struct flow6_rec *,
    int, uint8_t);
#endif

static __inline void expire_flow(priv_p, fib_export_p, struct flow_entry *,
    int);

/*
 * Generate hash for a given flow record.
 *
 * FIB is not used here because most VRFs will carry public IPv4
 * addresses, which are unique even without a FIB.  Private addresses
 * can overlap, but that is handled by the flow_rec bcmp(), which
 * includes the fib id.  In the IPv6 world addresses are all globally
 * unique (not entirely true, there is FC00::/7 for example, but the
 * chances of address overlap are MUCH smaller).
 */
static __inline uint32_t
ip_hash(struct flow_rec *r)
{
	switch (r->r_ip_p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return FULL_HASH(r->r_src.s_addr, r->r_dst.s_addr,
		    r->r_sport, r->r_dport);
	default:
		return ADDR_HASH(r->r_src.s_addr, r->r_dst.s_addr);
	}
}

#ifdef INET6
/*
 * Generate hash for a given flow6 record.  Use the lower 4 octets
 * of the v6 addresses.
 */
static __inline uint32_t
ip6_hash(struct flow6_rec *r)
{
	switch (r->r_ip_p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return FULL_HASH(r->src.r_src6.__u6_addr.__u6_addr32[3],
		    r->dst.r_dst6.__u6_addr.__u6_addr32[3], r->r_sport,
		    r->r_dport);
	default:
		return ADDR_HASH(r->src.r_src6.__u6_addr.__u6_addr32[3],
		    r->dst.r_dst6.__u6_addr.__u6_addr32[3]);
	}
}
#endif

/* This is a callback from uma(9), called on alloc. */
static int
uma_ctor_flow(void *mem, int size, void *arg, int how)
{
	priv_p priv = (priv_p)arg;

	if (atomic_load_acq_32(&priv->info.nfinfo_used) >= CACHESIZE)
		return (ENOMEM);

	atomic_add_32(&priv->info.nfinfo_used, 1);

	return (0);
}

/* This is a callback from uma(9), called on free. */
static void
uma_dtor_flow(void *mem, int size, void *arg)
{
	priv_p priv = (priv_p)arg;

	atomic_subtract_32(&priv->info.nfinfo_used, 1);
}

#ifdef INET6
/* This is a callback from uma(9), called on alloc. */
static int
uma_ctor_flow6(void *mem, int size, void *arg, int how)
{
	priv_p priv = (priv_p)arg;

	if (atomic_load_acq_32(&priv->info.nfinfo_used6) >= CACHESIZE)
		return (ENOMEM);

	atomic_add_32(&priv->info.nfinfo_used6, 1);

	return (0);
}

/* This is a callback from uma(9), called on free. */
static void
uma_dtor_flow6(void *mem, int size, void *arg)
{
	priv_p priv = (priv_p)arg;

	atomic_subtract_32(&priv->info.nfinfo_used6, 1);
}
#endif

/*
 * Detach the export datagram from priv, if there is any.
 * If there is none, allocate a new one.
 */
static item_p
get_export_dgram(priv_p priv, fib_export_p fe)
{
	item_p	item = NULL;

	mtx_lock(&fe->export_mtx);
	if (fe->exp.item != NULL) {
		item = fe->exp.item;
		fe->exp.item = NULL;
	}
	mtx_unlock(&fe->export_mtx);

	if (item == NULL) {
		struct netflow_v5_export_dgram *dgram;
		struct mbuf *m;

		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (NULL);
		item = ng_package_data(m, NG_NOFLAGS);
		if (item == NULL)
			return (NULL);
		dgram = mtod(m, struct netflow_v5_export_dgram *);
		dgram->header.count = 0;
		dgram->header.version = htons(NETFLOW_V5);
		dgram->header.pad = 0;
	}

	return (item);
}

/*
 * Re-attach an incomplete datagram back to priv.
 * If there is already another one there, send the incomplete one.
 */
static void
return_export_dgram(priv_p priv, fib_export_p fe, item_p item, int flags)
{
	/*
	 * It may happen on SMP that some other thread has already
	 * put its item there; in this case we bail out and send
	 * what we have to the collector.
	 */
	mtx_lock(&fe->export_mtx);
	if (fe->exp.item == NULL) {
		fe->exp.item = item;
		mtx_unlock(&fe->export_mtx);
	} else {
		mtx_unlock(&fe->export_mtx);
		export_send(priv, fe, item, flags);
	}
}

/*
 * The flow is over.  Call export_add() and free it.  If the datagram
 * is full, call export_send().
 */
static __inline void
expire_flow(priv_p priv, fib_export_p fe, struct flow_entry *fle, int flags)
{
	struct netflow_export_item exp;
	uint16_t version = fle->f.version;

	if ((priv->export != NULL) && (version == IPVERSION)) {
		exp.item = get_export_dgram(priv, fe);
		if (exp.item == NULL) {
			atomic_add_32(&priv->info.nfinfo_export_failed, 1);
			if (priv->export9 != NULL)
				atomic_add_32(&priv->info.nfinfo_export9_failed, 1);
			/* fle definitely contains an IPv4 flow. */
			uma_zfree_arg(priv->zone, fle, priv);
			return;
		}

		if (export_add(exp.item, fle) > 0)
			export_send(priv, fe, exp.item, flags);
		else
			return_export_dgram(priv, fe, exp.item, NG_QUEUE);
	}

	if (priv->export9 != NULL) {
		exp.item9 = get_export9_dgram(priv, fe, &exp.item9_opt);
		if (exp.item9 == NULL) {
			atomic_add_32(&priv->info.nfinfo_export9_failed, 1);
			if (version == IPVERSION)
				uma_zfree_arg(priv->zone, fle, priv);
#ifdef INET6
			else if (version == IP6VERSION)
				uma_zfree_arg(priv->zone6, fle, priv);
#endif
			else
				panic("ng_netflow: unknown IP version: %d",
				    version);
			return;
		}

		if (export9_add(exp.item9, exp.item9_opt, fle) > 0)
			export9_send(priv, fe, exp.item9, exp.item9_opt, flags);
		else
			return_export9_dgram(priv, fe, exp.item9,
			    exp.item9_opt, NG_QUEUE);
	}

	if (version == IPVERSION)
		uma_zfree_arg(priv->zone, fle, priv);
#ifdef INET6
	else if (version == IP6VERSION)
		uma_zfree_arg(priv->zone6, fle, priv);
#endif
}

/* Get a snapshot of node statistics. */
void
ng_netflow_copyinfo(priv_p priv, struct ng_netflow_info *i)
{
	/* XXX: atomic */
	memcpy((void *)i, (void *)&priv->info, sizeof(priv->info));
}

/*
 * Insert a record into the defined slot.
 *
 * First we get a free flow entry, then fill in all
 * the fields we can.
 *
 * TODO: consider dropping the hash mutex while filling in the datagram,
 * as was done in the previous version.  Need to test & profile
 * to be sure.
 */
static __inline int
hash_insert(priv_p priv, struct flow_hash_entry *hsh, struct flow_rec *r,
	int plen, uint8_t tcp_flags)
{
	struct flow_entry *fle;
	struct sockaddr_in sin;
	struct rtentry *rt;

	mtx_assert(&hsh->mtx, MA_OWNED);

	fle = uma_zalloc_arg(priv->zone, priv, M_NOWAIT);
	if (fle == NULL) {
		atomic_add_32(&priv->info.nfinfo_alloc_failed, 1);
		return (ENOMEM);
	}

	/*
	 * Now fle is totally ours.  It is detached from all lists,
	 * so we can safely edit it.
	 */
	fle->f.version = IPVERSION;
	bcopy(r, &fle->f.r, sizeof(struct flow_rec));
	fle->f.bytes = plen;
	fle->f.packets = 1;
	fle->f.tcp_flags = tcp_flags;

	fle->f.first = fle->f.last = time_uptime;

	/*
	 * First we do a route table lookup on the destination address,
	 * so that we can fill in out_ifx, dst_mask, nexthop, and dst_as
	 * in future releases.
	 */
	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr = fle->f.r.r_dst;
	rt = rtalloc1_fib((struct sockaddr *)&sin, 0, 0, r->fib);
	if (rt != NULL) {
		fle->f.fle_o_ifx = rt->rt_ifp->if_index;

		if (rt->rt_flags & RTF_GATEWAY &&
		    rt->rt_gateway->sa_family == AF_INET)
			fle->f.next_hop =
			    ((struct sockaddr_in *)(rt->rt_gateway))->sin_addr;

		if (rt_mask(rt))
			fle->f.dst_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up.  We can't determine the mask :( */
			fle->f.dst_mask = 32;

		RTFREE_LOCKED(rt);
	}

	/* Do a route lookup on the source address to fill in src_mask. */
	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr = fle->f.r.r_src;
	rt = rtalloc1_fib((struct sockaddr *)&sin, 0, 0, r->fib);
	if (rt != NULL) {
		if (rt_mask(rt))
			fle->f.src_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up.  We can't determine the mask :( */
			fle->f.src_mask = 32;

		RTFREE_LOCKED(rt);
	}

	/* Push the new flow to the end of the hash chain. */
	TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);

	return (0);
}

#ifdef INET6
/* XXX: make this a normal function instead of a macro (see sketch below). */
#define ipv6_masklen(x)		(bitcount32((x).__u6_addr.__u6_addr32[0]) + \
				bitcount32((x).__u6_addr.__u6_addr32[1]) + \
				bitcount32((x).__u6_addr.__u6_addr32[2]) + \
				bitcount32((x).__u6_addr.__u6_addr32[3]))
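
/*
 * A possible non-macro form of ipv6_masklen(), sketched as the XXX above
 * suggests.  Left disabled: nothing uses it yet, and its name is chosen
 * not to clash with the macro.
 */
#if 0
static __inline int
ipv6_masklen_fn(const struct in6_addr *a)
{
	int i, mlen;

	/* A contiguous netmask's prefix length equals its set-bit count. */
	mlen = 0;
	for (i = 0; i < 4; i++)
		mlen += bitcount32(a->__u6_addr.__u6_addr32[i]);
	return (mlen);
}
#endif
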
/* XXX: do we need inline here? */
static __inline int
hash6_insert(priv_p priv, struct flow6_hash_entry *hsh6, struct flow6_rec *r,
	int plen, uint8_t tcp_flags)
{
	struct flow6_entry *fle6;
	struct sockaddr_in6 *src, *dst;
	struct rtentry *rt;
	struct route_in6 rin6;

	mtx_assert(&hsh6->mtx, MA_OWNED);

	fle6 = uma_zalloc_arg(priv->zone6, priv, M_NOWAIT);
	if (fle6 == NULL) {
		atomic_add_32(&priv->info.nfinfo_alloc_failed, 1);
		return (ENOMEM);
	}

	/*
	 * Now fle6 is totally ours.  It is detached from all lists,
	 * so we can safely edit it.
	 */
	fle6->f.version = IP6VERSION;
	bcopy(r, &fle6->f.r, sizeof(struct flow6_rec));
	fle6->f.bytes = plen;
	fle6->f.packets = 1;
	fle6->f.tcp_flags = tcp_flags;

	fle6->f.first = fle6->f.last = time_uptime;

	/*
	 * First we do a route table lookup on the destination address,
	 * so that we can fill in out_ifx, dst_mask, nexthop, and dst_as
	 * in future releases.
	 */
	bzero(&rin6, sizeof(struct route_in6));
	dst = (struct sockaddr_in6 *)&rin6.ro_dst;
	dst->sin6_len = sizeof(struct sockaddr_in6);
	dst->sin6_family = AF_INET6;
	dst->sin6_addr = r->dst.r_dst6;

	rin6.ro_rt = rtalloc1_fib((struct sockaddr *)dst, 0, 0, r->fib);

	if (rin6.ro_rt != NULL) {
		rt = rin6.ro_rt;
		fle6->f.fle_o_ifx = rt->rt_ifp->if_index;

		if (rt->rt_flags & RTF_GATEWAY &&
		    rt->rt_gateway->sa_family == AF_INET6)
			fle6->f.n.next_hop6 =
			    ((struct sockaddr_in6 *)(rt->rt_gateway))->sin6_addr;

		if (rt_mask(rt))
			fle6->f.dst_mask =
			    ipv6_masklen(((struct sockaddr_in6 *)
			    rt_mask(rt))->sin6_addr);
		else
			fle6->f.dst_mask = 128;

		RTFREE_LOCKED(rt);
	}

	/* Do a route lookup on the source address to fill in src_mask. */
	bzero(&rin6, sizeof(struct route_in6));
	src = (struct sockaddr_in6 *)&rin6.ro_dst;
	src->sin6_len = sizeof(struct sockaddr_in6);
	src->sin6_family = AF_INET6;
	src->sin6_addr = r->src.r_src6;

	rin6.ro_rt = rtalloc1_fib((struct sockaddr *)src, 0, 0, r->fib);

	if (rin6.ro_rt != NULL) {
		rt = rin6.ro_rt;

		if (rt_mask(rt))
			fle6->f.src_mask =
			    ipv6_masklen(((struct sockaddr_in6 *)
			    rt_mask(rt))->sin6_addr);
		else
			fle6->f.src_mask = 128;

		RTFREE_LOCKED(rt);
	}

	/* Push the new flow to the end of the hash chain. */
	TAILQ_INSERT_TAIL(&hsh6->head, fle6, fle6_hash);

	return (0);
}
#endif

/*
 * Non-static functions called from ng_netflow.c.
 */

/* Allocate memory and set up the flow cache. */
void
ng_netflow_cache_init(priv_p priv)
{
	struct flow_hash_entry *hsh;
#ifdef INET6
	struct flow6_hash_entry *hsh6;
#endif
	int i;

	/* Initialize cache UMA zones. */
	priv->zone = uma_zcreate("NetFlow IPv4 cache",
	    sizeof(struct flow_entry), uma_ctor_flow, uma_dtor_flow,
	    NULL, NULL, UMA_ALIGN_CACHE, 0);
	uma_zone_set_max(priv->zone, CACHESIZE);
#ifdef INET6
	priv->zone6 = uma_zcreate("NetFlow IPv6 cache",
	    sizeof(struct flow6_entry), uma_ctor_flow6, uma_dtor_flow6,
	    NULL, NULL, UMA_ALIGN_CACHE, 0);
	uma_zone_set_max(priv->zone6, CACHESIZE);
#endif

	/* Allocate the IPv4 hash. */
	priv->hash = malloc(NBUCKETS * sizeof(struct flow_hash_entry),
	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);

	/* Initialize the IPv4 hash. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++) {
		mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
		TAILQ_INIT(&hsh->head);
	}

#ifdef INET6
	/* Allocate the IPv6 hash. */
	priv->hash6 = malloc(NBUCKETS * sizeof(struct flow6_hash_entry),
	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);

	/* Initialize the IPv6 hash. */
	for (i = 0, hsh6 = priv->hash6; i < NBUCKETS; i++, hsh6++) {
		mtx_init(&hsh6->mtx, "hash mutex", NULL, MTX_DEF);
		TAILQ_INIT(&hsh6->head);
	}
#endif

	ng_netflow_v9_cache_init(priv);
	CTR0(KTR_NET, "ng_netflow startup()");
}

/* Initialize a new FIB table for v5 and v9. */
int
ng_netflow_fib_init(priv_p priv, int fib)
{
	fib_export_p	fe = priv_to_fib(priv, fib);

	CTR1(KTR_NET, "ng_netflow(): fib init: %d", fib);

	if (fe != NULL)
		return (0);

	if ((fe = malloc(sizeof(struct fib_export), M_NETGRAPH,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (1);

	mtx_init(&fe->export_mtx, "export dgram lock", NULL, MTX_DEF);
	mtx_init(&fe->export9_mtx, "export9 dgram lock", NULL, MTX_DEF);
	fe->fib = fib;
	fe->domain_id = fib;

	if (atomic_cmpset_ptr((volatile uintptr_t *)&priv->fib_data[fib],
	    (uintptr_t)NULL, (uintptr_t)fe) == 0) {
		/* The FIB was already set up by another ISR. */
		CTR3(KTR_NET, "ng_netflow(): fib init: %d setup %p but got %p",
		    fib, fe, priv_to_fib(priv, fib));
		mtx_destroy(&fe->export_mtx);
		mtx_destroy(&fe->export9_mtx);
		free(fe, M_NETGRAPH);
	} else {
		/* Increase the counter for statistics. */
		CTR3(KTR_NET, "ng_netflow(): fib %d setup to %p (%p)",
		    fib, fe, priv_to_fib(priv, fib));
		atomic_fetchadd_32(&priv->info.nfinfo_alloc_fibs, 1);
	}

	return (0);
}

/* Free all flow cache memory.  Called from the node close method. */
void
ng_netflow_cache_flush(priv_p priv)
{
	struct flow_entry	*fle, *fle1;
	struct flow_hash_entry	*hsh;
#ifdef INET6
	struct flow6_entry	*fle6, *fle61;
	struct flow6_hash_entry	*hsh6;
#endif
	struct netflow_export_item exp;
	fib_export_p fe;
	int i;

	bzero(&exp, sizeof(exp));

	/*
	 * We are going to free probably billable data.
	 * Expire everything before freeing it.
	 * No locking is required since the callout is already drained.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++)
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			fe = priv_to_fib(priv, fle->f.r.fib);
			expire_flow(priv, fe, fle, NG_QUEUE);
		}
#ifdef INET6
	for (hsh6 = priv->hash6, i = 0; i < NBUCKETS; hsh6++, i++)
		TAILQ_FOREACH_SAFE(fle6, &hsh6->head, fle6_hash, fle61) {
			TAILQ_REMOVE(&hsh6->head, fle6, fle6_hash);
			fe = priv_to_fib(priv, fle6->f.r.fib);
			expire_flow(priv, fe, (struct flow_entry *)fle6,
			    NG_QUEUE);
		}
#endif

	uma_zdestroy(priv->zone);
	/* Destroy hash mutexes. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++)
		mtx_destroy(&hsh->mtx);

	/* Free hash memory. */
	if (priv->hash != NULL)
		free(priv->hash, M_NETFLOW_HASH);
#ifdef INET6
	uma_zdestroy(priv->zone6);
	/* Destroy hash mutexes. */
	for (i = 0, hsh6 = priv->hash6; i < NBUCKETS; i++, hsh6++)
		mtx_destroy(&hsh6->mtx);

	/* Free hash memory. */
	if (priv->hash6 != NULL)
		free(priv->hash6, M_NETFLOW_HASH);
#endif

	for (i = 0; i < RT_NUMFIBS; i++) {
		if ((fe = priv_to_fib(priv, i)) == NULL)
			continue;

		if (fe->exp.item != NULL)
			export_send(priv, fe, fe->exp.item, NG_QUEUE);

		if (fe->exp.item9 != NULL)
			export9_send(priv, fe, fe->exp.item9,
			    fe->exp.item9_opt, NG_QUEUE);

		mtx_destroy(&fe->export_mtx);
		mtx_destroy(&fe->export9_mtx);
		free(fe, M_NETGRAPH);
	}

	ng_netflow_v9_cache_flush(priv);
}

/* Insert a packet into the flow cache. */
int
ng_netflow_flow_add(priv_p priv, fib_export_p fe, struct ip *ip,
    caddr_t upper_ptr, uint8_t upper_proto, uint8_t is_frag,
    unsigned int src_if_index)
{
	register struct flow_entry	*fle, *fle1;
	struct flow_hash_entry	*hsh;
	struct flow_rec		r;
	int			hlen, plen;
	int			error = 0;
	uint8_t			tcp_flags = 0;
	uint16_t		eproto;

	/* Try to fill in flow_rec r. */
	bzero(&r, sizeof(r));

	/* Check version. */
	if (ip->ip_v != IPVERSION)
		return (EINVAL);

	/* Verify minimum header length. */
	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip))
		return (EINVAL);

	eproto = ETHERTYPE_IP;
	/* Assume L4 template by default. */
	r.flow_type = NETFLOW_V9_FLOW_V4_L4;

	r.r_src = ip->ip_src;
	r.r_dst = ip->ip_dst;
	r.fib = fe->fib;

	/* Save packet length. */
	plen = ntohs(ip->ip_len);

	r.r_ip_p = ip->ip_p;
	r.r_tos = ip->ip_tos;

	r.r_i_ifx = src_if_index;

	/*
	 * XXX NOTE: only the first fragment of a fragmented TCP, UDP or
	 * ICMP packet will be recorded with proper s_port and d_port.
	 * Following fragments will be recorded simply as an IP packet with
	 * ip_proto = ip->ip_p and s_port, d_port set to zero.
	 * I know, it looks like a bug.  But I don't want to re-implement
	 * IP packet assembling here.  Anyway, the (in)famous trafd works
	 * this way - and nobody complains yet :)
	 */
	if ((ip->ip_off & htons(IP_OFFMASK)) == 0)
		switch (r.r_ip_p) {
		case IPPROTO_TCP:
		{
			register struct tcphdr *tcp;

			tcp = (struct tcphdr *)((caddr_t)ip + hlen);
			r.r_sport = tcp->th_sport;
			r.r_dport = tcp->th_dport;
			tcp_flags = tcp->th_flags;
			break;
		}
		case IPPROTO_UDP:
			r.r_ports = *(uint32_t *)((caddr_t)ip + hlen);
			break;
		}

	atomic_fetchadd_32(&priv->info.nfinfo_packets, 1);
	/* XXX: atomic */
	priv->info.nfinfo_bytes += plen;

	/* Find the hash slot. */
	hsh = &priv->hash[ip_hash(&r)];

	mtx_lock(&hsh->mtx);

	/*
	 * Go through the hash and find our entry.  If we encounter an
	 * entry that should be expired, purge it.  We do a reverse
	 * search since most active entries are first, and most
	 * searches are done on most active entries.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
		if (bcmp(&r, &fle->f.r, sizeof(struct flow_rec)) == 0)
			break;
		if ((INACTIVE(fle) && SMALL(fle)) || AGED(fle)) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib),
			    fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		}
	}

	if (fle) {			/* An existing entry. */
		fle->f.bytes += plen;
		fle->f.packets++;
		fle->f.tcp_flags |= tcp_flags;
		fle->f.last = time_uptime;

		/*
		 * We have the following reasons to expire a flow actively:
		 * - it hit the active timeout
		 * - a TCP connection closed
		 * - it is going to overflow a counter
		 */
		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle) ||
		    (fle->f.bytes >= (CNTR_MAX - IF_MAXMTU))) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib),
			    fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		} else {
			/*
			 * It is the newest; move it to the tail
			 * if it isn't there already.  The next search
			 * will locate it quicker.
			 */
			if (fle != TAILQ_LAST(&hsh->head, fhead)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
			}
		}
	} else				/* A new flow entry. */
		error = hash_insert(priv, hsh, &r, plen, tcp_flags);

	mtx_unlock(&hsh->mtx);

	return (error);
}

#ifdef INET6
/* Insert an IPv6 packet into the flow cache. */
int
ng_netflow_flow6_add(priv_p priv, fib_export_p fe, struct ip6_hdr *ip6,
    caddr_t upper_ptr, uint8_t upper_proto, uint8_t is_frag,
    unsigned int src_if_index)
{
	register struct flow6_entry	*fle6 = NULL, *fle61;
	struct flow6_hash_entry		*hsh6;
	struct flow6_rec		r;
	int			plen;
	int			error = 0;
	uint8_t			tcp_flags = 0;

	/* Check version. */
	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION)
		return (EINVAL);

	bzero(&r, sizeof(r));

	r.src.r_src6 = ip6->ip6_src;
	r.dst.r_dst6 = ip6->ip6_dst;
	r.fib = fe->fib;

	/* Assume L4 template by default. */
	r.flow_type = NETFLOW_V9_FLOW_V6_L4;

	/* Save packet length. */
	plen = ntohs(ip6->ip6_plen) + sizeof(struct ip6_hdr);

	/* XXX: set DSCP/CoS value */
#if 0
	r.r_tos = ip->ip_tos;
#endif
	if (is_frag == 0) {
		switch (upper_proto) {
		case IPPROTO_TCP:
		{
			register struct tcphdr *tcp;

			tcp = (struct tcphdr *)upper_ptr;
			r.r_ports = *(uint32_t *)upper_ptr;
			tcp_flags = tcp->th_flags;
			break;
		}
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
			r.r_ports = *(uint32_t *)upper_ptr;
			break;
		}
	}

	r.r_ip_p = upper_proto;
	r.r_i_ifx = src_if_index;

	atomic_fetchadd_32(&priv->info.nfinfo_packets6, 1);
	/* XXX: atomic */
	priv->info.nfinfo_bytes6 += plen;

	/* Find the hash slot. */
	hsh6 = &priv->hash6[ip6_hash(&r)];

	mtx_lock(&hsh6->mtx);

	/*
	 * Go through the hash and find our entry.  If we encounter an
	 * entry that should be expired, purge it.  We do a reverse
	 * search since most active entries are first, and most
	 * searches are done on most active entries.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(fle6, &hsh6->head, f6head, fle6_hash,
	    fle61) {
		if (fle6->f.version != IP6VERSION)
			continue;
		if (bcmp(&r, &fle6->f.r, sizeof(struct flow6_rec)) == 0)
			break;
		if ((INACTIVE(fle6) && SMALL(fle6)) || AGED(fle6)) {
			TAILQ_REMOVE(&hsh6->head, fle6, fle6_hash);
			expire_flow(priv, priv_to_fib(priv, fle6->f.r.fib),
			    (struct flow_entry *)fle6, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		}
	}

	if (fle6 != NULL) {		/* An existing entry. */
		fle6->f.bytes += plen;
		fle6->f.packets++;
		fle6->f.tcp_flags |= tcp_flags;
		fle6->f.last = time_uptime;

		/*
		 * We have the following reasons to expire a flow actively:
		 * - it hit the active timeout
		 * - a TCP connection closed
		 * - it is going to overflow a counter
		 */
		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle6) ||
		    (fle6->f.bytes >= (CNTR_MAX - IF_MAXMTU))) {
			TAILQ_REMOVE(&hsh6->head, fle6, fle6_hash);
			expire_flow(priv, priv_to_fib(priv, fle6->f.r.fib),
			    (struct flow_entry *)fle6, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		} else {
			/*
			 * It is the newest; move it to the tail
			 * if it isn't there already.  The next search
			 * will locate it quicker.
			 */
			if (fle6 != TAILQ_LAST(&hsh6->head, f6head)) {
				TAILQ_REMOVE(&hsh6->head, fle6, fle6_hash);
				TAILQ_INSERT_TAIL(&hsh6->head, fle6,
				    fle6_hash);
			}
		}
	} else				/* A new flow entry. */
		error = hash6_insert(priv, hsh6, &r, plen, tcp_flags);

	mtx_unlock(&hsh6->mtx);

	return (error);
}
#endif

/*
 * Return records from the cache to userland.
 *
 * TODO: matching a particular IP should be done in kernel, here.
 * XXX: IPv6 flows will return random data
 */
int
ng_netflow_flow_show(priv_p priv, uint32_t last, struct ng_mesg *resp)
{
	struct flow_hash_entry	*hsh;
	struct flow_entry	*fle;
	struct ngnf_flows	*data;
	int	i;

	data = (struct ngnf_flows *)resp->data;
	data->last = 0;
	data->nentries = 0;

	/* Check if this is the first run. */
	if (last == 0) {
		hsh = priv->hash;
		i = 0;
	} else {
		if (last > NBUCKETS - 1)
			return (EINVAL);
		hsh = priv->hash + last;
		i = last;
	}

	/*
	 * We will transfer no more than NREC_AT_ONCE records; more data
	 * will come in the next message.
	 * We send the current hash index to userland, and userland should
	 * return it back to us.  Then we will restart with the new entry.
	 *
	 * The resulting cache snapshot is inaccurate for the
	 * following reasons:
	 *  - we skip locked hash entries
	 *  - we bail out if someone wants our entry
	 *  - we skip the rest of the entries when we hit NREC_AT_ONCE
	 */
	for (; i < NBUCKETS; hsh++, i++) {
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			bcopy(&fle->f, &(data->entries[data->nentries]),
			    sizeof(fle->f));
			data->nentries++;
			if (data->nentries == NREC_AT_ONCE) {
				mtx_unlock(&hsh->mtx);
				if (++i < NBUCKETS)
					data->last = i;
				return (0);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	return (0);
}

/* We have a full datagram in privdata.  Send it to the export hook. */
static int
export_send(priv_p priv, fib_export_p fe, item_p item, int flags)
{
	struct mbuf *m = NGI_M(item);
	struct netflow_v5_export_dgram *dgram = mtod(m,
	    struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct timespec ts;
	int error = 0;

	/* Fill in the mbuf header. */
	m->m_len = m->m_pkthdr.len = sizeof(struct netflow_v5_record) *
	    header->count + sizeof(struct netflow_v5_header);

	/* Fill in the export header. */
	header->sys_uptime = htonl(MILLIUPTIME(time_uptime));
	getnanotime(&ts);
	header->unix_secs  = htonl(ts.tv_sec);
	header->unix_nsecs = htonl(ts.tv_nsec);
	header->engine_type = 0;
	header->engine_id = fe->domain_id;
	header->pad = 0;
	header->flow_seq = htonl(atomic_fetchadd_32(&fe->flow_seq,
	    header->count));
	header->count = htons(header->count);

	if (priv->export != NULL)
		NG_FWD_ITEM_HOOK_FLAGS(error, item, priv->export, flags);
	else
		NG_FREE_ITEM(item);

	return (error);
}

/* Add an export record to the dgram. */
static int
export_add(item_p item, struct flow_entry *fle)
{
	struct netflow_v5_export_dgram *dgram = mtod(NGI_M(item),
	    struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct netflow_v5_record *rec;

	rec = &dgram->r[header->count];
	header->count++;

	KASSERT(header->count <= NETFLOW_V5_MAX_RECORDS,
	    ("ng_netflow: export too big"));

	/* Fill in the export record. */
	rec->src_addr = fle->f.r.r_src.s_addr;
	rec->dst_addr = fle->f.r.r_dst.s_addr;
	rec->next_hop = fle->f.next_hop.s_addr;
	rec->i_ifx    = htons(fle->f.fle_i_ifx);
	rec->o_ifx    = htons(fle->f.fle_o_ifx);
	rec->packets  = htonl(fle->f.packets);
	rec->octets   = htonl(fle->f.bytes);
	rec->first    = htonl(MILLIUPTIME(fle->f.first));
	rec->last     = htonl(MILLIUPTIME(fle->f.last));
	rec->s_port   = fle->f.r.r_sport;
	rec->d_port   = fle->f.r.r_dport;
	rec->flags    = fle->f.tcp_flags;
	rec->prot     = fle->f.r.r_ip_p;
	rec->tos      = fle->f.r.r_tos;
	rec->dst_mask = fle->f.dst_mask;
	rec->src_mask = fle->f.src_mask;
	rec->pad1     = 0;
	rec->pad2     = 0;

	/* Fields not supported. */
	rec->src_as = rec->dst_as = 0;

	if (header->count == NETFLOW_V5_MAX_RECORDS)
		return (1); /* end of datagram */
	else
		return (0);
}

/* Periodic flow expiry run. */
void
ng_netflow_expire(void *arg)
{
	struct flow_entry	*fle, *fle1;
	struct flow_hash_entry	*hsh;
#ifdef INET6
	struct flow6_entry	*fle6, *fle61;
	struct flow6_hash_entry	*hsh6;
#endif
	priv_p			priv = (priv_p)arg;
	uint32_t		used;
	int			i;

	/*
	 * Go through the whole cache.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++) {
		/*
		 * Skip entries that are already being worked on.
		 */
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		used = atomic_load_acq_32(&priv->info.nfinfo_used);
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			/*
			 * Interrupt thread wants this entry!
			 * Quick! Quick! Bail out!
			 */
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			/*
			 * Don't expire aggressively while the hash
			 * collision ratio is predicted to be small.
			 */
			if (used <= (NBUCKETS*2) && !INACTIVE(fle))
				break;

			if ((INACTIVE(fle) && (SMALL(fle) ||
			    (used > (NBUCKETS*2)))) || AGED(fle)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				expire_flow(priv, priv_to_fib(priv,
				    fle->f.r.fib), fle, NG_NOFLAGS);
				used--;
				atomic_add_32(&priv->info.nfinfo_inact_exp, 1);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

#ifdef INET6
	for (hsh6 = priv->hash6, i = 0; i < NBUCKETS; hsh6++, i++) {
		/*
		 * Skip entries that are already being worked on.
		 */
		if (mtx_trylock(&hsh6->mtx) == 0)
			continue;

		used = atomic_load_acq_32(&priv->info.nfinfo_used6);
		TAILQ_FOREACH_SAFE(fle6, &hsh6->head, fle6_hash, fle61) {
			/*
			 * Interrupt thread wants this entry!
			 * Quick! Quick! Bail out!
			 */
			if (hsh6->mtx.mtx_lock & MTX_CONTESTED)
				break;

			/*
			 * Don't expire aggressively while the hash
			 * collision ratio is predicted to be small.
			 */
			if (used <= (NBUCKETS*2) && !INACTIVE(fle6))
				break;

			if ((INACTIVE(fle6) && (SMALL(fle6) ||
			    (used > (NBUCKETS*2)))) || AGED(fle6)) {
				TAILQ_REMOVE(&hsh6->head, fle6, fle6_hash);
				expire_flow(priv, priv_to_fib(priv,
				    fle6->f.r.fib), (struct flow_entry *)fle6,
				    NG_NOFLAGS);
				used--;
				atomic_add_32(&priv->info.nfinfo_inact_exp, 1);
			}
		}
		mtx_unlock(&hsh6->mtx);
	}
#endif

	/* Schedule the next expiry run. */
	callout_reset(&priv->exp_callout, (1*hz), &ng_netflow_expire,
	    (void *)priv);
}