xref: /freebsd/sys/netgraph/netflow/netflow.c (revision c66ec88fed842fbaad62c30d510644ceb7bd2d71)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2010-2011 Alexander V. Chernikov <melifaro@ipfw.ru>
5  * Copyright (c) 2004-2005 Gleb Smirnoff <glebius@FreeBSD.org>
6  * Copyright (c) 2001-2003 Roman V. Palagin <romanp@unshadow.net>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * $SourceForge: netflow.c,v 1.41 2004/09/05 11:41:10 glebius Exp $
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_route.h"
39 #include <sys/param.h>
40 #include <sys/bitstring.h>
41 #include <sys/systm.h>
42 #include <sys/counter.h>
43 #include <sys/kernel.h>
44 #include <sys/ktr.h>
45 #include <sys/limits.h>
46 #include <sys/mbuf.h>
47 #include <sys/syslog.h>
48 #include <sys/socket.h>
49 #include <vm/uma.h>
50 
51 #include <net/if.h>
52 #include <net/if_dl.h>
53 #include <net/if_var.h>
54 #include <net/route.h>
55 #include <net/route/nhop.h>
56 #include <net/route/route_ctl.h>
57 #include <net/ethernet.h>
58 #include <netinet/in.h>
59 #include <netinet/in_fib.h>
60 #include <netinet/in_systm.h>
61 #include <netinet/ip.h>
62 #include <netinet/ip6.h>
63 #include <netinet/tcp.h>
64 #include <netinet/udp.h>
65 
66 #include <netinet6/in6_fib.h>
67 
68 #include <netgraph/ng_message.h>
69 #include <netgraph/netgraph.h>
70 
71 #include <netgraph/netflow/netflow.h>
72 #include <netgraph/netflow/netflow_v9.h>
73 #include <netgraph/netflow/ng_netflow.h>
74 
75 #define	NBUCKETS	(65536)		/* must be power of 2 */
76 
77 /* This hash is for TCP or UDP packets. */
78 #define FULL_HASH(addr1, addr2, port1, port2)	\
79 	(((addr1 ^ (addr1 >> 16) ^ 		\
80 	htons(addr2 ^ (addr2 >> 16))) ^ 	\
81 	port1 ^ htons(port2)) &			\
82 	(NBUCKETS - 1))
83 
84 /* This hash is for all other IP packets. */
85 #define ADDR_HASH(addr1, addr2)			\
86 	((addr1 ^ (addr1 >> 16) ^ 		\
87 	htons(addr2 ^ (addr2 >> 16))) &		\
88 	(NBUCKETS - 1))
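/*
 * Both hashes fold each address by XORing its high and low 16 bits (the
 * second address byte-swapped via htons()); FULL_HASH additionally mixes
 * in the ports. The result is masked with NBUCKETS - 1, so NBUCKETS must
 * stay a power of 2.
 */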
89 
90 /* Macros to shorten logical constructions */
91 /* XXX: priv must exist in namespace */
92 #define	INACTIVE(fle)	(time_uptime - fle->f.last > priv->nfinfo_inact_t)
93 #define	AGED(fle)	(time_uptime - fle->f.first > priv->nfinfo_act_t)
94 #define	ISFREE(fle)	(fle->f.packets == 0)
95 
96 /*
97  * 4 is a magic number: statistically, the number of 4-packet flows is
98  * an order of magnitude bigger than that of 5-, 6-, 7-...packet flows.
99  * Most UDP/ICMP scans are 1 packet (~ 90% of the flow cache). TCP scans
100  * are 2 packets for a reachable host and 4 packets otherwise.
101  */
102 #define	SMALL(fle)	(fle->f.packets <= 4)
103 
104 MALLOC_DEFINE(M_NETFLOW_HASH, "netflow_hash", "NetFlow hash");
105 
106 static int export_add(item_p, struct flow_entry *);
107 static int export_send(priv_p, fib_export_p, item_p, int);
108 
109 #ifdef INET
110 static int hash_insert(priv_p, struct flow_hash_entry *, struct flow_rec *,
111     int, uint8_t, uint8_t);
112 #endif
113 #ifdef INET6
114 static int hash6_insert(priv_p, struct flow_hash_entry *, struct flow6_rec *,
115     int, uint8_t, uint8_t);
116 #endif
117 
118 static void expire_flow(priv_p, fib_export_p, struct flow_entry *, int);
119 
120 /*
121  * Generate hash for a given flow record.
122  *
123  * The FIB is not used here because most VRFs carry public IPv4
124  * addresses, which are unique even without the FIB. Private addresses
125  * can overlap, but that is handled by the flow_rec bcmp(), which
126  * includes the fib id. In the IPv6 world addresses are all globally
127  * unique (not entirely true, there is FC00::/7 for example, but the
128  * chances of address overlap are MUCH smaller).
129  */
130 static inline uint32_t
131 ip_hash(struct flow_rec *r)
132 {
133 
134 	switch (r->r_ip_p) {
135 	case IPPROTO_TCP:
136 	case IPPROTO_UDP:
137 		return FULL_HASH(r->r_src.s_addr, r->r_dst.s_addr,
138 		    r->r_sport, r->r_dport);
139 	default:
140 		return ADDR_HASH(r->r_src.s_addr, r->r_dst.s_addr);
141 	}
142 }
143 
144 #ifdef INET6
145 /* Generate hash for a given flow6 record. Use the low 4 octets of the v6 addresses. */
146 static inline uint32_t
147 ip6_hash(struct flow6_rec *r)
148 {
149 
150 	switch (r->r_ip_p) {
151 	case IPPROTO_TCP:
152 	case IPPROTO_UDP:
153 		return FULL_HASH(r->src.r_src6.__u6_addr.__u6_addr32[3],
154 		    r->dst.r_dst6.__u6_addr.__u6_addr32[3], r->r_sport,
155 		    r->r_dport);
156 	default:
157 		return ADDR_HASH(r->src.r_src6.__u6_addr.__u6_addr32[3],
158 		    r->dst.r_dst6.__u6_addr.__u6_addr32[3]);
159  	}
160 }
161 
162 #endif
163 
164 /*
165  * Detach the export datagram from priv, if there is one.
166  * If there is none, allocate a new one.
167  */
168 static item_p
169 get_export_dgram(priv_p priv, fib_export_p fe)
170 {
171 	item_p	item = NULL;
172 
173 	mtx_lock(&fe->export_mtx);
174 	if (fe->exp.item != NULL) {
175 		item = fe->exp.item;
176 		fe->exp.item = NULL;
177 	}
178 	mtx_unlock(&fe->export_mtx);
179 
180 	if (item == NULL) {
181 		struct netflow_v5_export_dgram *dgram;
182 		struct mbuf *m;
183 
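		/*
		 * A single mbuf cluster is large enough for a complete
		 * NetFlow v5 datagram (header plus NETFLOW_V5_MAX_RECORDS
		 * records).
		 */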
184 		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
185 		if (m == NULL)
186 			return (NULL);
187 		item = ng_package_data(m, NG_NOFLAGS);
188 		if (item == NULL)
189 			return (NULL);
190 		dgram = mtod(m, struct netflow_v5_export_dgram *);
191 		dgram->header.count = 0;
192 		dgram->header.version = htons(NETFLOW_V5);
193 		dgram->header.pad = 0;
194 	}
195 
196 	return (item);
197 }
198 
199 /*
200  * Re-attach an incomplete datagram back to priv. If another one is
201  * already attached, send the incomplete one to the collector. */
202 static void
203 return_export_dgram(priv_p priv, fib_export_p fe, item_p item, int flags)
204 {
205 
206 	/*
207 	 * On SMP it may happen that some other thread has already
208 	 * put its item there; in that case we bail out and
209 	 * send what we have to the collector.
210 	 */
211 	mtx_lock(&fe->export_mtx);
212 	if (fe->exp.item == NULL) {
213 		fe->exp.item = item;
214 		mtx_unlock(&fe->export_mtx);
215 	} else {
216 		mtx_unlock(&fe->export_mtx);
217 		export_send(priv, fe, item, flags);
218 	}
219 }
220 
221 /*
222  * The flow is over. Call export_add() and free it. If datagram is
223  * full, then call export_send().
224  */
225 static void
226 expire_flow(priv_p priv, fib_export_p fe, struct flow_entry *fle, int flags)
227 {
228 	struct netflow_export_item exp;
229 	uint16_t version = fle->f.version;
230 
231 	if ((priv->export != NULL) && (version == IPVERSION)) {
232 		exp.item = get_export_dgram(priv, fe);
233 		if (exp.item == NULL) {
234 			priv->nfinfo_export_failed++;
235 			if (priv->export9 != NULL)
236 				priv->nfinfo_export9_failed++;
237 			/* fle definitely contains IPv4 flow. */
238 			uma_zfree_arg(priv->zone, fle, priv);
239 			return;
240 		}
241 
242 		if (export_add(exp.item, fle) > 0)
243 			export_send(priv, fe, exp.item, flags);
244 		else
245 			return_export_dgram(priv, fe, exp.item, NG_QUEUE);
246 	}
247 
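	/* NetFlow v9 export handles both IPv4 and IPv6 flows. */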
248 	if (priv->export9 != NULL) {
249 		exp.item9 = get_export9_dgram(priv, fe, &exp.item9_opt);
250 		if (exp.item9 == NULL) {
251 			priv->nfinfo_export9_failed++;
252 			if (version == IPVERSION)
253 				uma_zfree_arg(priv->zone, fle, priv);
254 #ifdef INET6
255 			else if (version == IP6VERSION)
256 				uma_zfree_arg(priv->zone6, fle, priv);
257 #endif
258 			else
259 				panic("ng_netflow: Unknown IP proto: %d",
260 				    version);
261 			return;
262 		}
263 
264 		if (export9_add(exp.item9, exp.item9_opt, fle) > 0)
265 			export9_send(priv, fe, exp.item9, exp.item9_opt, flags);
266 		else
267 			return_export9_dgram(priv, fe, exp.item9,
268 			    exp.item9_opt, NG_QUEUE);
269 	}
270 
271 	if (version == IPVERSION)
272 		uma_zfree_arg(priv->zone, fle, priv);
273 #ifdef INET6
274 	else if (version == IP6VERSION)
275 		uma_zfree_arg(priv->zone6, fle, priv);
276 #endif
277 }
278 
279 /* Get a snapshot of node statistics */
280 void
281 ng_netflow_copyinfo(priv_p priv, struct ng_netflow_info *i)
282 {
283 
284 	i->nfinfo_bytes = counter_u64_fetch(priv->nfinfo_bytes);
285 	i->nfinfo_packets = counter_u64_fetch(priv->nfinfo_packets);
286 	i->nfinfo_bytes6 = counter_u64_fetch(priv->nfinfo_bytes6);
287 	i->nfinfo_packets6 = counter_u64_fetch(priv->nfinfo_packets6);
288 	i->nfinfo_sbytes = counter_u64_fetch(priv->nfinfo_sbytes);
289 	i->nfinfo_spackets = counter_u64_fetch(priv->nfinfo_spackets);
290 	i->nfinfo_sbytes6 = counter_u64_fetch(priv->nfinfo_sbytes6);
291 	i->nfinfo_spackets6 = counter_u64_fetch(priv->nfinfo_spackets6);
292 	i->nfinfo_act_exp = counter_u64_fetch(priv->nfinfo_act_exp);
293 	i->nfinfo_inact_exp = counter_u64_fetch(priv->nfinfo_inact_exp);
294 
295 	i->nfinfo_used = uma_zone_get_cur(priv->zone);
296 #ifdef INET6
297 	i->nfinfo_used6 = uma_zone_get_cur(priv->zone6);
298 #endif
299 
300 	i->nfinfo_alloc_failed = priv->nfinfo_alloc_failed;
301 	i->nfinfo_export_failed = priv->nfinfo_export_failed;
302 	i->nfinfo_export9_failed = priv->nfinfo_export9_failed;
303 	i->nfinfo_realloc_mbuf = priv->nfinfo_realloc_mbuf;
304 	i->nfinfo_alloc_fibs = priv->nfinfo_alloc_fibs;
305 	i->nfinfo_inact_t = priv->nfinfo_inact_t;
306 	i->nfinfo_act_t = priv->nfinfo_act_t;
307 }
308 
309 /*
310  * Insert a record into the defined slot.
311  *
312  * First we allocate a free flow entry, then fill in all
313  * possible fields of it.
314  *
315  * TODO: consider dropping the hash mutex while filling in the datagram,
316  * as was done in a previous version. Needs testing & profiling
317  * to be sure.
318  */
319 #ifdef INET
320 static int
321 hash_insert(priv_p priv, struct flow_hash_entry *hsh, struct flow_rec *r,
322 	int plen, uint8_t flags, uint8_t tcp_flags)
323 {
324 	struct flow_entry *fle;
325 
326 	mtx_assert(&hsh->mtx, MA_OWNED);
327 
328 	fle = uma_zalloc_arg(priv->zone, priv, M_NOWAIT);
329 	if (fle == NULL) {
330 		priv->nfinfo_alloc_failed++;
331 		return (ENOMEM);
332 	}
333 
334 	/*
335 	 * Now fle is totally ours. It is detached from all lists,
336 	 * so we can safely edit it.
337 	 */
338 	fle->f.version = IPVERSION;
339 	bcopy(r, &fle->f.r, sizeof(struct flow_rec));
340 	fle->f.bytes = plen;
341 	fle->f.packets = 1;
342 	fle->f.tcp_flags = tcp_flags;
343 
344 	fle->f.first = fle->f.last = time_uptime;
345 
346 	/*
347 	 * First we do a routing table lookup on the destination address, so we
348 	 * can fill in out_ifx, dst_mask, nexthop and, in future releases, dst_as.
349 	 */
350 	if ((flags & NG_NETFLOW_CONF_NODSTLOOKUP) == 0) {
351 		struct rtentry *rt;
352 		struct route_nhop_data rnd;
353 
354 		rt = fib4_lookup_rt(r->fib, fle->f.r.r_dst, 0, NHR_NONE, &rnd);
355 		if (rt != NULL) {
356 			struct in_addr addr;
357 			uint32_t scopeid;
358 			struct nhop_object *nh = nhop_select_func(rnd.rnd_nhop, 0);
359 			int plen;
360 
361 			rt_get_inet_prefix_plen(rt, &addr, &plen, &scopeid);
362 			fle->f.fle_o_ifx = nh->nh_ifp->if_index;
363 			if (nh->gw_sa.sa_family == AF_INET)
364 				fle->f.next_hop = nh->gw4_sa.sin_addr;
365 			fle->f.dst_mask = plen;
366 		}
367 	}
368 
369 	/* Do route lookup on source address, to fill in src_mask. */
370 	if ((flags & NG_NETFLOW_CONF_NOSRCLOOKUP) == 0) {
371 		struct rtentry *rt;
372 		struct route_nhop_data rnd;
373 
374 		rt = fib4_lookup_rt(r->fib, fle->f.r.r_src, 0, NHR_NONE, &rnd);
375 		if (rt != NULL) {
376 			struct in_addr addr;
377 			uint32_t scopeid;
378 			int plen;
379 
380 			rt_get_inet_prefix_plen(rt, &addr, &plen, &scopeid);
381 			fle->f.src_mask = plen;
382 		}
383 	}
384 
385 	/* Push the new flow to the end of the hash chain. */
386 	TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
387 
388 	return (0);
389 }
390 #endif
391 
392 #ifdef INET6
393 static int
394 hash6_insert(priv_p priv, struct flow_hash_entry *hsh6, struct flow6_rec *r,
395 	int plen, uint8_t flags, uint8_t tcp_flags)
396 {
397 	struct flow6_entry *fle6;
398 
399 	mtx_assert(&hsh6->mtx, MA_OWNED);
400 
401 	fle6 = uma_zalloc_arg(priv->zone6, priv, M_NOWAIT);
402 	if (fle6 == NULL) {
403 		priv->nfinfo_alloc_failed++;
404 		return (ENOMEM);
405 	}
406 
407 	/*
408 	 * Now fle is totally ours. It is detached from all lists,
409 	 * so we can safely edit it.
410 	 */
411 
412 	fle6->f.version = IP6VERSION;
413 	bcopy(r, &fle6->f.r, sizeof(struct flow6_rec));
414 	fle6->f.bytes = plen;
415 	fle6->f.packets = 1;
416 	fle6->f.tcp_flags = tcp_flags;
417 
418 	fle6->f.first = fle6->f.last = time_uptime;
419 
420 	/*
421 	 * First we do a routing table lookup on the destination address, so we
422 	 * can fill in out_ifx, dst_mask, nexthop and, in future releases, dst_as.
423 	 */
424 	if ((flags & NG_NETFLOW_CONF_NODSTLOOKUP) == 0) {
425 		struct rtentry *rt;
426 		struct route_nhop_data rnd;
427 
428 		rt = fib6_lookup_rt(r->fib, &fle6->f.r.dst.r_dst6, 0, NHR_NONE, &rnd);
429 		if (rt != NULL) {
430 			struct in6_addr addr;
431 			uint32_t scopeid;
432 			struct nhop_object *nh = nhop_select_func(rnd.rnd_nhop, 0);
433 			int plen;
434 
435 			rt_get_inet6_prefix_plen(rt, &addr, &plen, &scopeid);
436 			fle6->f.fle_o_ifx = nh->nh_ifp->if_index;
437 			if (nh->gw_sa.sa_family == AF_INET6)
438 				fle6->f.n.next_hop6 = nh->gw6_sa.sin6_addr;
439 			fle6->f.dst_mask = plen;
440 		}
441 	}
442 
443 	if ((flags & NG_NETFLOW_CONF_NOSRCLOOKUP) == 0) {
444 		/* Do route lookup on source address, to fill in src_mask. */
445 		struct rtentry *rt;
446 		struct route_nhop_data rnd;
447 
448 		rt = fib6_lookup_rt(r->fib, &fle6->f.r.src.r_src6, 0, NHR_NONE, &rnd);
449 		if (rt != NULL) {
450 			struct in6_addr addr;
451 			uint32_t scopeid;
452 			int plen;
453 
454 			rt_get_inet6_prefix_plen(rt, &addr, &plen, &scopeid);
455 			fle6->f.src_mask = plen;
456 		}
457 	}
458 
459 	/* Push the new flow to the end of the hash chain. */
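	/*
	 * The cast below is safe: the code relies on struct flow6_entry
	 * sharing its fle_hash linkage and version field layout with
	 * struct flow_entry, and the version field is how IPv4 and IPv6
	 * entries are told apart later.
	 */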
460 	TAILQ_INSERT_TAIL(&hsh6->head, (struct flow_entry *)fle6, fle_hash);
461 
462 	return (0);
463 }
464 #endif
465 
466 /*
467  * Non-static functions called from ng_netflow.c
468  */
469 
470 /* Allocate memory and set up flow cache */
471 void
472 ng_netflow_cache_init(priv_p priv)
473 {
474 	struct flow_hash_entry *hsh;
475 	int i;
476 
477 	/* Initialize cache UMA zone. */
478 	priv->zone = uma_zcreate("NetFlow IPv4 cache",
479 	    sizeof(struct flow_entry), NULL, NULL, NULL, NULL,
480 	    UMA_ALIGN_CACHE, 0);
481 	uma_zone_set_max(priv->zone, CACHESIZE);
482 #ifdef INET6
483 	priv->zone6 = uma_zcreate("NetFlow IPv6 cache",
484 	    sizeof(struct flow6_entry), NULL, NULL, NULL, NULL,
485 	    UMA_ALIGN_CACHE, 0);
486 	uma_zone_set_max(priv->zone6, CACHESIZE);
487 #endif
488 
489 	/* Allocate hash. */
490 	priv->hash = malloc(NBUCKETS * sizeof(struct flow_hash_entry),
491 	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);
492 
493 	/* Initialize hash. */
494 	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++) {
495 		mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
496 		TAILQ_INIT(&hsh->head);
497 	}
498 
499 #ifdef INET6
500 	/* Allocate hash. */
501 	priv->hash6 = malloc(NBUCKETS * sizeof(struct flow_hash_entry),
502 	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);
503 
504 	/* Initialize hash. */
505 	for (i = 0, hsh = priv->hash6; i < NBUCKETS; i++, hsh++) {
506 		mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
507 		TAILQ_INIT(&hsh->head);
508 	}
509 #endif
510 
511 	priv->nfinfo_bytes = counter_u64_alloc(M_WAITOK);
512 	priv->nfinfo_packets = counter_u64_alloc(M_WAITOK);
513 	priv->nfinfo_bytes6 = counter_u64_alloc(M_WAITOK);
514 	priv->nfinfo_packets6 = counter_u64_alloc(M_WAITOK);
515 	priv->nfinfo_sbytes = counter_u64_alloc(M_WAITOK);
516 	priv->nfinfo_spackets = counter_u64_alloc(M_WAITOK);
517 	priv->nfinfo_sbytes6 = counter_u64_alloc(M_WAITOK);
518 	priv->nfinfo_spackets6 = counter_u64_alloc(M_WAITOK);
519 	priv->nfinfo_act_exp = counter_u64_alloc(M_WAITOK);
520 	priv->nfinfo_inact_exp = counter_u64_alloc(M_WAITOK);
521 
522 	ng_netflow_v9_cache_init(priv);
523 	CTR0(KTR_NET, "ng_netflow startup()");
524 }
525 
526 /* Initialize new FIB table for v5 and v9 */
527 int
528 ng_netflow_fib_init(priv_p priv, int fib)
529 {
530 	fib_export_p	fe = priv_to_fib(priv, fib);
531 
532 	CTR1(KTR_NET, "ng_netflow(): fib init: %d", fib);
533 
534 	if (fe != NULL)
535 		return (0);
536 
537 	if ((fe = malloc(sizeof(struct fib_export), M_NETGRAPH,
538 	    M_NOWAIT | M_ZERO)) == NULL)
539 		return (ENOMEM);
540 
541 	mtx_init(&fe->export_mtx, "export dgram lock", NULL, MTX_DEF);
542 	mtx_init(&fe->export9_mtx, "export9 dgram lock", NULL, MTX_DEF);
543 	fe->fib = fib;
544 	fe->domain_id = fib;
545 
546 	if (atomic_cmpset_ptr((volatile uintptr_t *)&priv->fib_data[fib],
547 	    (uintptr_t)NULL, (uintptr_t)fe) == 0) {
548 		/* FIB already set up by other ISR */
549 		CTR3(KTR_NET, "ng_netflow(): fib init: %d setup %p but got %p",
550 		    fib, fe, priv_to_fib(priv, fib));
551 		mtx_destroy(&fe->export_mtx);
552 		mtx_destroy(&fe->export9_mtx);
553 		free(fe, M_NETGRAPH);
554 	} else {
555 		/* Increase counter for statistics */
556 		CTR3(KTR_NET, "ng_netflow(): fib %d setup to %p (%p)",
557 		    fib, fe, priv_to_fib(priv, fib));
558 		priv->nfinfo_alloc_fibs++;
559 	}
560 
561 	return (0);
562 }
563 
564 /* Free all flow cache memory. Called from node close method. */
565 void
566 ng_netflow_cache_flush(priv_p priv)
567 {
568 	struct flow_entry	*fle, *fle1;
569 	struct flow_hash_entry	*hsh;
570 	struct netflow_export_item exp;
571 	fib_export_p fe;
572 	int i;
573 
574 	bzero(&exp, sizeof(exp));
575 
576 	/*
577 	 * We are going to free data that is probably billable.
578 	 * Expire everything before freeing it.
579 	 * No locking is required since the callout is already drained.
580 	 */
581 	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++)
582 		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
583 			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
584 			fe = priv_to_fib(priv, fle->f.r.fib);
585 			expire_flow(priv, fe, fle, NG_QUEUE);
586 		}
587 #ifdef INET6
588 	for (hsh = priv->hash6, i = 0; i < NBUCKETS; hsh++, i++)
589 		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
590 			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
591 			fe = priv_to_fib(priv, fle->f.r.fib);
592 			expire_flow(priv, fe, fle, NG_QUEUE);
593 		}
594 #endif
595 
596 	uma_zdestroy(priv->zone);
597 	/* Destroy hash mutexes. */
598 	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++)
599 		mtx_destroy(&hsh->mtx);
600 
601 	/* Free hash memory. */
602 	if (priv->hash != NULL)
603 		free(priv->hash, M_NETFLOW_HASH);
604 #ifdef INET6
605 	uma_zdestroy(priv->zone6);
606 	/* Destroy hash mutexes. */
607 	for (i = 0, hsh = priv->hash6; i < NBUCKETS; i++, hsh++)
608 		mtx_destroy(&hsh->mtx);
609 
610 	/* Free hash memory. */
611 	if (priv->hash6 != NULL)
612 		free(priv->hash6, M_NETFLOW_HASH);
613 #endif
614 
615 	for (i = 0; i < priv->maxfibs; i++) {
616 		if ((fe = priv_to_fib(priv, i)) == NULL)
617 			continue;
618 
619 		if (fe->exp.item != NULL)
620 			export_send(priv, fe, fe->exp.item, NG_QUEUE);
621 
622 		if (fe->exp.item9 != NULL)
623 			export9_send(priv, fe, fe->exp.item9,
624 			    fe->exp.item9_opt, NG_QUEUE);
625 
626 		mtx_destroy(&fe->export_mtx);
627 		mtx_destroy(&fe->export9_mtx);
628 		free(fe, M_NETGRAPH);
629 	}
630 
631 	counter_u64_free(priv->nfinfo_bytes);
632 	counter_u64_free(priv->nfinfo_packets);
633 	counter_u64_free(priv->nfinfo_bytes6);
634 	counter_u64_free(priv->nfinfo_packets6);
635 	counter_u64_free(priv->nfinfo_sbytes);
636 	counter_u64_free(priv->nfinfo_spackets);
637 	counter_u64_free(priv->nfinfo_sbytes6);
638 	counter_u64_free(priv->nfinfo_spackets6);
639 	counter_u64_free(priv->nfinfo_act_exp);
640 	counter_u64_free(priv->nfinfo_inact_exp);
641 
642 	ng_netflow_v9_cache_flush(priv);
643 }
644 
645 #ifdef INET
646 /* Insert an IPv4 packet into the flow cache. */
647 int
648 ng_netflow_flow_add(priv_p priv, fib_export_p fe, struct ip *ip,
649     caddr_t upper_ptr, uint8_t upper_proto, uint8_t flags,
650     unsigned int src_if_index)
651 {
652 	struct flow_entry	*fle, *fle1;
653 	struct flow_hash_entry	*hsh;
654 	struct flow_rec		r;
655 	int			hlen, plen;
656 	int			error = 0;
657 	uint16_t		eproto;
658 	uint8_t			tcp_flags = 0;
659 
660 	bzero(&r, sizeof(r));
661 
662 	if (ip->ip_v != IPVERSION)
663 		return (EINVAL);
664 
665 	hlen = ip->ip_hl << 2;
666 	if (hlen < sizeof(struct ip))
667 		return (EINVAL);
668 
669 	eproto = ETHERTYPE_IP;
670 	/* Assume L4 template by default */
671 	r.flow_type = NETFLOW_V9_FLOW_V4_L4;
672 
673 	r.r_src = ip->ip_src;
674 	r.r_dst = ip->ip_dst;
675 	r.fib = fe->fib;
676 
677 	plen = ntohs(ip->ip_len);
678 
679 	r.r_ip_p = ip->ip_p;
680 	r.r_tos = ip->ip_tos;
681 
682 	r.r_i_ifx = src_if_index;
683 
684 	/*
685 	 * XXX NOTE: only the first fragment of a fragmented TCP, UDP or
686 	 * ICMP packet will be recorded with proper s_port and d_port.
687 	 * Subsequent fragments will be recorded simply as IP packets with
688 	 * ip_proto = ip->ip_p and s_port, d_port set to zero.
689 	 * I know it looks like a bug, but I don't want to re-implement
690 	 * IP packet reassembly here. Anyway, the (in)famous trafd works this
691 	 * way - and nobody has complained yet :)
692 	 */
693 	if ((ip->ip_off & htons(IP_OFFMASK)) == 0)
694 		switch(r.r_ip_p) {
695 		case IPPROTO_TCP:
696 		    {
697 			struct tcphdr *tcp;
698 
699 			tcp = (struct tcphdr *)((caddr_t )ip + hlen);
700 			r.r_sport = tcp->th_sport;
701 			r.r_dport = tcp->th_dport;
702 			tcp_flags = tcp->th_flags;
703 			break;
704 		    }
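		/*
		 * For UDP we copy both 16-bit ports with a single 32-bit
		 * load; r_ports overlays r_sport and r_dport in the flow
		 * record.
		 */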
705 		case IPPROTO_UDP:
706 			r.r_ports = *(uint32_t *)((caddr_t )ip + hlen);
707 			break;
708 		}
709 
710 	counter_u64_add(priv->nfinfo_packets, 1);
711 	counter_u64_add(priv->nfinfo_bytes, plen);
712 
713 	/* Find hash slot. */
714 	hsh = &priv->hash[ip_hash(&r)];
715 
716 	mtx_lock(&hsh->mtx);
717 
718 	/*
719 	 * Go through the hash and find our entry. If we encounter an
720 	 * entry that should be expired, purge it. We do a reverse
721 	 * search since the most active entries are kept at the tail, and
722 	 * most searches are done on the most active entries.
723 	 */
724 	TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
725 		if (bcmp(&r, &fle->f.r, sizeof(struct flow_rec)) == 0)
726 			break;
727 		if ((INACTIVE(fle) && SMALL(fle)) || AGED(fle)) {
728 			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
729 			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib),
730 			    fle, NG_QUEUE);
731 			counter_u64_add(priv->nfinfo_act_exp, 1);
732 		}
733 	}
734 
735 	if (fle) {			/* An existing entry. */
736 
737 		fle->f.bytes += plen;
738 		fle->f.packets ++;
739 		fle->f.tcp_flags |= tcp_flags;
740 		fle->f.last = time_uptime;
741 
742 		/*
743 		 * We have the following reasons to expire a flow actively:
744 		 * - it hit the active timeout
745 		 * - a TCP connection has closed
746 		 * - its byte counter is about to overflow
747 		 */
748 		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle) ||
749 		    (fle->f.bytes >= (CNTR_MAX - IF_MAXMTU)) ) {
750 			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
751 			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib),
752 			    fle, NG_QUEUE);
753 			counter_u64_add(priv->nfinfo_act_exp, 1);
754 		} else {
755 			/*
756 			 * It is the newest; move it to the tail
757 			 * if it isn't there already, so the next search
758 			 * will locate it quicker.
759 			 */
760 			if (fle != TAILQ_LAST(&hsh->head, fhead)) {
761 				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
762 				TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
763 			}
764 		}
765 	} else				/* A new flow entry. */
766 		error = hash_insert(priv, hsh, &r, plen, flags, tcp_flags);
767 
768 	mtx_unlock(&hsh->mtx);
769 
770 	return (error);
771 }
772 #endif
773 
774 #ifdef INET6
775 /* Insert an IPv6 packet into the flow cache. */
776 int
777 ng_netflow_flow6_add(priv_p priv, fib_export_p fe, struct ip6_hdr *ip6,
778     caddr_t upper_ptr, uint8_t upper_proto, uint8_t flags,
779     unsigned int src_if_index)
780 {
781 	struct flow_entry	*fle = NULL, *fle1;
782 	struct flow6_entry	*fle6;
783 	struct flow_hash_entry	*hsh;
784 	struct flow6_rec	r;
785 	int			plen;
786 	int			error = 0;
787 	uint8_t			tcp_flags = 0;
788 
789 	/* check version */
790 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION)
791 		return (EINVAL);
792 
793 	bzero(&r, sizeof(r));
794 
795 	r.src.r_src6 = ip6->ip6_src;
796 	r.dst.r_dst6 = ip6->ip6_dst;
797 	r.fib = fe->fib;
798 
799 	/* Assume L4 template by default */
800 	r.flow_type = NETFLOW_V9_FLOW_V6_L4;
801 
802 	plen = ntohs(ip6->ip6_plen) + sizeof(struct ip6_hdr);
803 
804 #if 0
805 	/* XXX: set DSCP/CoS value */
806 	r.r_tos = ip->ip_tos;
807 #endif
808 	if ((flags & NG_NETFLOW_IS_FRAG) == 0) {
809 		switch(upper_proto) {
810 		case IPPROTO_TCP:
811 		    {
812 			struct tcphdr *tcp;
813 
814 			tcp = (struct tcphdr *)upper_ptr;
815 			r.r_ports = *(uint32_t *)upper_ptr;
816 			tcp_flags = tcp->th_flags;
817 			break;
818 		    }
819  		case IPPROTO_UDP:
820 		case IPPROTO_SCTP:
821 			r.r_ports = *(uint32_t *)upper_ptr;
822 			break;
823 		}
824 	}
825 
826 	r.r_ip_p = upper_proto;
827 	r.r_i_ifx = src_if_index;
828 
829 	counter_u64_add(priv->nfinfo_packets6, 1);
830 	counter_u64_add(priv->nfinfo_bytes6, plen);
831 
832 	/* Find hash slot. */
833 	hsh = &priv->hash6[ip6_hash(&r)];
834 
835 	mtx_lock(&hsh->mtx);
836 
837 	/*
838 	 * Go through the hash and find our entry. If we encounter an
839 	 * entry that should be expired, purge it. We do a reverse
840 	 * search since the most active entries are kept at the tail, and
841 	 * most searches are done on the most active entries.
842 	 */
843 	TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
844 		if (fle->f.version != IP6VERSION)
845 			continue;
846 		fle6 = (struct flow6_entry *)fle;
847 		if (bcmp(&r, &fle6->f.r, sizeof(struct flow6_rec)) == 0)
848 			break;
849 		if ((INACTIVE(fle6) && SMALL(fle6)) || AGED(fle6)) {
850 			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
851 			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib), fle,
852 			    NG_QUEUE);
853 			counter_u64_add(priv->nfinfo_act_exp, 1);
854 		}
855 	}
856 
857 	if (fle != NULL) {			/* An existing entry. */
858 		fle6 = (struct flow6_entry *)fle;
859 
860 		fle6->f.bytes += plen;
861 		fle6->f.packets ++;
862 		fle6->f.tcp_flags |= tcp_flags;
863 		fle6->f.last = time_uptime;
864 
865 		/*
866 		 * We have the following reasons to expire a flow actively:
867 		 * - it hit the active timeout
868 		 * - a TCP connection has closed
869 		 * - its byte counter is about to overflow
870 		 */
871 		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle6) ||
872 		    (fle6->f.bytes >= (CNTR_MAX - IF_MAXMTU)) ) {
873 			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
874 			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib), fle,
875 			    NG_QUEUE);
876 			counter_u64_add(priv->nfinfo_act_exp, 1);
877 		} else {
878 			/*
879 			 * It is the newest; move it to the tail
880 			 * if it isn't there already, so the next search
881 			 * will locate it quicker.
882 			 */
883 			if (fle != TAILQ_LAST(&hsh->head, fhead)) {
884 				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
885 				TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
886 			}
887 		}
888 	} else				/* A new flow entry. */
889 		error = hash6_insert(priv, hsh, &r, plen, flags, tcp_flags);
890 
891 	mtx_unlock(&hsh->mtx);
892 
893 	return (error);
894 }
895 #endif
896 
897 /*
898  * Return records from the cache to userland.
899  *
900  * TODO: matching a particular IP should be done here, in the kernel.
901  */
902 int
903 ng_netflow_flow_show(priv_p priv, struct ngnf_show_header *req,
904 struct ngnf_show_header *resp)
905 {
906 	struct flow_hash_entry	*hsh;
907 	struct flow_entry	*fle;
908 	struct flow_entry_data	*data = (struct flow_entry_data *)(resp + 1);
909 #ifdef INET6
910 	struct flow6_entry_data	*data6 = (struct flow6_entry_data *)(resp + 1);
911 #endif
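	/* Flow records are copied into the reply right after the response header. */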
912 	int	i, max;
913 
914 	i = req->hash_id;
915 	if (i > NBUCKETS-1)
916 		return (EINVAL);
917 
918 #ifdef INET6
919 	if (req->version == 6) {
920 		resp->version = 6;
921 		hsh = priv->hash6 + i;
922 		max = NREC6_AT_ONCE;
923 	} else
924 #endif
925 	if (req->version == 4) {
926 		resp->version = 4;
927 		hsh = priv->hash + i;
928 		max = NREC_AT_ONCE;
929 	} else
930 		return (EINVAL);
931 
932 	/*
933 	 * We will transfer no more than NREC_AT_ONCE records; more data
934 	 * will come in the next message.
935 	 * We send the current hash index and the current record number in
936 	 * the list to userland, and userland should return them back to us.
937 	 * Then we will restart from that entry.
938 	 *
939 	 * The resulting cache snapshot can be inaccurate if flow expiration
940 	 * takes place on a hash item between userland data requests for
941 	 * that hash item id.
942 	 */
943 	resp->nentries = 0;
944 	for (; i < NBUCKETS; hsh++, i++) {
945 		int list_id;
946 
947 		if (mtx_trylock(&hsh->mtx) == 0) {
948 			/*
949 			 * The requested hash index is not available;
950 			 * leave the decision to skip or to re-request
951 			 * the data to userland.
952 			 */
953 			resp->hash_id = i;
954 			resp->list_id = 0;
955 			return (0);
956 		}
957 
958 		list_id = 0;
959 		TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
960 			if (hsh->mtx.mtx_lock & MTX_CONTESTED) {
961 				resp->hash_id = i;
962 				resp->list_id = list_id;
963 				mtx_unlock(&hsh->mtx);
964 				return (0);
965 			}
966 
967 			list_id++;
968 			/* Search for particular record in list. */
969 			if (req->list_id > 0) {
970 				if (list_id < req->list_id)
971 					continue;
972 
973 				/* Requested list position found. */
974 				req->list_id = 0;
975 			}
976 #ifdef INET6
977 			if (req->version == 6) {
978 				struct flow6_entry *fle6;
979 
980 				fle6 = (struct flow6_entry *)fle;
981 				bcopy(&fle6->f, data6 + resp->nentries,
982 				    sizeof(fle6->f));
983 			} else
984 #endif
985 				bcopy(&fle->f, data + resp->nentries,
986 				    sizeof(fle->f));
987 			resp->nentries++;
988 			if (resp->nentries == max) {
989 				resp->hash_id = i;
990 				/*
991 				 * If it was the last item in the list,
992 				 * we simply skip to the next hash_id.
993 				 */
994 				resp->list_id = list_id + 1;
995 				mtx_unlock(&hsh->mtx);
996 				return (0);
997 			}
998 		}
999 		mtx_unlock(&hsh->mtx);
1000 	}
1001 
1002 	resp->hash_id = resp->list_id = 0;
1003 
1004 	return (0);
1005 }
1006 
1007 /* We have a full datagram in privdata. Send it to the export hook. */
1008 static int
1009 export_send(priv_p priv, fib_export_p fe, item_p item, int flags)
1010 {
1011 	struct mbuf *m = NGI_M(item);
1012 	struct netflow_v5_export_dgram *dgram = mtod(m,
1013 					struct netflow_v5_export_dgram *);
1014 	struct netflow_v5_header *header = &dgram->header;
1015 	struct timespec ts;
1016 	int error = 0;
1017 
1018 	/* Fill mbuf header. */
1019 	m->m_len = m->m_pkthdr.len = sizeof(struct netflow_v5_record) *
1020 	   header->count + sizeof(struct netflow_v5_header);
1021 
1022 	/* Fill export header. */
1023 	header->sys_uptime = htonl(MILLIUPTIME(time_uptime));
1024 	getnanotime(&ts);
1025 	header->unix_secs  = htonl(ts.tv_sec);
1026 	header->unix_nsecs = htonl(ts.tv_nsec);
1027 	header->engine_type = 0;
1028 	header->engine_id = fe->domain_id;
1029 	header->pad = 0;
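	/*
	 * flow_seq carries the sequence number of the first record in this
	 * datagram: atomic_fetchadd_32() returns the old value while
	 * advancing the per-FIB counter by the number of records sent.
	 */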
1030 	header->flow_seq = htonl(atomic_fetchadd_32(&fe->flow_seq,
1031 	    header->count));
1032 	header->count = htons(header->count);
1033 
1034 	if (priv->export != NULL)
1035 		NG_FWD_ITEM_HOOK_FLAGS(error, item, priv->export, flags);
1036 	else
1037 		NG_FREE_ITEM(item);
1038 
1039 	return (error);
1040 }
1041 
1042 /* Add export record to dgram. */
1043 static int
1044 export_add(item_p item, struct flow_entry *fle)
1045 {
1046 	struct netflow_v5_export_dgram *dgram = mtod(NGI_M(item),
1047 					struct netflow_v5_export_dgram *);
1048 	struct netflow_v5_header *header = &dgram->header;
1049 	struct netflow_v5_record *rec;
1050 
1051 	rec = &dgram->r[header->count];
1052 	header->count ++;
1053 
1054 	KASSERT(header->count <= NETFLOW_V5_MAX_RECORDS,
1055 	    ("ng_netflow: export too big"));
1056 
1057 	/* Fill in export record. */
1058 	rec->src_addr = fle->f.r.r_src.s_addr;
1059 	rec->dst_addr = fle->f.r.r_dst.s_addr;
1060 	rec->next_hop = fle->f.next_hop.s_addr;
1061 	rec->i_ifx    = htons(fle->f.fle_i_ifx);
1062 	rec->o_ifx    = htons(fle->f.fle_o_ifx);
1063 	rec->packets  = htonl(fle->f.packets);
1064 	rec->octets   = htonl(fle->f.bytes);
1065 	rec->first    = htonl(MILLIUPTIME(fle->f.first));
1066 	rec->last     = htonl(MILLIUPTIME(fle->f.last));
1067 	rec->s_port   = fle->f.r.r_sport;
1068 	rec->d_port   = fle->f.r.r_dport;
1069 	rec->flags    = fle->f.tcp_flags;
1070 	rec->prot     = fle->f.r.r_ip_p;
1071 	rec->tos      = fle->f.r.r_tos;
1072 	rec->dst_mask = fle->f.dst_mask;
1073 	rec->src_mask = fle->f.src_mask;
1074 	rec->pad1     = 0;
1075 	rec->pad2     = 0;
1076 
1077 	/* Unsupported fields. */
1078 	rec->src_as = rec->dst_as = 0;
1079 
1080 	if (header->count == NETFLOW_V5_MAX_RECORDS)
1081 		return (1); /* end of datagram */
1082 	else
1083 		return (0);
1084 }
1085 
1086 /* Periodic flow expiry run. */
1087 void
1088 ng_netflow_expire(void *arg)
1089 {
1090 	struct flow_entry	*fle, *fle1;
1091 	struct flow_hash_entry	*hsh;
1092 	priv_p			priv = (priv_p )arg;
1093 	int			used, i;
1094 
1095 	/*
1096 	 * Go through the whole cache.
1097 	 */
1098 	used = uma_zone_get_cur(priv->zone);
1099 	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++) {
1100 		/*
1101 		 * Skip entries that are already being worked on.
1102 		 */
1103 		if (mtx_trylock(&hsh->mtx) == 0)
1104 			continue;
1105 
1106 		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
1107 			/*
1108 			 * Interrupt thread wants this entry!
1109 			 * Quick! Quick! Bail out!
1110 			 */
1111 			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
1112 				break;
1113 
1114 			/*
1115 			 * Don't expire aggressively while the predicted hash
1116 			 * collision ratio is small.
1117 			 */
1118 			if (used <= (NBUCKETS*2) && !INACTIVE(fle))
1119 				break;
1120 
1121 			if ((INACTIVE(fle) && (SMALL(fle) ||
1122 			    (used > (NBUCKETS*2)))) || AGED(fle)) {
1123 				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
1124 				expire_flow(priv, priv_to_fib(priv,
1125 				    fle->f.r.fib), fle, NG_NOFLAGS);
1126 				used--;
1127 				counter_u64_add(priv->nfinfo_inact_exp, 1);
1128 			}
1129 		}
1130 		mtx_unlock(&hsh->mtx);
1131 	}
1132 
1133 #ifdef INET6
1134 	used = uma_zone_get_cur(priv->zone6);
1135 	for (hsh = priv->hash6, i = 0; i < NBUCKETS; hsh++, i++) {
1136 		struct flow6_entry	*fle6;
1137 
1138 		/*
1139 		 * Skip entries that are already being worked on.
1140 		 */
1141 		if (mtx_trylock(&hsh->mtx) == 0)
1142 			continue;
1143 
1144 		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
1145 			fle6 = (struct flow6_entry *)fle;
1146 			/*
1147 			 * Interrupt thread wants this entry!
1148 			 * Quick! Quick! Bail out!
1149 			 */
1150 			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
1151 				break;
1152 
1153 			/*
1154 			 * Don't expire aggressively while the predicted hash
1155 			 * collision ratio is small.
1156 			 */
1157 			if (used <= (NBUCKETS*2) && !INACTIVE(fle6))
1158 				break;
1159 
1160 			if ((INACTIVE(fle6) && (SMALL(fle6) ||
1161 			    (used > (NBUCKETS*2)))) || AGED(fle6)) {
1162 				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
1163 				expire_flow(priv, priv_to_fib(priv,
1164 				    fle->f.r.fib), fle, NG_NOFLAGS);
1165 				used--;
1166 				counter_u64_add(priv->nfinfo_inact_exp, 1);
1167 			}
1168 		}
1169 		mtx_unlock(&hsh->mtx);
1170 	}
1171 #endif
1172 
1173 	/* Schedule next expire. */
1174 	callout_reset(&priv->exp_callout, (1*hz), &ng_netflow_expire,
1175 	    (void *)priv);
1176 }
1177