xref: /freebsd/sys/netgraph/netflow/netflow.c (revision 807b6a646a0a0dbc258bf239468b5d9f901d1f92)
1 /*-
2  * Copyright (c) 2010-2011 Alexander V. Chernikov <melifaro@ipfw.ru>
3  * Copyright (c) 2004-2005 Gleb Smirnoff <glebius@FreeBSD.org>
4  * Copyright (c) 2001-2003 Roman V. Palagin <romanp@unshadow.net>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $SourceForge: netflow.c,v 1.41 2004/09/05 11:41:10 glebius Exp $
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "opt_inet6.h"
35 #include "opt_route.h"
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/counter.h>
39 #include <sys/kernel.h>
40 #include <sys/ktr.h>
41 #include <sys/limits.h>
42 #include <sys/mbuf.h>
43 #include <sys/syslog.h>
44 #include <sys/socket.h>
45 
46 #include <net/if.h>
47 #include <net/if_var.h>
48 #include <net/route.h>
49 #include <net/ethernet.h>
50 #include <netinet/in.h>
51 #include <netinet/in_systm.h>
52 #include <netinet/ip.h>
53 #include <netinet/ip6.h>
54 #include <netinet/tcp.h>
55 #include <netinet/udp.h>
56 
57 #include <netgraph/ng_message.h>
58 #include <netgraph/netgraph.h>
59 
60 #include <netgraph/netflow/netflow.h>
61 #include <netgraph/netflow/netflow_v9.h>
62 #include <netgraph/netflow/ng_netflow.h>
63 
64 #define	NBUCKETS	(65536)		/* must be power of 2 */
65 
66 /* This hash is for TCP or UDP packets. */
67 #define FULL_HASH(addr1, addr2, port1, port2)	\
68 	(((addr1 ^ (addr1 >> 16) ^ 		\
69 	htons(addr2 ^ (addr2 >> 16))) ^ 	\
70 	port1 ^ htons(port2)) &			\
71 	(NBUCKETS - 1))
72 
73 /* This hash is for all other IP packets. */
74 #define ADDR_HASH(addr1, addr2)			\
75 	((addr1 ^ (addr1 >> 16) ^ 		\
76 	htons(addr2 ^ (addr2 >> 16))) &		\
77 	(NBUCKETS - 1))
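
/*
 * Illustrative sketch, not part of the build: NBUCKETS is a power of two
 * precisely so that the "& (NBUCKETS - 1)" in the macros above selects a
 * bucket as cheaply as a modulo would.  With NBUCKETS == 65536 the mask is
 * 0xffff, i.e. only the low 16 bits of the mixed value are kept.  The helper
 * below is hypothetical and only shows how a TCP/UDP 4-tuple maps to a
 * bucket index.
 */
#if 0
static inline uint32_t
example_bucket(uint32_t src, uint32_t dst, uint16_t sport, uint16_t dport)
{
	/* Equivalent to (mixed hash) % NBUCKETS, because NBUCKETS is 2^16. */
	return (FULL_HASH(src, dst, sport, dport));
}
#endif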
78 
79 /* Macros to shorten logical constructions */
80 /* XXX: priv must exist in namespace */
81 #define	INACTIVE(fle)	(time_uptime - fle->f.last > priv->nfinfo_inact_t)
82 #define	AGED(fle)	(time_uptime - fle->f.first > priv->nfinfo_act_t)
83 #define	ISFREE(fle)	(fle->f.packets == 0)
84 
85 /*
86  * 4 is a magic number: statistically, 4-packet flows outnumber
87  * 5-, 6-, 7-, ...-packet flows by an order of magnitude. Most UDP/ICMP
88  * scans are 1-packet flows (~90% of the flow cache). TCP scans take
89  * 2 packets when the host is reachable and 4 packets otherwise.
90  */
91 #define	SMALL(fle)	(fle->f.packets <= 4)
92 
93 MALLOC_DEFINE(M_NETFLOW_HASH, "netflow_hash", "NetFlow hash");
94 
95 static int export_add(item_p, struct flow_entry *);
96 static int export_send(priv_p, fib_export_p, item_p, int);
97 
98 static int hash_insert(priv_p, struct flow_hash_entry *, struct flow_rec *,
99     int, uint8_t, uint8_t);
100 #ifdef INET6
101 static int hash6_insert(priv_p, struct flow_hash_entry *, struct flow6_rec *,
102     int, uint8_t, uint8_t);
103 #endif
104 
105 static void expire_flow(priv_p, fib_export_p, struct flow_entry *, int);
106 
107 /*
108  * Generate hash for a given flow record.
109  *
110  * The FIB is not mixed into the hash because most VRFs carry public
111  * IPv4 addresses, which are unique even without a FIB. Private
112  * addresses can overlap, but that is resolved by the flow_rec bcmp(),
113  * which includes the fib id. In the IPv6 world addresses are all
114  * globally unique (not entirely true, there is FC00::/7 for example,
115  * but the chances of address overlap are much smaller).
116  */
117 static inline uint32_t
118 ip_hash(struct flow_rec *r)
119 {
120 
121 	switch (r->r_ip_p) {
122 	case IPPROTO_TCP:
123 	case IPPROTO_UDP:
124 		return FULL_HASH(r->r_src.s_addr, r->r_dst.s_addr,
125 		    r->r_sport, r->r_dport);
126 	default:
127 		return ADDR_HASH(r->r_src.s_addr, r->r_dst.s_addr);
128 	}
129 }
130 
131 #ifdef INET6
132 /* Generate hash for a given flow6 record. Use the low 4 octets of the v6 addresses. */
133 static inline uint32_t
134 ip6_hash(struct flow6_rec *r)
135 {
136 
137 	switch (r->r_ip_p) {
138 	case IPPROTO_TCP:
139 	case IPPROTO_UDP:
140 		return FULL_HASH(r->src.r_src6.__u6_addr.__u6_addr32[3],
141 		    r->dst.r_dst6.__u6_addr.__u6_addr32[3], r->r_sport,
142 		    r->r_dport);
143 	default:
144 		return ADDR_HASH(r->src.r_src6.__u6_addr.__u6_addr32[3],
145 		    r->dst.r_dst6.__u6_addr.__u6_addr32[3]);
146 	}
147 }
148 #endif
149 
150 /*
151  * Detach the export datagram from priv, if there is one.
152  * If there is none, allocate a new one.
153  */
154 static item_p
155 get_export_dgram(priv_p priv, fib_export_p fe)
156 {
157 	item_p	item = NULL;
158 
159 	mtx_lock(&fe->export_mtx);
160 	if (fe->exp.item != NULL) {
161 		item = fe->exp.item;
162 		fe->exp.item = NULL;
163 	}
164 	mtx_unlock(&fe->export_mtx);
165 
166 	if (item == NULL) {
167 		struct netflow_v5_export_dgram *dgram;
168 		struct mbuf *m;
169 
170 		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
171 		if (m == NULL)
172 			return (NULL);
173 		item = ng_package_data(m, NG_NOFLAGS);
174 		if (item == NULL)
175 			return (NULL);
176 		dgram = mtod(m, struct netflow_v5_export_dgram *);
177 		dgram->header.count = 0;
178 		dgram->header.version = htons(NETFLOW_V5);
179 		dgram->header.pad = 0;
180 	}
181 
182 	return (item);
183 }
184 
185 /*
186  * Re-attach an incomplete datagram back to priv.
187  * If another one is already there, send the incomplete one. */
188 static void
189 return_export_dgram(priv_p priv, fib_export_p fe, item_p item, int flags)
190 {
191 
192 	/*
193 	 * On SMP it may happen that another thread has already
194 	 * put its item there; in that case we bail out and
195 	 * send what we have to the collector.
196 	 */
197 	mtx_lock(&fe->export_mtx);
198 	if (fe->exp.item == NULL) {
199 		fe->exp.item = item;
200 		mtx_unlock(&fe->export_mtx);
201 	} else {
202 		mtx_unlock(&fe->export_mtx);
203 		export_send(priv, fe, item, flags);
204 	}
205 }
206 
207 /*
208  * The flow is over. Call export_add() and free the entry. If the
209  * datagram is full, call export_send().
210  */
211 static void
212 expire_flow(priv_p priv, fib_export_p fe, struct flow_entry *fle, int flags)
213 {
214 	struct netflow_export_item exp;
215 	uint16_t version = fle->f.version;
216 
217 	if ((priv->export != NULL) && (version == IPVERSION)) {
218 		exp.item = get_export_dgram(priv, fe);
219 		if (exp.item == NULL) {
220 			priv->nfinfo_export_failed++;
221 			if (priv->export9 != NULL)
222 				priv->nfinfo_export9_failed++;
223 			/* fle definitely contains IPv4 flow. */
224 			uma_zfree_arg(priv->zone, fle, priv);
225 			return;
226 		}
227 
228 		if (export_add(exp.item, fle) > 0)
229 			export_send(priv, fe, exp.item, flags);
230 		else
231 			return_export_dgram(priv, fe, exp.item, NG_QUEUE);
232 	}
233 
234 	if (priv->export9 != NULL) {
235 		exp.item9 = get_export9_dgram(priv, fe, &exp.item9_opt);
236 		if (exp.item9 == NULL) {
237 			priv->nfinfo_export9_failed++;
238 			if (version == IPVERSION)
239 				uma_zfree_arg(priv->zone, fle, priv);
240 #ifdef INET6
241 			else if (version == IP6VERSION)
242 				uma_zfree_arg(priv->zone6, fle, priv);
243 #endif
244 			else
245 				panic("ng_netflow: Unknown IP version: %d",
246 				    version);
247 			return;
248 		}
249 
250 		if (export9_add(exp.item9, exp.item9_opt, fle) > 0)
251 			export9_send(priv, fe, exp.item9, exp.item9_opt, flags);
252 		else
253 			return_export9_dgram(priv, fe, exp.item9,
254 			    exp.item9_opt, NG_QUEUE);
255 	}
256 
257 	if (version == IPVERSION)
258 		uma_zfree_arg(priv->zone, fle, priv);
259 #ifdef INET6
260 	else if (version == IP6VERSION)
261 		uma_zfree_arg(priv->zone6, fle, priv);
262 #endif
263 }
264 
265 /* Get a snapshot of node statistics */
266 void
267 ng_netflow_copyinfo(priv_p priv, struct ng_netflow_info *i)
268 {
269 
270 	i->nfinfo_bytes = counter_u64_fetch(priv->nfinfo_bytes);
271 	i->nfinfo_packets = counter_u64_fetch(priv->nfinfo_packets);
272 	i->nfinfo_bytes6 = counter_u64_fetch(priv->nfinfo_bytes6);
273 	i->nfinfo_packets6 = counter_u64_fetch(priv->nfinfo_packets6);
274 	i->nfinfo_sbytes = counter_u64_fetch(priv->nfinfo_sbytes);
275 	i->nfinfo_spackets = counter_u64_fetch(priv->nfinfo_spackets);
276 	i->nfinfo_sbytes6 = counter_u64_fetch(priv->nfinfo_sbytes6);
277 	i->nfinfo_spackets6 = counter_u64_fetch(priv->nfinfo_spackets6);
278 	i->nfinfo_act_exp = counter_u64_fetch(priv->nfinfo_act_exp);
279 	i->nfinfo_inact_exp = counter_u64_fetch(priv->nfinfo_inact_exp);
280 
281 	i->nfinfo_used = uma_zone_get_cur(priv->zone);
282 #ifdef INET6
283 	i->nfinfo_used6 = uma_zone_get_cur(priv->zone6);
284 #endif
285 
286 	i->nfinfo_alloc_failed = priv->nfinfo_alloc_failed;
287 	i->nfinfo_export_failed = priv->nfinfo_export_failed;
288 	i->nfinfo_export9_failed = priv->nfinfo_export9_failed;
289 	i->nfinfo_realloc_mbuf = priv->nfinfo_realloc_mbuf;
290 	i->nfinfo_alloc_fibs = priv->nfinfo_alloc_fibs;
291 	i->nfinfo_inact_t = priv->nfinfo_inact_t;
292 	i->nfinfo_act_t = priv->nfinfo_act_t;
293 }
294 
295 /*
296  * Insert a record into the given hash slot.
297  *
298  * First we grab a free flow entry, then fill in all the
299  * fields we can.
300  *
301  * TODO: consider dropping the hash mutex while filling in the datagram,
302  * as was done in a previous version. Needs testing and profiling
303  * to be sure.
304  */
305 static int
306 hash_insert(priv_p priv, struct flow_hash_entry *hsh, struct flow_rec *r,
307 	int plen, uint8_t flags, uint8_t tcp_flags)
308 {
309 	struct flow_entry *fle;
310 	struct sockaddr_in sin;
311 	struct rtentry *rt;
312 
313 	mtx_assert(&hsh->mtx, MA_OWNED);
314 
315 	fle = uma_zalloc_arg(priv->zone, priv, M_NOWAIT);
316 	if (fle == NULL) {
317 		priv->nfinfo_alloc_failed++;
318 		return (ENOMEM);
319 	}
320 
321 	/*
322 	 * Now fle is totally ours. It is detached from all lists,
323 	 * so we can safely edit it.
324 	 */
325 	fle->f.version = IPVERSION;
326 	bcopy(r, &fle->f.r, sizeof(struct flow_rec));
327 	fle->f.bytes = plen;
328 	fle->f.packets = 1;
329 	fle->f.tcp_flags = tcp_flags;
330 
331 	fle->f.first = fle->f.last = time_uptime;
332 
333 	/*
334 	 * First we do a route table lookup on the destination address, so we
335 	 * can fill in out_ifx, dst_mask, nexthop, and (in future releases) dst_as.
336 	 */
337 	if ((flags & NG_NETFLOW_CONF_NODSTLOOKUP) == 0) {
338 		bzero(&sin, sizeof(sin));
339 		sin.sin_len = sizeof(struct sockaddr_in);
340 		sin.sin_family = AF_INET;
341 		sin.sin_addr = fle->f.r.r_dst;
342 		rt = rtalloc1_fib((struct sockaddr *)&sin, 0, 0, r->fib);
343 		if (rt != NULL) {
344 			fle->f.fle_o_ifx = rt->rt_ifp->if_index;
345 
346 			if (rt->rt_flags & RTF_GATEWAY &&
347 			    rt->rt_gateway->sa_family == AF_INET)
348 				fle->f.next_hop =
349 				    ((struct sockaddr_in *)(rt->rt_gateway))->sin_addr;
350 
351 			if (rt_mask(rt))
352 				fle->f.dst_mask =
353 				    bitcount32(((struct sockaddr_in *)rt_mask(rt))->sin_addr.s_addr);
354 			else if (rt->rt_flags & RTF_HOST)
355 				/* Give up. We can't determine mask :( */
356 				fle->f.dst_mask = 32;
357 
358 			RTFREE_LOCKED(rt);
359 		}
360 	}
361 
362 	/* Do route lookup on source address, to fill in src_mask. */
363 	if ((flags & NG_NETFLOW_CONF_NOSRCLOOKUP) == 0) {
364 		bzero(&sin, sizeof(sin));
365 		sin.sin_len = sizeof(struct sockaddr_in);
366 		sin.sin_family = AF_INET;
367 		sin.sin_addr = fle->f.r.r_src;
368 		rt = rtalloc1_fib((struct sockaddr *)&sin, 0, 0, r->fib);
369 		if (rt != NULL) {
370 			if (rt_mask(rt))
371 				fle->f.src_mask =
372 				    bitcount32(((struct sockaddr_in *)rt_mask(rt))->sin_addr.s_addr);
373 			else if (rt->rt_flags & RTF_HOST)
374 				/* Give up. We can't determine mask :( */
375 				fle->f.src_mask = 32;
376 
377 			RTFREE_LOCKED(rt);
378 		}
379 	}
380 
381 	/* Push the new flow onto the tail of the hash chain. */
382 	TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
383 
384 	return (0);
385 }
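
/*
 * Sketch, not part of the build: the src_mask/dst_mask filled in above are
 * prefix lengths.  For a contiguous netmask the population count of its
 * 32-bit value equals the prefix length regardless of byte order, which is
 * why bitcount32() is applied to sin_addr.s_addr directly.  The function
 * below is a hypothetical example only.
 */
#if 0
static inline int
example_masklen(void)
{
	return (bitcount32(htonl(0xffffff00)));	/* 255.255.255.0 -> 24 */
}
#endif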
386 
387 #ifdef INET6
388 /* XXX: make this a proper function instead of a macro. */
389 #define ipv6_masklen(x)		bitcount32((x).__u6_addr.__u6_addr32[0]) + \
390 				bitcount32((x).__u6_addr.__u6_addr32[1]) + \
391 				bitcount32((x).__u6_addr.__u6_addr32[2]) + \
392 				bitcount32((x).__u6_addr.__u6_addr32[3])
393 #define RT_MASK6(x)	(ipv6_masklen(((struct sockaddr_in6 *)rt_mask(x))->sin6_addr))
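
/*
 * Sketch of the refactor suggested by the XXX above, not part of the build:
 * the same computation expressed as a proper inline function taking a
 * struct in6_addr instead of a multi-line macro.  The name ipv6_masklen_fn
 * is hypothetical.
 */
#if 0
static inline int
ipv6_masklen_fn(const struct in6_addr *m)
{
	return (bitcount32(m->__u6_addr.__u6_addr32[0]) +
	    bitcount32(m->__u6_addr.__u6_addr32[1]) +
	    bitcount32(m->__u6_addr.__u6_addr32[2]) +
	    bitcount32(m->__u6_addr.__u6_addr32[3]));
}
#endif
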
394 static int
395 hash6_insert(priv_p priv, struct flow_hash_entry *hsh6, struct flow6_rec *r,
396 	int plen, uint8_t flags, uint8_t tcp_flags)
397 {
398 	struct flow6_entry *fle6;
399 	struct sockaddr_in6 sin6;
400 	struct rtentry *rt;
401 
402 	mtx_assert(&hsh6->mtx, MA_OWNED);
403 
404 	fle6 = uma_zalloc_arg(priv->zone6, priv, M_NOWAIT);
405 	if (fle6 == NULL) {
406 		priv->nfinfo_alloc_failed++;
407 		return (ENOMEM);
408 	}
409 
410 	/*
411 	 * Now fle6 is totally ours. It is detached from all lists,
412 	 * so we can safely edit it.
413 	 */
414 
415 	fle6->f.version = IP6VERSION;
416 	bcopy(r, &fle6->f.r, sizeof(struct flow6_rec));
417 	fle6->f.bytes = plen;
418 	fle6->f.packets = 1;
419 	fle6->f.tcp_flags = tcp_flags;
420 
421 	fle6->f.first = fle6->f.last = time_uptime;
422 
423 	/*
424 	 * First we do a route table lookup on the destination address, so we
425 	 * can fill in out_ifx, dst_mask, nexthop, and (in future releases) dst_as.
426 	 */
427 	if ((flags & NG_NETFLOW_CONF_NODSTLOOKUP) == 0) {
428 		bzero(&sin6, sizeof(struct sockaddr_in6));
429 		sin6.sin6_len = sizeof(struct sockaddr_in6);
430 		sin6.sin6_family = AF_INET6;
431 		sin6.sin6_addr = r->dst.r_dst6;
432 
433 		rt = rtalloc1_fib((struct sockaddr *)&sin6, 0, 0, r->fib);
434 
435 		if (rt != NULL) {
436 			fle6->f.fle_o_ifx = rt->rt_ifp->if_index;
437 
438 			if (rt->rt_flags & RTF_GATEWAY &&
439 			    rt->rt_gateway->sa_family == AF_INET6)
440 				fle6->f.n.next_hop6 =
441 				    ((struct sockaddr_in6 *)(rt->rt_gateway))->sin6_addr;
442 
443 			if (rt_mask(rt))
444 				fle6->f.dst_mask = RT_MASK6(rt);
445 			else
446 				fle6->f.dst_mask = 128;
447 
448 			RTFREE_LOCKED(rt);
449 		}
450 	}
451 
452 	if ((flags & NG_NETFLOW_CONF_NOSRCLOOKUP) == 0) {
453 		/* Do route lookup on source address, to fill in src_mask. */
454 		bzero(&sin6, sizeof(struct sockaddr_in6));
455 		sin6.sin6_len = sizeof(struct sockaddr_in6);
456 		sin6.sin6_family = AF_INET6;
457 		sin6.sin6_addr = r->src.r_src6;
458 
459 		rt = rtalloc1_fib((struct sockaddr *)&sin6, 0, 0, r->fib);
460 
461 		if (rt != NULL) {
462 			if (rt_mask(rt))
463 				fle6->f.src_mask = RT_MASK6(rt);
464 			else
465 				fle6->f.src_mask = 128;
466 
467 			RTFREE_LOCKED(rt);
468 		}
469 	}
470 
471 	/* Push the new flow onto the tail of the hash chain. */
472 	TAILQ_INSERT_TAIL(&hsh6->head, (struct flow_entry *)fle6, fle_hash);
473 
474 	return (0);
475 }
476 #undef ipv6_masklen
477 #undef RT_MASK6
478 #endif
479 
480 
481 /*
482  * Non-static functions called from ng_netflow.c
483  */
484 
485 /* Allocate memory and set up flow cache */
486 void
487 ng_netflow_cache_init(priv_p priv)
488 {
489 	struct flow_hash_entry *hsh;
490 	int i;
491 
492 	/* Initialize cache UMA zone. */
493 	priv->zone = uma_zcreate("NetFlow IPv4 cache",
494 	    sizeof(struct flow_entry), NULL, NULL, NULL, NULL,
495 	    UMA_ALIGN_CACHE, 0);
496 	uma_zone_set_max(priv->zone, CACHESIZE);
497 #ifdef INET6
498 	priv->zone6 = uma_zcreate("NetFlow IPv6 cache",
499 	    sizeof(struct flow6_entry), NULL, NULL, NULL, NULL,
500 	    UMA_ALIGN_CACHE, 0);
501 	uma_zone_set_max(priv->zone6, CACHESIZE);
502 #endif
503 
504 	/* Allocate hash. */
505 	priv->hash = malloc(NBUCKETS * sizeof(struct flow_hash_entry),
506 	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);
507 
508 	/* Initialize hash. */
509 	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++) {
510 		mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
511 		TAILQ_INIT(&hsh->head);
512 	}
513 
514 #ifdef INET6
515 	/* Allocate hash. */
516 	priv->hash6 = malloc(NBUCKETS * sizeof(struct flow_hash_entry),
517 	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);
518 
519 	/* Initialize hash. */
520 	for (i = 0, hsh = priv->hash6; i < NBUCKETS; i++, hsh++) {
521 		mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
522 		TAILQ_INIT(&hsh->head);
523 	}
524 #endif
525 
526 	priv->nfinfo_bytes = counter_u64_alloc(M_WAITOK);
527 	priv->nfinfo_packets = counter_u64_alloc(M_WAITOK);
528 	priv->nfinfo_bytes6 = counter_u64_alloc(M_WAITOK);
529 	priv->nfinfo_packets6 = counter_u64_alloc(M_WAITOK);
530 	priv->nfinfo_sbytes = counter_u64_alloc(M_WAITOK);
531 	priv->nfinfo_spackets = counter_u64_alloc(M_WAITOK);
532 	priv->nfinfo_sbytes6 = counter_u64_alloc(M_WAITOK);
533 	priv->nfinfo_spackets6 = counter_u64_alloc(M_WAITOK);
534 	priv->nfinfo_act_exp = counter_u64_alloc(M_WAITOK);
535 	priv->nfinfo_inact_exp = counter_u64_alloc(M_WAITOK);
536 
537 	ng_netflow_v9_cache_init(priv);
538 	CTR0(KTR_NET, "ng_netflow startup()");
539 }
540 
541 /* Initialize new FIB table for v5 and v9 */
542 int
543 ng_netflow_fib_init(priv_p priv, int fib)
544 {
545 	fib_export_p	fe = priv_to_fib(priv, fib);
546 
547 	CTR1(KTR_NET, "ng_netflow(): fib init: %d", fib);
548 
549 	if (fe != NULL)
550 		return (0);
551 
552 	if ((fe = malloc(sizeof(struct fib_export), M_NETGRAPH,
553 	    M_NOWAIT | M_ZERO)) == NULL)
554 		return (ENOMEM);
555 
556 	mtx_init(&fe->export_mtx, "export dgram lock", NULL, MTX_DEF);
557 	mtx_init(&fe->export9_mtx, "export9 dgram lock", NULL, MTX_DEF);
558 	fe->fib = fib;
559 	fe->domain_id = fib;
560 
561 	if (atomic_cmpset_ptr((volatile uintptr_t *)&priv->fib_data[fib],
562 	    (uintptr_t)NULL, (uintptr_t)fe) == 0) {
563 		/* FIB already set up by other ISR */
564 		CTR3(KTR_NET, "ng_netflow(): fib init: %d setup %p but got %p",
565 		    fib, fe, priv_to_fib(priv, fib));
566 		mtx_destroy(&fe->export_mtx);
567 		mtx_destroy(&fe->export9_mtx);
568 		free(fe, M_NETGRAPH);
569 	} else {
570 		/* Increase counter for statistics */
571 		CTR3(KTR_NET, "ng_netflow(): fib %d setup to %p (%p)",
572 		    fib, fe, priv_to_fib(priv, fib));
573 		priv->nfinfo_alloc_fibs++;
574 	}
575 
576 	return (0);
577 }
578 
579 /* Free all flow cache memory. Called from node close method. */
580 void
581 ng_netflow_cache_flush(priv_p priv)
582 {
583 	struct flow_entry	*fle, *fle1;
584 	struct flow_hash_entry	*hsh;
585 	struct netflow_export_item exp;
586 	fib_export_p fe;
587 	int i;
588 
589 	bzero(&exp, sizeof(exp));
590 
591 	/*
592 	 * We are going to free probably billable data.
593 	 * Expire everything before freeing it.
594 	 * No locking is required since the callout has already been drained.
595 	 */
596 	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++)
597 		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
598 			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
599 			fe = priv_to_fib(priv, fle->f.r.fib);
600 			expire_flow(priv, fe, fle, NG_QUEUE);
601 		}
602 #ifdef INET6
603 	for (hsh = priv->hash6, i = 0; i < NBUCKETS; hsh++, i++)
604 		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
605 			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
606 			fe = priv_to_fib(priv, fle->f.r.fib);
607 			expire_flow(priv, fe, fle, NG_QUEUE);
608 		}
609 #endif
610 
611 	uma_zdestroy(priv->zone);
612 	/* Destroy hash mutexes. */
613 	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++)
614 		mtx_destroy(&hsh->mtx);
615 
616 	/* Free hash memory. */
617 	if (priv->hash != NULL)
618 		free(priv->hash, M_NETFLOW_HASH);
619 #ifdef INET6
620 	uma_zdestroy(priv->zone6);
621 	/* Destroy hash mutexes. */
622 	for (i = 0, hsh = priv->hash6; i < NBUCKETS; i++, hsh++)
623 		mtx_destroy(&hsh->mtx);
624 
625 	/* Free hash memory. */
626 	if (priv->hash6 != NULL)
627 		free(priv->hash6, M_NETFLOW_HASH);
628 #endif
629 
630 	for (i = 0; i < priv->maxfibs; i++) {
631 		if ((fe = priv_to_fib(priv, i)) == NULL)
632 			continue;
633 
634 		if (fe->exp.item != NULL)
635 			export_send(priv, fe, fe->exp.item, NG_QUEUE);
636 
637 		if (fe->exp.item9 != NULL)
638 			export9_send(priv, fe, fe->exp.item9,
639 			    fe->exp.item9_opt, NG_QUEUE);
640 
641 		mtx_destroy(&fe->export_mtx);
642 		mtx_destroy(&fe->export9_mtx);
643 		free(fe, M_NETGRAPH);
644 	}
645 
646 	counter_u64_free(priv->nfinfo_bytes);
647 	counter_u64_free(priv->nfinfo_packets);
648 	counter_u64_free(priv->nfinfo_bytes6);
649 	counter_u64_free(priv->nfinfo_packets6);
650 	counter_u64_free(priv->nfinfo_sbytes);
651 	counter_u64_free(priv->nfinfo_spackets);
652 	counter_u64_free(priv->nfinfo_sbytes6);
653 	counter_u64_free(priv->nfinfo_spackets6);
654 	counter_u64_free(priv->nfinfo_act_exp);
655 	counter_u64_free(priv->nfinfo_inact_exp);
656 
657 	ng_netflow_v9_cache_flush(priv);
658 }
659 
660 /* Insert a packet into the flow cache. */
661 int
662 ng_netflow_flow_add(priv_p priv, fib_export_p fe, struct ip *ip,
663     caddr_t upper_ptr, uint8_t upper_proto, uint8_t flags,
664     unsigned int src_if_index)
665 {
666 	struct flow_entry	*fle, *fle1;
667 	struct flow_hash_entry	*hsh;
668 	struct flow_rec		r;
669 	int			hlen, plen;
670 	int			error = 0;
671 	uint16_t		eproto;
672 	uint8_t			tcp_flags = 0;
673 
674 	bzero(&r, sizeof(r));
675 
676 	if (ip->ip_v != IPVERSION)
677 		return (EINVAL);
678 
679 	hlen = ip->ip_hl << 2;
680 	if (hlen < sizeof(struct ip))
681 		return (EINVAL);
682 
683 	eproto = ETHERTYPE_IP;
684 	/* Assume L4 template by default */
685 	r.flow_type = NETFLOW_V9_FLOW_V4_L4;
686 
687 	r.r_src = ip->ip_src;
688 	r.r_dst = ip->ip_dst;
689 	r.fib = fe->fib;
690 
691 	plen = ntohs(ip->ip_len);
692 
693 	r.r_ip_p = ip->ip_p;
694 	r.r_tos = ip->ip_tos;
695 
696 	r.r_i_ifx = src_if_index;
697 
698 	/*
699 	 * XXX NOTE: only the first fragment of a fragmented TCP, UDP or
700 	 * ICMP packet will be recorded with proper s_port and d_port.
701 	 * Subsequent fragments will be recorded simply as IP packets with
702 	 * ip_proto = ip->ip_p and s_port, d_port set to zero.
703 	 * This may look like a bug, but we don't want to re-implement
704 	 * IP packet reassembly here. Anyway, the (in)famous trafd works this
705 	 * way, and nobody has complained yet :)
706 	 */
707 	if ((ip->ip_off & htons(IP_OFFMASK)) == 0)
708 		switch(r.r_ip_p) {
709 		case IPPROTO_TCP:
710 		    {
711 			struct tcphdr *tcp;
712 
713 			tcp = (struct tcphdr *)((caddr_t )ip + hlen);
714 			r.r_sport = tcp->th_sport;
715 			r.r_dport = tcp->th_dport;
716 			tcp_flags = tcp->th_flags;
717 			break;
718 		    }
719 		case IPPROTO_UDP:
720 			r.r_ports = *(uint32_t *)((caddr_t )ip + hlen);
721 			break;
722 		}
723 
724 	counter_u64_add(priv->nfinfo_packets, 1);
725 	counter_u64_add(priv->nfinfo_bytes, plen);
726 
727 	/* Find hash slot. */
728 	hsh = &priv->hash[ip_hash(&r)];
729 
730 	mtx_lock(&hsh->mtx);
731 
732 	/*
733 	 * Go through the hash chain and find our entry. If we encounter
734 	 * an entry that should be expired, purge it. We search in reverse
735 	 * order, since the most active entries are at the tail and most
736 	 * lookups are for the most active entries.
737 	 */
738 	TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
739 		if (bcmp(&r, &fle->f.r, sizeof(struct flow_rec)) == 0)
740 			break;
741 		if ((INACTIVE(fle) && SMALL(fle)) || AGED(fle)) {
742 			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
743 			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib),
744 			    fle, NG_QUEUE);
745 			counter_u64_add(priv->nfinfo_act_exp, 1);
746 		}
747 	}
748 
749 	if (fle) {			/* An existing entry. */
750 
751 		fle->f.bytes += plen;
752 		fle->f.packets ++;
753 		fle->f.tcp_flags |= tcp_flags;
754 		fle->f.last = time_uptime;
755 
756 		/*
757 		 * We have the following reasons to expire a flow actively:
758 		 * - it hit the active timeout
759 		 * - a TCP connection closed
760 		 * - its byte counter is about to overflow
761 		 */
762 		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle) ||
763 		    (fle->f.bytes >= (CNTR_MAX - IF_MAXMTU)) ) {
764 			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
765 			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib),
766 			    fle, NG_QUEUE);
767 			counter_u64_add(priv->nfinfo_act_exp, 1);
768 		} else {
769 			/*
770 			 * It is the newest entry; move it to the tail
771 			 * if it isn't there already, so the next search
772 			 * will locate it quicker.
773 			 */
774 			if (fle != TAILQ_LAST(&hsh->head, fhead)) {
775 				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
776 				TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
777 			}
778 		}
779 	} else				/* A new flow entry. */
780 		error = hash_insert(priv, hsh, &r, plen, flags, tcp_flags);
781 
782 	mtx_unlock(&hsh->mtx);
783 
784 	return (error);
785 }
786 
787 #ifdef INET6
788 /* Insert an IPv6 packet into the flow cache. */
789 int
790 ng_netflow_flow6_add(priv_p priv, fib_export_p fe, struct ip6_hdr *ip6,
791     caddr_t upper_ptr, uint8_t upper_proto, uint8_t flags,
792     unsigned int src_if_index)
793 {
794 	struct flow_entry	*fle = NULL, *fle1;
795 	struct flow6_entry	*fle6;
796 	struct flow_hash_entry	*hsh;
797 	struct flow6_rec	r;
798 	int			plen;
799 	int			error = 0;
800 	uint8_t			tcp_flags = 0;
801 
802 	/* check version */
803 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION)
804 		return (EINVAL);
805 
806 	bzero(&r, sizeof(r));
807 
808 	r.src.r_src6 = ip6->ip6_src;
809 	r.dst.r_dst6 = ip6->ip6_dst;
810 	r.fib = fe->fib;
811 
812 	/* Assume L4 template by default */
813 	r.flow_type = NETFLOW_V9_FLOW_V6_L4;
814 
815 	plen = ntohs(ip6->ip6_plen) + sizeof(struct ip6_hdr);
816 
817 #if 0
818 	/* XXX: set DSCP/CoS value */
819 	r.r_tos = ip->ip_tos;
820 #endif
821 	if ((flags & NG_NETFLOW_IS_FRAG) == 0) {
822 		switch(upper_proto) {
823 		case IPPROTO_TCP:
824 		    {
825 			struct tcphdr *tcp;
826 
827 			tcp = (struct tcphdr *)upper_ptr;
828 			r.r_ports = *(uint32_t *)upper_ptr;
829 			tcp_flags = tcp->th_flags;
830 			break;
831 		    }
832 		case IPPROTO_UDP:
833 		case IPPROTO_SCTP:
834 			r.r_ports = *(uint32_t *)upper_ptr;
835 			break;
836 		}
837 	}
838 
839 	r.r_ip_p = upper_proto;
840 	r.r_i_ifx = src_if_index;
841 
842 	counter_u64_add(priv->nfinfo_packets6, 1);
843 	counter_u64_add(priv->nfinfo_bytes6, plen);
844 
845 	/* Find hash slot. */
846 	hsh = &priv->hash6[ip6_hash(&r)];
847 
848 	mtx_lock(&hsh->mtx);
849 
850 	/*
851 	 * Go through the hash chain and find our entry. If we encounter
852 	 * an entry that should be expired, purge it. We search in reverse
853 	 * order, since the most active entries are at the tail and most
854 	 * lookups are for the most active entries.
855 	 */
856 	TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
857 		if (fle->f.version != IP6VERSION)
858 			continue;
859 		fle6 = (struct flow6_entry *)fle;
860 		if (bcmp(&r, &fle6->f.r, sizeof(struct flow6_rec)) == 0)
861 			break;
862 		if ((INACTIVE(fle6) && SMALL(fle6)) || AGED(fle6)) {
863 			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
864 			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib), fle,
865 			    NG_QUEUE);
866 			counter_u64_add(priv->nfinfo_act_exp, 1);
867 		}
868 	}
869 
870 	if (fle != NULL) {			/* An existing entry. */
871 		fle6 = (struct flow6_entry *)fle;
872 
873 		fle6->f.bytes += plen;
874 		fle6->f.packets ++;
875 		fle6->f.tcp_flags |= tcp_flags;
876 		fle6->f.last = time_uptime;
877 
878 		/*
879 		 * We have the following reasons to expire a flow actively:
880 		 * - it hit the active timeout
881 		 * - a TCP connection closed
882 		 * - its byte counter is about to overflow
883 		 */
884 		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle6) ||
885 		    (fle6->f.bytes >= (CNTR_MAX - IF_MAXMTU)) ) {
886 			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
887 			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib), fle,
888 			    NG_QUEUE);
889 			counter_u64_add(priv->nfinfo_act_exp, 1);
890 		} else {
891 			/*
892 			 * It is the newest entry; move it to the tail
893 			 * if it isn't there already, so the next search
894 			 * will locate it quicker.
895 			 */
896 			if (fle != TAILQ_LAST(&hsh->head, fhead)) {
897 				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
898 				TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
899 			}
900 		}
901 	} else				/* A new flow entry. */
902 		error = hash6_insert(priv, hsh, &r, plen, flags, tcp_flags);
903 
904 	mtx_unlock(&hsh->mtx);
905 
906 	return (error);
907 }
908 #endif
909 
910 /*
911  * Return records from cache to userland.
912  *
913  * TODO: matching a particular IP should be done here, in the kernel.
914  */
915 int
916 ng_netflow_flow_show(priv_p priv, struct ngnf_show_header *req,
917 struct ngnf_show_header *resp)
918 {
919 	struct flow_hash_entry	*hsh;
920 	struct flow_entry	*fle;
921 	struct flow_entry_data	*data = (struct flow_entry_data *)(resp + 1);
922 #ifdef INET6
923 	struct flow6_entry_data	*data6 = (struct flow6_entry_data *)(resp + 1);
924 #endif
925 	int	i, max;
926 
927 	i = req->hash_id;
928 	if (i > NBUCKETS-1)
929 		return (EINVAL);
930 
931 #ifdef INET6
932 	if (req->version == 6) {
933 		resp->version = 6;
934 		hsh = priv->hash6 + i;
935 		max = NREC6_AT_ONCE;
936 	} else
937 #endif
938 	if (req->version == 4) {
939 		resp->version = 4;
940 		hsh = priv->hash + i;
941 		max = NREC_AT_ONCE;
942 	} else
943 		return (EINVAL);
944 
945 	/*
946 	 * We will transfer no more than NREC_AT_ONCE records; more data
947 	 * will come in the next message.
948 	 * We send the current hash index and the current record number in
949 	 * the list to userland, and userland should return them back to us.
950 	 * Then we will restart from that entry.
951 	 *
952 	 * The resulting cache snapshot can be inaccurate if flow expiration
953 	 * takes place on a hash item between userland data requests for
954 	 * that hash item id.
955 	 */
956 	resp->nentries = 0;
957 	for (; i < NBUCKETS; hsh++, i++) {
958 		int list_id;
959 
960 		if (mtx_trylock(&hsh->mtx) == 0) {
961 			/*
962 			 * The requested hash index is not available;
963 			 * relay the decision whether to skip it or
964 			 * re-request the data to userland.
965 			 */
966 			resp->hash_id = i;
967 			resp->list_id = 0;
968 			return (0);
969 		}
970 
971 		list_id = 0;
972 		TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
973 			if (hsh->mtx.mtx_lock & MTX_CONTESTED) {
974 				resp->hash_id = i;
975 				resp->list_id = list_id;
976 				mtx_unlock(&hsh->mtx);
977 				return (0);
978 			}
979 
980 			list_id++;
981 			/* Search for particular record in list. */
982 			if (req->list_id > 0) {
983 				if (list_id < req->list_id)
984 					continue;
985 
986 				/* Requested list position found. */
987 				req->list_id = 0;
988 			}
989 #ifdef INET6
990 			if (req->version == 6) {
991 				struct flow6_entry *fle6;
992 
993 				fle6 = (struct flow6_entry *)fle;
994 				bcopy(&fle6->f, data6 + resp->nentries,
995 				    sizeof(fle6->f));
996 			} else
997 #endif
998 				bcopy(&fle->f, data + resp->nentries,
999 				    sizeof(fle->f));
1000 			resp->nentries++;
1001 			if (resp->nentries == max) {
1002 				resp->hash_id = i;
1003 				/*
1004 				 * If it was the last item in the list,
1005 				 * we simply skip to the next hash_id.
1006 				 */
1007 				resp->list_id = list_id + 1;
1008 				mtx_unlock(&hsh->mtx);
1009 				return (0);
1010 			}
1011 		}
1012 		mtx_unlock(&hsh->mtx);
1013 	}
1014 
1015 	resp->hash_id = resp->list_id = 0;
1016 
1017 	return (0);
1018 }
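
/*
 * Sketch, not part of the build, of how a userland consumer could drive the
 * hash_id/list_id resumption protocol implemented above.  The show_request()
 * helper is hypothetical and stands in for the actual netgraph
 * control-message exchange; terminating on hash_id == 0 and list_id == 0
 * follows from the final assignment in the function above.
 */
#if 0
	struct ngnf_show_header req, resp;

	memset(&req, 0, sizeof(req));
	req.version = 4;			/* or 6 for the IPv6 cache */
	for (;;) {
		show_request(&req, &resp);	/* hypothetical message exchange */
		/* resp.nentries flow records follow resp in the reply. */
		if (resp.hash_id == 0 && resp.list_id == 0)
			break;			/* whole cache walked */
		/* Resume exactly where the kernel stopped. */
		req.hash_id = resp.hash_id;
		req.list_id = resp.list_id;
	}
#endif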
1019 
1020 /* We have a full datagram in privdata. Send it to the export hook. */
1021 static int
1022 export_send(priv_p priv, fib_export_p fe, item_p item, int flags)
1023 {
1024 	struct mbuf *m = NGI_M(item);
1025 	struct netflow_v5_export_dgram *dgram = mtod(m,
1026 					struct netflow_v5_export_dgram *);
1027 	struct netflow_v5_header *header = &dgram->header;
1028 	struct timespec ts;
1029 	int error = 0;
1030 
1031 	/* Fill mbuf header. */
1032 	m->m_len = m->m_pkthdr.len = sizeof(struct netflow_v5_record) *
1033 	   header->count + sizeof(struct netflow_v5_header);
1034 
1035 	/* Fill export header. */
1036 	header->sys_uptime = htonl(MILLIUPTIME(time_uptime));
1037 	getnanotime(&ts);
1038 	header->unix_secs  = htonl(ts.tv_sec);
1039 	header->unix_nsecs = htonl(ts.tv_nsec);
1040 	header->engine_type = 0;
1041 	header->engine_id = fe->domain_id;
1042 	header->pad = 0;
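	/*
	 * atomic_fetchadd_32() returns the pre-increment value, so the
	 * datagram carries the sequence number of its first flow record,
	 * while fe->flow_seq is advanced by the full record count.
	 */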
1043 	header->flow_seq = htonl(atomic_fetchadd_32(&fe->flow_seq,
1044 	    header->count));
1045 	header->count = htons(header->count);
1046 
1047 	if (priv->export != NULL)
1048 		NG_FWD_ITEM_HOOK_FLAGS(error, item, priv->export, flags);
1049 	else
1050 		NG_FREE_ITEM(item);
1051 
1052 	return (error);
1053 }
1054 
1055 
1056 /* Add export record to dgram. */
1057 static int
1058 export_add(item_p item, struct flow_entry *fle)
1059 {
1060 	struct netflow_v5_export_dgram *dgram = mtod(NGI_M(item),
1061 					struct netflow_v5_export_dgram *);
1062 	struct netflow_v5_header *header = &dgram->header;
1063 	struct netflow_v5_record *rec;
1064 
1065 	rec = &dgram->r[header->count];
1066 	header->count ++;
1067 
1068 	KASSERT(header->count <= NETFLOW_V5_MAX_RECORDS,
1069 	    ("ng_netflow: export too big"));
1070 
1071 	/* Fill in export record. */
1072 	rec->src_addr = fle->f.r.r_src.s_addr;
1073 	rec->dst_addr = fle->f.r.r_dst.s_addr;
1074 	rec->next_hop = fle->f.next_hop.s_addr;
1075 	rec->i_ifx    = htons(fle->f.fle_i_ifx);
1076 	rec->o_ifx    = htons(fle->f.fle_o_ifx);
1077 	rec->packets  = htonl(fle->f.packets);
1078 	rec->octets   = htonl(fle->f.bytes);
1079 	rec->first    = htonl(MILLIUPTIME(fle->f.first));
1080 	rec->last     = htonl(MILLIUPTIME(fle->f.last));
1081 	rec->s_port   = fle->f.r.r_sport;
1082 	rec->d_port   = fle->f.r.r_dport;
1083 	rec->flags    = fle->f.tcp_flags;
1084 	rec->prot     = fle->f.r.r_ip_p;
1085 	rec->tos      = fle->f.r.r_tos;
1086 	rec->dst_mask = fle->f.dst_mask;
1087 	rec->src_mask = fle->f.src_mask;
1088 	rec->pad1     = 0;
1089 	rec->pad2     = 0;
1090 
1091 	/* Unsupported fields. */
1092 	rec->src_as = rec->dst_as = 0;
1093 
1094 	if (header->count == NETFLOW_V5_MAX_RECORDS)
1095 		return (1); /* end of datagram */
1096 	else
1097 		return (0);
1098 }
1099 
1100 /* Periodic flow expiry run. */
1101 void
1102 ng_netflow_expire(void *arg)
1103 {
1104 	struct flow_entry	*fle, *fle1;
1105 	struct flow_hash_entry	*hsh;
1106 	priv_p			priv = (priv_p )arg;
1107 	int			used, i;
1108 
1109 	/*
1110 	 * Walk through the whole cache.
1111 	 */
1112 	used = uma_zone_get_cur(priv->zone);
1113 	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++) {
1114 		/*
1115 		 * Skip entries that are already being worked on.
1116 		 */
1117 		if (mtx_trylock(&hsh->mtx) == 0)
1118 			continue;
1119 
1120 		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
1121 			/*
1122 			 * Interrupt thread wants this entry!
1123 			 * Quick! Quick! Bail out!
1124 			 */
1125 			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
1126 				break;
1127 
1128 			/*
1129 			 * Don't expire aggressively while the predicted hash collision
1130 			 * ratio is small (at most two entries per bucket on average).
1131 			 */
1132 			if (used <= (NBUCKETS*2) && !INACTIVE(fle))
1133 				break;
1134 
1135 			if ((INACTIVE(fle) && (SMALL(fle) ||
1136 			    (used > (NBUCKETS*2)))) || AGED(fle)) {
1137 				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
1138 				expire_flow(priv, priv_to_fib(priv,
1139 				    fle->f.r.fib), fle, NG_NOFLAGS);
1140 				used--;
1141 				counter_u64_add(priv->nfinfo_inact_exp, 1);
1142 			}
1143 		}
1144 		mtx_unlock(&hsh->mtx);
1145 	}
1146 
1147 #ifdef INET6
1148 	used = uma_zone_get_cur(priv->zone6);
1149 	for (hsh = priv->hash6, i = 0; i < NBUCKETS; hsh++, i++) {
1150 		struct flow6_entry	*fle6;
1151 
1152 		/*
1153 		 * Skip entries that are already being worked on.
1154 		 */
1155 		if (mtx_trylock(&hsh->mtx) == 0)
1156 			continue;
1157 
1158 		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
1159 			fle6 = (struct flow6_entry *)fle;
1160 			/*
1161 			 * Interrupt thread wants this entry!
1162 			 * Quick! Quick! Bail out!
1163 			 */
1164 			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
1165 				break;
1166 
1167 			/*
1168 			 * Don't expire aggressively while the predicted hash collision
1169 			 * ratio is small (at most two entries per bucket on average).
1170 			 */
1171 			if (used <= (NBUCKETS*2) && !INACTIVE(fle6))
1172 				break;
1173 
1174 			if ((INACTIVE(fle6) && (SMALL(fle6) ||
1175 			    (used > (NBUCKETS*2)))) || AGED(fle6)) {
1176 				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
1177 				expire_flow(priv, priv_to_fib(priv,
1178 				    fle->f.r.fib), fle, NG_NOFLAGS);
1179 				used--;
1180 				counter_u64_add(priv->nfinfo_inact_exp, 1);
1181 			}
1182 		}
1183 		mtx_unlock(&hsh->mtx);
1184 	}
1185 #endif
1186 
1187 	/* Schedule next expire. */
1188 	callout_reset(&priv->exp_callout, (1*hz), &ng_netflow_expire,
1189 	    (void *)priv);
1190 }
1191