/*-
 * Copyright (c) 2010-2011 Alexander V. Chernikov <melifaro@ipfw.ru>
 * Copyright (c) 2004-2005 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2001-2003 Roman V. Palagin <romanp@unshadow.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $SourceForge: netflow.c,v 1.41 2004/09/05 11:41:10 glebius Exp $
 */

static const char rcs_id[] =
    "@(#) $FreeBSD$";

#include "opt_inet6.h"
#include "opt_route.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/endian.h>

#include <machine/atomic.h>
#include <machine/stdarg.h>

#include <net/if.h>
#include <net/route.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>

#include <netgraph/netflow/netflow.h>
#include <netgraph/netflow/netflow_v9.h>
#include <netgraph/netflow/ng_netflow.h>

#define	NBUCKETS	(65536)		/* must be power of 2 */
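
/*
 * Each of the NBUCKETS hash buckets carries its own mutex, so flows
 * that fall into different buckets can be updated concurrently.
 */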

/* This hash is for TCP or UDP packets. */
#define FULL_HASH(addr1, addr2, port1, port2)	\
	(((addr1 ^ (addr1 >> 16) ^ 		\
	htons(addr2 ^ (addr2 >> 16))) ^ 	\
	port1 ^ htons(port2)) &			\
	(NBUCKETS - 1))

/* This hash is for all other IP packets. */
#define ADDR_HASH(addr1, addr2)			\
	((addr1 ^ (addr1 >> 16) ^ 		\
	htons(addr2 ^ (addr2 >> 16))) &		\
	(NBUCKETS - 1))
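
/*
 * Both hashes XOR-fold the upper halves of the 32-bit addresses into
 * the lower ones and mask the result with NBUCKETS - 1, which is why
 * NBUCKETS must stay a power of 2.  The htons() applied to the second
 * address (and port) makes the hash asymmetric, so the two directions
 * of one connection generally land in different buckets.
 */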

/* Macros to shorten logical constructions */
/* XXX: priv must exist in namespace */
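/*
 * INACTIVE: no packets seen for longer than the inactive timeout.
 * AGED: the flow's first packet is older than the active timeout.
 */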
#define	INACTIVE(fle)	(time_uptime - fle->f.last > priv->info.nfinfo_inact_t)
#define	AGED(fle)	(time_uptime - fle->f.first > priv->info.nfinfo_act_t)
#define	ISFREE(fle)	(fle->f.packets == 0)

/*
 * 4 is a magical number: statistically, the number of 4-packet flows is
 * bigger than that of 5,6,7...-packet flows by an order of magnitude.
 * Most UDP/ICMP scans are 1 packet (~ 90% of the flow cache). TCP scans
 * are 2 packets for a reachable host and 4 packets otherwise.
 */
#define	SMALL(fle)	(fle->f.packets <= 4)

MALLOC_DECLARE(M_NETFLOW_HASH);
MALLOC_DEFINE(M_NETFLOW_HASH, "netflow_hash", "NetFlow hash");

static int export_add(item_p, struct flow_entry *);
static int export_send(priv_p, fib_export_p, item_p, int);

static int hash_insert(priv_p, struct flow_hash_entry *, struct flow_rec *, int, uint8_t);
static int hash6_insert(priv_p, struct flow6_hash_entry *, struct flow6_rec *, int, uint8_t);

static __inline void expire_flow(priv_p, fib_export_p, struct flow_entry *, int);

/*
 * Generate hash for a given flow record.
 *
 * FIB is not used here, because:
 * most VRFs will carry public IPv4 addresses, which are unique even
 * without a FIB; private addresses can overlap, but this is worked out
 * via the flow_rec bcmp(), which includes the fib id. In the IPv6 world
 * addresses are all globally unique (not fully true, there is FC00::/7
 * for example, but the chances of address overlap are MUCH smaller).
 */
static __inline uint32_t
ip_hash(struct flow_rec *r)
{
	switch (r->r_ip_p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return FULL_HASH(r->r_src.s_addr, r->r_dst.s_addr,
		    r->r_sport, r->r_dport);
	default:
		return ADDR_HASH(r->r_src.s_addr, r->r_dst.s_addr);
	}
}

#ifdef INET6
/* Generate hash for a given flow6 record. Use lower 4 octets from v6 addresses */
static __inline uint32_t
ip6_hash(struct flow6_rec *r)
{
	switch (r->r_ip_p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return FULL_HASH(r->src.r_src6.__u6_addr.__u6_addr32[3],
		    r->dst.r_dst6.__u6_addr.__u6_addr32[3], r->r_sport,
		    r->r_dport);
	default:
		return ADDR_HASH(r->src.r_src6.__u6_addr.__u6_addr32[3],
		    r->dst.r_dst6.__u6_addr.__u6_addr32[3]);
	}
}
#endif

/* This is a callback from uma(9), called on alloc. */
static int
uma_ctor_flow(void *mem, int size, void *arg, int how)
{
	priv_p priv = (priv_p)arg;

	if (atomic_load_acq_32(&priv->info.nfinfo_used) >= CACHESIZE)
		return (ENOMEM);

	atomic_add_32(&priv->info.nfinfo_used, 1);

	return (0);
}

/* This is a callback from uma(9), called on free. */
static void
uma_dtor_flow(void *mem, int size, void *arg)
{
	priv_p priv = (priv_p)arg;

	atomic_subtract_32(&priv->info.nfinfo_used, 1);
}

#ifdef INET6
/* This is a callback from uma(9), called on alloc. */
static int
uma_ctor_flow6(void *mem, int size, void *arg, int how)
{
	priv_p priv = (priv_p)arg;

	if (atomic_load_acq_32(&priv->info.nfinfo_used6) >= CACHESIZE)
		return (ENOMEM);

	atomic_add_32(&priv->info.nfinfo_used6, 1);

	return (0);
}

/* This is a callback from uma(9), called on free. */
static void
uma_dtor_flow6(void *mem, int size, void *arg)
{
	priv_p priv = (priv_p)arg;

	atomic_subtract_32(&priv->info.nfinfo_used6, 1);
}
#endif

/*
 * Detach export datagram from priv, if there is any.
 * If there is none, allocate a new one.
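 * Datagrams are built incrementally and parked on the per-FIB export
 * structure between calls, so records from several expired flows can
 * share one datagram.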
 */
static item_p
get_export_dgram(priv_p priv, fib_export_p fe)
{
	item_p	item = NULL;

	mtx_lock(&fe->export_mtx);
	if (fe->exp.item != NULL) {
		item = fe->exp.item;
		fe->exp.item = NULL;
	}
	mtx_unlock(&fe->export_mtx);

	if (item == NULL) {
		struct netflow_v5_export_dgram *dgram;
		struct mbuf *m;

		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (NULL);
		item = ng_package_data(m, NG_NOFLAGS);
		if (item == NULL)
			return (NULL);
		dgram = mtod(m, struct netflow_v5_export_dgram *);
		dgram->header.count = 0;
		dgram->header.version = htons(NETFLOW_V5);
		dgram->header.pad = 0;
	}

	return (item);
}

/*
 * Re-attach incomplete datagram back to priv.
 * If there is already another one, then send the incomplete one.
 */
static void
return_export_dgram(priv_p priv, fib_export_p fe, item_p item, int flags)
{
	/*
	 * It may happen on SMP that some thread has already
	 * put its item there; in this case we bail out and
	 * send what we have to the collector.
	 */
	mtx_lock(&fe->export_mtx);
	if (fe->exp.item == NULL) {
		fe->exp.item = item;
		mtx_unlock(&fe->export_mtx);
	} else {
		mtx_unlock(&fe->export_mtx);
		export_send(priv, fe, item, flags);
	}
}

/*
 * The flow is over. Call export_add() and free it. If the datagram is
 * full, then call export_send().
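 * In either case the flow entry itself is consumed: it is returned to
 * its UMA zone whether or not the export succeeds.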
 */
static __inline void
expire_flow(priv_p priv, fib_export_p fe, struct flow_entry *fle, int flags)
{
	struct netflow_export_item exp;
	uint16_t version = fle->f.version;

	if ((priv->export != NULL) && (version == IPVERSION)) {
		exp.item = get_export_dgram(priv, fe);
		if (exp.item == NULL) {
			atomic_add_32(&priv->info.nfinfo_export_failed, 1);
			if (priv->export9 != NULL)
				atomic_add_32(&priv->info.nfinfo_export9_failed, 1);
			/* fle definitely contains IPv4 flow */
			uma_zfree_arg(priv->zone, fle, priv);
			return;
		}

		if (export_add(exp.item, fle) > 0)
			export_send(priv, fe, exp.item, flags);
		else
			return_export_dgram(priv, fe, exp.item, NG_QUEUE);
	}

	if (priv->export9 != NULL) {
		exp.item9 = get_export9_dgram(priv, fe, &exp.item9_opt);
		if (exp.item9 == NULL) {
			atomic_add_32(&priv->info.nfinfo_export9_failed, 1);
			if (version == IPVERSION)
				uma_zfree_arg(priv->zone, fle, priv);
			else if (version == IP6VERSION)
				uma_zfree_arg(priv->zone6, fle, priv);
			else
				panic("ng_netflow: Unknown IP proto: %d", version);
			return;
		}

		if (export9_add(exp.item9, exp.item9_opt, fle) > 0)
			export9_send(priv, fe, exp.item9, exp.item9_opt, flags);
		else
			return_export9_dgram(priv, fe, exp.item9, exp.item9_opt, NG_QUEUE);
	}

	if (version == IPVERSION)
		uma_zfree_arg(priv->zone, fle, priv);
	else if (version == IP6VERSION)
		uma_zfree_arg(priv->zone6, fle, priv);
}

/* Get a snapshot of node statistics */
void
ng_netflow_copyinfo(priv_p priv, struct ng_netflow_info *i)
{
	/* XXX: atomic */
	memcpy((void *)i, (void *)&priv->info, sizeof(priv->info));
}

/*
 * Insert a record into the defined slot.
 *
 * First we get a free flow entry, then fill in all
 * possible fields in it.
 *
 * TODO: consider dropping the hash mutex while filling in the datagram,
 * as was done in the previous version. Need to test & profile
 * to be sure.
 */
static __inline int
hash_insert(priv_p priv, struct flow_hash_entry *hsh, struct flow_rec *r,
	int plen, uint8_t tcp_flags)
{
	struct flow_entry *fle;
	struct sockaddr_in sin;
	struct rtentry *rt;

	mtx_assert(&hsh->mtx, MA_OWNED);

	fle = uma_zalloc_arg(priv->zone, priv, M_NOWAIT);
	if (fle == NULL) {
		atomic_add_32(&priv->info.nfinfo_alloc_failed, 1);
		return (ENOMEM);
	}

	/*
	 * Now fle is totally ours. It is detached from all lists;
	 * we can safely edit it.
	 */

	fle->f.version = IPVERSION;
	bcopy(r, &fle->f.r, sizeof(struct flow_rec));
	fle->f.bytes = plen;
	fle->f.packets = 1;
	fle->f.tcp_flags = tcp_flags;

	fle->f.first = fle->f.last = time_uptime;

	/*
	 * First we do a route table lookup on the destination address, so we
	 * can fill in out_ifx, dst_mask, nexthop, and dst_as in future
	 * releases.
	 */
	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr = fle->f.r.r_dst;
	rt = rtalloc1_fib((struct sockaddr *)&sin, 0, 0, r->fib);
	if (rt != NULL) {
		fle->f.fle_o_ifx = rt->rt_ifp->if_index;

		if (rt->rt_flags & RTF_GATEWAY &&
		    rt->rt_gateway->sa_family == AF_INET)
			fle->f.next_hop =
			    ((struct sockaddr_in *)(rt->rt_gateway))->sin_addr;

		if (rt_mask(rt))
			fle->f.dst_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up. We can't determine mask :( */
			fle->f.dst_mask = 32;

		RTFREE_LOCKED(rt);
	}

	/* Do route lookup on source address, to fill in src_mask. */
	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr = fle->f.r.r_src;
	rt = rtalloc1_fib((struct sockaddr *)&sin, 0, 0, r->fib);
	if (rt != NULL) {
		if (rt_mask(rt))
			fle->f.src_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up. We can't determine mask :( */
			fle->f.src_mask = 32;

		RTFREE_LOCKED(rt);
	}

	/* Push the new flow to the end of the hash chain. */
	TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);

	return (0);
}

#ifdef INET6
/* XXX: make this a normal function instead of a macro. */
#define ipv6_masklen(x)		(bitcount32((x).__u6_addr.__u6_addr32[0]) + \
				bitcount32((x).__u6_addr.__u6_addr32[1]) + \
				bitcount32((x).__u6_addr.__u6_addr32[2]) + \
				bitcount32((x).__u6_addr.__u6_addr32[3]))
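/*
 * Note: counting one bits yields a correct prefix length only for the
 * contiguous netmasks the routing table hands out; a non-contiguous
 * mask would produce a meaningless value.
 */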
/* XXX: Do we need inline here? */
static __inline int
hash6_insert(priv_p priv, struct flow6_hash_entry *hsh6, struct flow6_rec *r,
	int plen, uint8_t tcp_flags)
{
	struct flow6_entry *fle6;
	struct sockaddr_in6 *src, *dst;
	struct rtentry *rt;
	struct route_in6 rin6;

	mtx_assert(&hsh6->mtx, MA_OWNED);

	fle6 = uma_zalloc_arg(priv->zone6, priv, M_NOWAIT);
	if (fle6 == NULL) {
		atomic_add_32(&priv->info.nfinfo_alloc_failed, 1);
		return (ENOMEM);
	}

	/*
	 * Now fle6 is totally ours. It is detached from all lists;
	 * we can safely edit it.
	 */

	fle6->f.version = IP6VERSION;
	bcopy(r, &fle6->f.r, sizeof(struct flow6_rec));
	fle6->f.bytes = plen;
	fle6->f.packets = 1;
	fle6->f.tcp_flags = tcp_flags;

	fle6->f.first = fle6->f.last = time_uptime;

	/*
	 * First we do a route table lookup on the destination address, so we
	 * can fill in out_ifx, dst_mask, nexthop, and dst_as in future
	 * releases.
	 */
	bzero(&rin6, sizeof(struct route_in6));
	dst = (struct sockaddr_in6 *)&rin6.ro_dst;
	dst->sin6_len = sizeof(struct sockaddr_in6);
	dst->sin6_family = AF_INET6;
	dst->sin6_addr = r->dst.r_dst6;

	rin6.ro_rt = rtalloc1_fib((struct sockaddr *)dst, 0, 0, r->fib);

	if (rin6.ro_rt != NULL) {
		rt = rin6.ro_rt;
		fle6->f.fle_o_ifx = rt->rt_ifp->if_index;

		if (rt->rt_flags & RTF_GATEWAY &&
		    rt->rt_gateway->sa_family == AF_INET6)
			fle6->f.n.next_hop6 =
			    ((struct sockaddr_in6 *)(rt->rt_gateway))->sin6_addr;

		if (rt_mask(rt))
			fle6->f.dst_mask = ipv6_masklen(((struct sockaddr_in6 *)rt_mask(rt))->sin6_addr);
		else
			fle6->f.dst_mask = 128;

		RTFREE_LOCKED(rt);
	}

	/* Do route lookup on source address, to fill in src_mask. */
	bzero(&rin6, sizeof(struct route_in6));
	src = (struct sockaddr_in6 *)&rin6.ro_dst;
	src->sin6_len = sizeof(struct sockaddr_in6);
	src->sin6_family = AF_INET6;
	src->sin6_addr = r->src.r_src6;

	rin6.ro_rt = rtalloc1_fib((struct sockaddr *)src, 0, 0, r->fib);

	if (rin6.ro_rt != NULL) {
		rt = rin6.ro_rt;

		if (rt_mask(rt))
			fle6->f.src_mask = ipv6_masklen(((struct sockaddr_in6 *)rt_mask(rt))->sin6_addr);
		else
			fle6->f.src_mask = 128;

		RTFREE_LOCKED(rt);
	}

	/* Push the new flow to the end of the hash chain. */
	TAILQ_INSERT_TAIL(&hsh6->head, fle6, fle6_hash);

	return (0);
}
#endif

/*
 * Non-static functions called from ng_netflow.c
 */

/* Allocate memory and set up flow cache */
int
ng_netflow_cache_init(priv_p priv)
{
	struct flow_hash_entry *hsh;
#ifdef INET6
	struct flow6_hash_entry *hsh6;
#endif
	int i;

	/* Initialize cache UMA zone. */
	priv->zone = uma_zcreate("NetFlow IPv4 cache", sizeof(struct flow_entry),
	    uma_ctor_flow, uma_dtor_flow, NULL, NULL, UMA_ALIGN_CACHE, 0);
	uma_zone_set_max(priv->zone, CACHESIZE);
#ifdef INET6
	priv->zone6 = uma_zcreate("NetFlow IPv6 cache", sizeof(struct flow6_entry),
	    uma_ctor_flow6, uma_dtor_flow6, NULL, NULL, UMA_ALIGN_CACHE, 0);
	uma_zone_set_max(priv->zone6, CACHESIZE);
#endif

	/* Allocate hash. */
	priv->hash = malloc(NBUCKETS * sizeof(struct flow_hash_entry),
	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);

	/* Initialize hash. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++) {
		mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
		TAILQ_INIT(&hsh->head);
	}

#ifdef INET6
	/* Allocate hash. */
	priv->hash6 = malloc(NBUCKETS * sizeof(struct flow6_hash_entry),
	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);

	/* Initialize hash. */
	for (i = 0, hsh6 = priv->hash6; i < NBUCKETS; i++, hsh6++) {
		mtx_init(&hsh6->mtx, "hash mutex", NULL, MTX_DEF);
		TAILQ_INIT(&hsh6->head);
	}
#endif

	ng_netflow_v9_cache_init(priv);
	CTR0(KTR_NET, "ng_netflow startup()");

	return (0);
}

/* Initialize new FIB table for v5 and v9 */
int
ng_netflow_fib_init(priv_p priv, int fib)
{
	fib_export_p	fe = priv_to_fib(priv, fib);

	CTR1(KTR_NET, "ng_netflow(): fib init: %d", fib);

	if (fe != NULL)
		return (0);

	if ((fe = malloc(sizeof(struct fib_export), M_NETGRAPH,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (1);

	mtx_init(&fe->export_mtx, "export dgram lock", NULL, MTX_DEF);
	mtx_init(&fe->export9_mtx, "export9 dgram lock", NULL, MTX_DEF);
	fe->fib = fib;
	fe->domain_id = fib;

	if (atomic_cmpset_ptr((volatile uintptr_t *)&priv->fib_data[fib],
	    (uintptr_t)NULL, (uintptr_t)fe) == 0) {
		/* FIB already set up by another ISR. */
		CTR3(KTR_NET, "ng_netflow(): fib init: %d setup %p but got %p",
		    fib, fe, priv_to_fib(priv, fib));
		mtx_destroy(&fe->export_mtx);
		mtx_destroy(&fe->export9_mtx);
		free(fe, M_NETGRAPH);
	} else {
		/* Increase counter for statistics. */
		CTR3(KTR_NET, "ng_netflow(): fib %d setup to %p (%p)",
		    fib, fe, priv_to_fib(priv, fib));
		atomic_fetchadd_32(&priv->info.nfinfo_alloc_fibs, 1);
	}

	return (0);
}

/* Free all flow cache memory. Called from node close method. */
void
ng_netflow_cache_flush(priv_p priv)
{
	struct flow_entry	*fle, *fle1;
	struct flow_hash_entry	*hsh;
#ifdef INET6
	struct flow6_entry	*fle6, *fle61;
	struct flow6_hash_entry	*hsh6;
#endif
	struct netflow_export_item exp;
	fib_export_p fe;
	int i;

	bzero(&exp, sizeof(exp));

	/*
	 * We are going to free probably billable data.
	 * Expire everything before freeing it.
	 * No locking is required since callout is already drained.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++)
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			fe = priv_to_fib(priv, fle->f.r.fib);
			expire_flow(priv, fe, fle, NG_QUEUE);
		}
#ifdef INET6
	for (hsh6 = priv->hash6, i = 0; i < NBUCKETS; hsh6++, i++)
		TAILQ_FOREACH_SAFE(fle6, &hsh6->head, fle6_hash, fle61) {
			TAILQ_REMOVE(&hsh6->head, fle6, fle6_hash);
			fe = priv_to_fib(priv, fle6->f.r.fib);
			expire_flow(priv, fe, (struct flow_entry *)fle6,
			    NG_QUEUE);
		}
#endif

	uma_zdestroy(priv->zone);
	/* Destroy hash mutexes. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++)
		mtx_destroy(&hsh->mtx);

	/* Free hash memory. */
	if (priv->hash != NULL)
		free(priv->hash, M_NETFLOW_HASH);
#ifdef INET6
	uma_zdestroy(priv->zone6);
	/* Destroy hash mutexes. */
	for (i = 0, hsh6 = priv->hash6; i < NBUCKETS; i++, hsh6++)
		mtx_destroy(&hsh6->mtx);

	/* Free hash memory. */
	if (priv->hash6 != NULL)
		free(priv->hash6, M_NETFLOW_HASH);
#endif

	for (i = 0; i < RT_NUMFIBS; i++) {
		if ((fe = priv_to_fib(priv, i)) == NULL)
			continue;

		if (fe->exp.item != NULL)
			export_send(priv, fe, fe->exp.item, NG_QUEUE);

		if (fe->exp.item9 != NULL)
			export9_send(priv, fe, fe->exp.item9,
			    fe->exp.item9_opt, NG_QUEUE);

		mtx_destroy(&fe->export_mtx);
		mtx_destroy(&fe->export9_mtx);
		free(fe, M_NETGRAPH);
	}

	ng_netflow_v9_cache_flush(priv);
}

/* Insert a packet into the flow cache. */
int
ng_netflow_flow_add(priv_p priv, fib_export_p fe, struct ip *ip,
		caddr_t upper_ptr, uint8_t upper_proto, uint8_t is_frag,
		unsigned int src_if_index)
{
	register struct flow_entry	*fle, *fle1;
	struct flow_hash_entry	*hsh;
	struct flow_rec		r;
	int			hlen, plen;
	int			error = 0;
	uint8_t			tcp_flags = 0;
	uint16_t		eproto;

	/* Try to fill flow_rec r. */
	bzero(&r, sizeof(r));
	/* Check version. */
	if (ip->ip_v != IPVERSION)
		return (EINVAL);

	/* Verify minimum header length. */
	hlen = ip->ip_hl << 2;

	if (hlen < sizeof(struct ip))
		return (EINVAL);

	eproto = ETHERTYPE_IP;
	/* Assume L4 template by default. */
	r.flow_type = NETFLOW_V9_FLOW_V4_L4;

	r.r_src = ip->ip_src;
	r.r_dst = ip->ip_dst;
	r.fib = fe->fib;

	/* Save packet length. */
	plen = ntohs(ip->ip_len);

	r.r_ip_p = ip->ip_p;
	r.r_tos = ip->ip_tos;

	r.r_i_ifx = src_if_index;

	/*
	 * XXX NOTE: only the first fragment of a fragmented TCP, UDP or
	 * ICMP packet will be recorded with proper s_port and d_port.
	 * Following fragments will be recorded simply as an IP packet with
	 * ip_proto = ip->ip_p and s_port, d_port set to zero.
	 * I know it looks like a bug. But I don't want to re-implement
	 * IP packet reassembly here. Anyway, the (in)famous trafd works
	 * this way, and nobody has complained yet :)
	 */
	if ((ip->ip_off & htons(IP_OFFMASK)) == 0)
		switch(r.r_ip_p) {
		case IPPROTO_TCP:
		{
			register struct tcphdr *tcp;

			tcp = (struct tcphdr *)((caddr_t)ip + hlen);
			r.r_sport = tcp->th_sport;
			r.r_dport = tcp->th_dport;
			tcp_flags = tcp->th_flags;
			break;
		}
		case IPPROTO_UDP:
			r.r_ports = *(uint32_t *)((caddr_t)ip + hlen);
			break;
		}

	atomic_fetchadd_32(&priv->info.nfinfo_packets, 1);
	/* XXX: atomic */
	priv->info.nfinfo_bytes += plen;

	/* Find hash slot. */
	hsh = &priv->hash[ip_hash(&r)];

	mtx_lock(&hsh->mtx);

	/*
	 * Go through the hash and find our entry. If we encounter an
	 * entry that should be expired, purge it. We do a reverse
	 * search since most active entries are first, and most
	 * searches are done on most active entries.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
		if (bcmp(&r, &fle->f.r, sizeof(struct flow_rec)) == 0)
			break;
		if ((INACTIVE(fle) && SMALL(fle)) || AGED(fle)) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib),
			    fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		}
	}

	if (fle) {			/* An existing entry. */

		fle->f.bytes += plen;
		fle->f.packets++;
		fle->f.tcp_flags |= tcp_flags;
		fle->f.last = time_uptime;

		/*
		 * We have the following reasons to expire flow in active way:
		 * - it hit active timeout
		 * - a TCP connection closed
		 * - it is going to overflow counter
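		 *   (we expire one maximum MTU early, so the byte
		 *   counter cannot wrap in the middle of a packet)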
		 */
		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle) ||
		    (fle->f.bytes >= (CNTR_MAX - IF_MAXMTU))) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib),
			    fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		} else {
			/*
			 * It is the newest, move it to the tail,
			 * if it isn't there already. Next search will
			 * locate it quicker.
			 */
			if (fle != TAILQ_LAST(&hsh->head, fhead)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
			}
		}
	} else				/* A new flow entry. */
		error = hash_insert(priv, hsh, &r, plen, tcp_flags);

	mtx_unlock(&hsh->mtx);

	return (error);
}

#ifdef INET6
/* Insert an IPv6 packet into the flow cache. */
int
ng_netflow_flow6_add(priv_p priv, fib_export_p fe, struct ip6_hdr *ip6,
		caddr_t upper_ptr, uint8_t upper_proto, uint8_t is_frag,
		unsigned int src_if_index)
{
	register struct flow6_entry	*fle6 = NULL, *fle61;
	struct flow6_hash_entry		*hsh6;
	struct flow6_rec		r;
	int			plen;
	int			error = 0;
	uint8_t			tcp_flags = 0;

	/* Check version. */
	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION)
		return (EINVAL);

	bzero(&r, sizeof(r));

	r.src.r_src6 = ip6->ip6_src;
	r.dst.r_dst6 = ip6->ip6_dst;
	r.fib = fe->fib;

	/* Assume L4 template by default. */
	r.flow_type = NETFLOW_V9_FLOW_V6_L4;

	/* Save packet length. */
	plen = ntohs(ip6->ip6_plen) + sizeof(struct ip6_hdr);

	/* XXX: set DSCP/CoS value. */
#if 0
	r.r_tos = ip->ip_tos;
#endif
	if (is_frag == 0) {
		switch(upper_proto) {
		case IPPROTO_TCP:
		{
			register struct tcphdr *tcp;

			tcp = (struct tcphdr *)upper_ptr;
			r.r_ports = *(uint32_t *)upper_ptr;
			tcp_flags = tcp->th_flags;
			break;
		}
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
			r.r_ports = *(uint32_t *)upper_ptr;
			break;
		}
	}

	r.r_ip_p = upper_proto;
	r.r_i_ifx = src_if_index;

	atomic_fetchadd_32(&priv->info.nfinfo_packets6, 1);
	/* XXX: atomic */
	priv->info.nfinfo_bytes6 += plen;

	/* Find hash slot. */
	hsh6 = &priv->hash6[ip6_hash(&r)];

	mtx_lock(&hsh6->mtx);

	/*
	 * Go through the hash and find our entry. If we encounter an
	 * entry that should be expired, purge it. We do a reverse
	 * search since most active entries are first, and most
	 * searches are done on most active entries.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(fle6, &hsh6->head, f6head, fle6_hash, fle61) {
		if (fle6->f.version != IP6VERSION)
			continue;
		if (bcmp(&r, &fle6->f.r, sizeof(struct flow6_rec)) == 0)
			break;
		if ((INACTIVE(fle6) && SMALL(fle6)) || AGED(fle6)) {
			TAILQ_REMOVE(&hsh6->head, fle6, fle6_hash);
			expire_flow(priv, priv_to_fib(priv, fle6->f.r.fib),
			    (struct flow_entry *)fle6, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		}
	}

	if (fle6 != NULL) {		/* An existing entry. */

		fle6->f.bytes += plen;
		fle6->f.packets++;
		fle6->f.tcp_flags |= tcp_flags;
		fle6->f.last = time_uptime;

		/*
		 * We have the following reasons to expire flow in active way:
		 * - it hit active timeout
		 * - a TCP connection closed
		 * - it is going to overflow counter
		 */
		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle6) ||
		    (fle6->f.bytes >= (CNTR_MAX - IF_MAXMTU))) {
			TAILQ_REMOVE(&hsh6->head, fle6, fle6_hash);
			expire_flow(priv, priv_to_fib(priv, fle6->f.r.fib),
			    (struct flow_entry *)fle6, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		} else {
			/*
			 * It is the newest, move it to the tail,
			 * if it isn't there already. Next search will
			 * locate it quicker.
			 */
			if (fle6 != TAILQ_LAST(&hsh6->head, f6head)) {
				TAILQ_REMOVE(&hsh6->head, fle6, fle6_hash);
				TAILQ_INSERT_TAIL(&hsh6->head, fle6, fle6_hash);
			}
		}
	} else				/* A new flow entry. */
		error = hash6_insert(priv, hsh6, &r, plen, tcp_flags);

	mtx_unlock(&hsh6->mtx);

	return (error);
}
#endif

/*
 * Return records from cache to userland.
 *
 * TODO: matching a particular IP should be done in the kernel, here.
 * XXX: IPv6 flows will return random data
 */
int
ng_netflow_flow_show(priv_p priv, uint32_t last, struct ng_mesg *resp)
{
	struct flow_hash_entry	*hsh;
	struct flow_entry	*fle;
	struct ngnf_flows	*data;
	int	i;

	data = (struct ngnf_flows *)resp->data;
	data->last = 0;
	data->nentries = 0;

	/* Check if this is the first run. */
	if (last == 0) {
		hsh = priv->hash;
		i = 0;
	} else {
		if (last > NBUCKETS - 1)
			return (EINVAL);
		hsh = priv->hash + last;
		i = last;
	}

	/*
	 * We will transfer no more than NREC_AT_ONCE records. More data
	 * will come in the next message.
	 * We send the current hash index to userland, and userland should
	 * return it back to us. Then, we will restart with the new entry.
	 *
	 * The resulting cache snapshot is inaccurate for the
	 * following reasons:
	 *  - we skip locked hash entries
	 *  - we bail out, if someone wants our entry
	 *  - we skip the rest of the entries once we hit NREC_AT_ONCE
	 */
	for (; i < NBUCKETS; hsh++, i++) {
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			bcopy(&fle->f, &(data->entries[data->nentries]),
			    sizeof(fle->f));
			data->nentries++;
			if (data->nentries == NREC_AT_ONCE) {
				mtx_unlock(&hsh->mtx);
				if (++i < NBUCKETS)
					data->last = i;
				return (0);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	return (0);
}

/* We have a full datagram. Send it to the export hook. */
static int
export_send(priv_p priv, fib_export_p fe, item_p item, int flags)
{
	struct mbuf *m = NGI_M(item);
	struct netflow_v5_export_dgram *dgram = mtod(m,
					struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct timespec ts;
	int error = 0;

	/* Fill mbuf header. */
	m->m_len = m->m_pkthdr.len = sizeof(struct netflow_v5_record) *
	   header->count + sizeof(struct netflow_v5_header);

	/* Fill export header. */
	header->sys_uptime = htonl(MILLIUPTIME(time_uptime));
	getnanotime(&ts);
	header->unix_secs  = htonl(ts.tv_sec);
	header->unix_nsecs = htonl(ts.tv_nsec);
	header->engine_type = 0;
	header->engine_id = fe->domain_id;
	header->pad = 0;
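	/*
	 * flow_seq is advanced by the record count while it is still in
	 * host byte order; only afterwards is the count byte-swapped.
	 */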
	header->flow_seq = htonl(atomic_fetchadd_32(&fe->flow_seq,
	    header->count));
	header->count = htons(header->count);

	if (priv->export != NULL)
		NG_FWD_ITEM_HOOK_FLAGS(error, item, priv->export, flags);
	else
		NG_FREE_ITEM(item);

	return (error);
}

/* Add export record to dgram. */
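/* Returns 1 once the datagram is full, telling the caller to send it. */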
static int
export_add(item_p item, struct flow_entry *fle)
{
	struct netflow_v5_export_dgram *dgram = mtod(NGI_M(item),
					struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct netflow_v5_record *rec;

	rec = &dgram->r[header->count];
	header->count++;

	KASSERT(header->count <= NETFLOW_V5_MAX_RECORDS,
	    ("ng_netflow: export too big"));

	/* Fill in export record. */
	rec->src_addr = fle->f.r.r_src.s_addr;
	rec->dst_addr = fle->f.r.r_dst.s_addr;
	rec->next_hop = fle->f.next_hop.s_addr;
	rec->i_ifx    = htons(fle->f.fle_i_ifx);
	rec->o_ifx    = htons(fle->f.fle_o_ifx);
	rec->packets  = htonl(fle->f.packets);
	rec->octets   = htonl(fle->f.bytes);
	rec->first    = htonl(MILLIUPTIME(fle->f.first));
	rec->last     = htonl(MILLIUPTIME(fle->f.last));
	rec->s_port   = fle->f.r.r_sport;
	rec->d_port   = fle->f.r.r_dport;
	rec->flags    = fle->f.tcp_flags;
	rec->prot     = fle->f.r.r_ip_p;
	rec->tos      = fle->f.r.r_tos;
	rec->dst_mask = fle->f.dst_mask;
	rec->src_mask = fle->f.src_mask;
	rec->pad1     = 0;
	rec->pad2     = 0;

	/* Unsupported fields. */
	rec->src_as = rec->dst_as = 0;

	if (header->count == NETFLOW_V5_MAX_RECORDS)
		return (1); /* end of datagram */
	else
		return (0);
}

/* Periodic flow expiry run. */
void
ng_netflow_expire(void *arg)
{
	struct flow_entry	*fle, *fle1;
	struct flow_hash_entry	*hsh;
#ifdef INET6
	struct flow6_entry	*fle6, *fle61;
	struct flow6_hash_entry	*hsh6;
#endif
	priv_p			priv = (priv_p)arg;
	uint32_t		used;
	int			i;

	/*
	 * Go through the whole cache.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++) {
		/*
		 * Skip entries that are already being worked on.
		 */
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		used = atomic_load_acq_32(&priv->info.nfinfo_used);
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			/*
			 * Interrupt thread wants this entry!
			 * Quick! Quick! Bail out!
			 */
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			/*
			 * Don't expire aggressively while the hash collision
			 * ratio is predicted small
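			 * (i.e. while the cache averages no more than
			 * two entries per bucket).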
			 */
			if (used <= (NBUCKETS*2) && !INACTIVE(fle))
				break;

			if ((INACTIVE(fle) && (SMALL(fle) ||
			    (used > (NBUCKETS*2)))) || AGED(fle)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				expire_flow(priv, priv_to_fib(priv,
				    fle->f.r.fib), fle, NG_NOFLAGS);
				used--;
				atomic_add_32(&priv->info.nfinfo_inact_exp, 1);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

#ifdef INET6
	for (hsh6 = priv->hash6, i = 0; i < NBUCKETS; hsh6++, i++) {
		/*
		 * Skip entries that are already being worked on.
		 */
		if (mtx_trylock(&hsh6->mtx) == 0)
			continue;

		used = atomic_load_acq_32(&priv->info.nfinfo_used6);
		TAILQ_FOREACH_SAFE(fle6, &hsh6->head, fle6_hash, fle61) {
			/*
			 * Interrupt thread wants this entry!
			 * Quick! Quick! Bail out!
			 */
			if (hsh6->mtx.mtx_lock & MTX_CONTESTED)
				break;

			/*
			 * Don't expire aggressively while the hash collision
			 * ratio is predicted small.
			 */
			if (used <= (NBUCKETS*2) && !INACTIVE(fle6))
				break;

			if ((INACTIVE(fle6) && (SMALL(fle6) ||
			    (used > (NBUCKETS*2)))) || AGED(fle6)) {
				TAILQ_REMOVE(&hsh6->head, fle6, fle6_hash);
				expire_flow(priv, priv_to_fib(priv,
				    fle6->f.r.fib), (struct flow_entry *)fle6,
				    NG_NOFLAGS);
				used--;
				atomic_add_32(&priv->info.nfinfo_inact_exp, 1);
			}
		}
		mtx_unlock(&hsh6->mtx);
	}
#endif

	/* Schedule next expire. */
	callout_reset(&priv->exp_callout, (1*hz), &ng_netflow_expire,
	    (void *)priv);
}