/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010-2011 Alexander V. Chernikov <melifaro@ipfw.ru>
 * Copyright (c) 2004-2005 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2001-2003 Roman V. Palagin <romanp@unshadow.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $SourceForge: netflow.c,v 1.41 2004/09/05 11:41:10 glebius Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_route.h"
#include <sys/param.h>
#include <sys/bitstring.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <vm/uma.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/route.h>
#include <net/route/nhop.h>
#include <net/route/route_ctl.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/in_fib.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <netinet6/in6_fib.h>

#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>

#include <netgraph/netflow/netflow.h>
#include <netgraph/netflow/netflow_v9.h>
#include <netgraph/netflow/ng_netflow.h>

#define	NBUCKETS	(65536)		/* must be power of 2 */

/* This hash is for TCP or UDP packets. */
#define FULL_HASH(addr1, addr2, port1, port2)	\
	(((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) ^		\
	port1 ^ htons(port2)) &			\
	(NBUCKETS - 1))
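
/*
 * NBUCKETS is a power of two, so the final "& (NBUCKETS - 1)" above
 * reduces the mixed value modulo NBUCKETS.  The htons() applied to the
 * second address and port byte-swaps only one side of the pair,
 * presumably so that a flow and its reverse direction do not land in
 * the same bucket by construction.
 */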

/* This hash is for all other IP packets. */
#define ADDR_HASH(addr1, addr2)			\
	((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) &		\
	(NBUCKETS - 1))

/* Macros to shorten logical constructions */
/* XXX: priv must exist in namespace */
#define	INACTIVE(fle)	(time_uptime - fle->f.last > priv->nfinfo_inact_t)
#define	AGED(fle)	(time_uptime - fle->f.first > priv->nfinfo_act_t)
#define	ISFREE(fle)	(fle->f.packets == 0)

/*
 * 4 is a magical number: statistically, the number of 4-packet flows is
 * bigger than the number of 5-, 6-, 7-...-packet flows by an order of
 * magnitude.  Most UDP/ICMP scans are 1 packet (~90% of the flow cache).
 * TCP scans are 2 packets in case of a reachable host and 4 packets
 * otherwise.
 */
#define	SMALL(fle)	(fle->f.packets <= 4)

MALLOC_DEFINE(M_NETFLOW_HASH, "netflow_hash", "NetFlow hash");

static int export_add(item_p, struct flow_entry *);
static int export_send(priv_p, fib_export_p, item_p, int);

#ifdef INET
static int hash_insert(priv_p, struct flow_hash_entry *, struct flow_rec *,
    int, uint8_t, uint8_t);
#endif
#ifdef INET6
static int hash6_insert(priv_p, struct flow_hash_entry *, struct flow6_rec *,
    int, uint8_t, uint8_t);
#endif

static void expire_flow(priv_p, fib_export_p, struct flow_entry *, int);

#ifdef INET
/*
 * Generate hash for a given flow record.
 *
 * FIB is not used here, because:
 * most VRFs will carry public IPv4 addresses, which are unique even
 * without the FIB; private addresses can overlap, but this is worked out
 * via the flow_rec bcmp(), which includes the fib id.  In the IPv6 world
 * addresses are all globally unique (not fully true, there is FC00::/7
 * for example, but the chances of address overlap are MUCH smaller).
 */
static inline uint32_t
ip_hash(struct flow_rec *r)
{

	switch (r->r_ip_p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return FULL_HASH(r->r_src.s_addr, r->r_dst.s_addr,
		    r->r_sport, r->r_dport);
	default:
		return ADDR_HASH(r->r_src.s_addr, r->r_dst.s_addr);
	}
}
#endif

#ifdef INET6
/* Generate hash for a given flow6 record.  Use the lower 4 octets of the v6 addresses. */
static inline uint32_t
ip6_hash(struct flow6_rec *r)
{

	switch (r->r_ip_p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return FULL_HASH(r->src.r_src6.__u6_addr.__u6_addr32[3],
		    r->dst.r_dst6.__u6_addr.__u6_addr32[3], r->r_sport,
		    r->r_dport);
	default:
		return ADDR_HASH(r->src.r_src6.__u6_addr.__u6_addr32[3],
		    r->dst.r_dst6.__u6_addr.__u6_addr32[3]);
	}
}
#endif

/*
 * Detach export datagram from priv, if there is any.
 * If there is none, allocate a new one.
 */
static item_p
get_export_dgram(priv_p priv, fib_export_p fe)
{
	item_p item = NULL;

	mtx_lock(&fe->export_mtx);
	if (fe->exp.item != NULL) {
		item = fe->exp.item;
		fe->exp.item = NULL;
	}
	mtx_unlock(&fe->export_mtx);

	if (item == NULL) {
		struct netflow_v5_export_dgram *dgram;
		struct mbuf *m;

		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (NULL);
		item = ng_package_data(m, NG_NOFLAGS);
		if (item == NULL)
			return (NULL);
		dgram = mtod(m, struct netflow_v5_export_dgram *);
		dgram->header.count = 0;
		dgram->header.version = htons(NETFLOW_V5);
		dgram->header.pad = 0;
	}

	return (item);
}
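
/*
 * A complete v5 datagram is a 24-byte header followed by up to
 * NETFLOW_V5_MAX_RECORDS 48-byte records (1464 bytes with the usual
 * limit of 30 records), so it always fits into the single mbuf cluster
 * allocated above and no buffer growth is needed while records are
 * appended.
 */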

/*
 * Re-attach incomplete datagram back to priv.
 * If another one is already there, send the incomplete one.
 */
static void
return_export_dgram(priv_p priv, fib_export_p fe, item_p item, int flags)
{

	/*
	 * It may happen on SMP that some thread has already
	 * put its item there; in this case we bail out and
	 * send what we have to the collector.
	 */
	mtx_lock(&fe->export_mtx);
	if (fe->exp.item == NULL) {
		fe->exp.item = item;
		mtx_unlock(&fe->export_mtx);
	} else {
		mtx_unlock(&fe->export_mtx);
		export_send(priv, fe, item, flags);
	}
}

/*
 * The flow is over.  Call export_add() and free it.  If the datagram is
 * full, then call export_send().
 */
static void
expire_flow(priv_p priv, fib_export_p fe, struct flow_entry *fle, int flags)
{
	struct netflow_export_item exp;
	uint16_t version = fle->f.version;

	if ((priv->export != NULL) && (version == IPVERSION)) {
		exp.item = get_export_dgram(priv, fe);
		if (exp.item == NULL) {
			priv->nfinfo_export_failed++;
			if (priv->export9 != NULL)
				priv->nfinfo_export9_failed++;
			/* fle definitely contains IPv4 flow. */
			uma_zfree_arg(priv->zone, fle, priv);
			return;
		}

		if (export_add(exp.item, fle) > 0)
			export_send(priv, fe, exp.item, flags);
		else
			return_export_dgram(priv, fe, exp.item, NG_QUEUE);
	}

	if (priv->export9 != NULL) {
		exp.item9 = get_export9_dgram(priv, fe, &exp.item9_opt);
		if (exp.item9 == NULL) {
			priv->nfinfo_export9_failed++;
			if (version == IPVERSION)
				uma_zfree_arg(priv->zone, fle, priv);
#ifdef INET6
			else if (version == IP6VERSION)
				uma_zfree_arg(priv->zone6, fle, priv);
#endif
			else
				panic("ng_netflow: Unknown IP proto: %d",
				    version);
			return;
		}

		if (export9_add(exp.item9, exp.item9_opt, fle) > 0)
			export9_send(priv, fe, exp.item9, exp.item9_opt,
			    flags);
		else
			return_export9_dgram(priv, fe, exp.item9,
			    exp.item9_opt, NG_QUEUE);
	}

	if (version == IPVERSION)
		uma_zfree_arg(priv->zone, fle, priv);
#ifdef INET6
	else if (version == IP6VERSION)
		uma_zfree_arg(priv->zone6, fle, priv);
#endif
}

/* Get a snapshot of node statistics */
void
ng_netflow_copyinfo(priv_p priv, struct ng_netflow_info *i)
{

	i->nfinfo_bytes = counter_u64_fetch(priv->nfinfo_bytes);
	i->nfinfo_packets = counter_u64_fetch(priv->nfinfo_packets);
	i->nfinfo_bytes6 = counter_u64_fetch(priv->nfinfo_bytes6);
	i->nfinfo_packets6 = counter_u64_fetch(priv->nfinfo_packets6);
	i->nfinfo_sbytes = counter_u64_fetch(priv->nfinfo_sbytes);
	i->nfinfo_spackets = counter_u64_fetch(priv->nfinfo_spackets);
	i->nfinfo_sbytes6 = counter_u64_fetch(priv->nfinfo_sbytes6);
	i->nfinfo_spackets6 = counter_u64_fetch(priv->nfinfo_spackets6);
	i->nfinfo_act_exp = counter_u64_fetch(priv->nfinfo_act_exp);
	i->nfinfo_inact_exp = counter_u64_fetch(priv->nfinfo_inact_exp);

	i->nfinfo_used = uma_zone_get_cur(priv->zone);
#ifdef INET6
	i->nfinfo_used6 = uma_zone_get_cur(priv->zone6);
#endif

	i->nfinfo_alloc_failed = priv->nfinfo_alloc_failed;
	i->nfinfo_export_failed = priv->nfinfo_export_failed;
	i->nfinfo_export9_failed = priv->nfinfo_export9_failed;
	i->nfinfo_realloc_mbuf = priv->nfinfo_realloc_mbuf;
	i->nfinfo_alloc_fibs = priv->nfinfo_alloc_fibs;
	i->nfinfo_inact_t = priv->nfinfo_inact_t;
	i->nfinfo_act_t = priv->nfinfo_act_t;
}
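
/*
 * counter_u64_fetch() sums the per-CPU counter values without stopping
 * updaters, so the snapshot above is not atomic across fields: values
 * read while traffic is flowing may be mutually inconsistent by a few
 * packets.  That is acceptable for statistics reporting.
 */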

/*
 * Insert a record into defined slot.
 *
 * First we get a free flow entry, then fill in all
 * the fields we can.
 *
 * TODO: consider dropping hash mutex while filling in datagram,
 * as this was done in previous version.  Need to test & profile
 * to be sure.
 */
#ifdef INET
static int
hash_insert(priv_p priv, struct flow_hash_entry *hsh, struct flow_rec *r,
    int plen, uint8_t flags, uint8_t tcp_flags)
{
	struct flow_entry *fle;

	mtx_assert(&hsh->mtx, MA_OWNED);

	fle = uma_zalloc_arg(priv->zone, priv, M_NOWAIT);
	if (fle == NULL) {
		priv->nfinfo_alloc_failed++;
		return (ENOMEM);
	}

	/*
	 * Now fle is totally ours.  It is detached from all lists,
	 * we can safely edit it.
	 */
	fle->f.version = IPVERSION;
	bcopy(r, &fle->f.r, sizeof(struct flow_rec));
	fle->f.bytes = plen;
	fle->f.packets = 1;
	fle->f.tcp_flags = tcp_flags;

	fle->f.first = fle->f.last = time_uptime;

	/*
	 * First we do a route table lookup on the destination address,
	 * so we can fill in out_ifx, dst_mask, nexthop, and (in future
	 * releases) dst_as.
	 */
	if ((flags & NG_NETFLOW_CONF_NODSTLOOKUP) == 0) {
		struct rtentry *rt;
		struct route_nhop_data rnd;

		rt = fib4_lookup_rt(r->fib, fle->f.r.r_dst, 0, NHR_NONE, &rnd);
		if (rt != NULL) {
			struct in_addr addr;
			uint32_t scopeid;
			struct nhop_object *nh = nhop_select_func(rnd.rnd_nhop, 0);
			int plen;

			rt_get_inet_prefix_plen(rt, &addr, &plen, &scopeid);
			fle->f.fle_o_ifx = nh->nh_ifp->if_index;
			if (nh->gw_sa.sa_family == AF_INET)
				fle->f.next_hop = nh->gw4_sa.sin_addr;
			/*
			 * XXX we're leaving an empty gateway here for
			 * IPv6 nexthops.
			 */
			fle->f.dst_mask = plen;
		}
	}

	/* Do route lookup on source address, to fill in src_mask. */
	if ((flags & NG_NETFLOW_CONF_NOSRCLOOKUP) == 0) {
		struct rtentry *rt;
		struct route_nhop_data rnd;

		rt = fib4_lookup_rt(r->fib, fle->f.r.r_src, 0, NHR_NONE, &rnd);
		if (rt != NULL) {
			struct in_addr addr;
			uint32_t scopeid;
			int plen;

			rt_get_inet_prefix_plen(rt, &addr, &plen, &scopeid);
			fle->f.src_mask = plen;
		}
	}

	/* Push the new flow to the end of the hash chain. */
	TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);

	return (0);
}
#endif
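
/*
 * fib4_lookup_rt() and fib6_lookup_rt() return an rtentry that is only
 * valid inside the network epoch, and the netgraph data path that calls
 * hash_insert()/hash6_insert() is presumed to already run in epoch
 * context, which is why no rtentry reference counting is done here.
 */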

#ifdef INET6
static int
hash6_insert(priv_p priv, struct flow_hash_entry *hsh6, struct flow6_rec *r,
    int plen, uint8_t flags, uint8_t tcp_flags)
{
	struct flow6_entry *fle6;

	mtx_assert(&hsh6->mtx, MA_OWNED);

	fle6 = uma_zalloc_arg(priv->zone6, priv, M_NOWAIT);
	if (fle6 == NULL) {
		priv->nfinfo_alloc_failed++;
		return (ENOMEM);
	}

	/*
	 * Now fle6 is totally ours.  It is detached from all lists,
	 * we can safely edit it.
	 */
	fle6->f.version = IP6VERSION;
	bcopy(r, &fle6->f.r, sizeof(struct flow6_rec));
	fle6->f.bytes = plen;
	fle6->f.packets = 1;
	fle6->f.tcp_flags = tcp_flags;

	fle6->f.first = fle6->f.last = time_uptime;

	/*
	 * First we do a route table lookup on the destination address,
	 * so we can fill in out_ifx, dst_mask, nexthop, and (in future
	 * releases) dst_as.
	 */
	if ((flags & NG_NETFLOW_CONF_NODSTLOOKUP) == 0) {
		struct rtentry *rt;
		struct route_nhop_data rnd;

		rt = fib6_lookup_rt(r->fib, &fle6->f.r.dst.r_dst6, 0,
		    NHR_NONE, &rnd);
		if (rt != NULL) {
			struct in6_addr addr;
			uint32_t scopeid;
			struct nhop_object *nh = nhop_select_func(rnd.rnd_nhop, 0);
			int plen;

			rt_get_inet6_prefix_plen(rt, &addr, &plen, &scopeid);
			fle6->f.fle_o_ifx = nh->nh_ifp->if_index;
			if (nh->gw_sa.sa_family == AF_INET6)
				fle6->f.n.next_hop6 = nh->gw6_sa.sin6_addr;
			fle6->f.dst_mask = plen;
		}
	}

	if ((flags & NG_NETFLOW_CONF_NOSRCLOOKUP) == 0) {
		/* Do route lookup on source address, to fill in src_mask. */
		struct rtentry *rt;
		struct route_nhop_data rnd;

		rt = fib6_lookup_rt(r->fib, &fle6->f.r.src.r_src6, 0,
		    NHR_NONE, &rnd);
		if (rt != NULL) {
			struct in6_addr addr;
			uint32_t scopeid;
			int plen;

			rt_get_inet6_prefix_plen(rt, &addr, &plen, &scopeid);
			fle6->f.src_mask = plen;
		}
	}

	/* Push the new flow to the end of the hash chain. */
	TAILQ_INSERT_TAIL(&hsh6->head, (struct flow_entry *)fle6, fle_hash);

	return (0);
}
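
/*
 * The (struct flow_entry *) cast above is safe because struct
 * flow6_entry is declared with the same leading fle_hash linkage as
 * struct flow_entry (see ng_netflow.h), so both kinds of entries can
 * live on the same TAILQ.
 */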
#endif

/*
 * Non-static functions called from ng_netflow.c.
 */

/* Allocate memory and set up flow cache */
void
ng_netflow_cache_init(priv_p priv)
{
	struct flow_hash_entry *hsh;
	int i;

	/* Initialize cache UMA zone. */
	priv->zone = uma_zcreate("NetFlow IPv4 cache",
	    sizeof(struct flow_entry), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);
	uma_zone_set_max(priv->zone, CACHESIZE);
#ifdef INET6
	priv->zone6 = uma_zcreate("NetFlow IPv6 cache",
	    sizeof(struct flow6_entry), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);
	uma_zone_set_max(priv->zone6, CACHESIZE);
#endif

	/* Allocate hash. */
	priv->hash = malloc(NBUCKETS * sizeof(struct flow_hash_entry),
	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);

	/* Initialize hash. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++) {
		mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
		TAILQ_INIT(&hsh->head);
	}

#ifdef INET6
	/* Allocate hash. */
	priv->hash6 = malloc(NBUCKETS * sizeof(struct flow_hash_entry),
	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);

	/* Initialize hash. */
	for (i = 0, hsh = priv->hash6; i < NBUCKETS; i++, hsh++) {
		mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
		TAILQ_INIT(&hsh->head);
	}
#endif

	priv->nfinfo_bytes = counter_u64_alloc(M_WAITOK);
	priv->nfinfo_packets = counter_u64_alloc(M_WAITOK);
	priv->nfinfo_bytes6 = counter_u64_alloc(M_WAITOK);
	priv->nfinfo_packets6 = counter_u64_alloc(M_WAITOK);
	priv->nfinfo_sbytes = counter_u64_alloc(M_WAITOK);
	priv->nfinfo_spackets = counter_u64_alloc(M_WAITOK);
	priv->nfinfo_sbytes6 = counter_u64_alloc(M_WAITOK);
	priv->nfinfo_spackets6 = counter_u64_alloc(M_WAITOK);
	priv->nfinfo_act_exp = counter_u64_alloc(M_WAITOK);
	priv->nfinfo_inact_exp = counter_u64_alloc(M_WAITOK);

	ng_netflow_v9_cache_init(priv);
	CTR0(KTR_NET, "ng_netflow startup()");
}

/* Initialize new FIB table for v5 and v9 */
int
ng_netflow_fib_init(priv_p priv, int fib)
{
	fib_export_p fe = priv_to_fib(priv, fib);

	CTR1(KTR_NET, "ng_netflow(): fib init: %d", fib);

	if (fe != NULL)
		return (0);

	if ((fe = malloc(sizeof(struct fib_export), M_NETGRAPH,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	mtx_init(&fe->export_mtx, "export dgram lock", NULL, MTX_DEF);
	mtx_init(&fe->export9_mtx, "export9 dgram lock", NULL, MTX_DEF);
	fe->fib = fib;
	fe->domain_id = fib;

	if (atomic_cmpset_ptr((volatile uintptr_t *)&priv->fib_data[fib],
	    (uintptr_t)NULL, (uintptr_t)fe) == 0) {
		/* FIB already set up by another ISR. */
		CTR3(KTR_NET, "ng_netflow(): fib init: %d setup %p but got %p",
		    fib, fe, priv_to_fib(priv, fib));
		mtx_destroy(&fe->export_mtx);
		mtx_destroy(&fe->export9_mtx);
		free(fe, M_NETGRAPH);
	} else {
		/* Increase counter for statistics */
		CTR3(KTR_NET, "ng_netflow(): fib %d setup to %p (%p)",
		    fib, fe, priv_to_fib(priv, fib));
		priv->nfinfo_alloc_fibs++;
	}

	return (0);
}
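
/*
 * Each FIB gets its own export state: domain_id is used as the v5
 * engine_id (see export_send() below) and, presumably, as the v9
 * observation domain, so flows from different FIBs are never mixed in
 * one datagram.  The atomic_cmpset_ptr() publication above lets
 * concurrent data-path threads race to initialize the same FIB; the
 * loser simply frees its copy.
 */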

/* Free all flow cache memory.  Called from the node close method. */
void
ng_netflow_cache_flush(priv_p priv)
{
	struct flow_entry *fle, *fle1;
	struct flow_hash_entry *hsh;
	struct netflow_export_item exp;
	fib_export_p fe;
	int i;

	bzero(&exp, sizeof(exp));

	/*
	 * We are going to free probably billable data.
	 * Expire everything before freeing it.
	 * No locking is required since the callout is already drained.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++)
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			fe = priv_to_fib(priv, fle->f.r.fib);
			expire_flow(priv, fe, fle, NG_QUEUE);
		}
#ifdef INET6
	for (hsh = priv->hash6, i = 0; i < NBUCKETS; hsh++, i++)
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			fe = priv_to_fib(priv, fle->f.r.fib);
			expire_flow(priv, fe, fle, NG_QUEUE);
		}
#endif

	uma_zdestroy(priv->zone);
	/* Destroy hash mutexes. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++)
		mtx_destroy(&hsh->mtx);

	/* Free hash memory. */
	if (priv->hash != NULL)
		free(priv->hash, M_NETFLOW_HASH);
#ifdef INET6
	uma_zdestroy(priv->zone6);
	/* Destroy hash mutexes. */
	for (i = 0, hsh = priv->hash6; i < NBUCKETS; i++, hsh++)
		mtx_destroy(&hsh->mtx);

	/* Free hash memory. */
	if (priv->hash6 != NULL)
		free(priv->hash6, M_NETFLOW_HASH);
#endif

	for (i = 0; i < priv->maxfibs; i++) {
		if ((fe = priv_to_fib(priv, i)) == NULL)
			continue;

		if (fe->exp.item != NULL)
			export_send(priv, fe, fe->exp.item, NG_QUEUE);

		if (fe->exp.item9 != NULL)
			export9_send(priv, fe, fe->exp.item9,
			    fe->exp.item9_opt, NG_QUEUE);

		mtx_destroy(&fe->export_mtx);
		mtx_destroy(&fe->export9_mtx);
		free(fe, M_NETGRAPH);
	}

	counter_u64_free(priv->nfinfo_bytes);
	counter_u64_free(priv->nfinfo_packets);
	counter_u64_free(priv->nfinfo_bytes6);
	counter_u64_free(priv->nfinfo_packets6);
	counter_u64_free(priv->nfinfo_sbytes);
	counter_u64_free(priv->nfinfo_spackets);
	counter_u64_free(priv->nfinfo_sbytes6);
	counter_u64_free(priv->nfinfo_spackets6);
	counter_u64_free(priv->nfinfo_act_exp);
	counter_u64_free(priv->nfinfo_inact_exp);

	ng_netflow_v9_cache_flush(priv);
}
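
/*
 * Locking model for the data path below: each hash bucket has its own
 * mutex, so concurrent threads contend only when their packets hash
 * into the same bucket.  The periodic expiry callout uses mtx_trylock()
 * and backs off rather than blocking packet processing.
 */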

#ifdef INET
/* Insert a packet into the flow cache. */
int
ng_netflow_flow_add(priv_p priv, fib_export_p fe, struct ip *ip,
    caddr_t upper_ptr, uint8_t upper_proto, uint8_t flags,
    unsigned int src_if_index)
{
	struct flow_entry *fle, *fle1;
	struct flow_hash_entry *hsh;
	struct flow_rec r;
	int hlen, plen;
	int error = 0;
	uint8_t tcp_flags = 0;

	bzero(&r, sizeof(r));

	if (ip->ip_v != IPVERSION)
		return (EINVAL);

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip))
		return (EINVAL);

	/* Assume L4 template by default. */
	r.flow_type = NETFLOW_V9_FLOW_V4_L4;

	r.r_src = ip->ip_src;
	r.r_dst = ip->ip_dst;
	r.fib = fe->fib;

	plen = ntohs(ip->ip_len);

	r.r_ip_p = ip->ip_p;
	r.r_tos = ip->ip_tos;

	r.r_i_ifx = src_if_index;

	/*
	 * XXX NOTE: only the first fragment of a fragmented TCP, UDP or
	 * ICMP packet will be recorded with proper s_port and d_port.
	 * Following fragments will be recorded simply as an IP packet with
	 * ip_proto = ip->ip_p and s_port, d_port set to zero.
	 * I know, it looks like a bug.  But I don't want to re-implement
	 * IP packet assembling here.  Anyway, the (in)famous trafd works
	 * this way, and nobody has complained yet :)
	 */
	if ((ip->ip_off & htons(IP_OFFMASK)) == 0)
		switch (r.r_ip_p) {
		case IPPROTO_TCP:
		    {
			struct tcphdr *tcp;

			tcp = (struct tcphdr *)((caddr_t)ip + hlen);
			r.r_sport = tcp->th_sport;
			r.r_dport = tcp->th_dport;
			tcp_flags = tcp->th_flags;
			break;
		    }
		case IPPROTO_UDP:
			r.r_ports = *(uint32_t *)((caddr_t)ip + hlen);
			break;
		}

	counter_u64_add(priv->nfinfo_packets, 1);
	counter_u64_add(priv->nfinfo_bytes, plen);

	/* Find hash slot. */
	hsh = &priv->hash[ip_hash(&r)];

	mtx_lock(&hsh->mtx);

	/*
	 * Go through the hash and find our entry.  If we encounter an
	 * entry that should be expired, purge it.  We do a reverse
	 * search since most active entries are first, and most
	 * searches are done on most active entries.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
		if (bcmp(&r, &fle->f.r, sizeof(struct flow_rec)) == 0)
			break;
		if ((INACTIVE(fle) && SMALL(fle)) || AGED(fle)) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib),
			    fle, NG_QUEUE);
			counter_u64_add(priv->nfinfo_act_exp, 1);
		}
	}

	if (fle) {			/* An existing entry. */
		fle->f.bytes += plen;
		fle->f.packets++;
		fle->f.tcp_flags |= tcp_flags;
		fle->f.last = time_uptime;

		/*
		 * We have the following reasons to expire flow in active way:
		 * - it hit active timeout
		 * - a TCP connection closed
		 * - it is going to overflow counter
		 */
		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle) ||
		    (fle->f.bytes >= (CNTR_MAX - IF_MAXMTU))) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib),
			    fle, NG_QUEUE);
			counter_u64_add(priv->nfinfo_act_exp, 1);
		} else {
			/*
			 * It is the newest, move it to the tail,
			 * if it isn't there already.  Next search will
			 * locate it quicker.
			 */
			if (fle != TAILQ_LAST(&hsh->head, fhead)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
			}
		}
	} else				/* A new flow entry. */
		error = hash_insert(priv, hsh, &r, plen, flags, tcp_flags);

	mtx_unlock(&hsh->mtx);

	return (error);
}
#endif
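
/*
 * The CNTR_MAX - IF_MAXMTU test above (and its IPv6 twin below) expires
 * a flow before its byte counter can overflow: subtracting the maximum
 * MTU leaves headroom so that one more maximum-sized packet can still
 * be accounted for safely before the flow is exported.
 */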

#ifdef INET6
/* Insert an IPv6 packet into the flow cache. */
int
ng_netflow_flow6_add(priv_p priv, fib_export_p fe, struct ip6_hdr *ip6,
    caddr_t upper_ptr, uint8_t upper_proto, uint8_t flags,
    unsigned int src_if_index)
{
	struct flow_entry *fle = NULL, *fle1;
	struct flow6_entry *fle6;
	struct flow_hash_entry *hsh;
	struct flow6_rec r;
	int plen;
	int error = 0;
	uint8_t tcp_flags = 0;

	/* Check version. */
	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION)
		return (EINVAL);

	bzero(&r, sizeof(r));

	r.src.r_src6 = ip6->ip6_src;
	r.dst.r_dst6 = ip6->ip6_dst;
	r.fib = fe->fib;

	/* Assume L4 template by default. */
	r.flow_type = NETFLOW_V9_FLOW_V6_L4;

	plen = ntohs(ip6->ip6_plen) + sizeof(struct ip6_hdr);

#if 0
	/* XXX: set DSCP/CoS value. */
	r.r_tos = ip->ip_tos;
#endif
	if ((flags & NG_NETFLOW_IS_FRAG) == 0) {
		switch (upper_proto) {
		case IPPROTO_TCP:
		    {
			struct tcphdr *tcp;

			tcp = (struct tcphdr *)upper_ptr;
			r.r_ports = *(uint32_t *)upper_ptr;
			tcp_flags = tcp->th_flags;
			break;
		    }
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
			r.r_ports = *(uint32_t *)upper_ptr;
			break;
		}
	}

	r.r_ip_p = upper_proto;
	r.r_i_ifx = src_if_index;

	counter_u64_add(priv->nfinfo_packets6, 1);
	counter_u64_add(priv->nfinfo_bytes6, plen);

	/* Find hash slot. */
	hsh = &priv->hash6[ip6_hash(&r)];

	mtx_lock(&hsh->mtx);

	/*
	 * Go through the hash and find our entry.  If we encounter an
	 * entry that should be expired, purge it.  We do a reverse
	 * search since most active entries are first, and most
	 * searches are done on most active entries.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
		if (fle->f.version != IP6VERSION)
			continue;
		fle6 = (struct flow6_entry *)fle;
		if (bcmp(&r, &fle6->f.r, sizeof(struct flow6_rec)) == 0)
			break;
		if ((INACTIVE(fle6) && SMALL(fle6)) || AGED(fle6)) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib), fle,
			    NG_QUEUE);
			counter_u64_add(priv->nfinfo_act_exp, 1);
		}
	}

	if (fle != NULL) {		/* An existing entry. */
		fle6 = (struct flow6_entry *)fle;

		fle6->f.bytes += plen;
		fle6->f.packets++;
		fle6->f.tcp_flags |= tcp_flags;
		fle6->f.last = time_uptime;

		/*
		 * We have the following reasons to expire flow in active way:
		 * - it hit active timeout
		 * - a TCP connection closed
		 * - it is going to overflow counter
		 */
		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle6) ||
		    (fle6->f.bytes >= (CNTR_MAX - IF_MAXMTU))) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, priv_to_fib(priv, fle->f.r.fib), fle,
			    NG_QUEUE);
			counter_u64_add(priv->nfinfo_act_exp, 1);
		} else {
			/*
			 * It is the newest, move it to the tail,
			 * if it isn't there already.  Next search will
			 * locate it quicker.
			 */
			if (fle != TAILQ_LAST(&hsh->head, fhead)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
			}
		}
	} else				/* A new flow entry. */
		error = hash6_insert(priv, hsh, &r, plen, flags, tcp_flags);

	mtx_unlock(&hsh->mtx);

	return (error);
}
#endif
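
/*
 * Grabbing both ports with a single 32-bit load above works because the
 * source and destination ports are the first two 16-bit fields in the
 * TCP, UDP and SCTP headers alike, and r_ports overlays r_sport and
 * r_dport in the flow record (see the definitions in ng_netflow.h).
 */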

/*
 * Return records from cache to userland.
 *
 * TODO: matching of a particular IP should be done in the kernel, here.
 */
int
ng_netflow_flow_show(priv_p priv, struct ngnf_show_header *req,
    struct ngnf_show_header *resp)
{
	struct flow_hash_entry *hsh;
	struct flow_entry *fle;
	struct flow_entry_data *data = (struct flow_entry_data *)(resp + 1);
#ifdef INET6
	struct flow6_entry_data *data6 = (struct flow6_entry_data *)(resp + 1);
#endif
	int i, max;

	i = req->hash_id;
	if (i > NBUCKETS - 1)
		return (EINVAL);

#ifdef INET6
	if (req->version == 6) {
		resp->version = 6;
		hsh = priv->hash6 + i;
		max = NREC6_AT_ONCE;
	} else
#endif
	if (req->version == 4) {
		resp->version = 4;
		hsh = priv->hash + i;
		max = NREC_AT_ONCE;
	} else
		return (EINVAL);

	/*
	 * We will transfer not more than NREC_AT_ONCE.  More data
	 * will come in the next message.
	 * We send the current hash index and the current record number
	 * in the list to userland, and userland should return them back
	 * to us.  Then, we will restart with the new entry.
	 *
	 * The resulting cache snapshot can be inaccurate if flow expiration
	 * is taking place on a hash item between userland data requests for
	 * this hash item id.
	 */
	resp->nentries = 0;
	for (; i < NBUCKETS; hsh++, i++) {
		int list_id;

		if (mtx_trylock(&hsh->mtx) == 0) {
			/*
			 * Requested hash index is not available;
			 * relay the decision to skip it or to re-request
			 * the data to userland.
			 */
			resp->hash_id = i;
			resp->list_id = 0;
			return (0);
		}

		list_id = 0;
		TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
			if (hsh->mtx.mtx_lock & MTX_CONTESTED) {
				resp->hash_id = i;
				resp->list_id = list_id;
				mtx_unlock(&hsh->mtx);
				return (0);
			}

			list_id++;
			/* Search for particular record in list. */
			if (req->list_id > 0) {
				if (list_id < req->list_id)
					continue;

				/* Requested list position found. */
				req->list_id = 0;
			}
#ifdef INET6
			if (req->version == 6) {
				struct flow6_entry *fle6;

				fle6 = (struct flow6_entry *)fle;
				bcopy(&fle6->f, data6 + resp->nentries,
				    sizeof(fle6->f));
			} else
#endif
				bcopy(&fle->f, data + resp->nentries,
				    sizeof(fle->f));
			resp->nentries++;
			if (resp->nentries == max) {
				resp->hash_id = i;
				/*
				 * If it was the last item in the list
				 * we simply skip to the next hash_id.
				 */
				resp->list_id = list_id + 1;
				mtx_unlock(&hsh->mtx);
				return (0);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	resp->hash_id = resp->list_id = 0;

	return (0);
}

/* We have a full datagram in privdata.  Send it to the export hook. */
static int
export_send(priv_p priv, fib_export_p fe, item_p item, int flags)
{
	struct mbuf *m = NGI_M(item);
	struct netflow_v5_export_dgram *dgram = mtod(m,
	    struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct timespec ts;
	int error = 0;

	/* Fill mbuf header. */
	m->m_len = m->m_pkthdr.len = sizeof(struct netflow_v5_record) *
	    header->count + sizeof(struct netflow_v5_header);

	/* Fill export header. */
	header->sys_uptime = htonl(MILLIUPTIME(time_uptime));
	getnanotime(&ts);
	header->unix_secs = htonl(ts.tv_sec);
	header->unix_nsecs = htonl(ts.tv_nsec);
	header->engine_type = 0;
	header->engine_id = fe->domain_id;
	header->pad = 0;
	header->flow_seq = htonl(atomic_fetchadd_32(&fe->flow_seq,
	    header->count));
	header->count = htons(header->count);

	if (priv->export != NULL)
		NG_FWD_ITEM_HOOK_FLAGS(error, item, priv->export, flags);
	else
		NG_FREE_ITEM(item);

	return (error);
}

/* Add export record to dgram. */
static int
export_add(item_p item, struct flow_entry *fle)
{
	struct netflow_v5_export_dgram *dgram = mtod(NGI_M(item),
	    struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct netflow_v5_record *rec;

	rec = &dgram->r[header->count];
	header->count++;

	KASSERT(header->count <= NETFLOW_V5_MAX_RECORDS,
	    ("ng_netflow: export too big"));

	/* Fill in export record. */
	rec->src_addr = fle->f.r.r_src.s_addr;
	rec->dst_addr = fle->f.r.r_dst.s_addr;
	rec->next_hop = fle->f.next_hop.s_addr;
	rec->i_ifx = htons(fle->f.fle_i_ifx);
	rec->o_ifx = htons(fle->f.fle_o_ifx);
	rec->packets = htonl(fle->f.packets);
	rec->octets = htonl(fle->f.bytes);
	rec->first = htonl(MILLIUPTIME(fle->f.first));
	rec->last = htonl(MILLIUPTIME(fle->f.last));
	rec->s_port = fle->f.r.r_sport;
	rec->d_port = fle->f.r.r_dport;
	rec->flags = fle->f.tcp_flags;
	rec->prot = fle->f.r.r_ip_p;
	rec->tos = fle->f.r.r_tos;
	rec->dst_mask = fle->f.dst_mask;
	rec->src_mask = fle->f.src_mask;
	rec->pad1 = 0;
	rec->pad2 = 0;

	/* Unsupported fields. */
	rec->src_as = rec->dst_as = 0;

	if (header->count == NETFLOW_V5_MAX_RECORDS)
		return (1);		/* End of datagram. */
	else
		return (0);
}
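
/*
 * flow_seq above follows the NetFlow v5 convention: the header carries
 * the sequence number of the first flow in this datagram, so the
 * pre-increment value returned by atomic_fetchadd_32() is exported and
 * the counter advances by the number of records sent.  A collector can
 * detect lost datagrams from gaps in this sequence.
 */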

/* Periodic flow expiry run. */
void
ng_netflow_expire(void *arg)
{
	struct flow_entry *fle, *fle1;
	struct flow_hash_entry *hsh;
	priv_p priv = (priv_p)arg;
	int used, i;

	/*
	 * Go through the whole cache.
	 */
	used = uma_zone_get_cur(priv->zone);
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++) {
		/*
		 * Skip buckets that are already being worked on.
		 */
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			/*
			 * Interrupt thread wants this entry!
			 * Quick! Quick! Bail out!
			 */
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			/*
			 * Don't expire aggressively while the predicted
			 * hash collision ratio is small: used <= 2 * NBUCKETS
			 * means the average chain length is at most two.
			 */
			if (used <= (NBUCKETS*2) && !INACTIVE(fle))
				break;

			if ((INACTIVE(fle) && (SMALL(fle) ||
			    (used > (NBUCKETS*2)))) || AGED(fle)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				expire_flow(priv, priv_to_fib(priv,
				    fle->f.r.fib), fle, NG_NOFLAGS);
				used--;
				counter_u64_add(priv->nfinfo_inact_exp, 1);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

#ifdef INET6
	used = uma_zone_get_cur(priv->zone6);
	for (hsh = priv->hash6, i = 0; i < NBUCKETS; hsh++, i++) {
		struct flow6_entry *fle6;

		/*
		 * Skip buckets that are already being worked on.
		 */
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			fle6 = (struct flow6_entry *)fle;
			/*
			 * Interrupt thread wants this entry!
			 * Quick! Quick! Bail out!
			 */
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			/*
			 * Don't expire aggressively while the predicted
			 * hash collision ratio is small: used <= 2 * NBUCKETS
			 * means the average chain length is at most two.
			 */
			if (used <= (NBUCKETS*2) && !INACTIVE(fle6))
				break;

			if ((INACTIVE(fle6) && (SMALL(fle6) ||
			    (used > (NBUCKETS*2)))) || AGED(fle6)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				expire_flow(priv, priv_to_fib(priv,
				    fle->f.r.fib), fle, NG_NOFLAGS);
				used--;
				counter_u64_add(priv->nfinfo_inact_exp, 1);
			}
		}
		mtx_unlock(&hsh->mtx);
	}
#endif

	/* Schedule next expire. */
	callout_reset(&priv->exp_callout, (1*hz), &ng_netflow_expire,
	    (void *)priv);
}