/*-
 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define	DEB(x)
#define	DDB(x) x

/*
 * Dynamic rule support for ipfw
 */

#include "opt_ipfw.h"
#include "opt_inet.h"
#ifndef INET
#error IPFIREWALL requires INET.
#endif /* INET */
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <net/ethernet.h>	/* for ETHERTYPE_IP */
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>	/* ip_defttl */
#include <netinet/ip_fw.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>

#include <netinet/ip6.h>	/* IN6_ARE_ADDR_EQUAL */
#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#endif

#include <netpfil/ipfw/ip_fw_private.h>

#include <machine/in_cksum.h>	/* XXX for in_cksum */

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

/*
 * Description of dynamic rules.
 *
 * Dynamic rules are stored in lists accessed through a hash table
 * (ipfw_dyn_v) whose size is curr_dyn_buckets. This value can
 * be modified through the sysctl variable dyn_buckets which is
 * updated when the table becomes empty.
 *
 * XXX currently there is only one list, ipfw_dyn.
 *
 * When a packet is received, its address fields are first masked
 * with the mask defined for the rule, then hashed, then matched
 * against the entries in the corresponding list.
 * Dynamic rules can be used for different purposes:
 *  + stateful rules;
 *  + enforcing limits on the number of sessions;
 *  + in-kernel NAT (not implemented yet)
 *
 * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
 * measured in seconds and depending on the flags.
 *
 * The total number of dynamic rules is equal to UMA zone items count.
 * The max number of dynamic rules is dyn_max. When we reach
 * the maximum number of rules we do not create any more. This is
 * done to avoid consuming too much memory, but also too much
 * time when searching on each packet (ideally, we should try instead
 * to put a limit on the length of the list on each bucket...).
 *
 * Each dynamic rule holds a pointer to the parent ipfw rule so
 * we know what action to perform. Dynamic rules are removed when
 * the parent rule is deleted. XXX we should make them survive.
 *
 * There are some limitations with dynamic rules -- we do not
 * obey the 'randomized match', and we do not do multiple
 * passes through the firewall. XXX check the latter!!!
 */

struct ipfw_dyn_bucket {
	struct mtx	mtx;		/* Bucket protecting lock */
	ipfw_dyn_rule	*head;		/* Pointer to first rule */
};

/*
 * Static variables followed by global ones
 */
static VNET_DEFINE(struct ipfw_dyn_bucket *, ipfw_dyn_v);
static VNET_DEFINE(u_int32_t, dyn_buckets_max);
static VNET_DEFINE(u_int32_t, curr_dyn_buckets);
static VNET_DEFINE(struct callout, ipfw_timeout);
#define	V_ipfw_dyn_v			VNET(ipfw_dyn_v)
#define	V_dyn_buckets_max		VNET(dyn_buckets_max)
#define	V_curr_dyn_buckets		VNET(curr_dyn_buckets)
#define	V_ipfw_timeout			VNET(ipfw_timeout)

static VNET_DEFINE(uma_zone_t, ipfw_dyn_rule_zone);
#define	V_ipfw_dyn_rule_zone		VNET(ipfw_dyn_rule_zone)

#define	IPFW_BUCK_LOCK_INIT(b)	\
	mtx_init(&(b)->mtx, "IPFW dynamic bucket", NULL, MTX_DEF)
#define	IPFW_BUCK_LOCK_DESTROY(b)	\
	mtx_destroy(&(b)->mtx)
#define	IPFW_BUCK_LOCK(i)	mtx_lock(&V_ipfw_dyn_v[(i)].mtx)
#define	IPFW_BUCK_UNLOCK(i)	mtx_unlock(&V_ipfw_dyn_v[(i)].mtx)
#define	IPFW_BUCK_ASSERT(i)	mtx_assert(&V_ipfw_dyn_v[(i)].mtx, MA_OWNED)

/*
 * Timeouts for various events in handling dynamic rules.
 */
static VNET_DEFINE(u_int32_t, dyn_ack_lifetime);
static VNET_DEFINE(u_int32_t, dyn_syn_lifetime);
static VNET_DEFINE(u_int32_t, dyn_fin_lifetime);
static VNET_DEFINE(u_int32_t, dyn_rst_lifetime);
static VNET_DEFINE(u_int32_t, dyn_udp_lifetime);
static VNET_DEFINE(u_int32_t, dyn_short_lifetime);

#define	V_dyn_ack_lifetime		VNET(dyn_ack_lifetime)
#define	V_dyn_syn_lifetime		VNET(dyn_syn_lifetime)
#define	V_dyn_fin_lifetime		VNET(dyn_fin_lifetime)
#define	V_dyn_rst_lifetime		VNET(dyn_rst_lifetime)
#define	V_dyn_udp_lifetime		VNET(dyn_udp_lifetime)
#define	V_dyn_short_lifetime		VNET(dyn_short_lifetime)

/*
 * Keepalives are sent if dyn_keepalive is set. They are sent every
 * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
 * seconds of lifetime of a rule.
 * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
 * than dyn_keepalive_period.
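 * Keepalive generation happens in check_dyn_rules() below: only
 * established TCP states (both SYNs seen) are candidates, and up to
 * one packet is queued per flow direction (see ipfw_dyn_send_ka()).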
 */

static VNET_DEFINE(u_int32_t, dyn_keepalive_interval);
static VNET_DEFINE(u_int32_t, dyn_keepalive_period);
static VNET_DEFINE(u_int32_t, dyn_keepalive);
static VNET_DEFINE(time_t, dyn_keepalive_last);

#define	V_dyn_keepalive_interval	VNET(dyn_keepalive_interval)
#define	V_dyn_keepalive_period		VNET(dyn_keepalive_period)
#define	V_dyn_keepalive			VNET(dyn_keepalive)
#define	V_dyn_keepalive_last		VNET(dyn_keepalive_last)

static VNET_DEFINE(u_int32_t, dyn_max);		/* max # of dynamic rules */

#define	DYN_COUNT			uma_zone_get_cur(V_ipfw_dyn_rule_zone)
#define	V_dyn_max			VNET(dyn_max)

/* for userspace, we emulate the uma_zone_get_cur() counter with ipfw_dyn_count */
static int ipfw_dyn_count;	/* number of objects */

#ifdef USERSPACE /* emulation of UMA object counters for userspace */
#define	uma_zone_get_cur(x)	ipfw_dyn_count
#endif /* USERSPACE */

static int last_log;	/* Log ratelimiting */

static void ipfw_dyn_tick(void *vnetx);
static void check_dyn_rules(struct ip_fw_chain *, struct ip_fw *,
    int, int, int);
#ifdef SYSCTL_NODE

static int sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS);
static int sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS);

SYSBEGIN(f2)

SYSCTL_DECL(_net_inet_ip_fw);
SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_buckets,
    CTLFLAG_RW, &VNET_NAME(dyn_buckets_max), 0,
    "Max number of dyn. buckets");
SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets,
    CTLFLAG_RD, &VNET_NAME(curr_dyn_buckets), 0,
    "Current number of dyn. buckets");
SYSCTL_VNET_PROC(_net_inet_ip_fw, OID_AUTO, dyn_count,
    CTLTYPE_UINT|CTLFLAG_RD, 0, 0, sysctl_ipfw_dyn_count, "IU",
    "Number of dyn. rules");
SYSCTL_VNET_PROC(_net_inet_ip_fw, OID_AUTO, dyn_max,
    CTLTYPE_UINT|CTLFLAG_RW, 0, 0, sysctl_ipfw_dyn_max, "IU",
    "Max number of dyn. rules");
SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime,
    CTLFLAG_RW, &VNET_NAME(dyn_ack_lifetime), 0,
    "Lifetime of dyn. rules for acks");
SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime,
    CTLFLAG_RW, &VNET_NAME(dyn_syn_lifetime), 0,
    "Lifetime of dyn. rules for syn");
SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
    CTLFLAG_RW, &VNET_NAME(dyn_fin_lifetime), 0,
    "Lifetime of dyn. rules for fin");
SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
    CTLFLAG_RW, &VNET_NAME(dyn_rst_lifetime), 0,
    "Lifetime of dyn. rules for rst");
SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime,
    CTLFLAG_RW, &VNET_NAME(dyn_udp_lifetime), 0,
    "Lifetime of dyn. rules for UDP");
SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime,
    CTLFLAG_RW, &VNET_NAME(dyn_short_lifetime), 0,
    "Lifetime of dyn. rules for other situations");
SYSCTL_VNET_UINT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive,
    CTLFLAG_RW, &VNET_NAME(dyn_keepalive), 0,
    "Enable keepalives for dyn. rules");

rules"); 237 238 SYSEND 239 240 #endif /* SYSCTL_NODE */ 241 242 243 static __inline int 244 hash_packet6(struct ipfw_flow_id *id) 245 { 246 u_int32_t i; 247 i = (id->dst_ip6.__u6_addr.__u6_addr32[2]) ^ 248 (id->dst_ip6.__u6_addr.__u6_addr32[3]) ^ 249 (id->src_ip6.__u6_addr.__u6_addr32[2]) ^ 250 (id->src_ip6.__u6_addr.__u6_addr32[3]) ^ 251 (id->dst_port) ^ (id->src_port); 252 return i; 253 } 254 255 /* 256 * IMPORTANT: the hash function for dynamic rules must be commutative 257 * in source and destination (ip,port), because rules are bidirectional 258 * and we want to find both in the same bucket. 259 */ 260 static __inline int 261 hash_packet(struct ipfw_flow_id *id, int buckets) 262 { 263 u_int32_t i; 264 265 #ifdef INET6 266 if (IS_IP6_FLOW_ID(id)) 267 i = hash_packet6(id); 268 else 269 #endif /* INET6 */ 270 i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port); 271 i &= (buckets - 1); 272 return i; 273 } 274 275 /** 276 * Print customizable flow id description via log(9) facility. 277 */ 278 static void 279 print_dyn_rule_flags(struct ipfw_flow_id *id, int dyn_type, int log_flags, 280 char *prefix, char *postfix) 281 { 282 struct in_addr da; 283 #ifdef INET6 284 char src[INET6_ADDRSTRLEN], dst[INET6_ADDRSTRLEN]; 285 #else 286 char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN]; 287 #endif 288 289 #ifdef INET6 290 if (IS_IP6_FLOW_ID(id)) { 291 ip6_sprintf(src, &id->src_ip6); 292 ip6_sprintf(dst, &id->dst_ip6); 293 } else 294 #endif 295 { 296 da.s_addr = htonl(id->src_ip); 297 inet_ntop(AF_INET, &da, src, sizeof(src)); 298 da.s_addr = htonl(id->dst_ip); 299 inet_ntop(AF_INET, &da, dst, sizeof(dst)); 300 } 301 log(log_flags, "ipfw: %s type %d %s %d -> %s %d, %d %s\n", 302 prefix, dyn_type, src, id->src_port, dst, 303 id->dst_port, DYN_COUNT, postfix); 304 } 305 306 #define print_dyn_rule(id, dtype, prefix, postfix) \ 307 print_dyn_rule_flags(id, dtype, LOG_DEBUG, prefix, postfix) 308 309 #define TIME_LEQ(a,b) ((int)((a)-(b)) <= 0) 310 311 /* 312 * Lookup a dynamic rule, locked version. 313 */ 314 static ipfw_dyn_rule * 315 lookup_dyn_rule_locked(struct ipfw_flow_id *pkt, int i, int *match_direction, 316 struct tcphdr *tcp) 317 { 318 /* 319 * Stateful ipfw extensions. 320 * Lookup into dynamic session queue. 
	 */
#define	MATCH_REVERSE	0
#define	MATCH_FORWARD	1
#define	MATCH_NONE	2
#define	MATCH_UNKNOWN	3
	int dir = MATCH_NONE;
	ipfw_dyn_rule *prev, *q = NULL;

	IPFW_BUCK_ASSERT(i);

	for (prev = NULL, q = V_ipfw_dyn_v[i].head; q; prev = q, q = q->next) {
		if (q->dyn_type == O_LIMIT_PARENT && q->count)
			continue;

		if (pkt->proto != q->id.proto || q->dyn_type == O_LIMIT_PARENT)
			continue;

		if (IS_IP6_FLOW_ID(pkt)) {
			if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.src_ip6) &&
			    IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.dst_ip6) &&
			    pkt->src_port == q->id.src_port &&
			    pkt->dst_port == q->id.dst_port) {
				dir = MATCH_FORWARD;
				break;
			}
			if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.dst_ip6) &&
			    IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.src_ip6) &&
			    pkt->src_port == q->id.dst_port &&
			    pkt->dst_port == q->id.src_port) {
				dir = MATCH_REVERSE;
				break;
			}
		} else {
			if (pkt->src_ip == q->id.src_ip &&
			    pkt->dst_ip == q->id.dst_ip &&
			    pkt->src_port == q->id.src_port &&
			    pkt->dst_port == q->id.dst_port) {
				dir = MATCH_FORWARD;
				break;
			}
			if (pkt->src_ip == q->id.dst_ip &&
			    pkt->dst_ip == q->id.src_ip &&
			    pkt->src_port == q->id.dst_port &&
			    pkt->dst_port == q->id.src_port) {
				dir = MATCH_REVERSE;
				break;
			}
		}
	}
	if (q == NULL)
		goto done;	/* q = NULL, not found */

	if (prev != NULL) {	/* found and not in front */
		prev->next = q->next;
		q->next = V_ipfw_dyn_v[i].head;
		V_ipfw_dyn_v[i].head = q;
	}
	if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */
		uint32_t ack;
		u_char flags = pkt->_flags & (TH_FIN | TH_SYN | TH_RST);

#define	BOTH_SYN	(TH_SYN | (TH_SYN << 8))
#define	BOTH_FIN	(TH_FIN | (TH_FIN << 8))
#define	TCP_FLAGS	(TH_FLAGS | (TH_FLAGS << 8))
#define	ACK_FWD		0x10000	/* fwd ack seen */
#define	ACK_REV		0x20000	/* rev ack seen */

		q->state |= (dir == MATCH_FORWARD) ? flags : (flags << 8);
		switch (q->state & TCP_FLAGS) {
		case TH_SYN:			/* opening */
			q->expire = time_uptime + V_dyn_syn_lifetime;
			break;

		case BOTH_SYN:			/* move to established */
		case BOTH_SYN | TH_FIN:		/* one side tries to close */
		case BOTH_SYN | (TH_FIN << 8):
#define	_SEQ_GE(a,b) ((int)(a) - (int)(b) >= 0)
			if (tcp == NULL)
				break;

			ack = ntohl(tcp->th_ack);
			if (dir == MATCH_FORWARD) {
				if (q->ack_fwd == 0 ||
				    _SEQ_GE(ack, q->ack_fwd)) {
					q->ack_fwd = ack;
					q->state |= ACK_FWD;
				}
			} else {
				if (q->ack_rev == 0 ||
				    _SEQ_GE(ack, q->ack_rev)) {
					q->ack_rev = ack;
					q->state |= ACK_REV;
				}
			}
			if ((q->state & (ACK_FWD | ACK_REV)) ==
			    (ACK_FWD | ACK_REV)) {
				q->expire = time_uptime + V_dyn_ack_lifetime;
				q->state &= ~(ACK_FWD | ACK_REV);
			}
			break;

		case BOTH_SYN | BOTH_FIN:	/* both sides closed */
			if (V_dyn_fin_lifetime >= V_dyn_keepalive_period)
				V_dyn_fin_lifetime = V_dyn_keepalive_period - 1;
			q->expire = time_uptime + V_dyn_fin_lifetime;
			break;

		default:
#if 0
			/*
			 * reset or some invalid combination, but can also
			 * occur if we use keep-state the wrong way.
			 */
			if ( (q->state & ((TH_RST << 8)|TH_RST)) == 0)
				printf("invalid state: 0x%x\n", q->state);
#endif
			if (V_dyn_rst_lifetime >= V_dyn_keepalive_period)
				V_dyn_rst_lifetime = V_dyn_keepalive_period - 1;
			q->expire = time_uptime + V_dyn_rst_lifetime;
			break;
		}
	} else if (pkt->proto == IPPROTO_UDP) {
		q->expire = time_uptime + V_dyn_udp_lifetime;
	} else {
		/* other protocols */
		q->expire = time_uptime + V_dyn_short_lifetime;
	}
done:
	if (match_direction != NULL)
		*match_direction = dir;
	return (q);
}

ipfw_dyn_rule *
ipfw_lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction,
    struct tcphdr *tcp)
{
	ipfw_dyn_rule *q;
	int i;

	i = hash_packet(pkt, V_curr_dyn_buckets);

	IPFW_BUCK_LOCK(i);
	q = lookup_dyn_rule_locked(pkt, i, match_direction, tcp);
	if (q == NULL)
		IPFW_BUCK_UNLOCK(i);
	/* NB: return table locked when q is not NULL */
	return q;
}

/*
 * Unlock bucket mtx
 * @q - pointer to dynamic rule
 */
void
ipfw_dyn_unlock(ipfw_dyn_rule *q)
{

	IPFW_BUCK_UNLOCK(q->bucket);
}

static int
resize_dynamic_table(struct ip_fw_chain *chain, int nbuckets)
{
	int i, k, nbuckets_old;
	ipfw_dyn_rule *q;
	struct ipfw_dyn_bucket *dyn_v, *dyn_v_old;

	/* Check if given number is power of 2 and less than 64k */
	if ((nbuckets > 65536) || (!powerof2(nbuckets)))
		return 1;

	CTR3(KTR_NET, "%s: resize dynamic hash: %d -> %d", __func__,
	    V_curr_dyn_buckets, nbuckets);

	/* Allocate and initialize new hash */
	dyn_v = malloc(nbuckets * sizeof(ipfw_dyn_rule), M_IPFW,
	    M_WAITOK | M_ZERO);

	for (i = 0 ; i < nbuckets; i++)
		IPFW_BUCK_LOCK_INIT(&dyn_v[i]);

	/*
	 * Take the upper-half lock, as get_map() does, to ease
	 * read-only access to the dynamic rules hash from sysctl
	 */
	IPFW_UH_WLOCK(chain);

	/*
	 * Acquire chain write lock to permit hash access
	 * for main traffic path without additional locks
	 */
	IPFW_WLOCK(chain);

	/* Save old values */
	nbuckets_old = V_curr_dyn_buckets;
	dyn_v_old = V_ipfw_dyn_v;

	/* Skip relinking if array is not set up */
	if (V_ipfw_dyn_v == NULL)
		V_curr_dyn_buckets = 0;

	/* Re-link all dynamic states */
	for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
		while (V_ipfw_dyn_v[i].head != NULL) {
			/* Remove from current chain */
			q = V_ipfw_dyn_v[i].head;
			V_ipfw_dyn_v[i].head = q->next;

			/* Get new hash value */
			k = hash_packet(&q->id, nbuckets);
			q->bucket = k;
			/* Add to the new head */
			q->next = dyn_v[k].head;
			dyn_v[k].head = q;
		}
	}

	/* Update current pointers/buckets values */
	V_curr_dyn_buckets = nbuckets;
	V_ipfw_dyn_v = dyn_v;

	IPFW_WUNLOCK(chain);

	IPFW_UH_WUNLOCK(chain);

	/* Start periodic callout on initial creation */
	if (dyn_v_old == NULL) {
		callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, curvnet, 0);
		return (0);
	}

	/* Destroy all mutexes */
	for (i = 0 ; i < nbuckets_old ; i++)
		IPFW_BUCK_LOCK_DESTROY(&dyn_v_old[i]);

	/* Free old hash */
	free(dyn_v_old, M_IPFW);

	return 0;
}

/**
 * Install state of type 'type' for a dynamic session.
 * The hash table contains three types of rules:
 * - regular rules (O_KEEP_STATE)
 * - rules for sessions with a limited number of sessions per user
 *   (O_LIMIT). When they are created, the parent's count is
 *   increased by 1, and decreased on delete. In this case,
 *   the third parameter is the parent rule and not the chain.
 * - "parent" rules for the above (O_LIMIT_PARENT).
 */
static ipfw_dyn_rule *
add_dyn_rule(struct ipfw_flow_id *id, int i, u_int8_t dyn_type, struct ip_fw *rule)
{
	ipfw_dyn_rule *r;

	IPFW_BUCK_ASSERT(i);

	r = uma_zalloc(V_ipfw_dyn_rule_zone, M_NOWAIT | M_ZERO);
	if (r == NULL) {
		if (last_log != time_uptime) {
			last_log = time_uptime;
			log(LOG_DEBUG, "ipfw: %s: Cannot allocate rule\n",
			    __func__);
		}
		return NULL;
	}
	ipfw_dyn_count++;

	/*
	 * refcount on parent is already incremented, so
	 * it is safe to use parent unlocked.
	 */
	if (dyn_type == O_LIMIT) {
		ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;
		if ( parent->dyn_type != O_LIMIT_PARENT)
			panic("invalid parent");
		r->parent = parent;
		rule = parent->rule;
	}

	r->id = *id;
	r->expire = time_uptime + V_dyn_syn_lifetime;
	r->rule = rule;
	r->dyn_type = dyn_type;
	IPFW_ZERO_DYN_COUNTER(r);
	r->count = 0;

	r->bucket = i;
	r->next = V_ipfw_dyn_v[i].head;
	V_ipfw_dyn_v[i].head = r;
	DEB(print_dyn_rule(id, dyn_type, "add dyn entry", "total");)
	return r;
}

/**
 * Lookup dynamic parent rule using pkt and rule as search keys.
 * If the lookup fails, then install one.
 */
static ipfw_dyn_rule *
lookup_dyn_parent(struct ipfw_flow_id *pkt, int *pindex, struct ip_fw *rule)
{
	ipfw_dyn_rule *q;
	int i, is_v6;

	is_v6 = IS_IP6_FLOW_ID(pkt);
	i = hash_packet( pkt, V_curr_dyn_buckets );
	*pindex = i;
	IPFW_BUCK_LOCK(i);
	for (q = V_ipfw_dyn_v[i].head ; q != NULL ; q=q->next)
		if (q->dyn_type == O_LIMIT_PARENT &&
		    rule == q->rule &&
		    pkt->proto == q->id.proto &&
		    pkt->src_port == q->id.src_port &&
		    pkt->dst_port == q->id.dst_port &&
		    (
			(is_v6 &&
			 IN6_ARE_ADDR_EQUAL(&(pkt->src_ip6),
				&(q->id.src_ip6)) &&
			 IN6_ARE_ADDR_EQUAL(&(pkt->dst_ip6),
				&(q->id.dst_ip6))) ||
			(!is_v6 &&
			 pkt->src_ip == q->id.src_ip &&
			 pkt->dst_ip == q->id.dst_ip)
		    )
		) {
			q->expire = time_uptime + V_dyn_short_lifetime;
			DEB(print_dyn_rule(pkt, q->dyn_type,
			    "lookup_dyn_parent found", "");)
			return q;
		}

	/* Add virtual limiting rule */
	return add_dyn_rule(pkt, i, O_LIMIT_PARENT, rule);
}

/**
 * Install dynamic state for rule type cmd->o.opcode
 *
 * Returns 1 (failure) if state is not installed because of errors or because
 * session limitations are enforced.
 */
int
ipfw_install_state(struct ip_fw *rule, ipfw_insn_limit *cmd,
    struct ip_fw_args *args, uint32_t tablearg)
{
	ipfw_dyn_rule *q;
	int i;

	DEB(print_dyn_rule(&args->f_id, cmd->o.opcode, "install_state", "");)

	i = hash_packet(&args->f_id, V_curr_dyn_buckets);

	IPFW_BUCK_LOCK(i);

	q = lookup_dyn_rule_locked(&args->f_id, i, NULL, NULL);

	if (q != NULL) {	/* should never occur */
		DEB(
		if (last_log != time_uptime) {
			last_log = time_uptime;
			printf("ipfw: %s: entry already present, done\n",
			    __func__);
		})
		IPFW_BUCK_UNLOCK(i);
		return (0);
	}

	/*
	 * State limiting is done via uma(9) zone limiting.
	 * Save pointer to newly-installed rule and reject
	 * packet if add_dyn_rule() returned NULL.
	 * Note q is currently set to NULL.
	 */

	switch (cmd->o.opcode) {
	case O_KEEP_STATE:	/* bidir rule */
		q = add_dyn_rule(&args->f_id, i, O_KEEP_STATE, rule);
		break;

	case O_LIMIT: {		/* limit number of sessions */
		struct ipfw_flow_id id;
		ipfw_dyn_rule *parent;
		uint32_t conn_limit;
		uint16_t limit_mask = cmd->limit_mask;
		int pindex;

		conn_limit = IP_FW_ARG_TABLEARG(cmd->conn_limit);

		DEB(
		if (cmd->conn_limit == IP_FW_TABLEARG)
			printf("ipfw: %s: O_LIMIT rule, conn_limit: %u "
			    "(tablearg)\n", __func__, conn_limit);
		else
			printf("ipfw: %s: O_LIMIT rule, conn_limit: %u\n",
			    __func__, conn_limit);
		)

		id.dst_ip = id.src_ip = id.dst_port = id.src_port = 0;
		id.proto = args->f_id.proto;
		id.addr_type = args->f_id.addr_type;
		id.fib = M_GETFIB(args->m);

		if (IS_IP6_FLOW_ID (&(args->f_id))) {
			if (limit_mask & DYN_SRC_ADDR)
				id.src_ip6 = args->f_id.src_ip6;
			if (limit_mask & DYN_DST_ADDR)
				id.dst_ip6 = args->f_id.dst_ip6;
		} else {
			if (limit_mask & DYN_SRC_ADDR)
				id.src_ip = args->f_id.src_ip;
			if (limit_mask & DYN_DST_ADDR)
				id.dst_ip = args->f_id.dst_ip;
		}
		if (limit_mask & DYN_SRC_PORT)
			id.src_port = args->f_id.src_port;
		if (limit_mask & DYN_DST_PORT)
			id.dst_port = args->f_id.dst_port;

		/*
		 * We have to release lock for previous bucket to
		 * avoid possible deadlock
		 */
		IPFW_BUCK_UNLOCK(i);

		if ((parent = lookup_dyn_parent(&id, &pindex, rule)) == NULL) {
			printf("ipfw: %s: add parent failed\n", __func__);
			IPFW_BUCK_UNLOCK(pindex);
			return (1);
		}

		if (parent->count >= conn_limit) {
			if (V_fw_verbose && last_log != time_uptime) {
				char sbuf[24];

				last_log = time_uptime;
				snprintf(sbuf, sizeof(sbuf),
				    "%d drop session",
				    parent->rule->rulenum);
				print_dyn_rule_flags(&args->f_id,
				    cmd->o.opcode,
				    LOG_SECURITY | LOG_DEBUG,
				    sbuf, "too many entries");
			}
			IPFW_BUCK_UNLOCK(pindex);
			return (1);
		}
		/* Increment counter on parent */
		parent->count++;
		IPFW_BUCK_UNLOCK(pindex);

		IPFW_BUCK_LOCK(i);
		q = add_dyn_rule(&args->f_id, i, O_LIMIT, (struct ip_fw *)parent);
		if (q == NULL) {
			/* Decrement parent's count and notify caller */
			IPFW_BUCK_UNLOCK(i);
			IPFW_BUCK_LOCK(pindex);
			parent->count--;
			IPFW_BUCK_UNLOCK(pindex);
			return (1);
		}
		break;
	}
	default:
		printf("ipfw: %s: unknown dynamic rule type %u\n",
		    __func__, cmd->o.opcode);
	}

	if (q == NULL) {
		IPFW_BUCK_UNLOCK(i);
		return (1);	/* Notify caller about failure */
	}

	/* XXX just set lifetime */
	lookup_dyn_rule_locked(&args->f_id, i, NULL, NULL);

	IPFW_BUCK_UNLOCK(i);
	return (0);
}

/*
 * Generate a TCP packet, containing either a RST or a keepalive.
 * When flags & TH_RST, we are sending a RST packet because a
 * "reset" action matched the packet.
 * Otherwise we are sending a keepalive, and the TH_SYN bit in flags
 * selects the direction of the generated packet.
 * The 'replyto' mbuf is the mbuf being replied to, if any, and is required
 * so that MAC can label the reply appropriately.
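 *
 * ipfw_dyn_send_ka() below uses this helper to build up to two
 * keepalives per dynamic state, one per flow direction, passing the
 * ACK sequence numbers recorded on the state as 'seq' and 'ack'.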
 */
struct mbuf *
ipfw_send_pkt(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t seq,
    u_int32_t ack, int flags)
{
	struct mbuf *m = NULL;		/* stupid compiler */
	int len, dir;
	struct ip *h = NULL;		/* stupid compiler */
#ifdef INET6
	struct ip6_hdr *h6 = NULL;
#endif
	struct tcphdr *th = NULL;

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);

	M_SETFIB(m, id->fib);
#ifdef MAC
	if (replyto != NULL)
		mac_netinet_firewall_reply(replyto, m);
	else
		mac_netinet_firewall_send(m);
#else
	(void)replyto;		/* don't warn about unused arg */
#endif

	switch (id->addr_type) {
	case 4:
		len = sizeof(struct ip) + sizeof(struct tcphdr);
		break;
#ifdef INET6
	case 6:
		len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		break;
#endif
	default:
		/* XXX: log me?!? */
		FREE_PKT(m);
		return (NULL);
	}
	dir = ((flags & (TH_SYN | TH_RST)) == TH_SYN);

	m->m_data += max_linkhdr;
	m->m_flags |= M_SKIP_FIREWALL;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	bzero(m->m_data, len);

	switch (id->addr_type) {
	case 4:
		h = mtod(m, struct ip *);

		/* prepare for checksum */
		h->ip_p = IPPROTO_TCP;
		h->ip_len = htons(sizeof(struct tcphdr));
		if (dir) {
			h->ip_src.s_addr = htonl(id->src_ip);
			h->ip_dst.s_addr = htonl(id->dst_ip);
		} else {
			h->ip_src.s_addr = htonl(id->dst_ip);
			h->ip_dst.s_addr = htonl(id->src_ip);
		}

		th = (struct tcphdr *)(h + 1);
		break;
#ifdef INET6
	case 6:
		h6 = mtod(m, struct ip6_hdr *);

		/* prepare for checksum */
		h6->ip6_nxt = IPPROTO_TCP;
		h6->ip6_plen = htons(sizeof(struct tcphdr));
		if (dir) {
			h6->ip6_src = id->src_ip6;
			h6->ip6_dst = id->dst_ip6;
		} else {
			h6->ip6_src = id->dst_ip6;
			h6->ip6_dst = id->src_ip6;
		}

		th = (struct tcphdr *)(h6 + 1);
		break;
#endif
	}

	if (dir) {
		th->th_sport = htons(id->src_port);
		th->th_dport = htons(id->dst_port);
	} else {
		th->th_sport = htons(id->dst_port);
		th->th_dport = htons(id->src_port);
	}
	th->th_off = sizeof(struct tcphdr) >> 2;

	if (flags & TH_RST) {
		if (flags & TH_ACK) {
			th->th_seq = htonl(ack);
			th->th_flags = TH_RST;
		} else {
			if (flags & TH_SYN)
				seq++;
			th->th_ack = htonl(seq);
			th->th_flags = TH_RST | TH_ACK;
		}
	} else {
		/*
		 * Keepalive - use caller provided sequence numbers
		 */
		th->th_seq = htonl(seq);
		th->th_ack = htonl(ack);
		th->th_flags = TH_ACK;
	}

	switch (id->addr_type) {
	case 4:
		th->th_sum = in_cksum(m, len);

		/* finish the ip header */
		h->ip_v = 4;
		h->ip_hl = sizeof(*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		h->ip_off = htons(0);
		h->ip_len = htons(len);
		h->ip_ttl = V_ip_defttl;
		h->ip_sum = 0;
		break;
#ifdef INET6
	case 6:
		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(*h6),
		    sizeof(struct tcphdr));

		/* finish the ip6 header */
		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;
		break;
#endif
	}

	return (m);
}

/*
 * Queue keepalive packets for given dynamic rule
 */
static struct mbuf **
ipfw_dyn_send_ka(struct mbuf **mtailp, ipfw_dyn_rule *q)
{
	struct mbuf *m_rev, *m_fwd;

	m_rev = (q->state & ACK_REV) ? NULL :
	    ipfw_send_pkt(NULL, &(q->id), q->ack_rev - 1, q->ack_fwd, TH_SYN);
	m_fwd = (q->state & ACK_FWD) ? NULL :
	    ipfw_send_pkt(NULL, &(q->id), q->ack_fwd - 1, q->ack_rev, 0);

	if (m_rev != NULL) {
		*mtailp = m_rev;
		mtailp = &(*mtailp)->m_nextpkt;
	}
	if (m_fwd != NULL) {
		*mtailp = m_fwd;
		mtailp = &(*mtailp)->m_nextpkt;
	}

	return (mtailp);
}

/*
 * This procedure is used to perform various maintenance
 * on the dynamic hash list. Currently it is called every second.
 */
static void
ipfw_dyn_tick(void * vnetx)
{
	struct ip_fw_chain *chain;
	int check_ka = 0;
#ifdef VIMAGE
	struct vnet *vp = vnetx;
#endif

	CURVNET_SET(vp);

	chain = &V_layer3_chain;

	/* Run keepalive checks every keepalive_period iff ka is enabled */
	if ((V_dyn_keepalive_last + V_dyn_keepalive_period <= time_uptime) &&
	    (V_dyn_keepalive != 0)) {
		V_dyn_keepalive_last = time_uptime;
		check_ka = 1;
	}

	check_dyn_rules(chain, NULL, RESVD_SET, check_ka, 1);

	callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, vnetx, 0);

	CURVNET_RESTORE();
}


/*
 * Walk through all dynamic states doing generic maintenance:
 * 1) free expired states
 * 2) free all states based on deleted rule / set
 * 3) send keepalives for states if needed
 *
 * @chain - pointer to current ipfw rules chain
 * @rule - delete all states originated by given rule if != NULL
 * @set - delete all states originated by any rule in set @set if != RESVD_SET
 * @check_ka - perform checking/sending keepalives
 * @timer - indicate call from timer routine.
 *
 * Timer routine must call this function unlocked to permit
 * sending keepalives/resizing table.
 *
 * Other callers have to call this function with IPFW_UH_WLOCK held.
 * Additionally, the function assumes that the dynamic rule/set is
 * ALREADY deleted so no new states can be generated by
 * 'deleted' rules.
 *
 * Write lock is needed to ensure that unused parent rules
 * are not freed by another instance (see stage 2, 3)
 */
static void
check_dyn_rules(struct ip_fw_chain *chain, struct ip_fw *rule,
    int set, int check_ka, int timer)
{
	struct mbuf *m0, *m, *mnext, **mtailp;
	struct ip *h;
	int i, dyn_count, new_buckets = 0, max_buckets;
	int expired = 0, expired_limits = 0, parents = 0, total = 0;
	ipfw_dyn_rule *q, *q_prev, *q_next;
	ipfw_dyn_rule *exp_head, **exptailp;
	ipfw_dyn_rule *exp_lhead, **expltailp;

	KASSERT(V_ipfw_dyn_v != NULL, ("%s: dynamic table not allocated",
	    __func__));

	/* Avoid possible LOR */
	KASSERT(!check_ka || timer, ("%s: keepalive check with lock held",
	    __func__));

	/*
	 * Do not perform any checks if we currently have no dynamic states
	 */
	if (DYN_COUNT == 0)
		return;

	/* Expired states */
	exp_head = NULL;
	exptailp = &exp_head;

	/* Expired limit states */
	exp_lhead = NULL;
	expltailp = &exp_lhead;

	/*
	 * We make a chain of packets to go out here; deferring
	 * transmission until after we drop the IPFW dynamic rule lock
	 * avoids a lock order reversal with the normal packet input ->
	 * ipfw call stack.
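	 * The queued packets are handed to ip_output()/ip6_output() at
	 * the end of this function, once all bucket and chain locks
	 * have been released.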
	 */
	m0 = NULL;
	mtailp = &m0;

	/* Protect from hash resizing */
	if (timer != 0)
		IPFW_UH_WLOCK(chain);
	else
		IPFW_UH_WLOCK_ASSERT(chain);

#define	NEXT_RULE()	{ q_prev = q; q = q->next ; continue; }

	/* Stage 1: perform requested deletion */
	for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
		IPFW_BUCK_LOCK(i);
		for (q = V_ipfw_dyn_v[i].head, q_prev = q; q ; ) {
			/* account every rule */
			total++;

			/* Skip parent rules entirely */
			if (q->dyn_type == O_LIMIT_PARENT) {
				parents++;
				NEXT_RULE();
			}

			/*
			 * Remove rules which are:
			 * 1) expired
			 * 2) created by given rule
			 * 3) created by any rule in given set
			 */
			if ((TIME_LEQ(q->expire, time_uptime)) ||
			    ((rule != NULL) && (q->rule == rule)) ||
			    ((set != RESVD_SET) && (q->rule->set == set))) {
				/* Unlink q from current list */
				q_next = q->next;
				if (q == V_ipfw_dyn_v[i].head)
					V_ipfw_dyn_v[i].head = q_next;
				else
					q_prev->next = q_next;

				q->next = NULL;

				/* queue q to expire list */
				if (q->dyn_type != O_LIMIT) {
					*exptailp = q;
					exptailp = &(*exptailp)->next;
					DEB(print_dyn_rule(&q->id, q->dyn_type,
					    "unlink entry", "left");
					)
				} else {
					/* Separate list for limit rules */
					*expltailp = q;
					expltailp = &(*expltailp)->next;
					expired_limits++;
					DEB(print_dyn_rule(&q->id, q->dyn_type,
					    "unlink limit entry", "left");
					)
				}

				q = q_next;
				expired++;
				continue;
			}

			/*
			 * Check if we need to send keepalive:
			 * we need to ensure it is time to do KA,
			 * this is an established TCP session, and
			 * the expire time is within the keepalive interval
			 */
			if ((check_ka != 0) && (q->id.proto == IPPROTO_TCP) &&
			    ((q->state & BOTH_SYN) == BOTH_SYN) &&
			    (TIME_LEQ(q->expire, time_uptime +
			    V_dyn_keepalive_interval)))
				mtailp = ipfw_dyn_send_ka(mtailp, q);

			NEXT_RULE();
		}
		IPFW_BUCK_UNLOCK(i);
	}

	/* Stage 2: decrement counters from O_LIMIT parents */
	if (expired_limits != 0) {
		/*
		 * XXX: Note that deleting a set with more than one
		 * heavily-used LIMIT rule can result in overwhelming
		 * locking due to lack of per-hash value sorting
		 *
		 * We should probably think about:
		 * 1) pre-allocating hash of size, say,
		 *    MAX(16, V_curr_dyn_buckets / 1024)
		 * 2) checking if expired_limits is large enough
		 * 3) If yes, init hash (or its part), re-link
		 *    current list and start decrementing procedure in
		 *    each bucket separately
		 */

		/*
		 * Small optimization: do not unlock bucket until
		 * we see that the next item resides in a different bucket
		 */
		if (exp_lhead != NULL) {
			i = exp_lhead->parent->bucket;
			IPFW_BUCK_LOCK(i);
		}
		for (q = exp_lhead; q != NULL; q = q->next) {
			if (i != q->parent->bucket) {
				IPFW_BUCK_UNLOCK(i);
				i = q->parent->bucket;
				IPFW_BUCK_LOCK(i);
			}

			/* Decrease parent refcount */
			q->parent->count--;
		}
		if (exp_lhead != NULL)
			IPFW_BUCK_UNLOCK(i);
	}

	/*
	 * We protect ourselves from unused parent deletion
	 * (from the timer function) by holding the UH write lock.
	 */

	/* Stage 3: remove unused parent rules */
	if ((parents != 0) && (expired != 0)) {
		for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
			IPFW_BUCK_LOCK(i);
			for (q = V_ipfw_dyn_v[i].head, q_prev = q ; q ; ) {
				if (q->dyn_type != O_LIMIT_PARENT)
					NEXT_RULE();

				if (q->count != 0)
					NEXT_RULE();

				/* Parent rule without consumers */

				/* Unlink q from current list */
				q_next = q->next;
				if (q == V_ipfw_dyn_v[i].head)
					V_ipfw_dyn_v[i].head = q_next;
				else
					q_prev->next = q_next;

				q->next = NULL;

				/* Add to expired list */
				*exptailp = q;
				exptailp = &(*exptailp)->next;

				DEB(print_dyn_rule(&q->id, q->dyn_type,
				    "unlink parent entry", "left");
				)

				expired++;

				q = q_next;
			}
			IPFW_BUCK_UNLOCK(i);
		}
	}

#undef NEXT_RULE

	if (timer != 0) {
		/*
		 * Check if we need to resize hash:
		 * if current number of states exceeds number of buckets
		 * in hash, grow hash size to the minimum power of 2 which
		 * is bigger than current states count. Limit hash size by 64k.
		 */
		max_buckets = (V_dyn_buckets_max > 65536) ?
		    65536 : V_dyn_buckets_max;

		dyn_count = DYN_COUNT;

		if ((dyn_count > V_curr_dyn_buckets * 2) &&
		    (dyn_count < max_buckets)) {
			new_buckets = V_curr_dyn_buckets;
			while (new_buckets < dyn_count) {
				new_buckets *= 2;

				if (new_buckets >= max_buckets)
					break;
			}
		}

		IPFW_UH_WUNLOCK(chain);
	}

	/* Finally delete old states and limits if any */
	for (q = exp_head; q != NULL; q = q_next) {
		q_next = q->next;
		uma_zfree(V_ipfw_dyn_rule_zone, q);
		ipfw_dyn_count--;
	}

	for (q = exp_lhead; q != NULL; q = q_next) {
		q_next = q->next;
		uma_zfree(V_ipfw_dyn_rule_zone, q);
		ipfw_dyn_count--;
	}

	/*
	 * The rest of the code MUST be called from the timer routine only,
	 * without holding any locks
	 */
	if (timer == 0)
		return;

	/* Send keepalive packets if any */
	for (m = m0; m != NULL; m = mnext) {
		mnext = m->m_nextpkt;
		m->m_nextpkt = NULL;
		h = mtod(m, struct ip *);
		if (h->ip_v == 4)
			ip_output(m, NULL, NULL, 0, NULL, NULL);
#ifdef INET6
		else
			ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
#endif
	}

	/* Run table resize without holding any locks */
	if (new_buckets != 0)
		resize_dynamic_table(chain, new_buckets);
}

/*
 * Deletes all dynamic rules originated by given rule or all rules in
 * given set. Specify RESVD_SET to indicate set should not be used.
 * @chain - pointer to current ipfw rules chain
 * @rule - delete all states originated by given rule if != NULL
 * @set - delete all states originated by any rule in set @set if != RESVD_SET
 *
 * Function has to be called with IPFW_UH_WLOCK held.
 * Additionally, the function assumes that the dynamic rule/set is
 * ALREADY deleted so no new states can be generated by
 * 'deleted' rules.
 */
void
ipfw_expire_dyn_rules(struct ip_fw_chain *chain, struct ip_fw *rule, int set)
{

	check_dyn_rules(chain, rule, set, 0, 0);
}

void
ipfw_dyn_init(struct ip_fw_chain *chain)
{

	V_ipfw_dyn_v = NULL;
	V_dyn_buckets_max = 256;	/* must be power of 2 */
	V_curr_dyn_buckets = 256;	/* must be power of 2 */

	V_dyn_ack_lifetime = 300;
	V_dyn_syn_lifetime = 20;
	V_dyn_fin_lifetime = 1;
	V_dyn_rst_lifetime = 1;
	V_dyn_udp_lifetime = 10;
	V_dyn_short_lifetime = 5;

	V_dyn_keepalive_interval = 20;
	V_dyn_keepalive_period = 5;
	V_dyn_keepalive = 1;		/* do send keepalives */
	V_dyn_keepalive_last = time_uptime;

	V_dyn_max = 4096;		/* max # of dynamic rules */

	V_ipfw_dyn_rule_zone = uma_zcreate("IPFW dynamic rule",
	    sizeof(ipfw_dyn_rule), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	/* Enforce limit on dynamic rules */
	uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max);

	callout_init(&V_ipfw_timeout, CALLOUT_MPSAFE);

	/*
	 * This can potentially be done on first dynamic rule
	 * being added to chain.
	 */
	resize_dynamic_table(chain, V_curr_dyn_buckets);
}

void
ipfw_dyn_uninit(int pass)
{
	int i;

	if (pass == 0) {
		callout_drain(&V_ipfw_timeout);
		return;
	}

	if (V_ipfw_dyn_v != NULL) {
		/*
		 * Skip deleting all dynamic states -
		 * uma_zdestroy() does this more efficiently.
		 */

		/* Destroy all mutexes */
		for (i = 0 ; i < V_curr_dyn_buckets ; i++)
			IPFW_BUCK_LOCK_DESTROY(&V_ipfw_dyn_v[i]);
		free(V_ipfw_dyn_v, M_IPFW);
		V_ipfw_dyn_v = NULL;
	}

	uma_zdestroy(V_ipfw_dyn_rule_zone);
}

#ifdef SYSCTL_NODE
/*
 * Get/set maximum number of dynamic states in given VNET instance.
 */
static int
sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS)
{
	int error;
	unsigned int nstates;

	nstates = V_dyn_max;

	error = sysctl_handle_int(oidp, &nstates, 0, req);
	/* Read operation or some error */
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	V_dyn_max = nstates;
	uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max);

	return (0);
}

/*
 * Get current number of dynamic states in given VNET instance.
 */
static int
sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS)
{
	int error;
	unsigned int nstates;

	nstates = DYN_COUNT;

	error = sysctl_handle_int(oidp, &nstates, 0, req);

	return (error);
}
#endif

/*
 * Returns the space (in bytes) needed to export all dynamic states.
 */
int
ipfw_dyn_len(void)
{

	return (V_ipfw_dyn_v == NULL) ? 0 :
	    (DYN_COUNT * sizeof(ipfw_dyn_rule));
}

/*
 * Fill given buffer with dynamic states.
 * IPFW_UH_RLOCK has to be held while calling.
 */
void
ipfw_get_dynamic(struct ip_fw_chain *chain, char **pbp, const char *ep)
{
	ipfw_dyn_rule *p, *last = NULL;
	char *bp;
	int i;

	if (V_ipfw_dyn_v == NULL)
		return;
	bp = *pbp;

	IPFW_UH_RLOCK_ASSERT(chain);

	for (i = 0 ; i < V_curr_dyn_buckets; i++) {
		IPFW_BUCK_LOCK(i);
		for (p = V_ipfw_dyn_v[i].head ; p != NULL; p = p->next) {
			if (bp + sizeof *p <= ep) {
				ipfw_dyn_rule *dst =
				    (ipfw_dyn_rule *)bp;
				bcopy(p, dst, sizeof *p);
				bcopy(&(p->rule->rulenum), &(dst->rule),
				    sizeof(p->rule->rulenum));
				/*
				 * store set number into high word of
				 * dst->rule pointer.
				 */
				bcopy(&(p->rule->set),
				    (char *)&dst->rule +
				    sizeof(p->rule->rulenum),
				    sizeof(p->rule->set));
				/*
				 * store a non-null value in "next".
				 * The userland code will interpret a
				 * NULL here as a marker
				 * for the last dynamic rule.
				 */
				bcopy(&dst, &dst->next, sizeof(dst));
				last = dst;
				dst->expire =
				    TIME_LEQ(dst->expire, time_uptime) ?
				    0 : dst->expire - time_uptime;
				bp += sizeof(ipfw_dyn_rule);
			}
		}
		IPFW_BUCK_UNLOCK(i);
	}

	if (last != NULL) /* mark last dynamic rule */
		bzero(&last->next, sizeof(last));
	*pbp = bp;
}
/* end of file */