1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #define DEB(x) 32 #define DDB(x) x 33 34 /* 35 * Dynamic rule support for ipfw 36 */ 37 38 #include "opt_ipfw.h" 39 #include "opt_inet.h" 40 #ifndef INET 41 #error IPFIREWALL requires INET. 42 #endif /* INET */ 43 #include "opt_inet6.h" 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/malloc.h> 48 #include <sys/mbuf.h> 49 #include <sys/kernel.h> 50 #include <sys/ktr.h> 51 #include <sys/lock.h> 52 #include <sys/rmlock.h> 53 #include <sys/socket.h> 54 #include <sys/sysctl.h> 55 #include <sys/syslog.h> 56 #include <net/ethernet.h> /* for ETHERTYPE_IP */ 57 #include <net/if.h> 58 #include <net/if_var.h> 59 #include <net/pfil.h> 60 #include <net/vnet.h> 61 62 #include <netinet/in.h> 63 #include <netinet/ip.h> 64 #include <netinet/ip_var.h> /* ip_defttl */ 65 #include <netinet/ip_fw.h> 66 #include <netinet/tcp_var.h> 67 #include <netinet/udp.h> 68 69 #include <netinet/ip6.h> /* IN6_ARE_ADDR_EQUAL */ 70 #ifdef INET6 71 #include <netinet6/in6_var.h> 72 #include <netinet6/ip6_var.h> 73 #endif 74 75 #include <netpfil/ipfw/ip_fw_private.h> 76 77 #include <machine/in_cksum.h> /* XXX for in_cksum */ 78 79 #ifdef MAC 80 #include <security/mac/mac_framework.h> 81 #endif 82 83 /* 84 * Description of dynamic rules. 85 * 86 * Dynamic rules are stored in lists accessed through a hash table 87 * (ipfw_dyn_v) whose size is curr_dyn_buckets. This value can 88 * be modified through the sysctl variable dyn_buckets which is 89 * updated when the table becomes empty. 90 * 91 * XXX currently there is only one list, ipfw_dyn. 92 * 93 * When a packet is received, its address fields are first masked 94 * with the mask defined for the rule, then hashed, then matched 95 * against the entries in the corresponding list. 96 * Dynamic rules can be used for different purposes: 97 * + stateful rules; 98 * + enforcing limits on the number of sessions; 99 * + in-kernel NAT (not implemented yet) 100 * 101 * The lifetime of dynamic rules is regulated by dyn_*_lifetime, 102 * measured in seconds and depending on the flags. 103 * 104 * The total number of dynamic rules is equal to UMA zone items count. 105 * The max number of dynamic rules is dyn_max. When we reach 106 * the maximum number of rules we do not create anymore. This is 107 * done to avoid consuming too much memory, but also too much 108 * time when searching on each packet (ideally, we should try instead 109 * to put a limit on the length of the list on each bucket...). 110 * 111 * Each dynamic rule holds a pointer to the parent ipfw rule so 112 * we know what action to perform. Dynamic rules are removed when 113 * the parent rule is deleted. This can be changed by dyn_keep_states 114 * sysctl. 115 * 116 * There are some limitations with dynamic rules -- we do not 117 * obey the 'randomized match', and we do not do multiple 118 * passes through the firewall. XXX check the latter!!! 119 */ 120 121 struct ipfw_dyn_bucket { 122 struct mtx mtx; /* Bucket protecting lock */ 123 ipfw_dyn_rule *head; /* Pointer to first rule */ 124 }; 125 126 /* 127 * Static variables followed by global ones 128 */ 129 static VNET_DEFINE(struct ipfw_dyn_bucket *, ipfw_dyn_v); 130 static VNET_DEFINE(u_int32_t, dyn_buckets_max); 131 static VNET_DEFINE(u_int32_t, curr_dyn_buckets); 132 static VNET_DEFINE(struct callout, ipfw_timeout); 133 #define V_ipfw_dyn_v VNET(ipfw_dyn_v) 134 #define V_dyn_buckets_max VNET(dyn_buckets_max) 135 #define V_curr_dyn_buckets VNET(curr_dyn_buckets) 136 #define V_ipfw_timeout VNET(ipfw_timeout) 137 138 static VNET_DEFINE(uma_zone_t, ipfw_dyn_rule_zone); 139 #define V_ipfw_dyn_rule_zone VNET(ipfw_dyn_rule_zone) 140 141 #define IPFW_BUCK_LOCK_INIT(b) \ 142 mtx_init(&(b)->mtx, "IPFW dynamic bucket", NULL, MTX_DEF) 143 #define IPFW_BUCK_LOCK_DESTROY(b) \ 144 mtx_destroy(&(b)->mtx) 145 #define IPFW_BUCK_LOCK(i) mtx_lock(&V_ipfw_dyn_v[(i)].mtx) 146 #define IPFW_BUCK_UNLOCK(i) mtx_unlock(&V_ipfw_dyn_v[(i)].mtx) 147 #define IPFW_BUCK_ASSERT(i) mtx_assert(&V_ipfw_dyn_v[(i)].mtx, MA_OWNED) 148 149 150 static VNET_DEFINE(int, dyn_keep_states); 151 #define V_dyn_keep_states VNET(dyn_keep_states) 152 153 /* 154 * Timeouts for various events in handing dynamic rules. 155 */ 156 static VNET_DEFINE(u_int32_t, dyn_ack_lifetime); 157 static VNET_DEFINE(u_int32_t, dyn_syn_lifetime); 158 static VNET_DEFINE(u_int32_t, dyn_fin_lifetime); 159 static VNET_DEFINE(u_int32_t, dyn_rst_lifetime); 160 static VNET_DEFINE(u_int32_t, dyn_udp_lifetime); 161 static VNET_DEFINE(u_int32_t, dyn_short_lifetime); 162 163 #define V_dyn_ack_lifetime VNET(dyn_ack_lifetime) 164 #define V_dyn_syn_lifetime VNET(dyn_syn_lifetime) 165 #define V_dyn_fin_lifetime VNET(dyn_fin_lifetime) 166 #define V_dyn_rst_lifetime VNET(dyn_rst_lifetime) 167 #define V_dyn_udp_lifetime VNET(dyn_udp_lifetime) 168 #define V_dyn_short_lifetime VNET(dyn_short_lifetime) 169 170 /* 171 * Keepalives are sent if dyn_keepalive is set. They are sent every 172 * dyn_keepalive_period seconds, in the last dyn_keepalive_interval 173 * seconds of lifetime of a rule. 174 * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower 175 * than dyn_keepalive_period. 176 */ 177 178 static VNET_DEFINE(u_int32_t, dyn_keepalive_interval); 179 static VNET_DEFINE(u_int32_t, dyn_keepalive_period); 180 static VNET_DEFINE(u_int32_t, dyn_keepalive); 181 static VNET_DEFINE(time_t, dyn_keepalive_last); 182 183 #define V_dyn_keepalive_interval VNET(dyn_keepalive_interval) 184 #define V_dyn_keepalive_period VNET(dyn_keepalive_period) 185 #define V_dyn_keepalive VNET(dyn_keepalive) 186 #define V_dyn_keepalive_last VNET(dyn_keepalive_last) 187 188 static VNET_DEFINE(u_int32_t, dyn_max); /* max # of dynamic rules */ 189 190 #define DYN_COUNT uma_zone_get_cur(V_ipfw_dyn_rule_zone) 191 #define V_dyn_max VNET(dyn_max) 192 193 /* for userspace, we emulate the uma_zone_counter with ipfw_dyn_count */ 194 static int ipfw_dyn_count; /* number of objects */ 195 196 #ifdef USERSPACE /* emulation of UMA object counters for userspace */ 197 #define uma_zone_get_cur(x) ipfw_dyn_count 198 #endif /* USERSPACE */ 199 200 static int last_log; /* Log ratelimiting */ 201 202 static void ipfw_dyn_tick(void *vnetx); 203 static void check_dyn_rules(struct ip_fw_chain *, ipfw_range_tlv *, int, int); 204 #ifdef SYSCTL_NODE 205 206 static int sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS); 207 static int sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS); 208 209 SYSBEGIN(f2) 210 211 SYSCTL_DECL(_net_inet_ip_fw); 212 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_buckets, 213 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_buckets_max), 0, 214 "Max number of dyn. buckets"); 215 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets, 216 CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(curr_dyn_buckets), 0, 217 "Current Number of dyn. buckets"); 218 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_count, 219 CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RD, 0, 0, sysctl_ipfw_dyn_count, "IU", 220 "Number of dyn. rules"); 221 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_max, 222 CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW, 0, 0, sysctl_ipfw_dyn_max, "IU", 223 "Max number of dyn. rules"); 224 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime, 225 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_ack_lifetime), 0, 226 "Lifetime of dyn. rules for acks"); 227 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime, 228 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_syn_lifetime), 0, 229 "Lifetime of dyn. rules for syn"); 230 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime, 231 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_fin_lifetime), 0, 232 "Lifetime of dyn. rules for fin"); 233 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime, 234 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_rst_lifetime), 0, 235 "Lifetime of dyn. rules for rst"); 236 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime, 237 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_udp_lifetime), 0, 238 "Lifetime of dyn. rules for UDP"); 239 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime, 240 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_short_lifetime), 0, 241 "Lifetime of dyn. rules for other situations"); 242 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive, 243 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_keepalive), 0, 244 "Enable keepalives for dyn. rules"); 245 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, dyn_keep_states, 246 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_keep_states), 0, 247 "Do not flush dynamic states on rule deletion"); 248 249 SYSEND 250 251 #endif /* SYSCTL_NODE */ 252 253 254 #ifdef INET6 255 static __inline int 256 hash_packet6(const struct ipfw_flow_id *id) 257 { 258 u_int32_t i; 259 i = (id->dst_ip6.__u6_addr.__u6_addr32[2]) ^ 260 (id->dst_ip6.__u6_addr.__u6_addr32[3]) ^ 261 (id->src_ip6.__u6_addr.__u6_addr32[2]) ^ 262 (id->src_ip6.__u6_addr.__u6_addr32[3]); 263 return ntohl(i); 264 } 265 #endif 266 267 /* 268 * IMPORTANT: the hash function for dynamic rules must be commutative 269 * in source and destination (ip,port), because rules are bidirectional 270 * and we want to find both in the same bucket. 271 */ 272 static __inline int 273 hash_packet(const struct ipfw_flow_id *id, int buckets) 274 { 275 u_int32_t i; 276 277 #ifdef INET6 278 if (IS_IP6_FLOW_ID(id)) 279 i = hash_packet6(id); 280 else 281 #endif /* INET6 */ 282 i = (id->dst_ip) ^ (id->src_ip); 283 i ^= (id->dst_port) ^ (id->src_port); 284 return (i & (buckets - 1)); 285 } 286 287 #if 0 288 #define DYN_DEBUG(fmt, ...) do { \ 289 printf("%s: " fmt "\n", __func__, __VA_ARGS__); \ 290 } while (0) 291 #else 292 #define DYN_DEBUG(fmt, ...) 293 #endif 294 295 static char *default_state_name = "default"; 296 struct dyn_state_obj { 297 struct named_object no; 298 char name[64]; 299 }; 300 301 #define DYN_STATE_OBJ(ch, cmd) \ 302 ((struct dyn_state_obj *)SRV_OBJECT(ch, (cmd)->arg1)) 303 /* 304 * Classifier callback. 305 * Return 0 if opcode contains object that should be referenced 306 * or rewritten. 307 */ 308 static int 309 dyn_classify(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype) 310 { 311 312 DYN_DEBUG("opcode %d, arg1 %d", cmd->opcode, cmd->arg1); 313 /* Don't rewrite "check-state any" */ 314 if (cmd->arg1 == 0 && 315 cmd->opcode == O_CHECK_STATE) 316 return (1); 317 318 *puidx = cmd->arg1; 319 *ptype = 0; 320 return (0); 321 } 322 323 static void 324 dyn_update(ipfw_insn *cmd, uint16_t idx) 325 { 326 327 cmd->arg1 = idx; 328 DYN_DEBUG("opcode %d, arg1 %d", cmd->opcode, cmd->arg1); 329 } 330 331 static int 332 dyn_findbyname(struct ip_fw_chain *ch, struct tid_info *ti, 333 struct named_object **pno) 334 { 335 ipfw_obj_ntlv *ntlv; 336 const char *name; 337 338 DYN_DEBUG("uidx %d", ti->uidx); 339 if (ti->uidx != 0) { 340 if (ti->tlvs == NULL) 341 return (EINVAL); 342 /* Search ntlv in the buffer provided by user */ 343 ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx, 344 IPFW_TLV_STATE_NAME); 345 if (ntlv == NULL) 346 return (EINVAL); 347 name = ntlv->name; 348 } else 349 name = default_state_name; 350 /* 351 * Search named object with corresponding name. 352 * Since states objects are global - ignore the set value 353 * and use zero instead. 354 */ 355 *pno = ipfw_objhash_lookup_name_type(CHAIN_TO_SRV(ch), 0, 356 IPFW_TLV_STATE_NAME, name); 357 /* 358 * We always return success here. 359 * The caller will check *pno and mark object as unresolved, 360 * then it will automatically create "default" object. 361 */ 362 return (0); 363 } 364 365 static struct named_object * 366 dyn_findbykidx(struct ip_fw_chain *ch, uint16_t idx) 367 { 368 369 DYN_DEBUG("kidx %d", idx); 370 return (ipfw_objhash_lookup_kidx(CHAIN_TO_SRV(ch), idx)); 371 } 372 373 static int 374 dyn_create(struct ip_fw_chain *ch, struct tid_info *ti, 375 uint16_t *pkidx) 376 { 377 struct namedobj_instance *ni; 378 struct dyn_state_obj *obj; 379 struct named_object *no; 380 ipfw_obj_ntlv *ntlv; 381 char *name; 382 383 DYN_DEBUG("uidx %d", ti->uidx); 384 if (ti->uidx != 0) { 385 if (ti->tlvs == NULL) 386 return (EINVAL); 387 ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx, 388 IPFW_TLV_STATE_NAME); 389 if (ntlv == NULL) 390 return (EINVAL); 391 name = ntlv->name; 392 } else 393 name = default_state_name; 394 395 ni = CHAIN_TO_SRV(ch); 396 obj = malloc(sizeof(*obj), M_IPFW, M_WAITOK | M_ZERO); 397 obj->no.name = obj->name; 398 obj->no.etlv = IPFW_TLV_STATE_NAME; 399 strlcpy(obj->name, name, sizeof(obj->name)); 400 401 IPFW_UH_WLOCK(ch); 402 no = ipfw_objhash_lookup_name_type(ni, 0, 403 IPFW_TLV_STATE_NAME, name); 404 if (no != NULL) { 405 /* 406 * Object is already created. 407 * Just return its kidx and bump refcount. 408 */ 409 *pkidx = no->kidx; 410 no->refcnt++; 411 IPFW_UH_WUNLOCK(ch); 412 free(obj, M_IPFW); 413 DYN_DEBUG("\tfound kidx %d", *pkidx); 414 return (0); 415 } 416 if (ipfw_objhash_alloc_idx(ni, &obj->no.kidx) != 0) { 417 DYN_DEBUG("\talloc_idx failed for %s", name); 418 IPFW_UH_WUNLOCK(ch); 419 free(obj, M_IPFW); 420 return (ENOSPC); 421 } 422 ipfw_objhash_add(ni, &obj->no); 423 SRV_OBJECT(ch, obj->no.kidx) = obj; 424 obj->no.refcnt++; 425 *pkidx = obj->no.kidx; 426 IPFW_UH_WUNLOCK(ch); 427 DYN_DEBUG("\tcreated kidx %d", *pkidx); 428 return (0); 429 } 430 431 static void 432 dyn_destroy(struct ip_fw_chain *ch, struct named_object *no) 433 { 434 struct dyn_state_obj *obj; 435 436 IPFW_UH_WLOCK_ASSERT(ch); 437 438 KASSERT(no->refcnt == 1, 439 ("Destroying object '%s' (type %u, idx %u) with refcnt %u", 440 no->name, no->etlv, no->kidx, no->refcnt)); 441 442 DYN_DEBUG("kidx %d", no->kidx); 443 obj = SRV_OBJECT(ch, no->kidx); 444 SRV_OBJECT(ch, no->kidx) = NULL; 445 ipfw_objhash_del(CHAIN_TO_SRV(ch), no); 446 ipfw_objhash_free_idx(CHAIN_TO_SRV(ch), no->kidx); 447 448 free(obj, M_IPFW); 449 } 450 451 static struct opcode_obj_rewrite dyn_opcodes[] = { 452 { 453 O_KEEP_STATE, IPFW_TLV_STATE_NAME, 454 dyn_classify, dyn_update, 455 dyn_findbyname, dyn_findbykidx, 456 dyn_create, dyn_destroy 457 }, 458 { 459 O_CHECK_STATE, IPFW_TLV_STATE_NAME, 460 dyn_classify, dyn_update, 461 dyn_findbyname, dyn_findbykidx, 462 dyn_create, dyn_destroy 463 }, 464 { 465 O_PROBE_STATE, IPFW_TLV_STATE_NAME, 466 dyn_classify, dyn_update, 467 dyn_findbyname, dyn_findbykidx, 468 dyn_create, dyn_destroy 469 }, 470 { 471 O_LIMIT, IPFW_TLV_STATE_NAME, 472 dyn_classify, dyn_update, 473 dyn_findbyname, dyn_findbykidx, 474 dyn_create, dyn_destroy 475 }, 476 }; 477 /** 478 * Print customizable flow id description via log(9) facility. 479 */ 480 static void 481 print_dyn_rule_flags(const struct ipfw_flow_id *id, int dyn_type, 482 int log_flags, char *prefix, char *postfix) 483 { 484 struct in_addr da; 485 #ifdef INET6 486 char src[INET6_ADDRSTRLEN], dst[INET6_ADDRSTRLEN]; 487 #else 488 char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN]; 489 #endif 490 491 #ifdef INET6 492 if (IS_IP6_FLOW_ID(id)) { 493 ip6_sprintf(src, &id->src_ip6); 494 ip6_sprintf(dst, &id->dst_ip6); 495 } else 496 #endif 497 { 498 da.s_addr = htonl(id->src_ip); 499 inet_ntop(AF_INET, &da, src, sizeof(src)); 500 da.s_addr = htonl(id->dst_ip); 501 inet_ntop(AF_INET, &da, dst, sizeof(dst)); 502 } 503 log(log_flags, "ipfw: %s type %d %s %d -> %s %d, %d %s\n", 504 prefix, dyn_type, src, id->src_port, dst, 505 id->dst_port, DYN_COUNT, postfix); 506 } 507 508 #define print_dyn_rule(id, dtype, prefix, postfix) \ 509 print_dyn_rule_flags(id, dtype, LOG_DEBUG, prefix, postfix) 510 511 #define TIME_LEQ(a,b) ((int)((a)-(b)) <= 0) 512 #define TIME_LE(a,b) ((int)((a)-(b)) < 0) 513 514 static void 515 dyn_update_proto_state(ipfw_dyn_rule *q, const struct ipfw_flow_id *id, 516 const void *ulp, int dir) 517 { 518 const struct tcphdr *tcp; 519 uint32_t ack; 520 u_char flags; 521 522 if (id->proto == IPPROTO_TCP) { 523 tcp = (const struct tcphdr *)ulp; 524 flags = id->_flags & (TH_FIN | TH_SYN | TH_RST); 525 #define BOTH_SYN (TH_SYN | (TH_SYN << 8)) 526 #define BOTH_FIN (TH_FIN | (TH_FIN << 8)) 527 #define TCP_FLAGS (TH_FLAGS | (TH_FLAGS << 8)) 528 #define ACK_FWD 0x10000 /* fwd ack seen */ 529 #define ACK_REV 0x20000 /* rev ack seen */ 530 531 q->state |= (dir == MATCH_FORWARD) ? flags : (flags << 8); 532 switch (q->state & TCP_FLAGS) { 533 case TH_SYN: /* opening */ 534 q->expire = time_uptime + V_dyn_syn_lifetime; 535 break; 536 537 case BOTH_SYN: /* move to established */ 538 case BOTH_SYN | TH_FIN: /* one side tries to close */ 539 case BOTH_SYN | (TH_FIN << 8): 540 #define _SEQ_GE(a,b) ((int)(a) - (int)(b) >= 0) 541 if (tcp == NULL) 542 break; 543 544 ack = ntohl(tcp->th_ack); 545 if (dir == MATCH_FORWARD) { 546 if (q->ack_fwd == 0 || 547 _SEQ_GE(ack, q->ack_fwd)) { 548 q->ack_fwd = ack; 549 q->state |= ACK_FWD; 550 } 551 } else { 552 if (q->ack_rev == 0 || 553 _SEQ_GE(ack, q->ack_rev)) { 554 q->ack_rev = ack; 555 q->state |= ACK_REV; 556 } 557 } 558 if ((q->state & (ACK_FWD | ACK_REV)) == 559 (ACK_FWD | ACK_REV)) { 560 q->expire = time_uptime + V_dyn_ack_lifetime; 561 q->state &= ~(ACK_FWD | ACK_REV); 562 } 563 break; 564 565 case BOTH_SYN | BOTH_FIN: /* both sides closed */ 566 if (V_dyn_fin_lifetime >= V_dyn_keepalive_period) 567 V_dyn_fin_lifetime = 568 V_dyn_keepalive_period - 1; 569 q->expire = time_uptime + V_dyn_fin_lifetime; 570 break; 571 572 default: 573 #if 0 574 /* 575 * reset or some invalid combination, but can also 576 * occur if we use keep-state the wrong way. 577 */ 578 if ( (q->state & ((TH_RST << 8)|TH_RST)) == 0) 579 printf("invalid state: 0x%x\n", q->state); 580 #endif 581 if (V_dyn_rst_lifetime >= V_dyn_keepalive_period) 582 V_dyn_rst_lifetime = 583 V_dyn_keepalive_period - 1; 584 q->expire = time_uptime + V_dyn_rst_lifetime; 585 break; 586 } 587 } else if (id->proto == IPPROTO_UDP || 588 id->proto == IPPROTO_UDPLITE) { 589 q->expire = time_uptime + V_dyn_udp_lifetime; 590 } else { 591 /* other protocols */ 592 q->expire = time_uptime + V_dyn_short_lifetime; 593 } 594 } 595 596 /* 597 * Lookup a dynamic rule, locked version. 598 */ 599 static ipfw_dyn_rule * 600 lookup_dyn_rule_locked(const struct ipfw_flow_id *pkt, const void *ulp, 601 int i, int *match_direction, uint16_t kidx) 602 { 603 /* 604 * Stateful ipfw extensions. 605 * Lookup into dynamic session queue. 606 */ 607 ipfw_dyn_rule *prev, *q = NULL; 608 int dir; 609 610 IPFW_BUCK_ASSERT(i); 611 612 dir = MATCH_NONE; 613 for (prev = NULL, q = V_ipfw_dyn_v[i].head; q; prev = q, q = q->next) { 614 if (q->dyn_type == O_LIMIT_PARENT) 615 continue; 616 617 if (pkt->addr_type != q->id.addr_type) 618 continue; 619 620 if (pkt->proto != q->id.proto) 621 continue; 622 623 if (kidx != 0 && kidx != q->kidx) 624 continue; 625 626 if (IS_IP6_FLOW_ID(pkt)) { 627 if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.src_ip6) && 628 IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.dst_ip6) && 629 pkt->src_port == q->id.src_port && 630 pkt->dst_port == q->id.dst_port) { 631 dir = MATCH_FORWARD; 632 break; 633 } 634 if (IN6_ARE_ADDR_EQUAL(&pkt->src_ip6, &q->id.dst_ip6) && 635 IN6_ARE_ADDR_EQUAL(&pkt->dst_ip6, &q->id.src_ip6) && 636 pkt->src_port == q->id.dst_port && 637 pkt->dst_port == q->id.src_port) { 638 dir = MATCH_REVERSE; 639 break; 640 } 641 } else { 642 if (pkt->src_ip == q->id.src_ip && 643 pkt->dst_ip == q->id.dst_ip && 644 pkt->src_port == q->id.src_port && 645 pkt->dst_port == q->id.dst_port) { 646 dir = MATCH_FORWARD; 647 break; 648 } 649 if (pkt->src_ip == q->id.dst_ip && 650 pkt->dst_ip == q->id.src_ip && 651 pkt->src_port == q->id.dst_port && 652 pkt->dst_port == q->id.src_port) { 653 dir = MATCH_REVERSE; 654 break; 655 } 656 } 657 } 658 if (q == NULL) 659 goto done; /* q = NULL, not found */ 660 661 if (prev != NULL) { /* found and not in front */ 662 prev->next = q->next; 663 q->next = V_ipfw_dyn_v[i].head; 664 V_ipfw_dyn_v[i].head = q; 665 } 666 667 /* update state according to flags */ 668 dyn_update_proto_state(q, pkt, ulp, dir); 669 done: 670 if (match_direction != NULL) 671 *match_direction = dir; 672 return (q); 673 } 674 675 struct ip_fw * 676 ipfw_dyn_lookup_state(const struct ipfw_flow_id *pkt, const void *ulp, 677 int pktlen, int *match_direction, uint16_t kidx) 678 { 679 struct ip_fw *rule; 680 ipfw_dyn_rule *q; 681 int i; 682 683 i = hash_packet(pkt, V_curr_dyn_buckets); 684 685 IPFW_BUCK_LOCK(i); 686 q = lookup_dyn_rule_locked(pkt, ulp, i, match_direction, kidx); 687 if (q == NULL) 688 rule = NULL; 689 else { 690 rule = q->rule; 691 IPFW_INC_DYN_COUNTER(q, pktlen); 692 } 693 IPFW_BUCK_UNLOCK(i); 694 return (rule); 695 } 696 697 static int 698 resize_dynamic_table(struct ip_fw_chain *chain, int nbuckets) 699 { 700 int i, k, nbuckets_old; 701 ipfw_dyn_rule *q; 702 struct ipfw_dyn_bucket *dyn_v, *dyn_v_old; 703 704 /* Check if given number is power of 2 and less than 64k */ 705 if ((nbuckets > 65536) || (!powerof2(nbuckets))) 706 return 1; 707 708 CTR3(KTR_NET, "%s: resize dynamic hash: %d -> %d", __func__, 709 V_curr_dyn_buckets, nbuckets); 710 711 /* Allocate and initialize new hash */ 712 dyn_v = malloc(nbuckets * sizeof(*dyn_v), M_IPFW, 713 M_WAITOK | M_ZERO); 714 715 for (i = 0 ; i < nbuckets; i++) 716 IPFW_BUCK_LOCK_INIT(&dyn_v[i]); 717 718 /* 719 * Call upper half lock, as get_map() do to ease 720 * read-only access to dynamic rules hash from sysctl 721 */ 722 IPFW_UH_WLOCK(chain); 723 724 /* 725 * Acquire chain write lock to permit hash access 726 * for main traffic path without additional locks 727 */ 728 IPFW_WLOCK(chain); 729 730 /* Save old values */ 731 nbuckets_old = V_curr_dyn_buckets; 732 dyn_v_old = V_ipfw_dyn_v; 733 734 /* Skip relinking if array is not set up */ 735 if (V_ipfw_dyn_v == NULL) 736 V_curr_dyn_buckets = 0; 737 738 /* Re-link all dynamic states */ 739 for (i = 0 ; i < V_curr_dyn_buckets ; i++) { 740 while (V_ipfw_dyn_v[i].head != NULL) { 741 /* Remove from current chain */ 742 q = V_ipfw_dyn_v[i].head; 743 V_ipfw_dyn_v[i].head = q->next; 744 745 /* Get new hash value */ 746 k = hash_packet(&q->id, nbuckets); 747 q->bucket = k; 748 /* Add to the new head */ 749 q->next = dyn_v[k].head; 750 dyn_v[k].head = q; 751 } 752 } 753 754 /* Update current pointers/buckets values */ 755 V_curr_dyn_buckets = nbuckets; 756 V_ipfw_dyn_v = dyn_v; 757 758 IPFW_WUNLOCK(chain); 759 760 IPFW_UH_WUNLOCK(chain); 761 762 /* Start periodic callout on initial creation */ 763 if (dyn_v_old == NULL) { 764 callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, curvnet, 0); 765 return (0); 766 } 767 768 /* Destroy all mutexes */ 769 for (i = 0 ; i < nbuckets_old ; i++) 770 IPFW_BUCK_LOCK_DESTROY(&dyn_v_old[i]); 771 772 /* Free old hash */ 773 free(dyn_v_old, M_IPFW); 774 775 return 0; 776 } 777 778 /** 779 * Install state of type 'type' for a dynamic session. 780 * The hash table contains two type of rules: 781 * - regular rules (O_KEEP_STATE) 782 * - rules for sessions with limited number of sess per user 783 * (O_LIMIT). When they are created, the parent is 784 * increased by 1, and decreased on delete. In this case, 785 * the third parameter is the parent rule and not the chain. 786 * - "parent" rules for the above (O_LIMIT_PARENT). 787 */ 788 static ipfw_dyn_rule * 789 add_dyn_rule(const struct ipfw_flow_id *id, int i, uint8_t dyn_type, 790 struct ip_fw *rule, uint16_t kidx) 791 { 792 ipfw_dyn_rule *r; 793 794 IPFW_BUCK_ASSERT(i); 795 796 r = uma_zalloc(V_ipfw_dyn_rule_zone, M_NOWAIT | M_ZERO); 797 if (r == NULL) { 798 if (last_log != time_uptime) { 799 last_log = time_uptime; 800 log(LOG_DEBUG, 801 "ipfw: Cannot allocate dynamic state, " 802 "consider increasing net.inet.ip.fw.dyn_max\n"); 803 } 804 return NULL; 805 } 806 ipfw_dyn_count++; 807 808 /* 809 * refcount on parent is already incremented, so 810 * it is safe to use parent unlocked. 811 */ 812 if (dyn_type == O_LIMIT) { 813 ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule; 814 if ( parent->dyn_type != O_LIMIT_PARENT) 815 panic("invalid parent"); 816 r->parent = parent; 817 rule = parent->rule; 818 } 819 820 r->id = *id; 821 r->expire = time_uptime + V_dyn_syn_lifetime; 822 r->rule = rule; 823 r->dyn_type = dyn_type; 824 IPFW_ZERO_DYN_COUNTER(r); 825 r->count = 0; 826 r->kidx = kidx; 827 r->bucket = i; 828 r->next = V_ipfw_dyn_v[i].head; 829 V_ipfw_dyn_v[i].head = r; 830 DEB(print_dyn_rule(id, dyn_type, "add dyn entry", "total");) 831 return r; 832 } 833 834 /** 835 * lookup dynamic parent rule using pkt and rule as search keys. 836 * If the lookup fails, then install one. 837 */ 838 static ipfw_dyn_rule * 839 lookup_dyn_parent(const struct ipfw_flow_id *pkt, int *pindex, 840 struct ip_fw *rule, uint16_t kidx) 841 { 842 ipfw_dyn_rule *q; 843 int i, is_v6; 844 845 is_v6 = IS_IP6_FLOW_ID(pkt); 846 i = hash_packet( pkt, V_curr_dyn_buckets ); 847 *pindex = i; 848 IPFW_BUCK_LOCK(i); 849 for (q = V_ipfw_dyn_v[i].head ; q != NULL ; q=q->next) 850 if (q->dyn_type == O_LIMIT_PARENT && 851 kidx == q->kidx && 852 rule == q->rule && 853 pkt->proto == q->id.proto && 854 pkt->src_port == q->id.src_port && 855 pkt->dst_port == q->id.dst_port && 856 ( 857 (is_v6 && 858 IN6_ARE_ADDR_EQUAL(&(pkt->src_ip6), 859 &(q->id.src_ip6)) && 860 IN6_ARE_ADDR_EQUAL(&(pkt->dst_ip6), 861 &(q->id.dst_ip6))) || 862 (!is_v6 && 863 pkt->src_ip == q->id.src_ip && 864 pkt->dst_ip == q->id.dst_ip) 865 ) 866 ) { 867 q->expire = time_uptime + V_dyn_short_lifetime; 868 DEB(print_dyn_rule(pkt, q->dyn_type, 869 "lookup_dyn_parent found", "");) 870 return q; 871 } 872 873 /* Add virtual limiting rule */ 874 return add_dyn_rule(pkt, i, O_LIMIT_PARENT, rule, kidx); 875 } 876 877 /** 878 * Install dynamic state for rule type cmd->o.opcode 879 * 880 * Returns 1 (failure) if state is not installed because of errors or because 881 * session limitations are enforced. 882 */ 883 int 884 ipfw_dyn_install_state(struct ip_fw_chain *chain, struct ip_fw *rule, 885 ipfw_insn_limit *cmd, struct ip_fw_args *args, uint32_t tablearg) 886 { 887 ipfw_dyn_rule *q; 888 int i; 889 890 DEB(print_dyn_rule(&args->f_id, cmd->o.opcode, "install_state", 891 (cmd->o.arg1 == 0 ? "": DYN_STATE_OBJ(chain, &cmd->o)->name));) 892 893 i = hash_packet(&args->f_id, V_curr_dyn_buckets); 894 895 IPFW_BUCK_LOCK(i); 896 897 q = lookup_dyn_rule_locked(&args->f_id, NULL, i, NULL, cmd->o.arg1); 898 if (q != NULL) { /* should never occur */ 899 DEB( 900 if (last_log != time_uptime) { 901 last_log = time_uptime; 902 printf("ipfw: %s: entry already present, done\n", 903 __func__); 904 }) 905 IPFW_BUCK_UNLOCK(i); 906 return (0); 907 } 908 909 /* 910 * State limiting is done via uma(9) zone limiting. 911 * Save pointer to newly-installed rule and reject 912 * packet if add_dyn_rule() returned NULL. 913 * Note q is currently set to NULL. 914 */ 915 916 switch (cmd->o.opcode) { 917 case O_KEEP_STATE: /* bidir rule */ 918 q = add_dyn_rule(&args->f_id, i, O_KEEP_STATE, rule, 919 cmd->o.arg1); 920 break; 921 922 case O_LIMIT: { /* limit number of sessions */ 923 struct ipfw_flow_id id; 924 ipfw_dyn_rule *parent; 925 uint32_t conn_limit; 926 uint16_t limit_mask = cmd->limit_mask; 927 int pindex; 928 929 conn_limit = IP_FW_ARG_TABLEARG(chain, cmd->conn_limit, limit); 930 931 DEB( 932 if (cmd->conn_limit == IP_FW_TARG) 933 printf("ipfw: %s: O_LIMIT rule, conn_limit: %u " 934 "(tablearg)\n", __func__, conn_limit); 935 else 936 printf("ipfw: %s: O_LIMIT rule, conn_limit: %u\n", 937 __func__, conn_limit); 938 ) 939 940 id.dst_ip = id.src_ip = id.dst_port = id.src_port = 0; 941 id.proto = args->f_id.proto; 942 id.addr_type = args->f_id.addr_type; 943 id.fib = M_GETFIB(args->m); 944 945 if (IS_IP6_FLOW_ID (&(args->f_id))) { 946 bzero(&id.src_ip6, sizeof(id.src_ip6)); 947 bzero(&id.dst_ip6, sizeof(id.dst_ip6)); 948 949 if (limit_mask & DYN_SRC_ADDR) 950 id.src_ip6 = args->f_id.src_ip6; 951 if (limit_mask & DYN_DST_ADDR) 952 id.dst_ip6 = args->f_id.dst_ip6; 953 } else { 954 if (limit_mask & DYN_SRC_ADDR) 955 id.src_ip = args->f_id.src_ip; 956 if (limit_mask & DYN_DST_ADDR) 957 id.dst_ip = args->f_id.dst_ip; 958 } 959 if (limit_mask & DYN_SRC_PORT) 960 id.src_port = args->f_id.src_port; 961 if (limit_mask & DYN_DST_PORT) 962 id.dst_port = args->f_id.dst_port; 963 964 /* 965 * We have to release lock for previous bucket to 966 * avoid possible deadlock 967 */ 968 IPFW_BUCK_UNLOCK(i); 969 970 parent = lookup_dyn_parent(&id, &pindex, rule, cmd->o.arg1); 971 if (parent == NULL) { 972 printf("ipfw: %s: add parent failed\n", __func__); 973 IPFW_BUCK_UNLOCK(pindex); 974 return (1); 975 } 976 977 if (parent->count >= conn_limit) { 978 if (V_fw_verbose && last_log != time_uptime) { 979 char sbuf[24]; 980 last_log = time_uptime; 981 snprintf(sbuf, sizeof(sbuf), 982 "%d drop session", 983 parent->rule->rulenum); 984 print_dyn_rule_flags(&args->f_id, 985 cmd->o.opcode, 986 LOG_SECURITY | LOG_DEBUG, 987 sbuf, "too many entries"); 988 } 989 IPFW_BUCK_UNLOCK(pindex); 990 return (1); 991 } 992 /* Increment counter on parent */ 993 parent->count++; 994 IPFW_BUCK_UNLOCK(pindex); 995 996 IPFW_BUCK_LOCK(i); 997 q = add_dyn_rule(&args->f_id, i, O_LIMIT, 998 (struct ip_fw *)parent, cmd->o.arg1); 999 if (q == NULL) { 1000 /* Decrement index and notify caller */ 1001 IPFW_BUCK_UNLOCK(i); 1002 IPFW_BUCK_LOCK(pindex); 1003 parent->count--; 1004 IPFW_BUCK_UNLOCK(pindex); 1005 return (1); 1006 } 1007 break; 1008 } 1009 default: 1010 printf("ipfw: %s: unknown dynamic rule type %u\n", 1011 __func__, cmd->o.opcode); 1012 } 1013 1014 if (q == NULL) { 1015 IPFW_BUCK_UNLOCK(i); 1016 return (1); /* Notify caller about failure */ 1017 } 1018 1019 dyn_update_proto_state(q, &args->f_id, NULL, MATCH_FORWARD); 1020 IPFW_BUCK_UNLOCK(i); 1021 return (0); 1022 } 1023 1024 /* 1025 * Queue keepalive packets for given dynamic rule 1026 */ 1027 static struct mbuf ** 1028 ipfw_dyn_send_ka(struct mbuf **mtailp, ipfw_dyn_rule *q) 1029 { 1030 struct mbuf *m_rev, *m_fwd; 1031 1032 m_rev = (q->state & ACK_REV) ? NULL : 1033 ipfw_send_pkt(NULL, &(q->id), q->ack_rev - 1, q->ack_fwd, TH_SYN); 1034 m_fwd = (q->state & ACK_FWD) ? NULL : 1035 ipfw_send_pkt(NULL, &(q->id), q->ack_fwd - 1, q->ack_rev, 0); 1036 1037 if (m_rev != NULL) { 1038 *mtailp = m_rev; 1039 mtailp = &(*mtailp)->m_nextpkt; 1040 } 1041 if (m_fwd != NULL) { 1042 *mtailp = m_fwd; 1043 mtailp = &(*mtailp)->m_nextpkt; 1044 } 1045 1046 return (mtailp); 1047 } 1048 1049 /* 1050 * This procedure is used to perform various maintenance 1051 * on dynamic hash list. Currently it is called every second. 1052 */ 1053 static void 1054 ipfw_dyn_tick(void * vnetx) 1055 { 1056 struct ip_fw_chain *chain; 1057 int check_ka = 0; 1058 #ifdef VIMAGE 1059 struct vnet *vp = vnetx; 1060 #endif 1061 1062 CURVNET_SET(vp); 1063 1064 chain = &V_layer3_chain; 1065 1066 /* Run keepalive checks every keepalive_period iff ka is enabled */ 1067 if ((V_dyn_keepalive_last + V_dyn_keepalive_period <= time_uptime) && 1068 (V_dyn_keepalive != 0)) { 1069 V_dyn_keepalive_last = time_uptime; 1070 check_ka = 1; 1071 } 1072 1073 check_dyn_rules(chain, NULL, check_ka, 1); 1074 1075 callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, vnetx, 0); 1076 1077 CURVNET_RESTORE(); 1078 } 1079 1080 1081 /* 1082 * Walk through all dynamic states doing generic maintenance: 1083 * 1) free expired states 1084 * 2) free all states based on deleted rule / set 1085 * 3) send keepalives for states if needed 1086 * 1087 * @chain - pointer to current ipfw rules chain 1088 * @rule - delete all states originated by given rule if != NULL 1089 * @set - delete all states originated by any rule in set @set if != RESVD_SET 1090 * @check_ka - perform checking/sending keepalives 1091 * @timer - indicate call from timer routine. 1092 * 1093 * Timer routine must call this function unlocked to permit 1094 * sending keepalives/resizing table. 1095 * 1096 * Others has to call function with IPFW_UH_WLOCK held. 1097 * Additionally, function assume that dynamic rule/set is 1098 * ALREADY deleted so no new states can be generated by 1099 * 'deleted' rules. 1100 * 1101 * Write lock is needed to ensure that unused parent rules 1102 * are not freed by other instance (see stage 2, 3) 1103 */ 1104 static void 1105 check_dyn_rules(struct ip_fw_chain *chain, ipfw_range_tlv *rt, 1106 int check_ka, int timer) 1107 { 1108 struct mbuf *m0, *m, *mnext, **mtailp; 1109 struct ip *h; 1110 int i, dyn_count, new_buckets = 0, max_buckets; 1111 int expired = 0, expired_limits = 0, parents = 0, total = 0; 1112 ipfw_dyn_rule *q, *q_prev, *q_next; 1113 ipfw_dyn_rule *exp_head, **exptailp; 1114 ipfw_dyn_rule *exp_lhead, **expltailp; 1115 1116 KASSERT(V_ipfw_dyn_v != NULL, ("%s: dynamic table not allocated", 1117 __func__)); 1118 1119 /* Avoid possible LOR */ 1120 KASSERT(!check_ka || timer, ("%s: keepalive check with lock held", 1121 __func__)); 1122 1123 /* 1124 * Do not perform any checks if we currently have no dynamic states 1125 */ 1126 if (DYN_COUNT == 0) 1127 return; 1128 1129 /* Expired states */ 1130 exp_head = NULL; 1131 exptailp = &exp_head; 1132 1133 /* Expired limit states */ 1134 exp_lhead = NULL; 1135 expltailp = &exp_lhead; 1136 1137 /* 1138 * We make a chain of packets to go out here -- not deferring 1139 * until after we drop the IPFW dynamic rule lock would result 1140 * in a lock order reversal with the normal packet input -> ipfw 1141 * call stack. 1142 */ 1143 m0 = NULL; 1144 mtailp = &m0; 1145 1146 /* Protect from hash resizing */ 1147 if (timer != 0) 1148 IPFW_UH_WLOCK(chain); 1149 else 1150 IPFW_UH_WLOCK_ASSERT(chain); 1151 1152 #define NEXT_RULE() { q_prev = q; q = q->next ; continue; } 1153 1154 /* Stage 1: perform requested deletion */ 1155 for (i = 0 ; i < V_curr_dyn_buckets ; i++) { 1156 IPFW_BUCK_LOCK(i); 1157 for (q = V_ipfw_dyn_v[i].head, q_prev = q; q ; ) { 1158 /* account every rule */ 1159 total++; 1160 1161 /* Skip parent rules at all */ 1162 if (q->dyn_type == O_LIMIT_PARENT) { 1163 parents++; 1164 NEXT_RULE(); 1165 } 1166 1167 /* 1168 * Remove rules which are: 1169 * 1) expired 1170 * 2) matches deletion range 1171 */ 1172 if ((TIME_LEQ(q->expire, time_uptime)) || 1173 (rt != NULL && ipfw_match_range(q->rule, rt))) { 1174 if (TIME_LE(time_uptime, q->expire) && 1175 q->dyn_type == O_KEEP_STATE && 1176 V_dyn_keep_states != 0) { 1177 /* 1178 * Do not delete state if 1179 * it is not expired and 1180 * dyn_keep_states is ON. 1181 * However we need to re-link it 1182 * to any other stable rule 1183 */ 1184 q->rule = chain->default_rule; 1185 NEXT_RULE(); 1186 } 1187 1188 /* Unlink q from current list */ 1189 q_next = q->next; 1190 if (q == V_ipfw_dyn_v[i].head) 1191 V_ipfw_dyn_v[i].head = q_next; 1192 else 1193 q_prev->next = q_next; 1194 1195 q->next = NULL; 1196 1197 /* queue q to expire list */ 1198 if (q->dyn_type != O_LIMIT) { 1199 *exptailp = q; 1200 exptailp = &(*exptailp)->next; 1201 DEB(print_dyn_rule(&q->id, q->dyn_type, 1202 "unlink entry", "left"); 1203 ) 1204 } else { 1205 /* Separate list for limit rules */ 1206 *expltailp = q; 1207 expltailp = &(*expltailp)->next; 1208 expired_limits++; 1209 DEB(print_dyn_rule(&q->id, q->dyn_type, 1210 "unlink limit entry", "left"); 1211 ) 1212 } 1213 1214 q = q_next; 1215 expired++; 1216 continue; 1217 } 1218 1219 /* 1220 * Check if we need to send keepalive: 1221 * we need to ensure if is time to do KA, 1222 * this is established TCP session, and 1223 * expire time is within keepalive interval 1224 */ 1225 if ((check_ka != 0) && (q->id.proto == IPPROTO_TCP) && 1226 ((q->state & BOTH_SYN) == BOTH_SYN) && 1227 (TIME_LEQ(q->expire, time_uptime + 1228 V_dyn_keepalive_interval))) 1229 mtailp = ipfw_dyn_send_ka(mtailp, q); 1230 1231 NEXT_RULE(); 1232 } 1233 IPFW_BUCK_UNLOCK(i); 1234 } 1235 1236 /* Stage 2: decrement counters from O_LIMIT parents */ 1237 if (expired_limits != 0) { 1238 /* 1239 * XXX: Note that deleting set with more than one 1240 * heavily-used LIMIT rules can result in overwhelming 1241 * locking due to lack of per-hash value sorting 1242 * 1243 * We should probably think about: 1244 * 1) pre-allocating hash of size, say, 1245 * MAX(16, V_curr_dyn_buckets / 1024) 1246 * 2) checking if expired_limits is large enough 1247 * 3) If yes, init hash (or its part), re-link 1248 * current list and start decrementing procedure in 1249 * each bucket separately 1250 */ 1251 1252 /* 1253 * Small optimization: do not unlock bucket until 1254 * we see the next item resides in different bucket 1255 */ 1256 if (exp_lhead != NULL) { 1257 i = exp_lhead->parent->bucket; 1258 IPFW_BUCK_LOCK(i); 1259 } 1260 for (q = exp_lhead; q != NULL; q = q->next) { 1261 if (i != q->parent->bucket) { 1262 IPFW_BUCK_UNLOCK(i); 1263 i = q->parent->bucket; 1264 IPFW_BUCK_LOCK(i); 1265 } 1266 1267 /* Decrease parent refcount */ 1268 q->parent->count--; 1269 } 1270 if (exp_lhead != NULL) 1271 IPFW_BUCK_UNLOCK(i); 1272 } 1273 1274 /* 1275 * We protectet ourselves from unused parent deletion 1276 * (from the timer function) by holding UH write lock. 1277 */ 1278 1279 /* Stage 3: remove unused parent rules */ 1280 if ((parents != 0) && (expired != 0)) { 1281 for (i = 0 ; i < V_curr_dyn_buckets ; i++) { 1282 IPFW_BUCK_LOCK(i); 1283 for (q = V_ipfw_dyn_v[i].head, q_prev = q ; q ; ) { 1284 if (q->dyn_type != O_LIMIT_PARENT) 1285 NEXT_RULE(); 1286 1287 if (q->count != 0) 1288 NEXT_RULE(); 1289 1290 /* Parent rule without consumers */ 1291 1292 /* Unlink q from current list */ 1293 q_next = q->next; 1294 if (q == V_ipfw_dyn_v[i].head) 1295 V_ipfw_dyn_v[i].head = q_next; 1296 else 1297 q_prev->next = q_next; 1298 1299 q->next = NULL; 1300 1301 /* Add to expired list */ 1302 *exptailp = q; 1303 exptailp = &(*exptailp)->next; 1304 1305 DEB(print_dyn_rule(&q->id, q->dyn_type, 1306 "unlink parent entry", "left"); 1307 ) 1308 1309 expired++; 1310 1311 q = q_next; 1312 } 1313 IPFW_BUCK_UNLOCK(i); 1314 } 1315 } 1316 1317 #undef NEXT_RULE 1318 1319 if (timer != 0) { 1320 /* 1321 * Check if we need to resize hash: 1322 * if current number of states exceeds number of buckes in hash, 1323 * grow hash size to the minimum power of 2 which is bigger than 1324 * current states count. Limit hash size by 64k. 1325 */ 1326 max_buckets = (V_dyn_buckets_max > 65536) ? 1327 65536 : V_dyn_buckets_max; 1328 1329 dyn_count = DYN_COUNT; 1330 1331 if ((dyn_count > V_curr_dyn_buckets * 2) && 1332 (dyn_count < max_buckets)) { 1333 new_buckets = V_curr_dyn_buckets; 1334 while (new_buckets < dyn_count) { 1335 new_buckets *= 2; 1336 1337 if (new_buckets >= max_buckets) 1338 break; 1339 } 1340 } 1341 1342 IPFW_UH_WUNLOCK(chain); 1343 } 1344 1345 /* Finally delete old states ad limits if any */ 1346 for (q = exp_head; q != NULL; q = q_next) { 1347 q_next = q->next; 1348 uma_zfree(V_ipfw_dyn_rule_zone, q); 1349 ipfw_dyn_count--; 1350 } 1351 1352 for (q = exp_lhead; q != NULL; q = q_next) { 1353 q_next = q->next; 1354 uma_zfree(V_ipfw_dyn_rule_zone, q); 1355 ipfw_dyn_count--; 1356 } 1357 1358 /* 1359 * The rest code MUST be called from timer routine only 1360 * without holding any locks 1361 */ 1362 if (timer == 0) 1363 return; 1364 1365 /* Send keepalive packets if any */ 1366 for (m = m0; m != NULL; m = mnext) { 1367 mnext = m->m_nextpkt; 1368 m->m_nextpkt = NULL; 1369 h = mtod(m, struct ip *); 1370 if (h->ip_v == 4) 1371 ip_output(m, NULL, NULL, 0, NULL, NULL); 1372 #ifdef INET6 1373 else 1374 ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL); 1375 #endif 1376 } 1377 1378 /* Run table resize without holding any locks */ 1379 if (new_buckets != 0) 1380 resize_dynamic_table(chain, new_buckets); 1381 } 1382 1383 /* 1384 * Deletes all dynamic rules originated by given rule or all rules in 1385 * given set. Specify RESVD_SET to indicate set should not be used. 1386 * @chain - pointer to current ipfw rules chain 1387 * @rr - delete all states originated by rules in matched range. 1388 * 1389 * Function has to be called with IPFW_UH_WLOCK held. 1390 * Additionally, function assume that dynamic rule/set is 1391 * ALREADY deleted so no new states can be generated by 1392 * 'deleted' rules. 1393 */ 1394 void 1395 ipfw_expire_dyn_rules(struct ip_fw_chain *chain, ipfw_range_tlv *rt) 1396 { 1397 1398 check_dyn_rules(chain, rt, 0, 0); 1399 } 1400 1401 /* 1402 * Check if rule contains at least one dynamic opcode. 1403 * 1404 * Returns 1 if such opcode is found, 0 otherwise. 1405 */ 1406 int 1407 ipfw_is_dyn_rule(struct ip_fw *rule) 1408 { 1409 int cmdlen, l; 1410 ipfw_insn *cmd; 1411 1412 l = rule->cmd_len; 1413 cmd = rule->cmd; 1414 cmdlen = 0; 1415 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) { 1416 cmdlen = F_LEN(cmd); 1417 1418 switch (cmd->opcode) { 1419 case O_LIMIT: 1420 case O_KEEP_STATE: 1421 case O_PROBE_STATE: 1422 case O_CHECK_STATE: 1423 return (1); 1424 } 1425 } 1426 1427 return (0); 1428 } 1429 1430 void 1431 ipfw_dyn_init(struct ip_fw_chain *chain) 1432 { 1433 1434 V_ipfw_dyn_v = NULL; 1435 V_dyn_buckets_max = 256; /* must be power of 2 */ 1436 V_curr_dyn_buckets = 256; /* must be power of 2 */ 1437 1438 V_dyn_ack_lifetime = 300; 1439 V_dyn_syn_lifetime = 20; 1440 V_dyn_fin_lifetime = 1; 1441 V_dyn_rst_lifetime = 1; 1442 V_dyn_udp_lifetime = 10; 1443 V_dyn_short_lifetime = 5; 1444 1445 V_dyn_keepalive_interval = 20; 1446 V_dyn_keepalive_period = 5; 1447 V_dyn_keepalive = 1; /* do send keepalives */ 1448 V_dyn_keepalive_last = time_uptime; 1449 1450 V_dyn_max = 16384; /* max # of dynamic rules */ 1451 1452 V_ipfw_dyn_rule_zone = uma_zcreate("IPFW dynamic rule", 1453 sizeof(ipfw_dyn_rule), NULL, NULL, NULL, NULL, 1454 UMA_ALIGN_PTR, 0); 1455 1456 /* Enforce limit on dynamic rules */ 1457 uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max); 1458 1459 callout_init(&V_ipfw_timeout, 1); 1460 1461 /* 1462 * This can potentially be done on first dynamic rule 1463 * being added to chain. 1464 */ 1465 resize_dynamic_table(chain, V_curr_dyn_buckets); 1466 IPFW_ADD_OBJ_REWRITER(IS_DEFAULT_VNET(curvnet), dyn_opcodes); 1467 } 1468 1469 void 1470 ipfw_dyn_uninit(int pass) 1471 { 1472 int i; 1473 1474 if (pass == 0) { 1475 callout_drain(&V_ipfw_timeout); 1476 return; 1477 } 1478 IPFW_DEL_OBJ_REWRITER(IS_DEFAULT_VNET(curvnet), dyn_opcodes); 1479 1480 if (V_ipfw_dyn_v != NULL) { 1481 /* 1482 * Skip deleting all dynamic states - 1483 * uma_zdestroy() does this more efficiently; 1484 */ 1485 1486 /* Destroy all mutexes */ 1487 for (i = 0 ; i < V_curr_dyn_buckets ; i++) 1488 IPFW_BUCK_LOCK_DESTROY(&V_ipfw_dyn_v[i]); 1489 free(V_ipfw_dyn_v, M_IPFW); 1490 V_ipfw_dyn_v = NULL; 1491 } 1492 1493 uma_zdestroy(V_ipfw_dyn_rule_zone); 1494 } 1495 1496 #ifdef SYSCTL_NODE 1497 /* 1498 * Get/set maximum number of dynamic states in given VNET instance. 1499 */ 1500 static int 1501 sysctl_ipfw_dyn_max(SYSCTL_HANDLER_ARGS) 1502 { 1503 int error; 1504 unsigned int nstates; 1505 1506 nstates = V_dyn_max; 1507 1508 error = sysctl_handle_int(oidp, &nstates, 0, req); 1509 /* Read operation or some error */ 1510 if ((error != 0) || (req->newptr == NULL)) 1511 return (error); 1512 1513 V_dyn_max = nstates; 1514 uma_zone_set_max(V_ipfw_dyn_rule_zone, V_dyn_max); 1515 1516 return (0); 1517 } 1518 1519 /* 1520 * Get current number of dynamic states in given VNET instance. 1521 */ 1522 static int 1523 sysctl_ipfw_dyn_count(SYSCTL_HANDLER_ARGS) 1524 { 1525 int error; 1526 unsigned int nstates; 1527 1528 nstates = DYN_COUNT; 1529 1530 error = sysctl_handle_int(oidp, &nstates, 0, req); 1531 1532 return (error); 1533 } 1534 #endif 1535 1536 /* 1537 * Returns size of dynamic states in legacy format 1538 */ 1539 int 1540 ipfw_dyn_len(void) 1541 { 1542 1543 return (V_ipfw_dyn_v == NULL) ? 0 : 1544 (DYN_COUNT * sizeof(ipfw_dyn_rule)); 1545 } 1546 1547 /* 1548 * Returns number of dynamic states. 1549 * Used by dump format v1 (current). 1550 */ 1551 int 1552 ipfw_dyn_get_count(void) 1553 { 1554 1555 return (V_ipfw_dyn_v == NULL) ? 0 : DYN_COUNT; 1556 } 1557 1558 static void 1559 export_dyn_rule(ipfw_dyn_rule *src, ipfw_dyn_rule *dst) 1560 { 1561 uint16_t rulenum; 1562 1563 rulenum = (uint16_t)src->rule->rulenum; 1564 memcpy(dst, src, sizeof(*src)); 1565 memcpy(&dst->rule, &rulenum, sizeof(rulenum)); 1566 /* 1567 * store set number into high word of 1568 * dst->rule pointer. 1569 */ 1570 memcpy((char *)&dst->rule + sizeof(rulenum), &src->rule->set, 1571 sizeof(src->rule->set)); 1572 /* 1573 * store a non-null value in "next". 1574 * The userland code will interpret a 1575 * NULL here as a marker 1576 * for the last dynamic rule. 1577 */ 1578 memcpy(&dst->next, &dst, sizeof(dst)); 1579 dst->expire = TIME_LEQ(dst->expire, time_uptime) ? 0: 1580 dst->expire - time_uptime; 1581 } 1582 1583 /* 1584 * Fills int buffer given by @sd with dynamic states. 1585 * Used by dump format v1 (current). 1586 * 1587 * Returns 0 on success. 1588 */ 1589 int 1590 ipfw_dump_states(struct ip_fw_chain *chain, struct sockopt_data *sd) 1591 { 1592 ipfw_dyn_rule *p; 1593 ipfw_obj_dyntlv *dst, *last; 1594 ipfw_obj_ctlv *ctlv; 1595 int i; 1596 size_t sz; 1597 1598 if (V_ipfw_dyn_v == NULL) 1599 return (0); 1600 1601 IPFW_UH_RLOCK_ASSERT(chain); 1602 1603 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv)); 1604 if (ctlv == NULL) 1605 return (ENOMEM); 1606 sz = sizeof(ipfw_obj_dyntlv); 1607 ctlv->head.type = IPFW_TLV_DYNSTATE_LIST; 1608 ctlv->objsize = sz; 1609 last = NULL; 1610 1611 for (i = 0 ; i < V_curr_dyn_buckets; i++) { 1612 IPFW_BUCK_LOCK(i); 1613 for (p = V_ipfw_dyn_v[i].head ; p != NULL; p = p->next) { 1614 dst = (ipfw_obj_dyntlv *)ipfw_get_sopt_space(sd, sz); 1615 if (dst == NULL) { 1616 IPFW_BUCK_UNLOCK(i); 1617 return (ENOMEM); 1618 } 1619 1620 export_dyn_rule(p, &dst->state); 1621 dst->head.length = sz; 1622 dst->head.type = IPFW_TLV_DYN_ENT; 1623 last = dst; 1624 } 1625 IPFW_BUCK_UNLOCK(i); 1626 } 1627 1628 if (last != NULL) /* mark last dynamic rule */ 1629 last->head.flags = IPFW_DF_LAST; 1630 1631 return (0); 1632 } 1633 1634 /* 1635 * Fill given buffer with dynamic states (legacy format). 1636 * IPFW_UH_RLOCK has to be held while calling. 1637 */ 1638 void 1639 ipfw_get_dynamic(struct ip_fw_chain *chain, char **pbp, const char *ep) 1640 { 1641 ipfw_dyn_rule *p, *last = NULL; 1642 char *bp; 1643 int i; 1644 1645 if (V_ipfw_dyn_v == NULL) 1646 return; 1647 bp = *pbp; 1648 1649 IPFW_UH_RLOCK_ASSERT(chain); 1650 1651 for (i = 0 ; i < V_curr_dyn_buckets; i++) { 1652 IPFW_BUCK_LOCK(i); 1653 for (p = V_ipfw_dyn_v[i].head ; p != NULL; p = p->next) { 1654 if (bp + sizeof *p <= ep) { 1655 ipfw_dyn_rule *dst = 1656 (ipfw_dyn_rule *)bp; 1657 1658 export_dyn_rule(p, dst); 1659 last = dst; 1660 bp += sizeof(ipfw_dyn_rule); 1661 } 1662 } 1663 IPFW_BUCK_UNLOCK(i); 1664 } 1665 1666 if (last != NULL) /* mark last dynamic rule */ 1667 bzero(&last->next, sizeof(last)); 1668 *pbp = bp; 1669 } 1670 /* end of file */ 1671