/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017-2018 Yandex LLC
 * Copyright (c) 2017-2018 Andrey V. Elsukov <ae@FreeBSD.org>
 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipfw.h"
#ifndef INET
#error IPFIREWALL requires INET.
#endif /* INET */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/pcpu.h>
#include <sys/queue.h>
#include <sys/rmlock.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/pfil.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>

#include <netinet/ip6.h>	/* IN6_ARE_ADDR_EQUAL */
#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#endif

#include <netpfil/ipfw/ip_fw_private.h>

#include <machine/in_cksum.h>	/* XXX for in_cksum */

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

/*
 * Description of dynamic states.
 *
 * Dynamic states are stored in lists accessed through hash tables
 * whose size is curr_dyn_buckets. This value can be modified through
 * the sysctl variable dyn_buckets.
 *
 * Currently there are four tables: dyn_ipv4, dyn_ipv6, dyn_ipv4_parent,
 * and dyn_ipv6_parent.
 *
 * When a packet is received, its address fields are hashed, then matched
 * against the entries in the corresponding list by addr_type.
 * Dynamic states can be used for different purposes:
 *  + stateful rules;
 *  + enforcing limits on the number of sessions;
 *  + in-kernel NAT (not implemented yet)
 *
 * The lifetime of dynamic states is regulated by dyn_*_lifetime,
 * measured in seconds and depending on the flags.
 *
 * The total number of dynamic states is equal to the UMA zone items count.
 * The max number of dynamic states is dyn_max. When we reach
 * the maximum number of states we do not create any more. This is
 * done to avoid consuming too much memory, but also too much
 * time when searching on each packet (ideally, we should try instead
 * to put a limit on the length of the list on each bucket...).
 *
 * Each state holds a pointer to the parent ipfw rule so we know what
 * action to perform. Dynamic rules are removed when the parent rule is
 * deleted.
 *
 * There are some limitations with dynamic rules -- we do not
 * obey the 'randomized match', and we do not do multiple
 * passes through the firewall. XXX check the latter!!!
 */

/* By default use jenkins hash function */
#define	IPFIREWALL_JENKINSHASH

#define	DYN_COUNTER_INC(d, dir, pktlen)	do {	\
	(d)->pcnt_ ## dir++;			\
	(d)->bcnt_ ## dir += pktlen;		\
} while (0)

#define	DYN_REFERENCED	0x01
/*
 * The DYN_REFERENCED flag is used to show that a state keeps a reference
 * to a named object, and this reference should be released when the state
 * becomes expired.
 */

struct dyn_data {
	void		*parent;	/* pointer to parent rule */
	uint32_t	chain_id;	/* cached ruleset id */
	uint32_t	f_pos;		/* cached rule index */

	uint32_t	hashval;	/* hash value used for hash resize */
	uint16_t	fibnum;		/* fib used to send keepalives */
	uint8_t		_pad[2];
	uint8_t		flags;		/* internal flags */
	uint8_t		set;		/* parent rule set number */
	uint16_t	rulenum;	/* parent rule number */
	uint32_t	ruleid;		/* parent rule id */

	uint32_t	state;		/* TCP session state and flags */
	uint32_t	ack_fwd;	/* most recent ACKs in forward */
	uint32_t	ack_rev;	/* and reverse direction (used */
					/* to generate keepalives) */
	uint32_t	sync;		/* synchronization time */
	uint32_t	expire;		/* expire time */

	uint64_t	pcnt_fwd;	/* packets counter in forward */
	uint64_t	bcnt_fwd;	/* bytes counter in forward */
	uint64_t	pcnt_rev;	/* packets counter in reverse */
	uint64_t	bcnt_rev;	/* bytes counter in reverse */
};

#define	DPARENT_COUNT_DEC(p)	do {		\
	MPASS(p->count > 0);			\
	ck_pr_dec_32(&(p)->count);		\
} while (0)
#define	DPARENT_COUNT_INC(p)	ck_pr_inc_32(&(p)->count)
#define	DPARENT_COUNT(p)	ck_pr_load_32(&(p)->count)
struct dyn_parent {
	void		*parent;	/* pointer to parent rule */
	uint32_t	count;		/* number of linked states */
	uint8_t		_pad;
	uint8_t		set;		/* parent rule set number */
	uint16_t	rulenum;	/* parent rule number */
	uint32_t	ruleid;		/* parent rule id */
	uint32_t	hashval;	/* hash value used for hash resize */
	uint32_t	expire;		/* expire time */
};

struct dyn_ipv4_state {
	uint8_t		type;		/* State type */
	uint8_t		proto;		/* UL Protocol */
	uint16_t	kidx;		/* named object index */
	uint16_t	sport, dport;	/* ULP source and destination ports */
	in_addr_t	src, dst;	/* IPv4 source and destination */

	union {
		struct dyn_data	*data;
		struct dyn_parent *limit;
	};
	CK_SLIST_ENTRY(dyn_ipv4_state)	entry;
	SLIST_ENTRY(dyn_ipv4_state)	expired;
};
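/*
 * Illustrative sketch (not compiled): the anonymous union above is
 * discriminated by the 'type' field.  O_LIMIT_PARENT states use the
 * 'limit' member, all other types use 'data'.  Note that for an O_LIMIT
 * child state 'data->parent' points to its O_LIMIT_PARENT state, not
 * directly to the ipfw rule; the hypothetical helper below only shows
 * the resulting pointer chasing (cf. ipfw_dyn_lookup_state()):
 */
#if 0
static __inline void *
dyn_ipv4_state_rule(const struct dyn_ipv4_state *s)
{
	struct dyn_ipv4_state *parent;

	if (s->type == O_LIMIT_PARENT)
		return (s->limit->parent);
	if (s->type == O_LIMIT) {
		/* 'data->parent' is the O_LIMIT_PARENT state, hop once. */
		parent = s->data->parent;
		return (parent->limit->parent);
	}
	return (s->data->parent);
}
#endif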
CK_SLIST_HEAD(dyn_ipv4ck_slist, dyn_ipv4_state);
VNET_DEFINE_STATIC(struct dyn_ipv4ck_slist *, dyn_ipv4);
VNET_DEFINE_STATIC(struct dyn_ipv4ck_slist *, dyn_ipv4_parent);

SLIST_HEAD(dyn_ipv4_slist, dyn_ipv4_state);
VNET_DEFINE_STATIC(struct dyn_ipv4_slist, dyn_expired_ipv4);
#define	V_dyn_ipv4		VNET(dyn_ipv4)
#define	V_dyn_ipv4_parent	VNET(dyn_ipv4_parent)
#define	V_dyn_expired_ipv4	VNET(dyn_expired_ipv4)

#ifdef INET6
struct dyn_ipv6_state {
	uint8_t		type;		/* State type */
	uint8_t		proto;		/* UL Protocol */
	uint16_t	kidx;		/* named object index */
	uint16_t	sport, dport;	/* ULP source and destination ports */
	struct in6_addr	src, dst;	/* IPv6 source and destination */
	uint32_t	zoneid;		/* IPv6 scope zone id */
	union {
		struct dyn_data	*data;
		struct dyn_parent *limit;
	};
	CK_SLIST_ENTRY(dyn_ipv6_state)	entry;
	SLIST_ENTRY(dyn_ipv6_state)	expired;
};
CK_SLIST_HEAD(dyn_ipv6ck_slist, dyn_ipv6_state);
VNET_DEFINE_STATIC(struct dyn_ipv6ck_slist *, dyn_ipv6);
VNET_DEFINE_STATIC(struct dyn_ipv6ck_slist *, dyn_ipv6_parent);

SLIST_HEAD(dyn_ipv6_slist, dyn_ipv6_state);
VNET_DEFINE_STATIC(struct dyn_ipv6_slist, dyn_expired_ipv6);
#define	V_dyn_ipv6		VNET(dyn_ipv6)
#define	V_dyn_ipv6_parent	VNET(dyn_ipv6_parent)
#define	V_dyn_expired_ipv6	VNET(dyn_expired_ipv6)
#endif /* INET6 */

/*
 * Per-CPU pointer indicates that specified state is currently in use
 * and must not be reclaimed by expiration callout.
 */
static void **dyn_hp_cache;
DPCPU_DEFINE_STATIC(void *, dyn_hp);
#define	DYNSTATE_GET(cpu)	ck_pr_load_ptr(DPCPU_ID_PTR((cpu), dyn_hp))
#define	DYNSTATE_PROTECT(v)	ck_pr_store_ptr(DPCPU_PTR(dyn_hp), (v))
#define	DYNSTATE_RELEASE()	DYNSTATE_PROTECT(NULL)
#define	DYNSTATE_CRITICAL_ENTER()	critical_enter()
#define	DYNSTATE_CRITICAL_EXIT()	do {	\
	DYNSTATE_RELEASE();			\
	critical_exit();			\
} while (0)

/*
 * We keep two version numbers. One is updated when a new entry is added
 * to the list, the second is updated when an entry is deleted from the
 * list. Versions are updated under the bucket lock.
 *
 * The bucket "add" version number is used to know that in the time
 * between state lookup (i.e. ipfw_dyn_lookup_state()) and the following
 * state creation (i.e. ipfw_dyn_install_state()) no concurrent thread
 * installed a state in this bucket. Using this info we can avoid an
 * additional state lookup, because we are sure that we will not install
 * the state twice.
 *
 * Also, by tracking the bucket "del" version during lookup we can be
 * sure that the state entry was not unlinked and freed between the time
 * we read the state pointer and the time we protect it with the hazard
 * pointer.
 *
 * An entry unlinked from a CK list remains unchanged until it is freed.
 * Unlinked entries are linked into expired lists using the "expired"
 * field.
 */

/*
 * dyn_expire_lock is used to protect access to dyn_expired_xxx lists.
 * dyn_bucket_lock is used to get write access to lists in specific bucket.
 * Currently one dyn_bucket_lock is used for all ipv4, ipv4_parent, ipv6,
 * and ipv6_parent lists.
 */
VNET_DEFINE_STATIC(struct mtx, dyn_expire_lock);
VNET_DEFINE_STATIC(struct mtx *, dyn_bucket_lock);
#define	V_dyn_expire_lock	VNET(dyn_expire_lock)
#define	V_dyn_bucket_lock	VNET(dyn_bucket_lock)
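/*
 * Illustrative sketch (not compiled): how the version counters and the
 * hazard pointer cooperate on the lockless lookup path.  This is a
 * condensed form of the pattern used by dyn_lookup_ipv4_state() and
 * dyn_add_ipv4_state() below:
 */
#if 0
	/* Reader, inside DYNSTATE_CRITICAL_ENTER()/EXIT(): */
	info->version = DYN_BUCKET_VERSION(bucket, ipv4_add);
restart:
	version = DYN_BUCKET_VERSION(bucket, ipv4_del);
	CK_SLIST_FOREACH(s, &V_dyn_ipv4[bucket], entry) {
		DYNSTATE_PROTECT(s);
		/* A concurrent unlink may have raced us: rescan. */
		if (version != DYN_BUCKET_VERSION(bucket, ipv4_del))
			goto restart;
	}

	/* Writer, under DYN_BUCKET_LOCK(bucket): re-lookup is needed */
	/* only when the "add" version moved since the lockless lookup. */
	if (info->version != DYN_BUCKET_VERSION(bucket, ipv4_add) &&
	    dyn_lookup_ipv4_state_locked(pkt, ulp, pktlen, bucket, kidx))
		return (EEXIST);
#endif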
/*
 * Bucket's add/delete generation versions.
 */
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv4_add);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv4_del);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv4_parent_add);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv4_parent_del);
#define	V_dyn_ipv4_add		VNET(dyn_ipv4_add)
#define	V_dyn_ipv4_del		VNET(dyn_ipv4_del)
#define	V_dyn_ipv4_parent_add	VNET(dyn_ipv4_parent_add)
#define	V_dyn_ipv4_parent_del	VNET(dyn_ipv4_parent_del)

#ifdef INET6
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv6_add);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv6_del);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv6_parent_add);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv6_parent_del);
#define	V_dyn_ipv6_add		VNET(dyn_ipv6_add)
#define	V_dyn_ipv6_del		VNET(dyn_ipv6_del)
#define	V_dyn_ipv6_parent_add	VNET(dyn_ipv6_parent_add)
#define	V_dyn_ipv6_parent_del	VNET(dyn_ipv6_parent_del)
#endif /* INET6 */

#define	DYN_BUCKET(h, b)		((h) & (b - 1))
#define	DYN_BUCKET_VERSION(b, v)	ck_pr_load_32(&V_dyn_ ## v[(b)])
#define	DYN_BUCKET_VERSION_BUMP(b, v)	ck_pr_inc_32(&V_dyn_ ## v[(b)])
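/*
 * Example: DYN_BUCKET() reduces a hash value to a bucket index with a
 * mask, which is why the number of buckets must be a power of two.
 * With V_curr_dyn_buckets == 1024, DYN_BUCKET(h, 1024) == (h & 0x3ff),
 * e.g. h == 0x12345678 selects bucket 0x278.
 */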
#define	DYN_BUCKET_LOCK_INIT(lock, b)	\
    mtx_init(&lock[(b)], "IPFW dynamic bucket", NULL, MTX_DEF)
#define	DYN_BUCKET_LOCK_DESTROY(lock, b)	mtx_destroy(&lock[(b)])
#define	DYN_BUCKET_LOCK(b)	mtx_lock(&V_dyn_bucket_lock[(b)])
#define	DYN_BUCKET_UNLOCK(b)	mtx_unlock(&V_dyn_bucket_lock[(b)])
#define	DYN_BUCKET_ASSERT(b)	mtx_assert(&V_dyn_bucket_lock[(b)], MA_OWNED)

#define	DYN_EXPIRED_LOCK_INIT()		\
    mtx_init(&V_dyn_expire_lock, "IPFW expired states list", NULL, MTX_DEF)
#define	DYN_EXPIRED_LOCK_DESTROY()	mtx_destroy(&V_dyn_expire_lock)
#define	DYN_EXPIRED_LOCK()		mtx_lock(&V_dyn_expire_lock)
#define	DYN_EXPIRED_UNLOCK()		mtx_unlock(&V_dyn_expire_lock)

VNET_DEFINE_STATIC(uint32_t, dyn_buckets_max);
VNET_DEFINE_STATIC(uint32_t, curr_dyn_buckets);
VNET_DEFINE_STATIC(struct callout, dyn_timeout);
#define	V_dyn_buckets_max	VNET(dyn_buckets_max)
#define	V_curr_dyn_buckets	VNET(curr_dyn_buckets)
#define	V_dyn_timeout		VNET(dyn_timeout)

/* Maximum length of states chain in a bucket */
VNET_DEFINE_STATIC(uint32_t, curr_max_length);
#define	V_curr_max_length	VNET(curr_max_length)

VNET_DEFINE_STATIC(uint32_t, dyn_keep_states);
#define	V_dyn_keep_states	VNET(dyn_keep_states)

VNET_DEFINE_STATIC(uma_zone_t, dyn_data_zone);
VNET_DEFINE_STATIC(uma_zone_t, dyn_parent_zone);
VNET_DEFINE_STATIC(uma_zone_t, dyn_ipv4_zone);
#ifdef INET6
VNET_DEFINE_STATIC(uma_zone_t, dyn_ipv6_zone);
#define	V_dyn_ipv6_zone		VNET(dyn_ipv6_zone)
#endif /* INET6 */
#define	V_dyn_data_zone		VNET(dyn_data_zone)
#define	V_dyn_parent_zone	VNET(dyn_parent_zone)
#define	V_dyn_ipv4_zone		VNET(dyn_ipv4_zone)

/*
 * Timeouts for various events in handling dynamic rules.
 */
VNET_DEFINE_STATIC(uint32_t, dyn_ack_lifetime);
VNET_DEFINE_STATIC(uint32_t, dyn_syn_lifetime);
VNET_DEFINE_STATIC(uint32_t, dyn_fin_lifetime);
VNET_DEFINE_STATIC(uint32_t, dyn_rst_lifetime);
VNET_DEFINE_STATIC(uint32_t, dyn_udp_lifetime);
VNET_DEFINE_STATIC(uint32_t, dyn_short_lifetime);

#define	V_dyn_ack_lifetime	VNET(dyn_ack_lifetime)
#define	V_dyn_syn_lifetime	VNET(dyn_syn_lifetime)
#define	V_dyn_fin_lifetime	VNET(dyn_fin_lifetime)
#define	V_dyn_rst_lifetime	VNET(dyn_rst_lifetime)
#define	V_dyn_udp_lifetime	VNET(dyn_udp_lifetime)
#define	V_dyn_short_lifetime	VNET(dyn_short_lifetime)

/*
 * Keepalives are sent if dyn_keepalive is set. They are sent every
 * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
 * seconds of lifetime of a rule.
 * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
 * than dyn_keepalive_period.
 */
VNET_DEFINE_STATIC(uint32_t, dyn_keepalive_interval);
VNET_DEFINE_STATIC(uint32_t, dyn_keepalive_period);
VNET_DEFINE_STATIC(uint32_t, dyn_keepalive);
VNET_DEFINE_STATIC(time_t, dyn_keepalive_last);

#define	V_dyn_keepalive_interval	VNET(dyn_keepalive_interval)
#define	V_dyn_keepalive_period	VNET(dyn_keepalive_period)
#define	V_dyn_keepalive		VNET(dyn_keepalive)
#define	V_dyn_keepalive_last	VNET(dyn_keepalive_last)

VNET_DEFINE_STATIC(uint32_t, dyn_max);		/* max # of dynamic states */
VNET_DEFINE_STATIC(uint32_t, dyn_count);	/* number of states */
VNET_DEFINE_STATIC(uint32_t, dyn_parent_max);	/* max # of parent states */
VNET_DEFINE_STATIC(uint32_t, dyn_parent_count);	/* number of parent states */

#define	V_dyn_max		VNET(dyn_max)
#define	V_dyn_count		VNET(dyn_count)
#define	V_dyn_parent_max	VNET(dyn_parent_max)
#define	V_dyn_parent_count	VNET(dyn_parent_count)

#define	DYN_COUNT_DEC(name)	do {		\
	MPASS((V_ ## name) > 0);		\
	ck_pr_dec_32(&(V_ ## name));		\
} while (0)
#define	DYN_COUNT_INC(name)	ck_pr_inc_32(&(V_ ## name))
#define	DYN_COUNT(name)		ck_pr_load_32(&(V_ ## name))

static time_t last_log;	/* Log ratelimiting */
/*
 * Get/set maximum number of dynamic states in a given VNET instance.
 */
static int
sysctl_dyn_max(SYSCTL_HANDLER_ARGS)
{
	uint32_t nstates;
	int error;

	nstates = V_dyn_max;
	error = sysctl_handle_32(oidp, &nstates, 0, req);
	/* Read operation or some error */
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	V_dyn_max = nstates;
	uma_zone_set_max(V_dyn_data_zone, V_dyn_max);
	return (0);
}

static int
sysctl_dyn_parent_max(SYSCTL_HANDLER_ARGS)
{
	uint32_t nstates;
	int error;

	nstates = V_dyn_parent_max;
	error = sysctl_handle_32(oidp, &nstates, 0, req);
	/* Read operation or some error */
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	V_dyn_parent_max = nstates;
	uma_zone_set_max(V_dyn_parent_zone, V_dyn_parent_max);
	return (0);
}

static int
sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS)
{
	uint32_t nbuckets;
	int error;

	nbuckets = V_dyn_buckets_max;
	error = sysctl_handle_32(oidp, &nbuckets, 0, req);
	/* Read operation or some error */
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	if (nbuckets > 256)
		V_dyn_buckets_max = 1 << fls(nbuckets - 1);
	else
		return (EINVAL);
	return (0);
}
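/*
 * Example: "1 << fls(nbuckets - 1)" rounds the request up to the next
 * power of two, as DYN_BUCKET() requires: fls(999) == 10, so a request
 * for 1000 buckets yields 1024, while a request for exactly 1024
 * (fls(1023) == 10) stays at 1024.
 */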
situations."); 485 SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_keepalive, 486 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_keepalive), 0, 487 "Enable keepalives for dynamic states."); 488 SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_keep_states, 489 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_keep_states), 0, 490 "Do not flush dynamic states on rule deletion"); 491 492 493 #ifdef IPFIREWALL_DYNDEBUG 494 #define DYN_DEBUG(fmt, ...) do { \ 495 printf("%s: " fmt "\n", __func__, __VA_ARGS__); \ 496 } while (0) 497 #else 498 #define DYN_DEBUG(fmt, ...) 499 #endif /* !IPFIREWALL_DYNDEBUG */ 500 501 #ifdef INET6 502 /* Functions to work with IPv6 states */ 503 static struct dyn_ipv6_state *dyn_lookup_ipv6_state( 504 const struct ipfw_flow_id *, uint32_t, const void *, 505 struct ipfw_dyn_info *, int); 506 static int dyn_lookup_ipv6_state_locked(const struct ipfw_flow_id *, 507 uint32_t, const void *, int, uint32_t, uint16_t); 508 static struct dyn_ipv6_state *dyn_alloc_ipv6_state( 509 const struct ipfw_flow_id *, uint32_t, uint16_t, uint8_t); 510 static int dyn_add_ipv6_state(void *, uint32_t, uint16_t, uint8_t, 511 const struct ipfw_flow_id *, uint32_t, const void *, int, uint32_t, 512 struct ipfw_dyn_info *, uint16_t, uint16_t, uint8_t); 513 static void dyn_export_ipv6_state(const struct dyn_ipv6_state *, 514 ipfw_dyn_rule *); 515 516 static uint32_t dyn_getscopeid(const struct ip_fw_args *); 517 static void dyn_make_keepalive_ipv6(struct mbuf *, const struct in6_addr *, 518 const struct in6_addr *, uint32_t, uint32_t, uint32_t, uint16_t, 519 uint16_t); 520 static void dyn_enqueue_keepalive_ipv6(struct mbufq *, 521 const struct dyn_ipv6_state *); 522 static void dyn_send_keepalive_ipv6(struct ip_fw_chain *); 523 524 static struct dyn_ipv6_state *dyn_lookup_ipv6_parent( 525 const struct ipfw_flow_id *, uint32_t, const void *, uint32_t, uint16_t, 526 uint32_t); 527 static struct dyn_ipv6_state *dyn_lookup_ipv6_parent_locked( 528 const struct ipfw_flow_id *, uint32_t, const void *, uint32_t, uint16_t, 529 uint32_t); 530 static struct dyn_ipv6_state *dyn_add_ipv6_parent(void *, uint32_t, uint16_t, 531 uint8_t, const struct ipfw_flow_id *, uint32_t, uint32_t, uint32_t, 532 uint16_t); 533 #endif /* INET6 */ 534 535 /* Functions to work with limit states */ 536 static void *dyn_get_parent_state(const struct ipfw_flow_id *, uint32_t, 537 struct ip_fw *, uint32_t, uint32_t, uint16_t); 538 static struct dyn_ipv4_state *dyn_lookup_ipv4_parent( 539 const struct ipfw_flow_id *, const void *, uint32_t, uint16_t, uint32_t); 540 static struct dyn_ipv4_state *dyn_lookup_ipv4_parent_locked( 541 const struct ipfw_flow_id *, const void *, uint32_t, uint16_t, uint32_t); 542 static struct dyn_parent *dyn_alloc_parent(void *, uint32_t, uint16_t, 543 uint8_t, uint32_t); 544 static struct dyn_ipv4_state *dyn_add_ipv4_parent(void *, uint32_t, uint16_t, 545 uint8_t, const struct ipfw_flow_id *, uint32_t, uint32_t, uint16_t); 546 547 static void dyn_tick(void *); 548 static void dyn_expire_states(struct ip_fw_chain *, ipfw_range_tlv *); 549 static void dyn_free_states(struct ip_fw_chain *); 550 static void dyn_export_parent(const struct dyn_parent *, uint16_t, 551 ipfw_dyn_rule *); 552 static void dyn_export_data(const struct dyn_data *, uint16_t, uint8_t, 553 ipfw_dyn_rule *); 554 static uint32_t dyn_update_tcp_state(struct dyn_data *, 555 const struct ipfw_flow_id *, const struct tcphdr *, int); 556 static void dyn_update_proto_state(struct dyn_data *, 557 const struct ipfw_flow_id *, const void *, int, int); 558 559 /* Functions 
/* Functions to work with IPv4 states */
struct dyn_ipv4_state *dyn_lookup_ipv4_state(const struct ipfw_flow_id *,
    const void *, struct ipfw_dyn_info *, int);
static int dyn_lookup_ipv4_state_locked(const struct ipfw_flow_id *,
    const void *, int, uint32_t, uint16_t);
static struct dyn_ipv4_state *dyn_alloc_ipv4_state(
    const struct ipfw_flow_id *, uint16_t, uint8_t);
static int dyn_add_ipv4_state(void *, uint32_t, uint16_t, uint8_t,
    const struct ipfw_flow_id *, const void *, int, uint32_t,
    struct ipfw_dyn_info *, uint16_t, uint16_t, uint8_t);
static void dyn_export_ipv4_state(const struct dyn_ipv4_state *,
    ipfw_dyn_rule *);

/*
 * Named states support.
 */
static char *default_state_name = "default";
struct dyn_state_obj {
	struct named_object	no;
	char			name[64];
};

#define	DYN_STATE_OBJ(ch, cmd)	\
    ((struct dyn_state_obj *)SRV_OBJECT(ch, (cmd)->arg1))
/*
 * Classifier callback.
 * Return 0 if the opcode contains an object that should be referenced
 * or rewritten.
 */
static int
dyn_classify(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
{

	DYN_DEBUG("opcode %d, arg1 %d", cmd->opcode, cmd->arg1);
	/* Don't rewrite "check-state any" */
	if (cmd->arg1 == 0 &&
	    cmd->opcode == O_CHECK_STATE)
		return (1);

	*puidx = cmd->arg1;
	*ptype = 0;
	return (0);
}

static void
dyn_update(ipfw_insn *cmd, uint16_t idx)
{

	cmd->arg1 = idx;
	DYN_DEBUG("opcode %d, arg1 %d", cmd->opcode, cmd->arg1);
}

static int
dyn_findbyname(struct ip_fw_chain *ch, struct tid_info *ti,
    struct named_object **pno)
{
	ipfw_obj_ntlv *ntlv;
	const char *name;

	DYN_DEBUG("uidx %d", ti->uidx);
	if (ti->uidx != 0) {
		if (ti->tlvs == NULL)
			return (EINVAL);
		/* Search ntlv in the buffer provided by user */
		ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx,
		    IPFW_TLV_STATE_NAME);
		if (ntlv == NULL)
			return (EINVAL);
		name = ntlv->name;
	} else
		name = default_state_name;
	/*
	 * Search for a named object with the corresponding name.
	 * Since state objects are global, ignore the set value
	 * and use zero instead.
	 */
	*pno = ipfw_objhash_lookup_name_type(CHAIN_TO_SRV(ch), 0,
	    IPFW_TLV_STATE_NAME, name);
	/*
	 * We always return success here.
	 * The caller will check *pno and mark the object as unresolved,
	 * then it will automatically create the "default" object.
	 */
	return (0);
}
static struct named_object *
dyn_findbykidx(struct ip_fw_chain *ch, uint16_t idx)
{

	DYN_DEBUG("kidx %d", idx);
	return (ipfw_objhash_lookup_kidx(CHAIN_TO_SRV(ch), idx));
}

static int
dyn_create(struct ip_fw_chain *ch, struct tid_info *ti,
    uint16_t *pkidx)
{
	struct namedobj_instance *ni;
	struct dyn_state_obj *obj;
	struct named_object *no;
	ipfw_obj_ntlv *ntlv;
	char *name;

	DYN_DEBUG("uidx %d", ti->uidx);
	if (ti->uidx != 0) {
		if (ti->tlvs == NULL)
			return (EINVAL);
		ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx,
		    IPFW_TLV_STATE_NAME);
		if (ntlv == NULL)
			return (EINVAL);
		name = ntlv->name;
	} else
		name = default_state_name;

	ni = CHAIN_TO_SRV(ch);
	obj = malloc(sizeof(*obj), M_IPFW, M_WAITOK | M_ZERO);
	obj->no.name = obj->name;
	obj->no.etlv = IPFW_TLV_STATE_NAME;
	strlcpy(obj->name, name, sizeof(obj->name));

	IPFW_UH_WLOCK(ch);
	no = ipfw_objhash_lookup_name_type(ni, 0,
	    IPFW_TLV_STATE_NAME, name);
	if (no != NULL) {
		/*
		 * Object is already created.
		 * Just return its kidx and bump refcount.
		 */
		*pkidx = no->kidx;
		no->refcnt++;
		IPFW_UH_WUNLOCK(ch);
		free(obj, M_IPFW);
		DYN_DEBUG("\tfound kidx %d", *pkidx);
		return (0);
	}
	if (ipfw_objhash_alloc_idx(ni, &obj->no.kidx) != 0) {
		DYN_DEBUG("\talloc_idx failed for %s", name);
		IPFW_UH_WUNLOCK(ch);
		free(obj, M_IPFW);
		return (ENOSPC);
	}
	ipfw_objhash_add(ni, &obj->no);
	SRV_OBJECT(ch, obj->no.kidx) = obj;
	obj->no.refcnt++;
	*pkidx = obj->no.kidx;
	IPFW_UH_WUNLOCK(ch);
	DYN_DEBUG("\tcreated kidx %d", *pkidx);
	return (0);
}

static void
dyn_destroy(struct ip_fw_chain *ch, struct named_object *no)
{
	struct dyn_state_obj *obj;

	IPFW_UH_WLOCK_ASSERT(ch);

	KASSERT(no->etlv == IPFW_TLV_STATE_NAME,
	    ("%s: wrong object type %u", __func__, no->etlv));
	KASSERT(no->refcnt == 1,
	    ("Destroying object '%s' (type %u, idx %u) with refcnt %u",
	    no->name, no->etlv, no->kidx, no->refcnt));
	DYN_DEBUG("kidx %d", no->kidx);
	obj = SRV_OBJECT(ch, no->kidx);
	SRV_OBJECT(ch, no->kidx) = NULL;
	ipfw_objhash_del(CHAIN_TO_SRV(ch), no);
	ipfw_objhash_free_idx(CHAIN_TO_SRV(ch), no->kidx);

	free(obj, M_IPFW);
}

static struct opcode_obj_rewrite dyn_opcodes[] = {
	{
		O_KEEP_STATE, IPFW_TLV_STATE_NAME,
		dyn_classify, dyn_update,
		dyn_findbyname, dyn_findbykidx,
		dyn_create, dyn_destroy
	},
	{
		O_CHECK_STATE, IPFW_TLV_STATE_NAME,
		dyn_classify, dyn_update,
		dyn_findbyname, dyn_findbykidx,
		dyn_create, dyn_destroy
	},
	{
		O_PROBE_STATE, IPFW_TLV_STATE_NAME,
		dyn_classify, dyn_update,
		dyn_findbyname, dyn_findbykidx,
		dyn_create, dyn_destroy
	},
	{
		O_LIMIT, IPFW_TLV_STATE_NAME,
		dyn_classify, dyn_update,
		dyn_findbyname, dyn_findbykidx,
		dyn_create, dyn_destroy
	},
};
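/*
 * Example (ipfw(8) syntax, for illustration): "keep-state :web" creates
 * states under the named object "web", while a bare "keep-state" uses
 * the "default" object; "check-state :web" matches only states created
 * under that name.  A check-state with arg1 == 0 means "check-state
 * :any" and is deliberately left unrewritten by dyn_classify() above.
 */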
/*
 * IMPORTANT: the hash function for dynamic rules must be commutative
 * in source and destination (ip,port), because rules are bidirectional
 * and we want to find both in the same bucket.
 */
#ifndef IPFIREWALL_JENKINSHASH
static __inline uint32_t
hash_packet(const struct ipfw_flow_id *id)
{
	uint32_t i;

#ifdef INET6
	if (IS_IP6_FLOW_ID(id))
		i = ntohl((id->dst_ip6.__u6_addr.__u6_addr32[2]) ^
		    (id->dst_ip6.__u6_addr.__u6_addr32[3]) ^
		    (id->src_ip6.__u6_addr.__u6_addr32[2]) ^
		    (id->src_ip6.__u6_addr.__u6_addr32[3]));
	else
#endif /* INET6 */
		i = (id->dst_ip) ^ (id->src_ip);
	i ^= (id->dst_port) ^ (id->src_port);
	return (i);
}

static __inline uint32_t
hash_parent(const struct ipfw_flow_id *id, const void *rule)
{

	return (hash_packet(id) ^ ((uintptr_t)rule));
}

#else /* IPFIREWALL_JENKINSHASH */

VNET_DEFINE_STATIC(uint32_t, dyn_hashseed);
#define	V_dyn_hashseed		VNET(dyn_hashseed)

static __inline int
addrcmp4(const struct ipfw_flow_id *id)
{

	if (id->src_ip < id->dst_ip)
		return (0);
	if (id->src_ip > id->dst_ip)
		return (1);
	if (id->src_port <= id->dst_port)
		return (0);
	return (1);
}

#ifdef INET6
static __inline int
addrcmp6(const struct ipfw_flow_id *id)
{
	int ret;

	ret = memcmp(&id->src_ip6, &id->dst_ip6, sizeof(struct in6_addr));
	if (ret < 0)
		return (0);
	if (ret > 0)
		return (1);
	if (id->src_port <= id->dst_port)
		return (0);
	return (1);
}

static __inline uint32_t
hash_packet6(const struct ipfw_flow_id *id)
{
	struct tuple6 {
		struct in6_addr	addr[2];
		uint16_t	port[2];
	} t6;

	if (addrcmp6(id) == 0) {
		t6.addr[0] = id->src_ip6;
		t6.addr[1] = id->dst_ip6;
		t6.port[0] = id->src_port;
		t6.port[1] = id->dst_port;
	} else {
		t6.addr[0] = id->dst_ip6;
		t6.addr[1] = id->src_ip6;
		t6.port[0] = id->dst_port;
		t6.port[1] = id->src_port;
	}
	return (jenkins_hash32((const uint32_t *)&t6,
	    sizeof(t6) / sizeof(uint32_t), V_dyn_hashseed));
}
#endif

static __inline uint32_t
hash_packet(const struct ipfw_flow_id *id)
{
	struct tuple4 {
		in_addr_t	addr[2];
		uint16_t	port[2];
	} t4;

	if (IS_IP4_FLOW_ID(id)) {
		/* All fields are in host byte order */
		if (addrcmp4(id) == 0) {
			t4.addr[0] = id->src_ip;
			t4.addr[1] = id->dst_ip;
			t4.port[0] = id->src_port;
			t4.port[1] = id->dst_port;
		} else {
			t4.addr[0] = id->dst_ip;
			t4.addr[1] = id->src_ip;
			t4.port[0] = id->dst_port;
			t4.port[1] = id->src_port;
		}
		return (jenkins_hash32((const uint32_t *)&t4,
		    sizeof(t4) / sizeof(uint32_t), V_dyn_hashseed));
	} else
#ifdef INET6
	if (IS_IP6_FLOW_ID(id))
		return (hash_packet6(id));
#endif
	return (0);
}

static __inline uint32_t
hash_parent(const struct ipfw_flow_id *id, const void *rule)
{

	return (jenkins_hash32((const uint32_t *)&rule,
	    sizeof(rule) / sizeof(uint32_t), hash_packet(id)));
}
#endif /* IPFIREWALL_JENKINSHASH */
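/*
 * Example: the canonical ordering done by addrcmp4()/addrcmp6() above is
 * what makes the Jenkins variant commutative.  For the forward flow
 * 10.0.0.1:1234 -> 10.0.0.2:80 and its reverse 10.0.0.2:80 -> 10.0.0.1:1234
 * the tuple is assembled in the same order (10.0.0.1, 10.0.0.2, 1234, 80),
 * so both directions land in the same bucket.
 */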
/*
 * Print customizable flow id description via log(9) facility.
 */
static void
print_dyn_rule_flags(const struct ipfw_flow_id *id, int dyn_type,
    int log_flags, char *prefix, char *postfix)
{
	struct in_addr da;
#ifdef INET6
	char src[INET6_ADDRSTRLEN], dst[INET6_ADDRSTRLEN];
#else
	char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN];
#endif

#ifdef INET6
	if (IS_IP6_FLOW_ID(id)) {
		ip6_sprintf(src, &id->src_ip6);
		ip6_sprintf(dst, &id->dst_ip6);
	} else
#endif
	{
		da.s_addr = htonl(id->src_ip);
		inet_ntop(AF_INET, &da, src, sizeof(src));
		da.s_addr = htonl(id->dst_ip);
		inet_ntop(AF_INET, &da, dst, sizeof(dst));
	}
	log(log_flags, "ipfw: %s type %d %s %d -> %s %d, %d %s\n",
	    prefix, dyn_type, src, id->src_port, dst,
	    id->dst_port, V_dyn_count, postfix);
}

#define	print_dyn_rule(id, dtype, prefix, postfix)	\
	print_dyn_rule_flags(id, dtype, LOG_DEBUG, prefix, postfix)

#define	TIME_LEQ(a, b)	((int)((a) - (b)) <= 0)
#define	TIME_LE(a, b)	((int)((a) - (b)) < 0)
#define	_SEQ_GE(a, b)	((int)((a) - (b)) >= 0)
#define	BOTH_SYN	(TH_SYN | (TH_SYN << 8))
#define	BOTH_FIN	(TH_FIN | (TH_FIN << 8))
#define	TCP_FLAGS	(TH_FLAGS | (TH_FLAGS << 8))
#define	ACK_FWD		0x00010000	/* fwd ack seen */
#define	ACK_REV		0x00020000	/* rev ack seen */
#define	ACK_BOTH	(ACK_FWD | ACK_REV)

static uint32_t
dyn_update_tcp_state(struct dyn_data *data, const struct ipfw_flow_id *pkt,
    const struct tcphdr *tcp, int dir)
{
	uint32_t ack, expire;
	uint32_t state, old;
	uint8_t th_flags;

	expire = data->expire;
	old = state = data->state;
	th_flags = pkt->_flags & (TH_FIN | TH_SYN | TH_RST);
	state |= (dir == MATCH_FORWARD) ? th_flags: (th_flags << 8);
	switch (state & TCP_FLAGS) {
	case TH_SYN:			/* opening */
		expire = time_uptime + V_dyn_syn_lifetime;
		break;

	case BOTH_SYN:			/* move to established */
	case BOTH_SYN | TH_FIN:		/* one side tries to close */
	case BOTH_SYN | (TH_FIN << 8):
		if (tcp == NULL)
			break;
		ack = ntohl(tcp->th_ack);
		if (dir == MATCH_FORWARD) {
			if (data->ack_fwd == 0 ||
			    _SEQ_GE(ack, data->ack_fwd)) {
				state |= ACK_FWD;
				if (data->ack_fwd != ack)
					ck_pr_store_32(&data->ack_fwd, ack);
			}
		} else {
			if (data->ack_rev == 0 ||
			    _SEQ_GE(ack, data->ack_rev)) {
				state |= ACK_REV;
				if (data->ack_rev != ack)
					ck_pr_store_32(&data->ack_rev, ack);
			}
		}
		if ((state & ACK_BOTH) == ACK_BOTH) {
			/*
			 * Set expire time to V_dyn_ack_lifetime only if
			 * we got ACKs for both directions.
			 * We use XOR here to avoid possible state
			 * overwriting in concurrent thread.
			 */
			expire = time_uptime + V_dyn_ack_lifetime;
			ck_pr_xor_32(&data->state, ACK_BOTH);
		} else if ((data->state & ACK_BOTH) != (state & ACK_BOTH))
			ck_pr_or_32(&data->state, state & ACK_BOTH);
		break;

	case BOTH_SYN | BOTH_FIN:	/* both sides closed */
		if (V_dyn_fin_lifetime >= V_dyn_keepalive_period)
			V_dyn_fin_lifetime = V_dyn_keepalive_period - 1;
		expire = time_uptime + V_dyn_fin_lifetime;
		break;

	default:
		if (V_dyn_keepalive != 0 &&
		    V_dyn_rst_lifetime >= V_dyn_keepalive_period)
			V_dyn_rst_lifetime = V_dyn_keepalive_period - 1;
		expire = time_uptime + V_dyn_rst_lifetime;
	}
	/* Save TCP state if it was changed */
	if ((state & TCP_FLAGS) != (old & TCP_FLAGS))
		ck_pr_or_32(&data->state, state & TCP_FLAGS);
	return (expire);
}
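/*
 * Example: data->state packs the TCP flags seen in the forward direction
 * into the low byte and those seen in the reverse direction into the
 * second byte.  A session that saw a SYN both ways reads as
 * BOTH_SYN == TH_SYN | (TH_SYN << 8); a FIN seen only in the reverse
 * direction adds (TH_FIN << 8), matching the "BOTH_SYN | (TH_FIN << 8)"
 * case above.  The _SEQ_GE()/TIME_LEQ() comparisons are done on the
 * difference cast to int, so they remain correct across 32-bit sequence
 * number and time counter wraparound.
 */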
/*
 * Update ULP specific state.
 * For TCP we keep sequence numbers and flags. For other protocols
 * currently we update only the expire time. Packets and bytes counters
 * are also updated here.
 */
static void
dyn_update_proto_state(struct dyn_data *data, const struct ipfw_flow_id *pkt,
    const void *ulp, int pktlen, int dir)
{
	uint32_t expire;

	/* NOTE: we are in critical section here. */
	switch (pkt->proto) {
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		expire = time_uptime + V_dyn_udp_lifetime;
		break;
	case IPPROTO_TCP:
		expire = dyn_update_tcp_state(data, pkt, ulp, dir);
		break;
	default:
		expire = time_uptime + V_dyn_short_lifetime;
	}
	/*
	 * The expiration timer has per-second granularity, so there is no
	 * need to update it every time the state is matched.
	 */
	if (data->expire != expire)
		ck_pr_store_32(&data->expire, expire);

	if (dir == MATCH_FORWARD)
		DYN_COUNTER_INC(data, fwd, pktlen);
	else
		DYN_COUNTER_INC(data, rev, pktlen);
}

/*
 * Lookup IPv4 state.
 * Must be called in critical section.
 */
struct dyn_ipv4_state *
dyn_lookup_ipv4_state(const struct ipfw_flow_id *pkt, const void *ulp,
    struct ipfw_dyn_info *info, int pktlen)
{
	struct dyn_ipv4_state *s;
	uint32_t version, bucket;

	bucket = DYN_BUCKET(info->hashval, V_curr_dyn_buckets);
	info->version = DYN_BUCKET_VERSION(bucket, ipv4_add);
restart:
	version = DYN_BUCKET_VERSION(bucket, ipv4_del);
	CK_SLIST_FOREACH(s, &V_dyn_ipv4[bucket], entry) {
		DYNSTATE_PROTECT(s);
		if (version != DYN_BUCKET_VERSION(bucket, ipv4_del))
			goto restart;
		if (s->proto != pkt->proto)
			continue;
		if (info->kidx != 0 && s->kidx != info->kidx)
			continue;
		if (s->sport == pkt->src_port && s->dport == pkt->dst_port &&
		    s->src == pkt->src_ip && s->dst == pkt->dst_ip) {
			info->direction = MATCH_FORWARD;
			break;
		}
		if (s->sport == pkt->dst_port && s->dport == pkt->src_port &&
		    s->src == pkt->dst_ip && s->dst == pkt->src_ip) {
			info->direction = MATCH_REVERSE;
			break;
		}
	}

	if (s != NULL)
		dyn_update_proto_state(s->data, pkt, ulp, pktlen,
		    info->direction);
	return (s);
}
/*
 * Lookup IPv4 state.
 * Simplified version is used to check that a matching state doesn't exist.
 */
static int
dyn_lookup_ipv4_state_locked(const struct ipfw_flow_id *pkt,
    const void *ulp, int pktlen, uint32_t bucket, uint16_t kidx)
{
	struct dyn_ipv4_state *s;
	int dir;

	dir = MATCH_NONE;
	DYN_BUCKET_ASSERT(bucket);
	CK_SLIST_FOREACH(s, &V_dyn_ipv4[bucket], entry) {
		if (s->proto != pkt->proto ||
		    s->kidx != kidx)
			continue;
		if (s->sport == pkt->src_port &&
		    s->dport == pkt->dst_port &&
		    s->src == pkt->src_ip && s->dst == pkt->dst_ip) {
			dir = MATCH_FORWARD;
			break;
		}
		if (s->sport == pkt->dst_port && s->dport == pkt->src_port &&
		    s->src == pkt->dst_ip && s->dst == pkt->src_ip) {
			dir = MATCH_REVERSE;
			break;
		}
	}
	if (s != NULL)
		dyn_update_proto_state(s->data, pkt, ulp, pktlen, dir);
	return (s != NULL);
}

struct dyn_ipv4_state *
dyn_lookup_ipv4_parent(const struct ipfw_flow_id *pkt, const void *rule,
    uint32_t ruleid, uint16_t rulenum, uint32_t hashval)
{
	struct dyn_ipv4_state *s;
	uint32_t version, bucket;

	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
restart:
	version = DYN_BUCKET_VERSION(bucket, ipv4_parent_del);
	CK_SLIST_FOREACH(s, &V_dyn_ipv4_parent[bucket], entry) {
		DYNSTATE_PROTECT(s);
		if (version != DYN_BUCKET_VERSION(bucket, ipv4_parent_del))
			goto restart;
		/*
		 * NOTE: we do not need to check kidx, because a parent rule
		 * cannot create states with different kidx.
		 * And a parent rule is always created for the forward
		 * direction.
		 */
		if (s->limit->parent == rule &&
		    s->limit->ruleid == ruleid &&
		    s->limit->rulenum == rulenum &&
		    s->proto == pkt->proto &&
		    s->sport == pkt->src_port &&
		    s->dport == pkt->dst_port &&
		    s->src == pkt->src_ip && s->dst == pkt->dst_ip) {
			if (s->limit->expire != time_uptime +
			    V_dyn_short_lifetime)
				ck_pr_store_32(&s->limit->expire,
				    time_uptime + V_dyn_short_lifetime);
			break;
		}
	}
	return (s);
}

static struct dyn_ipv4_state *
dyn_lookup_ipv4_parent_locked(const struct ipfw_flow_id *pkt,
    const void *rule, uint32_t ruleid, uint16_t rulenum, uint32_t bucket)
{
	struct dyn_ipv4_state *s;

	DYN_BUCKET_ASSERT(bucket);
	CK_SLIST_FOREACH(s, &V_dyn_ipv4_parent[bucket], entry) {
		if (s->limit->parent == rule &&
		    s->limit->ruleid == ruleid &&
		    s->limit->rulenum == rulenum &&
		    s->proto == pkt->proto &&
		    s->sport == pkt->src_port &&
		    s->dport == pkt->dst_port &&
		    s->src == pkt->src_ip && s->dst == pkt->dst_ip)
			break;
	}
	return (s);
}

#ifdef INET6
static uint32_t
dyn_getscopeid(const struct ip_fw_args *args)
{

	/*
	 * If the source or destination address is a scoped address, we
	 * need to determine the scope zone id to resolve address scope
	 * ambiguity.
	 */
	if (IN6_IS_ADDR_LINKLOCAL(&args->f_id.src_ip6) ||
	    IN6_IS_ADDR_LINKLOCAL(&args->f_id.dst_ip6)) {
		MPASS(args->oif != NULL ||
		    args->m->m_pkthdr.rcvif != NULL);
		return (in6_getscopezone(args->oif != NULL ? args->oif:
		    args->m->m_pkthdr.rcvif, IPV6_ADDR_SCOPE_LINKLOCAL));
	}
	return (0);
}
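/*
 * Example: link-local addresses such as fe80::1 are unique only per
 * link, so flows carrying the same fe80:: pair on different interfaces
 * must not match the same state.  The zone id derived above from the
 * interface is stored in dyn_ipv6_state::zoneid and compared on every
 * IPv6 lookup.
 */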
/*
 * Lookup IPv6 state.
 * Must be called in critical section.
 */
static struct dyn_ipv6_state *
dyn_lookup_ipv6_state(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    const void *ulp, struct ipfw_dyn_info *info, int pktlen)
{
	struct dyn_ipv6_state *s;
	uint32_t version, bucket;

	bucket = DYN_BUCKET(info->hashval, V_curr_dyn_buckets);
	info->version = DYN_BUCKET_VERSION(bucket, ipv6_add);
restart:
	version = DYN_BUCKET_VERSION(bucket, ipv6_del);
	CK_SLIST_FOREACH(s, &V_dyn_ipv6[bucket], entry) {
		DYNSTATE_PROTECT(s);
		if (version != DYN_BUCKET_VERSION(bucket, ipv6_del))
			goto restart;
		if (s->proto != pkt->proto || s->zoneid != zoneid)
			continue;
		if (info->kidx != 0 && s->kidx != info->kidx)
			continue;
		if (s->sport == pkt->src_port && s->dport == pkt->dst_port &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->src_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->dst_ip6)) {
			info->direction = MATCH_FORWARD;
			break;
		}
		if (s->sport == pkt->dst_port && s->dport == pkt->src_port &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->dst_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->src_ip6)) {
			info->direction = MATCH_REVERSE;
			break;
		}
	}
	if (s != NULL)
		dyn_update_proto_state(s->data, pkt, ulp, pktlen,
		    info->direction);
	return (s);
}

/*
 * Lookup IPv6 state.
 * Simplified version is used to check that a matching state doesn't exist.
 */
static int
dyn_lookup_ipv6_state_locked(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    const void *ulp, int pktlen, uint32_t bucket, uint16_t kidx)
{
	struct dyn_ipv6_state *s;
	int dir;

	dir = MATCH_NONE;
	DYN_BUCKET_ASSERT(bucket);
	CK_SLIST_FOREACH(s, &V_dyn_ipv6[bucket], entry) {
		if (s->proto != pkt->proto || s->kidx != kidx ||
		    s->zoneid != zoneid)
			continue;
		if (s->sport == pkt->src_port && s->dport == pkt->dst_port &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->src_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->dst_ip6)) {
			dir = MATCH_FORWARD;
			break;
		}
		if (s->sport == pkt->dst_port && s->dport == pkt->src_port &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->dst_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->src_ip6)) {
			dir = MATCH_REVERSE;
			break;
		}
	}
	if (s != NULL)
		dyn_update_proto_state(s->data, pkt, ulp, pktlen, dir);
	return (s != NULL);
}

static struct dyn_ipv6_state *
dyn_lookup_ipv6_parent(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    const void *rule, uint32_t ruleid, uint16_t rulenum, uint32_t hashval)
{
	struct dyn_ipv6_state *s;
	uint32_t version, bucket;

	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
restart:
	version = DYN_BUCKET_VERSION(bucket, ipv6_parent_del);
	CK_SLIST_FOREACH(s, &V_dyn_ipv6_parent[bucket], entry) {
		DYNSTATE_PROTECT(s);
		if (version != DYN_BUCKET_VERSION(bucket, ipv6_parent_del))
			goto restart;
		/*
		 * NOTE: we do not need to check kidx, because a parent rule
		 * cannot create states with different kidx.
		 * Also, a parent rule is always created for the forward
		 * direction.
		 */
		if (s->limit->parent == rule &&
		    s->limit->ruleid == ruleid &&
		    s->limit->rulenum == rulenum &&
		    s->proto == pkt->proto &&
		    s->sport == pkt->src_port &&
		    s->dport == pkt->dst_port && s->zoneid == zoneid &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->src_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->dst_ip6)) {
			if (s->limit->expire != time_uptime +
			    V_dyn_short_lifetime)
				ck_pr_store_32(&s->limit->expire,
				    time_uptime + V_dyn_short_lifetime);
			break;
		}
	}
	return (s);
}

static struct dyn_ipv6_state *
dyn_lookup_ipv6_parent_locked(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    const void *rule, uint32_t ruleid, uint16_t rulenum, uint32_t bucket)
{
	struct dyn_ipv6_state *s;

	DYN_BUCKET_ASSERT(bucket);
	CK_SLIST_FOREACH(s, &V_dyn_ipv6_parent[bucket], entry) {
		if (s->limit->parent == rule &&
		    s->limit->ruleid == ruleid &&
		    s->limit->rulenum == rulenum &&
		    s->proto == pkt->proto &&
		    s->sport == pkt->src_port &&
		    s->dport == pkt->dst_port && s->zoneid == zoneid &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->src_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->dst_ip6))
			break;
	}
	return (s);
}

#endif /* INET6 */

/*
 * Lookup dynamic state.
 *  pkt - filled by ipfw_chk() ipfw_flow_id;
 *  ulp - determined by ipfw_chk() upper level protocol header;
 *  info - info about the matched state to return back.
 * Returns a pointer to the state's parent rule and fills info. If there
 * is no state, NULL is returned.
 * On match ipfw_dyn_lookup_state() updates the state's counters.
 */
struct ip_fw *
ipfw_dyn_lookup_state(const struct ip_fw_args *args, const void *ulp,
    int pktlen, const ipfw_insn *cmd, struct ipfw_dyn_info *info)
{
	struct dyn_data *data;
	struct ip_fw *rule;

	IPFW_RLOCK_ASSERT(&V_layer3_chain);

	data = NULL;
	rule = NULL;
	info->kidx = cmd->arg1;
	info->direction = MATCH_NONE;
	info->hashval = hash_packet(&args->f_id);

	DYNSTATE_CRITICAL_ENTER();
	if (IS_IP4_FLOW_ID(&args->f_id)) {
		struct dyn_ipv4_state *s;

		s = dyn_lookup_ipv4_state(&args->f_id, ulp, info, pktlen);
		if (s != NULL) {
			/*
			 * Dynamic states are created using the same 5-tuple,
			 * so it is assumed that the parent rule for an
			 * O_LIMIT state has the same address family.
			 */
			data = s->data;
			if (s->type == O_LIMIT) {
				s = data->parent;
				rule = s->limit->parent;
			} else
				rule = data->parent;
		}
	}
#ifdef INET6
	else if (IS_IP6_FLOW_ID(&args->f_id)) {
		struct dyn_ipv6_state *s;

		s = dyn_lookup_ipv6_state(&args->f_id, dyn_getscopeid(args),
		    ulp, info, pktlen);
		if (s != NULL) {
			data = s->data;
			if (s->type == O_LIMIT) {
				s = data->parent;
				rule = s->limit->parent;
			} else
				rule = data->parent;
		}
	}
#endif
	if (data != NULL) {
		/*
		 * If the cached chain id is the same, we can avoid the rule
		 * index lookup. Otherwise do the lookup and update chain_id
		 * and f_pos. This is safe even if there is a concurrent
		 * thread that wants to update the same state, because
		 * chain->id can be changed only under IPFW_WLOCK().
		 */
		if (data->chain_id != V_layer3_chain.id) {
			data->f_pos = ipfw_find_rule(&V_layer3_chain,
			    data->rulenum, data->ruleid);
			/*
			 * Check that the found state has not been orphaned.
			 * While chain->id is being changed, the parent
			 * rule can be deleted. If the found rule doesn't
			 * match the parent pointer, consider this
			 * result as MATCH_NONE and return NULL.
			 *
			 * This will lead to creation of a new similar state
			 * that will be added into the head of this bucket.
			 * And the state that we currently have matched
			 * should be deleted by dyn_expire_states().
			 *
			 * In case dyn_keep_states is enabled, return a
			 * pointer to the deleted rule and an f_pos value
			 * corresponding to the penultimate rule.
			 * When V_dyn_keep_states is enabled, states that
			 * become orphaned get the DYN_REFERENCED flag and
			 * the rule is kept around, so we can return it.
			 * But since it is not in the rules map, we need to
			 * return an f_pos value such that if the search
			 * continues after the state handling, the next rule
			 * will be the last one - the default rule.
			 */
			if (V_layer3_chain.map[data->f_pos] == rule) {
				data->chain_id = V_layer3_chain.id;
				info->f_pos = data->f_pos;
			} else if (V_dyn_keep_states != 0) {
				/*
				 * The original rule pointer is still usable.
				 * So, we return it, but f_pos needs to be
				 * changed to point to the penultimate rule.
				 */
				MPASS(V_layer3_chain.n_rules > 1);
				data->chain_id = V_layer3_chain.id;
				data->f_pos = V_layer3_chain.n_rules - 2;
				info->f_pos = data->f_pos;
			} else {
				rule = NULL;
				info->direction = MATCH_NONE;
				DYN_DEBUG("rule %p [%u, %u] is considered "
				    "invalid in data %p", rule, data->ruleid,
				    data->rulenum, data);
				/* info->f_pos doesn't matter here. */
			}
		} else
			info->f_pos = data->f_pos;
	}
	DYNSTATE_CRITICAL_EXIT();
#if 0
	/*
	 * Return MATCH_NONE if the parent rule is in a disabled set.
	 * This will lead to creation of a new similar state that
	 * will be added into the head of this bucket.
	 *
	 * XXXAE: we need to be able to update the state's set when the
	 * parent rule set is changed.
	 */
	if (rule != NULL && (V_set_disable & (1 << rule->set))) {
		rule = NULL;
		info->direction = MATCH_NONE;
	}
#endif
	return (rule);
}

static struct dyn_parent *
dyn_alloc_parent(void *parent, uint32_t ruleid, uint16_t rulenum,
    uint8_t set, uint32_t hashval)
{
	struct dyn_parent *limit;

	limit = uma_zalloc(V_dyn_parent_zone, M_NOWAIT | M_ZERO);
	if (limit == NULL) {
		if (last_log != time_uptime) {
			last_log = time_uptime;
			log(LOG_DEBUG,
			    "ipfw: Cannot allocate parent dynamic state, "
			    "consider increasing "
			    "net.inet.ip.fw.dyn_parent_max\n");
		}
		return (NULL);
	}

	limit->parent = parent;
	limit->ruleid = ruleid;
	limit->rulenum = rulenum;
	limit->set = set;
	limit->hashval = hashval;
	limit->expire = time_uptime + V_dyn_short_lifetime;
	return (limit);
}

static struct dyn_data *
dyn_alloc_dyndata(void *parent, uint32_t ruleid, uint16_t rulenum,
    uint8_t set, const struct ipfw_flow_id *pkt, const void *ulp, int pktlen,
    uint32_t hashval, uint16_t fibnum)
{
	struct dyn_data *data;

	data = uma_zalloc(V_dyn_data_zone, M_NOWAIT | M_ZERO);
	if (data == NULL) {
		if (last_log != time_uptime) {
			last_log = time_uptime;
			log(LOG_DEBUG,
			    "ipfw: Cannot allocate dynamic state, "
			    "consider increasing net.inet.ip.fw.dyn_max\n");
		}
		return (NULL);
	}

	data->parent = parent;
	data->ruleid = ruleid;
	data->rulenum = rulenum;
	data->set = set;
	data->fibnum = fibnum;
	data->hashval = hashval;
	data->expire = time_uptime + V_dyn_syn_lifetime;
	dyn_update_proto_state(data, pkt, ulp, pktlen, MATCH_FORWARD);
	return (data);
}

static struct dyn_ipv4_state *
dyn_alloc_ipv4_state(const struct ipfw_flow_id *pkt, uint16_t kidx,
    uint8_t type)
{
	struct dyn_ipv4_state *s;

	s = uma_zalloc(V_dyn_ipv4_zone, M_NOWAIT | M_ZERO);
	if (s == NULL)
		return (NULL);

	s->type = type;
	s->kidx = kidx;
	s->proto = pkt->proto;
	s->sport = pkt->src_port;
	s->dport = pkt->dst_port;
	s->src = pkt->src_ip;
	s->dst = pkt->dst_ip;
	return (s);
}

/*
 * Add IPv4 parent state.
 * Returns a pointer to the parent state. When it is not NULL we are in
 * a critical section and the pointer is protected by a hazard pointer.
 * When some error occurs, it returns NULL and no exit from the critical
 * section is needed.
 */
static struct dyn_ipv4_state *
dyn_add_ipv4_parent(void *rule, uint32_t ruleid, uint16_t rulenum,
    uint8_t set, const struct ipfw_flow_id *pkt, uint32_t hashval,
    uint32_t version, uint16_t kidx)
{
	struct dyn_ipv4_state *s;
	struct dyn_parent *limit;
	uint32_t bucket;

	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
	DYN_BUCKET_LOCK(bucket);
	if (version != DYN_BUCKET_VERSION(bucket, ipv4_parent_add)) {
		/*
		 * Bucket version has been changed since last lookup,
		 * do lookup again to be sure that state does not exist.
		 */
		s = dyn_lookup_ipv4_parent_locked(pkt, rule, ruleid,
		    rulenum, bucket);
		if (s != NULL) {
			/*
			 * A simultaneous thread has already created this
			 * state. Just return it.
			 */
			DYNSTATE_CRITICAL_ENTER();
			DYNSTATE_PROTECT(s);
			DYN_BUCKET_UNLOCK(bucket);
			return (s);
		}
	}

	limit = dyn_alloc_parent(rule, ruleid, rulenum, set, hashval);
	if (limit == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		return (NULL);
	}

	s = dyn_alloc_ipv4_state(pkt, kidx, O_LIMIT_PARENT);
	if (s == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		uma_zfree(V_dyn_parent_zone, limit);
		return (NULL);
	}

	s->limit = limit;
	CK_SLIST_INSERT_HEAD(&V_dyn_ipv4_parent[bucket], s, entry);
	DYN_COUNT_INC(dyn_parent_count);
	DYN_BUCKET_VERSION_BUMP(bucket, ipv4_parent_add);
	DYNSTATE_CRITICAL_ENTER();
	DYNSTATE_PROTECT(s);
	DYN_BUCKET_UNLOCK(bucket);
	return (s);
}

static int
dyn_add_ipv4_state(void *parent, uint32_t ruleid, uint16_t rulenum,
    uint8_t set, const struct ipfw_flow_id *pkt, const void *ulp, int pktlen,
    uint32_t hashval, struct ipfw_dyn_info *info, uint16_t fibnum,
    uint16_t kidx, uint8_t type)
{
	struct dyn_ipv4_state *s;
	void *data;
	uint32_t bucket;

	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
	DYN_BUCKET_LOCK(bucket);
	if (info->direction == MATCH_UNKNOWN ||
	    info->kidx != kidx ||
	    info->hashval != hashval ||
	    info->version != DYN_BUCKET_VERSION(bucket, ipv4_add)) {
		/*
		 * Bucket version has been changed since last lookup,
		 * do lookup again to be sure that state does not exist.
		 */
		if (dyn_lookup_ipv4_state_locked(pkt, ulp, pktlen,
		    bucket, kidx) != 0) {
			DYN_BUCKET_UNLOCK(bucket);
			return (EEXIST);
		}
	}

	data = dyn_alloc_dyndata(parent, ruleid, rulenum, set, pkt, ulp,
	    pktlen, hashval, fibnum);
	if (data == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		return (ENOMEM);
	}

	s = dyn_alloc_ipv4_state(pkt, kidx, type);
	if (s == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		uma_zfree(V_dyn_data_zone, data);
		return (ENOMEM);
	}

	s->data = data;
	CK_SLIST_INSERT_HEAD(&V_dyn_ipv4[bucket], s, entry);
	DYN_COUNT_INC(dyn_count);
	DYN_BUCKET_VERSION_BUMP(bucket, ipv4_add);
	DYN_BUCKET_UNLOCK(bucket);
	return (0);
}

#ifdef INET6
static struct dyn_ipv6_state *
dyn_alloc_ipv6_state(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    uint16_t kidx, uint8_t type)
{
	struct dyn_ipv6_state *s;

	s = uma_zalloc(V_dyn_ipv6_zone, M_NOWAIT | M_ZERO);
	if (s == NULL)
		return (NULL);

	s->type = type;
	s->kidx = kidx;
	s->zoneid = zoneid;
	s->proto = pkt->proto;
	s->sport = pkt->src_port;
	s->dport = pkt->dst_port;
	s->src = pkt->src_ip6;
	s->dst = pkt->dst_ip6;
	return (s);
}
/*
 * Add IPv6 parent state.
 * Returns a pointer to the parent state. When it is not NULL we are in
 * a critical section and the pointer is protected by a hazard pointer.
 * When some error occurs, it returns NULL and no exit from the critical
 * section is needed.
 */
static struct dyn_ipv6_state *
dyn_add_ipv6_parent(void *rule, uint32_t ruleid, uint16_t rulenum,
    uint8_t set, const struct ipfw_flow_id *pkt, uint32_t zoneid,
    uint32_t hashval, uint32_t version, uint16_t kidx)
{
	struct dyn_ipv6_state *s;
	struct dyn_parent *limit;
	uint32_t bucket;

	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
	DYN_BUCKET_LOCK(bucket);
	if (version != DYN_BUCKET_VERSION(bucket, ipv6_parent_add)) {
		/*
		 * Bucket version has been changed since last lookup,
		 * do lookup again to be sure that state does not exist.
		 */
		s = dyn_lookup_ipv6_parent_locked(pkt, zoneid, rule, ruleid,
		    rulenum, bucket);
		if (s != NULL) {
			/*
			 * A simultaneous thread has already created this
			 * state. Just return it.
			 */
			DYNSTATE_CRITICAL_ENTER();
			DYNSTATE_PROTECT(s);
			DYN_BUCKET_UNLOCK(bucket);
			return (s);
		}
	}

	limit = dyn_alloc_parent(rule, ruleid, rulenum, set, hashval);
	if (limit == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		return (NULL);
	}

	s = dyn_alloc_ipv6_state(pkt, zoneid, kidx, O_LIMIT_PARENT);
	if (s == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		uma_zfree(V_dyn_parent_zone, limit);
		return (NULL);
	}

	s->limit = limit;
	CK_SLIST_INSERT_HEAD(&V_dyn_ipv6_parent[bucket], s, entry);
	DYN_COUNT_INC(dyn_parent_count);
	DYN_BUCKET_VERSION_BUMP(bucket, ipv6_parent_add);
	DYNSTATE_CRITICAL_ENTER();
	DYNSTATE_PROTECT(s);
	DYN_BUCKET_UNLOCK(bucket);
	return (s);
}
1728 static int
1729 dyn_add_ipv6_state(void *parent, uint32_t ruleid, uint16_t rulenum,
1730     uint8_t set, const struct ipfw_flow_id *pkt, uint32_t zoneid,
1731     const void *ulp, int pktlen, uint32_t hashval, struct ipfw_dyn_info *info,
1732     uint16_t fibnum, uint16_t kidx, uint8_t type)
1733 {
1734 	struct dyn_ipv6_state *s;
1735 	struct dyn_data *data;
1736 	uint32_t bucket;
1737 
1738 	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
1739 	DYN_BUCKET_LOCK(bucket);
1740 	if (info->direction == MATCH_UNKNOWN ||
1741 	    info->kidx != kidx ||
1742 	    info->hashval != hashval ||
1743 	    info->version != DYN_BUCKET_VERSION(bucket, ipv6_add)) {
1744 		/*
1745 		 * The bucket version has changed since the last lookup;
1746 		 * repeat the lookup to be sure the state does not exist.
1747 		 */
1748 		if (dyn_lookup_ipv6_state_locked(pkt, zoneid, ulp, pktlen,
1749 		    bucket, kidx) != 0) {
1750 			DYN_BUCKET_UNLOCK(bucket);
1751 			return (EEXIST);
1752 		}
1753 	}
1754 
1755 	data = dyn_alloc_dyndata(parent, ruleid, rulenum, set, pkt, ulp,
1756 	    pktlen, hashval, fibnum);
1757 	if (data == NULL) {
1758 		DYN_BUCKET_UNLOCK(bucket);
1759 		return (ENOMEM);
1760 	}
1761 
1762 	s = dyn_alloc_ipv6_state(pkt, zoneid, kidx, type);
1763 	if (s == NULL) {
1764 		DYN_BUCKET_UNLOCK(bucket);
1765 		uma_zfree(V_dyn_data_zone, data);
1766 		return (ENOMEM);
1767 	}
1768 
1769 	s->data = data;
1770 	CK_SLIST_INSERT_HEAD(&V_dyn_ipv6[bucket], s, entry);
1771 	DYN_COUNT_INC(dyn_count);
1772 	DYN_BUCKET_VERSION_BUMP(bucket, ipv6_add);
1773 	DYN_BUCKET_UNLOCK(bucket);
1774 	return (0);
1775 }
1776 #endif /* INET6 */
1777 
1778 static void *
1779 dyn_get_parent_state(const struct ipfw_flow_id *pkt, uint32_t zoneid,
1780     struct ip_fw *rule, uint32_t hashval, uint32_t limit, uint16_t kidx)
1781 {
1782 	char sbuf[24];
1783 	struct dyn_parent *p;
1784 	void *ret;
1785 	uint32_t bucket, version;
1786 
1787 	p = NULL;
1788 	ret = NULL;
1789 	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
1790 	DYNSTATE_CRITICAL_ENTER();
1791 	if (IS_IP4_FLOW_ID(pkt)) {
1792 		struct dyn_ipv4_state *s;
1793 
1794 		version = DYN_BUCKET_VERSION(bucket, ipv4_parent_add);
1795 		s = dyn_lookup_ipv4_parent(pkt, rule, rule->id,
1796 		    rule->rulenum, bucket);
1797 		if (s == NULL) {
1798 			/*
1799 			 * Exit from the critical section because
1800 			 * dyn_add_parent() will acquire the bucket lock.
1801 			 */
1802 			DYNSTATE_CRITICAL_EXIT();
1803 
1804 			s = dyn_add_ipv4_parent(rule, rule->id,
1805 			    rule->rulenum, rule->set, pkt, hashval,
1806 			    version, kidx);
1807 			if (s == NULL)
1808 				return (NULL);
1809 			/* Now we are in the critical section again. */
1810 		}
1811 		ret = s;
1812 		p = s->limit;
1813 	}
1814 #ifdef INET6
1815 	else if (IS_IP6_FLOW_ID(pkt)) {
1816 		struct dyn_ipv6_state *s;
1817 
1818 		version = DYN_BUCKET_VERSION(bucket, ipv6_parent_add);
1819 		s = dyn_lookup_ipv6_parent(pkt, zoneid, rule, rule->id,
1820 		    rule->rulenum, bucket);
1821 		if (s == NULL) {
1822 			/*
1823 			 * Exit from the critical section because
1824 			 * dyn_add_parent() can acquire the bucket mutex.
1825 			 */
1826 			DYNSTATE_CRITICAL_EXIT();
1827 
1828 			s = dyn_add_ipv6_parent(rule, rule->id,
1829 			    rule->rulenum, rule->set, pkt, zoneid, hashval,
1830 			    version, kidx);
1831 			if (s == NULL)
1832 				return (NULL);
1833 			/* Now we are in the critical section again. */
1834 		}
1835 		ret = s;
1836 		p = s->limit;
1837 	}
1838 #endif
1839 	else {
1840 		DYNSTATE_CRITICAL_EXIT();
1841 		return (NULL);
1842 	}
1843 
1844 	/* Check the limit. */
1845 	if (DPARENT_COUNT(p) >= limit) {
1846 		DYNSTATE_CRITICAL_EXIT();
1847 		if (V_fw_verbose && last_log != time_uptime) {
1848 			last_log = time_uptime;
1849 			snprintf(sbuf, sizeof(sbuf), "%u drop session",
1850 			    rule->rulenum);
1851 			print_dyn_rule_flags(pkt, O_LIMIT,
1852 			    LOG_SECURITY | LOG_DEBUG, sbuf,
1853 			    "too many entries");
1854 		}
1855 		return (NULL);
1856 	}
1857 
1858 	/* Take the new session into account. */
1859 	DPARENT_COUNT_INC(p);
1860 	/*
1861 	 * We must exit from the critical section because the following
1862 	 * code can acquire the bucket mutex. We rely on the 'count' field:
1863 	 * the state will not expire while it has child states, i.e. while
1864 	 * the 'count' field is not zero. Return the state pointer; child
1865 	 * states will use it as their parent.
1866 	 */
1867 	DYNSTATE_CRITICAL_EXIT();
1868 	return (ret);
1869 }
1870 
1871 static int
1872 dyn_install_state(const struct ipfw_flow_id *pkt, uint32_t zoneid,
1873     uint16_t fibnum, const void *ulp, int pktlen, void *rule,
1874     uint32_t ruleid, uint16_t rulenum, uint8_t set,
1875     struct ipfw_dyn_info *info, uint32_t limit, uint16_t limit_mask,
1876     uint16_t kidx, uint8_t type)
1877 {
1878 	struct ipfw_flow_id id;
1879 	uint32_t hashval, parent_hashval;
1880 	int ret;
1881 
1882 	MPASS(type == O_LIMIT || type == O_KEEP_STATE);
1883 
1884 	if (type == O_LIMIT) {
1885 		/* Create a masked flow id and calculate the parent hash. */
1886 		id.addr_type = pkt->addr_type;
1887 		id.proto = pkt->proto;
1888 		id.fib = fibnum; /* unused */
1889 		id.src_port = (limit_mask & DYN_SRC_PORT) ?
1890 		    pkt->src_port: 0;
1891 		id.dst_port = (limit_mask & DYN_DST_PORT) ?
1892 		    pkt->dst_port: 0;
1893 		if (IS_IP4_FLOW_ID(pkt)) {
1894 			id.src_ip = (limit_mask & DYN_SRC_ADDR) ?
1895 			    pkt->src_ip: 0;
1896 			id.dst_ip = (limit_mask & DYN_DST_ADDR) ?
1897 			    pkt->dst_ip: 0;
1898 		}
1899 #ifdef INET6
1900 		else if (IS_IP6_FLOW_ID(pkt)) {
1901 			if (limit_mask & DYN_SRC_ADDR)
1902 				id.src_ip6 = pkt->src_ip6;
1903 			else
1904 				memset(&id.src_ip6, 0, sizeof(id.src_ip6));
1905 			if (limit_mask & DYN_DST_ADDR)
1906 				id.dst_ip6 = pkt->dst_ip6;
1907 			else
1908 				memset(&id.dst_ip6, 0, sizeof(id.dst_ip6));
1909 		}
1910 #endif
1911 		else
1912 			return (EAFNOSUPPORT);
1913 
1914 		parent_hashval = hash_parent(&id, rule);
1915 		rule = dyn_get_parent_state(&id, zoneid, rule, parent_hashval,
1916 		    limit, kidx);
1917 		if (rule == NULL) {
1918 #if 0
1919 			if (V_fw_verbose && last_log != time_uptime) {
1920 				last_log = time_uptime;
1921 				snprintf(sbuf, sizeof(sbuf),
1922 				    "%u drop session", rule->rulenum);
1923 				print_dyn_rule_flags(pkt, O_LIMIT,
1924 				    LOG_SECURITY | LOG_DEBUG, sbuf,
1925 				    "too many entries");
1926 			}
1927 #endif
1928 			return (EACCES);
1929 		}
1930 		/*
1931 		 * The limit has not been reached, so create a new state.
1932 		 * Note that 'rule' now points to the parent state.
1933 		 */
1934 	}
1935 
1936 	hashval = hash_packet(pkt);
1937 	if (IS_IP4_FLOW_ID(pkt))
1938 		ret = dyn_add_ipv4_state(rule, ruleid, rulenum, set, pkt,
1939 		    ulp, pktlen, hashval, info, fibnum, kidx, type);
1940 #ifdef INET6
1941 	else if (IS_IP6_FLOW_ID(pkt))
1942 		ret = dyn_add_ipv6_state(rule, ruleid, rulenum, set, pkt,
1943 		    zoneid, ulp, pktlen, hashval, info, fibnum, kidx, type);
1944 #endif /* INET6 */
1945 	else
1946 		ret = EAFNOSUPPORT;
1947 
1948 	if (type == O_LIMIT) {
1949 		if (ret != 0) {
1950 			/*
1951 			 * We failed to create a child state for the O_LIMIT
1952 			 * opcode. Since we already counted it in the parent,
1953 			 * we must revert the counter. The 'rule' pointer
1954 			 * points to the parent state; use it to get the
1955 			 * dyn_parent.
1956 			 *
1957 			 * XXXAE: it should be safe to use the 'rule' pointer
1958 			 * without an extra lookup, the parent state is
1959 			 * referenced and should not be freed.
1960 			 */
1961 			if (IS_IP4_FLOW_ID(&id))
1962 				DPARENT_COUNT_DEC(
1963 				    ((struct dyn_ipv4_state *)rule)->limit);
1964 #ifdef INET6
1965 			else if (IS_IP6_FLOW_ID(&id))
1966 				DPARENT_COUNT_DEC(
1967 				    ((struct dyn_ipv6_state *)rule)->limit);
1968 #endif
1969 		}
1970 	}
1971 	/*
1972 	 * EEXIST means that a concurrent thread has already created
1973 	 * this state. Consider this a success.
1974 	 *
1975 	 * XXXAE: should we invalidate 'info' content here?
1976 	 */
1977 	if (ret == EEXIST)
1978 		return (0);
1979 	return (ret);
1980 }
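/*
 * Illustrative sketch, not compiled: how limit_mask shapes the parent
 * flow id in dyn_install_state() above. For "limit src-addr N" only
 * DYN_SRC_ADDR is set, so every connection from the same source address
 * collapses into one masked id, and therefore into one O_LIMIT_PARENT
 * state whose child counter enforces N. The values are made up.
 */
#if 0
	struct ipfw_flow_id id;

	memset(&id, 0, sizeof(id));		/* unmasked fields stay zero */
	id.addr_type = 4;
	id.proto = IPPROTO_TCP;
	if (limit_mask & DYN_SRC_ADDR)
		id.src_ip = pkt->src_ip;	/* e.g. 10.0.0.1 */
	if (limit_mask & DYN_SRC_PORT)
		id.src_port = pkt->src_port;
	/* DYN_DST_ADDR/DYN_DST_PORT select dst_ip/dst_port the same way. */
#endif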
1981 /*
1982  * Install dynamic state.
1983  * chain - ipfw's instance;
1984  * rule - the parent rule that installs the state;
1985  * cmd - opcode that installs the state;
1986  * args - ipfw arguments;
1987  * ulp - upper level protocol header;
1988  * pktlen - packet length;
1989  * info - dynamic state lookup info;
1990  * tablearg - tablearg id.
1991  *
1992  * Returns a non-zero value (failure) if the state was not installed
1993  * because of an error or because the session limit was enforced.
1994  */
1995 int
1996 ipfw_dyn_install_state(struct ip_fw_chain *chain, struct ip_fw *rule,
1997     const ipfw_insn_limit *cmd, const struct ip_fw_args *args,
1998     const void *ulp, int pktlen, struct ipfw_dyn_info *info,
1999     uint32_t tablearg)
2000 {
2001 	uint32_t limit;
2002 	uint16_t limit_mask;
2003 
2004 	if (cmd->o.opcode == O_LIMIT) {
2005 		limit = IP_FW_ARG_TABLEARG(chain, cmd->conn_limit, limit);
2006 		limit_mask = cmd->limit_mask;
2007 	} else {
2008 		limit = 0;
2009 		limit_mask = 0;
2010 	}
2011 	return (dyn_install_state(&args->f_id,
2012 #ifdef INET6
2013 	    IS_IP6_FLOW_ID(&args->f_id) ? dyn_getscopeid(args):
2014 #endif
2015 	    0, M_GETFIB(args->m), ulp, pktlen, rule, rule->id, rule->rulenum,
2016 	    rule->set, info, limit, limit_mask, cmd->o.arg1, cmd->o.opcode));
2017 }
2018 
2019 /*
2020  * Free state entries from the expired lists that are safe to remove.
2021  */
2022 static void
2023 dyn_free_states(struct ip_fw_chain *chain)
2024 {
2025 	struct dyn_ipv4_state *s4, *s4n;
2026 #ifdef INET6
2027 	struct dyn_ipv6_state *s6, *s6n;
2028 #endif
2029 	int cached_count, i;
2030 
2031 	/*
2032 	 * We keep pointers to objects that are in use on each CPU
2033 	 * in the per-CPU dyn_hp pointer. When an object is going to be
2034 	 * removed, it is first unlinked from the corresponding list,
2035 	 * which bumps the dyn_bucket_xxx_del version of its bucket.
2036 	 * Unlinked objects are placed on the corresponding dyn_expired_xxx
2037 	 * list. A reader that is going to dereference an object pointer
2038 	 * checks that version before and after storing the pointer
2039 	 * into dyn_hp. If the version is the same, the object is protected
2040 	 * from freeing and is safe to dereference. Otherwise the reader
2041 	 * restarts the iteration from the beginning of the list, where
2042 	 * this object, being unlinked, is no longer accessible.
2043 	 *
2044 	 * Copy the dyn_hp pointer of each CPU into the dyn_hp_cache array.
2045 	 * It does not matter that some pointers may change while we
2046 	 * are copying: we only need to check that objects removed in
2047 	 * the previous pass are no longer in use. If a dyn_hp pointer
2048 	 * did not contain such an object at the time of copying, it can
2049 	 * not appear there later, because the object is already unlinked.
2050 	 * And for newly stored pointers we will not free objects that
2051 	 * are unlinked only in the current pass.
2052 	 */
2053 	cached_count = 0;
2054 	CPU_FOREACH(i) {
2055 		dyn_hp_cache[cached_count] = DYNSTATE_GET(i);
2056 		if (dyn_hp_cache[cached_count] != NULL)
2057 			cached_count++;
2058 	}
2059 
2060 	/*
2061 	 * Free the expired states that are safe to free.
2062 	 * Check each entry from a previous pass on the dyn_expired_xxx
2063 	 * lists: if a pointer to the object is in the dyn_hp_cache
2064 	 * array, keep it until the next pass; otherwise it is safe
2065 	 * to free the object.
2066 	 *
2067 	 * XXXAE: optimize this to use SLIST_REMOVE_AFTER.
2068 	 */
2069 #define	DYN_FREE_STATES(s, next, name)		do {	\
2070 	s = SLIST_FIRST(&V_dyn_expired_ ## name);	\
2071 	while (s != NULL) {				\
2072 		next = SLIST_NEXT(s, expired);		\
2073 		for (i = 0; i < cached_count; i++)	\
2074 			if (dyn_hp_cache[i] == s)	\
2075 				break;			\
2076 		if (i == cached_count) {		\
2077 			if (s->type == O_LIMIT_PARENT &&	\
2078 			    s->limit->count != 0) {	\
2079 				s = next;		\
2080 				continue;		\
2081 			}				\
2082 			SLIST_REMOVE(&V_dyn_expired_ ## name,	\
2083 			    s, dyn_ ## name ## _state, expired);	\
2084 			if (s->type == O_LIMIT_PARENT)	\
2085 				uma_zfree(V_dyn_parent_zone, s->limit);	\
2086 			else				\
2087 				uma_zfree(V_dyn_data_zone, s->data);	\
2088 			uma_zfree(V_dyn_ ## name ## _zone, s);	\
2089 		}					\
2090 		s = next;				\
2091 	}						\
2092 } while (0)
2093 
2094 	/*
2095 	 * Protect access to the expired lists with DYN_EXPIRED_LOCK.
2096 	 * Userland can invoke ipfw_expire_dyn_states() to delete
2097 	 * specific states, which will modify the expired
2098 	 * lists.
2099 	 *
2100 	 * XXXAE: do we need DYN_EXPIRED_LOCK? We can just use
2101 	 * IPFW_UH_WLOCK to protect access to these lists.
2102 	 */
2103 	DYN_EXPIRED_LOCK();
2104 	DYN_FREE_STATES(s4, s4n, ipv4);
2105 #ifdef INET6
2106 	DYN_FREE_STATES(s6, s6n, ipv6);
2107 #endif
2108 	DYN_EXPIRED_UNLOCK();
2109 #undef DYN_FREE_STATES
2110 }
2111 
2112 /*
2113  * Returns:
2114  * 0 when the state is not matched by the specified range;
2115  * 1 when the state is matched by the specified range;
2116  * 2 when the state is matched by the specified range and deletion of
2117  *   dynamic states was requested.
2118  */
2119 static int
2120 dyn_match_range(uint16_t rulenum, uint8_t set, const ipfw_range_tlv *rt)
2121 {
2122 
2123 	MPASS(rt != NULL);
2124 	/* flush all states */
2125 	if (rt->flags & IPFW_RCFLAG_ALL) {
2126 		if (rt->flags & IPFW_RCFLAG_DYNAMIC)
2127 			return (2); /* forced */
2128 		return (1);
2129 	}
2130 	if ((rt->flags & IPFW_RCFLAG_SET) != 0 && set != rt->set)
2131 		return (0);
2132 	if ((rt->flags & IPFW_RCFLAG_RANGE) != 0 &&
2133 	    (rulenum < rt->start_rule || rulenum > rt->end_rule))
2134 		return (0);
2135 	if (rt->flags & IPFW_RCFLAG_DYNAMIC)
2136 		return (2);
2137 	return (1);
2138 }
2139 
2140 static void
2141 dyn_acquire_rule(struct ip_fw_chain *ch, struct dyn_data *data,
2142     struct ip_fw *rule, uint16_t kidx)
2143 {
2144 	struct dyn_state_obj *obj;
2145 
2146 	/*
2147 	 * Do not acquire the reference twice.
2148 	 * This can happen when rule deletion is executed for
2149 	 * the same range, but with a different ruleset id.
2150 	 */
2151 	if (data->flags & DYN_REFERENCED)
2152 		return;
2153 
2154 	IPFW_UH_WLOCK_ASSERT(ch);
2155 	MPASS(kidx != 0);
2156 
2157 	data->flags |= DYN_REFERENCED;
2158 	/* Reference the named object. */
2159 	obj = SRV_OBJECT(ch, kidx);
2160 	obj->no.refcnt++;
2161 	MPASS(obj->no.etlv == IPFW_TLV_STATE_NAME);
2162 
2163 	/* Reference the parent rule. */
2164 	rule->refcnt++;
2165 }
2166 
2167 static void
2168 dyn_release_rule(struct ip_fw_chain *ch, struct dyn_data *data,
2169     struct ip_fw *rule, uint16_t kidx)
2170 {
2171 	struct dyn_state_obj *obj;
2172 
2173 	IPFW_UH_WLOCK_ASSERT(ch);
2174 	MPASS(kidx != 0);
2175 
2176 	obj = SRV_OBJECT(ch, kidx);
2177 	if (obj->no.refcnt == 1)
2178 		dyn_destroy(ch, &obj->no);
2179 	else
2180 		obj->no.refcnt--;
2181 
2182 	if (--rule->refcnt == 1)
2183 		ipfw_free_rule(rule);
2184 }
2185 
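/*
 * Illustrative sketch, not compiled: the life cycle of an "orphaned"
 * state when V_dyn_keep_states is enabled. The real work is done by
 * dyn_match_*_state() and dyn_expire_states(); this only shows how the
 * two helpers above pair up.
 */
#if 0
	/*
	 * Rule deletion: the state matches the deleted range, but it
	 * is kept alive and pinned instead of being unlinked.
	 */
	dyn_acquire_rule(ch, s->data, rule, s->kidx);
	/* sets DYN_REFERENCED, bumps obj->no.refcnt and rule->refcnt */

	/* Later, when the state expires and is finally unlinked: */
	if (s->data->flags & DYN_REFERENCED)
		dyn_release_rule(ch, s->data, rule, s->kidx);
	/*
	 * drops both references, destroying the named object and
	 * freeing the rule when these were the last ones
	 */
#endif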
2186 /*
2187  * We do not keep O_LIMIT_PARENT states when V_dyn_keep_states is enabled.
2188  * An O_LIMIT state is created when a new connection is about to be
2189  * established and there is no matching state. Since the old parent rule
2190  * was deleted, we cannot create new states with the old parent; thus we
2191  * cannot account for new connections together with the already
2192  * established ones, and cannot do proper limiting.
2193  */
2194 static int
2195 dyn_match_ipv4_state(struct ip_fw_chain *ch, struct dyn_ipv4_state *s,
2196     const ipfw_range_tlv *rt)
2197 {
2198 	struct ip_fw *rule;
2199 	int ret;
2200 
2201 	if (s->type == O_LIMIT_PARENT)
2202 		return (dyn_match_range(s->limit->rulenum,
2203 		    s->limit->set, rt));
2204 
2205 	ret = dyn_match_range(s->data->rulenum, s->data->set, rt);
2206 	if (ret == 0 || V_dyn_keep_states == 0 || ret > 1)
2207 		return (ret);
2208 
2209 	rule = s->data->parent;
2210 	if (s->type == O_LIMIT)
2211 		rule = ((struct dyn_ipv4_state *)rule)->limit->parent;
2212 	dyn_acquire_rule(ch, s->data, rule, s->kidx);
2213 	return (0);
2214 }
2215 
2216 #ifdef INET6
2217 static int
2218 dyn_match_ipv6_state(struct ip_fw_chain *ch, struct dyn_ipv6_state *s,
2219     const ipfw_range_tlv *rt)
2220 {
2221 	struct ip_fw *rule;
2222 	int ret;
2223 
2224 	if (s->type == O_LIMIT_PARENT)
2225 		return (dyn_match_range(s->limit->rulenum,
2226 		    s->limit->set, rt));
2227 
2228 	ret = dyn_match_range(s->data->rulenum, s->data->set, rt);
2229 	if (ret == 0 || V_dyn_keep_states == 0 || ret > 1)
2230 		return (ret);
2231 
2232 	rule = s->data->parent;
2233 	if (s->type == O_LIMIT)
2234 		rule = ((struct dyn_ipv6_state *)rule)->limit->parent;
2235 	dyn_acquire_rule(ch, s->data, rule, s->kidx);
2236 	return (0);
2237 }
2238 #endif
2239 
2240 /*
2241  * Unlink expired entries from the state lists.
2242  * @rt can be used to specify the range of states for deletion.
2243  */
2244 static void
2245 dyn_expire_states(struct ip_fw_chain *ch, ipfw_range_tlv *rt)
2246 {
2247 	struct dyn_ipv4_slist expired_ipv4;
2248 #ifdef INET6
2249 	struct dyn_ipv6_slist expired_ipv6;
2250 	struct dyn_ipv6_state *s6, *s6n, *s6p;
2251 #endif
2252 	struct dyn_ipv4_state *s4, *s4n, *s4p;
2253 	void *rule;
2254 	int bucket, removed, length, max_length;
2255 
2256 	IPFW_UH_WLOCK_ASSERT(ch);
2257 
2258 	/*
2259 	 * Unlink expired states from each bucket.
2260 	 * With the bucket lock held, iterate over the entries of each of
2261 	 * the lists: ipv4, ipv4_parent, ipv6, and ipv6_parent. Check the
2262 	 * expire time and unlink the entry from the list, link it into
2263 	 * the temporary expired_xxx lists, then bump the "del" bucket version.
2264 	 *
2265 	 * When an entry is removed, the corresponding states counter is
2266 	 * decremented. If the entry is of O_LIMIT type, the parent's
2267 	 * reference counter is decremented.
2268 	 *
2269 	 * NOTE: this function can be called from userspace context
2270 	 * when the user deletes rules. In this case all matched states
2271 	 * are forcibly unlinked. O_LIMIT_PARENT states are kept on the
2272 	 * expired lists until their reference counter becomes zero.
2273 	 */
2274 #define	DYN_UNLINK_STATES(s, prev, next, exp, af, name, extra) do {	\
2275 	length = 0;						\
2276 	removed = 0;						\
2277 	prev = NULL;						\
2278 	s = CK_SLIST_FIRST(&V_dyn_ ## name [bucket]);		\
2279 	while (s != NULL) {					\
2280 		next = CK_SLIST_NEXT(s, entry);			\
2281 		if ((TIME_LEQ((s)->exp, time_uptime) && extra) ||	\
2282 		    (rt != NULL &&				\
2283 		    dyn_match_ ## af ## _state(ch, s, rt))) {	\
2284 			if (prev != NULL)			\
2285 				CK_SLIST_REMOVE_AFTER(prev, entry);	\
2286 			else					\
2287 				CK_SLIST_REMOVE_HEAD(		\
2288 				    &V_dyn_ ## name [bucket], entry);	\
2289 			removed++;				\
2290 			SLIST_INSERT_HEAD(&expired_ ## af, s, expired);	\
2291 			if (s->type == O_LIMIT_PARENT)		\
2292 				DYN_COUNT_DEC(dyn_parent_count);	\
2293 			else {					\
2294 				DYN_COUNT_DEC(dyn_count);	\
2295 				if (s->data->flags & DYN_REFERENCED) {	\
2296 					rule = s->data->parent;	\
2297 					if (s->type == O_LIMIT)	\
2298 						rule = ((__typeof(s))	\
2299 						    rule)->limit->parent;\
2300 					dyn_release_rule(ch, s->data,	\
2301 					    rule, s->kidx);	\
2302 				}				\
2303 				if (s->type == O_LIMIT) {	\
2304 					s = s->data->parent;	\
2305 					DPARENT_COUNT_DEC(s->limit);	\
2306 				}				\
2307 			}					\
2308 		} else {					\
2309 			prev = s;				\
2310 			length++;				\
2311 		}						\
2312 		s = next;					\
2313 	}							\
2314 	if (removed != 0)					\
2315 		DYN_BUCKET_VERSION_BUMP(bucket, name ## _del);	\
2316 	if (length > max_length)				\
2317 		max_length = length;				\
2318 } while (0)
2319 
2320 	SLIST_INIT(&expired_ipv4);
2321 #ifdef INET6
2322 	SLIST_INIT(&expired_ipv6);
2323 #endif
2324 	max_length = 0;
2325 	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
2326 		DYN_BUCKET_LOCK(bucket);
2327 		DYN_UNLINK_STATES(s4, s4p, s4n, data->expire, ipv4, ipv4, 1);
2328 		DYN_UNLINK_STATES(s4, s4p, s4n, limit->expire, ipv4,
2329 		    ipv4_parent, (s4->limit->count == 0));
2330 #ifdef INET6
2331 		DYN_UNLINK_STATES(s6, s6p, s6n, data->expire, ipv6, ipv6, 1);
2332 		DYN_UNLINK_STATES(s6, s6p, s6n, limit->expire, ipv6,
2333 		    ipv6_parent, (s6->limit->count == 0));
2334 #endif
2335 		DYN_BUCKET_UNLOCK(bucket);
2336 	}
2337 	/* Update curr_max_length for statistics. */
2338 	V_curr_max_length = max_length;
2339 	/*
2340 	 * Concatenate temporary lists with global expired lists.
2341 	 */
2342 	DYN_EXPIRED_LOCK();
2343 	SLIST_CONCAT(&V_dyn_expired_ipv4, &expired_ipv4,
2344 	    dyn_ipv4_state, expired);
2345 #ifdef INET6
2346 	SLIST_CONCAT(&V_dyn_expired_ipv6, &expired_ipv6,
2347 	    dyn_ipv6_state, expired);
2348 #endif
2349 	DYN_EXPIRED_UNLOCK();
2350 #undef DYN_UNLINK_STATES
2351 #undef DYN_UNREF_STATES
2352 }
2353 
2354 static struct mbuf *
2355 dyn_mgethdr(int len, uint16_t fibnum)
2356 {
2357 	struct mbuf *m;
2358 
2359 	m = m_gethdr(M_NOWAIT, MT_DATA);
2360 	if (m == NULL)
2361 		return (NULL);
2362 #ifdef MAC
2363 	mac_netinet_firewall_send(m);
2364 #endif
2365 	M_SETFIB(m, fibnum);
2366 	m->m_data += max_linkhdr;
2367 	m->m_flags |= M_SKIP_FIREWALL;
2368 	m->m_len = m->m_pkthdr.len = len;
2369 	bzero(m->m_data, len);
2370 	return (m);
2371 }
2372 
2373 static void
2374 dyn_make_keepalive_ipv4(struct mbuf *m, in_addr_t src, in_addr_t dst,
2375     uint32_t seq, uint32_t ack, uint16_t sport, uint16_t dport)
2376 {
2377 	struct tcphdr *tcp;
2378 	struct ip *ip;
2379 
2380 	ip = mtod(m, struct ip *);
2381 	ip->ip_v = 4;
2382 	ip->ip_hl = sizeof(*ip) >> 2;
2383 	ip->ip_tos = IPTOS_LOWDELAY;
2384 	ip->ip_len = htons(m->m_len);
2385 	ip->ip_off |= htons(IP_DF);
2386 	ip->ip_ttl = V_ip_defttl;
2387 	ip->ip_p = IPPROTO_TCP;
2388 	ip->ip_src.s_addr = htonl(src);
2389 	ip->ip_dst.s_addr = htonl(dst);
2390 
2391 	tcp = mtodo(m, sizeof(struct ip));
2392 	tcp->th_sport = htons(sport);
2393 	tcp->th_dport = htons(dport);
2394 	tcp->th_off = sizeof(struct tcphdr) >> 2;
2395 	tcp->th_seq = htonl(seq);
2396 	tcp->th_ack = htonl(ack);
2397 	tcp->th_flags = TH_ACK;
2398 	tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
2399 	    htons(sizeof(struct tcphdr) + IPPROTO_TCP));
2400 
2401 	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
2402 	m->m_pkthdr.csum_flags = CSUM_TCP;
2403 }
2404 
2405 static void
2406 dyn_enqueue_keepalive_ipv4(struct mbufq *q, const struct dyn_ipv4_state *s)
2407 {
2408 	struct mbuf *m;
2409 
2410 	if ((s->data->state & ACK_FWD) == 0 && s->data->ack_fwd > 0) {
2411 		m = dyn_mgethdr(sizeof(struct ip) + sizeof(struct tcphdr),
2412 		    s->data->fibnum);
2413 		if (m != NULL) {
2414 			dyn_make_keepalive_ipv4(m, s->dst, s->src,
2415 			    s->data->ack_fwd - 1, s->data->ack_rev,
2416 			    s->dport, s->sport);
2417 			if (mbufq_enqueue(q, m)) {
2418 				m_freem(m);
2419 				log(LOG_DEBUG, "ipfw: limit for IPv4 "
2420 				    "keepalive queue is reached.\n");
2421 				return;
2422 			}
2423 		}
2424 	}
2425 
2426 	if ((s->data->state & ACK_REV) == 0 && s->data->ack_rev > 0) {
2427 		m = dyn_mgethdr(sizeof(struct ip) + sizeof(struct tcphdr),
2428 		    s->data->fibnum);
2429 		if (m != NULL) {
2430 			dyn_make_keepalive_ipv4(m, s->src, s->dst,
2431 			    s->data->ack_rev - 1, s->data->ack_fwd,
2432 			    s->sport, s->dport);
2433 			if (mbufq_enqueue(q, m)) {
2434 				m_freem(m);
2435 				log(LOG_DEBUG, "ipfw: limit for IPv4 "
2436 				    "keepalive queue is reached.\n");
2437 				return;
2438 			}
2439 		}
2440 	}
2441 }
2442 
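/*
 * A note in sketch form, not compiled: why the keepalive uses "ack - 1"
 * as its sequence number. A bare ACK whose seq is one byte below what
 * the receiver has already acknowledged is a duplicate of old data, so
 * the receiver answers with a fresh ACK, and that reply re-matches the
 * dynamic state and refreshes its expire time. For the forward
 * direction above, the probe travels from s->dst back to s->src:
 */
#if 0
	dyn_make_keepalive_ipv4(m, s->dst, s->src,
	    s->data->ack_fwd - 1,	/* seq: one below the acked byte */
	    s->data->ack_rev,		/* ack: echo what we have seen */
	    s->dport, s->sport);	/* ports swapped to match */
#endif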
2443 /*
2444  * Prepare and send keep-alive packets.
2445  */
2446 static void
2447 dyn_send_keepalive_ipv4(struct ip_fw_chain *chain)
2448 {
2449 	struct mbufq q;
2450 	struct mbuf *m;
2451 	struct dyn_ipv4_state *s;
2452 	uint32_t bucket;
2453 
2454 	mbufq_init(&q, INT_MAX);
2455 	IPFW_UH_RLOCK(chain);
2456 	/*
2457 	 * It is safe not to use a hazard pointer and to do lockless
2458 	 * access to the lists, because state entries can not be deleted
2459 	 * while we hold IPFW_UH_RLOCK.
2460 	 */
2461 	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
2462 		CK_SLIST_FOREACH(s, &V_dyn_ipv4[bucket], entry) {
2463 			/*
2464 			 * Only established TCP connections that will
2465 			 * expire within dyn_keepalive_interval.
2466 			 */
2467 			if (s->proto != IPPROTO_TCP ||
2468 			    (s->data->state & BOTH_SYN) != BOTH_SYN ||
2469 			    TIME_LEQ(time_uptime + V_dyn_keepalive_interval,
2470 			    s->data->expire))
2471 				continue;
2472 			dyn_enqueue_keepalive_ipv4(&q, s);
2473 		}
2474 	}
2475 	IPFW_UH_RUNLOCK(chain);
2476 	while ((m = mbufq_dequeue(&q)) != NULL)
2477 		ip_output(m, NULL, NULL, 0, NULL, NULL);
2478 }
2479 
2480 #ifdef INET6
2481 static void
2482 dyn_make_keepalive_ipv6(struct mbuf *m, const struct in6_addr *src,
2483     const struct in6_addr *dst, uint32_t zoneid, uint32_t seq, uint32_t ack,
2484     uint16_t sport, uint16_t dport)
2485 {
2486 	struct tcphdr *tcp;
2487 	struct ip6_hdr *ip6;
2488 
2489 	ip6 = mtod(m, struct ip6_hdr *);
2490 	ip6->ip6_vfc |= IPV6_VERSION;
2491 	ip6->ip6_plen = htons(sizeof(struct tcphdr));
2492 	ip6->ip6_nxt = IPPROTO_TCP;
2493 	ip6->ip6_hlim = IPV6_DEFHLIM;
2494 	ip6->ip6_src = *src;
2495 	if (IN6_IS_ADDR_LINKLOCAL(src))
2496 		ip6->ip6_src.s6_addr16[1] = htons(zoneid & 0xffff);
2497 	ip6->ip6_dst = *dst;
2498 	if (IN6_IS_ADDR_LINKLOCAL(dst))
2499 		ip6->ip6_dst.s6_addr16[1] = htons(zoneid & 0xffff);
2500 
2501 	tcp = mtodo(m, sizeof(struct ip6_hdr));
2502 	tcp->th_sport = htons(sport);
2503 	tcp->th_dport = htons(dport);
2504 	tcp->th_off = sizeof(struct tcphdr) >> 2;
2505 	tcp->th_seq = htonl(seq);
2506 	tcp->th_ack = htonl(ack);
2507 	tcp->th_flags = TH_ACK;
2508 	tcp->th_sum = in6_cksum_pseudo(ip6, sizeof(struct tcphdr),
2509 	    IPPROTO_TCP, 0);
2510 
2511 	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
2512 	m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
2513 }
2514 
2515 static void
2516 dyn_enqueue_keepalive_ipv6(struct mbufq *q, const struct dyn_ipv6_state *s)
2517 {
2518 	struct mbuf *m;
2519 
2520 	if ((s->data->state & ACK_FWD) == 0 && s->data->ack_fwd > 0) {
2521 		m = dyn_mgethdr(sizeof(struct ip6_hdr) +
2522 		    sizeof(struct tcphdr), s->data->fibnum);
2523 		if (m != NULL) {
2524 			dyn_make_keepalive_ipv6(m, &s->dst, &s->src,
2525 			    s->zoneid, s->data->ack_fwd - 1, s->data->ack_rev,
2526 			    s->dport, s->sport);
2527 			if (mbufq_enqueue(q, m)) {
2528 				m_freem(m);
2529 				log(LOG_DEBUG, "ipfw: limit for IPv6 "
2530 				    "keepalive queue is reached.\n");
2531 				return;
2532 			}
2533 		}
2534 	}
2535 
2536 	if ((s->data->state & ACK_REV) == 0 && s->data->ack_rev > 0) {
2537 		m = dyn_mgethdr(sizeof(struct ip6_hdr) +
2538 		    sizeof(struct tcphdr), s->data->fibnum);
2539 		if (m != NULL) {
2540 			dyn_make_keepalive_ipv6(m, &s->src, &s->dst,
2541 			    s->zoneid, s->data->ack_rev - 1, s->data->ack_fwd,
2542 			    s->sport, s->dport);
2543 			if (mbufq_enqueue(q, m)) {
2544 				m_freem(m);
2545 				log(LOG_DEBUG, "ipfw: limit for IPv6 "
2546 				    "keepalive queue is reached.\n");
2547 				return;
2548 			}
2549 		}
2550 	}
2551 }
2552 
2553 static void
2554 dyn_send_keepalive_ipv6(struct ip_fw_chain *chain)
2555 {
2556 	struct mbufq q;
2557 	struct mbuf *m;
2558 	struct dyn_ipv6_state *s;
2559 	uint32_t bucket;
2560 
2561 	mbufq_init(&q, INT_MAX);
2562 	IPFW_UH_RLOCK(chain);
2563 	/*
2564 	 * It is safe not to use a hazard pointer and to do lockless
2565 	 * access to the lists, because state entries can not be deleted
2566 	 * while we hold IPFW_UH_RLOCK.
2567 	 */
2568 	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
2569 		CK_SLIST_FOREACH(s, &V_dyn_ipv6[bucket], entry) {
2570 			/*
2571 			 * Only established TCP connections that will
2572 			 * expire within dyn_keepalive_interval.
2573 			 */
2574 			if (s->proto != IPPROTO_TCP ||
2575 			    (s->data->state & BOTH_SYN) != BOTH_SYN ||
2576 			    TIME_LEQ(time_uptime + V_dyn_keepalive_interval,
2577 			    s->data->expire))
2578 				continue;
2579 			dyn_enqueue_keepalive_ipv6(&q, s);
2580 		}
2581 	}
2582 	IPFW_UH_RUNLOCK(chain);
2583 	while ((m = mbufq_dequeue(&q)) != NULL)
2584 		ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
2585 }
2586 #endif /* INET6 */
2587 
2588 static void
2589 dyn_grow_hashtable(struct ip_fw_chain *chain, uint32_t new)
2590 {
2591 #ifdef INET6
2592 	struct dyn_ipv6ck_slist *ipv6, *ipv6_parent;
2593 	uint32_t *ipv6_add, *ipv6_del, *ipv6_parent_add, *ipv6_parent_del;
2594 	struct dyn_ipv6_state *s6;
2595 #endif
2596 	struct dyn_ipv4ck_slist *ipv4, *ipv4_parent;
2597 	uint32_t *ipv4_add, *ipv4_del, *ipv4_parent_add, *ipv4_parent_del;
2598 	struct dyn_ipv4_state *s4;
2599 	struct mtx *bucket_lock;
2600 	void *tmp;
2601 	uint32_t bucket;
2602 
2603 	MPASS(powerof2(new));
2604 	DYN_DEBUG("grow hash size %u -> %u", V_curr_dyn_buckets, new);
2605 	/*
2606 	 * Allocate and initialize new lists.
2607 	 * XXXAE: on memory pressure this can disable the callout timer.
2608 	 */
2609 	bucket_lock = malloc(new * sizeof(struct mtx), M_IPFW,
2610 	    M_WAITOK | M_ZERO);
2611 	ipv4 = malloc(new * sizeof(struct dyn_ipv4ck_slist), M_IPFW,
2612 	    M_WAITOK | M_ZERO);
2613 	ipv4_parent = malloc(new * sizeof(struct dyn_ipv4ck_slist), M_IPFW,
2614 	    M_WAITOK | M_ZERO);
2615 	ipv4_add = malloc(new * sizeof(uint32_t), M_IPFW, M_WAITOK | M_ZERO);
2616 	ipv4_del = malloc(new * sizeof(uint32_t), M_IPFW, M_WAITOK | M_ZERO);
2617 	ipv4_parent_add = malloc(new * sizeof(uint32_t), M_IPFW,
2618 	    M_WAITOK | M_ZERO);
2619 	ipv4_parent_del = malloc(new * sizeof(uint32_t), M_IPFW,
2620 	    M_WAITOK | M_ZERO);
2621 #ifdef INET6
2622 	ipv6 = malloc(new * sizeof(struct dyn_ipv6ck_slist), M_IPFW,
2623 	    M_WAITOK | M_ZERO);
2624 	ipv6_parent = malloc(new * sizeof(struct dyn_ipv6ck_slist), M_IPFW,
2625 	    M_WAITOK | M_ZERO);
2626 	ipv6_add = malloc(new * sizeof(uint32_t), M_IPFW, M_WAITOK | M_ZERO);
2627 	ipv6_del = malloc(new * sizeof(uint32_t), M_IPFW, M_WAITOK | M_ZERO);
2628 	ipv6_parent_add = malloc(new * sizeof(uint32_t), M_IPFW,
2629 	    M_WAITOK | M_ZERO);
2630 	ipv6_parent_del = malloc(new * sizeof(uint32_t), M_IPFW,
2631 	    M_WAITOK | M_ZERO);
2632 #endif
2633 	for (bucket = 0; bucket < new; bucket++) {
2634 		DYN_BUCKET_LOCK_INIT(bucket_lock, bucket);
2635 		CK_SLIST_INIT(&ipv4[bucket]);
2636 		CK_SLIST_INIT(&ipv4_parent[bucket]);
2637 #ifdef INET6
2638 		CK_SLIST_INIT(&ipv6[bucket]);
2639 		CK_SLIST_INIT(&ipv6_parent[bucket]);
2640 #endif
2641 	}
2642 
2643 #define DYN_RELINK_STATES(s, hval, i, head, ohead) do {		\
2644 	while ((s = CK_SLIST_FIRST(&V_dyn_ ## ohead[i])) != NULL) {	\
2645 		CK_SLIST_REMOVE_HEAD(&V_dyn_ ## ohead[i], entry);	\
2646 		CK_SLIST_INSERT_HEAD(&head[DYN_BUCKET(s->hval, new)],	\
2647 		    s, entry);					\
2648 	}							\
2649 } while (0)
2650 	/*
2651 	 * Prevent rule changes from userland.
2652 	 */
2653 	IPFW_UH_WLOCK(chain);
2654 	/*
2655 	 * Hold traffic processing until we finish the resize, to
2656 	 * prevent access to the state lists.
2657 	 */
2658 	IPFW_WLOCK(chain);
2659 	/* Re-link all dynamic states */
2660 	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
2661 		DYN_RELINK_STATES(s4, data->hashval, bucket, ipv4, ipv4);
2662 		DYN_RELINK_STATES(s4, limit->hashval, bucket, ipv4_parent,
2663 		    ipv4_parent);
2664 #ifdef INET6
2665 		DYN_RELINK_STATES(s6, data->hashval, bucket, ipv6, ipv6);
2666 		DYN_RELINK_STATES(s6, limit->hashval, bucket, ipv6_parent,
2667 		    ipv6_parent);
2668 #endif
2669 	}
2670 
2671 #define DYN_SWAP_PTR(old, new, tmp) do {		\
2672 	tmp = old;					\
2673 	old = new;					\
2674 	new = tmp;					\
2675 } while (0)
2676 	/* Swap pointers */
2677 	DYN_SWAP_PTR(V_dyn_bucket_lock, bucket_lock, tmp);
2678 	DYN_SWAP_PTR(V_dyn_ipv4, ipv4, tmp);
2679 	DYN_SWAP_PTR(V_dyn_ipv4_parent, ipv4_parent, tmp);
2680 	DYN_SWAP_PTR(V_dyn_ipv4_add, ipv4_add, tmp);
2681 	DYN_SWAP_PTR(V_dyn_ipv4_parent_add, ipv4_parent_add, tmp);
2682 	DYN_SWAP_PTR(V_dyn_ipv4_del, ipv4_del, tmp);
2683 	DYN_SWAP_PTR(V_dyn_ipv4_parent_del, ipv4_parent_del, tmp);
2684 
2685 #ifdef INET6
2686 	DYN_SWAP_PTR(V_dyn_ipv6, ipv6, tmp);
2687 	DYN_SWAP_PTR(V_dyn_ipv6_parent, ipv6_parent, tmp);
2688 	DYN_SWAP_PTR(V_dyn_ipv6_add, ipv6_add, tmp);
2689 	DYN_SWAP_PTR(V_dyn_ipv6_parent_add, ipv6_parent_add, tmp);
2690 	DYN_SWAP_PTR(V_dyn_ipv6_del, ipv6_del, tmp);
2691 	DYN_SWAP_PTR(V_dyn_ipv6_parent_del, ipv6_parent_del, tmp);
2692 #endif
2693 	bucket = V_curr_dyn_buckets;
2694 	V_curr_dyn_buckets = new;
2695 
2696 	IPFW_WUNLOCK(chain);
2697 	IPFW_UH_WUNLOCK(chain);
2698 
2699 	/* Release old resources */
2700 	while (bucket-- != 0)
2701 		DYN_BUCKET_LOCK_DESTROY(bucket_lock, bucket);
2702 	free(bucket_lock, M_IPFW);
2703 	free(ipv4, M_IPFW);
2704 	free(ipv4_parent, M_IPFW);
2705 	free(ipv4_add, M_IPFW);
2706 	free(ipv4_parent_add, M_IPFW);
2707 	free(ipv4_del, M_IPFW);
2708 	free(ipv4_parent_del, M_IPFW);
2709 #ifdef INET6
2710 	free(ipv6, M_IPFW);
2711 	free(ipv6_parent, M_IPFW);
2712 	free(ipv6_add, M_IPFW);
2713 	free(ipv6_parent_add, M_IPFW);
2714 	free(ipv6_del, M_IPFW);
2715 	free(ipv6_parent_del, M_IPFW);
2716 #endif
2717 }
2718 
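/*
 * Illustrative sketch, not compiled: how states migrate between buckets
 * on resize, assuming DYN_BUCKET() reduces the hash modulo the table
 * size (the size is asserted above to be a power of 2, so this is a
 * simple mask). Relinking needs only the cached hashval, never a rehash
 * of the flow. The numbers are made up.
 */
#if 0
	uint32_t hashval = 0x123;

	(void)DYN_BUCKET(hashval, 256);	/* 0x123 & 0xff  == 0x23  */
	(void)DYN_BUCKET(hashval, 512);	/* 0x123 & 0x1ff == 0x123 */
	/*
	 * The new "add"/"del" version counters start from zero, which
	 * is harmless: a stale version cached by a reader only forces
	 * one extra lookup under the bucket lock.
	 */
#endif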
2719 /*
2720  * This function is used to perform various maintenance
2721  * on dynamic hash lists. Currently it is called every second.
2722  */
2723 static void
2724 dyn_tick(void *vnetx)
2725 {
2726 	uint32_t buckets;
2727 
2728 	CURVNET_SET((struct vnet *)vnetx);
2729 	/*
2730 	 * First free the states unlinked in previous passes.
2731 	 */
2732 	dyn_free_states(&V_layer3_chain);
2733 	/*
2734 	 * Now unlink other expired states.
2735 	 * We use IPFW_UH_WLOCK to avoid concurrent calls of
2736 	 * dyn_expire_states(). It is the only function that does
2737 	 * deletion of state entries from the state lists.
2738 	 */
2739 	IPFW_UH_WLOCK(&V_layer3_chain);
2740 	dyn_expire_states(&V_layer3_chain, NULL);
2741 	IPFW_UH_WUNLOCK(&V_layer3_chain);
2742 	/*
2743 	 * Send keepalives if they are enabled and the time has come.
2744 	 */
2745 	if (V_dyn_keepalive != 0 &&
2746 	    V_dyn_keepalive_last + V_dyn_keepalive_period <= time_uptime) {
2747 		V_dyn_keepalive_last = time_uptime;
2748 		dyn_send_keepalive_ipv4(&V_layer3_chain);
2749 #ifdef INET6
2750 		dyn_send_keepalive_ipv6(&V_layer3_chain);
2751 #endif
2752 	}
2753 	/*
2754 	 * Check if we need to resize the hash:
2755 	 * if the current number of states exceeds the number of buckets,
2756 	 * and dyn_buckets_max permits growing the number of buckets, then
2757 	 * do it. Grow the hash size to the minimum power of 2 which is
2758 	 * bigger than the current states count.
2759 	 */
2760 	if (V_curr_dyn_buckets < V_dyn_buckets_max &&
2761 	    (V_curr_dyn_buckets < V_dyn_count / 2 || (
2762 	    V_curr_dyn_buckets < V_dyn_count && V_curr_max_length > 8))) {
2763 		buckets = 1 << fls(V_dyn_count);
2764 		if (buckets > V_dyn_buckets_max)
2765 			buckets = V_dyn_buckets_max;
2766 		dyn_grow_hashtable(&V_layer3_chain, buckets);
2767 	}
2768 
2769 	callout_reset_on(&V_dyn_timeout, hz, dyn_tick, vnetx, 0);
2770 	CURVNET_RESTORE();
2771 }
2772 
2773 void
2774 ipfw_expire_dyn_states(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
2775 {
2776 	/*
2777 	 * Do not perform any checks if we currently have no dynamic states.
2778 	 */
2779 	if (V_dyn_count == 0)
2780 		return;
2781 
2782 	IPFW_UH_WLOCK_ASSERT(chain);
2783 	dyn_expire_states(chain, rt);
2784 }
2785 
2786 /*
2787  * Pass through all states and reset the eaction for orphaned rules.
2788  */
2789 void
2790 ipfw_dyn_reset_eaction(struct ip_fw_chain *ch, uint16_t eaction_id,
2791     uint16_t default_id, uint16_t instance_id)
2792 {
2793 #ifdef INET6
2794 	struct dyn_ipv6_state *s6;
2795 #endif
2796 	struct dyn_ipv4_state *s4;
2797 	struct ip_fw *rule;
2798 	uint32_t bucket;
2799 
2800 #define	DYN_RESET_EACTION(s, h, b)				\
2801 	CK_SLIST_FOREACH(s, &V_dyn_ ## h[b], entry) {		\
2802 		if ((s->data->flags & DYN_REFERENCED) == 0)	\
2803 			continue;				\
2804 		rule = s->data->parent;				\
2805 		if (s->type == O_LIMIT)				\
2806 			rule = ((__typeof(s))rule)->limit->parent;	\
2807 		ipfw_reset_eaction(ch, rule, eaction_id,	\
2808 		    default_id, instance_id);			\
2809 	}
2810 
2811 	IPFW_UH_WLOCK_ASSERT(ch);
2812 	if (V_dyn_count == 0)
2813 		return;
2814 	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
2815 		DYN_RESET_EACTION(s4, ipv4, bucket);
2816 #ifdef INET6
2817 		DYN_RESET_EACTION(s6, ipv6, bucket);
2818 #endif
2819 	}
2820 }
2821 
2822 /*
2823  * Returns the size of dynamic states in the legacy format.
2824  */
2825 int
2826 ipfw_dyn_len(void)
2827 {
2828 
2829 	return ((V_dyn_count + V_dyn_parent_count) * sizeof(ipfw_dyn_rule));
2830 }
2831 
2832 /*
2833  * Returns the number of dynamic states.
2834  * Marks every named object index used by dynamic states with a bit
2835  * in @bmask. Returns the number of named objects accounted in bmask
2836  * via @nocnt. Used by dump format v1 (current).
2837  */
2838 uint32_t
2839 ipfw_dyn_get_count(uint32_t *bmask, int *nocnt)
2840 {
2841 #ifdef INET6
2842 	struct dyn_ipv6_state *s6;
2843 #endif
2844 	struct dyn_ipv4_state *s4;
2845 	uint32_t bucket;
2846 
2847 #define	DYN_COUNT_OBJECTS(s, h, b)				\
2848 	CK_SLIST_FOREACH(s, &V_dyn_ ## h[b], entry) {		\
2849 		MPASS(s->kidx != 0);				\
2850 		if (ipfw_mark_object_kidx(bmask, IPFW_TLV_STATE_NAME,	\
2851 		    s->kidx) != 0)				\
2852 			(*nocnt)++;				\
2853 	}
2854 
2855 	IPFW_UH_RLOCK_ASSERT(&V_layer3_chain);
2856 
2857 	/* No need to pass through all the buckets. */
2858 	*nocnt = 0;
2859 	if (V_dyn_count + V_dyn_parent_count == 0)
2860 		return (0);
2861 
2862 	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
2863 		DYN_COUNT_OBJECTS(s4, ipv4, bucket);
2864 #ifdef INET6
2865 		DYN_COUNT_OBJECTS(s6, ipv6, bucket);
2866 #endif
2867 	}
2868 
2869 	return (V_dyn_count + V_dyn_parent_count);
2870 }
2871 
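/*
 * Illustrative sketch, not compiled: how the legacy dump path pairs
 * ipfw_dyn_len() with ipfw_get_dynamic() (defined below). A caller
 * sizes the buffer first, then lets ipfw_get_dynamic() advance the
 * write pointer; bp/ep bracket the remaining space. The buffer
 * handling here is hypothetical and simplified; states created after
 * the sizing step are simply cut off at ep.
 */
#if 0
	char *buf, *bp, *ep;
	int size;

	size = ipfw_dyn_len();		/* worst-case size in bytes */
	buf = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	bp = buf;
	ep = buf + size;
	IPFW_UH_RLOCK(chain);
	ipfw_get_dynamic(chain, &bp, ep);	/* bp advances */
	IPFW_UH_RUNLOCK(chain);
	/* (bp - buf) bytes now hold ipfw_dyn_rule entries. */
#endif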
2872 /*
2873  * Check if the rule contains at least one dynamic opcode.
2874  *
2875  * Returns 1 if such opcode is found, 0 otherwise.
2876  */
2877 int
2878 ipfw_is_dyn_rule(struct ip_fw *rule)
2879 {
2880 	int cmdlen, l;
2881 	ipfw_insn *cmd;
2882 
2883 	l = rule->cmd_len;
2884 	cmd = rule->cmd;
2885 	cmdlen = 0;
2886 	for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
2887 		cmdlen = F_LEN(cmd);
2888 
2889 		switch (cmd->opcode) {
2890 		case O_LIMIT:
2891 		case O_KEEP_STATE:
2892 		case O_PROBE_STATE:
2893 		case O_CHECK_STATE:
2894 			return (1);
2895 		}
2896 	}
2897 
2898 	return (0);
2899 }
2900 
2901 static void
2902 dyn_export_parent(const struct dyn_parent *p, uint16_t kidx,
2903     ipfw_dyn_rule *dst)
2904 {
2905 
2906 	dst->dyn_type = O_LIMIT_PARENT;
2907 	dst->kidx = kidx;
2908 	dst->count = (uint16_t)DPARENT_COUNT(p);
2909 	dst->expire = TIME_LEQ(p->expire, time_uptime) ? 0:
2910 	    p->expire - time_uptime;
2911 
2912 	/* 'rule' is used to pass up the rule number and set. */
2913 	memcpy(&dst->rule, &p->rulenum, sizeof(p->rulenum));
2914 	/* Store the set number in the high word of the dst->rule pointer. */
2915 	memcpy((char *)&dst->rule + sizeof(p->rulenum), &p->set,
2916 	    sizeof(p->set));
2917 
2918 	/* unused fields */
2919 	dst->pcnt = 0;
2920 	dst->bcnt = 0;
2921 	dst->parent = NULL;
2922 	dst->state = 0;
2923 	dst->ack_fwd = 0;
2924 	dst->ack_rev = 0;
2925 	dst->bucket = p->hashval;
2926 	/*
2927 	 * The legacy userland code interprets a NULL here as a marker
2928 	 * for the last dynamic rule, so use a non-NULL dummy value.
2929 	 */
2930 	dst->next = (ipfw_dyn_rule *)1;
2931 }
2932 
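/*
 * Illustrative sketch, not compiled: the 'rule' field of the legacy
 * ipfw_dyn_rule is a pointer-sized scratch area. dyn_export_parent()
 * above and dyn_export_data() below overlay the 16-bit rule number and
 * the 8-bit set number on its first bytes; a consumer can recover them
 * with the mirror image of those memcpy() calls:
 */
#if 0
	uint16_t rulenum;
	uint8_t set;

	memcpy(&rulenum, &dst->rule, sizeof(rulenum));
	memcpy(&set, (char *)&dst->rule + sizeof(rulenum), sizeof(set));
#endif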
2933 static void
2934 dyn_export_data(const struct dyn_data *data, uint16_t kidx, uint8_t type,
2935     ipfw_dyn_rule *dst)
2936 {
2937 
2938 	dst->dyn_type = type;
2939 	dst->kidx = kidx;
2940 	dst->pcnt = data->pcnt_fwd + data->pcnt_rev;
2941 	dst->bcnt = data->bcnt_fwd + data->bcnt_rev;
2942 	dst->expire = TIME_LEQ(data->expire, time_uptime) ? 0:
2943 	    data->expire - time_uptime;
2944 
2945 	/* 'rule' is used to pass up the rule number and set. */
2946 	memcpy(&dst->rule, &data->rulenum, sizeof(data->rulenum));
2947 	/* Store the set number in the high word of the dst->rule pointer. */
2948 	memcpy((char *)&dst->rule + sizeof(data->rulenum), &data->set,
2949 	    sizeof(data->set));
2950 
2951 	dst->state = data->state;
2952 	if (data->flags & DYN_REFERENCED)
2953 		dst->state |= IPFW_DYN_ORPHANED;
2954 
2955 	/* unused fields */
2956 	dst->parent = NULL;
2957 	dst->ack_fwd = data->ack_fwd;
2958 	dst->ack_rev = data->ack_rev;
2959 	dst->count = 0;
2960 	dst->bucket = data->hashval;
2961 	/*
2962 	 * The legacy userland code interprets a NULL here as a marker
2963 	 * for the last dynamic rule, so use a non-NULL dummy value.
2964 	 */
2965 	dst->next = (ipfw_dyn_rule *)1;
2966 }
2967 
2968 static void
2969 dyn_export_ipv4_state(const struct dyn_ipv4_state *s, ipfw_dyn_rule *dst)
2970 {
2971 
2972 	switch (s->type) {
2973 	case O_LIMIT_PARENT:
2974 		dyn_export_parent(s->limit, s->kidx, dst);
2975 		break;
2976 	default:
2977 		dyn_export_data(s->data, s->kidx, s->type, dst);
2978 	}
2979 
2980 	dst->id.dst_ip = s->dst;
2981 	dst->id.src_ip = s->src;
2982 	dst->id.dst_port = s->dport;
2983 	dst->id.src_port = s->sport;
2984 	dst->id.fib = s->data->fibnum;
2985 	dst->id.proto = s->proto;
2986 	dst->id._flags = 0;
2987 	dst->id.addr_type = 4;
2988 
2989 	memset(&dst->id.dst_ip6, 0, sizeof(dst->id.dst_ip6));
2990 	memset(&dst->id.src_ip6, 0, sizeof(dst->id.src_ip6));
2991 	dst->id.flow_id6 = dst->id.extra = 0;
2992 }
2993 
2994 #ifdef INET6
2995 static void
2996 dyn_export_ipv6_state(const struct dyn_ipv6_state *s, ipfw_dyn_rule *dst)
2997 {
2998 
2999 	switch (s->type) {
3000 	case O_LIMIT_PARENT:
3001 		dyn_export_parent(s->limit, s->kidx, dst);
3002 		break;
3003 	default:
3004 		dyn_export_data(s->data, s->kidx, s->type, dst);
3005 	}
3006 
3007 	dst->id.src_ip6 = s->src;
3008 	dst->id.dst_ip6 = s->dst;
3009 	dst->id.dst_port = s->dport;
3010 	dst->id.src_port = s->sport;
3011 	dst->id.fib = s->data->fibnum;
3012 	dst->id.proto = s->proto;
3013 	dst->id._flags = 0;
3014 	dst->id.addr_type = 6;
3015 
3016 	dst->id.dst_ip = dst->id.src_ip = 0;
3017 	dst->id.flow_id6 = dst->id.extra = 0;
3018 }
3019 #endif /* INET6 */
3020 
3021 /*
3022  * Fills the buffer given by @sd with dynamic states.
3023  * Used by dump format v1 (current).
3024  *
3025  * Returns 0 on success.
3026  */
3027 int
3028 ipfw_dump_states(struct ip_fw_chain *chain, struct sockopt_data *sd)
3029 {
3030 #ifdef INET6
3031 	struct dyn_ipv6_state *s6;
3032 #endif
3033 	struct dyn_ipv4_state *s4;
3034 	ipfw_obj_dyntlv *dst, *last;
3035 	ipfw_obj_ctlv *ctlv;
3036 	uint32_t bucket;
3037 
3038 	if (V_dyn_count == 0)
3039 		return (0);
3040 
3041 	/*
3042 	 * IPFW_UH_RLOCK guarantees that another userland request
3043 	 * and the callout thread will not delete entries from the
3044 	 * state lists.
3045 	 */
3046 	IPFW_UH_RLOCK_ASSERT(chain);
3047 
3048 	ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
3049 	if (ctlv == NULL)
3050 		return (ENOMEM);
3051 	ctlv->head.type = IPFW_TLV_DYNSTATE_LIST;
3052 	ctlv->objsize = sizeof(ipfw_obj_dyntlv);
3053 	last = NULL;
3054 
3055 #define	DYN_EXPORT_STATES(s, af, h, b)				\
3056 	CK_SLIST_FOREACH(s, &V_dyn_ ## h[b], entry) {		\
3057 		dst = (ipfw_obj_dyntlv *)ipfw_get_sopt_space(sd,	\
3058 		    sizeof(ipfw_obj_dyntlv));			\
3059 		if (dst == NULL)				\
3060 			return (ENOMEM);			\
3061 		dyn_export_ ## af ## _state(s, &dst->state);	\
3062 		dst->head.length = sizeof(ipfw_obj_dyntlv);	\
3063 		dst->head.type = IPFW_TLV_DYN_ENT;		\
3064 		last = dst;					\
3065 	}
3066 
3067 	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
3068 		DYN_EXPORT_STATES(s4, ipv4, ipv4_parent, bucket);
3069 		DYN_EXPORT_STATES(s4, ipv4, ipv4, bucket);
3070 #ifdef INET6
3071 		DYN_EXPORT_STATES(s6, ipv6, ipv6_parent, bucket);
3072 		DYN_EXPORT_STATES(s6, ipv6, ipv6, bucket);
3073 #endif /* INET6 */
3074 	}
3075 
3076 	/* mark last dynamic rule */
3077 	if (last != NULL)
3078 		last->head.flags = IPFW_DF_LAST; /* XXX: unused */
3079 	return (0);
3080 #undef DYN_EXPORT_STATES
3081 }
3082 
3083 /*
3084  * Fill the given buffer with dynamic states (legacy format).
3085  * IPFW_UH_RLOCK has to be held while calling.
3086  */
3087 void
3088 ipfw_get_dynamic(struct ip_fw_chain *chain, char **pbp, const char *ep)
3089 {
3090 #ifdef INET6
3091 	struct dyn_ipv6_state *s6;
3092 #endif
3093 	struct dyn_ipv4_state *s4;
3094 	ipfw_dyn_rule *p, *last = NULL;
3095 	char *bp;
3096 	uint32_t bucket;
3097 
3098 	if (V_dyn_count == 0)
3099 		return;
3100 	bp = *pbp;
3101 
3102 	IPFW_UH_RLOCK_ASSERT(chain);
3103 
3104 #define	DYN_EXPORT_STATES(s, af, head, b)			\
3105 	CK_SLIST_FOREACH(s, &V_dyn_ ## head[b], entry) {	\
3106 		if (bp + sizeof(*p) > ep)			\
3107 			break;					\
3108 		p = (ipfw_dyn_rule *)bp;			\
3109 		dyn_export_ ## af ## _state(s, p);		\
3110 		last = p;					\
3111 		bp += sizeof(*p);				\
3112 	}
3113 
3114 	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
3115 		DYN_EXPORT_STATES(s4, ipv4, ipv4_parent, bucket);
3116 		DYN_EXPORT_STATES(s4, ipv4, ipv4, bucket);
3117 #ifdef INET6
3118 		DYN_EXPORT_STATES(s6, ipv6, ipv6_parent, bucket);
3119 		DYN_EXPORT_STATES(s6, ipv6, ipv6, bucket);
3120 #endif /* INET6 */
3121 	}
3122 
3123 	if (last != NULL) /* mark last dynamic rule */
3124 		last->next = NULL;
3125 	*pbp = bp;
3126 #undef DYN_EXPORT_STATES
3127 }
3128 
3129 void
3130 ipfw_dyn_init(struct ip_fw_chain *chain)
3131 {
3132 
3133 #ifdef IPFIREWALL_JENKINSHASH
3134 	V_dyn_hashseed = arc4random();
3135 #endif
3136 	V_dyn_max = 16384;		/* max # of states */
3137 	V_dyn_parent_max = 4096;	/* max # of parent states */
3138 	V_dyn_buckets_max = 8192;	/* must be power of 2 */
3139 
3140 	V_dyn_ack_lifetime = 300;
3141 	V_dyn_syn_lifetime = 20;
3142 	V_dyn_fin_lifetime = 1;
3143 	V_dyn_rst_lifetime = 1;
3144 	V_dyn_udp_lifetime = 10;
3145 	V_dyn_short_lifetime = 5;
3146 
3147 	V_dyn_keepalive_interval = 20;
3148 	V_dyn_keepalive_period = 5;
3149 	V_dyn_keepalive = 1;		/* send keepalives */
3150 	V_dyn_keepalive_last = time_uptime;
3151 
3152 	V_dyn_data_zone = uma_zcreate("IPFW dynamic states data",
3153 	    sizeof(struct dyn_data), NULL, NULL, NULL, NULL,
3154 	    UMA_ALIGN_PTR, 0);
3155 	uma_zone_set_max(V_dyn_data_zone, V_dyn_max);
3156 
3157 	V_dyn_parent_zone = uma_zcreate("IPFW parent dynamic states",
3158 	    sizeof(struct dyn_parent), NULL, NULL, NULL, NULL,
3159 	    UMA_ALIGN_PTR, 0);
3160 	uma_zone_set_max(V_dyn_parent_zone, V_dyn_parent_max);
3161 
3162 	SLIST_INIT(&V_dyn_expired_ipv4);
3163 	V_dyn_ipv4 = NULL;
3164 	V_dyn_ipv4_parent = NULL;
3165 	V_dyn_ipv4_zone = uma_zcreate("IPFW IPv4 dynamic states",
3166 	    sizeof(struct dyn_ipv4_state), NULL, NULL, NULL, NULL,
3167 	    UMA_ALIGN_PTR, 0);
3168 
3169 #ifdef INET6
3170 	SLIST_INIT(&V_dyn_expired_ipv6);
3171 	V_dyn_ipv6 = NULL;
3172 	V_dyn_ipv6_parent = NULL;
3173 	V_dyn_ipv6_zone = uma_zcreate("IPFW IPv6 dynamic states",
3174 	    sizeof(struct dyn_ipv6_state), NULL, NULL, NULL, NULL,
3175 	    UMA_ALIGN_PTR, 0);
3176 #endif
3177 
3178 	/* Initialize buckets. */
3179 	V_curr_dyn_buckets = 0;
3180 	V_dyn_bucket_lock = NULL;
3181 	dyn_grow_hashtable(chain, 256);
3182 
3183 	if (IS_DEFAULT_VNET(curvnet))
3184 		dyn_hp_cache = malloc(mp_ncpus * sizeof(void *), M_IPFW,
3185 		    M_WAITOK | M_ZERO);
3186 
3187 	DYN_EXPIRED_LOCK_INIT();
3188 	callout_init(&V_dyn_timeout, 1);
3189 	callout_reset(&V_dyn_timeout, hz, dyn_tick, curvnet);
3190 	IPFW_ADD_OBJ_REWRITER(IS_DEFAULT_VNET(curvnet), dyn_opcodes);
3191 }
3192 
3193 void
3194 ipfw_dyn_uninit(int pass)
3195 {
3196 #ifdef INET6
3197 	struct dyn_ipv6_state *s6;
3198 #endif
3199 	struct dyn_ipv4_state *s4;
3200 	int bucket;
3201 
3202 	if (pass == 0) {
3203 		callout_drain(&V_dyn_timeout);
3204 		return;
3205 	}
3206 	IPFW_DEL_OBJ_REWRITER(IS_DEFAULT_VNET(curvnet), dyn_opcodes);
3207 	DYN_EXPIRED_LOCK_DESTROY();
3208 
3209 #define	DYN_FREE_STATES_FORCED(CK, s, af, name, en) do {	\
3210 	while ((s = CK ## SLIST_FIRST(&V_dyn_ ## name)) != NULL) {	\
3211 		CK ## SLIST_REMOVE_HEAD(&V_dyn_ ## name, en);	\
3212 		if (s->type == O_LIMIT_PARENT)			\
3213 			uma_zfree(V_dyn_parent_zone, s->limit);	\
3214 		else						\
3215 			uma_zfree(V_dyn_data_zone, s->data);	\
3216 		uma_zfree(V_dyn_ ## af ## _zone, s);		\
3217 	}							\
3218 } while (0)
3219 	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
3220 		DYN_BUCKET_LOCK_DESTROY(V_dyn_bucket_lock, bucket);
3221 
3222 		DYN_FREE_STATES_FORCED(CK_, s4, ipv4, ipv4[bucket], entry);
3223 		DYN_FREE_STATES_FORCED(CK_, s4, ipv4, ipv4_parent[bucket],
3224 		    entry);
3225 #ifdef INET6
3226 		DYN_FREE_STATES_FORCED(CK_, s6, ipv6, ipv6[bucket], entry);
3227 		DYN_FREE_STATES_FORCED(CK_, s6, ipv6, ipv6_parent[bucket],
3228 		    entry);
3229 #endif /* INET6 */
3230 	}
3231 	DYN_FREE_STATES_FORCED(, s4, ipv4, expired_ipv4, expired);
3232 #ifdef INET6
3233 	DYN_FREE_STATES_FORCED(, s6, ipv6, expired_ipv6, expired);
3234 #endif
3235 #undef DYN_FREE_STATES_FORCED
3236 
3237 	uma_zdestroy(V_dyn_ipv4_zone);
3238 	uma_zdestroy(V_dyn_data_zone);
3239 	uma_zdestroy(V_dyn_parent_zone);
3240 #ifdef INET6
3241 	uma_zdestroy(V_dyn_ipv6_zone);
3242 	free(V_dyn_ipv6, M_IPFW);
3243 	free(V_dyn_ipv6_parent, M_IPFW);
3244 	free(V_dyn_ipv6_add, M_IPFW);
3245 	free(V_dyn_ipv6_parent_add, M_IPFW);
3246 	free(V_dyn_ipv6_del, M_IPFW);
3247 	free(V_dyn_ipv6_parent_del, M_IPFW);
3248 #endif
3249 	free(V_dyn_bucket_lock, M_IPFW);
3250 	free(V_dyn_ipv4, M_IPFW);
3251 	free(V_dyn_ipv4_parent, M_IPFW);
3252 	free(V_dyn_ipv4_add, M_IPFW);
3253 	free(V_dyn_ipv4_parent_add, M_IPFW);
3254 	free(V_dyn_ipv4_del, M_IPFW);
3255 	free(V_dyn_ipv4_parent_del, M_IPFW);
3256 	if (IS_DEFAULT_VNET(curvnet))
3257 		free(dyn_hp_cache, M_IPFW);
3258 }
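/*
 * Usage note (illustrative): the V_dyn_* defaults assigned in
 * ipfw_dyn_init() are runtime-tunable through sysctl knobs declared
 * elsewhere in this file. Assuming the usual net.inet.ip.fw prefix,
 * a tuning session could look like this (values are examples only):
 *
 *	sysctl net.inet.ip.fw.dyn_buckets=1024
 *	sysctl net.inet.ip.fw.dyn_max=65535
 *	sysctl net.inet.ip.fw.dyn_keepalive=1
 */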