/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017-2018 Yandex LLC
 * Copyright (c) 2017-2018 Andrey V. Elsukov <ae@FreeBSD.org>
 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipfw.h"
#ifndef INET
#error IPFIREWALL requires INET.
#endif /* INET */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/pcpu.h>
#include <sys/queue.h>
#include <sys/rmlock.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>

#include <netinet/ip6.h>	/* IN6_ARE_ADDR_EQUAL */
#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#endif

#include <netpfil/ipfw/ip_fw_private.h>

#include <machine/in_cksum.h>	/* XXX for in_cksum */

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

/*
 * Description of dynamic states.
 *
 * Dynamic states are stored in lists accessed through hash tables
 * whose size is curr_dyn_buckets. This value can be modified through
 * the sysctl variable dyn_buckets.
 *
 * Currently there are four tables: dyn_ipv4, dyn_ipv6, dyn_ipv4_parent,
 * and dyn_ipv6_parent.
 *
 * When a packet is received, its address fields are hashed, then matched
 * against the entries in the corresponding list by addr_type.
 * Dynamic states can be used for different purposes:
 *  + stateful rules;
 *  + enforcing limits on the number of sessions;
 *  + in-kernel NAT (not implemented yet)
 *
 * The lifetime of dynamic states is regulated by dyn_*_lifetime,
 * measured in seconds and depending on the flags.
 *
 * The total number of dynamic states is equal to the UMA zone items count.
 * The max number of dynamic states is dyn_max. When we reach
 * the maximum number of states we do not create any more. This is
 * done to avoid consuming too much memory, but also too much
 * time when searching on each packet (ideally, we should try instead
 * to put a limit on the length of the list on each bucket...).
 *
 * Each state holds a pointer to the parent ipfw rule so we know what
 * action to perform. Dynamic rules are removed when the parent rule is
 * deleted.
 *
 * There are some limitations with dynamic rules -- we do not
 * obey the 'randomized match', and we do not do multiple
 * passes through the firewall. XXX check the latter!!!
 */
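
/*
 * Illustrative example (not part of the original sources, assuming a
 * stock ipfw(8) userland): a minimal ruleset exercising the state types
 * handled in this file might be
 *
 *	ipfw add 100 check-state
 *	ipfw add 200 allow tcp from any to me 22 setup limit src-addr 10
 *	ipfw add 300 allow ip from me to any keep-state
 *
 * Rule 200 creates an O_LIMIT_PARENT state per masked flow id plus an
 * O_LIMIT child state per session; rule 300 creates O_KEEP_STATE states.
 */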

/* By default use jenkins hash function */
#define	IPFIREWALL_JENKINSHASH

#define	DYN_COUNTER_INC(d, dir, pktlen)	do {	\
	(d)->pcnt_ ## dir++;			\
	(d)->bcnt_ ## dir += pktlen;		\
} while (0)

#define	DYN_REFERENCED		0x01
/*
 * The DYN_REFERENCED flag is used to show that a state keeps a reference
 * to a named object, and this reference should be released when the state
 * becomes expired.
 */

struct dyn_data {
	void		*parent;	/* pointer to parent rule */
	uint32_t	chain_id;	/* cached ruleset id */
	uint32_t	f_pos;		/* cached rule index */

	uint32_t	hashval;	/* hash value used for hash resize */
	uint16_t	fibnum;		/* fib used to send keepalives */
	uint8_t		_pad[3];
	uint8_t		flags;		/* internal flags */
	uint16_t	rulenum;	/* parent rule number */
	uint32_t	ruleid;		/* parent rule id */

	uint32_t	state;		/* TCP session state and flags */
	uint32_t	ack_fwd;	/* most recent ACKs in forward */
	uint32_t	ack_rev;	/* and reverse direction (used */
					/* to generate keepalives) */
	uint32_t	sync;		/* synchronization time */
	uint32_t	expire;		/* expire time */

	uint64_t	pcnt_fwd;	/* packets counter in forward */
	uint64_t	bcnt_fwd;	/* bytes counter in forward */
	uint64_t	pcnt_rev;	/* packets counter in reverse */
	uint64_t	bcnt_rev;	/* bytes counter in reverse */
};

#define	DPARENT_COUNT_DEC(p)	do {		\
	MPASS(p->count > 0);			\
	ck_pr_dec_32(&(p)->count);		\
} while (0)
#define	DPARENT_COUNT_INC(p)	ck_pr_inc_32(&(p)->count)
#define	DPARENT_COUNT(p)	ck_pr_load_32(&(p)->count)
struct dyn_parent {
	void		*parent;	/* pointer to parent rule */
	uint32_t	count;		/* number of linked states */
	uint8_t		_pad[2];
	uint16_t	rulenum;	/* parent rule number */
	uint32_t	ruleid;		/* parent rule id */
	uint32_t	hashval;	/* hash value used for hash resize */
	uint32_t	expire;		/* expire time */
};

struct dyn_ipv4_state {
	uint8_t		type;		/* State type */
	uint8_t		proto;		/* UL Protocol */
	uint16_t	kidx;		/* named object index */
	uint16_t	sport, dport;	/* ULP source and destination ports */
	in_addr_t	src, dst;	/* IPv4 source and destination */

	union {
		struct dyn_data	*data;
		struct dyn_parent *limit;
	};
	CK_SLIST_ENTRY(dyn_ipv4_state)	entry;
	SLIST_ENTRY(dyn_ipv4_state)	expired;
};
CK_SLIST_HEAD(dyn_ipv4ck_slist, dyn_ipv4_state);
VNET_DEFINE_STATIC(struct dyn_ipv4ck_slist *, dyn_ipv4);
VNET_DEFINE_STATIC(struct dyn_ipv4ck_slist *, dyn_ipv4_parent);

SLIST_HEAD(dyn_ipv4_slist, dyn_ipv4_state);
VNET_DEFINE_STATIC(struct dyn_ipv4_slist, dyn_expired_ipv4);
#define	V_dyn_ipv4		VNET(dyn_ipv4)
#define	V_dyn_ipv4_parent	VNET(dyn_ipv4_parent)
#define	V_dyn_expired_ipv4	VNET(dyn_expired_ipv4)

#ifdef INET6
struct dyn_ipv6_state {
	uint8_t		type;		/* State type */
	uint8_t		proto;		/* UL Protocol */
	uint16_t	kidx;		/* named object index */
	uint16_t	sport, dport;	/* ULP source and destination ports */
	struct in6_addr	src, dst;	/* IPv6 source and destination */
	uint32_t	zoneid;		/* IPv6 scope zone id */
	union {
		struct dyn_data	*data;
		struct dyn_parent *limit;
	};
	CK_SLIST_ENTRY(dyn_ipv6_state)	entry;
	SLIST_ENTRY(dyn_ipv6_state)	expired;
};
CK_SLIST_HEAD(dyn_ipv6ck_slist, dyn_ipv6_state);
VNET_DEFINE_STATIC(struct dyn_ipv6ck_slist *, dyn_ipv6);
VNET_DEFINE_STATIC(struct dyn_ipv6ck_slist *, dyn_ipv6_parent);

SLIST_HEAD(dyn_ipv6_slist, dyn_ipv6_state);
VNET_DEFINE_STATIC(struct dyn_ipv6_slist, dyn_expired_ipv6);
#define	V_dyn_ipv6		VNET(dyn_ipv6)
#define	V_dyn_ipv6_parent	VNET(dyn_ipv6_parent)
#define	V_dyn_expired_ipv6	VNET(dyn_expired_ipv6)
#endif /* INET6 */

/*
 * A per-CPU pointer indicates that the specified state is currently in use
 * and must not be reclaimed by the expiration callout.
 */
static void **dyn_hp_cache;
DPCPU_DEFINE_STATIC(void *, dyn_hp);
#define	DYNSTATE_GET(cpu)	ck_pr_load_ptr(DPCPU_ID_PTR((cpu), dyn_hp))
#define	DYNSTATE_PROTECT(v)	ck_pr_store_ptr(DPCPU_PTR(dyn_hp), (v))
#define	DYNSTATE_RELEASE()	DYNSTATE_PROTECT(NULL)
#define	DYNSTATE_CRITICAL_ENTER()	critical_enter()
#define	DYNSTATE_CRITICAL_EXIT()	do {	\
	DYNSTATE_RELEASE();			\
	critical_exit();			\
} while (0)

/*
 * We keep two version numbers per bucket: one is updated when a new entry
 * is added to the list, the second when an entry is deleted from it.
 * Both versions are updated under the bucket lock.
 *
 * The bucket "add" version number is used to detect that, in the time
 * between a state lookup (i.e. ipfw_dyn_lookup_state()) and the following
 * state creation (i.e. ipfw_dyn_install_state()), no concurrent thread
 * installed a state in this bucket. Using this info we can avoid an
 * additional state lookup, because we are sure that we will not install
 * the state twice.
 *
 * Also, by tracking the bucket "del" version during lookup we can be sure
 * that a state entry was not unlinked and freed between the moment we read
 * the state pointer and the moment we protected it with the hazard pointer.
 *
 * An entry unlinked from the CK list remains unchanged until it is freed.
 * Unlinked entries are linked into expired lists using the "expired" field.
 */
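
/*
 * Illustrative sketch (not from the original source): a writer that wants
 * to install a state after a failed lockless lookup is expected to do,
 * in pseudo-code,
 *
 *	version = DYN_BUCKET_VERSION(bucket, ipv4_add);
 *	if (lockless lookup found nothing) {
 *		DYN_BUCKET_LOCK(bucket);
 *		if (version != DYN_BUCKET_VERSION(bucket, ipv4_add))
 *			repeat the lookup under the bucket lock;
 *		if (still not found)
 *			insert the state and bump the "add" version;
 *		DYN_BUCKET_UNLOCK(bucket);
 *	}
 *
 * This mirrors what dyn_lookup_ipv4_state() and dyn_add_ipv4_state()
 * below actually do.
 */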

/*
 * dyn_expire_lock is used to protect access to dyn_expired_xxx lists.
 * dyn_bucket_lock is used to get write access to lists in specific bucket.
 * Currently one dyn_bucket_lock is used for all ipv4, ipv4_parent, ipv6,
 * and ipv6_parent lists.
 */
VNET_DEFINE_STATIC(struct mtx, dyn_expire_lock);
VNET_DEFINE_STATIC(struct mtx *, dyn_bucket_lock);
#define	V_dyn_expire_lock	VNET(dyn_expire_lock)
#define	V_dyn_bucket_lock	VNET(dyn_bucket_lock)

/*
 * Bucket's add/delete generation versions.
 */
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv4_add);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv4_del);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv4_parent_add);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv4_parent_del);
#define	V_dyn_ipv4_add		VNET(dyn_ipv4_add)
#define	V_dyn_ipv4_del		VNET(dyn_ipv4_del)
#define	V_dyn_ipv4_parent_add	VNET(dyn_ipv4_parent_add)
#define	V_dyn_ipv4_parent_del	VNET(dyn_ipv4_parent_del)

#ifdef INET6
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv6_add);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv6_del);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv6_parent_add);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv6_parent_del);
#define	V_dyn_ipv6_add		VNET(dyn_ipv6_add)
#define	V_dyn_ipv6_del		VNET(dyn_ipv6_del)
#define	V_dyn_ipv6_parent_add	VNET(dyn_ipv6_parent_add)
#define	V_dyn_ipv6_parent_del	VNET(dyn_ipv6_parent_del)
#endif /* INET6 */

#define	DYN_BUCKET(h, b)		((h) & (b - 1))
#define	DYN_BUCKET_VERSION(b, v)	ck_pr_load_32(&V_dyn_ ## v[(b)])
#define	DYN_BUCKET_VERSION_BUMP(b, v)	ck_pr_inc_32(&V_dyn_ ## v[(b)])
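
/*
 * Note (illustrative, not from the original source): DYN_BUCKET() assumes
 * the number of buckets is a power of two, which sysctl_dyn_buckets()
 * below enforces; e.g. with curr_dyn_buckets == 1024 the bucket index is
 * simply (hashval & 1023).
 */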

#define	DYN_BUCKET_LOCK_INIT(lock, b)	\
    mtx_init(&lock[(b)], "IPFW dynamic bucket", NULL, MTX_DEF)
#define	DYN_BUCKET_LOCK_DESTROY(lock, b)	mtx_destroy(&lock[(b)])
#define	DYN_BUCKET_LOCK(b)	mtx_lock(&V_dyn_bucket_lock[(b)])
#define	DYN_BUCKET_UNLOCK(b)	mtx_unlock(&V_dyn_bucket_lock[(b)])
#define	DYN_BUCKET_ASSERT(b)	mtx_assert(&V_dyn_bucket_lock[(b)], MA_OWNED)

#define	DYN_EXPIRED_LOCK_INIT()		\
    mtx_init(&V_dyn_expire_lock, "IPFW expired states list", NULL, MTX_DEF)
#define	DYN_EXPIRED_LOCK_DESTROY()	mtx_destroy(&V_dyn_expire_lock)
#define	DYN_EXPIRED_LOCK()		mtx_lock(&V_dyn_expire_lock)
#define	DYN_EXPIRED_UNLOCK()		mtx_unlock(&V_dyn_expire_lock)

VNET_DEFINE_STATIC(uint32_t, dyn_buckets_max);
VNET_DEFINE_STATIC(uint32_t, curr_dyn_buckets);
VNET_DEFINE_STATIC(struct callout, dyn_timeout);
#define	V_dyn_buckets_max	VNET(dyn_buckets_max)
#define	V_curr_dyn_buckets	VNET(curr_dyn_buckets)
#define	V_dyn_timeout		VNET(dyn_timeout)

/* Maximum length of states chain in a bucket */
VNET_DEFINE_STATIC(uint32_t, curr_max_length);
#define	V_curr_max_length	VNET(curr_max_length)

VNET_DEFINE_STATIC(uint32_t, dyn_keep_states);
#define	V_dyn_keep_states	VNET(dyn_keep_states)

VNET_DEFINE_STATIC(uma_zone_t, dyn_data_zone);
VNET_DEFINE_STATIC(uma_zone_t, dyn_parent_zone);
VNET_DEFINE_STATIC(uma_zone_t, dyn_ipv4_zone);
#ifdef INET6
VNET_DEFINE_STATIC(uma_zone_t, dyn_ipv6_zone);
#define	V_dyn_ipv6_zone		VNET(dyn_ipv6_zone)
#endif /* INET6 */
#define	V_dyn_data_zone		VNET(dyn_data_zone)
#define	V_dyn_parent_zone	VNET(dyn_parent_zone)
#define	V_dyn_ipv4_zone		VNET(dyn_ipv4_zone)

/*
 * Timeouts for various events in handling dynamic rules.
 */
VNET_DEFINE_STATIC(uint32_t, dyn_ack_lifetime);
VNET_DEFINE_STATIC(uint32_t, dyn_syn_lifetime);
VNET_DEFINE_STATIC(uint32_t, dyn_fin_lifetime);
VNET_DEFINE_STATIC(uint32_t, dyn_rst_lifetime);
VNET_DEFINE_STATIC(uint32_t, dyn_udp_lifetime);
VNET_DEFINE_STATIC(uint32_t, dyn_short_lifetime);

#define	V_dyn_ack_lifetime	VNET(dyn_ack_lifetime)
#define	V_dyn_syn_lifetime	VNET(dyn_syn_lifetime)
#define	V_dyn_fin_lifetime	VNET(dyn_fin_lifetime)
#define	V_dyn_rst_lifetime	VNET(dyn_rst_lifetime)
#define	V_dyn_udp_lifetime	VNET(dyn_udp_lifetime)
#define	V_dyn_short_lifetime	VNET(dyn_short_lifetime)

/*
 * Keepalives are sent if dyn_keepalive is set. They are sent every
 * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
 * seconds of the lifetime of a rule.
 * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
 * than dyn_keepalive_period.
 */
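
/*
 * Example (illustrative, not from the original source): with
 * dyn_keepalive_period = 5 and dyn_keepalive_interval = 20, a state that
 * would expire at time T gets keepalive probes at roughly T-20, T-15,
 * T-10 and T-5; replies that match the state refresh its expire time.
 */
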
VNET_DEFINE_STATIC(uint32_t, dyn_keepalive_interval);
VNET_DEFINE_STATIC(uint32_t, dyn_keepalive_period);
VNET_DEFINE_STATIC(uint32_t, dyn_keepalive);
VNET_DEFINE_STATIC(time_t, dyn_keepalive_last);

#define	V_dyn_keepalive_interval	VNET(dyn_keepalive_interval)
#define	V_dyn_keepalive_period	VNET(dyn_keepalive_period)
#define	V_dyn_keepalive		VNET(dyn_keepalive)
#define	V_dyn_keepalive_last	VNET(dyn_keepalive_last)

VNET_DEFINE_STATIC(uint32_t, dyn_max);		/* max # of dynamic states */
VNET_DEFINE_STATIC(uint32_t, dyn_count);	/* number of states */
VNET_DEFINE_STATIC(uint32_t, dyn_parent_max);	/* max # of parent states */
VNET_DEFINE_STATIC(uint32_t, dyn_parent_count);	/* number of parent states */

#define	V_dyn_max		VNET(dyn_max)
#define	V_dyn_count		VNET(dyn_count)
#define	V_dyn_parent_max	VNET(dyn_parent_max)
#define	V_dyn_parent_count	VNET(dyn_parent_count)

#define	DYN_COUNT_DEC(name)	do {		\
	MPASS((V_ ## name) > 0);		\
	ck_pr_dec_32(&(V_ ## name));		\
} while (0)
#define	DYN_COUNT_INC(name)	ck_pr_inc_32(&(V_ ## name))
#define	DYN_COUNT(name)		ck_pr_load_32(&(V_ ## name))

static time_t last_log;	/* Log ratelimiting */

/*
 * Get/set maximum number of dynamic states in given VNET instance.
 */
static int
sysctl_dyn_max(SYSCTL_HANDLER_ARGS)
{
	uint32_t nstates;
	int error;

	nstates = V_dyn_max;
	error = sysctl_handle_32(oidp, &nstates, 0, req);
	/* Read operation or some error */
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	V_dyn_max = nstates;
	uma_zone_set_max(V_dyn_data_zone, V_dyn_max);
	return (0);
}

static int
sysctl_dyn_parent_max(SYSCTL_HANDLER_ARGS)
{
	uint32_t nstates;
	int error;

	nstates = V_dyn_parent_max;
	error = sysctl_handle_32(oidp, &nstates, 0, req);
	/* Read operation or some error */
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	V_dyn_parent_max = nstates;
	uma_zone_set_max(V_dyn_parent_zone, V_dyn_parent_max);
	return (0);
}

static int
sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS)
{
	uint32_t nbuckets;
	int error;

	nbuckets = V_dyn_buckets_max;
	error = sysctl_handle_32(oidp, &nbuckets, 0, req);
	/* Read operation or some error */
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	if (nbuckets > 256)
		V_dyn_buckets_max = 1 << fls(nbuckets - 1);
	else
		return (EINVAL);
	return (0);
}
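
/*
 * Example (illustrative, not from the original source): writing 1000 to
 * net.inet.ip.fw.dyn_buckets stores 1 << fls(999) == 1024 in
 * V_dyn_buckets_max, while values of 256 or less are rejected with
 * EINVAL. The new value takes effect when the hash table is actually
 * resized.
 */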

SYSCTL_DECL(_net_inet_ip_fw);

SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_count,
    CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(dyn_count), 0,
    "Current number of dynamic states.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_parent_count,
    CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(dyn_parent_count), 0,
    "Current number of parent states.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets,
    CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(curr_dyn_buckets), 0,
    "Current number of buckets for states hash table.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, curr_max_length,
    CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(curr_max_length), 0,
    "Current maximum length of states chains in hash buckets.");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_buckets,
    CTLFLAG_VNET | CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_dyn_buckets, "IU",
    "Max number of buckets for dynamic states hash table.");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_max,
    CTLFLAG_VNET | CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_dyn_max, "IU",
    "Max number of dynamic states.");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_parent_max,
    CTLFLAG_VNET | CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_dyn_parent_max, "IU",
    "Max number of parent dynamic states.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_ack_lifetime), 0,
    "Lifetime of dynamic states for TCP ACK.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_syn_lifetime), 0,
    "Lifetime of dynamic states for TCP SYN.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_fin_lifetime), 0,
    "Lifetime of dynamic states for TCP FIN.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_rst_lifetime), 0,
    "Lifetime of dynamic states for TCP RST.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_udp_lifetime), 0,
    "Lifetime of dynamic states for UDP.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_short_lifetime), 0,
    "Lifetime of dynamic states for other situations.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_keepalive,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_keepalive), 0,
    "Enable keepalives for dynamic states.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_keep_states,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_keep_states), 0,
    "Do not flush dynamic states on rule deletion");

#ifdef IPFIREWALL_DYNDEBUG
#define	DYN_DEBUG(fmt, ...)	do {			\
	printf("%s: " fmt "\n", __func__, __VA_ARGS__);	\
} while (0)
#else
#define	DYN_DEBUG(fmt, ...)
#endif /* !IPFIREWALL_DYNDEBUG */

#ifdef INET6
/* Functions to work with IPv6 states */
static struct dyn_ipv6_state *dyn_lookup_ipv6_state(
    const struct ipfw_flow_id *, uint32_t, const void *,
    struct ipfw_dyn_info *, int);
static int dyn_lookup_ipv6_state_locked(const struct ipfw_flow_id *,
    uint32_t, const void *, int, uint32_t, uint16_t);
static struct dyn_ipv6_state *dyn_alloc_ipv6_state(
    const struct ipfw_flow_id *, uint32_t, uint16_t, uint8_t);
static int dyn_add_ipv6_state(void *, uint32_t, uint16_t,
    const struct ipfw_flow_id *, uint32_t, const void *, int, uint32_t,
    struct ipfw_dyn_info *, uint16_t, uint16_t, uint8_t);
static void dyn_export_ipv6_state(const struct dyn_ipv6_state *,
    ipfw_dyn_rule *);

static uint32_t dyn_getscopeid(const struct ip_fw_args *);
static void dyn_make_keepalive_ipv6(struct mbuf *, const struct in6_addr *,
    const struct in6_addr *, uint32_t, uint32_t, uint32_t, uint16_t,
    uint16_t);
static void dyn_enqueue_keepalive_ipv6(struct mbufq *,
    const struct dyn_ipv6_state *);
static void dyn_send_keepalive_ipv6(struct ip_fw_chain *);

static struct dyn_ipv6_state *dyn_lookup_ipv6_parent(
    const struct ipfw_flow_id *, uint32_t, const void *, uint32_t, uint16_t,
    uint32_t);
static struct dyn_ipv6_state *dyn_lookup_ipv6_parent_locked(
    const struct ipfw_flow_id *, uint32_t, const void *, uint32_t, uint16_t,
    uint32_t);
static struct dyn_ipv6_state *dyn_add_ipv6_parent(void *, uint32_t, uint16_t,
    const struct ipfw_flow_id *, uint32_t, uint32_t, uint32_t, uint16_t);
#endif /* INET6 */

/* Functions to work with limit states */
static void *dyn_get_parent_state(const struct ipfw_flow_id *, uint32_t,
    struct ip_fw *, uint32_t, uint32_t, uint16_t);
static struct dyn_ipv4_state *dyn_lookup_ipv4_parent(
    const struct ipfw_flow_id *, const void *, uint32_t, uint16_t, uint32_t);
static struct dyn_ipv4_state *dyn_lookup_ipv4_parent_locked(
    const struct ipfw_flow_id *, const void *, uint32_t, uint16_t, uint32_t);
static struct dyn_parent *dyn_alloc_parent(void *, uint32_t, uint16_t,
    uint32_t);
static struct dyn_ipv4_state *dyn_add_ipv4_parent(void *, uint32_t, uint16_t,
    const struct ipfw_flow_id *, uint32_t, uint32_t, uint16_t);

static void dyn_tick(void *);
static void dyn_expire_states(struct ip_fw_chain *, ipfw_range_tlv *);
static void dyn_free_states(struct ip_fw_chain *);
static void dyn_export_parent(const struct dyn_parent *, uint16_t, uint8_t,
    ipfw_dyn_rule *);
static void dyn_export_data(const struct dyn_data *, uint16_t, uint8_t,
    uint8_t, ipfw_dyn_rule *);
static uint32_t dyn_update_tcp_state(struct dyn_data *,
    const struct ipfw_flow_id *, const struct tcphdr *, int);
static void dyn_update_proto_state(struct dyn_data *,
    const struct ipfw_flow_id *, const void *, int, int);

/* Functions to work with IPv4 states */
static struct dyn_ipv4_state *dyn_lookup_ipv4_state(
    const struct ipfw_flow_id *, const void *, struct ipfw_dyn_info *, int);
static int dyn_lookup_ipv4_state_locked(const struct ipfw_flow_id *,
    const void *, int, uint32_t, uint16_t);
static struct dyn_ipv4_state *dyn_alloc_ipv4_state(
    const struct ipfw_flow_id *, uint16_t, uint8_t);
static int dyn_add_ipv4_state(void *, uint32_t, uint16_t,
    const struct ipfw_flow_id *, const void *, int, uint32_t,
    struct ipfw_dyn_info *, uint16_t, uint16_t, uint8_t);
static void dyn_export_ipv4_state(const struct dyn_ipv4_state *,
    ipfw_dyn_rule *);

/*
 * Named states support.
 */
static char *default_state_name = "default";
struct dyn_state_obj {
	struct named_object no;
	char name[64];
};

#define	DYN_STATE_OBJ(ch, cmd)	\
    ((struct dyn_state_obj *)SRV_OBJECT(ch, (cmd)->arg1))
/*
 * Classifier callback.
 * Return 0 if the opcode contains an object that should be referenced
 * or rewritten.
 */
static int
dyn_classify(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
{

	DYN_DEBUG("opcode %d, arg1 %d", cmd->opcode, cmd->arg1);
	/* Don't rewrite "check-state any" */
	if (cmd->arg1 == 0 &&
	    cmd->opcode == O_CHECK_STATE)
		return (1);

	*puidx = cmd->arg1;
	*ptype = 0;
	return (0);
}

static void
dyn_update(ipfw_insn *cmd, uint16_t idx)
{

	cmd->arg1 = idx;
	DYN_DEBUG("opcode %d, arg1 %d", cmd->opcode, cmd->arg1);
}

static int
dyn_findbyname(struct ip_fw_chain *ch, struct tid_info *ti,
    struct named_object **pno)
{
	ipfw_obj_ntlv *ntlv;
	const char *name;

	DYN_DEBUG("uidx %d", ti->uidx);
	if (ti->uidx != 0) {
		if (ti->tlvs == NULL)
			return (EINVAL);
		/* Search ntlv in the buffer provided by user */
		ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx,
		    IPFW_TLV_STATE_NAME);
		if (ntlv == NULL)
			return (EINVAL);
		name = ntlv->name;
	} else
		name = default_state_name;
	/*
	 * Search for a named object with the corresponding name.
	 * Since state objects are global - ignore the set value
	 * and use zero instead.
	 */
	*pno = ipfw_objhash_lookup_name_type(CHAIN_TO_SRV(ch), 0,
	    IPFW_TLV_STATE_NAME, name);
	/*
	 * We always return success here.
	 * The caller will check *pno and mark the object as unresolved,
	 * then it will automatically create the "default" object.
	 */
	return (0);
}

static struct named_object *
dyn_findbykidx(struct ip_fw_chain *ch, uint16_t idx)
{

	DYN_DEBUG("kidx %d", idx);
	return (ipfw_objhash_lookup_kidx(CHAIN_TO_SRV(ch), idx));
}

static int
dyn_create(struct ip_fw_chain *ch, struct tid_info *ti,
    uint16_t *pkidx)
{
	struct namedobj_instance *ni;
	struct dyn_state_obj *obj;
	struct named_object *no;
	ipfw_obj_ntlv *ntlv;
	char *name;

	DYN_DEBUG("uidx %d", ti->uidx);
	if (ti->uidx != 0) {
		if (ti->tlvs == NULL)
			return (EINVAL);
		ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx,
		    IPFW_TLV_STATE_NAME);
		if (ntlv == NULL)
			return (EINVAL);
		name = ntlv->name;
	} else
		name = default_state_name;

	ni = CHAIN_TO_SRV(ch);
	obj = malloc(sizeof(*obj), M_IPFW, M_WAITOK | M_ZERO);
	obj->no.name = obj->name;
	obj->no.etlv = IPFW_TLV_STATE_NAME;
	strlcpy(obj->name, name, sizeof(obj->name));

	IPFW_UH_WLOCK(ch);
	no = ipfw_objhash_lookup_name_type(ni, 0,
	    IPFW_TLV_STATE_NAME, name);
	if (no != NULL) {
		/*
		 * Object is already created.
		 * Just return its kidx and bump refcount.
		 */
		*pkidx = no->kidx;
		no->refcnt++;
		IPFW_UH_WUNLOCK(ch);
		free(obj, M_IPFW);
		DYN_DEBUG("\tfound kidx %d", *pkidx);
		return (0);
	}
	if (ipfw_objhash_alloc_idx(ni, &obj->no.kidx) != 0) {
		DYN_DEBUG("\talloc_idx failed for %s", name);
		IPFW_UH_WUNLOCK(ch);
		free(obj, M_IPFW);
		return (ENOSPC);
	}
	ipfw_objhash_add(ni, &obj->no);
	SRV_OBJECT(ch, obj->no.kidx) = obj;
	obj->no.refcnt++;
	*pkidx = obj->no.kidx;
	IPFW_UH_WUNLOCK(ch);
	DYN_DEBUG("\tcreated kidx %d", *pkidx);
	return (0);
}

static void
dyn_destroy(struct ip_fw_chain *ch, struct named_object *no)
{
	struct dyn_state_obj *obj;

	IPFW_UH_WLOCK_ASSERT(ch);

	KASSERT(no->etlv == IPFW_TLV_STATE_NAME,
	    ("%s: wrong object type %u", __func__, no->etlv));
	KASSERT(no->refcnt == 1,
	    ("Destroying object '%s' (type %u, idx %u) with refcnt %u",
	    no->name, no->etlv, no->kidx, no->refcnt));
	DYN_DEBUG("kidx %d", no->kidx);
	obj = SRV_OBJECT(ch, no->kidx);
	SRV_OBJECT(ch, no->kidx) = NULL;
	ipfw_objhash_del(CHAIN_TO_SRV(ch), no);
	ipfw_objhash_free_idx(CHAIN_TO_SRV(ch), no->kidx);

	free(obj, M_IPFW);
}

static struct opcode_obj_rewrite dyn_opcodes[] = {
	{
		O_KEEP_STATE, IPFW_TLV_STATE_NAME,
		dyn_classify, dyn_update,
		dyn_findbyname, dyn_findbykidx,
		dyn_create, dyn_destroy
	},
	{
		O_CHECK_STATE, IPFW_TLV_STATE_NAME,
		dyn_classify, dyn_update,
		dyn_findbyname, dyn_findbykidx,
		dyn_create, dyn_destroy
	},
	{
		O_PROBE_STATE, IPFW_TLV_STATE_NAME,
		dyn_classify, dyn_update,
		dyn_findbyname, dyn_findbykidx,
		dyn_create, dyn_destroy
	},
	{
		O_LIMIT, IPFW_TLV_STATE_NAME,
		dyn_classify, dyn_update,
		dyn_findbyname, dyn_findbykidx,
		dyn_create, dyn_destroy
	},
};
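
/*
 * Usage example (illustrative, assuming the ipfw(8) named-state syntax):
 * separate rules can keep their states in distinct named objects, e.g.
 *
 *	ipfw add check-state :inbound
 *	ipfw add allow tcp from any to me 80 setup keep-state :inbound
 *
 * Rules that do not give a name share the "default" state object created
 * by dyn_create() above.
 */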

/*
 * IMPORTANT: the hash function for dynamic rules must be commutative
 * in source and destination (ip, port), because rules are bidirectional
 * and we want to find both in the same bucket.
 */
#ifndef IPFIREWALL_JENKINSHASH
static __inline uint32_t
hash_packet(const struct ipfw_flow_id *id)
{
	uint32_t i;

#ifdef INET6
	if (IS_IP6_FLOW_ID(id))
		i = ntohl((id->dst_ip6.__u6_addr.__u6_addr32[2]) ^
		    (id->dst_ip6.__u6_addr.__u6_addr32[3]) ^
		    (id->src_ip6.__u6_addr.__u6_addr32[2]) ^
		    (id->src_ip6.__u6_addr.__u6_addr32[3]));
	else
#endif /* INET6 */
		i = (id->dst_ip) ^ (id->src_ip);
	i ^= (id->dst_port) ^ (id->src_port);
	return (i);
}

static __inline uint32_t
hash_parent(const struct ipfw_flow_id *id, const void *rule)
{

	return (hash_packet(id) ^ ((uintptr_t)rule));
}

#else /* IPFIREWALL_JENKINSHASH */

VNET_DEFINE_STATIC(uint32_t, dyn_hashseed);
#define	V_dyn_hashseed		VNET(dyn_hashseed)

static __inline int
addrcmp4(const struct ipfw_flow_id *id)
{

	if (id->src_ip < id->dst_ip)
		return (0);
	if (id->src_ip > id->dst_ip)
		return (1);
	if (id->src_port <= id->dst_port)
		return (0);
	return (1);
}

#ifdef INET6
static __inline int
addrcmp6(const struct ipfw_flow_id *id)
{
	int ret;

	ret = memcmp(&id->src_ip6, &id->dst_ip6, sizeof(struct in6_addr));
	if (ret < 0)
		return (0);
	if (ret > 0)
		return (1);
	if (id->src_port <= id->dst_port)
		return (0);
	return (1);
}

static __inline uint32_t
hash_packet6(const struct ipfw_flow_id *id)
{
	struct tuple6 {
		struct in6_addr	addr[2];
		uint16_t	port[2];
	} t6;

	if (addrcmp6(id) == 0) {
		t6.addr[0] = id->src_ip6;
		t6.addr[1] = id->dst_ip6;
		t6.port[0] = id->src_port;
		t6.port[1] = id->dst_port;
	} else {
		t6.addr[0] = id->dst_ip6;
		t6.addr[1] = id->src_ip6;
		t6.port[0] = id->dst_port;
		t6.port[1] = id->src_port;
	}
	return (jenkins_hash32((const uint32_t *)&t6,
	    sizeof(t6) / sizeof(uint32_t), V_dyn_hashseed));
}
#endif

static __inline uint32_t
hash_packet(const struct ipfw_flow_id *id)
{
	struct tuple4 {
		in_addr_t	addr[2];
		uint16_t	port[2];
	} t4;

	if (IS_IP4_FLOW_ID(id)) {
		/* All fields are in host byte order */
		if (addrcmp4(id) == 0) {
			t4.addr[0] = id->src_ip;
			t4.addr[1] = id->dst_ip;
			t4.port[0] = id->src_port;
			t4.port[1] = id->dst_port;
		} else {
			t4.addr[0] = id->dst_ip;
			t4.addr[1] = id->src_ip;
			t4.port[0] = id->dst_port;
			t4.port[1] = id->src_port;
		}
		return (jenkins_hash32((const uint32_t *)&t4,
		    sizeof(t4) / sizeof(uint32_t), V_dyn_hashseed));
	} else
#ifdef INET6
	if (IS_IP6_FLOW_ID(id))
		return (hash_packet6(id));
#endif
	return (0);
}

static __inline uint32_t
hash_parent(const struct ipfw_flow_id *id, const void *rule)
{

	return (jenkins_hash32((const uint32_t *)&rule,
	    sizeof(rule) / sizeof(uint32_t), hash_packet(id)));
}
#endif /* IPFIREWALL_JENKINSHASH */
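
/*
 * Worked example (illustrative, not from the original source): for a
 * flow 1.2.3.4:1025 -> 5.6.7.8:80, addrcmp4() selects the (src, dst)
 * order since 1.2.3.4 < 5.6.7.8; for the reverse packet
 * 5.6.7.8:80 -> 1.2.3.4:1025 it selects the (dst, src) order. Both
 * directions therefore hash the same canonical tuple
 * {1.2.3.4, 5.6.7.8, 1025, 80} and land in the same bucket.
 */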

/*
 * Print customizable flow id description via log(9) facility.
 */
static void
print_dyn_rule_flags(const struct ipfw_flow_id *id, int dyn_type,
    int log_flags, char *prefix, char *postfix)
{
	struct in_addr da;
#ifdef INET6
	char src[INET6_ADDRSTRLEN], dst[INET6_ADDRSTRLEN];
#else
	char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN];
#endif

#ifdef INET6
	if (IS_IP6_FLOW_ID(id)) {
		ip6_sprintf(src, &id->src_ip6);
		ip6_sprintf(dst, &id->dst_ip6);
	} else
#endif
	{
		da.s_addr = htonl(id->src_ip);
		inet_ntop(AF_INET, &da, src, sizeof(src));
		da.s_addr = htonl(id->dst_ip);
		inet_ntop(AF_INET, &da, dst, sizeof(dst));
	}
	log(log_flags, "ipfw: %s type %d %s %d -> %s %d, %d %s\n",
	    prefix, dyn_type, src, id->src_port, dst,
	    id->dst_port, V_dyn_count, postfix);
}

#define	print_dyn_rule(id, dtype, prefix, postfix)	\
    print_dyn_rule_flags(id, dtype, LOG_DEBUG, prefix, postfix)

#define	TIME_LEQ(a, b)	((int)((a) - (b)) <= 0)
#define	TIME_LE(a, b)	((int)((a) - (b)) < 0)
#define	_SEQ_GE(a, b)	((int)((a) - (b)) >= 0)
#define	BOTH_SYN	(TH_SYN | (TH_SYN << 8))
#define	BOTH_FIN	(TH_FIN | (TH_FIN << 8))
#define	TCP_FLAGS	(TH_FLAGS | (TH_FLAGS << 8))
#define	ACK_FWD		0x00010000	/* fwd ack seen */
#define	ACK_REV		0x00020000	/* rev ack seen */
#define	ACK_BOTH	(ACK_FWD | ACK_REV)

static uint32_t
dyn_update_tcp_state(struct dyn_data *data, const struct ipfw_flow_id *pkt,
    const struct tcphdr *tcp, int dir)
{
	uint32_t ack, expire;
	uint32_t state, old;
	uint8_t th_flags;

	expire = data->expire;
	old = state = data->state;
	th_flags = pkt->_flags & (TH_FIN | TH_SYN | TH_RST);
	state |= (dir == MATCH_FORWARD) ? th_flags: (th_flags << 8);
	switch (state & TCP_FLAGS) {
	case TH_SYN:			/* opening */
		expire = time_uptime + V_dyn_syn_lifetime;
		break;

	case BOTH_SYN:			/* move to established */
	case BOTH_SYN | TH_FIN:		/* one side tries to close */
	case BOTH_SYN | (TH_FIN << 8):
		if (tcp == NULL)
			break;
		ack = ntohl(tcp->th_ack);
		if (dir == MATCH_FORWARD) {
			if (data->ack_fwd == 0 ||
			    _SEQ_GE(ack, data->ack_fwd)) {
				state |= ACK_FWD;
				if (data->ack_fwd != ack)
					ck_pr_store_32(&data->ack_fwd, ack);
			}
		} else {
			if (data->ack_rev == 0 ||
			    _SEQ_GE(ack, data->ack_rev)) {
				state |= ACK_REV;
				if (data->ack_rev != ack)
					ck_pr_store_32(&data->ack_rev, ack);
			}
		}
		if ((state & ACK_BOTH) == ACK_BOTH) {
			/*
			 * Set expire time to V_dyn_ack_lifetime only if
			 * we got ACKs for both directions.
			 * We use XOR here to avoid possible state
			 * overwriting by a concurrent thread.
			 */
			expire = time_uptime + V_dyn_ack_lifetime;
			ck_pr_xor_32(&data->state, ACK_BOTH);
		} else if ((data->state & ACK_BOTH) != (state & ACK_BOTH))
			ck_pr_or_32(&data->state, state & ACK_BOTH);
		break;

	case BOTH_SYN | BOTH_FIN:	/* both sides closed */
		if (V_dyn_fin_lifetime >= V_dyn_keepalive_period)
			V_dyn_fin_lifetime = V_dyn_keepalive_period - 1;
		expire = time_uptime + V_dyn_fin_lifetime;
		break;

	default:
		if (V_dyn_keepalive != 0 &&
		    V_dyn_rst_lifetime >= V_dyn_keepalive_period)
			V_dyn_rst_lifetime = V_dyn_keepalive_period - 1;
		expire = time_uptime + V_dyn_rst_lifetime;
	}
	/* Save TCP state if it was changed */
	if ((state & TCP_FLAGS) != (old & TCP_FLAGS))
		ck_pr_or_32(&data->state, state & TCP_FLAGS);
	return (expire);
}
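
/*
 * Summary of the dyn_data.state layout used above (an editorial aid
 * derived from the macros, not from the original source):
 *
 *	bits  0..7	TCP flags seen in the forward direction
 *	bits  8..15	TCP flags seen in the reverse direction
 *	bit  16		ACK_FWD - forward ACK was seen
 *	bit  17		ACK_REV - reverse ACK was seen
 */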

/*
 * Update ULP specific state.
 * For TCP we keep sequence numbers and flags. For other protocols
 * currently we update only the expire time. Packets and bytes counters
 * are also updated here.
 */
static void
dyn_update_proto_state(struct dyn_data *data, const struct ipfw_flow_id *pkt,
    const void *ulp, int pktlen, int dir)
{
	uint32_t expire;

	/* NOTE: we are in critical section here. */
	switch (pkt->proto) {
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		expire = time_uptime + V_dyn_udp_lifetime;
		break;
	case IPPROTO_TCP:
		expire = dyn_update_tcp_state(data, pkt, ulp, dir);
		break;
	default:
		expire = time_uptime + V_dyn_short_lifetime;
	}
	/*
	 * The expiration timer has per-second granularity, no need to update
	 * it every time the state is matched.
	 */
	if (data->expire != expire)
		ck_pr_store_32(&data->expire, expire);

	if (dir == MATCH_FORWARD)
		DYN_COUNTER_INC(data, fwd, pktlen);
	else
		DYN_COUNTER_INC(data, rev, pktlen);
}

/*
 * Lookup IPv4 state.
 * Must be called in critical section.
 */
static struct dyn_ipv4_state *
dyn_lookup_ipv4_state(const struct ipfw_flow_id *pkt, const void *ulp,
    struct ipfw_dyn_info *info, int pktlen)
{
	struct dyn_ipv4_state *s;
	uint32_t version, bucket;

	bucket = DYN_BUCKET(info->hashval, V_curr_dyn_buckets);
	info->version = DYN_BUCKET_VERSION(bucket, ipv4_add);
restart:
	version = DYN_BUCKET_VERSION(bucket, ipv4_del);
	CK_SLIST_FOREACH(s, &V_dyn_ipv4[bucket], entry) {
		DYNSTATE_PROTECT(s);
		if (version != DYN_BUCKET_VERSION(bucket, ipv4_del))
			goto restart;
		if (s->proto != pkt->proto)
			continue;
		if (info->kidx != 0 && s->kidx != info->kidx)
			continue;
		if (s->sport == pkt->src_port && s->dport == pkt->dst_port &&
		    s->src == pkt->src_ip && s->dst == pkt->dst_ip) {
			info->direction = MATCH_FORWARD;
			break;
		}
		if (s->sport == pkt->dst_port && s->dport == pkt->src_port &&
		    s->src == pkt->dst_ip && s->dst == pkt->src_ip) {
			info->direction = MATCH_REVERSE;
			break;
		}
	}

	if (s != NULL)
		dyn_update_proto_state(s->data, pkt, ulp, pktlen,
		    info->direction);
	return (s);
}

/*
 * Lookup IPv4 state.
 * A simplified version is used to check that a matching state doesn't exist.
 */
static int
dyn_lookup_ipv4_state_locked(const struct ipfw_flow_id *pkt,
    const void *ulp, int pktlen, uint32_t bucket, uint16_t kidx)
{
	struct dyn_ipv4_state *s;
	int dir;

	dir = MATCH_NONE;
	DYN_BUCKET_ASSERT(bucket);
	CK_SLIST_FOREACH(s, &V_dyn_ipv4[bucket], entry) {
		if (s->proto != pkt->proto ||
		    s->kidx != kidx)
			continue;
		if (s->sport == pkt->src_port &&
		    s->dport == pkt->dst_port &&
		    s->src == pkt->src_ip && s->dst == pkt->dst_ip) {
			dir = MATCH_FORWARD;
			break;
		}
		if (s->sport == pkt->dst_port && s->dport == pkt->src_port &&
		    s->src == pkt->dst_ip && s->dst == pkt->src_ip) {
			dir = MATCH_REVERSE;
			break;
		}
	}
	if (s != NULL)
		dyn_update_proto_state(s->data, pkt, ulp, pktlen, dir);
	return (s != NULL);
}

static struct dyn_ipv4_state *
dyn_lookup_ipv4_parent(const struct ipfw_flow_id *pkt, const void *rule,
    uint32_t ruleid, uint16_t rulenum, uint32_t hashval)
{
	struct dyn_ipv4_state *s;
	uint32_t version, bucket;

	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
restart:
	version = DYN_BUCKET_VERSION(bucket, ipv4_parent_del);
	CK_SLIST_FOREACH(s, &V_dyn_ipv4_parent[bucket], entry) {
		DYNSTATE_PROTECT(s);
		if (version != DYN_BUCKET_VERSION(bucket, ipv4_parent_del))
			goto restart;
		/*
		 * NOTE: we do not need to check kidx, because a parent rule
		 * cannot create states with different kidx.
		 * And a parent state is always created for the forward
		 * direction.
		 */
		if (s->limit->parent == rule &&
		    s->limit->ruleid == ruleid &&
		    s->limit->rulenum == rulenum &&
		    s->proto == pkt->proto &&
		    s->sport == pkt->src_port &&
		    s->dport == pkt->dst_port &&
		    s->src == pkt->src_ip && s->dst == pkt->dst_ip) {
			if (s->limit->expire != time_uptime +
			    V_dyn_short_lifetime)
				ck_pr_store_32(&s->limit->expire,
				    time_uptime + V_dyn_short_lifetime);
			break;
		}
	}
	return (s);
}

static struct dyn_ipv4_state *
dyn_lookup_ipv4_parent_locked(const struct ipfw_flow_id *pkt,
    const void *rule, uint32_t ruleid, uint16_t rulenum, uint32_t bucket)
{
	struct dyn_ipv4_state *s;

	DYN_BUCKET_ASSERT(bucket);
	CK_SLIST_FOREACH(s, &V_dyn_ipv4_parent[bucket], entry) {
		if (s->limit->parent == rule &&
		    s->limit->ruleid == ruleid &&
		    s->limit->rulenum == rulenum &&
		    s->proto == pkt->proto &&
		    s->sport == pkt->src_port &&
		    s->dport == pkt->dst_port &&
		    s->src == pkt->src_ip && s->dst == pkt->dst_ip)
			break;
	}
	return (s);
}

#ifdef INET6
static uint32_t
dyn_getscopeid(const struct ip_fw_args *args)
{

	/*
	 * If the source or destination address is a scoped address, we need
	 * to determine the scope zone id to resolve address scope ambiguity.
	 */
	if (IN6_IS_ADDR_LINKLOCAL(&args->f_id.src_ip6) ||
	    IN6_IS_ADDR_LINKLOCAL(&args->f_id.dst_ip6))
		return (in6_getscopezone(args->ifp, IPV6_ADDR_SCOPE_LINKLOCAL));

	return (0);
}
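
/*
 * Example (illustrative, not from the original source): a flow between
 * link-local peers, e.g. fe80::1 -> fe80::2 on em0, is ambiguous without
 * a zone because fe80::/10 exists on every interface; the zone id of the
 * receiving interface makes the flow id unique.
 */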

/*
 * Lookup IPv6 state.
 * Must be called in critical section.
 */
static struct dyn_ipv6_state *
dyn_lookup_ipv6_state(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    const void *ulp, struct ipfw_dyn_info *info, int pktlen)
{
	struct dyn_ipv6_state *s;
	uint32_t version, bucket;

	bucket = DYN_BUCKET(info->hashval, V_curr_dyn_buckets);
	info->version = DYN_BUCKET_VERSION(bucket, ipv6_add);
restart:
	version = DYN_BUCKET_VERSION(bucket, ipv6_del);
	CK_SLIST_FOREACH(s, &V_dyn_ipv6[bucket], entry) {
		DYNSTATE_PROTECT(s);
		if (version != DYN_BUCKET_VERSION(bucket, ipv6_del))
			goto restart;
		if (s->proto != pkt->proto || s->zoneid != zoneid)
			continue;
		if (info->kidx != 0 && s->kidx != info->kidx)
			continue;
		if (s->sport == pkt->src_port && s->dport == pkt->dst_port &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->src_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->dst_ip6)) {
			info->direction = MATCH_FORWARD;
			break;
		}
		if (s->sport == pkt->dst_port && s->dport == pkt->src_port &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->dst_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->src_ip6)) {
			info->direction = MATCH_REVERSE;
			break;
		}
	}
	if (s != NULL)
		dyn_update_proto_state(s->data, pkt, ulp, pktlen,
		    info->direction);
	return (s);
}

/*
 * Lookup IPv6 state.
 * A simplified version is used to check that a matching state doesn't exist.
 */
static int
dyn_lookup_ipv6_state_locked(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    const void *ulp, int pktlen, uint32_t bucket, uint16_t kidx)
{
	struct dyn_ipv6_state *s;
	int dir;

	dir = MATCH_NONE;
	DYN_BUCKET_ASSERT(bucket);
	CK_SLIST_FOREACH(s, &V_dyn_ipv6[bucket], entry) {
		if (s->proto != pkt->proto || s->kidx != kidx ||
		    s->zoneid != zoneid)
			continue;
		if (s->sport == pkt->src_port && s->dport == pkt->dst_port &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->src_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->dst_ip6)) {
			dir = MATCH_FORWARD;
			break;
		}
		if (s->sport == pkt->dst_port && s->dport == pkt->src_port &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->dst_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->src_ip6)) {
			dir = MATCH_REVERSE;
			break;
		}
	}
	if (s != NULL)
		dyn_update_proto_state(s->data, pkt, ulp, pktlen, dir);
	return (s != NULL);
}

static struct dyn_ipv6_state *
dyn_lookup_ipv6_parent(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    const void *rule, uint32_t ruleid, uint16_t rulenum, uint32_t hashval)
{
	struct dyn_ipv6_state *s;
	uint32_t version, bucket;

	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
restart:
	version = DYN_BUCKET_VERSION(bucket, ipv6_parent_del);
	CK_SLIST_FOREACH(s, &V_dyn_ipv6_parent[bucket], entry) {
		DYNSTATE_PROTECT(s);
		if (version != DYN_BUCKET_VERSION(bucket, ipv6_parent_del))
			goto restart;
		/*
		 * NOTE: we do not need to check kidx, because a parent rule
		 * cannot create states with different kidx.
		 * Also, a parent state is always created for the forward
		 * direction.
		 */
		if (s->limit->parent == rule &&
		    s->limit->ruleid == ruleid &&
		    s->limit->rulenum == rulenum &&
		    s->proto == pkt->proto &&
		    s->sport == pkt->src_port &&
		    s->dport == pkt->dst_port && s->zoneid == zoneid &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->src_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->dst_ip6)) {
			if (s->limit->expire != time_uptime +
			    V_dyn_short_lifetime)
				ck_pr_store_32(&s->limit->expire,
				    time_uptime + V_dyn_short_lifetime);
			break;
		}
	}
	return (s);
}

static struct dyn_ipv6_state *
dyn_lookup_ipv6_parent_locked(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    const void *rule, uint32_t ruleid, uint16_t rulenum, uint32_t bucket)
{
	struct dyn_ipv6_state *s;

	DYN_BUCKET_ASSERT(bucket);
	CK_SLIST_FOREACH(s, &V_dyn_ipv6_parent[bucket], entry) {
		if (s->limit->parent == rule &&
		    s->limit->ruleid == ruleid &&
		    s->limit->rulenum == rulenum &&
		    s->proto == pkt->proto &&
		    s->sport == pkt->src_port &&
		    s->dport == pkt->dst_port && s->zoneid == zoneid &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->src_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->dst_ip6))
			break;
	}
	return (s);
}

#endif /* INET6 */

/*
 * Lookup dynamic state.
 *  pkt - ipfw_flow_id filled by ipfw_chk();
 *  ulp - upper level protocol header determined by ipfw_chk();
 *  info - info about the matched state to return back.
 * Returns a pointer to the state's parent rule and fills dyn_info. If
 * there is no state, NULL is returned.
 * On match, ipfw_dyn_lookup_state() updates the state's counters.
 */
struct ip_fw *
ipfw_dyn_lookup_state(const struct ip_fw_args *args, const void *ulp,
    int pktlen, const ipfw_insn *cmd, struct ipfw_dyn_info *info)
{
	struct dyn_data *data;
	struct ip_fw *rule;

	IPFW_RLOCK_ASSERT(&V_layer3_chain);

	data = NULL;
	rule = NULL;
	info->kidx = cmd->arg1;
	info->direction = MATCH_NONE;
	info->hashval = hash_packet(&args->f_id);

	DYNSTATE_CRITICAL_ENTER();
	if (IS_IP4_FLOW_ID(&args->f_id)) {
		struct dyn_ipv4_state *s;

		s = dyn_lookup_ipv4_state(&args->f_id, ulp, info, pktlen);
		if (s != NULL) {
			/*
			 * Dynamic states are created using the same 5-tuple,
			 * so it is assumed that the parent rule for an
			 * O_LIMIT state has the same address family.
			 */
			data = s->data;
			if (s->type == O_LIMIT) {
				s = data->parent;
				rule = s->limit->parent;
			} else
				rule = data->parent;
		}
	}
#ifdef INET6
	else if (IS_IP6_FLOW_ID(&args->f_id)) {
		struct dyn_ipv6_state *s;

		s = dyn_lookup_ipv6_state(&args->f_id, dyn_getscopeid(args),
		    ulp, info, pktlen);
		if (s != NULL) {
			data = s->data;
			if (s->type == O_LIMIT) {
				s = data->parent;
				rule = s->limit->parent;
			} else
				rule = data->parent;
		}
	}
#endif
	if (data != NULL) {
		/*
		 * If the cached chain id is the same, we can avoid the rule
		 * index lookup. Otherwise do the lookup and update chain_id
		 * and f_pos. It is safe even if there is a concurrent thread
		 * that wants to update the same state, because chain->id can
		 * be changed only under IPFW_WLOCK().
		 */
		if (data->chain_id != V_layer3_chain.id) {
			data->f_pos = ipfw_find_rule(&V_layer3_chain,
			    data->rulenum, data->ruleid);
			/*
			 * Check that the found state has not been orphaned.
			 * While chain->id is being changed, the parent rule
			 * can be deleted. If the found rule doesn't match
			 * the parent pointer, consider this result as
			 * MATCH_NONE and return NULL.
			 *
			 * This will lead to the creation of a new similar
			 * state that will be added into the head of this
			 * bucket. And the state that we currently have
			 * matched should be deleted by dyn_expire_states().
			 *
			 * In case dyn_keep_states is enabled, return a
			 * pointer to the deleted rule and an f_pos value
			 * corresponding to the penultimate rule.
			 * With V_dyn_keep_states enabled, states that become
			 * orphaned get the DYN_REFERENCED flag and the rule
			 * is kept around, so we can return it. But since it
			 * is not in the rules map, we need to return an
			 * f_pos value such that, if the search continues
			 * after the state handling, the next rule will be
			 * the last one - the default rule.
			 */
			if (V_layer3_chain.map[data->f_pos] == rule) {
				data->chain_id = V_layer3_chain.id;
				info->f_pos = data->f_pos;
			} else if (V_dyn_keep_states != 0) {
				/*
				 * The original rule pointer is still usable.
				 * So, we return it, but f_pos needs to be
				 * changed to point to the penultimate rule.
				 */
				MPASS(V_layer3_chain.n_rules > 1);
				data->chain_id = V_layer3_chain.id;
				data->f_pos = V_layer3_chain.n_rules - 2;
				info->f_pos = data->f_pos;
			} else {
				rule = NULL;
				info->direction = MATCH_NONE;
				DYN_DEBUG("rule %p [%u, %u] is considered "
				    "invalid in data %p", rule, data->ruleid,
				    data->rulenum, data);
				/* info->f_pos doesn't matter here. */
			}
		} else
			info->f_pos = data->f_pos;
	}
	DYNSTATE_CRITICAL_EXIT();
#if 0
	/*
	 * Return MATCH_NONE if the parent rule is in a disabled set.
	 * This will lead to the creation of a new similar state that
	 * will be added into the head of this bucket.
	 *
	 * XXXAE: we need to be able to update the state's set when the
	 * parent rule set is changed.
	 */
	if (rule != NULL && (V_set_disable & (1 << rule->set))) {
		rule = NULL;
		info->direction = MATCH_NONE;
	}
#endif
	return (rule);
}

static struct dyn_parent *
dyn_alloc_parent(void *parent, uint32_t ruleid, uint16_t rulenum,
    uint32_t hashval)
{
	struct dyn_parent *limit;

	limit = uma_zalloc(V_dyn_parent_zone, M_NOWAIT | M_ZERO);
	if (limit == NULL) {
		if (last_log != time_uptime) {
			last_log = time_uptime;
			log(LOG_DEBUG,
			    "ipfw: Cannot allocate parent dynamic state, "
			    "consider increasing "
			    "net.inet.ip.fw.dyn_parent_max\n");
		}
		return (NULL);
	}

	limit->parent = parent;
	limit->ruleid = ruleid;
	limit->rulenum = rulenum;
	limit->hashval = hashval;
	limit->expire = time_uptime + V_dyn_short_lifetime;
	return (limit);
}

static struct dyn_data *
dyn_alloc_dyndata(void *parent, uint32_t ruleid, uint16_t rulenum,
    const struct ipfw_flow_id *pkt, const void *ulp, int pktlen,
    uint32_t hashval, uint16_t fibnum)
{
	struct dyn_data *data;

	data = uma_zalloc(V_dyn_data_zone, M_NOWAIT | M_ZERO);
	if (data == NULL) {
		if (last_log != time_uptime) {
			last_log = time_uptime;
			log(LOG_DEBUG,
			    "ipfw: Cannot allocate dynamic state, "
			    "consider increasing net.inet.ip.fw.dyn_max\n");
		}
		return (NULL);
	}

	data->parent = parent;
	data->ruleid = ruleid;
	data->rulenum = rulenum;
	data->fibnum = fibnum;
	data->hashval = hashval;
	data->expire = time_uptime + V_dyn_syn_lifetime;
	dyn_update_proto_state(data, pkt, ulp, pktlen, MATCH_FORWARD);
	return (data);
}

static struct dyn_ipv4_state *
dyn_alloc_ipv4_state(const struct ipfw_flow_id *pkt, uint16_t kidx,
    uint8_t type)
{
	struct dyn_ipv4_state *s;

	s = uma_zalloc(V_dyn_ipv4_zone, M_NOWAIT | M_ZERO);
	if (s == NULL)
		return (NULL);

	s->type = type;
	s->kidx = kidx;
	s->proto = pkt->proto;
	s->sport = pkt->src_port;
	s->dport = pkt->dst_port;
	s->src = pkt->src_ip;
	s->dst = pkt->dst_ip;
	return (s);
}

/*
 * Add IPv4 parent state.
 * Returns a pointer to the parent state. When it is not NULL, we are in
 * a critical section and the pointer is protected by a hazard pointer.
 * When some error occurs, it returns NULL and exiting the critical
 * section is not needed.
 */
static struct dyn_ipv4_state *
dyn_add_ipv4_parent(void *rule, uint32_t ruleid, uint16_t rulenum,
    const struct ipfw_flow_id *pkt, uint32_t hashval, uint32_t version,
    uint16_t kidx)
{
	struct dyn_ipv4_state *s;
	struct dyn_parent *limit;
	uint32_t bucket;

	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
	DYN_BUCKET_LOCK(bucket);
	if (version != DYN_BUCKET_VERSION(bucket, ipv4_parent_add)) {
		/*
		 * The bucket version has been changed since the last lookup,
		 * do the lookup again to be sure that the state does not
		 * exist.
		 */
		s = dyn_lookup_ipv4_parent_locked(pkt, rule, ruleid,
		    rulenum, bucket);
		if (s != NULL) {
			/*
			 * A simultaneous thread has already created this
			 * state. Just return it.
			 */
			DYNSTATE_CRITICAL_ENTER();
			DYNSTATE_PROTECT(s);
			DYN_BUCKET_UNLOCK(bucket);
			return (s);
		}
	}

	limit = dyn_alloc_parent(rule, ruleid, rulenum, hashval);
	if (limit == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		return (NULL);
	}

	s = dyn_alloc_ipv4_state(pkt, kidx, O_LIMIT_PARENT);
	if (s == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		uma_zfree(V_dyn_parent_zone, limit);
		return (NULL);
	}

	s->limit = limit;
	CK_SLIST_INSERT_HEAD(&V_dyn_ipv4_parent[bucket], s, entry);
	DYN_COUNT_INC(dyn_parent_count);
	DYN_BUCKET_VERSION_BUMP(bucket, ipv4_parent_add);
	DYNSTATE_CRITICAL_ENTER();
	DYNSTATE_PROTECT(s);
	DYN_BUCKET_UNLOCK(bucket);
	return (s);
}

static int
dyn_add_ipv4_state(void *parent, uint32_t ruleid, uint16_t rulenum,
    const struct ipfw_flow_id *pkt, const void *ulp, int pktlen,
    uint32_t hashval, struct ipfw_dyn_info *info, uint16_t fibnum,
    uint16_t kidx, uint8_t type)
{
	struct dyn_ipv4_state *s;
	void *data;
	uint32_t bucket;

	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
	DYN_BUCKET_LOCK(bucket);
	if (info->direction == MATCH_UNKNOWN ||
	    info->kidx != kidx ||
	    info->hashval != hashval ||
	    info->version != DYN_BUCKET_VERSION(bucket, ipv4_add)) {
		/*
		 * The bucket version has been changed since the last lookup,
		 * do the lookup again to be sure that the state does not
		 * exist.
		 */
		if (dyn_lookup_ipv4_state_locked(pkt, ulp, pktlen,
		    bucket, kidx) != 0) {
			DYN_BUCKET_UNLOCK(bucket);
			return (EEXIST);
		}
	}

	data = dyn_alloc_dyndata(parent, ruleid, rulenum, pkt, ulp,
	    pktlen, hashval, fibnum);
	if (data == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		return (ENOMEM);
	}

	s = dyn_alloc_ipv4_state(pkt, kidx, type);
	if (s == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		uma_zfree(V_dyn_data_zone, data);
		return (ENOMEM);
	}

	s->data = data;
	CK_SLIST_INSERT_HEAD(&V_dyn_ipv4[bucket], s, entry);
	DYN_COUNT_INC(dyn_count);
	DYN_BUCKET_VERSION_BUMP(bucket, ipv4_add);
	DYN_BUCKET_UNLOCK(bucket);
	return (0);
}

#ifdef INET6
static struct dyn_ipv6_state *
dyn_alloc_ipv6_state(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    uint16_t kidx, uint8_t type)
{
	struct dyn_ipv6_state *s;

	s = uma_zalloc(V_dyn_ipv6_zone, M_NOWAIT | M_ZERO);
	if (s == NULL)
		return (NULL);

	s->type = type;
	s->kidx = kidx;
	s->zoneid = zoneid;
	s->proto = pkt->proto;
	s->sport = pkt->src_port;
	s->dport = pkt->dst_port;
	s->src = pkt->src_ip6;
	s->dst = pkt->dst_ip6;
	return (s);
}

/*
 * Add IPv6 parent state.
 * Returns a pointer to the parent state. When it is not NULL, we are in
 * a critical section and the pointer is protected by a hazard pointer.
 * When some error occurs, it returns NULL and exiting the critical
 * section is not needed.
 */
static struct dyn_ipv6_state *
dyn_add_ipv6_parent(void *rule, uint32_t ruleid, uint16_t rulenum,
    const struct ipfw_flow_id *pkt, uint32_t zoneid, uint32_t hashval,
    uint32_t version, uint16_t kidx)
{
	struct dyn_ipv6_state *s;
	struct dyn_parent *limit;
	uint32_t bucket;

	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
	DYN_BUCKET_LOCK(bucket);
	if (version != DYN_BUCKET_VERSION(bucket, ipv6_parent_add)) {
		/*
		 * The bucket version has been changed since the last lookup,
		 * do the lookup again to be sure that the state does not
		 * exist.
		 */
		s = dyn_lookup_ipv6_parent_locked(pkt, zoneid, rule, ruleid,
		    rulenum, bucket);
		if (s != NULL) {
			/*
			 * A simultaneous thread has already created this
			 * state. Just return it.
			 */
			DYNSTATE_CRITICAL_ENTER();
			DYNSTATE_PROTECT(s);
			DYN_BUCKET_UNLOCK(bucket);
			return (s);
		}
	}

	limit = dyn_alloc_parent(rule, ruleid, rulenum, hashval);
	if (limit == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		return (NULL);
	}

	s = dyn_alloc_ipv6_state(pkt, zoneid, kidx, O_LIMIT_PARENT);
	if (s == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		uma_zfree(V_dyn_parent_zone, limit);
		return (NULL);
	}

	s->limit = limit;
	CK_SLIST_INSERT_HEAD(&V_dyn_ipv6_parent[bucket], s, entry);
	DYN_COUNT_INC(dyn_parent_count);
	DYN_BUCKET_VERSION_BUMP(bucket, ipv6_parent_add);
	DYNSTATE_CRITICAL_ENTER();
	DYNSTATE_PROTECT(s);
	DYN_BUCKET_UNLOCK(bucket);
	return (s);
}

static int
dyn_add_ipv6_state(void *parent, uint32_t ruleid, uint16_t rulenum,
    const struct ipfw_flow_id *pkt, uint32_t zoneid, const void *ulp,
    int pktlen, uint32_t hashval, struct ipfw_dyn_info *info,
    uint16_t fibnum, uint16_t kidx, uint8_t type)
{
	struct dyn_ipv6_state *s;
	struct dyn_data *data;
	uint32_t bucket;

	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
	DYN_BUCKET_LOCK(bucket);
	if (info->direction == MATCH_UNKNOWN ||
	    info->kidx != kidx ||
	    info->hashval != hashval ||
	    info->version != DYN_BUCKET_VERSION(bucket, ipv6_add)) {
		/*
		 * The bucket version has been changed since the last lookup,
		 * do the lookup again to be sure that the state does not
		 * exist.
		 */
 */
		if (dyn_lookup_ipv6_state_locked(pkt, zoneid, ulp, pktlen,
		    bucket, kidx) != 0) {
			DYN_BUCKET_UNLOCK(bucket);
			return (EEXIST);
		}
	}

	data = dyn_alloc_dyndata(parent, ruleid, rulenum, pkt, ulp,
	    pktlen, hashval, fibnum);
	if (data == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		return (ENOMEM);
	}

	s = dyn_alloc_ipv6_state(pkt, zoneid, kidx, type);
	if (s == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		uma_zfree(V_dyn_data_zone, data);
		return (ENOMEM);
	}

	s->data = data;
	CK_SLIST_INSERT_HEAD(&V_dyn_ipv6[bucket], s, entry);
	DYN_COUNT_INC(dyn_count);
	DYN_BUCKET_VERSION_BUMP(bucket, ipv6_add);
	DYN_BUCKET_UNLOCK(bucket);
	return (0);
}
#endif /* INET6 */

static void *
dyn_get_parent_state(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    struct ip_fw *rule, uint32_t hashval, uint32_t limit, uint16_t kidx)
{
	char sbuf[24];
	struct dyn_parent *p;
	void *ret;
	uint32_t bucket, version;

	p = NULL;
	ret = NULL;
	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
	DYNSTATE_CRITICAL_ENTER();
	if (IS_IP4_FLOW_ID(pkt)) {
		struct dyn_ipv4_state *s;

		version = DYN_BUCKET_VERSION(bucket, ipv4_parent_add);
		s = dyn_lookup_ipv4_parent(pkt, rule, rule->id,
		    rule->rulenum, bucket);
		if (s == NULL) {
			/*
			 * Exit from the critical section because
			 * dyn_add_ipv4_parent() will acquire the bucket lock.
			 */
			DYNSTATE_CRITICAL_EXIT();

			s = dyn_add_ipv4_parent(rule, rule->id,
			    rule->rulenum, pkt, hashval, version, kidx);
			if (s == NULL)
				return (NULL);
			/* Now we are in the critical section again. */
		}
		ret = s;
		p = s->limit;
	}
#ifdef INET6
	else if (IS_IP6_FLOW_ID(pkt)) {
		struct dyn_ipv6_state *s;

		version = DYN_BUCKET_VERSION(bucket, ipv6_parent_add);
		s = dyn_lookup_ipv6_parent(pkt, zoneid, rule, rule->id,
		    rule->rulenum, bucket);
		if (s == NULL) {
			/*
			 * Exit from the critical section because
			 * dyn_add_ipv6_parent() can acquire the bucket mutex.
			 */
			DYNSTATE_CRITICAL_EXIT();

			s = dyn_add_ipv6_parent(rule, rule->id,
			    rule->rulenum, pkt, zoneid, hashval, version,
			    kidx);
			if (s == NULL)
				return (NULL);
			/* Now we are in the critical section again. */
		}
		ret = s;
		p = s->limit;
	}
#endif
	else {
		DYNSTATE_CRITICAL_EXIT();
		return (NULL);
	}

	/* Check the limit */
	if (DPARENT_COUNT(p) >= limit) {
		DYNSTATE_CRITICAL_EXIT();
		if (V_fw_verbose && last_log != time_uptime) {
			last_log = time_uptime;
			snprintf(sbuf, sizeof(sbuf), "%u drop session",
			    rule->rulenum);
			print_dyn_rule_flags(pkt, O_LIMIT,
			    LOG_SECURITY | LOG_DEBUG, sbuf,
			    "too many entries");
		}
		return (NULL);
	}

	/* Take the new session into account. */
	DPARENT_COUNT_INC(p);
	/*
	 * We must exit from the critical section because the following
	 * code can acquire the bucket mutex.
	 * We rely on the 'count' field: the state will not expire while
	 * it has child states, i.e. while the 'count' field is non-zero.
	 * Return the state pointer; it will be used by child states as
	 * their parent.
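 *
 * A sketch of the 'count' lifecycle as implemented in this file:
 *	DPARENT_COUNT_INC(p)  - here, once per new child state;
 *	DPARENT_COUNT_DEC()   - in dyn_install_state() when creating the
 *	                        child fails, and in dyn_expire_states()
 *	                        when a child O_LIMIT state is unlinked.
 * A parent with count == 0 becomes eligible for expiration and for
 * freeing in dyn_free_states().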
 */
	DYNSTATE_CRITICAL_EXIT();
	return (ret);
}

static int
dyn_install_state(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    uint16_t fibnum, const void *ulp, int pktlen, struct ip_fw *rule,
    struct ipfw_dyn_info *info, uint32_t limit, uint16_t limit_mask,
    uint16_t kidx, uint8_t type)
{
	struct ipfw_flow_id id;
	uint32_t hashval, parent_hashval, ruleid, rulenum;
	int ret;

	MPASS(type == O_LIMIT || type == O_KEEP_STATE);

	ruleid = rule->id;
	rulenum = rule->rulenum;
	if (type == O_LIMIT) {
		/* Create masked flow id and calculate bucket */
		id.addr_type = pkt->addr_type;
		id.proto = pkt->proto;
		id.fib = fibnum; /* unused */
		id.src_port = (limit_mask & DYN_SRC_PORT) ?
		    pkt->src_port: 0;
		id.dst_port = (limit_mask & DYN_DST_PORT) ?
		    pkt->dst_port: 0;
		if (IS_IP4_FLOW_ID(pkt)) {
			id.src_ip = (limit_mask & DYN_SRC_ADDR) ?
			    pkt->src_ip: 0;
			id.dst_ip = (limit_mask & DYN_DST_ADDR) ?
			    pkt->dst_ip: 0;
		}
#ifdef INET6
		else if (IS_IP6_FLOW_ID(pkt)) {
			if (limit_mask & DYN_SRC_ADDR)
				id.src_ip6 = pkt->src_ip6;
			else
				memset(&id.src_ip6, 0, sizeof(id.src_ip6));
			if (limit_mask & DYN_DST_ADDR)
				id.dst_ip6 = pkt->dst_ip6;
			else
				memset(&id.dst_ip6, 0, sizeof(id.dst_ip6));
		}
#endif
		else
			return (EAFNOSUPPORT);

		parent_hashval = hash_parent(&id, rule);
		rule = dyn_get_parent_state(&id, zoneid, rule, parent_hashval,
		    limit, kidx);
		if (rule == NULL) {
#if 0
			if (V_fw_verbose && last_log != time_uptime) {
				last_log = time_uptime;
				snprintf(sbuf, sizeof(sbuf),
				    "%u drop session", rule->rulenum);
				print_dyn_rule_flags(pkt, O_LIMIT,
				    LOG_SECURITY | LOG_DEBUG, sbuf,
				    "too many entries");
			}
#endif
			return (EACCES);
		}
		/*
		 * The limit is not reached, create a new state.
		 * Now 'rule' points to the parent state.
		 */
	}

	hashval = hash_packet(pkt);
	if (IS_IP4_FLOW_ID(pkt))
		ret = dyn_add_ipv4_state(rule, ruleid, rulenum, pkt,
		    ulp, pktlen, hashval, info, fibnum, kidx, type);
#ifdef INET6
	else if (IS_IP6_FLOW_ID(pkt))
		ret = dyn_add_ipv6_state(rule, ruleid, rulenum, pkt,
		    zoneid, ulp, pktlen, hashval, info, fibnum, kidx, type);
#endif /* INET6 */
	else
		ret = EAFNOSUPPORT;

	if (type == O_LIMIT) {
		if (ret != 0) {
			/*
			 * We failed to create a child state for the O_LIMIT
			 * opcode. Since we already counted it in the parent,
			 * we must revert the counter. The 'rule' pointer
			 * points to the parent state, use it to get the
			 * dyn_parent.
			 *
			 * XXXAE: it should be safe to use the 'rule' pointer
			 * without an extra lookup, the parent state is
			 * referenced and should not be freed.
			 */
			if (IS_IP4_FLOW_ID(&id))
				DPARENT_COUNT_DEC(
				    ((struct dyn_ipv4_state *)rule)->limit);
#ifdef INET6
			else if (IS_IP6_FLOW_ID(&id))
				DPARENT_COUNT_DEC(
				    ((struct dyn_ipv6_state *)rule)->limit);
#endif
		}
	}
	/*
	 * EEXIST means that a concurrent thread has already created this
	 * state. Consider this a success.
	 *
	 * XXXAE: should we invalidate 'info' content here?
	 */
	if (ret == EEXIST)
		return (0);
	return (ret);
}

/*
 * Install dynamic state.
 * chain - ipfw's instance;
 * rule - the parent rule that installs the state;
 * cmd - opcode that installs the state;
 * args - ipfw arguments;
 * ulp - upper level protocol header;
 * pktlen - packet length;
 * info - dynamic state lookup info;
 * tablearg - tablearg id.
 *
 * Returns a non-zero value (failure) if the state is not installed
 * because of errors or because session limitations are enforced.
 */
int
ipfw_dyn_install_state(struct ip_fw_chain *chain, struct ip_fw *rule,
    const ipfw_insn_limit *cmd, const struct ip_fw_args *args,
    const void *ulp, int pktlen, struct ipfw_dyn_info *info,
    uint32_t tablearg)
{
	uint32_t limit;
	uint16_t limit_mask;

	if (cmd->o.opcode == O_LIMIT) {
		limit = IP_FW_ARG_TABLEARG(chain, cmd->conn_limit, limit);
		limit_mask = cmd->limit_mask;
	} else {
		limit = 0;
		limit_mask = 0;
	}
	return (dyn_install_state(&args->f_id,
#ifdef INET6
	    IS_IP6_FLOW_ID(&args->f_id) ? dyn_getscopeid(args):
#endif
	    0, M_GETFIB(args->m), ulp, pktlen, rule, info, limit,
	    limit_mask, cmd->o.arg1, cmd->o.opcode));
}

/*
 * Free state entries from the expired lists that are safe to remove.
 */
static void
dyn_free_states(struct ip_fw_chain *chain)
{
	struct dyn_ipv4_state *s4, *s4n;
#ifdef INET6
	struct dyn_ipv6_state *s6, *s6n;
#endif
	int cached_count, i;

	/*
	 * We keep pointers to objects that are in use on each CPU
	 * in the per-cpu dyn_hp pointer. When an object is going to be
	 * removed, it is first unlinked from the corresponding list.
	 * This changes the dyn_bucket_xxx_delver version. Unlinked
	 * objects are placed into the corresponding dyn_expired_xxx
	 * list. A reader that is going to dereference an object pointer
	 * checks the dyn_bucket_xxx_delver version before and after
	 * storing the pointer into dyn_hp. If the version is the same,
	 * the object is protected from freeing and it is safe to
	 * dereference. Otherwise the reader iterates the list again from
	 * the beginning, but this object is now unlinked and thus will
	 * not be accessible.
	 *
	 * Copy the dyn_hp pointer of each CPU into the dyn_hp_cache
	 * array. It does not matter that some pointer can change while
	 * we are copying: we need to check that objects removed in the
	 * previous pass are not in use, and if a dyn_hp pointer does not
	 * contain such an object at the time of copying, it will not
	 * appear there later, because the object is already unlinked.
	 * And for newly stored pointers we will not free objects that
	 * are unlinked only in this pass.
	 */
	cached_count = 0;
	CPU_FOREACH(i) {
		dyn_hp_cache[cached_count] = DYNSTATE_GET(i);
		if (dyn_hp_cache[cached_count] != NULL)
			cached_count++;
	}

	/*
	 * Free expired states that are safe to free.
	 * Check each entry from the previous pass in the dyn_expired_xxx
	 * lists; if the pointer to the object is in the dyn_hp_cache
	 * array, keep it until the next pass. Otherwise it is safe to
	 * free the object.
	 *
	 * XXXAE: optimize this to use SLIST_REMOVE_AFTER.
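 *
 * In outline, the DYN_FREE_STATES() macro below does (a sketch, not
 * the exact code):
 *	for each s in dyn_expired_xxx:
 *		if s is found in dyn_hp_cache: keep it for the next pass
 *		else if s is O_LIMIT_PARENT with count != 0: keep it
 *		else: SLIST_REMOVE() it and uma_zfree() s and its data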
 */
#define	DYN_FREE_STATES(s, next, name)		do {		\
	s = SLIST_FIRST(&V_dyn_expired_ ## name);		\
	while (s != NULL) {					\
		next = SLIST_NEXT(s, expired);			\
		for (i = 0; i < cached_count; i++)		\
			if (dyn_hp_cache[i] == s)		\
				break;				\
		if (i == cached_count) {			\
			if (s->type == O_LIMIT_PARENT &&	\
			    s->limit->count != 0) {		\
				s = next;			\
				continue;			\
			}					\
			SLIST_REMOVE(&V_dyn_expired_ ## name,	\
			    s, dyn_ ## name ## _state, expired); \
			if (s->type == O_LIMIT_PARENT)		\
				uma_zfree(V_dyn_parent_zone, s->limit); \
			else					\
				uma_zfree(V_dyn_data_zone, s->data); \
			uma_zfree(V_dyn_ ## name ## _zone, s);	\
		}						\
		s = next;					\
	}							\
} while (0)

	/*
	 * Protect access to the expired lists with DYN_EXPIRED_LOCK.
	 * Userland can invoke ipfw_expire_dyn_states() to delete
	 * specific states; this leads to modification of the expired
	 * lists.
	 *
	 * XXXAE: do we need DYN_EXPIRED_LOCK? We can just use
	 * IPFW_UH_WLOCK to protect access to these lists.
	 */
	DYN_EXPIRED_LOCK();
	DYN_FREE_STATES(s4, s4n, ipv4);
#ifdef INET6
	DYN_FREE_STATES(s6, s6n, ipv6);
#endif
	DYN_EXPIRED_UNLOCK();
#undef DYN_FREE_STATES
}

/*
 * Returns:
 * 0 when the state is not matched by the specified range;
 * 1 when the state is matched by the specified range;
 * 2 when the state is matched by the specified range and deletion of
 *   dynamic states was requested.
 */
static int
dyn_match_range(uint16_t rulenum, uint8_t set, const ipfw_range_tlv *rt)
{

	MPASS(rt != NULL);
	/* flush all states */
	if (rt->flags & IPFW_RCFLAG_ALL) {
		if (rt->flags & IPFW_RCFLAG_DYNAMIC)
			return (2);	/* forced */
		return (1);
	}
	if ((rt->flags & IPFW_RCFLAG_SET) != 0 && set != rt->set)
		return (0);
	if ((rt->flags & IPFW_RCFLAG_RANGE) != 0 &&
	    (rulenum < rt->start_rule || rulenum > rt->end_rule))
		return (0);
	if (rt->flags & IPFW_RCFLAG_DYNAMIC)
		return (2);
	return (1);
}

static void
dyn_acquire_rule(struct ip_fw_chain *ch, struct dyn_data *data,
    struct ip_fw *rule, uint16_t kidx)
{
	struct dyn_state_obj *obj;

	/*
	 * Do not acquire the reference twice.
	 * This can happen when rule deletion is executed for the same
	 * range, but with a different ruleset id.
	 */
	if (data->flags & DYN_REFERENCED)
		return;

	IPFW_UH_WLOCK_ASSERT(ch);
	MPASS(kidx != 0);

	data->flags |= DYN_REFERENCED;
	/* Reference the named object */
	obj = SRV_OBJECT(ch, kidx);
	obj->no.refcnt++;
	MPASS(obj->no.etlv == IPFW_TLV_STATE_NAME);

	/* Reference the parent rule */
	rule->refcnt++;
}

static void
dyn_release_rule(struct ip_fw_chain *ch, struct dyn_data *data,
    struct ip_fw *rule, uint16_t kidx)
{
	struct dyn_state_obj *obj;

	IPFW_UH_WLOCK_ASSERT(ch);
	MPASS(kidx != 0);

	obj = SRV_OBJECT(ch, kidx);
	if (obj->no.refcnt == 1)
		dyn_destroy(ch, &obj->no);
	else
		obj->no.refcnt--;

	if (--rule->refcnt == 1)
		ipfw_free_rule(rule);
}

/*
 * We do not keep O_LIMIT_PARENT states when V_dyn_keep_states is enabled.
 * An O_LIMIT state is created when a new connection is going to be
 * established and there is no matching state.
 * So, since the old parent rule was deleted, we cannot create new
 * states with the old parent; thus we cannot account for new
 * connections together with the already established ones, and cannot
 * do proper limiting.
 */
static int
dyn_match_ipv4_state(struct ip_fw_chain *ch, struct dyn_ipv4_state *s,
    const ipfw_range_tlv *rt)
{
	struct ip_fw *rule;
	int ret;

	if (s->type == O_LIMIT_PARENT) {
		rule = s->limit->parent;
		return (dyn_match_range(s->limit->rulenum, rule->set, rt));
	}

	rule = s->data->parent;
	if (s->type == O_LIMIT)
		rule = ((struct dyn_ipv4_state *)rule)->limit->parent;

	ret = dyn_match_range(s->data->rulenum, rule->set, rt);
	if (ret == 0 || V_dyn_keep_states == 0 || ret > 1)
		return (ret);

	dyn_acquire_rule(ch, s->data, rule, s->kidx);
	return (0);
}

#ifdef INET6
static int
dyn_match_ipv6_state(struct ip_fw_chain *ch, struct dyn_ipv6_state *s,
    const ipfw_range_tlv *rt)
{
	struct ip_fw *rule;
	int ret;

	if (s->type == O_LIMIT_PARENT) {
		rule = s->limit->parent;
		return (dyn_match_range(s->limit->rulenum, rule->set, rt));
	}

	rule = s->data->parent;
	if (s->type == O_LIMIT)
		rule = ((struct dyn_ipv6_state *)rule)->limit->parent;

	ret = dyn_match_range(s->data->rulenum, rule->set, rt);
	if (ret == 0 || V_dyn_keep_states == 0 || ret > 1)
		return (ret);

	dyn_acquire_rule(ch, s->data, rule, s->kidx);
	return (0);
}
#endif

/*
 * Unlink expired entries from the states lists.
 * @rt can be used to specify the range of states for deletion.
 */
static void
dyn_expire_states(struct ip_fw_chain *ch, ipfw_range_tlv *rt)
{
	struct dyn_ipv4_slist expired_ipv4;
#ifdef INET6
	struct dyn_ipv6_slist expired_ipv6;
	struct dyn_ipv6_state *s6, *s6n, *s6p;
#endif
	struct dyn_ipv4_state *s4, *s4n, *s4p;
	void *rule;
	int bucket, removed, length, max_length;

	IPFW_UH_WLOCK_ASSERT(ch);

	/*
	 * Unlink expired states from each bucket.
	 * With the bucket lock held, iterate the entries of each list:
	 * ipv4, ipv4_parent, ipv6, and ipv6_parent. Check the expire
	 * time and unlink the entry from the list, link the entry into
	 * the temporary expired_xxx list, then bump the "del" bucket
	 * version.
	 *
	 * When an entry is removed, the corresponding states counter is
	 * decremented. If the entry has O_LIMIT type, the parent's
	 * reference counter is decremented.
	 *
	 * NOTE: this function can be called from userspace context
	 * when the user deletes rules. In this case all matched states
	 * will be forcibly unlinked. O_LIMIT_PARENT states will be kept
	 * in the expired lists until their reference counter becomes zero.
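 *
 * Per matched entry, the DYN_UNLINK_STATES() macro below performs, in
 * outline:
 *	CK_SLIST_REMOVE*()          - unlink from the bucket list;
 *	SLIST_INSERT_HEAD(expired)  - defer the actual free to
 *	                              dyn_free_states();
 *	DYN_COUNT_DEC()             - update the global counters;
 *	DYN_BUCKET_VERSION_BUMP()   - signal concurrent readers.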
 */
#define	DYN_UNLINK_STATES(s, prev, next, exp, af, name, extra)	do { \
	length = 0;						\
	removed = 0;						\
	prev = NULL;						\
	s = CK_SLIST_FIRST(&V_dyn_ ## name [bucket]);		\
	while (s != NULL) {					\
		next = CK_SLIST_NEXT(s, entry);			\
		if ((TIME_LEQ((s)->exp, time_uptime) && extra) || \
		    (rt != NULL &&				\
		     dyn_match_ ## af ## _state(ch, s, rt))) {	\
			if (prev != NULL)			\
				CK_SLIST_REMOVE_AFTER(prev, entry); \
			else					\
				CK_SLIST_REMOVE_HEAD(		\
				    &V_dyn_ ## name [bucket], entry); \
			removed++;				\
			SLIST_INSERT_HEAD(&expired_ ## af, s, expired); \
			if (s->type == O_LIMIT_PARENT)		\
				DYN_COUNT_DEC(dyn_parent_count); \
			else {					\
				DYN_COUNT_DEC(dyn_count);	\
				if (s->data->flags & DYN_REFERENCED) { \
					rule = s->data->parent;	\
					if (s->type == O_LIMIT)	\
						rule = ((__typeof(s)) \
						    rule)->limit->parent; \
					dyn_release_rule(ch, s->data, \
					    rule, s->kidx);	\
				}				\
				if (s->type == O_LIMIT) {	\
					s = s->data->parent;	\
					DPARENT_COUNT_DEC(s->limit); \
				}				\
			}					\
		} else {					\
			prev = s;				\
			length++;				\
		}						\
		s = next;					\
	}							\
	if (removed != 0)					\
		DYN_BUCKET_VERSION_BUMP(bucket, name ## _del);	\
	if (length > max_length)				\
		max_length = length;				\
} while (0)

	SLIST_INIT(&expired_ipv4);
#ifdef INET6
	SLIST_INIT(&expired_ipv6);
#endif
	max_length = 0;
	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
		DYN_BUCKET_LOCK(bucket);
		DYN_UNLINK_STATES(s4, s4p, s4n, data->expire, ipv4, ipv4, 1);
		DYN_UNLINK_STATES(s4, s4p, s4n, limit->expire, ipv4,
		    ipv4_parent, (s4->limit->count == 0));
#ifdef INET6
		DYN_UNLINK_STATES(s6, s6p, s6n, data->expire, ipv6, ipv6, 1);
		DYN_UNLINK_STATES(s6, s6p, s6n, limit->expire, ipv6,
		    ipv6_parent, (s6->limit->count == 0));
#endif
		DYN_BUCKET_UNLOCK(bucket);
	}
	/* Update curr_max_length for statistics. */
	V_curr_max_length = max_length;
	/*
	 * Concatenate temporary lists with global expired lists.
 */
	DYN_EXPIRED_LOCK();
	SLIST_CONCAT(&V_dyn_expired_ipv4, &expired_ipv4,
	    dyn_ipv4_state, expired);
#ifdef INET6
	SLIST_CONCAT(&V_dyn_expired_ipv6, &expired_ipv6,
	    dyn_ipv6_state, expired);
#endif
	DYN_EXPIRED_UNLOCK();
#undef DYN_UNLINK_STATES
#undef DYN_UNREF_STATES
}

static struct mbuf *
dyn_mgethdr(int len, uint16_t fibnum)
{
	struct mbuf *m;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
#ifdef MAC
	mac_netinet_firewall_send(m);
#endif
	M_SETFIB(m, fibnum);
	m->m_data += max_linkhdr;
	m->m_flags |= M_SKIP_FIREWALL;
	m->m_len = m->m_pkthdr.len = len;
	bzero(m->m_data, len);
	return (m);
}

static void
dyn_make_keepalive_ipv4(struct mbuf *m, in_addr_t src, in_addr_t dst,
    uint32_t seq, uint32_t ack, uint16_t sport, uint16_t dport)
{
	struct tcphdr *tcp;
	struct ip *ip;

	ip = mtod(m, struct ip *);
	ip->ip_v = 4;
	ip->ip_hl = sizeof(*ip) >> 2;
	ip->ip_tos = IPTOS_LOWDELAY;
	ip->ip_len = htons(m->m_len);
	ip->ip_off |= htons(IP_DF);
	ip->ip_ttl = V_ip_defttl;
	ip->ip_p = IPPROTO_TCP;
	ip->ip_src.s_addr = htonl(src);
	ip->ip_dst.s_addr = htonl(dst);

	tcp = mtodo(m, sizeof(struct ip));
	tcp->th_sport = htons(sport);
	tcp->th_dport = htons(dport);
	tcp->th_off = sizeof(struct tcphdr) >> 2;
	tcp->th_seq = htonl(seq);
	tcp->th_ack = htonl(ack);
	tcp->th_flags = TH_ACK;
	tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
	    htons(sizeof(struct tcphdr) + IPPROTO_TCP));

	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	m->m_pkthdr.csum_flags = CSUM_TCP;
}

static void
dyn_enqueue_keepalive_ipv4(struct mbufq *q, const struct dyn_ipv4_state *s)
{
	struct mbuf *m;

	if ((s->data->state & ACK_FWD) == 0 && s->data->ack_fwd > 0) {
		m = dyn_mgethdr(sizeof(struct ip) + sizeof(struct tcphdr),
		    s->data->fibnum);
		if (m != NULL) {
			dyn_make_keepalive_ipv4(m, s->dst, s->src,
			    s->data->ack_fwd - 1, s->data->ack_rev,
			    s->dport, s->sport);
			if (mbufq_enqueue(q, m)) {
				m_freem(m);
				log(LOG_DEBUG, "ipfw: limit for IPv4 "
				    "keepalive queue is reached.\n");
				return;
			}
		}
	}

	if ((s->data->state & ACK_REV) == 0 && s->data->ack_rev > 0) {
		m = dyn_mgethdr(sizeof(struct ip) + sizeof(struct tcphdr),
		    s->data->fibnum);
		if (m != NULL) {
			dyn_make_keepalive_ipv4(m, s->src, s->dst,
			    s->data->ack_rev - 1, s->data->ack_fwd,
			    s->sport, s->dport);
			if (mbufq_enqueue(q, m)) {
				m_freem(m);
				log(LOG_DEBUG, "ipfw: limit for IPv4 "
				    "keepalive queue is reached.\n");
				return;
			}
		}
	}
}

/*
 * Prepare and send keep-alive packets.
 */
static void
dyn_send_keepalive_ipv4(struct ip_fw_chain *chain)
{
	struct mbufq q;
	struct mbuf *m;
	struct dyn_ipv4_state *s;
	uint32_t bucket;

	mbufq_init(&q, INT_MAX);
	IPFW_UH_RLOCK(chain);
	/*
	 * It is safe not to use a hazard pointer and just do lockless
	 * access to the lists, because state entries cannot be deleted
	 * while we hold IPFW_UH_RLOCK.
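 *
 * Note on the probe itself: dyn_enqueue_keepalive_ipv4() above builds
 * the classic TCP keepalive, an ACK-only segment whose sequence number
 * is one below the ACK value recorded for the peer (ack - 1), so the
 * peer answers with a duplicate ACK that refreshes this state.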
 */
	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
		CK_SLIST_FOREACH(s, &V_dyn_ipv4[bucket], entry) {
			/*
			 * Only established TCP connections that will
			 * expire within dyn_keepalive_interval.
			 */
			if (s->proto != IPPROTO_TCP ||
			    (s->data->state & BOTH_SYN) != BOTH_SYN ||
			    TIME_LEQ(time_uptime + V_dyn_keepalive_interval,
			    s->data->expire))
				continue;
			dyn_enqueue_keepalive_ipv4(&q, s);
		}
	}
	IPFW_UH_RUNLOCK(chain);
	while ((m = mbufq_dequeue(&q)) != NULL)
		ip_output(m, NULL, NULL, 0, NULL, NULL);
}

#ifdef INET6
static void
dyn_make_keepalive_ipv6(struct mbuf *m, const struct in6_addr *src,
    const struct in6_addr *dst, uint32_t zoneid, uint32_t seq, uint32_t ack,
    uint16_t sport, uint16_t dport)
{
	struct tcphdr *tcp;
	struct ip6_hdr *ip6;

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_vfc |= IPV6_VERSION;
	ip6->ip6_plen = htons(sizeof(struct tcphdr));
	ip6->ip6_nxt = IPPROTO_TCP;
	ip6->ip6_hlim = IPV6_DEFHLIM;
	ip6->ip6_src = *src;
	if (IN6_IS_ADDR_LINKLOCAL(src))
		ip6->ip6_src.s6_addr16[1] = htons(zoneid & 0xffff);
	ip6->ip6_dst = *dst;
	if (IN6_IS_ADDR_LINKLOCAL(dst))
		ip6->ip6_dst.s6_addr16[1] = htons(zoneid & 0xffff);

	tcp = mtodo(m, sizeof(struct ip6_hdr));
	tcp->th_sport = htons(sport);
	tcp->th_dport = htons(dport);
	tcp->th_off = sizeof(struct tcphdr) >> 2;
	tcp->th_seq = htonl(seq);
	tcp->th_ack = htonl(ack);
	tcp->th_flags = TH_ACK;
	tcp->th_sum = in6_cksum_pseudo(ip6, sizeof(struct tcphdr),
	    IPPROTO_TCP, 0);

	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
}

static void
dyn_enqueue_keepalive_ipv6(struct mbufq *q, const struct dyn_ipv6_state *s)
{
	struct mbuf *m;

	if ((s->data->state & ACK_FWD) == 0 && s->data->ack_fwd > 0) {
		m = dyn_mgethdr(sizeof(struct ip6_hdr) +
		    sizeof(struct tcphdr), s->data->fibnum);
		if (m != NULL) {
			dyn_make_keepalive_ipv6(m, &s->dst, &s->src,
			    s->zoneid, s->data->ack_fwd - 1, s->data->ack_rev,
			    s->dport, s->sport);
			if (mbufq_enqueue(q, m)) {
				m_freem(m);
				log(LOG_DEBUG, "ipfw: limit for IPv6 "
				    "keepalive queue is reached.\n");
				return;
			}
		}
	}

	if ((s->data->state & ACK_REV) == 0 && s->data->ack_rev > 0) {
		m = dyn_mgethdr(sizeof(struct ip6_hdr) +
		    sizeof(struct tcphdr), s->data->fibnum);
		if (m != NULL) {
			dyn_make_keepalive_ipv6(m, &s->src, &s->dst,
			    s->zoneid, s->data->ack_rev - 1, s->data->ack_fwd,
			    s->sport, s->dport);
			if (mbufq_enqueue(q, m)) {
				m_freem(m);
				log(LOG_DEBUG, "ipfw: limit for IPv6 "
				    "keepalive queue is reached.\n");
				return;
			}
		}
	}
}

static void
dyn_send_keepalive_ipv6(struct ip_fw_chain *chain)
{
	struct mbufq q;
	struct mbuf *m;
	struct dyn_ipv6_state *s;
	uint32_t bucket;

	mbufq_init(&q, INT_MAX);
	IPFW_UH_RLOCK(chain);
	/*
	 * It is safe not to use a hazard pointer and just do lockless
	 * access to the lists, because state entries cannot be deleted
	 * while we hold IPFW_UH_RLOCK.
 */
	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
		CK_SLIST_FOREACH(s, &V_dyn_ipv6[bucket], entry) {
			/*
			 * Only established TCP connections that will
			 * expire within dyn_keepalive_interval.
			 */
			if (s->proto != IPPROTO_TCP ||
			    (s->data->state & BOTH_SYN) != BOTH_SYN ||
			    TIME_LEQ(time_uptime + V_dyn_keepalive_interval,
			    s->data->expire))
				continue;
			dyn_enqueue_keepalive_ipv6(&q, s);
		}
	}
	IPFW_UH_RUNLOCK(chain);
	while ((m = mbufq_dequeue(&q)) != NULL)
		ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
}
#endif /* INET6 */

static void
dyn_grow_hashtable(struct ip_fw_chain *chain, uint32_t new, int flags)
{
#ifdef INET6
	struct dyn_ipv6ck_slist *ipv6, *ipv6_parent;
	uint32_t *ipv6_add, *ipv6_del, *ipv6_parent_add, *ipv6_parent_del;
	struct dyn_ipv6_state *s6;
#endif
	struct dyn_ipv4ck_slist *ipv4, *ipv4_parent;
	uint32_t *ipv4_add, *ipv4_del, *ipv4_parent_add, *ipv4_parent_del;
	struct dyn_ipv4_state *s4;
	struct mtx *bucket_lock;
	void *tmp;
	uint32_t bucket;

	MPASS(powerof2(new));
	DYN_DEBUG("grow hash size %u -> %u", V_curr_dyn_buckets, new);
	/*
	 * Allocate and initialize new lists.
	 */
	bucket_lock = malloc(new * sizeof(struct mtx), M_IPFW,
	    flags | M_ZERO);
	if (bucket_lock == NULL)
		return;

	ipv4 = ipv4_parent = NULL;
	ipv4_add = ipv4_del = ipv4_parent_add = ipv4_parent_del = NULL;
#ifdef INET6
	ipv6 = ipv6_parent = NULL;
	ipv6_add = ipv6_del = ipv6_parent_add = ipv6_parent_del = NULL;
#endif

	ipv4 = malloc(new * sizeof(struct dyn_ipv4ck_slist), M_IPFW,
	    flags | M_ZERO);
	if (ipv4 == NULL)
		goto bad;
	ipv4_parent = malloc(new * sizeof(struct dyn_ipv4ck_slist), M_IPFW,
	    flags | M_ZERO);
	if (ipv4_parent == NULL)
		goto bad;
	ipv4_add = malloc(new * sizeof(uint32_t), M_IPFW, flags | M_ZERO);
	if (ipv4_add == NULL)
		goto bad;
	ipv4_del = malloc(new * sizeof(uint32_t), M_IPFW, flags | M_ZERO);
	if (ipv4_del == NULL)
		goto bad;
	ipv4_parent_add = malloc(new * sizeof(uint32_t), M_IPFW,
	    flags | M_ZERO);
	if (ipv4_parent_add == NULL)
		goto bad;
	ipv4_parent_del = malloc(new * sizeof(uint32_t), M_IPFW,
	    flags | M_ZERO);
	if (ipv4_parent_del == NULL)
		goto bad;
#ifdef INET6
	ipv6 = malloc(new * sizeof(struct dyn_ipv6ck_slist), M_IPFW,
	    flags | M_ZERO);
	if (ipv6 == NULL)
		goto bad;
	ipv6_parent = malloc(new * sizeof(struct dyn_ipv6ck_slist), M_IPFW,
	    flags | M_ZERO);
	if (ipv6_parent == NULL)
		goto bad;
	ipv6_add = malloc(new * sizeof(uint32_t), M_IPFW, flags | M_ZERO);
	if (ipv6_add == NULL)
		goto bad;
	ipv6_del = malloc(new * sizeof(uint32_t), M_IPFW, flags | M_ZERO);
	if (ipv6_del == NULL)
		goto bad;
	ipv6_parent_add = malloc(new * sizeof(uint32_t), M_IPFW,
	    flags | M_ZERO);
	if (ipv6_parent_add == NULL)
		goto bad;
	ipv6_parent_del = malloc(new * sizeof(uint32_t), M_IPFW,
	    flags | M_ZERO);
	if (ipv6_parent_del == NULL)
		goto bad;
#endif
	for (bucket = 0; bucket < new; bucket++) {
		DYN_BUCKET_LOCK_INIT(bucket_lock, bucket);
		CK_SLIST_INIT(&ipv4[bucket]);
		CK_SLIST_INIT(&ipv4_parent[bucket]);
#ifdef INET6
		CK_SLIST_INIT(&ipv6[bucket]);
		CK_SLIST_INIT(&ipv6_parent[bucket]);
#endif
	}

#define	DYN_RELINK_STATES(s, hval, i, head, ohead)	do {	\
	while ((s = CK_SLIST_FIRST(&V_dyn_ ## ohead[i])) != NULL) { \
		CK_SLIST_REMOVE_HEAD(&V_dyn_ ## ohead[i], entry); \
		CK_SLIST_INSERT_HEAD(&head[DYN_BUCKET(s->hval, new)], \
		    s, entry);					\
	}							\
} while (0)
	/*
	 * Prevent rule changes from userland.
	 */
	IPFW_UH_WLOCK(chain);
	/*
	 * Hold traffic processing until we finish the resize to
	 * prevent access to the states lists.
	 */
	IPFW_WLOCK(chain);
	/* Re-link all dynamic states */
	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
		DYN_RELINK_STATES(s4, data->hashval, bucket, ipv4, ipv4);
		DYN_RELINK_STATES(s4, limit->hashval, bucket, ipv4_parent,
		    ipv4_parent);
#ifdef INET6
		DYN_RELINK_STATES(s6, data->hashval, bucket, ipv6, ipv6);
		DYN_RELINK_STATES(s6, limit->hashval, bucket, ipv6_parent,
		    ipv6_parent);
#endif
	}

#define	DYN_SWAP_PTR(old, new, tmp) do {		\
	tmp = old;					\
	old = new;					\
	new = tmp;					\
} while (0)
	/* Swap pointers */
	DYN_SWAP_PTR(V_dyn_bucket_lock, bucket_lock, tmp);
	DYN_SWAP_PTR(V_dyn_ipv4, ipv4, tmp);
	DYN_SWAP_PTR(V_dyn_ipv4_parent, ipv4_parent, tmp);
	DYN_SWAP_PTR(V_dyn_ipv4_add, ipv4_add, tmp);
	DYN_SWAP_PTR(V_dyn_ipv4_parent_add, ipv4_parent_add, tmp);
	DYN_SWAP_PTR(V_dyn_ipv4_del, ipv4_del, tmp);
	DYN_SWAP_PTR(V_dyn_ipv4_parent_del, ipv4_parent_del, tmp);

#ifdef INET6
	DYN_SWAP_PTR(V_dyn_ipv6, ipv6, tmp);
	DYN_SWAP_PTR(V_dyn_ipv6_parent, ipv6_parent, tmp);
	DYN_SWAP_PTR(V_dyn_ipv6_add, ipv6_add, tmp);
	DYN_SWAP_PTR(V_dyn_ipv6_parent_add, ipv6_parent_add, tmp);
	DYN_SWAP_PTR(V_dyn_ipv6_del, ipv6_del, tmp);
	DYN_SWAP_PTR(V_dyn_ipv6_parent_del, ipv6_parent_del, tmp);
#endif
	bucket = V_curr_dyn_buckets;
	V_curr_dyn_buckets = new;

	IPFW_WUNLOCK(chain);
	IPFW_UH_WUNLOCK(chain);

	/* Release old resources */
	while (bucket-- != 0)
		DYN_BUCKET_LOCK_DESTROY(bucket_lock, bucket);
bad:
	free(bucket_lock, M_IPFW);
	free(ipv4, M_IPFW);
	free(ipv4_parent, M_IPFW);
	free(ipv4_add, M_IPFW);
	free(ipv4_parent_add, M_IPFW);
	free(ipv4_del, M_IPFW);
	free(ipv4_parent_del, M_IPFW);
#ifdef INET6
	free(ipv6, M_IPFW);
	free(ipv6_parent, M_IPFW);
	free(ipv6_add, M_IPFW);
	free(ipv6_parent_add, M_IPFW);
	free(ipv6_del, M_IPFW);
	free(ipv6_parent_del, M_IPFW);
#endif
}

/*
 * This function performs various maintenance on the dynamic hash
 * lists. Currently it is called every second.
 */
static void
dyn_tick(void *vnetx)
{
	struct epoch_tracker et;
	uint32_t buckets;

	CURVNET_SET((struct vnet *)vnetx);
	/*
	 * First free the states unlinked in previous passes.
	 */
	dyn_free_states(&V_layer3_chain);
	/*
	 * Now unlink other expired states.
	 * We use IPFW_UH_WLOCK to avoid a concurrent call of
	 * dyn_expire_states(). It is the only function that deletes
	 * state entries from the states lists.
	 */
	IPFW_UH_WLOCK(&V_layer3_chain);
	dyn_expire_states(&V_layer3_chain, NULL);
	IPFW_UH_WUNLOCK(&V_layer3_chain);
	/*
	 * Send keepalives if they are enabled and the time has come.
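 *
 * With the defaults set in ipfw_dyn_init() below, this fires every
 * dyn_keepalive_period (5) seconds and probes only states that would
 * otherwise expire within dyn_keepalive_interval (20) seconds.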
 */
	if (V_dyn_keepalive != 0 &&
	    V_dyn_keepalive_last + V_dyn_keepalive_period <= time_uptime) {
		V_dyn_keepalive_last = time_uptime;
		NET_EPOCH_ENTER(et);
		dyn_send_keepalive_ipv4(&V_layer3_chain);
#ifdef INET6
		dyn_send_keepalive_ipv6(&V_layer3_chain);
#endif
		NET_EPOCH_EXIT(et);
	}
	/*
	 * Check if we need to resize the hash:
	 * if the current number of states exceeds the number of buckets
	 * in the hash, and dyn_buckets_max permits growing the number of
	 * buckets, then do it. Grow the hash size to the minimum power
	 * of 2 that is bigger than the current states count.
	 */
	if (V_curr_dyn_buckets < V_dyn_buckets_max &&
	    (V_curr_dyn_buckets < V_dyn_count / 2 || (
	    V_curr_dyn_buckets < V_dyn_count && V_curr_max_length > 8))) {
		buckets = 1 << fls(V_dyn_count);
		if (buckets > V_dyn_buckets_max)
			buckets = V_dyn_buckets_max;
		dyn_grow_hashtable(&V_layer3_chain, buckets, M_NOWAIT);
	}

	callout_reset_on(&V_dyn_timeout, hz, dyn_tick, vnetx, 0);
	CURVNET_RESTORE();
}

void
ipfw_expire_dyn_states(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
{
	/*
	 * Do not perform any checks if we currently have no dynamic
	 * states.
	 */
	if (V_dyn_count == 0)
		return;

	IPFW_UH_WLOCK_ASSERT(chain);
	dyn_expire_states(chain, rt);
}

/*
 * Pass through all states and reset the eaction for orphaned rules.
 */
void
ipfw_dyn_reset_eaction(struct ip_fw_chain *ch, uint16_t eaction_id,
    uint16_t default_id, uint16_t instance_id)
{
#ifdef INET6
	struct dyn_ipv6_state *s6;
#endif
	struct dyn_ipv4_state *s4;
	struct ip_fw *rule;
	uint32_t bucket;

#define	DYN_RESET_EACTION(s, h, b)				\
	CK_SLIST_FOREACH(s, &V_dyn_ ## h[b], entry) {		\
		if ((s->data->flags & DYN_REFERENCED) == 0)	\
			continue;				\
		rule = s->data->parent;				\
		if (s->type == O_LIMIT)				\
			rule = ((__typeof(s))rule)->limit->parent; \
		ipfw_reset_eaction(ch, rule, eaction_id,	\
		    default_id, instance_id);			\
	}

	IPFW_UH_WLOCK_ASSERT(ch);
	if (V_dyn_count == 0)
		return;
	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
		DYN_RESET_EACTION(s4, ipv4, bucket);
#ifdef INET6
		DYN_RESET_EACTION(s6, ipv6, bucket);
#endif
	}
}

/*
 * Returns the size of dynamic states in the legacy format.
 */
int
ipfw_dyn_len(void)
{

	return ((V_dyn_count + V_dyn_parent_count) * sizeof(ipfw_dyn_rule));
}

/*
 * Returns the number of dynamic states.
 * Marks every named object index used by dynamic states with a bit in
 * @bmask. Returns the number of named objects accounted in bmask via
 * @nocnt.
 * Used by dump format v1 (current).
 */
uint32_t
ipfw_dyn_get_count(uint32_t *bmask, int *nocnt)
{
#ifdef INET6
	struct dyn_ipv6_state *s6;
#endif
	struct dyn_ipv4_state *s4;
	uint32_t bucket;

#define	DYN_COUNT_OBJECTS(s, h, b)				\
	CK_SLIST_FOREACH(s, &V_dyn_ ## h[b], entry) {		\
		MPASS(s->kidx != 0);				\
		if (ipfw_mark_object_kidx(bmask, IPFW_TLV_STATE_NAME, \
		    s->kidx) != 0)				\
			(*nocnt)++;				\
	}

	IPFW_UH_RLOCK_ASSERT(&V_layer3_chain);

	/* No need to pass through all the buckets. */
	*nocnt = 0;
	if (V_dyn_count + V_dyn_parent_count == 0)
		return (0);

	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
		DYN_COUNT_OBJECTS(s4, ipv4, bucket);
#ifdef INET6
		DYN_COUNT_OBJECTS(s6, ipv6, bucket);
#endif
	}

	return (V_dyn_count + V_dyn_parent_count);
}

/*
 * Check if the rule contains at least one dynamic opcode.
 *
 * Returns 1 if such an opcode is found, 0 otherwise.
 */
int
ipfw_is_dyn_rule(struct ip_fw *rule)
{
	int cmdlen, l;
	ipfw_insn *cmd;

	l = rule->cmd_len;
	cmd = rule->cmd;
	cmdlen = 0;
	for ( ; l > 0; l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);

		switch (cmd->opcode) {
		case O_LIMIT:
		case O_KEEP_STATE:
		case O_PROBE_STATE:
		case O_CHECK_STATE:
			return (1);
		}
	}

	return (0);
}

static void
dyn_export_parent(const struct dyn_parent *p, uint16_t kidx, uint8_t set,
    ipfw_dyn_rule *dst)
{

	dst->dyn_type = O_LIMIT_PARENT;
	dst->kidx = kidx;
	dst->count = (uint16_t)DPARENT_COUNT(p);
	dst->expire = TIME_LEQ(p->expire, time_uptime) ? 0:
	    p->expire - time_uptime;

	/* 'rule' is used to pass up the rule number and set */
	memcpy(&dst->rule, &p->rulenum, sizeof(p->rulenum));

	/* store the set number into the high word of the dst->rule pointer */
	memcpy((char *)&dst->rule + sizeof(p->rulenum), &set, sizeof(set));

	/* unused fields */
	dst->pcnt = 0;
	dst->bcnt = 0;
	dst->parent = NULL;
	dst->state = 0;
	dst->ack_fwd = 0;
	dst->ack_rev = 0;
	dst->bucket = p->hashval;
	/*
	 * The legacy userland code will interpret a NULL here as a marker
	 * for the last dynamic rule.
	 */
	dst->next = (ipfw_dyn_rule *)1;
}

static void
dyn_export_data(const struct dyn_data *data, uint16_t kidx, uint8_t type,
    uint8_t set, ipfw_dyn_rule *dst)
{

	dst->dyn_type = type;
	dst->kidx = kidx;
	dst->pcnt = data->pcnt_fwd + data->pcnt_rev;
	dst->bcnt = data->bcnt_fwd + data->bcnt_rev;
	dst->expire = TIME_LEQ(data->expire, time_uptime) ? 0:
	    data->expire - time_uptime;

	/* 'rule' is used to pass up the rule number and set */
	memcpy(&dst->rule, &data->rulenum, sizeof(data->rulenum));

	/* store the set number into the high word of the dst->rule pointer */
	memcpy((char *)&dst->rule + sizeof(data->rulenum), &set, sizeof(set));

	dst->state = data->state;
	if (data->flags & DYN_REFERENCED)
		dst->state |= IPFW_DYN_ORPHANED;

	/* unused fields */
	dst->parent = NULL;
	dst->ack_fwd = data->ack_fwd;
	dst->ack_rev = data->ack_rev;
	dst->count = 0;
	dst->bucket = data->hashval;
	/*
	 * The legacy userland code will interpret a NULL here as a marker
	 * for the last dynamic rule.
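 *
 * Layout of the overloaded dst->rule pointer used by both export
 * helpers (a sketch):
 *	bytes 0..1: rulenum (uint16_t)
 *	byte  2:    set     (uint8_t)
 * Legacy userland reassembles the rule number and the set from these
 * bytes.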
 */
	dst->next = (ipfw_dyn_rule *)1;
}

static void
dyn_export_ipv4_state(const struct dyn_ipv4_state *s, ipfw_dyn_rule *dst)
{
	struct ip_fw *rule;

	switch (s->type) {
	case O_LIMIT_PARENT:
		rule = s->limit->parent;
		dyn_export_parent(s->limit, s->kidx, rule->set, dst);
		break;
	default:
		rule = s->data->parent;
		if (s->type == O_LIMIT)
			rule = ((struct dyn_ipv4_state *)rule)->limit->parent;
		dyn_export_data(s->data, s->kidx, s->type, rule->set, dst);
	}

	dst->id.dst_ip = s->dst;
	dst->id.src_ip = s->src;
	dst->id.dst_port = s->dport;
	dst->id.src_port = s->sport;
	dst->id.fib = s->data->fibnum;
	dst->id.proto = s->proto;
	dst->id._flags = 0;
	dst->id.addr_type = 4;

	memset(&dst->id.dst_ip6, 0, sizeof(dst->id.dst_ip6));
	memset(&dst->id.src_ip6, 0, sizeof(dst->id.src_ip6));
	dst->id.flow_id6 = dst->id.extra = 0;
}

#ifdef INET6
static void
dyn_export_ipv6_state(const struct dyn_ipv6_state *s, ipfw_dyn_rule *dst)
{
	struct ip_fw *rule;

	switch (s->type) {
	case O_LIMIT_PARENT:
		rule = s->limit->parent;
		dyn_export_parent(s->limit, s->kidx, rule->set, dst);
		break;
	default:
		rule = s->data->parent;
		if (s->type == O_LIMIT)
			rule = ((struct dyn_ipv6_state *)rule)->limit->parent;
		dyn_export_data(s->data, s->kidx, s->type, rule->set, dst);
	}

	dst->id.src_ip6 = s->src;
	dst->id.dst_ip6 = s->dst;
	dst->id.dst_port = s->dport;
	dst->id.src_port = s->sport;
	dst->id.fib = s->data->fibnum;
	dst->id.proto = s->proto;
	dst->id._flags = 0;
	dst->id.addr_type = 6;

	dst->id.dst_ip = dst->id.src_ip = 0;
	dst->id.flow_id6 = dst->id.extra = 0;
}
#endif /* INET6 */

/*
 * Fills the buffer given by @sd with dynamic states.
 * Used by dump format v1 (current).
 *
 * Returns 0 on success.
 */
int
ipfw_dump_states(struct ip_fw_chain *chain, struct sockopt_data *sd)
{
#ifdef INET6
	struct dyn_ipv6_state *s6;
#endif
	struct dyn_ipv4_state *s4;
	ipfw_obj_dyntlv *dst, *last;
	ipfw_obj_ctlv *ctlv;
	uint32_t bucket;

	if (V_dyn_count == 0)
		return (0);

	/*
	 * IPFW_UH_RLOCK guarantees that another userland request
	 * and the callout thread will not delete entries from the
	 * states lists.
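 *
 * The dump produced below (format v1) is, schematically:
 *	ipfw_obj_ctlv   - header, type IPFW_TLV_DYNSTATE_LIST;
 *	ipfw_obj_dyntlv - one per state, type IPFW_TLV_DYN_ENT,
 *	                  the last one flagged with IPFW_DF_LAST.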
3088 */ 3089 IPFW_UH_RLOCK_ASSERT(chain); 3090 3091 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv)); 3092 if (ctlv == NULL) 3093 return (ENOMEM); 3094 ctlv->head.type = IPFW_TLV_DYNSTATE_LIST; 3095 ctlv->objsize = sizeof(ipfw_obj_dyntlv); 3096 last = NULL; 3097 3098 #define DYN_EXPORT_STATES(s, af, h, b) \ 3099 CK_SLIST_FOREACH(s, &V_dyn_ ## h[b], entry) { \ 3100 dst = (ipfw_obj_dyntlv *)ipfw_get_sopt_space(sd, \ 3101 sizeof(ipfw_obj_dyntlv)); \ 3102 if (dst == NULL) \ 3103 return (ENOMEM); \ 3104 dyn_export_ ## af ## _state(s, &dst->state); \ 3105 dst->head.length = sizeof(ipfw_obj_dyntlv); \ 3106 dst->head.type = IPFW_TLV_DYN_ENT; \ 3107 last = dst; \ 3108 } 3109 3110 for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) { 3111 DYN_EXPORT_STATES(s4, ipv4, ipv4_parent, bucket); 3112 DYN_EXPORT_STATES(s4, ipv4, ipv4, bucket); 3113 #ifdef INET6 3114 DYN_EXPORT_STATES(s6, ipv6, ipv6_parent, bucket); 3115 DYN_EXPORT_STATES(s6, ipv6, ipv6, bucket); 3116 #endif /* INET6 */ 3117 } 3118 3119 /* mark last dynamic rule */ 3120 if (last != NULL) 3121 last->head.flags = IPFW_DF_LAST; /* XXX: unused */ 3122 return (0); 3123 #undef DYN_EXPORT_STATES 3124 } 3125 3126 /* 3127 * Fill given buffer with dynamic states (legacy format). 3128 * IPFW_UH_RLOCK has to be held while calling. 3129 */ 3130 void 3131 ipfw_get_dynamic(struct ip_fw_chain *chain, char **pbp, const char *ep) 3132 { 3133 #ifdef INET6 3134 struct dyn_ipv6_state *s6; 3135 #endif 3136 struct dyn_ipv4_state *s4; 3137 ipfw_dyn_rule *p, *last = NULL; 3138 char *bp; 3139 uint32_t bucket; 3140 3141 if (V_dyn_count == 0) 3142 return; 3143 bp = *pbp; 3144 3145 IPFW_UH_RLOCK_ASSERT(chain); 3146 3147 #define DYN_EXPORT_STATES(s, af, head, b) \ 3148 CK_SLIST_FOREACH(s, &V_dyn_ ## head[b], entry) { \ 3149 if (bp + sizeof(*p) > ep) \ 3150 break; \ 3151 p = (ipfw_dyn_rule *)bp; \ 3152 dyn_export_ ## af ## _state(s, p); \ 3153 last = p; \ 3154 bp += sizeof(*p); \ 3155 } 3156 3157 for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) { 3158 DYN_EXPORT_STATES(s4, ipv4, ipv4_parent, bucket); 3159 DYN_EXPORT_STATES(s4, ipv4, ipv4, bucket); 3160 #ifdef INET6 3161 DYN_EXPORT_STATES(s6, ipv6, ipv6_parent, bucket); 3162 DYN_EXPORT_STATES(s6, ipv6, ipv6, bucket); 3163 #endif /* INET6 */ 3164 } 3165 3166 if (last != NULL) /* mark last dynamic rule */ 3167 last->next = NULL; 3168 *pbp = bp; 3169 #undef DYN_EXPORT_STATES 3170 } 3171 3172 void 3173 ipfw_dyn_init(struct ip_fw_chain *chain) 3174 { 3175 3176 #ifdef IPFIREWALL_JENKINSHASH 3177 V_dyn_hashseed = arc4random(); 3178 #endif 3179 V_dyn_max = 16384; /* max # of states */ 3180 V_dyn_parent_max = 4096; /* max # of parent states */ 3181 V_dyn_buckets_max = 8192; /* must be power of 2 */ 3182 3183 V_dyn_ack_lifetime = 300; 3184 V_dyn_syn_lifetime = 20; 3185 V_dyn_fin_lifetime = 1; 3186 V_dyn_rst_lifetime = 1; 3187 V_dyn_udp_lifetime = 10; 3188 V_dyn_short_lifetime = 5; 3189 3190 V_dyn_keepalive_interval = 20; 3191 V_dyn_keepalive_period = 5; 3192 V_dyn_keepalive = 1; /* send keepalives */ 3193 V_dyn_keepalive_last = time_uptime; 3194 3195 V_dyn_data_zone = uma_zcreate("IPFW dynamic states data", 3196 sizeof(struct dyn_data), NULL, NULL, NULL, NULL, 3197 UMA_ALIGN_PTR, 0); 3198 uma_zone_set_max(V_dyn_data_zone, V_dyn_max); 3199 3200 V_dyn_parent_zone = uma_zcreate("IPFW parent dynamic states", 3201 sizeof(struct dyn_parent), NULL, NULL, NULL, NULL, 3202 UMA_ALIGN_PTR, 0); 3203 uma_zone_set_max(V_dyn_parent_zone, V_dyn_parent_max); 3204 3205 SLIST_INIT(&V_dyn_expired_ipv4); 3206 V_dyn_ipv4 = NULL; 
	V_dyn_ipv4_parent = NULL;
	V_dyn_ipv4_zone = uma_zcreate("IPFW IPv4 dynamic states",
	    sizeof(struct dyn_ipv4_state), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

#ifdef INET6
	SLIST_INIT(&V_dyn_expired_ipv6);
	V_dyn_ipv6 = NULL;
	V_dyn_ipv6_parent = NULL;
	V_dyn_ipv6_zone = uma_zcreate("IPFW IPv6 dynamic states",
	    sizeof(struct dyn_ipv6_state), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
#endif

	/* Initialize buckets. */
	V_curr_dyn_buckets = 0;
	V_dyn_bucket_lock = NULL;
	dyn_grow_hashtable(chain, 256, M_WAITOK);

	if (IS_DEFAULT_VNET(curvnet))
		dyn_hp_cache = malloc(mp_ncpus * sizeof(void *), M_IPFW,
		    M_WAITOK | M_ZERO);

	DYN_EXPIRED_LOCK_INIT();
	callout_init(&V_dyn_timeout, 1);
	callout_reset(&V_dyn_timeout, hz, dyn_tick, curvnet);
	IPFW_ADD_OBJ_REWRITER(IS_DEFAULT_VNET(curvnet), dyn_opcodes);
}

void
ipfw_dyn_uninit(int pass)
{
#ifdef INET6
	struct dyn_ipv6_state *s6;
#endif
	struct dyn_ipv4_state *s4;
	int bucket;

	if (pass == 0) {
		callout_drain(&V_dyn_timeout);
		return;
	}
	IPFW_DEL_OBJ_REWRITER(IS_DEFAULT_VNET(curvnet), dyn_opcodes);
	DYN_EXPIRED_LOCK_DESTROY();

#define	DYN_FREE_STATES_FORCED(CK, s, af, name, en)	do {	\
	while ((s = CK ## SLIST_FIRST(&V_dyn_ ## name)) != NULL) { \
		CK ## SLIST_REMOVE_HEAD(&V_dyn_ ## name, en);	\
		if (s->type == O_LIMIT_PARENT)			\
			uma_zfree(V_dyn_parent_zone, s->limit);	\
		else						\
			uma_zfree(V_dyn_data_zone, s->data);	\
		uma_zfree(V_dyn_ ## af ## _zone, s);		\
	}							\
} while (0)
	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
		DYN_BUCKET_LOCK_DESTROY(V_dyn_bucket_lock, bucket);

		DYN_FREE_STATES_FORCED(CK_, s4, ipv4, ipv4[bucket], entry);
		DYN_FREE_STATES_FORCED(CK_, s4, ipv4, ipv4_parent[bucket],
		    entry);
#ifdef INET6
		DYN_FREE_STATES_FORCED(CK_, s6, ipv6, ipv6[bucket], entry);
		DYN_FREE_STATES_FORCED(CK_, s6, ipv6, ipv6_parent[bucket],
		    entry);
#endif /* INET6 */
	}
	DYN_FREE_STATES_FORCED(, s4, ipv4, expired_ipv4, expired);
#ifdef INET6
	DYN_FREE_STATES_FORCED(, s6, ipv6, expired_ipv6, expired);
#endif
#undef DYN_FREE_STATES_FORCED

	uma_zdestroy(V_dyn_ipv4_zone);
	uma_zdestroy(V_dyn_data_zone);
	uma_zdestroy(V_dyn_parent_zone);
#ifdef INET6
	uma_zdestroy(V_dyn_ipv6_zone);
	free(V_dyn_ipv6, M_IPFW);
	free(V_dyn_ipv6_parent, M_IPFW);
	free(V_dyn_ipv6_add, M_IPFW);
	free(V_dyn_ipv6_parent_add, M_IPFW);
	free(V_dyn_ipv6_del, M_IPFW);
	free(V_dyn_ipv6_parent_del, M_IPFW);
#endif
	free(V_dyn_bucket_lock, M_IPFW);
	free(V_dyn_ipv4, M_IPFW);
	free(V_dyn_ipv4_parent, M_IPFW);
	free(V_dyn_ipv4_add, M_IPFW);
	free(V_dyn_ipv4_parent_add, M_IPFW);
	free(V_dyn_ipv4_del, M_IPFW);
	free(V_dyn_ipv4_parent_del, M_IPFW);
	if (IS_DEFAULT_VNET(curvnet))
		free(dyn_hp_cache, M_IPFW);
}
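
/*
 * Usage sketch (userland, not part of this file): the machinery above
 * is driven by ipfw(8) rules such as the following; the rule numbers
 * and addresses are illustrative only.
 *
 *	ipfw add 100 check-state
 *	ipfw add 200 allow tcp from any to me 22 setup keep-state
 *	ipfw add 300 allow tcp from 10.0.0.0/24 to any setup \
 *	    limit src-addr 10
 *
 * keep-state installs a dynamic state on the first packet of a flow;
 * limit additionally creates an O_LIMIT_PARENT state for the masked
 * flow id and enforces the per-src-addr session limit checked in
 * dyn_get_parent_state() above.
 */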