/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017-2018 Yandex LLC
 * Copyright (c) 2017-2018 Andrey V. Elsukov <ae@FreeBSD.org>
 * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipfw.h"
#ifndef INET
#error IPFIREWALL requires INET.
#endif /* INET */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/pcpu.h>
#include <sys/queue.h>
#include <sys/rmlock.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <netinet/ip6.h>	/* IN6_ARE_ADDR_EQUAL */
#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#endif

#include <netpfil/ipfw/ip_fw_private.h>

#include <machine/in_cksum.h>	/* XXX for in_cksum */

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

/*
 * Description of dynamic states.
 *
 * Dynamic states are stored in lists accessed through hash tables
 * whose size is curr_dyn_buckets. This value can be modified through
 * the sysctl variable dyn_buckets.
 *
 * Currently there are four tables: dyn_ipv4, dyn_ipv6, dyn_ipv4_parent,
 * and dyn_ipv6_parent.
 *
 * When a packet is received, its address fields are hashed, then matched
 * against the entries in the corresponding list by addr_type.
 * Dynamic states can be used for different purposes:
 *  + stateful rules;
 *  + enforcing limits on the number of sessions;
 *  + in-kernel NAT (not implemented yet)
 *
 * The lifetime of dynamic states is regulated by dyn_*_lifetime,
 * measured in seconds and depending on the flags.
 *
 * The total number of dynamic states is equal to the UMA zone items count.
 * The max number of dynamic states is dyn_max. When we reach
 * the maximum number of states we do not create new ones. This is
 * done to avoid consuming too much memory, but also too much
 * time when searching on each packet (ideally, we should try instead
 * to put a limit on the length of the list on each bucket...).
 *
 * Each state holds a pointer to the parent ipfw rule so we know what
 * action to perform. Dynamic rules are removed when the parent rule is
 * deleted.
 *
 * There are some limitations with dynamic rules -- we do not
 * obey the 'randomized match', and we do not do multiple
 * passes through the firewall. XXX check the latter!!!
 */

/* By default use jenkins hash function */
#define IPFIREWALL_JENKINSHASH

#define DYN_COUNTER_INC(d, dir, pktlen) do {	\
	(d)->pcnt_ ## dir++;			\
	(d)->bcnt_ ## dir += pktlen;		\
	} while (0)

#define DYN_REFERENCED	0x01
/*
 * The DYN_REFERENCED flag is used to show that a state keeps a reference
 * to a named object, and this reference should be released when the state
 * expires.
 */

struct dyn_data {
	void		*parent;	/* pointer to parent rule */
	uint32_t	chain_id;	/* cached ruleset id */
	uint32_t	f_pos;		/* cached rule index */

	uint32_t	hashval;	/* hash value used for hash resize */
	uint16_t	fibnum;		/* fib used to send keepalives */
	uint8_t		_pad[3];
	uint8_t		flags;		/* internal flags */
	uint16_t	rulenum;	/* parent rule number */
	uint32_t	ruleid;		/* parent rule id */

	uint32_t	state;		/* TCP session state and flags */
	uint32_t	ack_fwd;	/* most recent ACKs in forward */
	uint32_t	ack_rev;	/* and reverse direction (used */
					/* to generate keepalives) */
	uint32_t	sync;		/* synchronization time */
	uint32_t	expire;		/* expire time */

	uint64_t	pcnt_fwd;	/* packets counter in forward */
	uint64_t	bcnt_fwd;	/* bytes counter in forward */
	uint64_t	pcnt_rev;	/* packets counter in reverse */
	uint64_t	bcnt_rev;	/* bytes counter in reverse */
};

#define DPARENT_COUNT_DEC(p) do {		\
	MPASS(p->count > 0);			\
	ck_pr_dec_32(&(p)->count);		\
} while (0)
#define DPARENT_COUNT_INC(p)	ck_pr_inc_32(&(p)->count)
#define DPARENT_COUNT(p)	ck_pr_load_32(&(p)->count)
struct dyn_parent {
	void		*parent;	/* pointer to parent rule */
	uint32_t	count;		/* number of linked states */
	uint8_t		_pad[2];
	uint16_t	rulenum;	/* parent rule number */
	uint32_t	ruleid;		/* parent rule id */
	uint32_t	hashval;	/* hash value used for hash resize */
	uint32_t	expire;		/* expire time */
};

struct dyn_ipv4_state {
	uint8_t		type;		/* State type */
	uint8_t		proto;		/* UL Protocol */
	uint16_t	kidx;		/* named object index */
	uint16_t	sport, dport;	/* ULP source and destination ports */
	in_addr_t	src, dst;	/* IPv4 source and destination */

	union {
		struct dyn_data		*data;
		struct dyn_parent	*limit;
	};
	CK_SLIST_ENTRY(dyn_ipv4_state)	entry;
	SLIST_ENTRY(dyn_ipv4_state)	expired;
};
CK_SLIST_HEAD(dyn_ipv4ck_slist, dyn_ipv4_state);
VNET_DEFINE_STATIC(struct dyn_ipv4ck_slist *, dyn_ipv4);
VNET_DEFINE_STATIC(struct dyn_ipv4ck_slist *, dyn_ipv4_parent);

SLIST_HEAD(dyn_ipv4_slist, dyn_ipv4_state);
VNET_DEFINE_STATIC(struct dyn_ipv4_slist, dyn_expired_ipv4);
#define V_dyn_ipv4		VNET(dyn_ipv4)
#define V_dyn_ipv4_parent	VNET(dyn_ipv4_parent)
#define V_dyn_expired_ipv4	VNET(dyn_expired_ipv4)

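/*
 * Note: the 'data'/'limit' union above is discriminated by 'type'.
 * O_LIMIT_PARENT states use 'limit' (a struct dyn_parent), while
 * O_KEEP_STATE and O_LIMIT states use 'data'; for O_LIMIT children the
 * data->parent field points to the parent state rather than to the rule
 * (see ipfw_dyn_lookup_state() below).
 */
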
#ifdef INET6
struct dyn_ipv6_state {
	uint8_t		type;		/* State type */
	uint8_t		proto;		/* UL Protocol */
	uint16_t	kidx;		/* named object index */
	uint16_t	sport, dport;	/* ULP source and destination ports */
	struct in6_addr	src, dst;	/* IPv6 source and destination */
	uint32_t	zoneid;		/* IPv6 scope zone id */
	union {
		struct dyn_data		*data;
		struct dyn_parent	*limit;
	};
	CK_SLIST_ENTRY(dyn_ipv6_state)	entry;
	SLIST_ENTRY(dyn_ipv6_state)	expired;
};
CK_SLIST_HEAD(dyn_ipv6ck_slist, dyn_ipv6_state);
VNET_DEFINE_STATIC(struct dyn_ipv6ck_slist *, dyn_ipv6);
VNET_DEFINE_STATIC(struct dyn_ipv6ck_slist *, dyn_ipv6_parent);

SLIST_HEAD(dyn_ipv6_slist, dyn_ipv6_state);
VNET_DEFINE_STATIC(struct dyn_ipv6_slist, dyn_expired_ipv6);
#define V_dyn_ipv6		VNET(dyn_ipv6)
#define V_dyn_ipv6_parent	VNET(dyn_ipv6_parent)
#define V_dyn_expired_ipv6	VNET(dyn_expired_ipv6)
#endif /* INET6 */

/*
 * A per-CPU pointer indicates that the specified state is currently in use
 * and must not be reclaimed by the expiration callout.
 */
static void **dyn_hp_cache;
DPCPU_DEFINE_STATIC(void *, dyn_hp);
#define DYNSTATE_GET(cpu)	ck_pr_load_ptr(DPCPU_ID_PTR((cpu), dyn_hp))
#define DYNSTATE_PROTECT(v)	ck_pr_store_ptr(DPCPU_PTR(dyn_hp), (v))
#define DYNSTATE_RELEASE()	DYNSTATE_PROTECT(NULL)
#define DYNSTATE_CRITICAL_ENTER()	critical_enter()
#define DYNSTATE_CRITICAL_EXIT() do {	\
	DYNSTATE_RELEASE();		\
	critical_exit();		\
} while (0);

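/*
 * A sketch of the lookup-side protocol built from the macros above
 * (this is how the dyn_lookup_*() functions below use them):
 *
 *	DYNSTATE_CRITICAL_ENTER();
 *	CK_SLIST_FOREACH(s, &V_dyn_ipv4[bucket], entry) {
 *		DYNSTATE_PROTECT(s);	// publish hazard pointer
 *		// re-check the bucket "del" version, restart on change
 *	}
 *	// ... use the matched state ...
 *	DYNSTATE_CRITICAL_EXIT();	// clears the hazard pointer
 *
 * The reclamation path is expected to consult the per-CPU dyn_hp
 * pointers (cached in dyn_hp_cache) and defer freeing any state that is
 * still published there.
 */
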
/*
 * We keep two version numbers: one is updated when a new entry is added
 * to the list, the second when an entry is deleted from the list.
 * Versions are updated under the bucket lock.
 *
 * The bucket "add" version number is used to detect that, in the time
 * between a state lookup (i.e. ipfw_dyn_lookup_state()) and the following
 * state creation (i.e. ipfw_dyn_install_state()), another concurrent
 * thread did not install some state in this bucket. Using this info we
 * can avoid an additional state lookup, because we are sure that we will
 * not install the state twice.
 *
 * Also, by tracking the bucket "del" version during lookup we can be
 * sure that a state entry was not unlinked and freed in the time between
 * reading the state pointer and protecting it with a hazard pointer.
 *
 * An entry unlinked from a CK list remains unchanged until it is freed.
 * Unlinked entries are linked into expired lists using the "expired"
 * field.
 */

/*
 * dyn_expire_lock is used to protect access to dyn_expired_xxx lists.
 * dyn_bucket_lock is used to get write access to lists in a specific
 * bucket. Currently one dyn_bucket_lock is used for all ipv4,
 * ipv4_parent, ipv6, and ipv6_parent lists.
 */
VNET_DEFINE_STATIC(struct mtx, dyn_expire_lock);
VNET_DEFINE_STATIC(struct mtx *, dyn_bucket_lock);
#define V_dyn_expire_lock	VNET(dyn_expire_lock)
#define V_dyn_bucket_lock	VNET(dyn_bucket_lock)

/*
 * Bucket's add/delete generation versions.
 */
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv4_add);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv4_del);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv4_parent_add);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv4_parent_del);
#define V_dyn_ipv4_add		VNET(dyn_ipv4_add)
#define V_dyn_ipv4_del		VNET(dyn_ipv4_del)
#define V_dyn_ipv4_parent_add	VNET(dyn_ipv4_parent_add)
#define V_dyn_ipv4_parent_del	VNET(dyn_ipv4_parent_del)

#ifdef INET6
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv6_add);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv6_del);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv6_parent_add);
VNET_DEFINE_STATIC(uint32_t *, dyn_ipv6_parent_del);
#define V_dyn_ipv6_add		VNET(dyn_ipv6_add)
#define V_dyn_ipv6_del		VNET(dyn_ipv6_del)
#define V_dyn_ipv6_parent_add	VNET(dyn_ipv6_parent_add)
#define V_dyn_ipv6_parent_del	VNET(dyn_ipv6_parent_del)
#endif /* INET6 */

#define DYN_BUCKET(h, b)		((h) & (b - 1))
#define DYN_BUCKET_VERSION(b, v)	ck_pr_load_32(&V_dyn_ ## v[(b)])
#define DYN_BUCKET_VERSION_BUMP(b, v)	ck_pr_inc_32(&V_dyn_ ## v[(b)])

#define DYN_BUCKET_LOCK_INIT(lock, b)	\
    mtx_init(&lock[(b)], "IPFW dynamic bucket", NULL, MTX_DEF)
#define DYN_BUCKET_LOCK_DESTROY(lock, b)	mtx_destroy(&lock[(b)])
#define DYN_BUCKET_LOCK(b)	mtx_lock(&V_dyn_bucket_lock[(b)])
#define DYN_BUCKET_UNLOCK(b)	mtx_unlock(&V_dyn_bucket_lock[(b)])
#define DYN_BUCKET_ASSERT(b)	mtx_assert(&V_dyn_bucket_lock[(b)], MA_OWNED)

#define DYN_EXPIRED_LOCK_INIT()		\
    mtx_init(&V_dyn_expire_lock, "IPFW expired states list", NULL, MTX_DEF)
#define DYN_EXPIRED_LOCK_DESTROY()	mtx_destroy(&V_dyn_expire_lock)
#define DYN_EXPIRED_LOCK()		mtx_lock(&V_dyn_expire_lock)
#define DYN_EXPIRED_UNLOCK()		mtx_unlock(&V_dyn_expire_lock)

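/*
 * Note: DYN_BUCKET() assumes that the bucket count is a power of two,
 * so the reduction is a simple mask; e.g., with V_curr_dyn_buckets set
 * to 256, DYN_BUCKET(h, 256) == (h & 255). The dyn_buckets sysctl
 * handler below guarantees this by rounding requests up to the next
 * power of two.
 */
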
VNET_DEFINE_STATIC(uint32_t, dyn_buckets_max);
VNET_DEFINE_STATIC(uint32_t, curr_dyn_buckets);
VNET_DEFINE_STATIC(struct callout, dyn_timeout);
#define V_dyn_buckets_max	VNET(dyn_buckets_max)
#define V_curr_dyn_buckets	VNET(curr_dyn_buckets)
#define V_dyn_timeout		VNET(dyn_timeout)

/* Maximum length of states chain in a bucket */
VNET_DEFINE_STATIC(uint32_t, curr_max_length);
#define V_curr_max_length	VNET(curr_max_length)

VNET_DEFINE_STATIC(uint32_t, dyn_keep_states);
#define V_dyn_keep_states	VNET(dyn_keep_states)

VNET_DEFINE_STATIC(uma_zone_t, dyn_data_zone);
VNET_DEFINE_STATIC(uma_zone_t, dyn_parent_zone);
VNET_DEFINE_STATIC(uma_zone_t, dyn_ipv4_zone);
#ifdef INET6
VNET_DEFINE_STATIC(uma_zone_t, dyn_ipv6_zone);
#define V_dyn_ipv6_zone		VNET(dyn_ipv6_zone)
#endif /* INET6 */
#define V_dyn_data_zone		VNET(dyn_data_zone)
#define V_dyn_parent_zone	VNET(dyn_parent_zone)
#define V_dyn_ipv4_zone		VNET(dyn_ipv4_zone)

/*
 * Timeouts for various events in handling dynamic rules.
 */
VNET_DEFINE_STATIC(uint32_t, dyn_ack_lifetime);
VNET_DEFINE_STATIC(uint32_t, dyn_syn_lifetime);
VNET_DEFINE_STATIC(uint32_t, dyn_fin_lifetime);
VNET_DEFINE_STATIC(uint32_t, dyn_rst_lifetime);
VNET_DEFINE_STATIC(uint32_t, dyn_udp_lifetime);
VNET_DEFINE_STATIC(uint32_t, dyn_short_lifetime);

#define V_dyn_ack_lifetime	VNET(dyn_ack_lifetime)
#define V_dyn_syn_lifetime	VNET(dyn_syn_lifetime)
#define V_dyn_fin_lifetime	VNET(dyn_fin_lifetime)
#define V_dyn_rst_lifetime	VNET(dyn_rst_lifetime)
#define V_dyn_udp_lifetime	VNET(dyn_udp_lifetime)
#define V_dyn_short_lifetime	VNET(dyn_short_lifetime)

/*
 * Keepalives are sent if dyn_keepalive is set. They are sent every
 * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
 * seconds of the lifetime of a rule.
 * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
 * than dyn_keepalive_period.
 */
VNET_DEFINE_STATIC(uint32_t, dyn_keepalive_interval);
VNET_DEFINE_STATIC(uint32_t, dyn_keepalive_period);
VNET_DEFINE_STATIC(uint32_t, dyn_keepalive);
VNET_DEFINE_STATIC(time_t, dyn_keepalive_last);

#define V_dyn_keepalive_interval	VNET(dyn_keepalive_interval)
#define V_dyn_keepalive_period	VNET(dyn_keepalive_period)
#define V_dyn_keepalive		VNET(dyn_keepalive)
#define V_dyn_keepalive_last	VNET(dyn_keepalive_last)

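/*
 * E.g., assuming the usual defaults (dyn_ack_lifetime 300,
 * dyn_keepalive_interval 20, dyn_keepalive_period 5, all in seconds),
 * keepalives for an established TCP state begin roughly 20 seconds
 * before the state would expire and are regenerated every 5 seconds
 * until traffic resumes or the state expires.
 */
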
VNET_DEFINE_STATIC(uint32_t, dyn_max);		/* max # of dynamic states */
VNET_DEFINE_STATIC(uint32_t, dyn_count);	/* number of states */
VNET_DEFINE_STATIC(uint32_t, dyn_parent_max);	/* max # of parent states */
VNET_DEFINE_STATIC(uint32_t, dyn_parent_count);	/* number of parent states */

#define V_dyn_max		VNET(dyn_max)
#define V_dyn_count		VNET(dyn_count)
#define V_dyn_parent_max	VNET(dyn_parent_max)
#define V_dyn_parent_count	VNET(dyn_parent_count)

#define DYN_COUNT_DEC(name) do {		\
	MPASS((V_ ## name) > 0);		\
	ck_pr_dec_32(&(V_ ## name));		\
} while (0)
#define DYN_COUNT_INC(name)	ck_pr_inc_32(&(V_ ## name))
#define DYN_COUNT(name)		ck_pr_load_32(&(V_ ## name))

static time_t last_log;	/* Log ratelimiting */

/*
 * Get/set the maximum number of dynamic states in a given VNET instance.
 */
static int
sysctl_dyn_max(SYSCTL_HANDLER_ARGS)
{
	uint32_t nstates;
	int error;

	nstates = V_dyn_max;
	error = sysctl_handle_32(oidp, &nstates, 0, req);
	/* Read operation or some error */
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	V_dyn_max = nstates;
	uma_zone_set_max(V_dyn_data_zone, V_dyn_max);
	return (0);
}

static int
sysctl_dyn_parent_max(SYSCTL_HANDLER_ARGS)
{
	uint32_t nstates;
	int error;

	nstates = V_dyn_parent_max;
	error = sysctl_handle_32(oidp, &nstates, 0, req);
	/* Read operation or some error */
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	V_dyn_parent_max = nstates;
	uma_zone_set_max(V_dyn_parent_zone, V_dyn_parent_max);
	return (0);
}

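/*
 * The handler below rejects requests of 256 buckets or less and rounds
 * everything else up to the next power of two, as required by
 * DYN_BUCKET(); e.g., a request for 1000 buckets is stored as
 * 1 << fls(999) == 1024.
 */
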
static int
sysctl_dyn_buckets(SYSCTL_HANDLER_ARGS)
{
	uint32_t nbuckets;
	int error;

	nbuckets = V_dyn_buckets_max;
	error = sysctl_handle_32(oidp, &nbuckets, 0, req);
	/* Read operation or some error */
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	if (nbuckets > 256)
		V_dyn_buckets_max = 1 << fls(nbuckets - 1);
	else
		return (EINVAL);
	return (0);
}

SYSCTL_DECL(_net_inet_ip_fw);

SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_count,
    CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(dyn_count), 0,
    "Current number of dynamic states.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_parent_count,
    CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(dyn_parent_count), 0,
    "Current number of parent states.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets,
    CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(curr_dyn_buckets), 0,
    "Current number of buckets for states hash table.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, curr_max_length,
    CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(curr_max_length), 0,
    "Current maximum length of states chains in hash buckets.");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_buckets,
    CTLFLAG_VNET | CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_dyn_buckets, "IU",
    "Max number of buckets for dynamic states hash table.");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_max,
    CTLFLAG_VNET | CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_dyn_max, "IU",
    "Max number of dynamic states.");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, dyn_parent_max,
    CTLFLAG_VNET | CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_dyn_parent_max, "IU",
    "Max number of parent dynamic states.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_ack_lifetime), 0,
    "Lifetime of dynamic states for TCP ACK.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_syn_lifetime), 0,
    "Lifetime of dynamic states for TCP SYN.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_fin_lifetime), 0,
    "Lifetime of dynamic states for TCP FIN.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_rst_lifetime), 0,
    "Lifetime of dynamic states for TCP RST.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_udp_lifetime), 0,
    "Lifetime of dynamic states for UDP.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_short_lifetime), 0,
    "Lifetime of dynamic states for other situations.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_keepalive,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_keepalive), 0,
    "Enable keepalives for dynamic states.");
SYSCTL_U32(_net_inet_ip_fw, OID_AUTO, dyn_keep_states,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(dyn_keep_states), 0,
    "Do not flush dynamic states on rule deletion");

#ifdef IPFIREWALL_DYNDEBUG
#define DYN_DEBUG(fmt, ...) do {			\
	printf("%s: " fmt "\n", __func__, __VA_ARGS__);	\
} while (0)
#else
#define DYN_DEBUG(fmt, ...)
#endif /* !IPFIREWALL_DYNDEBUG */

#ifdef INET6
/* Functions to work with IPv6 states */
static struct dyn_ipv6_state *dyn_lookup_ipv6_state(
    const struct ipfw_flow_id *, uint32_t, const void *,
    struct ipfw_dyn_info *, int);
static int dyn_lookup_ipv6_state_locked(const struct ipfw_flow_id *,
    uint32_t, const void *, int, uint32_t, uint16_t);
static struct dyn_ipv6_state *dyn_alloc_ipv6_state(
    const struct ipfw_flow_id *, uint32_t, uint16_t, uint8_t);
static int dyn_add_ipv6_state(void *, uint32_t, uint16_t,
    const struct ipfw_flow_id *, uint32_t, const void *, int, uint32_t,
    struct ipfw_dyn_info *, uint16_t, uint16_t, uint8_t);
static void dyn_export_ipv6_state(const struct dyn_ipv6_state *,
    ipfw_dyn_rule *);

static uint32_t dyn_getscopeid(const struct ip_fw_args *);
static void dyn_make_keepalive_ipv6(struct mbuf *, const struct in6_addr *,
    const struct in6_addr *, uint32_t, uint32_t, uint32_t, uint16_t,
    uint16_t);
static void dyn_enqueue_keepalive_ipv6(struct mbufq *,
    const struct dyn_ipv6_state *);
static void dyn_send_keepalive_ipv6(struct ip_fw_chain *);

static struct dyn_ipv6_state *dyn_lookup_ipv6_parent(
    const struct ipfw_flow_id *, uint32_t, const void *, uint32_t, uint16_t,
    uint32_t);
static struct dyn_ipv6_state *dyn_lookup_ipv6_parent_locked(
    const struct ipfw_flow_id *, uint32_t, const void *, uint32_t, uint16_t,
    uint32_t);
static struct dyn_ipv6_state *dyn_add_ipv6_parent(void *, uint32_t, uint16_t,
    const struct ipfw_flow_id *, uint32_t, uint32_t, uint32_t, uint16_t);
#endif /* INET6 */

/* Functions to work with limit states */
static void *dyn_get_parent_state(const struct ipfw_flow_id *, uint32_t,
    struct ip_fw *, uint32_t, uint32_t, uint16_t);
static struct dyn_ipv4_state *dyn_lookup_ipv4_parent(
    const struct ipfw_flow_id *, const void *, uint32_t, uint16_t, uint32_t);
static struct dyn_ipv4_state *dyn_lookup_ipv4_parent_locked(
    const struct ipfw_flow_id *, const void *, uint32_t, uint16_t, uint32_t);
static struct dyn_parent *dyn_alloc_parent(void *, uint32_t, uint16_t,
    uint32_t);
static struct dyn_ipv4_state *dyn_add_ipv4_parent(void *, uint32_t, uint16_t,
    const struct ipfw_flow_id *, uint32_t, uint32_t, uint16_t);

static void dyn_tick(void *);
static void dyn_expire_states(struct ip_fw_chain *, ipfw_range_tlv *);
static void dyn_free_states(struct ip_fw_chain *);
static void dyn_export_parent(const struct dyn_parent *, uint16_t, uint8_t,
    ipfw_dyn_rule *);
static void dyn_export_data(const struct dyn_data *, uint16_t, uint8_t,
    uint8_t, ipfw_dyn_rule *);
static uint32_t dyn_update_tcp_state(struct dyn_data *,
    const struct ipfw_flow_id *, const struct tcphdr *, int);
static void dyn_update_proto_state(struct dyn_data *,
    const struct ipfw_flow_id *, const void *, int, int);

/* Functions to work with IPv4 states */
static struct dyn_ipv4_state *dyn_lookup_ipv4_state(
    const struct ipfw_flow_id *, const void *, struct ipfw_dyn_info *, int);
static int dyn_lookup_ipv4_state_locked(const struct ipfw_flow_id *,
    const void *, int, uint32_t, uint16_t);
static struct dyn_ipv4_state *dyn_alloc_ipv4_state(
    const struct ipfw_flow_id *, uint16_t, uint8_t);
static int dyn_add_ipv4_state(void *, uint32_t, uint16_t,
    const struct ipfw_flow_id *, const void *, int, uint32_t,
    struct ipfw_dyn_info *, uint16_t, uint16_t, uint8_t);
static void dyn_export_ipv4_state(const struct dyn_ipv4_state *,
    ipfw_dyn_rule *);

/*
 * Named states support.
 */
static char *default_state_name = "default";
struct dyn_state_obj {
	struct named_object	no;
	char			name[64];
};

#define DYN_STATE_OBJ(ch, cmd)	\
    ((struct dyn_state_obj *)SRV_OBJECT(ch, (cmd)->arg1))
/*
 * Classifier callback.
 * Return 0 if the opcode contains an object that should be referenced
 * or rewritten.
 */
static int
dyn_classify(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
{

	DYN_DEBUG("opcode %d, arg1 %d", cmd->opcode, cmd->arg1);
	/* Don't rewrite "check-state any" */
	if (cmd->arg1 == 0 &&
	    cmd->opcode == O_CHECK_STATE)
		return (1);

	*puidx = cmd->arg1;
	*ptype = 0;
	return (0);
}

static void
dyn_update(ipfw_insn *cmd, uint16_t idx)
{

	cmd->arg1 = idx;
	DYN_DEBUG("opcode %d, arg1 %d", cmd->opcode, cmd->arg1);
}

static int
dyn_findbyname(struct ip_fw_chain *ch, struct tid_info *ti,
    struct named_object **pno)
{
	ipfw_obj_ntlv *ntlv;
	const char *name;

	DYN_DEBUG("uidx %d", ti->uidx);
	if (ti->uidx != 0) {
		if (ti->tlvs == NULL)
			return (EINVAL);
		/* Search ntlv in the buffer provided by user */
		ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx,
		    IPFW_TLV_STATE_NAME);
		if (ntlv == NULL)
			return (EINVAL);
		name = ntlv->name;
	} else
		name = default_state_name;
	/*
	 * Search for a named object with the corresponding name.
	 * Since state objects are global, ignore the set value
	 * and use zero instead.
	 */
	*pno = ipfw_objhash_lookup_name_type(CHAIN_TO_SRV(ch), 0,
	    IPFW_TLV_STATE_NAME, name);
	/*
	 * We always return success here.
	 * The caller will check *pno and mark the object as unresolved,
	 * then it will automatically create the "default" object.
	 */
	return (0);
}

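/*
 * State names come from the optional ":name" suffix of the keep-state,
 * check-state and limit keywords (e.g. "ipfw add check-state :OUT");
 * rules without an explicit name share the "default" object resolved
 * above.
 */
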
static struct named_object *
dyn_findbykidx(struct ip_fw_chain *ch, uint16_t idx)
{

	DYN_DEBUG("kidx %d", idx);
	return (ipfw_objhash_lookup_kidx(CHAIN_TO_SRV(ch), idx));
}

static int
dyn_create(struct ip_fw_chain *ch, struct tid_info *ti,
    uint16_t *pkidx)
{
	struct namedobj_instance *ni;
	struct dyn_state_obj *obj;
	struct named_object *no;
	ipfw_obj_ntlv *ntlv;
	char *name;

	DYN_DEBUG("uidx %d", ti->uidx);
	if (ti->uidx != 0) {
		if (ti->tlvs == NULL)
			return (EINVAL);
		ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx,
		    IPFW_TLV_STATE_NAME);
		if (ntlv == NULL)
			return (EINVAL);
		name = ntlv->name;
	} else
		name = default_state_name;

	ni = CHAIN_TO_SRV(ch);
	obj = malloc(sizeof(*obj), M_IPFW, M_WAITOK | M_ZERO);
	obj->no.name = obj->name;
	obj->no.etlv = IPFW_TLV_STATE_NAME;
	strlcpy(obj->name, name, sizeof(obj->name));

	IPFW_UH_WLOCK(ch);
	no = ipfw_objhash_lookup_name_type(ni, 0,
	    IPFW_TLV_STATE_NAME, name);
	if (no != NULL) {
		/*
		 * Object is already created.
		 * Just return its kidx and bump refcount.
		 */
		*pkidx = no->kidx;
		no->refcnt++;
		IPFW_UH_WUNLOCK(ch);
		free(obj, M_IPFW);
		DYN_DEBUG("\tfound kidx %d", *pkidx);
		return (0);
	}
	if (ipfw_objhash_alloc_idx(ni, &obj->no.kidx) != 0) {
		DYN_DEBUG("\talloc_idx failed for %s", name);
		IPFW_UH_WUNLOCK(ch);
		free(obj, M_IPFW);
		return (ENOSPC);
	}
	ipfw_objhash_add(ni, &obj->no);
	SRV_OBJECT(ch, obj->no.kidx) = obj;
	obj->no.refcnt++;
	*pkidx = obj->no.kidx;
	IPFW_UH_WUNLOCK(ch);
	DYN_DEBUG("\tcreated kidx %d", *pkidx);
	return (0);
}

static void
dyn_destroy(struct ip_fw_chain *ch, struct named_object *no)
{
	struct dyn_state_obj *obj;

	IPFW_UH_WLOCK_ASSERT(ch);

	KASSERT(no->etlv == IPFW_TLV_STATE_NAME,
	    ("%s: wrong object type %u", __func__, no->etlv));
	KASSERT(no->refcnt == 1,
	    ("Destroying object '%s' (type %u, idx %u) with refcnt %u",
	    no->name, no->etlv, no->kidx, no->refcnt));
	DYN_DEBUG("kidx %d", no->kidx);
	obj = SRV_OBJECT(ch, no->kidx);
	SRV_OBJECT(ch, no->kidx) = NULL;
	ipfw_objhash_del(CHAIN_TO_SRV(ch), no);
	ipfw_objhash_free_idx(CHAIN_TO_SRV(ch), no->kidx);

	free(obj, M_IPFW);
}

static struct opcode_obj_rewrite dyn_opcodes[] = {
	{
		O_KEEP_STATE, IPFW_TLV_STATE_NAME,
		dyn_classify, dyn_update,
		dyn_findbyname, dyn_findbykidx,
		dyn_create, dyn_destroy
	},
	{
		O_CHECK_STATE, IPFW_TLV_STATE_NAME,
		dyn_classify, dyn_update,
		dyn_findbyname, dyn_findbykidx,
		dyn_create, dyn_destroy
	},
	{
		O_PROBE_STATE, IPFW_TLV_STATE_NAME,
		dyn_classify, dyn_update,
		dyn_findbyname, dyn_findbykidx,
		dyn_create, dyn_destroy
	},
	{
		O_LIMIT, IPFW_TLV_STATE_NAME,
		dyn_classify, dyn_update,
		dyn_findbyname, dyn_findbykidx,
		dyn_create, dyn_destroy
	},
};

/*
 * IMPORTANT: the hash function for dynamic rules must be commutative
 * in source and destination (ip, port), because rules are bidirectional
 * and we want to find both in the same bucket.
 */
#ifndef IPFIREWALL_JENKINSHASH
static __inline uint32_t
hash_packet(const struct ipfw_flow_id *id)
{
	uint32_t i;

#ifdef INET6
	if (IS_IP6_FLOW_ID(id))
		i = ntohl((id->dst_ip6.__u6_addr.__u6_addr32[2]) ^
		    (id->dst_ip6.__u6_addr.__u6_addr32[3]) ^
		    (id->src_ip6.__u6_addr.__u6_addr32[2]) ^
		    (id->src_ip6.__u6_addr.__u6_addr32[3]));
	else
#endif /* INET6 */
		i = (id->dst_ip) ^ (id->src_ip);
	i ^= (id->dst_port) ^ (id->src_port);
	return (i);
}

static __inline uint32_t
hash_parent(const struct ipfw_flow_id *id, const void *rule)
{

	return (hash_packet(id) ^ ((uintptr_t)rule));
}

#else /* IPFIREWALL_JENKINSHASH */

VNET_DEFINE_STATIC(uint32_t, dyn_hashseed);
#define V_dyn_hashseed		VNET(dyn_hashseed)

static __inline int
addrcmp4(const struct ipfw_flow_id *id)
{

	if (id->src_ip < id->dst_ip)
		return (0);
	if (id->src_ip > id->dst_ip)
		return (1);
	if (id->src_port <= id->dst_port)
		return (0);
	return (1);
}

#ifdef INET6
static __inline int
addrcmp6(const struct ipfw_flow_id *id)
{
	int ret;

	ret = memcmp(&id->src_ip6, &id->dst_ip6, sizeof(struct in6_addr));
	if (ret < 0)
		return (0);
	if (ret > 0)
		return (1);
	if (id->src_port <= id->dst_port)
		return (0);
	return (1);
}

static __inline uint32_t
hash_packet6(const struct ipfw_flow_id *id)
{
	struct tuple6 {
		struct in6_addr	addr[2];
		uint16_t	port[2];
	} t6;

	if (addrcmp6(id) == 0) {
		t6.addr[0] = id->src_ip6;
		t6.addr[1] = id->dst_ip6;
		t6.port[0] = id->src_port;
		t6.port[1] = id->dst_port;
	} else {
		t6.addr[0] = id->dst_ip6;
		t6.addr[1] = id->src_ip6;
		t6.port[0] = id->dst_port;
		t6.port[1] = id->src_port;
	}
	return (jenkins_hash32((const uint32_t *)&t6,
	    sizeof(t6) / sizeof(uint32_t), V_dyn_hashseed));
}
#endif

static __inline uint32_t
hash_packet(const struct ipfw_flow_id *id)
{
	struct tuple4 {
		in_addr_t	addr[2];
		uint16_t	port[2];
	} t4;

	if (IS_IP4_FLOW_ID(id)) {
		/* All fields are in host byte order */
		if (addrcmp4(id) == 0) {
			t4.addr[0] = id->src_ip;
			t4.addr[1] = id->dst_ip;
			t4.port[0] = id->src_port;
			t4.port[1] = id->dst_port;
		} else {
			t4.addr[0] = id->dst_ip;
			t4.addr[1] = id->src_ip;
			t4.port[0] = id->dst_port;
			t4.port[1] = id->src_port;
		}
		return (jenkins_hash32((const uint32_t *)&t4,
		    sizeof(t4) / sizeof(uint32_t), V_dyn_hashseed));
	} else
#ifdef INET6
	if (IS_IP6_FLOW_ID(id))
		return (hash_packet6(id));
#endif
	return (0);
}

static __inline uint32_t
hash_parent(const struct ipfw_flow_id *id, const void *rule)
{

	return (jenkins_hash32((const uint32_t *)&rule,
	    sizeof(rule) / sizeof(uint32_t), hash_packet(id)));
}
#endif /* IPFIREWALL_JENKINSHASH */

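/*
 * A worked example of the commutativity requirement: for a TCP flow
 * 10.0.0.1:1025 <-> 10.0.0.2:80, addrcmp4() selects the same canonical
 * ordering for both directions (10.0.0.1 < 10.0.0.2), so the tuple
 * hashed is always {10.0.0.1, 10.0.0.2, 1025, 80} and forward and
 * reverse packets land in the same bucket.
 */
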
/*
 * Print customizable flow id description via log(9) facility.
 */
static void
print_dyn_rule_flags(const struct ipfw_flow_id *id, int dyn_type,
    int log_flags, char *prefix, char *postfix)
{
	struct in_addr da;
#ifdef INET6
	char src[INET6_ADDRSTRLEN], dst[INET6_ADDRSTRLEN];
#else
	char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN];
#endif

#ifdef INET6
	if (IS_IP6_FLOW_ID(id)) {
		ip6_sprintf(src, &id->src_ip6);
		ip6_sprintf(dst, &id->dst_ip6);
	} else
#endif
	{
		da.s_addr = htonl(id->src_ip);
		inet_ntop(AF_INET, &da, src, sizeof(src));
		da.s_addr = htonl(id->dst_ip);
		inet_ntop(AF_INET, &da, dst, sizeof(dst));
	}
	log(log_flags, "ipfw: %s type %d %s %d -> %s %d, %d %s\n",
	    prefix, dyn_type, src, id->src_port, dst,
	    id->dst_port, V_dyn_count, postfix);
}

#define print_dyn_rule(id, dtype, prefix, postfix)	\
    print_dyn_rule_flags(id, dtype, LOG_DEBUG, prefix, postfix)

#define TIME_LEQ(a,b)	((int)((a)-(b)) <= 0)
#define TIME_LE(a,b)	((int)((a)-(b)) < 0)
#define _SEQ_GE(a,b)	((int)((a)-(b)) >= 0)
#define BOTH_SYN	(TH_SYN | (TH_SYN << 8))
#define BOTH_FIN	(TH_FIN | (TH_FIN << 8))
#define BOTH_RST	(TH_RST | (TH_RST << 8))
#define TCP_FLAGS	(BOTH_SYN | BOTH_FIN | BOTH_RST)
#define ACK_FWD		0x00010000	/* fwd ack seen */
#define ACK_REV		0x00020000	/* rev ack seen */
#define ACK_BOTH	(ACK_FWD | ACK_REV)

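/*
 * Layout of the 32-bit state word updated by dyn_update_tcp_state()
 * below: bits 0-7 hold the TH_SYN/TH_FIN/TH_RST flags seen in the
 * forward direction, bits 8-15 the same flags seen in the reverse
 * direction, and ACK_FWD/ACK_REV (bits 16 and 17) record that a valid
 * ACK was seen in each direction.
 */
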
static uint32_t
dyn_update_tcp_state(struct dyn_data *data, const struct ipfw_flow_id *pkt,
    const struct tcphdr *tcp, int dir)
{
	uint32_t ack, expire;
	uint32_t state, old;
	uint8_t th_flags;

	expire = data->expire;
	old = state = data->state;
	th_flags = pkt->_flags & (TH_FIN | TH_SYN | TH_RST);
	state |= (dir == MATCH_FORWARD) ? th_flags: (th_flags << 8);
	switch (state & TCP_FLAGS) {
	case TH_SYN:			/* opening */
		expire = time_uptime + V_dyn_syn_lifetime;
		break;

	case BOTH_SYN:			/* move to established */
	case BOTH_SYN | TH_FIN:		/* one side tries to close */
	case BOTH_SYN | (TH_FIN << 8):
		if (tcp == NULL)
			break;
		ack = ntohl(tcp->th_ack);
		if (dir == MATCH_FORWARD) {
			if (data->ack_fwd == 0 ||
			    _SEQ_GE(ack, data->ack_fwd)) {
				state |= ACK_FWD;
				if (data->ack_fwd != ack)
					ck_pr_store_32(&data->ack_fwd, ack);
			}
		} else {
			if (data->ack_rev == 0 ||
			    _SEQ_GE(ack, data->ack_rev)) {
				state |= ACK_REV;
				if (data->ack_rev != ack)
					ck_pr_store_32(&data->ack_rev, ack);
			}
		}
		if ((state & ACK_BOTH) == ACK_BOTH) {
			/*
			 * Set the expire time to V_dyn_ack_lifetime only if
			 * we got ACKs for both directions.
			 * We use XOR here to avoid possible state
			 * overwriting in a concurrent thread.
			 */
			expire = time_uptime + V_dyn_ack_lifetime;
			ck_pr_xor_32(&data->state, ACK_BOTH);
		} else if ((data->state & ACK_BOTH) != (state & ACK_BOTH))
			ck_pr_or_32(&data->state, state & ACK_BOTH);
		break;

	case BOTH_SYN | BOTH_FIN:	/* both sides closed */
		if (V_dyn_fin_lifetime >= V_dyn_keepalive_period)
			V_dyn_fin_lifetime = V_dyn_keepalive_period - 1;
		expire = time_uptime + V_dyn_fin_lifetime;
		break;

	default:
		if (V_dyn_keepalive != 0 &&
		    V_dyn_rst_lifetime >= V_dyn_keepalive_period)
			V_dyn_rst_lifetime = V_dyn_keepalive_period - 1;
		expire = time_uptime + V_dyn_rst_lifetime;
	}
	/* Save TCP state if it was changed */
	if ((state & TCP_FLAGS) != (old & TCP_FLAGS))
		ck_pr_or_32(&data->state, state & TCP_FLAGS);
	return (expire);
}

/*
 * Update ULP specific state.
 * For TCP we keep sequence numbers and flags. For other protocols
 * currently we update only the expire time. Packets and bytes counters
 * are also updated here.
 */
static void
dyn_update_proto_state(struct dyn_data *data, const struct ipfw_flow_id *pkt,
    const void *ulp, int pktlen, int dir)
{
	uint32_t expire;

	/* NOTE: we are in critical section here. */
	switch (pkt->proto) {
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		expire = time_uptime + V_dyn_udp_lifetime;
		break;
	case IPPROTO_TCP:
		expire = dyn_update_tcp_state(data, pkt, ulp, dir);
		break;
	default:
		expire = time_uptime + V_dyn_short_lifetime;
	}
	/*
	 * The expiration timer has per-second granularity, so there is no
	 * need to update it every time the state is matched.
	 */
	if (data->expire != expire)
		ck_pr_store_32(&data->expire, expire);

	if (dir == MATCH_FORWARD)
		DYN_COUNTER_INC(data, fwd, pktlen);
	else
		DYN_COUNTER_INC(data, rev, pktlen);
}

/*
 * Lookup IPv4 state.
 * Must be called in critical section.
 */
static struct dyn_ipv4_state *
dyn_lookup_ipv4_state(const struct ipfw_flow_id *pkt, const void *ulp,
    struct ipfw_dyn_info *info, int pktlen)
{
	struct dyn_ipv4_state *s;
	uint32_t version, bucket;

	bucket = DYN_BUCKET(info->hashval, V_curr_dyn_buckets);
	info->version = DYN_BUCKET_VERSION(bucket, ipv4_add);
restart:
	version = DYN_BUCKET_VERSION(bucket, ipv4_del);
	CK_SLIST_FOREACH(s, &V_dyn_ipv4[bucket], entry) {
		DYNSTATE_PROTECT(s);
		if (version != DYN_BUCKET_VERSION(bucket, ipv4_del))
			goto restart;
		if (s->proto != pkt->proto)
			continue;
		if (info->kidx != 0 && s->kidx != info->kidx)
			continue;
		if (s->sport == pkt->src_port && s->dport == pkt->dst_port &&
		    s->src == pkt->src_ip && s->dst == pkt->dst_ip) {
			info->direction = MATCH_FORWARD;
			break;
		}
		if (s->sport == pkt->dst_port && s->dport == pkt->src_port &&
		    s->src == pkt->dst_ip && s->dst == pkt->src_ip) {
			info->direction = MATCH_REVERSE;
			break;
		}
	}

	if (s != NULL)
		dyn_update_proto_state(s->data, pkt, ulp, pktlen,
		    info->direction);
	return (s);
}

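/*
 * Note on the restart logic above: if the bucket "del" version changed
 * while we were walking the list, the entry we were about to protect
 * could have been unlinked (and scheduled for freeing) concurrently, so
 * the walk is restarted from the head. The "add" version saved in
 * info->version lets ipfw_dyn_install_state() detect concurrent
 * insertions without repeating the lookup.
 */
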
/*
 * Lookup IPv4 state.
 * A simplified version is used to check that a matching state doesn't
 * exist.
 */
static int
dyn_lookup_ipv4_state_locked(const struct ipfw_flow_id *pkt,
    const void *ulp, int pktlen, uint32_t bucket, uint16_t kidx)
{
	struct dyn_ipv4_state *s;
	int dir;

	dir = MATCH_NONE;
	DYN_BUCKET_ASSERT(bucket);
	CK_SLIST_FOREACH(s, &V_dyn_ipv4[bucket], entry) {
		if (s->proto != pkt->proto ||
		    s->kidx != kidx)
			continue;
		if (s->sport == pkt->src_port &&
		    s->dport == pkt->dst_port &&
		    s->src == pkt->src_ip && s->dst == pkt->dst_ip) {
			dir = MATCH_FORWARD;
			break;
		}
		if (s->sport == pkt->dst_port && s->dport == pkt->src_port &&
		    s->src == pkt->dst_ip && s->dst == pkt->src_ip) {
			dir = MATCH_REVERSE;
			break;
		}
	}
	if (s != NULL)
		dyn_update_proto_state(s->data, pkt, ulp, pktlen, dir);
	return (s != NULL);
}

static struct dyn_ipv4_state *
dyn_lookup_ipv4_parent(const struct ipfw_flow_id *pkt, const void *rule,
    uint32_t ruleid, uint16_t rulenum, uint32_t hashval)
{
	struct dyn_ipv4_state *s;
	uint32_t version, bucket;

	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
restart:
	version = DYN_BUCKET_VERSION(bucket, ipv4_parent_del);
	CK_SLIST_FOREACH(s, &V_dyn_ipv4_parent[bucket], entry) {
		DYNSTATE_PROTECT(s);
		if (version != DYN_BUCKET_VERSION(bucket, ipv4_parent_del))
			goto restart;
		/*
		 * NOTE: we do not need to check kidx, because a parent rule
		 * can not create states with a different kidx.
		 * And a parent state is always created for the forward
		 * direction.
		 */
		if (s->limit->parent == rule &&
		    s->limit->ruleid == ruleid &&
		    s->limit->rulenum == rulenum &&
		    s->proto == pkt->proto &&
		    s->sport == pkt->src_port &&
		    s->dport == pkt->dst_port &&
		    s->src == pkt->src_ip && s->dst == pkt->dst_ip) {
			if (s->limit->expire != time_uptime +
			    V_dyn_short_lifetime)
				ck_pr_store_32(&s->limit->expire,
				    time_uptime + V_dyn_short_lifetime);
			break;
		}
	}
	return (s);
}

static struct dyn_ipv4_state *
dyn_lookup_ipv4_parent_locked(const struct ipfw_flow_id *pkt,
    const void *rule, uint32_t ruleid, uint16_t rulenum, uint32_t bucket)
{
	struct dyn_ipv4_state *s;

	DYN_BUCKET_ASSERT(bucket);
	CK_SLIST_FOREACH(s, &V_dyn_ipv4_parent[bucket], entry) {
		if (s->limit->parent == rule &&
		    s->limit->ruleid == ruleid &&
		    s->limit->rulenum == rulenum &&
		    s->proto == pkt->proto &&
		    s->sport == pkt->src_port &&
		    s->dport == pkt->dst_port &&
		    s->src == pkt->src_ip && s->dst == pkt->dst_ip)
			break;
	}
	return (s);
}

#ifdef INET6
static uint32_t
dyn_getscopeid(const struct ip_fw_args *args)
{

	/*
	 * If the source or destination address is a link-local address,
	 * we need to determine the scope zone id to resolve address scope
	 * ambiguity.
	 */
	if (IN6_IS_ADDR_LINKLOCAL(&args->f_id.src_ip6) ||
	    IN6_IS_ADDR_LINKLOCAL(&args->f_id.dst_ip6))
		return (in6_getscopezone(args->ifp,
		    IPV6_ADDR_SCOPE_LINKLOCAL));

	return (0);
}

/*
 * Lookup IPv6 state.
 * Must be called in critical section.
 */
static struct dyn_ipv6_state *
dyn_lookup_ipv6_state(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    const void *ulp, struct ipfw_dyn_info *info, int pktlen)
{
	struct dyn_ipv6_state *s;
	uint32_t version, bucket;

	bucket = DYN_BUCKET(info->hashval, V_curr_dyn_buckets);
	info->version = DYN_BUCKET_VERSION(bucket, ipv6_add);
restart:
	version = DYN_BUCKET_VERSION(bucket, ipv6_del);
	CK_SLIST_FOREACH(s, &V_dyn_ipv6[bucket], entry) {
		DYNSTATE_PROTECT(s);
		if (version != DYN_BUCKET_VERSION(bucket, ipv6_del))
			goto restart;
		if (s->proto != pkt->proto || s->zoneid != zoneid)
			continue;
		if (info->kidx != 0 && s->kidx != info->kidx)
			continue;
		if (s->sport == pkt->src_port && s->dport == pkt->dst_port &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->src_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->dst_ip6)) {
			info->direction = MATCH_FORWARD;
			break;
		}
		if (s->sport == pkt->dst_port && s->dport == pkt->src_port &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->dst_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->src_ip6)) {
			info->direction = MATCH_REVERSE;
			break;
		}
	}
	if (s != NULL)
		dyn_update_proto_state(s->data, pkt, ulp, pktlen,
		    info->direction);
	return (s);
}

/*
 * Lookup IPv6 state.
 * A simplified version is used to check that a matching state doesn't
 * exist.
 */
static int
dyn_lookup_ipv6_state_locked(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    const void *ulp, int pktlen, uint32_t bucket, uint16_t kidx)
{
	struct dyn_ipv6_state *s;
	int dir;

	dir = MATCH_NONE;
	DYN_BUCKET_ASSERT(bucket);
	CK_SLIST_FOREACH(s, &V_dyn_ipv6[bucket], entry) {
		if (s->proto != pkt->proto || s->kidx != kidx ||
		    s->zoneid != zoneid)
			continue;
		if (s->sport == pkt->src_port && s->dport == pkt->dst_port &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->src_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->dst_ip6)) {
			dir = MATCH_FORWARD;
			break;
		}
		if (s->sport == pkt->dst_port && s->dport == pkt->src_port &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->dst_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->src_ip6)) {
			dir = MATCH_REVERSE;
			break;
		}
	}
	if (s != NULL)
		dyn_update_proto_state(s->data, pkt, ulp, pktlen, dir);
	return (s != NULL);
}

static struct dyn_ipv6_state *
dyn_lookup_ipv6_parent(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    const void *rule, uint32_t ruleid, uint16_t rulenum, uint32_t hashval)
{
	struct dyn_ipv6_state *s;
	uint32_t version, bucket;

	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
restart:
	version = DYN_BUCKET_VERSION(bucket, ipv6_parent_del);
	CK_SLIST_FOREACH(s, &V_dyn_ipv6_parent[bucket], entry) {
		DYNSTATE_PROTECT(s);
		if (version != DYN_BUCKET_VERSION(bucket, ipv6_parent_del))
			goto restart;
		/*
		 * NOTE: we do not need to check kidx, because a parent rule
		 * can not create states with a different kidx.
		 * Also a parent state is always created for the forward
		 * direction.
		 */
		if (s->limit->parent == rule &&
		    s->limit->ruleid == ruleid &&
		    s->limit->rulenum == rulenum &&
		    s->proto == pkt->proto &&
		    s->sport == pkt->src_port &&
		    s->dport == pkt->dst_port && s->zoneid == zoneid &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->src_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->dst_ip6)) {
			if (s->limit->expire != time_uptime +
			    V_dyn_short_lifetime)
				ck_pr_store_32(&s->limit->expire,
				    time_uptime + V_dyn_short_lifetime);
			break;
		}
	}
	return (s);
}

static struct dyn_ipv6_state *
dyn_lookup_ipv6_parent_locked(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    const void *rule, uint32_t ruleid, uint16_t rulenum, uint32_t bucket)
{
	struct dyn_ipv6_state *s;

	DYN_BUCKET_ASSERT(bucket);
	CK_SLIST_FOREACH(s, &V_dyn_ipv6_parent[bucket], entry) {
		if (s->limit->parent == rule &&
		    s->limit->ruleid == ruleid &&
		    s->limit->rulenum == rulenum &&
		    s->proto == pkt->proto &&
		    s->sport == pkt->src_port &&
		    s->dport == pkt->dst_port && s->zoneid == zoneid &&
		    IN6_ARE_ADDR_EQUAL(&s->src, &pkt->src_ip6) &&
		    IN6_ARE_ADDR_EQUAL(&s->dst, &pkt->dst_ip6))
			break;
	}
	return (s);
}

#endif /* INET6 */

/*
 * Lookup dynamic state.
 *  pkt - ipfw_flow_id filled in by ipfw_chk();
 *  ulp - upper level protocol header determined by ipfw_chk();
 *  info - info about the matched state, returned back to the caller.
 * Returns a pointer to the state's parent rule and fills in info. If
 * there is no state, NULL is returned.
 * On match ipfw_dyn_lookup_state() updates the state's counters.
 */
struct ip_fw *
ipfw_dyn_lookup_state(const struct ip_fw_args *args, const void *ulp,
    int pktlen, const ipfw_insn *cmd, struct ipfw_dyn_info *info)
{
	struct dyn_data *data;
	struct ip_fw *rule;

	IPFW_RLOCK_ASSERT(&V_layer3_chain);

	data = NULL;
	rule = NULL;
	info->kidx = cmd->arg1;
	info->direction = MATCH_NONE;
	info->hashval = hash_packet(&args->f_id);

	DYNSTATE_CRITICAL_ENTER();
	if (IS_IP4_FLOW_ID(&args->f_id)) {
		struct dyn_ipv4_state *s;

		s = dyn_lookup_ipv4_state(&args->f_id, ulp, info, pktlen);
		if (s != NULL) {
			/*
			 * Dynamic states are created using the same 5-tuple,
			 * so it is assumed that the parent rule for an
			 * O_LIMIT state has the same address family.
			 */
			data = s->data;
			if (s->type == O_LIMIT) {
				s = data->parent;
				rule = s->limit->parent;
			} else
				rule = data->parent;
		}
	}
#ifdef INET6
	else if (IS_IP6_FLOW_ID(&args->f_id)) {
		struct dyn_ipv6_state *s;

		s = dyn_lookup_ipv6_state(&args->f_id, dyn_getscopeid(args),
		    ulp, info, pktlen);
		if (s != NULL) {
			data = s->data;
			if (s->type == O_LIMIT) {
				s = data->parent;
				rule = s->limit->parent;
			} else
				rule = data->parent;
		}
	}
#endif
	if (data != NULL) {
		/*
		 * If the cached chain id is the same, we can avoid the rule
		 * index lookup. Otherwise do the lookup and update chain_id
		 * and f_pos. This is safe even if there is a concurrent
		 * thread that wants to update the same state, because
		 * chain->id can be changed only under IPFW_WLOCK().
		 */
		if (data->chain_id != V_layer3_chain.id) {
			data->f_pos = ipfw_find_rule(&V_layer3_chain,
			    data->rulenum, data->ruleid);
			/*
			 * Check that the found state has not been orphaned.
			 * While chain->id was being changed the parent rule
			 * could have been deleted. If the found rule doesn't
			 * match the parent pointer, consider this result as
			 * MATCH_NONE and return NULL.
			 *
			 * This will lead to the creation of a new similar
			 * state that will be added into the head of this
			 * bucket. And the state that we currently have
			 * matched should be deleted by dyn_expire_states().
			 *
			 * In case dyn_keep_states is enabled, return a
			 * pointer to the deleted rule and an f_pos value
			 * corresponding to the penultimate rule.
			 * When V_dyn_keep_states is enabled, states that
			 * become orphaned get the DYN_REFERENCED flag and
			 * the rule is kept around. So we can return it.
			 * But since it is not in the rules map, we need to
			 * return such an f_pos value that, if the search
			 * continues after the state handling, the next rule
			 * will be the last one - the default rule.
			 */
			if (V_layer3_chain.map[data->f_pos] == rule) {
				data->chain_id = V_layer3_chain.id;
				info->f_pos = data->f_pos;
			} else if (V_dyn_keep_states != 0) {
				/*
				 * The original rule pointer is still usable.
				 * So, we return it, but f_pos needs to be
				 * changed to point to the penultimate rule.
				 */
				MPASS(V_layer3_chain.n_rules > 1);
				data->chain_id = V_layer3_chain.id;
				data->f_pos = V_layer3_chain.n_rules - 2;
				info->f_pos = data->f_pos;
			} else {
				rule = NULL;
				info->direction = MATCH_NONE;
				DYN_DEBUG("rule %p [%u, %u] is considered "
				    "invalid in data %p", rule, data->ruleid,
				    data->rulenum, data);
				/* info->f_pos doesn't matter here. */
			}
		} else
			info->f_pos = data->f_pos;
	}
	DYNSTATE_CRITICAL_EXIT();
#if 0
	/*
	 * Return MATCH_NONE if the parent rule is in a disabled set.
	 * This will lead to the creation of a new similar state that
	 * will be added into the head of this bucket.
	 *
	 * XXXAE: we need to be able to update the state's set when the
	 * parent rule set is changed.
	 */
	if (rule != NULL && (V_set_disable & (1 << rule->set))) {
		rule = NULL;
		info->direction = MATCH_NONE;
	}
#endif
	return (rule);
}

static struct dyn_parent *
dyn_alloc_parent(void *parent, uint32_t ruleid, uint16_t rulenum,
    uint32_t hashval)
{
	struct dyn_parent *limit;

	limit = uma_zalloc(V_dyn_parent_zone, M_NOWAIT | M_ZERO);
	if (limit == NULL) {
		if (last_log != time_uptime) {
			last_log = time_uptime;
			log(LOG_DEBUG,
			    "ipfw: Cannot allocate parent dynamic state, "
			    "consider increasing "
			    "net.inet.ip.fw.dyn_parent_max\n");
		}
		return (NULL);
	}

	limit->parent = parent;
	limit->ruleid = ruleid;
	limit->rulenum = rulenum;
	limit->hashval = hashval;
	limit->expire = time_uptime + V_dyn_short_lifetime;
	return (limit);
}

static struct dyn_data *
dyn_alloc_dyndata(void *parent, uint32_t ruleid, uint16_t rulenum,
    const struct ipfw_flow_id *pkt, const void *ulp, int pktlen,
    uint32_t hashval, uint16_t fibnum)
{
	struct dyn_data *data;

	data = uma_zalloc(V_dyn_data_zone, M_NOWAIT | M_ZERO);
	if (data == NULL) {
		if (last_log != time_uptime) {
			last_log = time_uptime;
			log(LOG_DEBUG,
			    "ipfw: Cannot allocate dynamic state, "
			    "consider increasing net.inet.ip.fw.dyn_max\n");
		}
		return (NULL);
	}

	data->parent = parent;
	data->ruleid = ruleid;
	data->rulenum = rulenum;
	data->fibnum = fibnum;
	data->hashval = hashval;
	data->expire = time_uptime + V_dyn_syn_lifetime;
	dyn_update_proto_state(data, pkt, ulp, pktlen, MATCH_FORWARD);
	return (data);
}

static struct dyn_ipv4_state *
dyn_alloc_ipv4_state(const struct ipfw_flow_id *pkt, uint16_t kidx,
    uint8_t type)
{
	struct dyn_ipv4_state *s;

	s = uma_zalloc(V_dyn_ipv4_zone, M_NOWAIT | M_ZERO);
	if (s == NULL)
		return (NULL);

	s->type = type;
	s->kidx = kidx;
	s->proto = pkt->proto;
	s->sport = pkt->src_port;
	s->dport = pkt->dst_port;
	s->src = pkt->src_ip;
	s->dst = pkt->dst_ip;
	return (s);
}

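/*
 * The dyn_add_*() functions below follow a double-checked pattern: a
 * lock-free lookup has already been done by the caller; if the bucket
 * "add" version changed between that lookup and taking the bucket
 * mutex, the lookup is repeated under the lock to make sure a
 * concurrent thread did not already install an equivalent state.
 */
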
/*
 * Add an IPv4 parent state.
 * Returns a pointer to the parent state. When it is not NULL we are in
 * the critical section and the pointer is protected by a hazard pointer.
 * When some error occurs, it returns NULL and exit from the critical
 * section is not needed.
 */
static struct dyn_ipv4_state *
dyn_add_ipv4_parent(void *rule, uint32_t ruleid, uint16_t rulenum,
    const struct ipfw_flow_id *pkt, uint32_t hashval, uint32_t version,
    uint16_t kidx)
{
	struct dyn_ipv4_state *s;
	struct dyn_parent *limit;
	uint32_t bucket;

	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
	DYN_BUCKET_LOCK(bucket);
	if (version != DYN_BUCKET_VERSION(bucket, ipv4_parent_add)) {
		/*
		 * The bucket version has been changed since the last lookup,
		 * do the lookup again to be sure that the state does not
		 * exist.
		 */
		s = dyn_lookup_ipv4_parent_locked(pkt, rule, ruleid,
		    rulenum, bucket);
		if (s != NULL) {
			/*
			 * A concurrent thread has already created this
			 * state. Just return it.
			 */
			DYNSTATE_CRITICAL_ENTER();
			DYNSTATE_PROTECT(s);
			DYN_BUCKET_UNLOCK(bucket);
			return (s);
		}
	}

	limit = dyn_alloc_parent(rule, ruleid, rulenum, hashval);
	if (limit == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		return (NULL);
	}

	s = dyn_alloc_ipv4_state(pkt, kidx, O_LIMIT_PARENT);
	if (s == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		uma_zfree(V_dyn_parent_zone, limit);
		return (NULL);
	}

	s->limit = limit;
	CK_SLIST_INSERT_HEAD(&V_dyn_ipv4_parent[bucket], s, entry);
	DYN_COUNT_INC(dyn_parent_count);
	DYN_BUCKET_VERSION_BUMP(bucket, ipv4_parent_add);
	DYNSTATE_CRITICAL_ENTER();
	DYNSTATE_PROTECT(s);
	DYN_BUCKET_UNLOCK(bucket);
	return (s);
}

static int
dyn_add_ipv4_state(void *parent, uint32_t ruleid, uint16_t rulenum,
    const struct ipfw_flow_id *pkt, const void *ulp, int pktlen,
    uint32_t hashval, struct ipfw_dyn_info *info, uint16_t fibnum,
    uint16_t kidx, uint8_t type)
{
	struct dyn_ipv4_state *s;
	void *data;
	uint32_t bucket;

	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
	DYN_BUCKET_LOCK(bucket);
	if (info->direction == MATCH_UNKNOWN ||
	    info->kidx != kidx ||
	    info->hashval != hashval ||
	    info->version != DYN_BUCKET_VERSION(bucket, ipv4_add)) {
		/*
		 * The bucket version has been changed since the last lookup,
		 * do the lookup again to be sure that the state does not
		 * exist.
		 */
		if (dyn_lookup_ipv4_state_locked(pkt, ulp, pktlen,
		    bucket, kidx) != 0) {
			DYN_BUCKET_UNLOCK(bucket);
			return (EEXIST);
		}
	}

	data = dyn_alloc_dyndata(parent, ruleid, rulenum, pkt, ulp,
	    pktlen, hashval, fibnum);
	if (data == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		return (ENOMEM);
	}

	s = dyn_alloc_ipv4_state(pkt, kidx, type);
	if (s == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		uma_zfree(V_dyn_data_zone, data);
		return (ENOMEM);
	}

	s->data = data;
	CK_SLIST_INSERT_HEAD(&V_dyn_ipv4[bucket], s, entry);
	DYN_COUNT_INC(dyn_count);
	DYN_BUCKET_VERSION_BUMP(bucket, ipv4_add);
	DYN_BUCKET_UNLOCK(bucket);
	return (0);
}

#ifdef INET6
static struct dyn_ipv6_state *
dyn_alloc_ipv6_state(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    uint16_t kidx, uint8_t type)
{
	struct dyn_ipv6_state *s;

	s = uma_zalloc(V_dyn_ipv6_zone, M_NOWAIT | M_ZERO);
	if (s == NULL)
		return (NULL);

	s->type = type;
	s->kidx = kidx;
	s->zoneid = zoneid;
	s->proto = pkt->proto;
	s->sport = pkt->src_port;
	s->dport = pkt->dst_port;
	s->src = pkt->src_ip6;
	s->dst = pkt->dst_ip6;
	return (s);
}

/*
 * Add an IPv6 parent state.
 * Returns a pointer to the parent state. When it is not NULL we are in
 * the critical section and the pointer is protected by a hazard pointer.
 * When some error occurs, it returns NULL and exit from the critical
 * section is not needed.
 */
static struct dyn_ipv6_state *
dyn_add_ipv6_parent(void *rule, uint32_t ruleid, uint16_t rulenum,
    const struct ipfw_flow_id *pkt, uint32_t zoneid, uint32_t hashval,
    uint32_t version, uint16_t kidx)
{
	struct dyn_ipv6_state *s;
	struct dyn_parent *limit;
	uint32_t bucket;

	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
	DYN_BUCKET_LOCK(bucket);
	if (version != DYN_BUCKET_VERSION(bucket, ipv6_parent_add)) {
		/*
		 * The bucket version has been changed since the last lookup,
		 * do the lookup again to be sure that the state does not
		 * exist.
		 */
		s = dyn_lookup_ipv6_parent_locked(pkt, zoneid, rule, ruleid,
		    rulenum, bucket);
		if (s != NULL) {
			/*
			 * A concurrent thread has already created this
			 * state. Just return it.
			 */
			DYNSTATE_CRITICAL_ENTER();
			DYNSTATE_PROTECT(s);
			DYN_BUCKET_UNLOCK(bucket);
			return (s);
		}
	}

	limit = dyn_alloc_parent(rule, ruleid, rulenum, hashval);
	if (limit == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		return (NULL);
	}

	s = dyn_alloc_ipv6_state(pkt, zoneid, kidx, O_LIMIT_PARENT);
	if (s == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		uma_zfree(V_dyn_parent_zone, limit);
		return (NULL);
	}

	s->limit = limit;
	CK_SLIST_INSERT_HEAD(&V_dyn_ipv6_parent[bucket], s, entry);
	DYN_COUNT_INC(dyn_parent_count);
	DYN_BUCKET_VERSION_BUMP(bucket, ipv6_parent_add);
	DYNSTATE_CRITICAL_ENTER();
	DYNSTATE_PROTECT(s);
	DYN_BUCKET_UNLOCK(bucket);
	return (s);
}

static int
dyn_add_ipv6_state(void *parent, uint32_t ruleid, uint16_t rulenum,
    const struct ipfw_flow_id *pkt, uint32_t zoneid, const void *ulp,
    int pktlen, uint32_t hashval, struct ipfw_dyn_info *info,
    uint16_t fibnum, uint16_t kidx, uint8_t type)
{
	struct dyn_ipv6_state *s;
	struct dyn_data *data;
	uint32_t bucket;

	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
	DYN_BUCKET_LOCK(bucket);
	if (info->direction == MATCH_UNKNOWN ||
	    info->kidx != kidx ||
	    info->hashval != hashval ||
	    info->version != DYN_BUCKET_VERSION(bucket, ipv6_add)) {
		/*
		 * The bucket version has been changed since the last lookup,
		 * do the lookup again to be sure that the state does not
		 * exist.
		 */
		 */
		if (dyn_lookup_ipv6_state_locked(pkt, zoneid, ulp, pktlen,
		    bucket, kidx) != 0) {
			DYN_BUCKET_UNLOCK(bucket);
			return (EEXIST);
		}
	}

	data = dyn_alloc_dyndata(parent, ruleid, rulenum, pkt, ulp,
	    pktlen, hashval, fibnum);
	if (data == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		return (ENOMEM);
	}

	s = dyn_alloc_ipv6_state(pkt, zoneid, kidx, type);
	if (s == NULL) {
		DYN_BUCKET_UNLOCK(bucket);
		uma_zfree(V_dyn_data_zone, data);
		return (ENOMEM);
	}

	s->data = data;
	CK_SLIST_INSERT_HEAD(&V_dyn_ipv6[bucket], s, entry);
	DYN_COUNT_INC(dyn_count);
	DYN_BUCKET_VERSION_BUMP(bucket, ipv6_add);
	DYN_BUCKET_UNLOCK(bucket);
	return (0);
}
#endif /* INET6 */

static void *
dyn_get_parent_state(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    struct ip_fw *rule, uint32_t hashval, uint32_t limit, uint16_t kidx)
{
	char sbuf[24];
	struct dyn_parent *p;
	void *ret;
	uint32_t bucket, version;

	p = NULL;
	ret = NULL;
	bucket = DYN_BUCKET(hashval, V_curr_dyn_buckets);
	DYNSTATE_CRITICAL_ENTER();
	if (IS_IP4_FLOW_ID(pkt)) {
		struct dyn_ipv4_state *s;

		version = DYN_BUCKET_VERSION(bucket, ipv4_parent_add);
		s = dyn_lookup_ipv4_parent(pkt, rule, rule->id,
		    rule->rulenum, bucket);
		if (s == NULL) {
			/*
			 * Exit from the critical section because
			 * dyn_add_ipv4_parent() will acquire the
			 * bucket lock.
			 */
			DYNSTATE_CRITICAL_EXIT();

			s = dyn_add_ipv4_parent(rule, rule->id,
			    rule->rulenum, pkt, hashval, version, kidx);
			if (s == NULL)
				return (NULL);
			/* Now we are in the critical section again. */
		}
		ret = s;
		p = s->limit;
	}
#ifdef INET6
	else if (IS_IP6_FLOW_ID(pkt)) {
		struct dyn_ipv6_state *s;

		version = DYN_BUCKET_VERSION(bucket, ipv6_parent_add);
		s = dyn_lookup_ipv6_parent(pkt, zoneid, rule, rule->id,
		    rule->rulenum, bucket);
		if (s == NULL) {
			/*
			 * Exit from the critical section because
			 * dyn_add_ipv6_parent() can acquire the
			 * bucket mutex.
			 */
			DYNSTATE_CRITICAL_EXIT();

			s = dyn_add_ipv6_parent(rule, rule->id,
			    rule->rulenum, pkt, zoneid, hashval, version,
			    kidx);
			if (s == NULL)
				return (NULL);
			/* Now we are in the critical section again. */
		}
		ret = s;
		p = s->limit;
	}
#endif
	else {
		DYNSTATE_CRITICAL_EXIT();
		return (NULL);
	}

	/* Check the limit. */
	if (DPARENT_COUNT(p) >= limit) {
		DYNSTATE_CRITICAL_EXIT();
		if (V_fw_verbose && last_log != time_uptime) {
			last_log = time_uptime;
			snprintf(sbuf, sizeof(sbuf), "%u drop session",
			    rule->rulenum);
			print_dyn_rule_flags(pkt, O_LIMIT,
			    LOG_SECURITY | LOG_DEBUG, sbuf,
			    "too many entries");
		}
		return (NULL);
	}

	/* Take the new session into account. */
	DPARENT_COUNT_INC(p);
	/*
	 * We must exit from the critical section because the following
	 * code can acquire the bucket mutex.
	 * We rely on the 'count' field: the state will not expire
	 * while it has child states, i.e. while its 'count' field is
	 * non-zero. Return the state pointer; it will be used by child
	 * states as their parent.
	 */
	DYNSTATE_CRITICAL_EXIT();
	return (ret);
}

static int
dyn_install_state(const struct ipfw_flow_id *pkt, uint32_t zoneid,
    uint16_t fibnum, const void *ulp, int pktlen, struct ip_fw *rule,
    struct ipfw_dyn_info *info, uint32_t limit, uint16_t limit_mask,
    uint16_t kidx, uint8_t type)
{
	struct ipfw_flow_id id;
	uint32_t hashval, parent_hashval, ruleid, rulenum;
	int ret;

	MPASS(type == O_LIMIT || type == O_KEEP_STATE);

	ruleid = rule->id;
	rulenum = rule->rulenum;
	if (type == O_LIMIT) {
		/* Create a masked flow id and calculate the bucket. */
		id.addr_type = pkt->addr_type;
		id.proto = pkt->proto;
		id.fib = fibnum; /* unused */
		id.src_port = (limit_mask & DYN_SRC_PORT) ?
		    pkt->src_port: 0;
		id.dst_port = (limit_mask & DYN_DST_PORT) ?
		    pkt->dst_port: 0;
		if (IS_IP4_FLOW_ID(pkt)) {
			id.src_ip = (limit_mask & DYN_SRC_ADDR) ?
			    pkt->src_ip: 0;
			id.dst_ip = (limit_mask & DYN_DST_ADDR) ?
			    pkt->dst_ip: 0;
		}
#ifdef INET6
		else if (IS_IP6_FLOW_ID(pkt)) {
			if (limit_mask & DYN_SRC_ADDR)
				id.src_ip6 = pkt->src_ip6;
			else
				memset(&id.src_ip6, 0, sizeof(id.src_ip6));
			if (limit_mask & DYN_DST_ADDR)
				id.dst_ip6 = pkt->dst_ip6;
			else
				memset(&id.dst_ip6, 0, sizeof(id.dst_ip6));
		}
#endif
		else
			return (EAFNOSUPPORT);

		parent_hashval = hash_parent(&id, rule);
		rule = dyn_get_parent_state(&id, zoneid, rule, parent_hashval,
		    limit, kidx);
		if (rule == NULL) {
#if 0
			if (V_fw_verbose && last_log != time_uptime) {
				last_log = time_uptime;
				snprintf(sbuf, sizeof(sbuf),
				    "%u drop session", rule->rulenum);
				print_dyn_rule_flags(pkt, O_LIMIT,
				    LOG_SECURITY | LOG_DEBUG, sbuf,
				    "too many entries");
			}
#endif
			return (EACCES);
		}
		/*
		 * The limit has not been reached, create a new state.
		 * 'rule' now points to the parent state.
		 */
	}

	hashval = hash_packet(pkt);
	if (IS_IP4_FLOW_ID(pkt))
		ret = dyn_add_ipv4_state(rule, ruleid, rulenum, pkt,
		    ulp, pktlen, hashval, info, fibnum, kidx, type);
#ifdef INET6
	else if (IS_IP6_FLOW_ID(pkt))
		ret = dyn_add_ipv6_state(rule, ruleid, rulenum, pkt,
		    zoneid, ulp, pktlen, hashval, info, fibnum, kidx, type);
#endif /* INET6 */
	else
		ret = EAFNOSUPPORT;

	if (type == O_LIMIT) {
		if (ret != 0) {
			/*
			 * We failed to create a child state for the
			 * O_LIMIT opcode. Since we already counted it
			 * in the parent, we must revert the counter.
			 * 'rule' points to the parent state, use it to
			 * get the dyn_parent.
			 *
			 * XXXAE: it should be safe to use the 'rule'
			 * pointer without an extra lookup, the parent
			 * state is referenced and should not be freed.
			 */
			if (IS_IP4_FLOW_ID(&id))
				DPARENT_COUNT_DEC(
				    ((struct dyn_ipv4_state *)rule)->limit);
#ifdef INET6
			else if (IS_IP6_FLOW_ID(&id))
				DPARENT_COUNT_DEC(
				    ((struct dyn_ipv6_state *)rule)->limit);
#endif
		}
	}
	/*
	 * EEXIST means that a concurrent thread has already created
	 * this state. Consider this a success.
	 *
	 * XXXAE: should we invalidate 'info' content here?
	 */
	if (ret == EEXIST)
		return (0);
	return (ret);
}

/*
 * Install dynamic state.
 * chain - ipfw's instance;
 * rule - the parent rule that installs the state;
 * cmd - opcode that installs the state;
 * args - ipfw arguments;
 * ulp - upper level protocol header;
 * pktlen - packet length;
 * info - dynamic state lookup info;
 * tablearg - tablearg id.
 *
 * Returns a non-zero value (failure) if the state is not installed
 * because of errors or because session limitations are enforced.
 */
int
ipfw_dyn_install_state(struct ip_fw_chain *chain, struct ip_fw *rule,
    const ipfw_insn_limit *cmd, const struct ip_fw_args *args,
    const void *ulp, int pktlen, struct ipfw_dyn_info *info,
    uint32_t tablearg)
{
	uint32_t limit;
	uint16_t limit_mask;

	if (cmd->o.opcode == O_LIMIT) {
		limit = IP_FW_ARG_TABLEARG(chain, cmd->conn_limit, limit);
		limit_mask = cmd->limit_mask;
	} else {
		limit = 0;
		limit_mask = 0;
	}
	return (dyn_install_state(&args->f_id,
#ifdef INET6
	    IS_IP6_FLOW_ID(&args->f_id) ? dyn_getscopeid(args):
#endif
	    0, M_GETFIB(args->m), ulp, pktlen, rule, info, limit,
	    limit_mask, cmd->o.arg1, cmd->o.opcode));
}

/*
 * Free state entries from the expired lists that are safe to remove.
 */
static void
dyn_free_states(struct ip_fw_chain *chain)
{
	struct dyn_ipv4_state *s4, *s4n;
#ifdef INET6
	struct dyn_ipv6_state *s6, *s6n;
#endif
	int cached_count, i;

	/*
	 * We keep pointers to objects that are in use on each CPU
	 * in the per-cpu dyn_hp pointer. When an object is about to be
	 * removed, it is first unlinked from the corresponding list,
	 * which changes the dyn_bucket_xxx_delver version. The
	 * unlinked object is then placed onto the corresponding
	 * dyn_expired_xxx list. A reader that is going to dereference
	 * an object pointer checks the dyn_bucket_xxx_delver version
	 * before and after storing the pointer into dyn_hp. If the
	 * version is the same, the object is protected from freeing
	 * and is safe to dereference. Otherwise the reader iterates
	 * the list again from the beginning, and since the object is
	 * now unlinked, it will not be reachable.
	 *
	 * Copy the dyn_hp pointer of each CPU into the dyn_hp_cache
	 * array. It does not matter that some pointers may change
	 * while we are copying: we only need to check that objects
	 * removed in the previous pass are not in use. If a dyn_hp
	 * pointer does not contain such an object at the time we copy
	 * it, the object cannot appear there later, because it is
	 * already unlinked. And for newly stored pointers we will not
	 * free objects that are only unlinked in this pass.
	 */
	cached_count = 0;
	CPU_FOREACH(i) {
		dyn_hp_cache[cached_count] = DYNSTATE_GET(i);
		if (dyn_hp_cache[cached_count] != NULL)
			cached_count++;
	}

	/*
	 * Free expired states that are safe to free.
	 * Check each entry from the previous pass in the
	 * dyn_expired_xxx lists; if the pointer to the object is in
	 * the dyn_hp_cache array, keep it until the next pass.
	 * Otherwise it is safe to free the object.
	 *
	 * XXXAE: optimize this to use SLIST_REMOVE_AFTER.
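	 *
	 * For reference, a simplified reader-side sketch of the hazard
	 * pointer protocol described above (illustrative pseudo-code,
	 * not the exact DYNSTATE_* macro expansion):
	 *
	 *	restart:
	 *		v = DYN_BUCKET_VERSION(bucket, ipv4_del);
	 *		s = CK_SLIST_FIRST(&V_dyn_ipv4[bucket]);
	 *		DYNSTATE_PROTECT(s);	<- publish s in dyn_hp
	 *		if (v != DYN_BUCKET_VERSION(bucket, ipv4_del))
	 *			goto restart;	<- s may be unlinked
	 *	s is now safe to dereference until dyn_hp is cleared.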
	 */
#define DYN_FREE_STATES(s, next, name)		do {		\
	s = SLIST_FIRST(&V_dyn_expired_ ## name);		\
	while (s != NULL) {					\
		next = SLIST_NEXT(s, expired);			\
		for (i = 0; i < cached_count; i++)		\
			if (dyn_hp_cache[i] == s)		\
				break;				\
		if (i == cached_count) {			\
			if (s->type == O_LIMIT_PARENT &&	\
			    s->limit->count != 0) {		\
				s = next;			\
				continue;			\
			}					\
			SLIST_REMOVE(&V_dyn_expired_ ## name,	\
			    s, dyn_ ## name ## _state, expired); \
			if (s->type == O_LIMIT_PARENT)		\
				uma_zfree(V_dyn_parent_zone, s->limit); \
			else					\
				uma_zfree(V_dyn_data_zone, s->data); \
			uma_zfree(V_dyn_ ## name ## _zone, s);	\
		}						\
		s = next;					\
	}							\
} while (0)

	/*
	 * Protect access to the expired lists with DYN_EXPIRED_LOCK.
	 * Userland can invoke ipfw_expire_dyn_states() to delete
	 * specific states, and this leads to modification of the
	 * expired lists.
	 *
	 * XXXAE: do we need DYN_EXPIRED_LOCK? We can just use
	 * IPFW_UH_WLOCK to protect access to these lists.
	 */
	DYN_EXPIRED_LOCK();
	DYN_FREE_STATES(s4, s4n, ipv4);
#ifdef INET6
	DYN_FREE_STATES(s6, s6n, ipv6);
#endif
	DYN_EXPIRED_UNLOCK();
#undef DYN_FREE_STATES
}

/*
 * Returns:
 * 0 when the state is not matched by the specified range;
 * 1 when the state is matched by the specified range;
 * 2 when the state is matched by the specified range and deletion of
 *   dynamic states was requested.
 */
static int
dyn_match_range(uint16_t rulenum, uint8_t set, const ipfw_range_tlv *rt)
{

	MPASS(rt != NULL);
	/* flush all states */
	if (rt->flags & IPFW_RCFLAG_ALL) {
		if (rt->flags & IPFW_RCFLAG_DYNAMIC)
			return (2); /* forced */
		return (1);
	}
	if ((rt->flags & IPFW_RCFLAG_SET) != 0 && set != rt->set)
		return (0);
	if ((rt->flags & IPFW_RCFLAG_RANGE) != 0 &&
	    (rulenum < rt->start_rule || rulenum > rt->end_rule))
		return (0);
	if (rt->flags & IPFW_RCFLAG_DYNAMIC)
		return (2);
	return (1);
}

static void
dyn_acquire_rule(struct ip_fw_chain *ch, struct dyn_data *data,
    struct ip_fw *rule, uint16_t kidx)
{
	struct dyn_state_obj *obj;

	/*
	 * Do not acquire the reference twice.
	 * This can happen when rule deletion is executed for the same
	 * range, but with a different ruleset id.
	 */
	if (data->flags & DYN_REFERENCED)
		return;

	IPFW_UH_WLOCK_ASSERT(ch);
	MPASS(kidx != 0);

	data->flags |= DYN_REFERENCED;
	/* Reference the named object. */
	obj = SRV_OBJECT(ch, kidx);
	obj->no.refcnt++;
	MPASS(obj->no.etlv == IPFW_TLV_STATE_NAME);

	/* Reference the parent rule. */
	rule->refcnt++;
}

static void
dyn_release_rule(struct ip_fw_chain *ch, struct dyn_data *data,
    struct ip_fw *rule, uint16_t kidx)
{
	struct dyn_state_obj *obj;

	IPFW_UH_WLOCK_ASSERT(ch);
	MPASS(kidx != 0);

	obj = SRV_OBJECT(ch, kidx);
	if (obj->no.refcnt == 1)
		dyn_destroy(ch, &obj->no);
	else
		obj->no.refcnt--;

	if (--rule->refcnt == 1)
		ipfw_free_rule(rule);
}

/*
 * We do not keep O_LIMIT_PARENT states when V_dyn_keep_states is
 * enabled. An O_LIMIT state is created when a new connection is being
 * established and there is no matching state.
 * So, since the old parent rule has been deleted, we cannot create
 * new states with the old parent, and thus we cannot account for new
 * connections together with the already established ones, and cannot
 * do proper limiting.
 */
static int
dyn_match_ipv4_state(struct ip_fw_chain *ch, struct dyn_ipv4_state *s,
    const ipfw_range_tlv *rt)
{
	struct ip_fw *rule;
	int ret;

	if (s->type == O_LIMIT_PARENT) {
		rule = s->limit->parent;
		return (dyn_match_range(s->limit->rulenum, rule->set, rt));
	}

	rule = s->data->parent;
	if (s->type == O_LIMIT)
		rule = ((struct dyn_ipv4_state *)rule)->limit->parent;

	ret = dyn_match_range(s->data->rulenum, rule->set, rt);
	if (ret == 0 || V_dyn_keep_states == 0 || ret > 1)
		return (ret);

	dyn_acquire_rule(ch, s->data, rule, s->kidx);
	return (0);
}

#ifdef INET6
static int
dyn_match_ipv6_state(struct ip_fw_chain *ch, struct dyn_ipv6_state *s,
    const ipfw_range_tlv *rt)
{
	struct ip_fw *rule;
	int ret;

	if (s->type == O_LIMIT_PARENT) {
		rule = s->limit->parent;
		return (dyn_match_range(s->limit->rulenum, rule->set, rt));
	}

	rule = s->data->parent;
	if (s->type == O_LIMIT)
		rule = ((struct dyn_ipv6_state *)rule)->limit->parent;

	ret = dyn_match_range(s->data->rulenum, rule->set, rt);
	if (ret == 0 || V_dyn_keep_states == 0 || ret > 1)
		return (ret);

	dyn_acquire_rule(ch, s->data, rule, s->kidx);
	return (0);
}
#endif

/*
 * Unlink expired entries from the states lists.
 * @rt can be used to specify the range of states for deletion.
 */
static void
dyn_expire_states(struct ip_fw_chain *ch, ipfw_range_tlv *rt)
{
	struct dyn_ipv4_slist expired_ipv4;
#ifdef INET6
	struct dyn_ipv6_slist expired_ipv6;
	struct dyn_ipv6_state *s6, *s6n, *s6p;
#endif
	struct dyn_ipv4_state *s4, *s4n, *s4p;
	void *rule;
	int bucket, removed, length, max_length;

	IPFW_UH_WLOCK_ASSERT(ch);

	/*
	 * Unlink expired states from each bucket.
	 * With the bucket lock held, iterate the entries of each list:
	 * ipv4, ipv4_parent, ipv6, and ipv6_parent. Check the expire
	 * time and unlink the entry from the list, link the entry into
	 * the temporary expired_xxx list, then bump the "del" bucket
	 * version.
	 *
	 * When an entry is removed, the corresponding states counter
	 * is decremented. If the entry has O_LIMIT type, the parent's
	 * reference counter is decremented.
	 *
	 * NOTE: this function can be called from userland context
	 * when the user deletes rules. In this case all matched states
	 * will be forcibly unlinked. O_LIMIT_PARENT states will be
	 * kept in the expired lists until their reference counter
	 * becomes zero.
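	 *
	 * Sketch of the resulting lifecycle of a removed state
	 * (summarizing the above together with dyn_free_states()):
	 *  1. Bucket lock held: the state is unlinked from its bucket
	 *     list, the "del" bucket version is bumped and the
	 *     counters are decremented.
	 *  2. The state is linked into a local expired_xxx list, later
	 *     concatenated into V_dyn_expired_xxx under
	 *     DYN_EXPIRED_LOCK.
	 *  3. On a later tick dyn_free_states() frees it, once no
	 *     per-CPU hazard pointer references it (and, for
	 *     O_LIMIT_PARENT, once its 'count' drops to zero).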
	 */
#define DYN_UNLINK_STATES(s, prev, next, exp, af, name, extra)	do {	\
	length = 0;						\
	removed = 0;						\
	prev = NULL;						\
	s = CK_SLIST_FIRST(&V_dyn_ ## name [bucket]);		\
	while (s != NULL) {					\
		next = CK_SLIST_NEXT(s, entry);			\
		if ((TIME_LEQ((s)->exp, time_uptime) && extra) || \
		    (rt != NULL &&				\
		     dyn_match_ ## af ## _state(ch, s, rt))) {	\
			if (prev != NULL)			\
				CK_SLIST_REMOVE_AFTER(prev, entry); \
			else					\
				CK_SLIST_REMOVE_HEAD(		\
				    &V_dyn_ ## name [bucket], entry); \
			removed++;				\
			SLIST_INSERT_HEAD(&expired_ ## af, s, expired); \
			if (s->type == O_LIMIT_PARENT)		\
				DYN_COUNT_DEC(dyn_parent_count); \
			else {					\
				DYN_COUNT_DEC(dyn_count);	\
				if (s->data->flags & DYN_REFERENCED) { \
					rule = s->data->parent;	\
					if (s->type == O_LIMIT)	\
						rule = ((__typeof(s)) \
						    rule)->limit->parent; \
					dyn_release_rule(ch, s->data, \
					    rule, s->kidx);	\
				}				\
				if (s->type == O_LIMIT) {	\
					s = s->data->parent;	\
					DPARENT_COUNT_DEC(s->limit); \
				}				\
			}					\
		} else {					\
			prev = s;				\
			length++;				\
		}						\
		s = next;					\
	}							\
	if (removed != 0)					\
		DYN_BUCKET_VERSION_BUMP(bucket, name ## _del);	\
	if (length > max_length)				\
		max_length = length;				\
} while (0)

	SLIST_INIT(&expired_ipv4);
#ifdef INET6
	SLIST_INIT(&expired_ipv6);
#endif
	max_length = 0;
	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
		DYN_BUCKET_LOCK(bucket);
		DYN_UNLINK_STATES(s4, s4p, s4n, data->expire, ipv4, ipv4, 1);
		DYN_UNLINK_STATES(s4, s4p, s4n, limit->expire, ipv4,
		    ipv4_parent, (s4->limit->count == 0));
#ifdef INET6
		DYN_UNLINK_STATES(s6, s6p, s6n, data->expire, ipv6, ipv6, 1);
		DYN_UNLINK_STATES(s6, s6p, s6n, limit->expire, ipv6,
		    ipv6_parent, (s6->limit->count == 0));
#endif
		DYN_BUCKET_UNLOCK(bucket);
	}
	/* Update curr_max_length for statistics. */
	V_curr_max_length = max_length;
	/*
	 * Concatenate the temporary lists with the global expired
	 * lists.
	 */
	DYN_EXPIRED_LOCK();
	SLIST_CONCAT(&V_dyn_expired_ipv4, &expired_ipv4,
	    dyn_ipv4_state, expired);
#ifdef INET6
	SLIST_CONCAT(&V_dyn_expired_ipv6, &expired_ipv6,
	    dyn_ipv6_state, expired);
#endif
	DYN_EXPIRED_UNLOCK();
#undef DYN_UNLINK_STATES
#undef DYN_UNREF_STATES
}

static struct mbuf *
dyn_mgethdr(int len, uint16_t fibnum)
{
	struct mbuf *m;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
#ifdef MAC
	mac_netinet_firewall_send(m);
#endif
	M_SETFIB(m, fibnum);
	m->m_data += max_linkhdr;
	m->m_flags |= M_SKIP_FIREWALL;
	m->m_len = m->m_pkthdr.len = len;
	bzero(m->m_data, len);
	return (m);
}

static void
dyn_make_keepalive_ipv4(struct mbuf *m, in_addr_t src, in_addr_t dst,
    uint32_t seq, uint32_t ack, uint16_t sport, uint16_t dport)
{
	struct tcphdr *tcp;
	struct ip *ip;

	ip = mtod(m, struct ip *);
	ip->ip_v = 4;
	ip->ip_hl = sizeof(*ip) >> 2;
	ip->ip_tos = IPTOS_LOWDELAY;
	ip->ip_len = htons(m->m_len);
	ip->ip_off |= htons(IP_DF);
	ip->ip_ttl = V_ip_defttl;
	ip->ip_p = IPPROTO_TCP;
	ip->ip_src.s_addr = htonl(src);
	ip->ip_dst.s_addr = htonl(dst);

	tcp = mtodo(m, sizeof(struct ip));
	tcp->th_sport = htons(sport);
	tcp->th_dport = htons(dport);
	tcp->th_off = sizeof(struct tcphdr) >> 2;
	tcp->th_seq = htonl(seq);
	tcp->th_ack = htonl(ack);
	tcp_set_flags(tcp, TH_ACK);
	tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
	    htons(sizeof(struct tcphdr) + IPPROTO_TCP));

	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	m->m_pkthdr.csum_flags = CSUM_TCP;
}

static void
dyn_enqueue_keepalive_ipv4(struct mbufq *q, const struct dyn_ipv4_state *s)
{
	struct mbuf *m;

	if ((s->data->state & ACK_FWD) == 0 && s->data->ack_fwd > 0) {
		m = dyn_mgethdr(sizeof(struct ip) + sizeof(struct tcphdr),
		    s->data->fibnum);
		if (m != NULL) {
			dyn_make_keepalive_ipv4(m, s->dst, s->src,
			    s->data->ack_fwd - 1, s->data->ack_rev,
			    s->dport, s->sport);
			if (mbufq_enqueue(q, m)) {
				m_freem(m);
				log(LOG_DEBUG, "ipfw: limit for IPv4 "
				    "keepalive queue is reached.\n");
				return;
			}
		}
	}

	if ((s->data->state & ACK_REV) == 0 && s->data->ack_rev > 0) {
		m = dyn_mgethdr(sizeof(struct ip) + sizeof(struct tcphdr),
		    s->data->fibnum);
		if (m != NULL) {
			dyn_make_keepalive_ipv4(m, s->src, s->dst,
			    s->data->ack_rev - 1, s->data->ack_fwd,
			    s->sport, s->dport);
			if (mbufq_enqueue(q, m)) {
				m_freem(m);
				log(LOG_DEBUG, "ipfw: limit for IPv4 "
				    "keepalive queue is reached.\n");
				return;
			}
		}
	}
}

/*
 * Prepare and send keep-alive packets.
 */
static void
dyn_send_keepalive_ipv4(struct ip_fw_chain *chain)
{
	struct mbufq q;
	struct mbuf *m;
	struct dyn_ipv4_state *s;
	uint32_t bucket;

	mbufq_init(&q, INT_MAX);
	IPFW_UH_RLOCK(chain);
	/*
	 * It is safe not to use hazard pointers and to access the
	 * lists locklessly, because state entries cannot be deleted
	 * while we hold IPFW_UH_RLOCK.
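	 *
	 * The probes built by dyn_enqueue_keepalive_ipv4() above use
	 * the classic TCP keepalive trick: a bare ACK whose sequence
	 * number is one below the already acknowledged byte
	 * (ack_fwd - 1, resp. ack_rev - 1), so the peer answers with
	 * an ACK that refreshes this state on the normal input path.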
	 */
	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
		CK_SLIST_FOREACH(s, &V_dyn_ipv4[bucket], entry) {
			/*
			 * Only established TCP connections that will
			 * expire within dyn_keepalive_interval.
			 */
			if (s->proto != IPPROTO_TCP ||
			    (s->data->state & BOTH_SYN) != BOTH_SYN ||
			    TIME_LEQ(time_uptime + V_dyn_keepalive_interval,
			    s->data->expire))
				continue;
			dyn_enqueue_keepalive_ipv4(&q, s);
		}
	}
	IPFW_UH_RUNLOCK(chain);
	while ((m = mbufq_dequeue(&q)) != NULL)
		ip_output(m, NULL, NULL, 0, NULL, NULL);
}

#ifdef INET6
static void
dyn_make_keepalive_ipv6(struct mbuf *m, const struct in6_addr *src,
    const struct in6_addr *dst, uint32_t zoneid, uint32_t seq, uint32_t ack,
    uint16_t sport, uint16_t dport)
{
	struct tcphdr *tcp;
	struct ip6_hdr *ip6;

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_vfc |= IPV6_VERSION;
	ip6->ip6_plen = htons(sizeof(struct tcphdr));
	ip6->ip6_nxt = IPPROTO_TCP;
	ip6->ip6_hlim = IPV6_DEFHLIM;
	ip6->ip6_src = *src;
	if (IN6_IS_ADDR_LINKLOCAL(src))
		ip6->ip6_src.s6_addr16[1] = htons(zoneid & 0xffff);
	ip6->ip6_dst = *dst;
	if (IN6_IS_ADDR_LINKLOCAL(dst))
		ip6->ip6_dst.s6_addr16[1] = htons(zoneid & 0xffff);

	tcp = mtodo(m, sizeof(struct ip6_hdr));
	tcp->th_sport = htons(sport);
	tcp->th_dport = htons(dport);
	tcp->th_off = sizeof(struct tcphdr) >> 2;
	tcp->th_seq = htonl(seq);
	tcp->th_ack = htonl(ack);
	tcp_set_flags(tcp, TH_ACK);
	tcp->th_sum = in6_cksum_pseudo(ip6, sizeof(struct tcphdr),
	    IPPROTO_TCP, 0);

	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
}

static void
dyn_enqueue_keepalive_ipv6(struct mbufq *q, const struct dyn_ipv6_state *s)
{
	struct mbuf *m;

	if ((s->data->state & ACK_FWD) == 0 && s->data->ack_fwd > 0) {
		m = dyn_mgethdr(sizeof(struct ip6_hdr) +
		    sizeof(struct tcphdr), s->data->fibnum);
		if (m != NULL) {
			dyn_make_keepalive_ipv6(m, &s->dst, &s->src,
			    s->zoneid, s->data->ack_fwd - 1, s->data->ack_rev,
			    s->dport, s->sport);
			if (mbufq_enqueue(q, m)) {
				m_freem(m);
				log(LOG_DEBUG, "ipfw: limit for IPv6 "
				    "keepalive queue is reached.\n");
				return;
			}
		}
	}

	if ((s->data->state & ACK_REV) == 0 && s->data->ack_rev > 0) {
		m = dyn_mgethdr(sizeof(struct ip6_hdr) +
		    sizeof(struct tcphdr), s->data->fibnum);
		if (m != NULL) {
			dyn_make_keepalive_ipv6(m, &s->src, &s->dst,
			    s->zoneid, s->data->ack_rev - 1, s->data->ack_fwd,
			    s->sport, s->dport);
			if (mbufq_enqueue(q, m)) {
				m_freem(m);
				log(LOG_DEBUG, "ipfw: limit for IPv6 "
				    "keepalive queue is reached.\n");
				return;
			}
		}
	}
}

static void
dyn_send_keepalive_ipv6(struct ip_fw_chain *chain)
{
	struct mbufq q;
	struct mbuf *m;
	struct dyn_ipv6_state *s;
	uint32_t bucket;

	mbufq_init(&q, INT_MAX);
	IPFW_UH_RLOCK(chain);
	/*
	 * It is safe not to use hazard pointers and to access the
	 * lists locklessly, because state entries cannot be deleted
	 * while we hold IPFW_UH_RLOCK.
	 */
	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
		CK_SLIST_FOREACH(s, &V_dyn_ipv6[bucket], entry) {
			/*
			 * Only established TCP connections that will
			 * expire within dyn_keepalive_interval.
			 */
			if (s->proto != IPPROTO_TCP ||
			    (s->data->state & BOTH_SYN) != BOTH_SYN ||
			    TIME_LEQ(time_uptime + V_dyn_keepalive_interval,
			    s->data->expire))
				continue;
			dyn_enqueue_keepalive_ipv6(&q, s);
		}
	}
	IPFW_UH_RUNLOCK(chain);
	while ((m = mbufq_dequeue(&q)) != NULL)
		ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
}
#endif /* INET6 */

static void
dyn_grow_hashtable(struct ip_fw_chain *chain, uint32_t new, int flags)
{
#ifdef INET6
	struct dyn_ipv6ck_slist *ipv6, *ipv6_parent;
	uint32_t *ipv6_add, *ipv6_del, *ipv6_parent_add, *ipv6_parent_del;
	struct dyn_ipv6_state *s6;
#endif
	struct dyn_ipv4ck_slist *ipv4, *ipv4_parent;
	uint32_t *ipv4_add, *ipv4_del, *ipv4_parent_add, *ipv4_parent_del;
	struct dyn_ipv4_state *s4;
	struct mtx *bucket_lock;
	void *tmp;
	uint32_t bucket;

	MPASS(powerof2(new));
	DYN_DEBUG("grow hash size %u -> %u", V_curr_dyn_buckets, new);
	/*
	 * Allocate and initialize new lists.
	 */
	bucket_lock = malloc(new * sizeof(struct mtx), M_IPFW,
	    flags | M_ZERO);
	if (bucket_lock == NULL)
		return;

	ipv4 = ipv4_parent = NULL;
	ipv4_add = ipv4_del = ipv4_parent_add = ipv4_parent_del = NULL;
#ifdef INET6
	ipv6 = ipv6_parent = NULL;
	ipv6_add = ipv6_del = ipv6_parent_add = ipv6_parent_del = NULL;
#endif

	ipv4 = malloc(new * sizeof(struct dyn_ipv4ck_slist), M_IPFW,
	    flags | M_ZERO);
	if (ipv4 == NULL)
		goto bad;
	ipv4_parent = malloc(new * sizeof(struct dyn_ipv4ck_slist), M_IPFW,
	    flags | M_ZERO);
	if (ipv4_parent == NULL)
		goto bad;
	ipv4_add = malloc(new * sizeof(uint32_t), M_IPFW, flags | M_ZERO);
	if (ipv4_add == NULL)
		goto bad;
	ipv4_del = malloc(new * sizeof(uint32_t), M_IPFW, flags | M_ZERO);
	if (ipv4_del == NULL)
		goto bad;
	ipv4_parent_add = malloc(new * sizeof(uint32_t), M_IPFW,
	    flags | M_ZERO);
	if (ipv4_parent_add == NULL)
		goto bad;
	ipv4_parent_del = malloc(new * sizeof(uint32_t), M_IPFW,
	    flags | M_ZERO);
	if (ipv4_parent_del == NULL)
		goto bad;
#ifdef INET6
	ipv6 = malloc(new * sizeof(struct dyn_ipv6ck_slist), M_IPFW,
	    flags | M_ZERO);
	if (ipv6 == NULL)
		goto bad;
	ipv6_parent = malloc(new * sizeof(struct dyn_ipv6ck_slist), M_IPFW,
	    flags | M_ZERO);
	if (ipv6_parent == NULL)
		goto bad;
	ipv6_add = malloc(new * sizeof(uint32_t), M_IPFW, flags | M_ZERO);
	if (ipv6_add == NULL)
		goto bad;
	ipv6_del = malloc(new * sizeof(uint32_t), M_IPFW, flags | M_ZERO);
	if (ipv6_del == NULL)
		goto bad;
	ipv6_parent_add = malloc(new * sizeof(uint32_t), M_IPFW,
	    flags | M_ZERO);
	if (ipv6_parent_add == NULL)
		goto bad;
	ipv6_parent_del = malloc(new * sizeof(uint32_t), M_IPFW,
	    flags | M_ZERO);
	if (ipv6_parent_del == NULL)
		goto bad;
#endif
	for (bucket = 0; bucket < new; bucket++) {
		DYN_BUCKET_LOCK_INIT(bucket_lock, bucket);
		CK_SLIST_INIT(&ipv4[bucket]);
		CK_SLIST_INIT(&ipv4_parent[bucket]);
#ifdef INET6
		CK_SLIST_INIT(&ipv6[bucket]);
		CK_SLIST_INIT(&ipv6_parent[bucket]);
#endif
	}

#define DYN_RELINK_STATES(s, hval, i, head, ohead)	do {	\
	while ((s = CK_SLIST_FIRST(&V_dyn_ ## ohead[i])) != NULL) { \
		CK_SLIST_REMOVE_HEAD(&V_dyn_ ## ohead[i], entry); \
		CK_SLIST_INSERT_HEAD(&head[DYN_BUCKET(s->hval, new)], \
		    s, entry);					\
	}							\
} while (0)
	/*
	 * Prevent rule changes from userland.
	 */
	IPFW_UH_WLOCK(chain);
	/*
	 * Hold traffic processing until we finish the resize to
	 * prevent access to the states lists.
	 */
	IPFW_WLOCK(chain);
	/* Re-link all dynamic states. */
	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
		DYN_RELINK_STATES(s4, data->hashval, bucket, ipv4, ipv4);
		DYN_RELINK_STATES(s4, limit->hashval, bucket, ipv4_parent,
		    ipv4_parent);
#ifdef INET6
		DYN_RELINK_STATES(s6, data->hashval, bucket, ipv6, ipv6);
		DYN_RELINK_STATES(s6, limit->hashval, bucket, ipv6_parent,
		    ipv6_parent);
#endif
	}

#define DYN_SWAP_PTR(old, new, tmp) do {	\
	tmp = old;				\
	old = new;				\
	new = tmp;				\
} while (0)
	/* Swap pointers. */
	DYN_SWAP_PTR(V_dyn_bucket_lock, bucket_lock, tmp);
	DYN_SWAP_PTR(V_dyn_ipv4, ipv4, tmp);
	DYN_SWAP_PTR(V_dyn_ipv4_parent, ipv4_parent, tmp);
	DYN_SWAP_PTR(V_dyn_ipv4_add, ipv4_add, tmp);
	DYN_SWAP_PTR(V_dyn_ipv4_parent_add, ipv4_parent_add, tmp);
	DYN_SWAP_PTR(V_dyn_ipv4_del, ipv4_del, tmp);
	DYN_SWAP_PTR(V_dyn_ipv4_parent_del, ipv4_parent_del, tmp);

#ifdef INET6
	DYN_SWAP_PTR(V_dyn_ipv6, ipv6, tmp);
	DYN_SWAP_PTR(V_dyn_ipv6_parent, ipv6_parent, tmp);
	DYN_SWAP_PTR(V_dyn_ipv6_add, ipv6_add, tmp);
	DYN_SWAP_PTR(V_dyn_ipv6_parent_add, ipv6_parent_add, tmp);
	DYN_SWAP_PTR(V_dyn_ipv6_del, ipv6_del, tmp);
	DYN_SWAP_PTR(V_dyn_ipv6_parent_del, ipv6_parent_del, tmp);
#endif
	bucket = V_curr_dyn_buckets;
	V_curr_dyn_buckets = new;

	IPFW_WUNLOCK(chain);
	IPFW_UH_WUNLOCK(chain);

	/* Release old resources. */
	while (bucket-- != 0)
		DYN_BUCKET_LOCK_DESTROY(bucket_lock, bucket);
bad:
	free(bucket_lock, M_IPFW);
	free(ipv4, M_IPFW);
	free(ipv4_parent, M_IPFW);
	free(ipv4_add, M_IPFW);
	free(ipv4_parent_add, M_IPFW);
	free(ipv4_del, M_IPFW);
	free(ipv4_parent_del, M_IPFW);
#ifdef INET6
	free(ipv6, M_IPFW);
	free(ipv6_parent, M_IPFW);
	free(ipv6_add, M_IPFW);
	free(ipv6_parent_add, M_IPFW);
	free(ipv6_del, M_IPFW);
	free(ipv6_parent_del, M_IPFW);
#endif
}

/*
 * This function is used to perform various maintenance
 * on dynamic hash lists. Currently it is called every second.
 */
static void
dyn_tick(void *vnetx)
{
	struct epoch_tracker et;
	uint32_t buckets;

	CURVNET_SET((struct vnet *)vnetx);
	/*
	 * First free the states unlinked in previous passes.
	 */
	dyn_free_states(&V_layer3_chain);
	/*
	 * Now unlink other expired states.
	 * We use IPFW_UH_WLOCK to avoid concurrent calls of
	 * dyn_expire_states(). It is the only function that deletes
	 * state entries from the states lists.
	 */
	IPFW_UH_WLOCK(&V_layer3_chain);
	dyn_expire_states(&V_layer3_chain, NULL);
	IPFW_UH_WUNLOCK(&V_layer3_chain);
	/*
	 * Send keepalives if they are enabled and the time has come.
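	 *
	 * E.g. with the defaults set in ipfw_dyn_init() below
	 * (dyn_keepalive_period 5, dyn_keepalive_interval 20), a state
	 * is probed at most once every 5 seconds, and only during
	 * roughly the last 20 seconds before it would expire.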
	 */
	if (V_dyn_keepalive != 0 &&
	    V_dyn_keepalive_last + V_dyn_keepalive_period <= time_uptime) {
		V_dyn_keepalive_last = time_uptime;
		NET_EPOCH_ENTER(et);
		dyn_send_keepalive_ipv4(&V_layer3_chain);
#ifdef INET6
		dyn_send_keepalive_ipv6(&V_layer3_chain);
#endif
		NET_EPOCH_EXIT(et);
	}
	/*
	 * Check if we need to resize the hash:
	 * if the current number of states exceeds the number of
	 * buckets in the hash, and dyn_buckets_max permits growing the
	 * number of buckets, then do it. Grow the hash size to the
	 * minimum power of 2 that is bigger than the current states
	 * count (e.g. 6000 states in 2048 buckets grow to
	 * 1 << fls(6000), i.e. 8192 buckets).
	 */
	if (V_curr_dyn_buckets < V_dyn_buckets_max &&
	    (V_curr_dyn_buckets < V_dyn_count / 2 || (
	    V_curr_dyn_buckets < V_dyn_count && V_curr_max_length > 8))) {
		buckets = 1 << fls(V_dyn_count);
		if (buckets > V_dyn_buckets_max)
			buckets = V_dyn_buckets_max;
		dyn_grow_hashtable(&V_layer3_chain, buckets, M_NOWAIT);
	}

	callout_reset_on(&V_dyn_timeout, hz, dyn_tick, vnetx, 0);
	CURVNET_RESTORE();
}

void
ipfw_expire_dyn_states(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
{
	/*
	 * Do not perform any checks if we currently have no dynamic
	 * states.
	 */
	if (V_dyn_count == 0)
		return;

	IPFW_UH_WLOCK_ASSERT(chain);
	dyn_expire_states(chain, rt);
}

/*
 * Pass through all states and reset the eaction for orphaned rules.
 */
void
ipfw_dyn_reset_eaction(struct ip_fw_chain *ch, uint16_t eaction_id,
    uint16_t default_id, uint16_t instance_id)
{
#ifdef INET6
	struct dyn_ipv6_state *s6;
#endif
	struct dyn_ipv4_state *s4;
	struct ip_fw *rule;
	uint32_t bucket;

#define DYN_RESET_EACTION(s, h, b)				\
	CK_SLIST_FOREACH(s, &V_dyn_ ## h[b], entry) {		\
		if ((s->data->flags & DYN_REFERENCED) == 0)	\
			continue;				\
		rule = s->data->parent;				\
		if (s->type == O_LIMIT)				\
			rule = ((__typeof(s))rule)->limit->parent; \
		ipfw_reset_eaction(ch, rule, eaction_id,	\
		    default_id, instance_id);			\
	}

	IPFW_UH_WLOCK_ASSERT(ch);
	if (V_dyn_count == 0)
		return;
	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
		DYN_RESET_EACTION(s4, ipv4, bucket);
#ifdef INET6
		DYN_RESET_EACTION(s6, ipv6, bucket);
#endif
	}
}

/*
 * Returns the size of dynamic states in the legacy format.
 */
int
ipfw_dyn_len(void)
{

	return ((V_dyn_count + V_dyn_parent_count) * sizeof(ipfw_dyn_rule));
}

/*
 * Returns the number of dynamic states.
 * Marks every named object index used by dynamic states with a bit
 * in @bmask.
 * Returns the number of named objects accounted in bmask via @nocnt.
 * Used by dump format v1 (current).
 */
uint32_t
ipfw_dyn_get_count(uint32_t *bmask, int *nocnt)
{
#ifdef INET6
	struct dyn_ipv6_state *s6;
#endif
	struct dyn_ipv4_state *s4;
	uint32_t bucket;

#define DYN_COUNT_OBJECTS(s, h, b)				\
	CK_SLIST_FOREACH(s, &V_dyn_ ## h[b], entry) {		\
		MPASS(s->kidx != 0);				\
		if (ipfw_mark_object_kidx(bmask, IPFW_TLV_STATE_NAME, \
		    s->kidx) != 0)				\
			(*nocnt)++;				\
	}

	IPFW_UH_RLOCK_ASSERT(&V_layer3_chain);

	/*
	 * No need to pass through all the buckets if there are no
	 * states.
	 */
	*nocnt = 0;
	if (V_dyn_count + V_dyn_parent_count == 0)
		return (0);

	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
		DYN_COUNT_OBJECTS(s4, ipv4, bucket);
#ifdef INET6
		DYN_COUNT_OBJECTS(s6, ipv6, bucket);
#endif
	}

	return (V_dyn_count + V_dyn_parent_count);
}

/*
 * Check if the rule contains at least one dynamic opcode.
 *
 * Returns 1 if such an opcode is found, 0 otherwise.
 */
int
ipfw_is_dyn_rule(struct ip_fw *rule)
{
	int cmdlen, l;
	ipfw_insn *cmd;

	l = rule->cmd_len;
	cmd = rule->cmd;
	cmdlen = 0;
	for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
		cmdlen = F_LEN(cmd);

		switch (cmd->opcode) {
		case O_LIMIT:
		case O_KEEP_STATE:
		case O_PROBE_STATE:
		case O_CHECK_STATE:
			return (1);
		}
	}

	return (0);
}

static void
dyn_export_parent(const struct dyn_parent *p, uint16_t kidx, uint8_t set,
    ipfw_dyn_rule *dst)
{

	dst->dyn_type = O_LIMIT_PARENT;
	dst->kidx = kidx;
	dst->count = (uint16_t)DPARENT_COUNT(p);
	dst->expire = TIME_LEQ(p->expire, time_uptime) ? 0:
	    p->expire - time_uptime;

	/* 'rule' is used to pass up the rule number and set. */
	memcpy(&dst->rule, &p->rulenum, sizeof(p->rulenum));

	/* Store the set number into the high word of the dst->rule
	 * pointer. */
	memcpy((char *)&dst->rule + sizeof(p->rulenum), &set, sizeof(set));

	/* unused fields */
	dst->pcnt = 0;
	dst->bcnt = 0;
	dst->parent = NULL;
	dst->state = 0;
	dst->ack_fwd = 0;
	dst->ack_rev = 0;
	dst->bucket = p->hashval;
	/*
	 * The legacy userland code will interpret a NULL here as a
	 * marker for the last dynamic rule.
	 */
	dst->next = (ipfw_dyn_rule *)1;
}

static void
dyn_export_data(const struct dyn_data *data, uint16_t kidx, uint8_t type,
    uint8_t set, ipfw_dyn_rule *dst)
{

	dst->dyn_type = type;
	dst->kidx = kidx;
	dst->pcnt = data->pcnt_fwd + data->pcnt_rev;
	dst->bcnt = data->bcnt_fwd + data->bcnt_rev;
	dst->expire = TIME_LEQ(data->expire, time_uptime) ? 0:
	    data->expire - time_uptime;

	/* 'rule' is used to pass up the rule number and set. */
	memcpy(&dst->rule, &data->rulenum, sizeof(data->rulenum));

	/* Store the set number into the high word of the dst->rule
	 * pointer. */
	memcpy((char *)&dst->rule + sizeof(data->rulenum), &set, sizeof(set));

	dst->state = data->state;
	if (data->flags & DYN_REFERENCED)
		dst->state |= IPFW_DYN_ORPHANED;

	/* unused fields */
	dst->parent = NULL;
	dst->ack_fwd = data->ack_fwd;
	dst->ack_rev = data->ack_rev;
	dst->count = 0;
	dst->bucket = data->hashval;
	/*
	 * The legacy userland code will interpret a NULL here as a
	 * marker for the last dynamic rule.
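	 *
	 * A sketch of how legacy userland decodes the overloaded
	 * fields filled in above (illustrative, not a verbatim copy of
	 * the ipfw(8) sources; 'd' is a pointer to a received
	 * ipfw_dyn_rule):
	 *
	 *	uint16_t rulenum, set;
	 *	memcpy(&rulenum, &d->rule, sizeof(rulenum));
	 *	memcpy(&set, (char *)&d->rule + sizeof(rulenum),
	 *	    sizeof(set));
	 *	last = (d->next == NULL);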
	 */
	dst->next = (ipfw_dyn_rule *)1;
}

static void
dyn_export_ipv4_state(const struct dyn_ipv4_state *s, ipfw_dyn_rule *dst)
{
	struct ip_fw *rule;

	switch (s->type) {
	case O_LIMIT_PARENT:
		rule = s->limit->parent;
		dyn_export_parent(s->limit, s->kidx, rule->set, dst);
		break;
	default:
		rule = s->data->parent;
		if (s->type == O_LIMIT)
			rule = ((struct dyn_ipv4_state *)rule)->limit->parent;
		dyn_export_data(s->data, s->kidx, s->type, rule->set, dst);
	}

	dst->id.dst_ip = s->dst;
	dst->id.src_ip = s->src;
	dst->id.dst_port = s->dport;
	dst->id.src_port = s->sport;
	dst->id.fib = s->data->fibnum;
	dst->id.proto = s->proto;
	dst->id._flags = 0;
	dst->id.addr_type = 4;

	memset(&dst->id.dst_ip6, 0, sizeof(dst->id.dst_ip6));
	memset(&dst->id.src_ip6, 0, sizeof(dst->id.src_ip6));
	dst->id.flow_id6 = dst->id.extra = 0;
}

#ifdef INET6
static void
dyn_export_ipv6_state(const struct dyn_ipv6_state *s, ipfw_dyn_rule *dst)
{
	struct ip_fw *rule;

	switch (s->type) {
	case O_LIMIT_PARENT:
		rule = s->limit->parent;
		dyn_export_parent(s->limit, s->kidx, rule->set, dst);
		break;
	default:
		rule = s->data->parent;
		if (s->type == O_LIMIT)
			rule = ((struct dyn_ipv6_state *)rule)->limit->parent;
		dyn_export_data(s->data, s->kidx, s->type, rule->set, dst);
	}

	dst->id.src_ip6 = s->src;
	dst->id.dst_ip6 = s->dst;
	dst->id.dst_port = s->dport;
	dst->id.src_port = s->sport;
	dst->id.fib = s->data->fibnum;
	dst->id.proto = s->proto;
	dst->id._flags = 0;
	dst->id.addr_type = 6;

	dst->id.dst_ip = dst->id.src_ip = 0;
	dst->id.flow_id6 = dst->id.extra = 0;
}
#endif /* INET6 */

/*
 * Fills the buffer given by @sd with dynamic states.
 * Used by dump format v1 (current).
 *
 * Returns 0 on success.
 */
int
ipfw_dump_states(struct ip_fw_chain *chain, struct sockopt_data *sd)
{
#ifdef INET6
	struct dyn_ipv6_state *s6;
#endif
	struct dyn_ipv4_state *s4;
	ipfw_obj_dyntlv *dst, *last;
	ipfw_obj_ctlv *ctlv;
	uint32_t bucket;

	if (V_dyn_count == 0)
		return (0);

	/*
	 * IPFW_UH_RLOCK guarantees that neither another userland
	 * request nor the callout thread will delete entries from the
	 * states lists.
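	 *
	 * The resulting buffer layout is (sketch):
	 *
	 *	ipfw_obj_ctlv   (type IPFW_TLV_DYNSTATE_LIST)
	 *	ipfw_obj_dyntlv (type IPFW_TLV_DYN_ENT)
	 *	...
	 *	ipfw_obj_dyntlv (the last entry gets IPFW_DF_LAST)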
	 */
	IPFW_UH_RLOCK_ASSERT(chain);

	ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
	if (ctlv == NULL)
		return (ENOMEM);
	ctlv->head.type = IPFW_TLV_DYNSTATE_LIST;
	ctlv->objsize = sizeof(ipfw_obj_dyntlv);
	last = NULL;

#define DYN_EXPORT_STATES(s, af, h, b)				\
	CK_SLIST_FOREACH(s, &V_dyn_ ## h[b], entry) {		\
		dst = (ipfw_obj_dyntlv *)ipfw_get_sopt_space(sd, \
		    sizeof(ipfw_obj_dyntlv));			\
		if (dst == NULL)				\
			return (ENOMEM);			\
		dyn_export_ ## af ## _state(s, &dst->state);	\
		dst->head.length = sizeof(ipfw_obj_dyntlv);	\
		dst->head.type = IPFW_TLV_DYN_ENT;		\
		last = dst;					\
	}

	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
		DYN_EXPORT_STATES(s4, ipv4, ipv4_parent, bucket);
		DYN_EXPORT_STATES(s4, ipv4, ipv4, bucket);
#ifdef INET6
		DYN_EXPORT_STATES(s6, ipv6, ipv6_parent, bucket);
		DYN_EXPORT_STATES(s6, ipv6, ipv6, bucket);
#endif /* INET6 */
	}

	/* mark last dynamic rule */
	if (last != NULL)
		last->head.flags = IPFW_DF_LAST; /* XXX: unused */
	return (0);
#undef DYN_EXPORT_STATES
}

/*
 * Fill the given buffer with dynamic states (legacy format).
 * IPFW_UH_RLOCK has to be held while calling.
 */
void
ipfw_get_dynamic(struct ip_fw_chain *chain, char **pbp, const char *ep)
{
#ifdef INET6
	struct dyn_ipv6_state *s6;
#endif
	struct dyn_ipv4_state *s4;
	ipfw_dyn_rule *p, *last = NULL;
	char *bp;
	uint32_t bucket;

	if (V_dyn_count == 0)
		return;
	bp = *pbp;

	IPFW_UH_RLOCK_ASSERT(chain);

#define DYN_EXPORT_STATES(s, af, head, b)			\
	CK_SLIST_FOREACH(s, &V_dyn_ ## head[b], entry) {	\
		if (bp + sizeof(*p) > ep)			\
			break;					\
		p = (ipfw_dyn_rule *)bp;			\
		dyn_export_ ## af ## _state(s, p);		\
		last = p;					\
		bp += sizeof(*p);				\
	}

	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
		DYN_EXPORT_STATES(s4, ipv4, ipv4_parent, bucket);
		DYN_EXPORT_STATES(s4, ipv4, ipv4, bucket);
#ifdef INET6
		DYN_EXPORT_STATES(s6, ipv6, ipv6_parent, bucket);
		DYN_EXPORT_STATES(s6, ipv6, ipv6, bucket);
#endif /* INET6 */
	}

	if (last != NULL) /* mark last dynamic rule */
		last->next = NULL;
	*pbp = bp;
#undef DYN_EXPORT_STATES
}

void
ipfw_dyn_init(struct ip_fw_chain *chain)
{

#ifdef IPFIREWALL_JENKINSHASH
	V_dyn_hashseed = arc4random();
#endif
	V_dyn_max = 16384;		/* max # of states */
	V_dyn_parent_max = 4096;	/* max # of parent states */
	V_dyn_buckets_max = 8192;	/* must be power of 2 */

	V_dyn_ack_lifetime = 300;
	V_dyn_syn_lifetime = 20;
	V_dyn_fin_lifetime = 1;
	V_dyn_rst_lifetime = 1;
	V_dyn_udp_lifetime = 10;
	V_dyn_short_lifetime = 5;

	V_dyn_keepalive_interval = 20;
	V_dyn_keepalive_period = 5;
	V_dyn_keepalive = 1;		/* send keepalives */
	V_dyn_keepalive_last = time_uptime;

	V_dyn_data_zone = uma_zcreate("IPFW dynamic states data",
	    sizeof(struct dyn_data), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	uma_zone_set_max(V_dyn_data_zone, V_dyn_max);

	V_dyn_parent_zone = uma_zcreate("IPFW parent dynamic states",
	    sizeof(struct dyn_parent), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	uma_zone_set_max(V_dyn_parent_zone, V_dyn_parent_max);

	SLIST_INIT(&V_dyn_expired_ipv4);
	V_dyn_ipv4 = NULL;
	V_dyn_ipv4_parent = NULL;
	V_dyn_ipv4_zone = uma_zcreate("IPFW IPv4 dynamic states",
	    sizeof(struct dyn_ipv4_state), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

#ifdef INET6
	SLIST_INIT(&V_dyn_expired_ipv6);
	V_dyn_ipv6 = NULL;
	V_dyn_ipv6_parent = NULL;
	V_dyn_ipv6_zone = uma_zcreate("IPFW IPv6 dynamic states",
	    sizeof(struct dyn_ipv6_state), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
#endif

	/* Initialize buckets. */
	V_curr_dyn_buckets = 0;
	V_dyn_bucket_lock = NULL;
	dyn_grow_hashtable(chain, 256, M_WAITOK);

	if (IS_DEFAULT_VNET(curvnet))
		dyn_hp_cache = malloc(mp_ncpus * sizeof(void *), M_IPFW,
		    M_WAITOK | M_ZERO);

	DYN_EXPIRED_LOCK_INIT();
	callout_init(&V_dyn_timeout, 1);
	callout_reset(&V_dyn_timeout, hz, dyn_tick, curvnet);
	IPFW_ADD_OBJ_REWRITER(IS_DEFAULT_VNET(curvnet), dyn_opcodes);
}

void
ipfw_dyn_uninit(int pass)
{
#ifdef INET6
	struct dyn_ipv6_state *s6;
#endif
	struct dyn_ipv4_state *s4;
	int bucket;

	if (pass == 0) {
		callout_drain(&V_dyn_timeout);
		return;
	}
	IPFW_DEL_OBJ_REWRITER(IS_DEFAULT_VNET(curvnet), dyn_opcodes);
	DYN_EXPIRED_LOCK_DESTROY();

#define DYN_FREE_STATES_FORCED(CK, s, af, name, en)	do {	\
	while ((s = CK ## SLIST_FIRST(&V_dyn_ ## name)) != NULL) { \
		CK ## SLIST_REMOVE_HEAD(&V_dyn_ ## name, en);	\
		if (s->type == O_LIMIT_PARENT)			\
			uma_zfree(V_dyn_parent_zone, s->limit);	\
		else						\
			uma_zfree(V_dyn_data_zone, s->data);	\
		uma_zfree(V_dyn_ ## af ## _zone, s);		\
	}							\
} while (0)
	for (bucket = 0; bucket < V_curr_dyn_buckets; bucket++) {
		DYN_BUCKET_LOCK_DESTROY(V_dyn_bucket_lock, bucket);

		DYN_FREE_STATES_FORCED(CK_, s4, ipv4, ipv4[bucket], entry);
		DYN_FREE_STATES_FORCED(CK_, s4, ipv4, ipv4_parent[bucket],
		    entry);
#ifdef INET6
		DYN_FREE_STATES_FORCED(CK_, s6, ipv6, ipv6[bucket], entry);
		DYN_FREE_STATES_FORCED(CK_, s6, ipv6, ipv6_parent[bucket],
		    entry);
#endif /* INET6 */
	}
	DYN_FREE_STATES_FORCED(, s4, ipv4, expired_ipv4, expired);
#ifdef INET6
	DYN_FREE_STATES_FORCED(, s6, ipv6, expired_ipv6, expired);
#endif
#undef DYN_FREE_STATES_FORCED

	uma_zdestroy(V_dyn_ipv4_zone);
	uma_zdestroy(V_dyn_data_zone);
	uma_zdestroy(V_dyn_parent_zone);
#ifdef INET6
	uma_zdestroy(V_dyn_ipv6_zone);
	free(V_dyn_ipv6, M_IPFW);
	free(V_dyn_ipv6_parent, M_IPFW);
	free(V_dyn_ipv6_add, M_IPFW);
	free(V_dyn_ipv6_parent_add, M_IPFW);
	free(V_dyn_ipv6_del, M_IPFW);
	free(V_dyn_ipv6_parent_del, M_IPFW);
#endif
	free(V_dyn_bucket_lock, M_IPFW);
	free(V_dyn_ipv4, M_IPFW);
	free(V_dyn_ipv4_parent, M_IPFW);
	free(V_dyn_ipv4_add, M_IPFW);
	free(V_dyn_ipv4_parent_add, M_IPFW);
	free(V_dyn_ipv4_del, M_IPFW);
	free(V_dyn_ipv4_parent_del, M_IPFW);
	if (IS_DEFAULT_VNET(curvnet))
		free(dyn_hp_cache, M_IPFW);
}
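
/*
 * Usage example (illustrative): ipfw(8) rules that exercise the
 * machinery above. "keep-state" installs O_KEEP_STATE states that are
 * matched by "check-state"; "limit" installs O_LIMIT states linked to
 * an O_LIMIT_PARENT per masked flow:
 *
 *	ipfw add check-state
 *	ipfw add allow tcp from any to me 22 setup keep-state
 *	ipfw add allow tcp from any to me 80 setup limit src-addr 10
 */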