1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 /* 32 * The FreeBSD IP packet firewall, main file 33 */ 34 35 #include "opt_ipfw.h" 36 #include "opt_ipdivert.h" 37 #include "opt_inet.h" 38 #ifndef INET 39 #error "IPFIREWALL requires INET" 40 #endif /* INET */ 41 #include "opt_inet6.h" 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/condvar.h> 46 #include <sys/counter.h> 47 #include <sys/eventhandler.h> 48 #include <sys/malloc.h> 49 #include <sys/mbuf.h> 50 #include <sys/kernel.h> 51 #include <sys/lock.h> 52 #include <sys/jail.h> 53 #include <sys/module.h> 54 #include <sys/priv.h> 55 #include <sys/proc.h> 56 #include <sys/rwlock.h> 57 #include <sys/rmlock.h> 58 #include <sys/sdt.h> 59 #include <sys/socket.h> 60 #include <sys/socketvar.h> 61 #include <sys/sysctl.h> 62 #include <sys/syslog.h> 63 #include <sys/ucred.h> 64 #include <net/ethernet.h> /* for ETHERTYPE_IP */ 65 #include <net/if.h> 66 #include <net/if_var.h> 67 #include <net/if_private.h> 68 #include <net/route.h> 69 #include <net/route/nhop.h> 70 #include <net/pfil.h> 71 #include <net/vnet.h> 72 73 #include <netpfil/pf/pf_mtag.h> 74 75 #include <netinet/in.h> 76 #include <netinet/in_var.h> 77 #include <netinet/in_pcb.h> 78 #include <netinet/ip.h> 79 #include <netinet/ip_var.h> 80 #include <netinet/ip_icmp.h> 81 #include <netinet/ip_fw.h> 82 #include <netinet/ip_carp.h> 83 #include <netinet/pim.h> 84 #include <netinet/tcp_var.h> 85 #include <netinet/udp.h> 86 #include <netinet/udp_var.h> 87 #include <netinet/sctp.h> 88 #include <netinet/sctp_crc32.h> 89 #include <netinet/sctp_header.h> 90 91 #include <netinet/ip6.h> 92 #include <netinet/icmp6.h> 93 #include <netinet/in_fib.h> 94 #ifdef INET6 95 #include <netinet6/in6_fib.h> 96 #include <netinet6/in6_pcb.h> 97 #include <netinet6/scope6_var.h> 98 #include <netinet6/ip6_var.h> 99 #endif 100 101 #include <net/if_gre.h> /* for struct grehdr */ 102 103 #include <netpfil/ipfw/ip_fw_private.h> 104 105 #include <machine/in_cksum.h> /* XXX for in_cksum */ 106 107 #ifdef MAC 108 #include <security/mac/mac_framework.h> 109 #endif 110 111 #define 
IPFW_PROBE(probe, arg0, arg1, arg2, arg3, arg4, arg5) \ 112 SDT_PROBE6(ipfw, , , probe, arg0, arg1, arg2, arg3, arg4, arg5) 113 114 SDT_PROVIDER_DEFINE(ipfw); 115 SDT_PROBE_DEFINE6(ipfw, , , rule__matched, 116 "int", /* retval */ 117 "int", /* af */ 118 "void *", /* src addr */ 119 "void *", /* dst addr */ 120 "struct ip_fw_args *", /* args */ 121 "struct ip_fw *" /* rule */); 122 123 /* 124 * static variables followed by global ones. 125 * All ipfw global variables are here. 126 */ 127 128 VNET_DEFINE_STATIC(int, fw_deny_unknown_exthdrs); 129 #define V_fw_deny_unknown_exthdrs VNET(fw_deny_unknown_exthdrs) 130 131 VNET_DEFINE_STATIC(int, fw_permit_single_frag6) = 1; 132 #define V_fw_permit_single_frag6 VNET(fw_permit_single_frag6) 133 134 #ifdef IPFIREWALL_DEFAULT_TO_ACCEPT 135 static int default_to_accept = 1; 136 #else 137 static int default_to_accept; 138 #endif 139 140 VNET_DEFINE(int, autoinc_step); 141 VNET_DEFINE(int, fw_one_pass) = 1; 142 143 VNET_DEFINE(unsigned int, fw_tables_max); 144 VNET_DEFINE(unsigned int, fw_tables_sets) = 0; /* Don't use set-aware tables */ 145 /* Use 128 tables by default */ 146 static unsigned int default_fw_tables = IPFW_TABLES_DEFAULT; 147 148 static int jump_lookup_pos(struct ip_fw_chain *chain, struct ip_fw *f, int num, 149 int tablearg, int jump_backwards); 150 #ifndef LINEAR_SKIPTO 151 static int jump_cached(struct ip_fw_chain *chain, struct ip_fw *f, int num, 152 int tablearg, int jump_backwards); 153 #define JUMP(ch, f, num, targ, back) jump_cached(ch, f, num, targ, back) 154 #else 155 #define JUMP(ch, f, num, targ, back) jump_lookup_pos(ch, f, num, targ, back) 156 #endif 157 158 /* 159 * Each rule belongs to one of 32 different sets (0..31). 160 * The variable set_disable contains one bit per set. 161 * If the bit is set, all rules in the corresponding set 162 * are disabled. Set RESVD_SET(31) is reserved for the default rule 163 * and rules that are not deleted by the flush command, 164 * and CANNOT be disabled. 165 * Rules in set RESVD_SET can only be deleted individually. 166 */ 167 VNET_DEFINE(u_int32_t, set_disable); 168 #define V_set_disable VNET(set_disable) 169 170 VNET_DEFINE(int, fw_verbose); 171 /* counter for ipfw_log(NULL...) 
*/ 172 VNET_DEFINE(u_int64_t, norule_counter); 173 VNET_DEFINE(int, verbose_limit); 174 175 /* layer3_chain contains the list of rules for layer 3 */ 176 VNET_DEFINE(struct ip_fw_chain, layer3_chain); 177 178 /* ipfw_vnet_ready controls when we are open for business */ 179 VNET_DEFINE(int, ipfw_vnet_ready) = 0; 180 181 VNET_DEFINE(int, ipfw_nat_ready) = 0; 182 183 ipfw_nat_t *ipfw_nat_ptr = NULL; 184 struct cfg_nat *(*lookup_nat_ptr)(struct nat_list *, int); 185 ipfw_nat_cfg_t *ipfw_nat_cfg_ptr; 186 ipfw_nat_cfg_t *ipfw_nat_del_ptr; 187 ipfw_nat_cfg_t *ipfw_nat_get_cfg_ptr; 188 ipfw_nat_cfg_t *ipfw_nat_get_log_ptr; 189 190 #ifdef SYSCTL_NODE 191 uint32_t dummy_def = IPFW_DEFAULT_RULE; 192 static int sysctl_ipfw_table_num(SYSCTL_HANDLER_ARGS); 193 static int sysctl_ipfw_tables_sets(SYSCTL_HANDLER_ARGS); 194 195 SYSBEGIN(f3) 196 197 SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 198 "Firewall"); 199 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, one_pass, 200 CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE3, &VNET_NAME(fw_one_pass), 0, 201 "Only do a single pass through ipfw when using dummynet(4)"); 202 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, autoinc_step, 203 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(autoinc_step), 0, 204 "Rule number auto-increment step"); 205 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose, 206 CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE3, &VNET_NAME(fw_verbose), 0, 207 "Log matches to ipfw rules"); 208 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, 209 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(verbose_limit), 0, 210 "Set upper limit of matches of ipfw rules logged"); 211 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, default_rule, CTLFLAG_RD, 212 &dummy_def, 0, 213 "The default/max possible rule number."); 214 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, tables_max, 215 CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 216 0, 0, sysctl_ipfw_table_num, "IU", 217 "Maximum number of concurrently used tables"); 218 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, tables_sets, 219 CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 220 0, 0, sysctl_ipfw_tables_sets, "IU", 221 "Use per-set namespace for tables"); 222 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, default_to_accept, CTLFLAG_RDTUN, 223 &default_to_accept, 0, 224 "Make the default rule accept all packets."); 225 TUNABLE_INT("net.inet.ip.fw.tables_max", (int *)&default_fw_tables); 226 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count, 227 CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(layer3_chain.n_rules), 0, 228 "Number of static rules"); 229 230 #ifdef INET6 231 SYSCTL_DECL(_net_inet6_ip6); 232 SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, fw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 233 "Firewall"); 234 SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, deny_unknown_exthdrs, 235 CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE, 236 &VNET_NAME(fw_deny_unknown_exthdrs), 0, 237 "Deny packets with unknown IPv6 Extension Headers"); 238 SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, permit_single_frag6, 239 CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE, 240 &VNET_NAME(fw_permit_single_frag6), 0, 241 "Permit single packet IPv6 fragments"); 242 #endif /* INET6 */ 243 244 SYSEND 245 246 #endif /* SYSCTL_NODE */ 247 248 /* 249 * Some macros used in the various matching options. 
250 * L3HDR maps an ipv4 pointer into a layer3 header pointer of type T 251 * Other macros just cast void * into the appropriate type 252 */ 253 #define L3HDR(T, ip) ((T *)((u_int32_t *)(ip) + (ip)->ip_hl)) 254 #define TCP(p) ((struct tcphdr *)(p)) 255 #define SCTP(p) ((struct sctphdr *)(p)) 256 #define UDP(p) ((struct udphdr *)(p)) 257 #define ICMP(p) ((struct icmphdr *)(p)) 258 #define ICMP6(p) ((struct icmp6_hdr *)(p)) 259 260 static __inline int 261 icmptype_match(struct icmphdr *icmp, ipfw_insn_u32 *cmd) 262 { 263 int type = icmp->icmp_type; 264 265 return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1<<type)) ); 266 } 267 268 #define TT ( (1 << ICMP_ECHO) | (1 << ICMP_ROUTERSOLICIT) | \ 269 (1 << ICMP_TSTAMP) | (1 << ICMP_IREQ) | (1 << ICMP_MASKREQ) ) 270 271 static int 272 is_icmp_query(struct icmphdr *icmp) 273 { 274 int type = icmp->icmp_type; 275 276 return (type <= ICMP_MAXTYPE && (TT & (1<<type)) ); 277 } 278 #undef TT 279 280 /* 281 * The following checks use two arrays of 8 or 16 bits to store the 282 * bits that we want set or clear, respectively. They are in the 283 * low and high half of cmd->arg1 or cmd->d[0]. 284 * 285 * We scan options and store the bits we find set. We succeed if 286 * 287 * (want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear 288 * 289 * The code is sometimes optimized not to store additional variables. 290 */ 291 292 static int 293 flags_match(ipfw_insn *cmd, u_int8_t bits) 294 { 295 u_char want_clear; 296 bits = ~bits; 297 298 if ( ((cmd->arg1 & 0xff) & bits) != 0) 299 return 0; /* some bits we want set were clear */ 300 want_clear = (cmd->arg1 >> 8) & 0xff; 301 if ( (want_clear & bits) != want_clear) 302 return 0; /* some bits we want clear were set */ 303 return 1; 304 } 305 306 static int 307 ipopts_match(struct ip *ip, ipfw_insn *cmd) 308 { 309 int optlen, bits = 0; 310 u_char *cp = (u_char *)(ip + 1); 311 int x = (ip->ip_hl << 2) - sizeof (struct ip); 312 313 for (; x > 0; x -= optlen, cp += optlen) { 314 int opt = cp[IPOPT_OPTVAL]; 315 316 if (opt == IPOPT_EOL) 317 break; 318 if (opt == IPOPT_NOP) 319 optlen = 1; 320 else { 321 optlen = cp[IPOPT_OLEN]; 322 if (optlen <= 0 || optlen > x) 323 return 0; /* invalid or truncated */ 324 } 325 switch (opt) { 326 default: 327 break; 328 329 case IPOPT_LSRR: 330 bits |= IP_FW_IPOPT_LSRR; 331 break; 332 333 case IPOPT_SSRR: 334 bits |= IP_FW_IPOPT_SSRR; 335 break; 336 337 case IPOPT_RR: 338 bits |= IP_FW_IPOPT_RR; 339 break; 340 341 case IPOPT_TS: 342 bits |= IP_FW_IPOPT_TS; 343 break; 344 } 345 } 346 return (flags_match(cmd, bits)); 347 } 348 349 /* 350 * Parse TCP options. The logic copied from tcp_dooptions(). 
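 * The return value is a bit vector of IP_FW_TCPOPT_* flags; when a
 * non-NULL mss pointer is supplied the peer's advertised MSS is copied
 * out as well. As an illustrative example (not taken from a real trace),
 * a SYN carrying MSS, window scale, SACK-permitted and timestamp options
 * would yield
 *   bits == (IP_FW_TCPOPT_MSS | IP_FW_TCPOPT_WINDOW |
 *            IP_FW_TCPOPT_SACK | IP_FW_TCPOPT_TS)
 * with *mss set from the option payload.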
351 */ 352 static int 353 tcpopts_parse(const struct tcphdr *tcp, uint16_t *mss) 354 { 355 const u_char *cp = (const u_char *)(tcp + 1); 356 int optlen, bits = 0; 357 int cnt = (tcp->th_off << 2) - sizeof(struct tcphdr); 358 359 for (; cnt > 0; cnt -= optlen, cp += optlen) { 360 int opt = cp[0]; 361 if (opt == TCPOPT_EOL) 362 break; 363 if (opt == TCPOPT_NOP) 364 optlen = 1; 365 else { 366 if (cnt < 2) 367 break; 368 optlen = cp[1]; 369 if (optlen < 2 || optlen > cnt) 370 break; 371 } 372 373 switch (opt) { 374 default: 375 break; 376 377 case TCPOPT_MAXSEG: 378 if (optlen != TCPOLEN_MAXSEG) 379 break; 380 bits |= IP_FW_TCPOPT_MSS; 381 if (mss != NULL) 382 *mss = be16dec(cp + 2); 383 break; 384 385 case TCPOPT_WINDOW: 386 if (optlen == TCPOLEN_WINDOW) 387 bits |= IP_FW_TCPOPT_WINDOW; 388 break; 389 390 case TCPOPT_SACK_PERMITTED: 391 if (optlen == TCPOLEN_SACK_PERMITTED) 392 bits |= IP_FW_TCPOPT_SACK; 393 break; 394 395 case TCPOPT_SACK: 396 if (optlen > 2 && (optlen - 2) % TCPOLEN_SACK == 0) 397 bits |= IP_FW_TCPOPT_SACK; 398 break; 399 400 case TCPOPT_TIMESTAMP: 401 if (optlen == TCPOLEN_TIMESTAMP) 402 bits |= IP_FW_TCPOPT_TS; 403 break; 404 } 405 } 406 return (bits); 407 } 408 409 static int 410 tcpopts_match(struct tcphdr *tcp, ipfw_insn *cmd) 411 { 412 413 return (flags_match(cmd, tcpopts_parse(tcp, NULL))); 414 } 415 416 static int 417 iface_match(struct ifnet *ifp, ipfw_insn_if *cmd, struct ip_fw_chain *chain, 418 uint32_t *tablearg) 419 { 420 421 if (ifp == NULL) /* no iface with this packet, match fails */ 422 return (0); 423 424 /* Check by name or by IP address */ 425 if (cmd->name[0] != '\0') { /* match by name */ 426 if (cmd->name[0] == '\1') /* use tablearg to match */ 427 return ipfw_lookup_table(chain, cmd->p.kidx, 0, 428 &ifp->if_index, tablearg); 429 /* Check name */ 430 if (cmd->p.glob) { 431 if (fnmatch(cmd->name, ifp->if_xname, 0) == 0) 432 return(1); 433 } else { 434 if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0) 435 return(1); 436 } 437 } else { 438 #if !defined(USERSPACE) && defined(__FreeBSD__) /* and OSX too ? */ 439 struct ifaddr *ia; 440 441 NET_EPOCH_ASSERT(); 442 443 CK_STAILQ_FOREACH(ia, &ifp->if_addrhead, ifa_link) { 444 if (ia->ifa_addr->sa_family != AF_INET) 445 continue; 446 if (cmd->p.ip.s_addr == ((struct sockaddr_in *) 447 (ia->ifa_addr))->sin_addr.s_addr) 448 return (1); /* match */ 449 } 450 #endif /* __FreeBSD__ */ 451 } 452 return(0); /* no match, fail ... */ 453 } 454 455 /* 456 * The verify_path function checks if a route to the src exists and 457 * if it is reachable via ifp (when provided). 458 * 459 * The 'verrevpath' option checks that the interface that an IP packet 460 * arrives on is the same interface that traffic destined for the 461 * packet's source address would be routed out of. 462 * The 'versrcreach' option just checks that the source address is 463 * reachable via any route (except default) in the routing table. 464 * These two are a measure to block forged packets. This is also 465 * commonly known as "anti-spoofing" or Unicast Reverse Path 466 * Forwarding (Unicast RFP) in Cisco-ese. The name of the knobs 467 * is purposely reminiscent of the Cisco IOS command, 468 * 469 * ip verify unicast reverse-path 470 * ip verify unicast source reachable-via any 471 * 472 * which implements the same functionality. But note that the syntax 473 * is misleading, and the check may be performed on all IP packets 474 * whether unicast, multicast, or broadcast. 
475 */ 476 static int 477 verify_path(struct in_addr src, struct ifnet *ifp, u_int fib) 478 { 479 #if defined(USERSPACE) || !defined(__FreeBSD__) 480 return 0; 481 #else 482 struct nhop_object *nh; 483 484 nh = fib4_lookup(fib, src, 0, NHR_NONE, 0); 485 if (nh == NULL) 486 return (0); 487 488 /* 489 * If ifp is provided, check for equality with rtentry. 490 * We should use rt->rt_ifa->ifa_ifp, instead of rt->rt_ifp, 491 * in order to pass packets injected back by if_simloop(): 492 * routing entry (via lo0) for our own address 493 * may exist, so we need to handle routing assymetry. 494 */ 495 if (ifp != NULL && ifp != nh->nh_aifp) 496 return (0); 497 498 /* if no ifp provided, check if rtentry is not default route */ 499 if (ifp == NULL && (nh->nh_flags & NHF_DEFAULT) != 0) 500 return (0); 501 502 /* or if this is a blackhole/reject route */ 503 if (ifp == NULL && (nh->nh_flags & (NHF_REJECT|NHF_BLACKHOLE)) != 0) 504 return (0); 505 506 /* found valid route */ 507 return 1; 508 #endif /* __FreeBSD__ */ 509 } 510 511 /* 512 * Generate an SCTP packet containing an ABORT chunk. The verification tag 513 * is given by vtag. The T-bit is set in the ABORT chunk if and only if 514 * reflected is not 0. 515 */ 516 517 static struct mbuf * 518 ipfw_send_abort(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t vtag, 519 int reflected) 520 { 521 struct mbuf *m; 522 struct ip *ip; 523 #ifdef INET6 524 struct ip6_hdr *ip6; 525 #endif 526 struct sctphdr *sctp; 527 struct sctp_chunkhdr *chunk; 528 u_int16_t hlen, plen, tlen; 529 530 MGETHDR(m, M_NOWAIT, MT_DATA); 531 if (m == NULL) 532 return (NULL); 533 534 M_SETFIB(m, id->fib); 535 #ifdef MAC 536 if (replyto != NULL) 537 mac_netinet_firewall_reply(replyto, m); 538 else 539 mac_netinet_firewall_send(m); 540 #else 541 (void)replyto; /* don't warn about unused arg */ 542 #endif 543 544 switch (id->addr_type) { 545 case 4: 546 hlen = sizeof(struct ip); 547 break; 548 #ifdef INET6 549 case 6: 550 hlen = sizeof(struct ip6_hdr); 551 break; 552 #endif 553 default: 554 /* XXX: log me?!? 
*/ 555 FREE_PKT(m); 556 return (NULL); 557 } 558 plen = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 559 tlen = hlen + plen; 560 m->m_data += max_linkhdr; 561 m->m_flags |= M_SKIP_FIREWALL; 562 m->m_pkthdr.len = m->m_len = tlen; 563 m->m_pkthdr.rcvif = NULL; 564 bzero(m->m_data, tlen); 565 566 switch (id->addr_type) { 567 case 4: 568 ip = mtod(m, struct ip *); 569 570 ip->ip_v = 4; 571 ip->ip_hl = sizeof(struct ip) >> 2; 572 ip->ip_tos = IPTOS_LOWDELAY; 573 ip->ip_len = htons(tlen); 574 ip->ip_id = htons(0); 575 ip->ip_off = htons(0); 576 ip->ip_ttl = V_ip_defttl; 577 ip->ip_p = IPPROTO_SCTP; 578 ip->ip_sum = 0; 579 ip->ip_src.s_addr = htonl(id->dst_ip); 580 ip->ip_dst.s_addr = htonl(id->src_ip); 581 582 sctp = (struct sctphdr *)(ip + 1); 583 break; 584 #ifdef INET6 585 case 6: 586 ip6 = mtod(m, struct ip6_hdr *); 587 588 ip6->ip6_vfc = IPV6_VERSION; 589 ip6->ip6_plen = htons(plen); 590 ip6->ip6_nxt = IPPROTO_SCTP; 591 ip6->ip6_hlim = IPV6_DEFHLIM; 592 ip6->ip6_src = id->dst_ip6; 593 ip6->ip6_dst = id->src_ip6; 594 595 sctp = (struct sctphdr *)(ip6 + 1); 596 break; 597 #endif 598 } 599 600 sctp->src_port = htons(id->dst_port); 601 sctp->dest_port = htons(id->src_port); 602 sctp->v_tag = htonl(vtag); 603 sctp->checksum = htonl(0); 604 605 chunk = (struct sctp_chunkhdr *)(sctp + 1); 606 chunk->chunk_type = SCTP_ABORT_ASSOCIATION; 607 chunk->chunk_flags = 0; 608 if (reflected != 0) { 609 chunk->chunk_flags |= SCTP_HAD_NO_TCB; 610 } 611 chunk->chunk_length = htons(sizeof(struct sctp_chunkhdr)); 612 613 sctp->checksum = sctp_calculate_cksum(m, hlen); 614 615 return (m); 616 } 617 618 /* 619 * Generate a TCP packet, containing either a RST or a keepalive. 620 * When flags & TH_RST, we are sending a RST packet, because of a 621 * "reset" action matched the packet. 622 * Otherwise we are sending a keepalive, and flags & TH_ 623 * The 'replyto' mbuf is the mbuf being replied to, if any, and is required 624 * so that MAC can label the reply appropriately. 625 */ 626 struct mbuf * 627 ipfw_send_pkt(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t seq, 628 u_int32_t ack, int flags) 629 { 630 struct mbuf *m = NULL; /* stupid compiler */ 631 struct ip *h = NULL; /* stupid compiler */ 632 #ifdef INET6 633 struct ip6_hdr *h6 = NULL; 634 #endif 635 struct tcphdr *th = NULL; 636 int len, dir; 637 638 MGETHDR(m, M_NOWAIT, MT_DATA); 639 if (m == NULL) 640 return (NULL); 641 642 M_SETFIB(m, id->fib); 643 #ifdef MAC 644 if (replyto != NULL) 645 mac_netinet_firewall_reply(replyto, m); 646 else 647 mac_netinet_firewall_send(m); 648 #else 649 (void)replyto; /* don't warn about unused arg */ 650 #endif 651 652 switch (id->addr_type) { 653 case 4: 654 len = sizeof(struct ip) + sizeof(struct tcphdr); 655 break; 656 #ifdef INET6 657 case 6: 658 len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 659 break; 660 #endif 661 default: 662 /* XXX: log me?!? 
*/ 663 FREE_PKT(m); 664 return (NULL); 665 } 666 dir = ((flags & (TH_SYN | TH_RST)) == TH_SYN); 667 668 m->m_data += max_linkhdr; 669 m->m_flags |= M_SKIP_FIREWALL; 670 m->m_pkthdr.len = m->m_len = len; 671 m->m_pkthdr.rcvif = NULL; 672 bzero(m->m_data, len); 673 674 switch (id->addr_type) { 675 case 4: 676 h = mtod(m, struct ip *); 677 678 /* prepare for checksum */ 679 h->ip_p = IPPROTO_TCP; 680 h->ip_len = htons(sizeof(struct tcphdr)); 681 if (dir) { 682 h->ip_src.s_addr = htonl(id->src_ip); 683 h->ip_dst.s_addr = htonl(id->dst_ip); 684 } else { 685 h->ip_src.s_addr = htonl(id->dst_ip); 686 h->ip_dst.s_addr = htonl(id->src_ip); 687 } 688 689 th = (struct tcphdr *)(h + 1); 690 break; 691 #ifdef INET6 692 case 6: 693 h6 = mtod(m, struct ip6_hdr *); 694 695 /* prepare for checksum */ 696 h6->ip6_nxt = IPPROTO_TCP; 697 h6->ip6_plen = htons(sizeof(struct tcphdr)); 698 if (dir) { 699 h6->ip6_src = id->src_ip6; 700 h6->ip6_dst = id->dst_ip6; 701 } else { 702 h6->ip6_src = id->dst_ip6; 703 h6->ip6_dst = id->src_ip6; 704 } 705 706 th = (struct tcphdr *)(h6 + 1); 707 break; 708 #endif 709 } 710 711 if (dir) { 712 th->th_sport = htons(id->src_port); 713 th->th_dport = htons(id->dst_port); 714 } else { 715 th->th_sport = htons(id->dst_port); 716 th->th_dport = htons(id->src_port); 717 } 718 th->th_off = sizeof(struct tcphdr) >> 2; 719 720 if (flags & TH_RST) { 721 if (flags & TH_ACK) { 722 th->th_seq = htonl(ack); 723 th->th_flags = TH_RST; 724 } else { 725 if (flags & TH_SYN) 726 seq++; 727 th->th_ack = htonl(seq); 728 th->th_flags = TH_RST | TH_ACK; 729 } 730 } else { 731 /* 732 * Keepalive - use caller provided sequence numbers 733 */ 734 th->th_seq = htonl(seq); 735 th->th_ack = htonl(ack); 736 th->th_flags = TH_ACK; 737 } 738 739 switch (id->addr_type) { 740 case 4: 741 th->th_sum = in_cksum(m, len); 742 743 /* finish the ip header */ 744 h->ip_v = 4; 745 h->ip_hl = sizeof(*h) >> 2; 746 h->ip_tos = IPTOS_LOWDELAY; 747 h->ip_off = htons(0); 748 h->ip_len = htons(len); 749 h->ip_ttl = V_ip_defttl; 750 h->ip_sum = 0; 751 break; 752 #ifdef INET6 753 case 6: 754 th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(*h6), 755 sizeof(struct tcphdr)); 756 757 /* finish the ip6 header */ 758 h6->ip6_vfc |= IPV6_VERSION; 759 h6->ip6_hlim = IPV6_DEFHLIM; 760 break; 761 #endif 762 } 763 764 return (m); 765 } 766 767 #ifdef INET6 768 /* 769 * ipv6 specific rules here... 
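 * icmp6type_match() below treats cmd->d[] as a bit map indexed by the
 * ICMPv6 type: e.g. type 135 (neighbor solicitation) tests bit 135%32
 * of word 135/32. flow6id_match() simply compares the packet's flow
 * identifier against each value carried in the instruction.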
770 */ 771 static __inline int 772 icmp6type_match(int type, ipfw_insn_u32 *cmd) 773 { 774 return (type <= ICMP6_MAXTYPE && (cmd->d[type/32] & (1<<(type%32)) ) ); 775 } 776 777 static int 778 flow6id_match(int curr_flow, ipfw_insn_u32 *cmd) 779 { 780 int i; 781 for (i=0; i <= cmd->o.arg1; ++i) 782 if (curr_flow == cmd->d[i]) 783 return 1; 784 return 0; 785 } 786 787 /* support for IP6_*_ME opcodes */ 788 static const struct in6_addr lla_mask = {{{ 789 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 790 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 791 }}}; 792 793 static int 794 ipfw_localip6(struct in6_addr *in6) 795 { 796 struct rm_priotracker in6_ifa_tracker; 797 struct in6_ifaddr *ia; 798 799 if (IN6_IS_ADDR_MULTICAST(in6)) 800 return (0); 801 802 if (!IN6_IS_ADDR_LINKLOCAL(in6)) 803 return (in6_localip(in6)); 804 805 IN6_IFADDR_RLOCK(&in6_ifa_tracker); 806 CK_STAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) { 807 if (!IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr)) 808 continue; 809 if (IN6_ARE_MASKED_ADDR_EQUAL(&ia->ia_addr.sin6_addr, 810 in6, &lla_mask)) { 811 IN6_IFADDR_RUNLOCK(&in6_ifa_tracker); 812 return (1); 813 } 814 } 815 IN6_IFADDR_RUNLOCK(&in6_ifa_tracker); 816 return (0); 817 } 818 819 static int 820 verify_path6(struct in6_addr *src, struct ifnet *ifp, u_int fib) 821 { 822 struct nhop_object *nh; 823 824 if (IN6_IS_SCOPE_LINKLOCAL(src)) 825 return (1); 826 827 nh = fib6_lookup(fib, src, 0, NHR_NONE, 0); 828 if (nh == NULL) 829 return (0); 830 831 /* If ifp is provided, check for equality with route table. */ 832 if (ifp != NULL && ifp != nh->nh_aifp) 833 return (0); 834 835 /* if no ifp provided, check if rtentry is not default route */ 836 if (ifp == NULL && (nh->nh_flags & NHF_DEFAULT) != 0) 837 return (0); 838 839 /* or if this is a blackhole/reject route */ 840 if (ifp == NULL && (nh->nh_flags & (NHF_REJECT|NHF_BLACKHOLE)) != 0) 841 return (0); 842 843 /* found valid route */ 844 return 1; 845 } 846 847 static int 848 is_icmp6_query(int icmp6_type) 849 { 850 if ((icmp6_type <= ICMP6_MAXTYPE) && 851 (icmp6_type == ICMP6_ECHO_REQUEST || 852 icmp6_type == ICMP6_MEMBERSHIP_QUERY || 853 icmp6_type == ICMP6_WRUREQUEST || 854 icmp6_type == ICMP6_FQDN_QUERY || 855 icmp6_type == ICMP6_NI_QUERY)) 856 return (1); 857 858 return (0); 859 } 860 861 static int 862 map_icmp_unreach(int code) 863 { 864 865 /* RFC 7915 p4.2 */ 866 switch (code) { 867 case ICMP_UNREACH_NET: 868 case ICMP_UNREACH_HOST: 869 case ICMP_UNREACH_SRCFAIL: 870 case ICMP_UNREACH_NET_UNKNOWN: 871 case ICMP_UNREACH_HOST_UNKNOWN: 872 case ICMP_UNREACH_TOSNET: 873 case ICMP_UNREACH_TOSHOST: 874 return (ICMP6_DST_UNREACH_NOROUTE); 875 case ICMP_UNREACH_PORT: 876 return (ICMP6_DST_UNREACH_NOPORT); 877 default: 878 /* 879 * Map the rest of codes into admit prohibited. 880 * XXX: unreach proto should be mapped into ICMPv6 881 * parameter problem, but we use only unreach type. 
882 */ 883 return (ICMP6_DST_UNREACH_ADMIN); 884 } 885 } 886 887 static void 888 send_reject6(struct ip_fw_args *args, int code, u_int hlen, struct ip6_hdr *ip6) 889 { 890 struct mbuf *m; 891 892 m = args->m; 893 if (code == ICMP6_UNREACH_RST && args->f_id.proto == IPPROTO_TCP) { 894 struct tcphdr *tcp; 895 tcp = (struct tcphdr *)((char *)ip6 + hlen); 896 897 if ((tcp->th_flags & TH_RST) == 0) { 898 struct mbuf *m0; 899 m0 = ipfw_send_pkt(args->m, &(args->f_id), 900 ntohl(tcp->th_seq), ntohl(tcp->th_ack), 901 tcp->th_flags | TH_RST); 902 if (m0 != NULL) 903 ip6_output(m0, NULL, NULL, 0, NULL, NULL, 904 NULL); 905 } 906 FREE_PKT(m); 907 } else if (code == ICMP6_UNREACH_ABORT && 908 args->f_id.proto == IPPROTO_SCTP) { 909 struct mbuf *m0; 910 struct sctphdr *sctp; 911 u_int32_t v_tag; 912 int reflected; 913 914 sctp = (struct sctphdr *)((char *)ip6 + hlen); 915 reflected = 1; 916 v_tag = ntohl(sctp->v_tag); 917 /* Investigate the first chunk header if available */ 918 if (m->m_len >= hlen + sizeof(struct sctphdr) + 919 sizeof(struct sctp_chunkhdr)) { 920 struct sctp_chunkhdr *chunk; 921 922 chunk = (struct sctp_chunkhdr *)(sctp + 1); 923 switch (chunk->chunk_type) { 924 case SCTP_INITIATION: 925 /* 926 * Packets containing an INIT chunk MUST have 927 * a zero v-tag. 928 */ 929 if (v_tag != 0) { 930 v_tag = 0; 931 break; 932 } 933 /* INIT chunk MUST NOT be bundled */ 934 if (m->m_pkthdr.len > 935 hlen + sizeof(struct sctphdr) + 936 ntohs(chunk->chunk_length) + 3) { 937 break; 938 } 939 /* Use the initiate tag if available */ 940 if ((m->m_len >= hlen + sizeof(struct sctphdr) + 941 sizeof(struct sctp_chunkhdr) + 942 offsetof(struct sctp_init, a_rwnd))) { 943 struct sctp_init *init; 944 945 init = (struct sctp_init *)(chunk + 1); 946 v_tag = ntohl(init->initiate_tag); 947 reflected = 0; 948 } 949 break; 950 case SCTP_ABORT_ASSOCIATION: 951 /* 952 * If the packet contains an ABORT chunk, don't 953 * reply. 954 * XXX: We should search through all chunks, 955 * but do not do that to avoid attacks. 956 */ 957 v_tag = 0; 958 break; 959 } 960 } 961 if (v_tag == 0) { 962 m0 = NULL; 963 } else { 964 m0 = ipfw_send_abort(args->m, &(args->f_id), v_tag, 965 reflected); 966 } 967 if (m0 != NULL) 968 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL); 969 FREE_PKT(m); 970 } else if (code != ICMP6_UNREACH_RST && code != ICMP6_UNREACH_ABORT) { 971 /* Send an ICMPv6 unreach. */ 972 #if 0 973 /* 974 * Unlike above, the mbufs need to line up with the ip6 hdr, 975 * as the contents are read. We need to m_adj() the 976 * needed amount. 977 * The mbuf will however be thrown away so we can adjust it. 978 * Remember we did an m_pullup on it already so we 979 * can make some assumptions about contiguousness. 980 */ 981 if (args->L3offset) 982 m_adj(m, args->L3offset); 983 #endif 984 icmp6_error(m, ICMP6_DST_UNREACH, code, 0); 985 } else 986 FREE_PKT(m); 987 988 args->m = NULL; 989 } 990 991 #endif /* INET6 */ 992 993 /* 994 * sends a reject message, consuming the mbuf passed as an argument. 995 */ 996 static void 997 send_reject(struct ip_fw_args *args, const ipfw_insn *cmd, int iplen, 998 struct ip *ip) 999 { 1000 int code, mtu; 1001 1002 code = cmd->arg1; 1003 if (code == ICMP_UNREACH_NEEDFRAG && 1004 cmd->len == F_INSN_SIZE(ipfw_insn_u16)) 1005 mtu = ((const ipfw_insn_u16 *)cmd)->ports[0]; 1006 else 1007 mtu = 0; 1008 1009 #if 0 1010 /* XXX When ip is not guaranteed to be at mtod() we will 1011 * need to account for this */ 1012 * The mbuf will however be thrown away so we can adjust it. 
1013 * Remember we did an m_pullup on it already so we 1014 * can make some assumptions about contiguousness. 1015 */ 1016 if (args->L3offset) 1017 m_adj(m, args->L3offset); 1018 #endif 1019 if (code != ICMP_REJECT_RST && code != ICMP_REJECT_ABORT) { 1020 /* Send an ICMP unreach */ 1021 icmp_error(args->m, ICMP_UNREACH, code, 0L, mtu); 1022 } else if (code == ICMP_REJECT_RST && args->f_id.proto == IPPROTO_TCP) { 1023 struct tcphdr *const tcp = 1024 L3HDR(struct tcphdr, mtod(args->m, struct ip *)); 1025 if ( (tcp->th_flags & TH_RST) == 0) { 1026 struct mbuf *m; 1027 m = ipfw_send_pkt(args->m, &(args->f_id), 1028 ntohl(tcp->th_seq), ntohl(tcp->th_ack), 1029 tcp->th_flags | TH_RST); 1030 if (m != NULL) 1031 ip_output(m, NULL, NULL, 0, NULL, NULL); 1032 } 1033 FREE_PKT(args->m); 1034 } else if (code == ICMP_REJECT_ABORT && 1035 args->f_id.proto == IPPROTO_SCTP) { 1036 struct mbuf *m; 1037 struct sctphdr *sctp; 1038 struct sctp_chunkhdr *chunk; 1039 struct sctp_init *init; 1040 u_int32_t v_tag; 1041 int reflected; 1042 1043 sctp = L3HDR(struct sctphdr, mtod(args->m, struct ip *)); 1044 reflected = 1; 1045 v_tag = ntohl(sctp->v_tag); 1046 if (iplen >= (ip->ip_hl << 2) + sizeof(struct sctphdr) + 1047 sizeof(struct sctp_chunkhdr)) { 1048 /* Look at the first chunk header if available */ 1049 chunk = (struct sctp_chunkhdr *)(sctp + 1); 1050 switch (chunk->chunk_type) { 1051 case SCTP_INITIATION: 1052 /* 1053 * Packets containing an INIT chunk MUST have 1054 * a zero v-tag. 1055 */ 1056 if (v_tag != 0) { 1057 v_tag = 0; 1058 break; 1059 } 1060 /* INIT chunk MUST NOT be bundled */ 1061 if (iplen > 1062 (ip->ip_hl << 2) + sizeof(struct sctphdr) + 1063 ntohs(chunk->chunk_length) + 3) { 1064 break; 1065 } 1066 /* Use the initiate tag if available */ 1067 if ((iplen >= (ip->ip_hl << 2) + 1068 sizeof(struct sctphdr) + 1069 sizeof(struct sctp_chunkhdr) + 1070 offsetof(struct sctp_init, a_rwnd))) { 1071 init = (struct sctp_init *)(chunk + 1); 1072 v_tag = ntohl(init->initiate_tag); 1073 reflected = 0; 1074 } 1075 break; 1076 case SCTP_ABORT_ASSOCIATION: 1077 /* 1078 * If the packet contains an ABORT chunk, don't 1079 * reply. 1080 * XXX: We should search through all chunks, 1081 * but do not do that to avoid attacks. 1082 */ 1083 v_tag = 0; 1084 break; 1085 } 1086 } 1087 if (v_tag == 0) { 1088 m = NULL; 1089 } else { 1090 m = ipfw_send_abort(args->m, &(args->f_id), v_tag, 1091 reflected); 1092 } 1093 if (m != NULL) 1094 ip_output(m, NULL, NULL, 0, NULL, NULL); 1095 FREE_PKT(args->m); 1096 } else 1097 FREE_PKT(args->m); 1098 args->m = NULL; 1099 } 1100 1101 /* 1102 * Support for uid/gid/jail lookup. These tests are expensive 1103 * (because we may need to look into the list of active sockets) 1104 * so we cache the results. ugid_lookupp is 0 if we have not 1105 * yet done a lookup, 1 if we succeeded, and -1 if we tried 1106 * and failed. The function always returns the match value. 1107 * We could actually spare the variable and use *uc, setting 1108 * it to '(void *)check_uidgid if we have no info, NULL if 1109 * we tried and failed, or any other value if successful. 
1110 */ 1111 static int 1112 check_uidgid(ipfw_insn_u32 *insn, struct ip_fw_args *args, int *ugid_lookupp, 1113 struct ucred **uc) 1114 { 1115 #if defined(USERSPACE) 1116 return 0; // not supported in userspace 1117 #else 1118 #ifndef __FreeBSD__ 1119 /* XXX */ 1120 return cred_check(insn, proto, oif, 1121 dst_ip, dst_port, src_ip, src_port, 1122 (struct bsd_ucred *)uc, ugid_lookupp, ((struct mbuf *)inp)->m_skb); 1123 #else /* FreeBSD */ 1124 struct in_addr src_ip, dst_ip; 1125 struct inpcbinfo *pi; 1126 struct ipfw_flow_id *id; 1127 struct inpcb *pcb, *inp; 1128 int lookupflags; 1129 int match; 1130 1131 id = &args->f_id; 1132 inp = args->inp; 1133 1134 /* 1135 * Check to see if the UDP or TCP stack supplied us with 1136 * the PCB. If so, rather then holding a lock and looking 1137 * up the PCB, we can use the one that was supplied. 1138 */ 1139 if (inp && *ugid_lookupp == 0) { 1140 INP_LOCK_ASSERT(inp); 1141 if (inp->inp_socket != NULL) { 1142 *uc = crhold(inp->inp_cred); 1143 *ugid_lookupp = 1; 1144 } else 1145 *ugid_lookupp = -1; 1146 } 1147 /* 1148 * If we have already been here and the packet has no 1149 * PCB entry associated with it, then we can safely 1150 * assume that this is a no match. 1151 */ 1152 if (*ugid_lookupp == -1) 1153 return (0); 1154 if (id->proto == IPPROTO_TCP) { 1155 lookupflags = 0; 1156 pi = &V_tcbinfo; 1157 } else if (id->proto == IPPROTO_UDP) { 1158 lookupflags = INPLOOKUP_WILDCARD; 1159 pi = &V_udbinfo; 1160 } else if (id->proto == IPPROTO_UDPLITE) { 1161 lookupflags = INPLOOKUP_WILDCARD; 1162 pi = &V_ulitecbinfo; 1163 } else 1164 return 0; 1165 lookupflags |= INPLOOKUP_RLOCKPCB; 1166 match = 0; 1167 if (*ugid_lookupp == 0) { 1168 if (id->addr_type == 6) { 1169 #ifdef INET6 1170 if (args->flags & IPFW_ARGS_IN) 1171 pcb = in6_pcblookup_mbuf(pi, 1172 &id->src_ip6, htons(id->src_port), 1173 &id->dst_ip6, htons(id->dst_port), 1174 lookupflags, NULL, args->m); 1175 else 1176 pcb = in6_pcblookup_mbuf(pi, 1177 &id->dst_ip6, htons(id->dst_port), 1178 &id->src_ip6, htons(id->src_port), 1179 lookupflags, args->ifp, args->m); 1180 #else 1181 *ugid_lookupp = -1; 1182 return (0); 1183 #endif 1184 } else { 1185 src_ip.s_addr = htonl(id->src_ip); 1186 dst_ip.s_addr = htonl(id->dst_ip); 1187 if (args->flags & IPFW_ARGS_IN) 1188 pcb = in_pcblookup_mbuf(pi, 1189 src_ip, htons(id->src_port), 1190 dst_ip, htons(id->dst_port), 1191 lookupflags, NULL, args->m); 1192 else 1193 pcb = in_pcblookup_mbuf(pi, 1194 dst_ip, htons(id->dst_port), 1195 src_ip, htons(id->src_port), 1196 lookupflags, args->ifp, args->m); 1197 } 1198 if (pcb != NULL) { 1199 INP_RLOCK_ASSERT(pcb); 1200 *uc = crhold(pcb->inp_cred); 1201 *ugid_lookupp = 1; 1202 INP_RUNLOCK(pcb); 1203 } 1204 if (*ugid_lookupp == 0) { 1205 /* 1206 * We tried and failed, set the variable to -1 1207 * so we will not try again on this packet. 1208 */ 1209 *ugid_lookupp = -1; 1210 return (0); 1211 } 1212 } 1213 if (insn->o.opcode == O_UID) 1214 match = ((*uc)->cr_uid == (uid_t)insn->d[0]); 1215 else if (insn->o.opcode == O_GID) 1216 match = groupmember((gid_t)insn->d[0], *uc); 1217 else if (insn->o.opcode == O_JAIL) 1218 match = ((*uc)->cr_prison->pr_id == (int)insn->d[0]); 1219 return (match); 1220 #endif /* __FreeBSD__ */ 1221 #endif /* not supported in userspace */ 1222 } 1223 1224 /* 1225 * Helper function to set args with info on the rule after the matching 1226 * one. slot is precise, whereas we guess rule_id as they are 1227 * assigned sequentially. 
1228 */ 1229 static inline void 1230 set_match(struct ip_fw_args *args, int slot, 1231 struct ip_fw_chain *chain) 1232 { 1233 args->rule.chain_id = chain->id; 1234 args->rule.slot = slot + 1; /* we use 0 as a marker */ 1235 args->rule.rule_id = 1 + chain->map[slot]->id; 1236 args->rule.rulenum = chain->map[slot]->rulenum; 1237 args->flags |= IPFW_ARGS_REF; 1238 } 1239 1240 static int 1241 jump_lookup_pos(struct ip_fw_chain *chain, struct ip_fw *f, int num, 1242 int tablearg, int jump_backwards) 1243 { 1244 int f_pos, i; 1245 1246 i = IP_FW_ARG_TABLEARG(chain, num, skipto); 1247 /* make sure we do not jump backward */ 1248 if (jump_backwards == 0 && i <= f->rulenum) 1249 i = f->rulenum + 1; 1250 1251 #ifndef LINEAR_SKIPTO 1252 if (chain->idxmap != NULL) 1253 f_pos = chain->idxmap[i]; 1254 else 1255 f_pos = ipfw_find_rule(chain, i, 0); 1256 #else 1257 f_pos = chain->idxmap[i]; 1258 #endif /* LINEAR_SKIPTO */ 1259 1260 return (f_pos); 1261 } 1262 1263 1264 #ifndef LINEAR_SKIPTO 1265 /* 1266 * Helper function to enable cached rule lookups using 1267 * cache.id and cache.pos fields in ipfw rule. 1268 */ 1269 static int 1270 jump_cached(struct ip_fw_chain *chain, struct ip_fw *f, int num, 1271 int tablearg, int jump_backwards) 1272 { 1273 int f_pos; 1274 1275 /* Can't use cache with IP_FW_TARG */ 1276 if (num == IP_FW_TARG) 1277 return jump_lookup_pos(chain, f, num, tablearg, jump_backwards); 1278 1279 /* 1280 * If possible use cached f_pos (in f->cache.pos), 1281 * whose version is written in f->cache.id (horrible hacks 1282 * to avoid changing the ABI). 1283 * 1284 * Multiple threads can execute the same rule simultaneously, 1285 * we need to ensure that cache.pos is updated before cache.id. 1286 */ 1287 1288 #ifdef __LP64__ 1289 struct ip_fw_jump_cache cache; 1290 1291 cache.raw_value = f->cache.raw_value; 1292 if (cache.id == chain->id) 1293 return (cache.pos); 1294 1295 f_pos = jump_lookup_pos(chain, f, num, tablearg, jump_backwards); 1296 1297 cache.pos = f_pos; 1298 cache.id = chain->id; 1299 f->cache.raw_value = cache.raw_value; 1300 #else 1301 if (f->cache.id == chain->id) { 1302 /* Load pos after id */ 1303 atomic_thread_fence_acq(); 1304 return (f->cache.pos); 1305 } 1306 1307 f_pos = jump_lookup_pos(chain, f, num, tablearg, jump_backwards); 1308 1309 f->cache.pos = f_pos; 1310 /* Store id after pos */ 1311 atomic_thread_fence_rel(); 1312 f->cache.id = chain->id; 1313 #endif /* !__LP64__ */ 1314 return (f_pos); 1315 } 1316 #endif /* !LINEAR_SKIPTO */ 1317 1318 #define TARG(k, f) IP_FW_ARG_TABLEARG(chain, k, f) 1319 /* 1320 * The main check routine for the firewall. 1321 * 1322 * All arguments are in args so we can modify them and return them 1323 * back to the caller. 1324 * 1325 * Parameters: 1326 * 1327 * args->m (in/out) The packet; we set to NULL when/if we nuke it. 1328 * Starts with the IP header. 1329 * args->L3offset Number of bytes bypassed if we came from L2. 1330 * e.g. often sizeof(eh) ** NOTYET ** 1331 * args->ifp Incoming or outgoing interface. 1332 * args->divert_rule (in/out) 1333 * Skip up to the first rule past this rule number; 1334 * upon return, non-zero port number for divert or tee. 1335 * 1336 * args->rule Pointer to the last matching rule (in/out) 1337 * args->next_hop Socket we are forwarding to (out). 1338 * args->next_hop6 IPv6 next hop we are forwarding to (out). 
1339 * args->f_id Addresses grabbed from the packet (out) 1340 * args->rule.info a cookie depending on rule action 1341 * 1342 * Return value: 1343 * 1344 * IP_FW_PASS the packet must be accepted 1345 * IP_FW_DENY the packet must be dropped 1346 * IP_FW_DIVERT divert packet, port in m_tag 1347 * IP_FW_TEE tee packet, port in m_tag 1348 * IP_FW_DUMMYNET to dummynet, pipe in args->cookie 1349 * IP_FW_NETGRAPH into netgraph, cookie args->cookie 1350 * args->rule contains the matching rule, 1351 * args->rule.info has additional information. 1352 * 1353 */ 1354 int 1355 ipfw_chk(struct ip_fw_args *args) 1356 { 1357 1358 /* 1359 * Local variables holding state while processing a packet: 1360 * 1361 * IMPORTANT NOTE: to speed up the processing of rules, there 1362 * are some assumption on the values of the variables, which 1363 * are documented here. Should you change them, please check 1364 * the implementation of the various instructions to make sure 1365 * that they still work. 1366 * 1367 * m | args->m Pointer to the mbuf, as received from the caller. 1368 * It may change if ipfw_chk() does an m_pullup, or if it 1369 * consumes the packet because it calls send_reject(). 1370 * XXX This has to change, so that ipfw_chk() never modifies 1371 * or consumes the buffer. 1372 * OR 1373 * args->mem Pointer to contigous memory chunk. 1374 * ip Is the beginning of the ip(4 or 6) header. 1375 * eh Ethernet header in case if input is Layer2. 1376 */ 1377 struct mbuf *m; 1378 struct ip *ip; 1379 struct ether_header *eh; 1380 1381 /* 1382 * For rules which contain uid/gid or jail constraints, cache 1383 * a copy of the users credentials after the pcb lookup has been 1384 * executed. This will speed up the processing of rules with 1385 * these types of constraints, as well as decrease contention 1386 * on pcb related locks. 1387 */ 1388 #ifndef __FreeBSD__ 1389 struct bsd_ucred ucred_cache; 1390 #else 1391 struct ucred *ucred_cache = NULL; 1392 #endif 1393 int ucred_lookup = 0; 1394 int f_pos = 0; /* index of current rule in the array */ 1395 int retval = 0; 1396 struct ifnet *oif, *iif; 1397 1398 /* 1399 * hlen The length of the IP header. 1400 */ 1401 u_int hlen = 0; /* hlen >0 means we have an IP pkt */ 1402 1403 /* 1404 * offset The offset of a fragment. offset != 0 means that 1405 * we have a fragment at this offset of an IPv4 packet. 1406 * offset == 0 means that (if this is an IPv4 packet) 1407 * this is the first or only fragment. 1408 * For IPv6 offset|ip6f_mf == 0 means there is no Fragment Header 1409 * or there is a single packet fragment (fragment header added 1410 * without needed). We will treat a single packet fragment as if 1411 * there was no fragment header (or log/block depending on the 1412 * V_fw_permit_single_frag6 sysctl setting). 1413 */ 1414 u_short offset = 0; 1415 u_short ip6f_mf = 0; 1416 1417 /* 1418 * Local copies of addresses. They are only valid if we have 1419 * an IP packet. 1420 * 1421 * proto The protocol. Set to 0 for non-ip packets, 1422 * or to the protocol read from the packet otherwise. 1423 * proto != 0 means that we have an IPv4 packet. 1424 * 1425 * src_port, dst_port port numbers, in HOST format. Only 1426 * valid for TCP and UDP packets. 1427 * 1428 * src_ip, dst_ip ip addresses, in NETWORK format. 1429 * Only valid for IPv4 packets. 
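 * (src_port and dst_port are read in network order while parsing and
 * converted to host order in one place, when f_id is finalized.)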
1430 */ 1431 uint8_t proto; 1432 uint16_t src_port, dst_port; /* NOTE: host format */ 1433 struct in_addr src_ip, dst_ip; /* NOTE: network format */ 1434 int iplen = 0; 1435 int pktlen; 1436 1437 struct ipfw_dyn_info dyn_info; 1438 struct ip_fw *q = NULL; 1439 struct ip_fw_chain *chain = &V_layer3_chain; 1440 1441 /* 1442 * We store in ulp a pointer to the upper layer protocol header. 1443 * In the ipv4 case this is easy to determine from the header, 1444 * but for ipv6 we might have some additional headers in the middle. 1445 * ulp is NULL if not found. 1446 */ 1447 void *ulp = NULL; /* upper layer protocol pointer. */ 1448 1449 /* XXX ipv6 variables */ 1450 int is_ipv6 = 0; 1451 #ifdef INET6 1452 uint8_t icmp6_type = 0; 1453 #endif 1454 uint16_t ext_hd = 0; /* bits vector for extension header filtering */ 1455 /* end of ipv6 variables */ 1456 1457 int is_ipv4 = 0; 1458 1459 int done = 0; /* flag to exit the outer loop */ 1460 IPFW_RLOCK_TRACKER; 1461 bool mem; 1462 1463 if ((mem = (args->flags & IPFW_ARGS_LENMASK))) { 1464 if (args->flags & IPFW_ARGS_ETHER) { 1465 eh = (struct ether_header *)args->mem; 1466 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 1467 ip = (struct ip *) 1468 ((struct ether_vlan_header *)eh + 1); 1469 else 1470 ip = (struct ip *)(eh + 1); 1471 } else { 1472 eh = NULL; 1473 ip = (struct ip *)args->mem; 1474 } 1475 pktlen = IPFW_ARGS_LENGTH(args->flags); 1476 args->f_id.fib = args->ifp->if_fib; /* best guess */ 1477 } else { 1478 m = args->m; 1479 if (m->m_flags & M_SKIP_FIREWALL || (! V_ipfw_vnet_ready)) 1480 return (IP_FW_PASS); /* accept */ 1481 if (args->flags & IPFW_ARGS_ETHER) { 1482 /* We need some amount of data to be contiguous. */ 1483 if (m->m_len < min(m->m_pkthdr.len, max_protohdr) && 1484 (args->m = m = m_pullup(m, min(m->m_pkthdr.len, 1485 max_protohdr))) == NULL) 1486 goto pullup_failed; 1487 eh = mtod(m, struct ether_header *); 1488 ip = (struct ip *)(eh + 1); 1489 } else { 1490 eh = NULL; 1491 ip = mtod(m, struct ip *); 1492 } 1493 pktlen = m->m_pkthdr.len; 1494 args->f_id.fib = M_GETFIB(m); /* mbuf not altered */ 1495 } 1496 1497 dst_ip.s_addr = 0; /* make sure it is initialized */ 1498 src_ip.s_addr = 0; /* make sure it is initialized */ 1499 src_port = dst_port = 0; 1500 1501 DYN_INFO_INIT(&dyn_info); 1502 /* 1503 * PULLUP_TO(len, p, T) makes sure that len + sizeof(T) is contiguous, 1504 * then it sets p to point at the offset "len" in the mbuf. WARNING: the 1505 * pointer might become stale after other pullups (but we never use it 1506 * this way). 1507 */ 1508 #define PULLUP_TO(_len, p, T) PULLUP_LEN(_len, p, sizeof(T)) 1509 #define EHLEN (eh != NULL ? ((char *)ip - (char *)eh) : 0) 1510 #define _PULLUP_LOCKED(_len, p, T, unlock) \ 1511 do { \ 1512 int x = (_len) + T + EHLEN; \ 1513 if (mem) { \ 1514 if (__predict_false(pktlen < x)) { \ 1515 unlock; \ 1516 goto pullup_failed; \ 1517 } \ 1518 p = (char *)args->mem + (_len) + EHLEN; \ 1519 } else { \ 1520 if (__predict_false((m)->m_len < x)) { \ 1521 args->m = m = m_pullup(m, x); \ 1522 if (m == NULL) { \ 1523 unlock; \ 1524 goto pullup_failed; \ 1525 } \ 1526 } \ 1527 p = mtod(m, char *) + (_len) + EHLEN; \ 1528 } \ 1529 } while (0) 1530 1531 #define PULLUP_LEN(_len, p, T) _PULLUP_LOCKED(_len, p, T, ) 1532 #define PULLUP_LEN_LOCKED(_len, p, T) \ 1533 _PULLUP_LOCKED(_len, p, T, IPFW_PF_RUNLOCK(chain)); \ 1534 UPDATE_POINTERS() 1535 /* 1536 * In case pointers got stale after pullups, update them. 
1537 */ 1538 #define UPDATE_POINTERS() \ 1539 do { \ 1540 if (!mem) { \ 1541 if (eh != NULL) { \ 1542 eh = mtod(m, struct ether_header *); \ 1543 ip = (struct ip *)(eh + 1); \ 1544 } else \ 1545 ip = mtod(m, struct ip *); \ 1546 args->m = m; \ 1547 } \ 1548 } while (0) 1549 1550 /* Identify IP packets and fill up variables. */ 1551 if (pktlen >= sizeof(struct ip6_hdr) && 1552 (eh == NULL || eh->ether_type == htons(ETHERTYPE_IPV6)) && 1553 ip->ip_v == 6) { 1554 struct ip6_hdr *ip6 = (struct ip6_hdr *)ip; 1555 1556 is_ipv6 = 1; 1557 args->flags |= IPFW_ARGS_IP6; 1558 hlen = sizeof(struct ip6_hdr); 1559 proto = ip6->ip6_nxt; 1560 /* Search extension headers to find upper layer protocols */ 1561 while (ulp == NULL && offset == 0) { 1562 switch (proto) { 1563 case IPPROTO_ICMPV6: 1564 PULLUP_TO(hlen, ulp, struct icmp6_hdr); 1565 #ifdef INET6 1566 icmp6_type = ICMP6(ulp)->icmp6_type; 1567 #endif 1568 break; 1569 1570 case IPPROTO_TCP: 1571 PULLUP_TO(hlen, ulp, struct tcphdr); 1572 dst_port = TCP(ulp)->th_dport; 1573 src_port = TCP(ulp)->th_sport; 1574 /* save flags for dynamic rules */ 1575 args->f_id._flags = TCP(ulp)->th_flags; 1576 break; 1577 1578 case IPPROTO_SCTP: 1579 if (pktlen >= hlen + sizeof(struct sctphdr) + 1580 sizeof(struct sctp_chunkhdr) + 1581 offsetof(struct sctp_init, a_rwnd)) 1582 PULLUP_LEN(hlen, ulp, 1583 sizeof(struct sctphdr) + 1584 sizeof(struct sctp_chunkhdr) + 1585 offsetof(struct sctp_init, a_rwnd)); 1586 else if (pktlen >= hlen + sizeof(struct sctphdr)) 1587 PULLUP_LEN(hlen, ulp, pktlen - hlen); 1588 else 1589 PULLUP_LEN(hlen, ulp, 1590 sizeof(struct sctphdr)); 1591 src_port = SCTP(ulp)->src_port; 1592 dst_port = SCTP(ulp)->dest_port; 1593 break; 1594 1595 case IPPROTO_UDP: 1596 case IPPROTO_UDPLITE: 1597 PULLUP_TO(hlen, ulp, struct udphdr); 1598 dst_port = UDP(ulp)->uh_dport; 1599 src_port = UDP(ulp)->uh_sport; 1600 break; 1601 1602 case IPPROTO_HOPOPTS: /* RFC 2460 */ 1603 PULLUP_TO(hlen, ulp, struct ip6_hbh); 1604 ext_hd |= EXT_HOPOPTS; 1605 hlen += (((struct ip6_hbh *)ulp)->ip6h_len + 1) << 3; 1606 proto = ((struct ip6_hbh *)ulp)->ip6h_nxt; 1607 ulp = NULL; 1608 break; 1609 1610 case IPPROTO_ROUTING: /* RFC 2460 */ 1611 PULLUP_TO(hlen, ulp, struct ip6_rthdr); 1612 switch (((struct ip6_rthdr *)ulp)->ip6r_type) { 1613 case 0: 1614 ext_hd |= EXT_RTHDR0; 1615 break; 1616 case 2: 1617 ext_hd |= EXT_RTHDR2; 1618 break; 1619 default: 1620 if (V_fw_verbose) 1621 printf("IPFW2: IPV6 - Unknown " 1622 "Routing Header type(%d)\n", 1623 ((struct ip6_rthdr *) 1624 ulp)->ip6r_type); 1625 if (V_fw_deny_unknown_exthdrs) 1626 return (IP_FW_DENY); 1627 break; 1628 } 1629 ext_hd |= EXT_ROUTING; 1630 hlen += (((struct ip6_rthdr *)ulp)->ip6r_len + 1) << 3; 1631 proto = ((struct ip6_rthdr *)ulp)->ip6r_nxt; 1632 ulp = NULL; 1633 break; 1634 1635 case IPPROTO_FRAGMENT: /* RFC 2460 */ 1636 PULLUP_TO(hlen, ulp, struct ip6_frag); 1637 ext_hd |= EXT_FRAGMENT; 1638 hlen += sizeof (struct ip6_frag); 1639 proto = ((struct ip6_frag *)ulp)->ip6f_nxt; 1640 offset = ((struct ip6_frag *)ulp)->ip6f_offlg & 1641 IP6F_OFF_MASK; 1642 ip6f_mf = ((struct ip6_frag *)ulp)->ip6f_offlg & 1643 IP6F_MORE_FRAG; 1644 if (V_fw_permit_single_frag6 == 0 && 1645 offset == 0 && ip6f_mf == 0) { 1646 if (V_fw_verbose) 1647 printf("IPFW2: IPV6 - Invalid " 1648 "Fragment Header\n"); 1649 if (V_fw_deny_unknown_exthdrs) 1650 return (IP_FW_DENY); 1651 break; 1652 } 1653 args->f_id.extra = 1654 ntohl(((struct ip6_frag *)ulp)->ip6f_ident); 1655 ulp = NULL; 1656 break; 1657 1658 case IPPROTO_DSTOPTS: /* RFC 2460 */ 1659 
PULLUP_TO(hlen, ulp, struct ip6_hbh); 1660 ext_hd |= EXT_DSTOPTS; 1661 hlen += (((struct ip6_hbh *)ulp)->ip6h_len + 1) << 3; 1662 proto = ((struct ip6_hbh *)ulp)->ip6h_nxt; 1663 ulp = NULL; 1664 break; 1665 1666 case IPPROTO_AH: /* RFC 2402 */ 1667 PULLUP_TO(hlen, ulp, struct ip6_ext); 1668 ext_hd |= EXT_AH; 1669 hlen += (((struct ip6_ext *)ulp)->ip6e_len + 2) << 2; 1670 proto = ((struct ip6_ext *)ulp)->ip6e_nxt; 1671 ulp = NULL; 1672 break; 1673 1674 case IPPROTO_ESP: /* RFC 2406 */ 1675 PULLUP_TO(hlen, ulp, uint32_t); /* SPI, Seq# */ 1676 /* Anything past Seq# is variable length and 1677 * data past this ext. header is encrypted. */ 1678 ext_hd |= EXT_ESP; 1679 break; 1680 1681 case IPPROTO_NONE: /* RFC 2460 */ 1682 /* 1683 * Packet ends here, and IPv6 header has 1684 * already been pulled up. If ip6e_len!=0 1685 * then octets must be ignored. 1686 */ 1687 ulp = ip; /* non-NULL to get out of loop. */ 1688 break; 1689 1690 case IPPROTO_OSPFIGP: 1691 /* XXX OSPF header check? */ 1692 PULLUP_TO(hlen, ulp, struct ip6_ext); 1693 break; 1694 1695 case IPPROTO_PIM: 1696 /* XXX PIM header check? */ 1697 PULLUP_TO(hlen, ulp, struct pim); 1698 break; 1699 1700 case IPPROTO_GRE: /* RFC 1701 */ 1701 /* XXX GRE header check? */ 1702 PULLUP_TO(hlen, ulp, struct grehdr); 1703 break; 1704 1705 case IPPROTO_CARP: 1706 PULLUP_TO(hlen, ulp, offsetof( 1707 struct carp_header, carp_counter)); 1708 if (CARP_ADVERTISEMENT != 1709 ((struct carp_header *)ulp)->carp_type) 1710 return (IP_FW_DENY); 1711 break; 1712 1713 case IPPROTO_IPV6: /* RFC 2893 */ 1714 PULLUP_TO(hlen, ulp, struct ip6_hdr); 1715 break; 1716 1717 case IPPROTO_IPV4: /* RFC 2893 */ 1718 PULLUP_TO(hlen, ulp, struct ip); 1719 break; 1720 1721 default: 1722 if (V_fw_verbose) 1723 printf("IPFW2: IPV6 - Unknown " 1724 "Extension Header(%d), ext_hd=%x\n", 1725 proto, ext_hd); 1726 if (V_fw_deny_unknown_exthdrs) 1727 return (IP_FW_DENY); 1728 PULLUP_TO(hlen, ulp, struct ip6_ext); 1729 break; 1730 } /*switch */ 1731 } 1732 UPDATE_POINTERS(); 1733 ip6 = (struct ip6_hdr *)ip; 1734 args->f_id.addr_type = 6; 1735 args->f_id.src_ip6 = ip6->ip6_src; 1736 args->f_id.dst_ip6 = ip6->ip6_dst; 1737 args->f_id.flow_id6 = ntohl(ip6->ip6_flow); 1738 iplen = ntohs(ip6->ip6_plen) + sizeof(*ip6); 1739 } else if (pktlen >= sizeof(struct ip) && 1740 (eh == NULL || eh->ether_type == htons(ETHERTYPE_IP)) && 1741 ip->ip_v == 4) { 1742 is_ipv4 = 1; 1743 args->flags |= IPFW_ARGS_IP4; 1744 hlen = ip->ip_hl << 2; 1745 /* 1746 * Collect parameters into local variables for faster 1747 * matching. 
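 * offset keeps the fragment offset field (in 8-byte units), so any
 * non-zero value marks a non-first fragment; iplen is converted to host
 * order here while the addresses stay in network order.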
1748 */ 1749 proto = ip->ip_p; 1750 src_ip = ip->ip_src; 1751 dst_ip = ip->ip_dst; 1752 offset = ntohs(ip->ip_off) & IP_OFFMASK; 1753 iplen = ntohs(ip->ip_len); 1754 1755 if (offset == 0) { 1756 switch (proto) { 1757 case IPPROTO_TCP: 1758 PULLUP_TO(hlen, ulp, struct tcphdr); 1759 dst_port = TCP(ulp)->th_dport; 1760 src_port = TCP(ulp)->th_sport; 1761 /* save flags for dynamic rules */ 1762 args->f_id._flags = TCP(ulp)->th_flags; 1763 break; 1764 1765 case IPPROTO_SCTP: 1766 if (pktlen >= hlen + sizeof(struct sctphdr) + 1767 sizeof(struct sctp_chunkhdr) + 1768 offsetof(struct sctp_init, a_rwnd)) 1769 PULLUP_LEN(hlen, ulp, 1770 sizeof(struct sctphdr) + 1771 sizeof(struct sctp_chunkhdr) + 1772 offsetof(struct sctp_init, a_rwnd)); 1773 else if (pktlen >= hlen + sizeof(struct sctphdr)) 1774 PULLUP_LEN(hlen, ulp, pktlen - hlen); 1775 else 1776 PULLUP_LEN(hlen, ulp, 1777 sizeof(struct sctphdr)); 1778 src_port = SCTP(ulp)->src_port; 1779 dst_port = SCTP(ulp)->dest_port; 1780 break; 1781 1782 case IPPROTO_UDP: 1783 case IPPROTO_UDPLITE: 1784 PULLUP_TO(hlen, ulp, struct udphdr); 1785 dst_port = UDP(ulp)->uh_dport; 1786 src_port = UDP(ulp)->uh_sport; 1787 break; 1788 1789 case IPPROTO_ICMP: 1790 PULLUP_TO(hlen, ulp, struct icmphdr); 1791 //args->f_id.flags = ICMP(ulp)->icmp_type; 1792 break; 1793 1794 default: 1795 break; 1796 } 1797 } else { 1798 if (offset == 1 && proto == IPPROTO_TCP) { 1799 /* RFC 3128 */ 1800 goto pullup_failed; 1801 } 1802 } 1803 1804 UPDATE_POINTERS(); 1805 args->f_id.addr_type = 4; 1806 args->f_id.src_ip = ntohl(src_ip.s_addr); 1807 args->f_id.dst_ip = ntohl(dst_ip.s_addr); 1808 } else { 1809 proto = 0; 1810 dst_ip.s_addr = src_ip.s_addr = 0; 1811 1812 args->f_id.addr_type = 1; /* XXX */ 1813 } 1814 #undef PULLUP_TO 1815 pktlen = iplen < pktlen ? iplen: pktlen; 1816 1817 /* Properly initialize the rest of f_id */ 1818 args->f_id.proto = proto; 1819 args->f_id.src_port = src_port = ntohs(src_port); 1820 args->f_id.dst_port = dst_port = ntohs(dst_port); 1821 1822 IPFW_PF_RLOCK(chain); 1823 if (! V_ipfw_vnet_ready) { /* shutting down, leave NOW. */ 1824 IPFW_PF_RUNLOCK(chain); 1825 return (IP_FW_PASS); /* accept */ 1826 } 1827 if (args->flags & IPFW_ARGS_REF) { 1828 /* 1829 * Packet has already been tagged as a result of a previous 1830 * match on rule args->rule aka args->rule_id (PIPE, QUEUE, 1831 * REASS, NETGRAPH, DIVERT/TEE...) 1832 * Validate the slot and continue from the next one 1833 * if still present, otherwise do a lookup. 1834 */ 1835 f_pos = (args->rule.chain_id == chain->id) ? 1836 args->rule.slot : 1837 ipfw_find_rule(chain, args->rule.rulenum, 1838 args->rule.rule_id); 1839 } else { 1840 f_pos = 0; 1841 } 1842 1843 if (args->flags & IPFW_ARGS_IN) { 1844 iif = args->ifp; 1845 oif = NULL; 1846 } else { 1847 MPASS(args->flags & IPFW_ARGS_OUT); 1848 iif = mem ? NULL : m_rcvif(m); 1849 oif = args->ifp; 1850 } 1851 1852 /* 1853 * Now scan the rules, and parse microinstructions for each rule. 1854 * We have two nested loops and an inner switch. Sometimes we 1855 * need to break out of one or both loops, or re-enter one of 1856 * the loops with updated variables. Loop variables are: 1857 * 1858 * f_pos (outer loop) points to the current rule. 1859 * On output it points to the matching rule. 1860 * done (outer loop) is used as a flag to break the loop. 1861 * l (inner loop) residual length of current rule. 1862 * cmd points to the current microinstruction. 1863 * 1864 * We break the inner loop by setting l=0 and possibly 1865 * cmdlen=0 if we don't want to advance cmd. 
1866 * We break the outer loop by setting done=1 1867 * We can restart the inner loop by setting l>0 and f_pos, f, cmd 1868 * as needed. 1869 */ 1870 for (; f_pos < chain->n_rules; f_pos++) { 1871 ipfw_insn *cmd; 1872 uint32_t tablearg = 0; 1873 int l, cmdlen, skip_or; /* skip rest of OR block */ 1874 struct ip_fw *f; 1875 1876 f = chain->map[f_pos]; 1877 if (V_set_disable & (1 << f->set) ) 1878 continue; 1879 1880 skip_or = 0; 1881 for (l = f->cmd_len, cmd = f->cmd ; l > 0 ; 1882 l -= cmdlen, cmd += cmdlen) { 1883 int match; 1884 1885 /* 1886 * check_body is a jump target used when we find a 1887 * CHECK_STATE, and need to jump to the body of 1888 * the target rule. 1889 */ 1890 1891 /* check_body: */ 1892 cmdlen = F_LEN(cmd); 1893 /* 1894 * An OR block (insn_1 || .. || insn_n) has the 1895 * F_OR bit set in all but the last instruction. 1896 * The first match will set "skip_or", and cause 1897 * the following instructions to be skipped until 1898 * past the one with the F_OR bit clear. 1899 */ 1900 if (skip_or) { /* skip this instruction */ 1901 if ((cmd->len & F_OR) == 0) 1902 skip_or = 0; /* next one is good */ 1903 continue; 1904 } 1905 match = 0; /* set to 1 if we succeed */ 1906 1907 switch (cmd->opcode) { 1908 /* 1909 * The first set of opcodes compares the packet's 1910 * fields with some pattern, setting 'match' if a 1911 * match is found. At the end of the loop there is 1912 * logic to deal with F_NOT and F_OR flags associated 1913 * with the opcode. 1914 */ 1915 case O_NOP: 1916 match = 1; 1917 break; 1918 1919 case O_FORWARD_MAC: 1920 printf("ipfw: opcode %d unimplemented\n", 1921 cmd->opcode); 1922 break; 1923 1924 case O_GID: 1925 case O_UID: 1926 case O_JAIL: 1927 /* 1928 * We only check offset == 0 && proto != 0, 1929 * as this ensures that we have a 1930 * packet with the ports info. 1931 */ 1932 if (offset != 0) 1933 break; 1934 if (proto == IPPROTO_TCP || 1935 proto == IPPROTO_UDP || 1936 proto == IPPROTO_UDPLITE) 1937 match = check_uidgid( 1938 (ipfw_insn_u32 *)cmd, 1939 args, &ucred_lookup, 1940 #ifdef __FreeBSD__ 1941 &ucred_cache); 1942 #else 1943 (void *)&ucred_cache); 1944 #endif 1945 break; 1946 1947 case O_RECV: 1948 match = iface_match(iif, (ipfw_insn_if *)cmd, 1949 chain, &tablearg); 1950 break; 1951 1952 case O_XMIT: 1953 match = iface_match(oif, (ipfw_insn_if *)cmd, 1954 chain, &tablearg); 1955 break; 1956 1957 case O_VIA: 1958 match = iface_match(args->ifp, 1959 (ipfw_insn_if *)cmd, chain, &tablearg); 1960 break; 1961 1962 case O_MACADDR2: 1963 if (args->flags & IPFW_ARGS_ETHER) { 1964 u_int32_t *want = (u_int32_t *) 1965 ((ipfw_insn_mac *)cmd)->addr; 1966 u_int32_t *mask = (u_int32_t *) 1967 ((ipfw_insn_mac *)cmd)->mask; 1968 u_int32_t *hdr = (u_int32_t *)eh; 1969 1970 match = 1971 ( want[0] == (hdr[0] & mask[0]) && 1972 want[1] == (hdr[1] & mask[1]) && 1973 want[2] == (hdr[2] & mask[2]) ); 1974 } 1975 break; 1976 1977 case O_MAC_TYPE: 1978 if (args->flags & IPFW_ARGS_ETHER) { 1979 u_int16_t *p = 1980 ((ipfw_insn_u16 *)cmd)->ports; 1981 int i; 1982 1983 for (i = cmdlen - 1; !match && i>0; 1984 i--, p += 2) 1985 match = 1986 (ntohs(eh->ether_type) >= 1987 p[0] && 1988 ntohs(eh->ether_type) <= 1989 p[1]); 1990 } 1991 break; 1992 1993 case O_FRAG: 1994 if (is_ipv4) { 1995 /* 1996 * Since flags_match() works with 1997 * uint8_t we pack ip_off into 8 bits. 1998 * For this match offset is a boolean. 
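 * e.g. a middle fragment (MF set, offset != 0) is presented to
 * flags_match() as bits = (IP_MF >> 8) | 1 = 0x21, which is matched
 * against the set/clear masks encoded in cmd->arg1.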
1999 */ 2000 match = flags_match(cmd, 2001 ((ntohs(ip->ip_off) & ~IP_OFFMASK) 2002 >> 8) | (offset != 0)); 2003 } else { 2004 /* 2005 * Compatiblity: historically bare 2006 * "frag" would match IPv6 fragments. 2007 */ 2008 match = (cmd->arg1 == 0x1 && 2009 (offset != 0)); 2010 } 2011 break; 2012 2013 case O_IN: /* "out" is "not in" */ 2014 match = (oif == NULL); 2015 break; 2016 2017 case O_LAYER2: 2018 match = (args->flags & IPFW_ARGS_ETHER); 2019 break; 2020 2021 case O_DIVERTED: 2022 if ((args->flags & IPFW_ARGS_REF) == 0) 2023 break; 2024 /* 2025 * For diverted packets, args->rule.info 2026 * contains the divert port (in host format) 2027 * reason and direction. 2028 */ 2029 match = ((args->rule.info & IPFW_IS_MASK) == 2030 IPFW_IS_DIVERT) && ( 2031 ((args->rule.info & IPFW_INFO_IN) ? 2032 1: 2) & cmd->arg1); 2033 break; 2034 2035 case O_PROTO: 2036 /* 2037 * We do not allow an arg of 0 so the 2038 * check of "proto" only suffices. 2039 */ 2040 match = (proto == cmd->arg1); 2041 break; 2042 2043 case O_IP_SRC: 2044 match = is_ipv4 && 2045 (((ipfw_insn_ip *)cmd)->addr.s_addr == 2046 src_ip.s_addr); 2047 break; 2048 2049 case O_IP_DST_LOOKUP: 2050 { 2051 if (cmdlen > F_INSN_SIZE(ipfw_insn_u32)) { 2052 void *pkey; 2053 uint32_t vidx, key; 2054 uint16_t keylen = 0; /* zero if can't match the packet */ 2055 2056 /* Determine lookup key type */ 2057 vidx = ((ipfw_insn_u32 *)cmd)->d[1]; 2058 switch (vidx) { 2059 case LOOKUP_DST_IP: 2060 case LOOKUP_SRC_IP: 2061 /* Need IP frame */ 2062 if (is_ipv6 == 0 && is_ipv4 == 0) 2063 break; 2064 if (vidx == LOOKUP_DST_IP) 2065 pkey = is_ipv6 ? 2066 (void *)&args->f_id.dst_ip6: 2067 (void *)&dst_ip; 2068 else 2069 pkey = is_ipv6 ? 2070 (void *)&args->f_id.src_ip6: 2071 (void *)&src_ip; 2072 keylen = is_ipv6 ? 2073 sizeof(struct in6_addr): 2074 sizeof(in_addr_t); 2075 break; 2076 case LOOKUP_DST_PORT: 2077 case LOOKUP_SRC_PORT: 2078 /* Need IP frame */ 2079 if (is_ipv6 == 0 && is_ipv4 == 0) 2080 break; 2081 /* Skip fragments */ 2082 if (offset != 0) 2083 break; 2084 /* Skip proto without ports */ 2085 if (proto != IPPROTO_TCP && 2086 proto != IPPROTO_UDP && 2087 proto != IPPROTO_UDPLITE && 2088 proto != IPPROTO_SCTP) 2089 break; 2090 key = vidx == LOOKUP_DST_PORT ? 2091 dst_port: 2092 src_port; 2093 pkey = &key; 2094 keylen = sizeof(key); 2095 break; 2096 case LOOKUP_UID: 2097 case LOOKUP_JAIL: 2098 check_uidgid( 2099 (ipfw_insn_u32 *)cmd, 2100 args, &ucred_lookup, 2101 &ucred_cache); 2102 key = vidx == LOOKUP_UID ? 2103 ucred_cache->cr_uid: 2104 ucred_cache->cr_prison->pr_id; 2105 pkey = &key; 2106 keylen = sizeof(key); 2107 break; 2108 case LOOKUP_DSCP: 2109 /* Need IP frame */ 2110 if (is_ipv6 == 0 && is_ipv4 == 0) 2111 break; 2112 if (is_ipv6) 2113 key = IPV6_DSCP( 2114 (struct ip6_hdr *)ip) >> 2; 2115 else 2116 key = ip->ip_tos >> 2; 2117 pkey = &key; 2118 keylen = sizeof(key); 2119 break; 2120 case LOOKUP_DST_MAC: 2121 case LOOKUP_SRC_MAC: 2122 /* Need ether frame */ 2123 if ((args->flags & IPFW_ARGS_ETHER) == 0) 2124 break; 2125 pkey = vidx == LOOKUP_DST_MAC ? 
2126 eh->ether_dhost: 2127 eh->ether_shost; 2128 keylen = ETHER_ADDR_LEN; 2129 break; 2130 } 2131 if (keylen == 0) 2132 break; 2133 match = ipfw_lookup_table(chain, 2134 cmd->arg1, keylen, pkey, &vidx); 2135 if (!match) 2136 break; 2137 tablearg = vidx; 2138 break; 2139 } 2140 /* cmdlen =< F_INSN_SIZE(ipfw_insn_u32) */ 2141 /* FALLTHROUGH */ 2142 } 2143 case O_IP_SRC_LOOKUP: 2144 { 2145 void *pkey; 2146 uint32_t vidx; 2147 uint16_t keylen; 2148 2149 if (is_ipv4) { 2150 keylen = sizeof(in_addr_t); 2151 if (cmd->opcode == O_IP_DST_LOOKUP) 2152 pkey = &dst_ip; 2153 else 2154 pkey = &src_ip; 2155 } else if (is_ipv6) { 2156 keylen = sizeof(struct in6_addr); 2157 if (cmd->opcode == O_IP_DST_LOOKUP) 2158 pkey = &args->f_id.dst_ip6; 2159 else 2160 pkey = &args->f_id.src_ip6; 2161 } else 2162 break; 2163 match = ipfw_lookup_table(chain, cmd->arg1, 2164 keylen, pkey, &vidx); 2165 if (!match) 2166 break; 2167 if (cmdlen == F_INSN_SIZE(ipfw_insn_u32)) { 2168 match = ((ipfw_insn_u32 *)cmd)->d[0] == 2169 TARG_VAL(chain, vidx, tag); 2170 if (!match) 2171 break; 2172 } 2173 tablearg = vidx; 2174 break; 2175 } 2176 2177 case O_MAC_SRC_LOOKUP: 2178 case O_MAC_DST_LOOKUP: 2179 { 2180 void *pkey; 2181 uint32_t vidx; 2182 uint16_t keylen = ETHER_ADDR_LEN; 2183 2184 /* Need ether frame */ 2185 if ((args->flags & IPFW_ARGS_ETHER) == 0) 2186 break; 2187 2188 if (cmd->opcode == O_MAC_DST_LOOKUP) 2189 pkey = eh->ether_dhost; 2190 else 2191 pkey = eh->ether_shost; 2192 2193 match = ipfw_lookup_table(chain, cmd->arg1, 2194 keylen, pkey, &vidx); 2195 if (!match) 2196 break; 2197 if (cmdlen == F_INSN_SIZE(ipfw_insn_u32)) { 2198 match = ((ipfw_insn_u32 *)cmd)->d[0] == 2199 TARG_VAL(chain, vidx, tag); 2200 if (!match) 2201 break; 2202 } 2203 tablearg = vidx; 2204 break; 2205 } 2206 2207 case O_IP_FLOW_LOOKUP: 2208 { 2209 uint32_t v = 0; 2210 match = ipfw_lookup_table(chain, 2211 cmd->arg1, 0, &args->f_id, &v); 2212 if (!match) 2213 break; 2214 if (cmdlen == F_INSN_SIZE(ipfw_insn_u32)) 2215 match = ((ipfw_insn_u32 *)cmd)->d[0] == 2216 TARG_VAL(chain, v, tag); 2217 if (match) 2218 tablearg = v; 2219 } 2220 break; 2221 case O_IP_SRC_MASK: 2222 case O_IP_DST_MASK: 2223 if (is_ipv4) { 2224 uint32_t a = 2225 (cmd->opcode == O_IP_DST_MASK) ? 2226 dst_ip.s_addr : src_ip.s_addr; 2227 uint32_t *p = ((ipfw_insn_u32 *)cmd)->d; 2228 int i = cmdlen-1; 2229 2230 for (; !match && i>0; i-= 2, p+= 2) 2231 match = (p[0] == (a & p[1])); 2232 } 2233 break; 2234 2235 case O_IP_SRC_ME: 2236 if (is_ipv4) { 2237 match = in_localip(src_ip); 2238 break; 2239 } 2240 #ifdef INET6 2241 /* FALLTHROUGH */ 2242 case O_IP6_SRC_ME: 2243 match = is_ipv6 && 2244 ipfw_localip6(&args->f_id.src_ip6); 2245 #endif 2246 break; 2247 2248 case O_IP_DST_SET: 2249 case O_IP_SRC_SET: 2250 if (is_ipv4) { 2251 u_int32_t *d = (u_int32_t *)(cmd+1); 2252 u_int32_t addr = 2253 cmd->opcode == O_IP_DST_SET ? 
2254 args->f_id.dst_ip : 2255 args->f_id.src_ip; 2256 2257 if (addr < d[0]) 2258 break; 2259 addr -= d[0]; /* subtract base */ 2260 match = (addr < cmd->arg1) && 2261 ( d[ 1 + (addr>>5)] & 2262 (1<<(addr & 0x1f)) ); 2263 } 2264 break; 2265 2266 case O_IP_DST: 2267 match = is_ipv4 && 2268 (((ipfw_insn_ip *)cmd)->addr.s_addr == 2269 dst_ip.s_addr); 2270 break; 2271 2272 case O_IP_DST_ME: 2273 if (is_ipv4) { 2274 match = in_localip(dst_ip); 2275 break; 2276 } 2277 #ifdef INET6 2278 /* FALLTHROUGH */ 2279 case O_IP6_DST_ME: 2280 match = is_ipv6 && 2281 ipfw_localip6(&args->f_id.dst_ip6); 2282 #endif 2283 break; 2284 2285 case O_IP_SRCPORT: 2286 case O_IP_DSTPORT: 2287 /* 2288 * offset == 0 && proto != 0 is enough 2289 * to guarantee that we have a 2290 * packet with port info. 2291 */ 2292 if ((proto == IPPROTO_UDP || 2293 proto == IPPROTO_UDPLITE || 2294 proto == IPPROTO_TCP || 2295 proto == IPPROTO_SCTP) && offset == 0) { 2296 u_int16_t x = 2297 (cmd->opcode == O_IP_SRCPORT) ? 2298 src_port : dst_port ; 2299 u_int16_t *p = 2300 ((ipfw_insn_u16 *)cmd)->ports; 2301 int i; 2302 2303 for (i = cmdlen - 1; !match && i>0; 2304 i--, p += 2) 2305 match = (x>=p[0] && x<=p[1]); 2306 } 2307 break; 2308 2309 case O_ICMPTYPE: 2310 match = (offset == 0 && proto==IPPROTO_ICMP && 2311 icmptype_match(ICMP(ulp), (ipfw_insn_u32 *)cmd) ); 2312 break; 2313 2314 #ifdef INET6 2315 case O_ICMP6TYPE: 2316 match = is_ipv6 && offset == 0 && 2317 proto==IPPROTO_ICMPV6 && 2318 icmp6type_match( 2319 ICMP6(ulp)->icmp6_type, 2320 (ipfw_insn_u32 *)cmd); 2321 break; 2322 #endif /* INET6 */ 2323 2324 case O_IPOPT: 2325 match = (is_ipv4 && 2326 ipopts_match(ip, cmd) ); 2327 break; 2328 2329 case O_IPVER: 2330 match = ((is_ipv4 || is_ipv6) && 2331 cmd->arg1 == ip->ip_v); 2332 break; 2333 2334 case O_IPID: 2335 case O_IPTTL: 2336 if (!is_ipv4) 2337 break; 2338 case O_IPLEN: 2339 { /* only for IP packets */ 2340 uint16_t x; 2341 uint16_t *p; 2342 int i; 2343 2344 if (cmd->opcode == O_IPLEN) 2345 x = iplen; 2346 else if (cmd->opcode == O_IPTTL) 2347 x = ip->ip_ttl; 2348 else /* must be IPID */ 2349 x = ntohs(ip->ip_id); 2350 if (cmdlen == 1) { 2351 match = (cmd->arg1 == x); 2352 break; 2353 } 2354 /* otherwise we have ranges */ 2355 p = ((ipfw_insn_u16 *)cmd)->ports; 2356 i = cmdlen - 1; 2357 for (; !match && i>0; i--, p += 2) 2358 match = (x >= p[0] && x <= p[1]); 2359 } 2360 break; 2361 2362 case O_IPPRECEDENCE: 2363 match = (is_ipv4 && 2364 (cmd->arg1 == (ip->ip_tos & 0xe0)) ); 2365 break; 2366 2367 case O_IPTOS: 2368 match = (is_ipv4 && 2369 flags_match(cmd, ip->ip_tos)); 2370 break; 2371 2372 case O_DSCP: 2373 { 2374 uint32_t *p; 2375 uint16_t x; 2376 2377 p = ((ipfw_insn_u32 *)cmd)->d; 2378 2379 if (is_ipv4) 2380 x = ip->ip_tos >> 2; 2381 else if (is_ipv6) { 2382 x = IPV6_DSCP( 2383 (struct ip6_hdr *)ip) >> 2; 2384 x &= 0x3f; 2385 } else 2386 break; 2387 2388 /* DSCP bitmask is stored as low_u32 high_u32 */ 2389 if (x >= 32) 2390 match = *(p + 1) & (1 << (x - 32)); 2391 else 2392 match = *p & (1 << x); 2393 } 2394 break; 2395 2396 case O_TCPDATALEN: 2397 if (proto == IPPROTO_TCP && offset == 0) { 2398 struct tcphdr *tcp; 2399 uint16_t x; 2400 uint16_t *p; 2401 int i; 2402 #ifdef INET6 2403 if (is_ipv6) { 2404 struct ip6_hdr *ip6; 2405 2406 ip6 = (struct ip6_hdr *)ip; 2407 if (ip6->ip6_plen == 0) { 2408 /* 2409 * Jumbo payload is not 2410 * supported by this 2411 * opcode. 
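 * (A zero ip6_plen means the real length is carried in a
 * hop-by-hop jumbo payload option, which this fast path
 * does not parse.)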
2412 */ 2413 break; 2414 } 2415 x = iplen - hlen; 2416 } else 2417 #endif /* INET6 */ 2418 x = iplen - (ip->ip_hl << 2); 2419 tcp = TCP(ulp); 2420 x -= tcp->th_off << 2; 2421 if (cmdlen == 1) { 2422 match = (cmd->arg1 == x); 2423 break; 2424 } 2425 /* otherwise we have ranges */ 2426 p = ((ipfw_insn_u16 *)cmd)->ports; 2427 i = cmdlen - 1; 2428 for (; !match && i>0; i--, p += 2) 2429 match = (x >= p[0] && x <= p[1]); 2430 } 2431 break; 2432 2433 case O_TCPFLAGS: 2434 match = (proto == IPPROTO_TCP && offset == 0 && 2435 flags_match(cmd, TCP(ulp)->th_flags)); 2436 break; 2437 2438 case O_TCPOPTS: 2439 if (proto == IPPROTO_TCP && offset == 0 && ulp){ 2440 PULLUP_LEN_LOCKED(hlen, ulp, 2441 (TCP(ulp)->th_off << 2)); 2442 match = tcpopts_match(TCP(ulp), cmd); 2443 } 2444 break; 2445 2446 case O_TCPSEQ: 2447 match = (proto == IPPROTO_TCP && offset == 0 && 2448 ((ipfw_insn_u32 *)cmd)->d[0] == 2449 TCP(ulp)->th_seq); 2450 break; 2451 2452 case O_TCPACK: 2453 match = (proto == IPPROTO_TCP && offset == 0 && 2454 ((ipfw_insn_u32 *)cmd)->d[0] == 2455 TCP(ulp)->th_ack); 2456 break; 2457 2458 case O_TCPMSS: 2459 if (proto == IPPROTO_TCP && 2460 (args->f_id._flags & TH_SYN) != 0 && 2461 ulp != NULL) { 2462 uint16_t mss, *p; 2463 int i; 2464 2465 PULLUP_LEN_LOCKED(hlen, ulp, 2466 (TCP(ulp)->th_off << 2)); 2467 if ((tcpopts_parse(TCP(ulp), &mss) & 2468 IP_FW_TCPOPT_MSS) == 0) 2469 break; 2470 if (cmdlen == 1) { 2471 match = (cmd->arg1 == mss); 2472 break; 2473 } 2474 /* Otherwise we have ranges. */ 2475 p = ((ipfw_insn_u16 *)cmd)->ports; 2476 i = cmdlen - 1; 2477 for (; !match && i > 0; i--, p += 2) 2478 match = (mss >= p[0] && 2479 mss <= p[1]); 2480 } 2481 break; 2482 2483 case O_TCPWIN: 2484 if (proto == IPPROTO_TCP && offset == 0) { 2485 uint16_t x; 2486 uint16_t *p; 2487 int i; 2488 2489 x = ntohs(TCP(ulp)->th_win); 2490 if (cmdlen == 1) { 2491 match = (cmd->arg1 == x); 2492 break; 2493 } 2494 /* Otherwise we have ranges. */ 2495 p = ((ipfw_insn_u16 *)cmd)->ports; 2496 i = cmdlen - 1; 2497 for (; !match && i > 0; i--, p += 2) 2498 match = (x >= p[0] && x <= p[1]); 2499 } 2500 break; 2501 2502 case O_ESTAB: 2503 /* reject packets which have SYN only */ 2504 /* XXX should i also check for TH_ACK ? */ 2505 match = (proto == IPPROTO_TCP && offset == 0 && 2506 (TCP(ulp)->th_flags & 2507 (TH_RST | TH_ACK | TH_SYN)) != TH_SYN); 2508 break; 2509 2510 case O_ALTQ: { 2511 struct pf_mtag *at; 2512 struct m_tag *mtag; 2513 ipfw_insn_altq *altq = (ipfw_insn_altq *)cmd; 2514 2515 /* 2516 * ALTQ uses mbuf tags from another 2517 * packet filtering system - pf(4). 2518 * We allocate a tag in its format 2519 * and fill it in, pretending to be pf(4). 2520 */ 2521 match = 1; 2522 at = pf_find_mtag(m); 2523 if (at != NULL && at->qid != 0) 2524 break; 2525 mtag = m_tag_get(PACKET_TAG_PF, 2526 sizeof(struct pf_mtag), M_NOWAIT | M_ZERO); 2527 if (mtag == NULL) { 2528 /* 2529 * Let the packet fall back to the 2530 * default ALTQ. 2531 */ 2532 break; 2533 } 2534 m_tag_prepend(m, mtag); 2535 at = (struct pf_mtag *)(mtag + 1); 2536 at->qid = altq->qid; 2537 at->hdr = ip; 2538 break; 2539 } 2540 2541 case O_LOG: 2542 ipfw_log(chain, f, hlen, args, 2543 offset | ip6f_mf, tablearg, ip); 2544 match = 1; 2545 break; 2546 2547 case O_PROB: 2548 match = (random()<((ipfw_insn_u32 *)cmd)->d[0]); 2549 break; 2550 2551 case O_VERREVPATH: 2552 /* Outgoing packets automatically pass/match */ 2553 match = (args->flags & IPFW_ARGS_OUT || 2554 ( 2555 #ifdef INET6 2556 is_ipv6 ? 
2557 verify_path6(&(args->f_id.src_ip6), 2558 iif, args->f_id.fib) : 2559 #endif 2560 verify_path(src_ip, iif, args->f_id.fib))); 2561 break; 2562 2563 case O_VERSRCREACH: 2564 /* Outgoing packets automatically pass/match */ 2565 match = (hlen > 0 && ((oif != NULL) || ( 2566 #ifdef INET6 2567 is_ipv6 ? 2568 verify_path6(&(args->f_id.src_ip6), 2569 NULL, args->f_id.fib) : 2570 #endif 2571 verify_path(src_ip, NULL, args->f_id.fib)))); 2572 break; 2573 2574 case O_ANTISPOOF: 2575 /* Outgoing packets automatically pass/match */ 2576 if (oif == NULL && hlen > 0 && 2577 ( (is_ipv4 && in_localaddr(src_ip)) 2578 #ifdef INET6 2579 || (is_ipv6 && 2580 in6_localaddr(&(args->f_id.src_ip6))) 2581 #endif 2582 )) 2583 match = 2584 #ifdef INET6 2585 is_ipv6 ? verify_path6( 2586 &(args->f_id.src_ip6), iif, 2587 args->f_id.fib) : 2588 #endif 2589 verify_path(src_ip, iif, 2590 args->f_id.fib); 2591 else 2592 match = 1; 2593 break; 2594 2595 case O_IPSEC: 2596 match = (m_tag_find(m, 2597 PACKET_TAG_IPSEC_IN_DONE, NULL) != NULL); 2598 /* otherwise no match */ 2599 break; 2600 2601 #ifdef INET6 2602 case O_IP6_SRC: 2603 match = is_ipv6 && 2604 IN6_ARE_ADDR_EQUAL(&args->f_id.src_ip6, 2605 &((ipfw_insn_ip6 *)cmd)->addr6); 2606 break; 2607 2608 case O_IP6_DST: 2609 match = is_ipv6 && 2610 IN6_ARE_ADDR_EQUAL(&args->f_id.dst_ip6, 2611 &((ipfw_insn_ip6 *)cmd)->addr6); 2612 break; 2613 case O_IP6_SRC_MASK: 2614 case O_IP6_DST_MASK: 2615 if (is_ipv6) { 2616 int i = cmdlen - 1; 2617 struct in6_addr p; 2618 struct in6_addr *d = 2619 &((ipfw_insn_ip6 *)cmd)->addr6; 2620 2621 for (; !match && i > 0; d += 2, 2622 i -= F_INSN_SIZE(struct in6_addr) 2623 * 2) { 2624 p = (cmd->opcode == 2625 O_IP6_SRC_MASK) ? 2626 args->f_id.src_ip6: 2627 args->f_id.dst_ip6; 2628 APPLY_MASK(&p, &d[1]); 2629 match = 2630 IN6_ARE_ADDR_EQUAL(&d[0], 2631 &p); 2632 } 2633 } 2634 break; 2635 2636 case O_FLOW6ID: 2637 match = is_ipv6 && 2638 flow6id_match(args->f_id.flow_id6, 2639 (ipfw_insn_u32 *) cmd); 2640 break; 2641 2642 case O_EXT_HDR: 2643 match = is_ipv6 && 2644 (ext_hd & ((ipfw_insn *) cmd)->arg1); 2645 break; 2646 2647 case O_IP6: 2648 match = is_ipv6; 2649 break; 2650 #endif 2651 2652 case O_IP4: 2653 match = is_ipv4; 2654 break; 2655 2656 case O_TAG: { 2657 struct m_tag *mtag; 2658 uint32_t tag = TARG(cmd->arg1, tag); 2659 2660 /* Packet is already tagged with this tag? */ 2661 mtag = m_tag_locate(m, MTAG_IPFW, tag, NULL); 2662 2663 /* We have `untag' action when F_NOT flag is 2664 * present. And we must remove this mtag from 2665 * mbuf and reset `match' to zero (`match' will 2666 * be inversed later). 2667 * Otherwise we should allocate new mtag and 2668 * push it into mbuf. 
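 * (Illustrative example only, see ipfw(8): "tag 123" in a rule
 * attaches an MTAG_IPFW tag with id 123 to the mbuf, while
 * "untag 123" in a later rule removes it again.)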
2669 */ 2670 if (cmd->len & F_NOT) { /* `untag' action */ 2671 if (mtag != NULL) 2672 m_tag_delete(m, mtag); 2673 match = 0; 2674 } else { 2675 if (mtag == NULL) { 2676 mtag = m_tag_alloc( MTAG_IPFW, 2677 tag, 0, M_NOWAIT); 2678 if (mtag != NULL) 2679 m_tag_prepend(m, mtag); 2680 } 2681 match = 1; 2682 } 2683 break; 2684 } 2685 2686 case O_FIB: /* try match the specified fib */ 2687 if (args->f_id.fib == cmd->arg1) 2688 match = 1; 2689 break; 2690 2691 case O_SOCKARG: { 2692 #ifndef USERSPACE /* not supported in userspace */ 2693 struct inpcb *inp = args->inp; 2694 struct inpcbinfo *pi; 2695 bool inp_locked = false; 2696 2697 if (proto == IPPROTO_TCP) 2698 pi = &V_tcbinfo; 2699 else if (proto == IPPROTO_UDP) 2700 pi = &V_udbinfo; 2701 else if (proto == IPPROTO_UDPLITE) 2702 pi = &V_ulitecbinfo; 2703 else 2704 break; 2705 2706 /* 2707 * XXXRW: so_user_cookie should almost 2708 * certainly be inp_user_cookie? 2709 */ 2710 2711 /* 2712 * For incoming packet lookup the inpcb 2713 * using the src/dest ip/port tuple. 2714 */ 2715 if (is_ipv4 && inp == NULL) { 2716 inp = in_pcblookup(pi, 2717 src_ip, htons(src_port), 2718 dst_ip, htons(dst_port), 2719 INPLOOKUP_RLOCKPCB, NULL); 2720 inp_locked = true; 2721 } 2722 #ifdef INET6 2723 if (is_ipv6 && inp == NULL) { 2724 inp = in6_pcblookup(pi, 2725 &args->f_id.src_ip6, 2726 htons(src_port), 2727 &args->f_id.dst_ip6, 2728 htons(dst_port), 2729 INPLOOKUP_RLOCKPCB, NULL); 2730 inp_locked = true; 2731 } 2732 #endif /* INET6 */ 2733 if (inp != NULL) { 2734 if (inp->inp_socket) { 2735 tablearg = 2736 inp->inp_socket->so_user_cookie; 2737 if (tablearg) 2738 match = 1; 2739 } 2740 if (inp_locked) 2741 INP_RUNLOCK(inp); 2742 } 2743 #endif /* !USERSPACE */ 2744 break; 2745 } 2746 2747 case O_TAGGED: { 2748 struct m_tag *mtag; 2749 uint32_t tag = TARG(cmd->arg1, tag); 2750 2751 if (cmdlen == 1) { 2752 match = m_tag_locate(m, MTAG_IPFW, 2753 tag, NULL) != NULL; 2754 break; 2755 } 2756 2757 /* we have ranges */ 2758 for (mtag = m_tag_first(m); 2759 mtag != NULL && !match; 2760 mtag = m_tag_next(m, mtag)) { 2761 uint16_t *p; 2762 int i; 2763 2764 if (mtag->m_tag_cookie != MTAG_IPFW) 2765 continue; 2766 2767 p = ((ipfw_insn_u16 *)cmd)->ports; 2768 i = cmdlen - 1; 2769 for(; !match && i > 0; i--, p += 2) 2770 match = 2771 mtag->m_tag_id >= p[0] && 2772 mtag->m_tag_id <= p[1]; 2773 } 2774 break; 2775 } 2776 2777 /* 2778 * The second set of opcodes represents 'actions', 2779 * i.e. the terminal part of a rule once the packet 2780 * matches all previous patterns. 2781 * Typically there is only one action for each rule, 2782 * and the opcode is stored at the end of the rule 2783 * (but there are exceptions -- see below). 2784 * 2785 * In general, here we set retval and terminate the 2786 * outer loop (would be a 'break 3' in some language, 2787 * but we need to set l=0, done=1) 2788 * 2789 * Exceptions: 2790 * O_COUNT and O_SKIPTO actions: 2791 * instead of terminating, we jump to the next rule 2792 * (setting l=0), or to the SKIPTO target (setting 2793 * f/f_len, cmd and l as needed), respectively. 2794 * 2795 * O_TAG, O_LOG and O_ALTQ action parameters: 2796 * perform some action and set match = 1; 2797 * 2798 * O_LIMIT and O_KEEP_STATE: these opcodes are 2799 * not real 'actions', and are stored right 2800 * before the 'action' part of the rule (one 2801 * exception is O_SKIP_ACTION which could be 2802 * between these opcodes and 'action' one). 
2803 * These opcodes try to install an entry in the 2804 * state tables; if successful, we continue with 2805 * the next opcode (match=1; break;), otherwise 2806 * the packet must be dropped (set retval, 2807 * break loops with l=0, done=1) 2808 * 2809 * O_PROBE_STATE and O_CHECK_STATE: these opcodes 2810 * cause a lookup of the state table, and a jump 2811 * to the 'action' part of the parent rule 2812 * if an entry is found, or 2813 * (CHECK_STATE only) a jump to the next rule if 2814 * the entry is not found. 2815 * The result of the lookup is cached so that 2816 * further instances of these opcodes become NOPs. 2817 * The jump to the next rule is done by setting 2818 * l=0, cmdlen=0. 2819 * 2820 * O_SKIP_ACTION: this opcode is not a real 'action' 2821 * either, and is stored right before the 'action' 2822 * part of the rule, right after the O_KEEP_STATE 2823 * opcode. It causes match failure so the real 2824 * 'action' could be executed only if the rule 2825 * is checked via dynamic rule from the state 2826 * table, as in such case execution starts 2827 * from the true 'action' opcode directly. 2828 * 2829 */ 2830 case O_LIMIT: 2831 case O_KEEP_STATE: 2832 if (ipfw_dyn_install_state(chain, f, 2833 (ipfw_insn_limit *)cmd, args, ulp, 2834 pktlen, &dyn_info, tablearg)) { 2835 /* error or limit violation */ 2836 retval = IP_FW_DENY; 2837 l = 0; /* exit inner loop */ 2838 done = 1; /* exit outer loop */ 2839 } 2840 match = 1; 2841 break; 2842 2843 case O_PROBE_STATE: 2844 case O_CHECK_STATE: 2845 /* 2846 * dynamic rules are checked at the first 2847 * keep-state or check-state occurrence, 2848 * with the result being stored in dyn_info. 2849 * The compiler introduces a PROBE_STATE 2850 * instruction for us when we have a 2851 * KEEP_STATE (because PROBE_STATE needs 2852 * to be run first). 2853 */ 2854 if (DYN_LOOKUP_NEEDED(&dyn_info, cmd) && 2855 (q = ipfw_dyn_lookup_state(args, ulp, 2856 pktlen, cmd, &dyn_info)) != NULL) { 2857 /* 2858 * Found dynamic entry, jump to the 2859 * 'action' part of the parent rule 2860 * by setting f, cmd, l and clearing 2861 * cmdlen. 2862 */ 2863 f = q; 2864 f_pos = dyn_info.f_pos; 2865 cmd = ACTION_PTR(f); 2866 l = f->cmd_len - f->act_ofs; 2867 cmdlen = 0; 2868 match = 1; 2869 break; 2870 } 2871 /* 2872 * Dynamic entry not found. If CHECK_STATE, 2873 * skip to next rule, if PROBE_STATE just 2874 * ignore and continue with next opcode. 2875 */ 2876 if (cmd->opcode == O_CHECK_STATE) 2877 l = 0; /* exit inner loop */ 2878 match = 1; 2879 break; 2880 2881 case O_SKIP_ACTION: 2882 match = 0; /* skip to the next rule */ 2883 l = 0; /* exit inner loop */ 2884 break; 2885 2886 case O_ACCEPT: 2887 retval = 0; /* accept */ 2888 l = 0; /* exit inner loop */ 2889 done = 1; /* exit outer loop */ 2890 break; 2891 2892 case O_PIPE: 2893 case O_QUEUE: 2894 set_match(args, f_pos, chain); 2895 args->rule.info = TARG(cmd->arg1, pipe); 2896 if (cmd->opcode == O_PIPE) 2897 args->rule.info |= IPFW_IS_PIPE; 2898 if (V_fw_one_pass) 2899 args->rule.info |= IPFW_ONEPASS; 2900 retval = IP_FW_DUMMYNET; 2901 l = 0; /* exit inner loop */ 2902 done = 1; /* exit outer loop */ 2903 break; 2904 2905 case O_DIVERT: 2906 case O_TEE: 2907 if (args->flags & IPFW_ARGS_ETHER) 2908 break; /* not on layer 2 */ 2909 /* otherwise this is terminal */ 2910 l = 0; /* exit inner loop */ 2911 done = 1; /* exit outer loop */ 2912 retval = (cmd->opcode == O_DIVERT) ? 
2913 IP_FW_DIVERT : IP_FW_TEE; 2914 set_match(args, f_pos, chain); 2915 args->rule.info = TARG(cmd->arg1, divert); 2916 break; 2917 2918 case O_COUNT: 2919 IPFW_INC_RULE_COUNTER(f, pktlen); 2920 l = 0; /* exit inner loop */ 2921 break; 2922 2923 case O_SKIPTO: 2924 IPFW_INC_RULE_COUNTER(f, pktlen); 2925 f_pos = JUMP(chain, f, cmd->arg1, tablearg, 0); 2926 /* 2927 * Skip disabled rules, and re-enter 2928 * the inner loop with the correct 2929 * f_pos, f, l and cmd. 2930 * Also clear cmdlen and skip_or 2931 */ 2932 for (; f_pos < chain->n_rules - 1 && 2933 (V_set_disable & 2934 (1 << chain->map[f_pos]->set)); 2935 f_pos++) 2936 ; 2937 /* Re-enter the inner loop at the skipto rule. */ 2938 f = chain->map[f_pos]; 2939 l = f->cmd_len; 2940 cmd = f->cmd; 2941 match = 1; 2942 cmdlen = 0; 2943 skip_or = 0; 2944 continue; 2945 break; /* not reached */ 2946 2947 case O_CALLRETURN: { 2948 /* 2949 * Implementation of `subroutine' call/return, 2950 * in the stack carried in an mbuf tag. This 2951 * is different from `skipto' in that any call 2952 * address is possible (`skipto' must prevent 2953 * backward jumps to avoid endless loops). 2954 * We have `return' action when F_NOT flag is 2955 * present. The `m_tag_id' field is used as 2956 * stack pointer. 2957 */ 2958 struct m_tag *mtag; 2959 uint16_t jmpto, *stack; 2960 2961 #define IS_CALL ((cmd->len & F_NOT) == 0) 2962 #define IS_RETURN ((cmd->len & F_NOT) != 0) 2963 /* 2964 * Hand-rolled version of m_tag_locate() with 2965 * wildcard `type'. 2966 * If not already tagged, allocate new tag. 2967 */ 2968 mtag = m_tag_first(m); 2969 while (mtag != NULL) { 2970 if (mtag->m_tag_cookie == 2971 MTAG_IPFW_CALL) 2972 break; 2973 mtag = m_tag_next(m, mtag); 2974 } 2975 if (mtag == NULL && IS_CALL) { 2976 mtag = m_tag_alloc(MTAG_IPFW_CALL, 0, 2977 IPFW_CALLSTACK_SIZE * 2978 sizeof(uint16_t), M_NOWAIT); 2979 if (mtag != NULL) 2980 m_tag_prepend(m, mtag); 2981 } 2982 2983 /* 2984 * On error both `call' and `return' just 2985 * continue with next rule. 2986 */ 2987 if (IS_RETURN && (mtag == NULL || 2988 mtag->m_tag_id == 0)) { 2989 l = 0; /* exit inner loop */ 2990 break; 2991 } 2992 if (IS_CALL && (mtag == NULL || 2993 mtag->m_tag_id >= IPFW_CALLSTACK_SIZE)) { 2994 printf("ipfw: call stack error, " 2995 "go to next rule\n"); 2996 l = 0; /* exit inner loop */ 2997 break; 2998 } 2999 3000 IPFW_INC_RULE_COUNTER(f, pktlen); 3001 stack = (uint16_t *)(mtag + 1); 3002 3003 /* 3004 * The `call' action may use cached f_pos 3005 * (in f->next_rule), whose version is written 3006 * in f->next_rule. 3007 * The `return' action, however, doesn't have 3008 * fixed jump address in cmd->arg1 and can't use 3009 * cache. 3010 */ 3011 if (IS_CALL) { 3012 stack[mtag->m_tag_id] = f->rulenum; 3013 mtag->m_tag_id++; 3014 f_pos = JUMP(chain, f, cmd->arg1, 3015 tablearg, 1); 3016 } else { /* `return' action */ 3017 mtag->m_tag_id--; 3018 jmpto = stack[mtag->m_tag_id] + 1; 3019 f_pos = ipfw_find_rule(chain, jmpto, 0); 3020 } 3021 3022 /* 3023 * Skip disabled rules, and re-enter 3024 * the inner loop with the correct 3025 * f_pos, f, l and cmd. 3026 * Also clear cmdlen and skip_or 3027 */ 3028 for (; f_pos < chain->n_rules - 1 && 3029 (V_set_disable & 3030 (1 << chain->map[f_pos]->set)); f_pos++) 3031 ; 3032 /* Re-enter the inner loop at the dest rule. 
*/ 3033 f = chain->map[f_pos]; 3034 l = f->cmd_len; 3035 cmd = f->cmd; 3036 cmdlen = 0; 3037 skip_or = 0; 3038 continue; 3039 break; /* NOTREACHED */ 3040 } 3041 #undef IS_CALL 3042 #undef IS_RETURN 3043 3044 case O_REJECT: 3045 /* 3046 * Drop the packet and send a reject notice 3047 * if the packet is not ICMP (or is an ICMP 3048 * query), and it is not multicast/broadcast. 3049 */ 3050 if (hlen > 0 && is_ipv4 && offset == 0 && 3051 (proto != IPPROTO_ICMP || 3052 is_icmp_query(ICMP(ulp))) && 3053 !(m->m_flags & (M_BCAST|M_MCAST)) && 3054 !IN_MULTICAST(ntohl(dst_ip.s_addr))) { 3055 send_reject(args, cmd, iplen, ip); 3056 m = args->m; 3057 } 3058 /* FALLTHROUGH */ 3059 #ifdef INET6 3060 case O_UNREACH6: 3061 if (hlen > 0 && is_ipv6 && 3062 ((offset & IP6F_OFF_MASK) == 0) && 3063 (proto != IPPROTO_ICMPV6 || 3064 (is_icmp6_query(icmp6_type) == 1)) && 3065 !(m->m_flags & (M_BCAST|M_MCAST)) && 3066 !IN6_IS_ADDR_MULTICAST( 3067 &args->f_id.dst_ip6)) { 3068 send_reject6(args, 3069 cmd->opcode == O_REJECT ? 3070 map_icmp_unreach(cmd->arg1): 3071 cmd->arg1, hlen, 3072 (struct ip6_hdr *)ip); 3073 m = args->m; 3074 } 3075 /* FALLTHROUGH */ 3076 #endif 3077 case O_DENY: 3078 retval = IP_FW_DENY; 3079 l = 0; /* exit inner loop */ 3080 done = 1; /* exit outer loop */ 3081 break; 3082 3083 case O_FORWARD_IP: 3084 if (args->flags & IPFW_ARGS_ETHER) 3085 break; /* not valid on layer2 pkts */ 3086 if (q != f || 3087 dyn_info.direction == MATCH_FORWARD) { 3088 struct sockaddr_in *sa; 3089 3090 sa = &(((ipfw_insn_sa *)cmd)->sa); 3091 if (sa->sin_addr.s_addr == INADDR_ANY) { 3092 #ifdef INET6 3093 /* 3094 * We use O_FORWARD_IP opcode for 3095 * fwd rule with tablearg, but tables 3096 * now support IPv6 addresses. And 3097 * when we are inspecting IPv6 packet, 3098 * we can use nh6 field from 3099 * table_value as next_hop6 address. 3100 */ 3101 if (is_ipv6) { 3102 struct ip_fw_nh6 *nh6; 3103 3104 args->flags |= IPFW_ARGS_NH6; 3105 nh6 = &args->hopstore6; 3106 nh6->sin6_addr = TARG_VAL( 3107 chain, tablearg, nh6); 3108 nh6->sin6_port = sa->sin_port; 3109 nh6->sin6_scope_id = TARG_VAL( 3110 chain, tablearg, zoneid); 3111 } else 3112 #endif 3113 { 3114 args->flags |= IPFW_ARGS_NH4; 3115 args->hopstore.sin_port = 3116 sa->sin_port; 3117 sa = &args->hopstore; 3118 sa->sin_family = AF_INET; 3119 sa->sin_len = sizeof(*sa); 3120 sa->sin_addr.s_addr = htonl( 3121 TARG_VAL(chain, tablearg, 3122 nh4)); 3123 } 3124 } else { 3125 args->flags |= IPFW_ARGS_NH4PTR; 3126 args->next_hop = sa; 3127 } 3128 } 3129 retval = IP_FW_PASS; 3130 l = 0; /* exit inner loop */ 3131 done = 1; /* exit outer loop */ 3132 break; 3133 3134 #ifdef INET6 3135 case O_FORWARD_IP6: 3136 if (args->flags & IPFW_ARGS_ETHER) 3137 break; /* not valid on layer2 pkts */ 3138 if (q != f || 3139 dyn_info.direction == MATCH_FORWARD) { 3140 struct sockaddr_in6 *sin6; 3141 3142 sin6 = &(((ipfw_insn_sa6 *)cmd)->sa); 3143 args->flags |= IPFW_ARGS_NH6PTR; 3144 args->next_hop6 = sin6; 3145 } 3146 retval = IP_FW_PASS; 3147 l = 0; /* exit inner loop */ 3148 done = 1; /* exit outer loop */ 3149 break; 3150 #endif 3151 3152 case O_NETGRAPH: 3153 case O_NGTEE: 3154 set_match(args, f_pos, chain); 3155 args->rule.info = TARG(cmd->arg1, netgraph); 3156 if (V_fw_one_pass) 3157 args->rule.info |= IPFW_ONEPASS; 3158 retval = (cmd->opcode == O_NETGRAPH) ? 
3159 IP_FW_NETGRAPH : IP_FW_NGTEE; 3160 l = 0; /* exit inner loop */ 3161 done = 1; /* exit outer loop */ 3162 break; 3163 3164 case O_SETFIB: { 3165 uint32_t fib; 3166 3167 IPFW_INC_RULE_COUNTER(f, pktlen); 3168 fib = TARG(cmd->arg1, fib) & 0x7FFF; 3169 if (fib >= rt_numfibs) 3170 fib = 0; 3171 M_SETFIB(m, fib); 3172 args->f_id.fib = fib; /* XXX */ 3173 l = 0; /* exit inner loop */ 3174 break; 3175 } 3176 3177 case O_SETDSCP: { 3178 uint16_t code; 3179 3180 code = TARG(cmd->arg1, dscp) & 0x3F; 3181 l = 0; /* exit inner loop */ 3182 if (is_ipv4) { 3183 uint16_t old; 3184 3185 old = *(uint16_t *)ip; 3186 ip->ip_tos = (code << 2) | 3187 (ip->ip_tos & 0x03); 3188 ip->ip_sum = cksum_adjust(ip->ip_sum, 3189 old, *(uint16_t *)ip); 3190 } else if (is_ipv6) { 3191 /* update cached value */ 3192 args->f_id.flow_id6 = 3193 ntohl(*(uint32_t *)ip) & ~0x0FC00000; 3194 args->f_id.flow_id6 |= code << 22; 3195 3196 *((uint32_t *)ip) = 3197 htonl(args->f_id.flow_id6); 3198 } else 3199 break; 3200 3201 IPFW_INC_RULE_COUNTER(f, pktlen); 3202 break; 3203 } 3204 3205 case O_NAT: 3206 l = 0; /* exit inner loop */ 3207 done = 1; /* exit outer loop */ 3208 /* 3209 * Ensure that we do not invoke NAT handler for 3210 * non IPv4 packets. Libalias expects only IPv4. 3211 */ 3212 if (!is_ipv4 || !IPFW_NAT_LOADED) { 3213 retval = IP_FW_DENY; 3214 break; 3215 } 3216 3217 struct cfg_nat *t; 3218 int nat_id; 3219 3220 args->rule.info = 0; 3221 set_match(args, f_pos, chain); 3222 /* Check if this is 'global' nat rule */ 3223 if (cmd->arg1 == IP_FW_NAT44_GLOBAL) { 3224 retval = ipfw_nat_ptr(args, NULL, m); 3225 break; 3226 } 3227 t = ((ipfw_insn_nat *)cmd)->nat; 3228 if (t == NULL) { 3229 nat_id = TARG(cmd->arg1, nat); 3230 t = (*lookup_nat_ptr)(&chain->nat, nat_id); 3231 3232 if (t == NULL) { 3233 retval = IP_FW_DENY; 3234 break; 3235 } 3236 if (cmd->arg1 != IP_FW_TARG) 3237 ((ipfw_insn_nat *)cmd)->nat = t; 3238 } 3239 retval = ipfw_nat_ptr(args, t, m); 3240 break; 3241 3242 case O_REASS: { 3243 int ip_off; 3244 3245 l = 0; /* in any case exit inner loop */ 3246 if (is_ipv6) /* IPv6 is not supported yet */ 3247 break; 3248 IPFW_INC_RULE_COUNTER(f, pktlen); 3249 ip_off = ntohs(ip->ip_off); 3250 3251 /* if not fragmented, go to next rule */ 3252 if ((ip_off & (IP_MF | IP_OFFMASK)) == 0) 3253 break; 3254 3255 args->m = m = ip_reass(m); 3256 3257 /* 3258 * do IP header checksum fixup. 3259 */ 3260 if (m == NULL) { /* fragment got swallowed */ 3261 retval = IP_FW_DENY; 3262 } else { /* good, packet complete */ 3263 int hlen; 3264 3265 ip = mtod(m, struct ip *); 3266 hlen = ip->ip_hl << 2; 3267 ip->ip_sum = 0; 3268 if (hlen == sizeof(struct ip)) 3269 ip->ip_sum = in_cksum_hdr(ip); 3270 else 3271 ip->ip_sum = in_cksum(m, hlen); 3272 retval = IP_FW_REASS; 3273 args->rule.info = 0; 3274 set_match(args, f_pos, chain); 3275 } 3276 done = 1; /* exit outer loop */ 3277 break; 3278 } 3279 case O_EXTERNAL_ACTION: 3280 l = 0; /* in any case exit inner loop */ 3281 retval = ipfw_run_eaction(chain, args, 3282 cmd, &done); 3283 /* 3284 * If both @retval and @done are zero, 3285 * consider this as rule matching and 3286 * update counters. 3287 */ 3288 if (retval == 0 && done == 0) { 3289 IPFW_INC_RULE_COUNTER(f, pktlen); 3290 /* 3291 * Reset the result of the last 3292 * dynamic state lookup. 3293 * External action can change 3294 * @args content, and it may be 3295 * used for new state lookup later. 
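 * (External actions are provided by separate modules that
 * register themselves with the eaction framework, e.g. the
 * NAT64 and NPTv6 modules.)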
3296 */ 3297 DYN_INFO_INIT(&dyn_info); 3298 } 3299 break; 3300 3301 default: 3302 panic("-- unknown opcode %d\n", cmd->opcode); 3303 } /* end of switch() on opcodes */ 3304 /* 3305 * if we get here with l=0, then match is irrelevant. 3306 */ 3307 3308 if (cmd->len & F_NOT) 3309 match = !match; 3310 3311 if (match) { 3312 if (cmd->len & F_OR) 3313 skip_or = 1; 3314 } else { 3315 if (!(cmd->len & F_OR)) /* not an OR block, */ 3316 break; /* try next rule */ 3317 } 3318 3319 } /* end of inner loop, scan opcodes */ 3320 #undef PULLUP_LEN 3321 #undef PULLUP_LEN_LOCKED 3322 3323 if (done) 3324 break; 3325 3326 /* next_rule:; */ /* try next rule */ 3327 3328 } /* end of outer for, scan rules */ 3329 3330 if (done) { 3331 struct ip_fw *rule = chain->map[f_pos]; 3332 /* Update statistics */ 3333 IPFW_INC_RULE_COUNTER(rule, pktlen); 3334 IPFW_PROBE(rule__matched, retval, 3335 is_ipv4 ? AF_INET : AF_INET6, 3336 is_ipv4 ? (uintptr_t)&src_ip : 3337 (uintptr_t)&args->f_id.src_ip6, 3338 is_ipv4 ? (uintptr_t)&dst_ip : 3339 (uintptr_t)&args->f_id.dst_ip6, 3340 args, rule); 3341 } else { 3342 retval = IP_FW_DENY; 3343 printf("ipfw: ouch!, skip past end of rules, denying packet\n"); 3344 } 3345 IPFW_PF_RUNLOCK(chain); 3346 #ifdef __FreeBSD__ 3347 if (ucred_cache != NULL) 3348 crfree(ucred_cache); 3349 #endif 3350 return (retval); 3351 3352 pullup_failed: 3353 if (V_fw_verbose) 3354 printf("ipfw: pullup failed\n"); 3355 return (IP_FW_DENY); 3356 } 3357 3358 /* 3359 * Set maximum number of tables that can be used in given VNET ipfw instance. 3360 */ 3361 #ifdef SYSCTL_NODE 3362 static int 3363 sysctl_ipfw_table_num(SYSCTL_HANDLER_ARGS) 3364 { 3365 int error; 3366 unsigned int ntables; 3367 3368 ntables = V_fw_tables_max; 3369 3370 error = sysctl_handle_int(oidp, &ntables, 0, req); 3371 /* Read operation or some error */ 3372 if ((error != 0) || (req->newptr == NULL)) 3373 return (error); 3374 3375 return (ipfw_resize_tables(&V_layer3_chain, ntables)); 3376 } 3377 3378 /* 3379 * Switches table namespace between global and per-set. 3380 */ 3381 static int 3382 sysctl_ipfw_tables_sets(SYSCTL_HANDLER_ARGS) 3383 { 3384 int error; 3385 unsigned int sets; 3386 3387 sets = V_fw_tables_sets; 3388 3389 error = sysctl_handle_int(oidp, &sets, 0, req); 3390 /* Read operation or some error */ 3391 if ((error != 0) || (req->newptr == NULL)) 3392 return (error); 3393 3394 return (ipfw_switch_tables_namespace(&V_layer3_chain, sets)); 3395 } 3396 #endif 3397 3398 /* 3399 * Module and VNET glue 3400 */ 3401 3402 /* 3403 * Stuff that must be initialised only on boot or module load 3404 */ 3405 static int 3406 ipfw_init(void) 3407 { 3408 int error = 0; 3409 3410 /* 3411 * Only print out this stuff the first time around, 3412 * when called from the sysinit code. 3413 */ 3414 printf("ipfw2 " 3415 #ifdef INET6 3416 "(+ipv6) " 3417 #endif 3418 "initialized, divert %s, nat %s, " 3419 "default to %s, logging ", 3420 #ifdef IPDIVERT 3421 "enabled", 3422 #else 3423 "loadable", 3424 #endif 3425 #ifdef IPFIREWALL_NAT 3426 "enabled", 3427 #else 3428 "loadable", 3429 #endif 3430 default_to_accept ? "accept" : "deny"); 3431 3432 /* 3433 * Note: V_xxx variables can be accessed here but the vnet specific 3434 * initializer may not have been called yet for the VIMAGE case. 3435 * Tuneables will have been processed. We will print out values for 3436 * the default vnet. 
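 * (By "tuneables" we mean loader tunables, e.g. setting
 * net.inet.ip.fw.default_to_accept=1 in loader.conf; the knob name is
 * given only as an example.)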
3437 * XXX This should all be rationalized AFTER 8.0 3438 */ 3439 if (V_fw_verbose == 0) 3440 printf("disabled\n"); 3441 else if (V_verbose_limit == 0) 3442 printf("unlimited\n"); 3443 else 3444 printf("limited to %d packets/entry by default\n", 3445 V_verbose_limit); 3446 3447 /* Check user-supplied table count for validness */ 3448 if (default_fw_tables > IPFW_TABLES_MAX) 3449 default_fw_tables = IPFW_TABLES_MAX; 3450 3451 ipfw_init_sopt_handler(); 3452 ipfw_init_obj_rewriter(); 3453 ipfw_iface_init(); 3454 return (error); 3455 } 3456 3457 /* 3458 * Called for the removal of the last instance only on module unload. 3459 */ 3460 static void 3461 ipfw_destroy(void) 3462 { 3463 3464 ipfw_iface_destroy(); 3465 ipfw_destroy_sopt_handler(); 3466 ipfw_destroy_obj_rewriter(); 3467 printf("IP firewall unloaded\n"); 3468 } 3469 3470 /* 3471 * Stuff that must be initialized for every instance 3472 * (including the first of course). 3473 */ 3474 static int 3475 vnet_ipfw_init(const void *unused) 3476 { 3477 int error, first; 3478 struct ip_fw *rule = NULL; 3479 struct ip_fw_chain *chain; 3480 3481 chain = &V_layer3_chain; 3482 3483 first = IS_DEFAULT_VNET(curvnet) ? 1 : 0; 3484 3485 /* First set up some values that are compile time options */ 3486 V_autoinc_step = 100; /* bounded to 1..1000 in add_rule() */ 3487 V_fw_deny_unknown_exthdrs = 1; 3488 #ifdef IPFIREWALL_VERBOSE 3489 V_fw_verbose = 1; 3490 #endif 3491 #ifdef IPFIREWALL_VERBOSE_LIMIT 3492 V_verbose_limit = IPFIREWALL_VERBOSE_LIMIT; 3493 #endif 3494 #ifdef IPFIREWALL_NAT 3495 LIST_INIT(&chain->nat); 3496 #endif 3497 3498 /* Init shared services hash table */ 3499 ipfw_init_srv(chain); 3500 3501 ipfw_init_counters(); 3502 /* Set initial number of tables */ 3503 V_fw_tables_max = default_fw_tables; 3504 error = ipfw_init_tables(chain, first); 3505 if (error) { 3506 printf("ipfw2: setting up tables failed\n"); 3507 free(chain->map, M_IPFW); 3508 free(rule, M_IPFW); 3509 return (ENOSPC); 3510 } 3511 3512 IPFW_LOCK_INIT(chain); 3513 3514 /* fill and insert the default rule */ 3515 rule = ipfw_alloc_rule(chain, sizeof(struct ip_fw)); 3516 rule->flags |= IPFW_RULE_NOOPT; 3517 rule->cmd_len = 1; 3518 rule->cmd[0].len = 1; 3519 rule->cmd[0].opcode = default_to_accept ? O_ACCEPT : O_DENY; 3520 chain->default_rule = rule; 3521 ipfw_add_protected_rule(chain, rule, 0); 3522 3523 ipfw_dyn_init(chain); 3524 ipfw_eaction_init(chain, first); 3525 #ifdef LINEAR_SKIPTO 3526 ipfw_init_skipto_cache(chain); 3527 #endif 3528 ipfw_bpf_init(first); 3529 3530 /* First set up some values that are compile time options */ 3531 V_ipfw_vnet_ready = 1; /* Open for business */ 3532 3533 /* 3534 * Hook the sockopt handler and pfil hooks for ipv4 and ipv6. 3535 * Even if the latter two fail we still keep the module alive 3536 * because the sockopt and layer2 paths are still useful. 3537 * ipfw[6]_hook return 0 on success, ENOENT on failure, 3538 * so we can ignore the exact return value and just set a flag. 3539 * 3540 * Note that V_fw[6]_enable are manipulated by a SYSCTL_PROC so 3541 * changes in the underlying (per-vnet) variables trigger 3542 * immediate hook()/unhook() calls. 3543 * In layer2 we have the same behaviour, except that V_ether_ipfw 3544 * is checked on each packet because there are no pfil hooks. 3545 */ 3546 V_ip_fw_ctl_ptr = ipfw_ctl3; 3547 error = ipfw_attach_hooks(); 3548 return (error); 3549 } 3550 3551 /* 3552 * Called for the removal of each instance. 
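 * (That is, once for every vnet being torn down, and for each
 * remaining vnet when the module itself is unloaded.)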
3553 */ 3554 static int 3555 vnet_ipfw_uninit(const void *unused) 3556 { 3557 struct ip_fw *reap; 3558 struct ip_fw_chain *chain = &V_layer3_chain; 3559 int i, last; 3560 3561 V_ipfw_vnet_ready = 0; /* tell new callers to go away */ 3562 /* 3563 * disconnect from ipv4, ipv6, layer2 and sockopt. 3564 * Then grab, release and grab again the WLOCK so we make 3565 * sure the update is propagated and nobody will be in. 3566 */ 3567 ipfw_detach_hooks(); 3568 V_ip_fw_ctl_ptr = NULL; 3569 3570 last = IS_DEFAULT_VNET(curvnet) ? 1 : 0; 3571 3572 IPFW_UH_WLOCK(chain); 3573 IPFW_UH_WUNLOCK(chain); 3574 3575 ipfw_dyn_uninit(0); /* run the callout_drain */ 3576 3577 IPFW_UH_WLOCK(chain); 3578 3579 reap = NULL; 3580 IPFW_WLOCK(chain); 3581 for (i = 0; i < chain->n_rules; i++) 3582 ipfw_reap_add(chain, &reap, chain->map[i]); 3583 free(chain->map, M_IPFW); 3584 #ifdef LINEAR_SKIPTO 3585 ipfw_destroy_skipto_cache(chain); 3586 #endif 3587 IPFW_WUNLOCK(chain); 3588 IPFW_UH_WUNLOCK(chain); 3589 ipfw_destroy_tables(chain, last); 3590 ipfw_eaction_uninit(chain, last); 3591 if (reap != NULL) 3592 ipfw_reap_rules(reap); 3593 vnet_ipfw_iface_destroy(chain); 3594 ipfw_destroy_srv(chain); 3595 IPFW_LOCK_DESTROY(chain); 3596 ipfw_dyn_uninit(1); /* free the remaining parts */ 3597 ipfw_destroy_counters(); 3598 ipfw_bpf_uninit(last); 3599 return (0); 3600 } 3601 3602 /* 3603 * Module event handler. 3604 * In general we have the choice of handling most of these events by the 3605 * event handler or by the (VNET_)SYS(UN)INIT handlers. I have chosen to 3606 * use the SYSINIT handlers as they are more capable of expressing the 3607 * flow of control during module and vnet operations, so this is just 3608 * a skeleton. Note there is no SYSINIT equivalent of the module 3609 * SHUTDOWN handler, but we don't have anything to do in that case anyhow. 3610 */ 3611 static int 3612 ipfw_modevent(module_t mod, int type, void *unused) 3613 { 3614 int err = 0; 3615 3616 switch (type) { 3617 case MOD_LOAD: 3618 /* Called once at module load or 3619 * system boot if compiled in. */ 3620 break; 3621 case MOD_QUIESCE: 3622 /* Called before unload. May veto unloading. */ 3623 break; 3624 case MOD_UNLOAD: 3625 /* Called during unload. */ 3626 break; 3627 case MOD_SHUTDOWN: 3628 /* Called during system shutdown. */ 3629 break; 3630 default: 3631 err = EOPNOTSUPP; 3632 break; 3633 } 3634 return err; 3635 } 3636 3637 static moduledata_t ipfwmod = { 3638 "ipfw", 3639 ipfw_modevent, 3640 0 3641 }; 3642 3643 /* Define startup order. */ 3644 #define IPFW_SI_SUB_FIREWALL SI_SUB_PROTO_FIREWALL 3645 #define IPFW_MODEVENT_ORDER (SI_ORDER_ANY - 255) /* On boot slot in here. */ 3646 #define IPFW_MODULE_ORDER (IPFW_MODEVENT_ORDER + 1) /* A little later. */ 3647 #define IPFW_VNET_ORDER (IPFW_MODEVENT_ORDER + 2) /* Later still. */ 3648 3649 DECLARE_MODULE(ipfw, ipfwmod, IPFW_SI_SUB_FIREWALL, IPFW_MODEVENT_ORDER); 3650 FEATURE(ipfw_ctl3, "ipfw new sockopt calls"); 3651 MODULE_VERSION(ipfw, 3); 3652 /* should declare some dependencies here */ 3653 3654 /* 3655 * Starting up. Done in order after ipfwmod() has been called. 3656 * VNET_SYSINIT is also called for each existing vnet and each new vnet. 3657 */ 3658 SYSINIT(ipfw_init, IPFW_SI_SUB_FIREWALL, IPFW_MODULE_ORDER, 3659 ipfw_init, NULL); 3660 VNET_SYSINIT(vnet_ipfw_init, IPFW_SI_SUB_FIREWALL, IPFW_VNET_ORDER, 3661 vnet_ipfw_init, NULL); 3662 3663 /* 3664 * Closing up shop. These are done in REVERSE ORDER, but still 3665 * after ipfwmod() has been called. Not called on reboot. 
3666 * VNET_SYSUNINIT is also called for each exiting vnet as it exits, 3667 * or when the module is unloaded. 3668 */ 3669 SYSUNINIT(ipfw_destroy, IPFW_SI_SUB_FIREWALL, IPFW_MODULE_ORDER, 3670 ipfw_destroy, NULL); 3671 VNET_SYSUNINIT(vnet_ipfw_uninit, IPFW_SI_SUB_FIREWALL, IPFW_VNET_ORDER, 3672 vnet_ipfw_uninit, NULL); 3673 /* end of file */ 3674