1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 /* 32 * The FreeBSD IP packet firewall, main file 33 */ 34 35 #include "opt_ipfw.h" 36 #include "opt_ipdivert.h" 37 #include "opt_inet.h" 38 #ifndef INET 39 #error "IPFIREWALL requires INET" 40 #endif /* INET */ 41 #include "opt_inet6.h" 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/condvar.h> 46 #include <sys/counter.h> 47 #include <sys/eventhandler.h> 48 #include <sys/malloc.h> 49 #include <sys/mbuf.h> 50 #include <sys/kernel.h> 51 #include <sys/lock.h> 52 #include <sys/jail.h> 53 #include <sys/module.h> 54 #include <sys/priv.h> 55 #include <sys/proc.h> 56 #include <sys/rwlock.h> 57 #include <sys/rmlock.h> 58 #include <sys/sdt.h> 59 #include <sys/socket.h> 60 #include <sys/socketvar.h> 61 #include <sys/sysctl.h> 62 #include <sys/syslog.h> 63 #include <sys/ucred.h> 64 #include <net/ethernet.h> /* for ETHERTYPE_IP */ 65 #include <net/if.h> 66 #include <net/if_var.h> 67 #include <net/route.h> 68 #include <net/route/nhop.h> 69 #include <net/pfil.h> 70 #include <net/vnet.h> 71 72 #include <netpfil/pf/pf_mtag.h> 73 74 #include <netinet/in.h> 75 #include <netinet/in_var.h> 76 #include <netinet/in_pcb.h> 77 #include <netinet/ip.h> 78 #include <netinet/ip_var.h> 79 #include <netinet/ip_icmp.h> 80 #include <netinet/ip_fw.h> 81 #include <netinet/ip_carp.h> 82 #include <netinet/pim.h> 83 #include <netinet/tcp_var.h> 84 #include <netinet/udp.h> 85 #include <netinet/udp_var.h> 86 #include <netinet/sctp.h> 87 #include <netinet/sctp_crc32.h> 88 #include <netinet/sctp_header.h> 89 90 #include <netinet/ip6.h> 91 #include <netinet/icmp6.h> 92 #include <netinet/in_fib.h> 93 #ifdef INET6 94 #include <netinet6/in6_fib.h> 95 #include <netinet6/in6_pcb.h> 96 #include <netinet6/scope6_var.h> 97 #include <netinet6/ip6_var.h> 98 #endif 99 100 #include <net/if_gre.h> /* for struct grehdr */ 101 102 #include <netpfil/ipfw/ip_fw_private.h> 103 104 #include <machine/in_cksum.h> /* XXX for in_cksum */ 
#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

#define IPFW_PROBE(probe, arg0, arg1, arg2, arg3, arg4, arg5) \
    SDT_PROBE6(ipfw, , , probe, arg0, arg1, arg2, arg3, arg4, arg5)

SDT_PROVIDER_DEFINE(ipfw);
SDT_PROBE_DEFINE6(ipfw, , , rule__matched,
    "int",			/* retval */
    "int",			/* af */
    "void *",			/* src addr */
    "void *",			/* dst addr */
    "struct ip_fw_args *",	/* args */
    "struct ip_fw *"		/* rule */);

/*
 * static variables followed by global ones.
 * All ipfw global variables are here.
 */

VNET_DEFINE_STATIC(int, fw_deny_unknown_exthdrs);
#define	V_fw_deny_unknown_exthdrs	VNET(fw_deny_unknown_exthdrs)

VNET_DEFINE_STATIC(int, fw_permit_single_frag6) = 1;
#define	V_fw_permit_single_frag6	VNET(fw_permit_single_frag6)

#ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
static int default_to_accept = 1;
#else
static int default_to_accept;
#endif

VNET_DEFINE(int, autoinc_step);
VNET_DEFINE(int, fw_one_pass) = 1;

VNET_DEFINE(unsigned int, fw_tables_max);
VNET_DEFINE(unsigned int, fw_tables_sets) = 0;	/* Don't use set-aware tables */
/* Use 128 tables by default */
static unsigned int default_fw_tables = IPFW_TABLES_DEFAULT;

static int jump_lookup_pos(struct ip_fw_chain *chain, struct ip_fw *f, int num,
    int tablearg, int jump_backwards);
#ifndef LINEAR_SKIPTO
static int jump_cached(struct ip_fw_chain *chain, struct ip_fw *f, int num,
    int tablearg, int jump_backwards);
#define	JUMP(ch, f, num, targ, back)	jump_cached(ch, f, num, targ, back)
#else
#define	JUMP(ch, f, num, targ, back)	jump_lookup_pos(ch, f, num, targ, back)
#endif

/*
 * Each rule belongs to one of 32 different sets (0..31).
 * The variable set_disable contains one bit per set.
 * If the bit is set, all rules in the corresponding set
 * are disabled. Set RESVD_SET(31) is reserved for the default rule
 * and rules that are not deleted by the flush command,
 * and CANNOT be disabled.
 * Rules in set RESVD_SET can only be deleted individually.
 */
VNET_DEFINE(u_int32_t, set_disable);
#define	V_set_disable			VNET(set_disable)

VNET_DEFINE(int, fw_verbose);
/* counter for ipfw_log(NULL...) */
VNET_DEFINE(u_int64_t, norule_counter);
VNET_DEFINE(int, verbose_limit);

/* layer3_chain contains the list of rules for layer 3 */
VNET_DEFINE(struct ip_fw_chain, layer3_chain);

/* ipfw_vnet_ready controls when we are open for business */
VNET_DEFINE(int, ipfw_vnet_ready) = 0;

VNET_DEFINE(int, ipfw_nat_ready) = 0;

/*
 * Hooks filled in by the ipfw_nat module when it is loaded; NULL (or
 * unset) until then, guarded by ipfw_nat_ready above.
 */
ipfw_nat_t *ipfw_nat_ptr = NULL;
struct cfg_nat *(*lookup_nat_ptr)(struct nat_list *, int);
ipfw_nat_cfg_t *ipfw_nat_cfg_ptr;
ipfw_nat_cfg_t *ipfw_nat_del_ptr;
ipfw_nat_cfg_t *ipfw_nat_get_cfg_ptr;
ipfw_nat_cfg_t *ipfw_nat_get_log_ptr;

#ifdef SYSCTL_NODE
uint32_t dummy_def = IPFW_DEFAULT_RULE;
static int sysctl_ipfw_table_num(SYSCTL_HANDLER_ARGS);
static int sysctl_ipfw_tables_sets(SYSCTL_HANDLER_ARGS);

SYSBEGIN(f3)

SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Firewall");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, one_pass,
    CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE3, &VNET_NAME(fw_one_pass), 0,
    "Only do a single pass through ipfw when using dummynet(4)");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, autoinc_step,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(autoinc_step), 0,
    "Rule number auto-increment step");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose,
    CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE3, &VNET_NAME(fw_verbose), 0,
    "Log matches to ipfw rules");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(verbose_limit), 0,
    "Set upper limit of matches of ipfw rules logged");
SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, default_rule, CTLFLAG_RD,
    &dummy_def, 0,
    "The default/max possible rule number.");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, tables_max,
    CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_ipfw_table_num, "IU",
    "Maximum number of concurrently used tables");
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, tables_sets,
    CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_ipfw_tables_sets, "IU",
    "Use per-set namespace for tables");
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, default_to_accept, CTLFLAG_RDTUN,
    &default_to_accept, 0,
    "Make the default rule accept all packets.");
TUNABLE_INT("net.inet.ip.fw.tables_max", (int *)&default_fw_tables);
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count,
    CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(layer3_chain.n_rules), 0,
    "Number of static rules");

#ifdef INET6
SYSCTL_DECL(_net_inet6_ip6);
SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, fw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Firewall");
SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, deny_unknown_exthdrs,
    CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE,
    &VNET_NAME(fw_deny_unknown_exthdrs), 0,
    "Deny packets with unknown IPv6 Extension Headers");
SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, permit_single_frag6,
    CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE,
    &VNET_NAME(fw_permit_single_frag6), 0,
    "Permit single packet IPv6 fragments");
#endif /* INET6 */

SYSEND

#endif /* SYSCTL_NODE */

/*
 * Some macros used in the various matching options.
 * L3HDR maps an ipv4 pointer into a layer3 header pointer of type T
 * Other macros just cast void * into the appropriate type
 */
#define	L3HDR(T, ip)	((T *)((u_int32_t *)(ip) + (ip)->ip_hl))
#define	TCP(p)		((struct tcphdr *)(p))
#define	SCTP(p)		((struct sctphdr *)(p))
#define	UDP(p)		((struct udphdr *)(p))
#define	ICMP(p)		((struct icmphdr *)(p))
#define	ICMP6(p)	((struct icmp6_hdr *)(p))

/*
 * Match if the ICMP type is in range and its bit is set in the
 * bitmap carried in the instruction (cmd->d[0]).
 */
static __inline int
icmptype_match(struct icmphdr *icmp, ipfw_insn_u32 *cmd)
{
	int type = icmp->icmp_type;

	return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1<<type)) );
}

/* Bitmap of the ICMP types that are queries (i.e. elicit a reply). */
#define	TT	( (1 << ICMP_ECHO) | (1 << ICMP_ROUTERSOLICIT) | \
    (1 << ICMP_TSTAMP) | (1 << ICMP_IREQ) | (1 << ICMP_MASKREQ) )

/* Return non-zero if the ICMP packet is a query (see TT above). */
static int
is_icmp_query(struct icmphdr *icmp)
{
	int type = icmp->icmp_type;

	return (type <= ICMP_MAXTYPE && (TT & (1<<type)) );
}
#undef TT

/*
 * The following checks use two arrays of 8 or 16 bits to store the
 * bits that we want set or clear, respectively. They are in the
 * low and high half of cmd->arg1 or cmd->d[0].
 *
 * We scan options and store the bits we find set. We succeed if
 *
 *	(want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear
 *
 * The code is sometimes optimized not to store additional variables.
 */

static int
flags_match(ipfw_insn *cmd, u_int8_t bits)
{
	u_char want_clear;
	bits = ~bits;	/* after this, a 1 marks a bit CLEAR in the packet */

	if ( ((cmd->arg1 & 0xff) & bits) != 0)
		return 0; /* some bits we want set were clear */
	want_clear = (cmd->arg1 >> 8) & 0xff;
	if ( (want_clear & bits) != want_clear)
		return 0; /* some bits we want clear were set */
	return 1;
}

/*
 * Walk the IPv4 options in the header, collect the option classes
 * found (LSRR/SSRR/RR/TS) into a bitmap and match it against the
 * set/clear masks in the instruction via flags_match().
 * Returns 0 on zero-length or truncated options.
 */
static int
ipopts_match(struct ip *ip, ipfw_insn *cmd)
{
	int optlen, bits = 0;
	u_char *cp = (u_char *)(ip + 1);
	int x = (ip->ip_hl << 2) - sizeof (struct ip);

	for (; x > 0; x -= optlen, cp += optlen) {
		int opt = cp[IPOPT_OPTVAL];

		if (opt == IPOPT_EOL)
			break;
		if (opt == IPOPT_NOP)
			optlen = 1;
		else {
			optlen = cp[IPOPT_OLEN];
			if (optlen <= 0 || optlen > x)
				return 0; /* invalid or truncated */
		}
		switch (opt) {
		default:
			break;

		case IPOPT_LSRR:
			bits |= IP_FW_IPOPT_LSRR;
			break;

		case IPOPT_SSRR:
			bits |= IP_FW_IPOPT_SSRR;
			break;

		case IPOPT_RR:
			bits |= IP_FW_IPOPT_RR;
			break;

		case IPOPT_TS:
			bits |= IP_FW_IPOPT_TS;
			break;
		}
	}
	return (flags_match(cmd, bits));
}

/*
 * Parse TCP options. The logic copied from tcp_dooptions().
 */
static int
tcpopts_parse(const struct tcphdr *tcp, uint16_t *mss)
{
	const u_char *cp = (const u_char *)(tcp + 1);
	int optlen, bits = 0;
	int cnt = (tcp->th_off << 2) - sizeof(struct tcphdr);

	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		int opt = cp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = cp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}

		switch (opt) {
		default:
			break;

		case TCPOPT_MAXSEG:
			if (optlen != TCPOLEN_MAXSEG)
				break;
			bits |= IP_FW_TCPOPT_MSS;
			/* report the advertised MSS if the caller asked */
			if (mss != NULL)
				*mss = be16dec(cp + 2);
			break;

		case TCPOPT_WINDOW:
			if (optlen == TCPOLEN_WINDOW)
				bits |= IP_FW_TCPOPT_WINDOW;
			break;

		case TCPOPT_SACK_PERMITTED:
			if (optlen == TCPOLEN_SACK_PERMITTED)
				bits |= IP_FW_TCPOPT_SACK;
			break;

		case TCPOPT_SACK:
			if (optlen > 2 && (optlen - 2) % TCPOLEN_SACK == 0)
				bits |= IP_FW_TCPOPT_SACK;
			break;

		case TCPOPT_TIMESTAMP:
			if (optlen == TCPOLEN_TIMESTAMP)
				bits |= IP_FW_TCPOPT_TS;
			break;
		}
	}
	return (bits);
}

/* Match TCP options present in the segment against the instruction. */
static int
tcpopts_match(struct tcphdr *tcp, ipfw_insn *cmd)
{

	return (flags_match(cmd, tcpopts_parse(tcp, NULL)));
}

/*
 * Match the packet's interface against the instruction, either by
 * name (possibly a glob, or indirected through a table when name[0]
 * is '\1'), or by one of the interface's IPv4 addresses.
 */
static int
iface_match(struct ifnet *ifp, ipfw_insn_if *cmd, struct ip_fw_chain *chain,
    uint32_t *tablearg)
{

	if (ifp == NULL)	/* no iface with this packet, match fails */
		return (0);

	/* Check by name or by IP address */
	if (cmd->name[0] != '\0') { /* match by name */
		if (cmd->name[0] == '\1') /* use tablearg to match */
			return ipfw_lookup_table(chain, cmd->p.kidx, 0,
			    &ifp->if_index, tablearg);
		/* Check name */
		if (cmd->p.glob) {
			if (fnmatch(cmd->name, ifp->if_xname, 0) == 0)
				return(1);
		} else {
			if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0)
				return(1);
		}
	} else {
#if !defined(USERSPACE) && defined(__FreeBSD__)	/* and OSX too ? */
		struct ifaddr *ia;

		NET_EPOCH_ASSERT();

		CK_STAILQ_FOREACH(ia, &ifp->if_addrhead, ifa_link) {
			if (ia->ifa_addr->sa_family != AF_INET)
				continue;
			if (cmd->p.ip.s_addr == ((struct sockaddr_in *)
			    (ia->ifa_addr))->sin_addr.s_addr)
				return (1);	/* match */
		}
#endif /* __FreeBSD__ */
	}
	return(0);	/* no match, fail ... */
}

/*
 * The verify_path function checks if a route to the src exists and
 * if it is reachable via ifp (when provided).
 *
 * The 'verrevpath' option checks that the interface that an IP packet
 * arrives on is the same interface that traffic destined for the
 * packet's source address would be routed out of.
 * The 'versrcreach' option just checks that the source address is
 * reachable via any route (except default) in the routing table.
 * These two are a measure to block forged packets. This is also
 * commonly known as "anti-spoofing" or Unicast Reverse Path
 * Forwarding (Unicast RFP) in Cisco-ese. The name of the knobs
 * is purposely reminiscent of the Cisco IOS command,
 *
 *   ip verify unicast reverse-path
 *   ip verify unicast source reachable-via any
 *
 * which implements the same functionality. But note that the syntax
 * is misleading, and the check may be performed on all IP packets
 * whether unicast, multicast, or broadcast.
 */
static int
verify_path(struct in_addr src, struct ifnet *ifp, u_int fib)
{
#if defined(USERSPACE) || !defined(__FreeBSD__)
	return 0;
#else
	struct nhop_object *nh;

	nh = fib4_lookup(fib, src, 0, NHR_NONE, 0);
	if (nh == NULL)
		return (0);

	/*
	 * If ifp is provided, check for equality with rtentry.
	 * We should use rt->rt_ifa->ifa_ifp, instead of rt->rt_ifp,
	 * in order to pass packets injected back by if_simloop():
	 * routing entry (via lo0) for our own address
	 * may exist, so we need to handle routing asymmetry.
	 */
	if (ifp != NULL && ifp != nh->nh_aifp)
		return (0);

	/* if no ifp provided, check if rtentry is not default route */
	if (ifp == NULL && (nh->nh_flags & NHF_DEFAULT) != 0)
		return (0);

	/* or if this is a blackhole/reject route */
	if (ifp == NULL && (nh->nh_flags & (NHF_REJECT|NHF_BLACKHOLE)) != 0)
		return (0);

	/* found valid route */
	return 1;
#endif /* __FreeBSD__ */
}

/*
 * Generate an SCTP packet containing an ABORT chunk. The verification tag
 * is given by vtag. The T-bit is set in the ABORT chunk if and only if
 * reflected is not 0.
 */

static struct mbuf *
ipfw_send_abort(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t vtag,
    int reflected)
{
	struct mbuf *m;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct sctphdr *sctp;
	struct sctp_chunkhdr *chunk;
	u_int16_t hlen, plen, tlen;

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);

	M_SETFIB(m, id->fib);
#ifdef MAC
	if (replyto != NULL)
		mac_netinet_firewall_reply(replyto, m);
	else
		mac_netinet_firewall_send(m);
#else
	(void)replyto;		/* don't warn about unused arg */
#endif

	/* pick the IP header size according to the flow's address family */
	switch (id->addr_type) {
	case 4:
		hlen = sizeof(struct ip);
		break;
#ifdef INET6
	case 6:
		hlen = sizeof(struct ip6_hdr);
		break;
#endif
	default:
		/* XXX: log me?!? */
		FREE_PKT(m);
		return (NULL);
	}
	plen = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
	tlen = hlen + plen;
	m->m_data += max_linkhdr;
	m->m_flags |= M_SKIP_FIREWALL;
	m->m_pkthdr.len = m->m_len = tlen;
	m->m_pkthdr.rcvif = NULL;
	bzero(m->m_data, tlen);

	/* note: src/dst are swapped as this is a reply to 'id' */
	switch (id->addr_type) {
	case 4:
		ip = mtod(m, struct ip *);

		ip->ip_v = 4;
		ip->ip_hl = sizeof(struct ip) >> 2;
		ip->ip_tos = IPTOS_LOWDELAY;
		ip->ip_len = htons(tlen);
		ip->ip_id = htons(0);
		ip->ip_off = htons(0);
		ip->ip_ttl = V_ip_defttl;
		ip->ip_p = IPPROTO_SCTP;
		ip->ip_sum = 0;
		ip->ip_src.s_addr = htonl(id->dst_ip);
		ip->ip_dst.s_addr = htonl(id->src_ip);

		sctp = (struct sctphdr *)(ip + 1);
		break;
#ifdef INET6
	case 6:
		ip6 = mtod(m, struct ip6_hdr *);

		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_plen = htons(plen);
		ip6->ip6_nxt = IPPROTO_SCTP;
		ip6->ip6_hlim = IPV6_DEFHLIM;
		ip6->ip6_src = id->dst_ip6;
		ip6->ip6_dst = id->src_ip6;

		sctp = (struct sctphdr *)(ip6 + 1);
		break;
#endif
	}

	sctp->src_port = htons(id->dst_port);
	sctp->dest_port = htons(id->src_port);
	sctp->v_tag = htonl(vtag);
	sctp->checksum = htonl(0);

	chunk = (struct sctp_chunkhdr *)(sctp + 1);
	chunk->chunk_type = SCTP_ABORT_ASSOCIATION;
	chunk->chunk_flags = 0;
	if (reflected != 0) {
		chunk->chunk_flags |= SCTP_HAD_NO_TCB;
	}
	chunk->chunk_length = htons(sizeof(struct sctp_chunkhdr));

	/* computed last, over the fully-built packet */
	sctp->checksum = sctp_calculate_cksum(m, hlen);

	return (m);
}

/*
 * Generate a TCP packet, containing either a RST or a keepalive.
 * When flags & TH_RST, we are sending a RST packet, because of a
 * "reset" action matched the packet.
 * Otherwise we are sending a keepalive, and flags & TH_ACK is set.
 * The 'replyto' mbuf is the mbuf being replied to, if any, and is required
 * so that MAC can label the reply appropriately.
 */
struct mbuf *
ipfw_send_pkt(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t seq,
    u_int32_t ack, int flags)
{
	struct mbuf *m = NULL;		/* stupid compiler */
	struct ip *h = NULL;		/* stupid compiler */
#ifdef INET6
	struct ip6_hdr *h6 = NULL;
#endif
	struct tcphdr *th = NULL;
	int len, dir;

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);

	M_SETFIB(m, id->fib);
#ifdef MAC
	if (replyto != NULL)
		mac_netinet_firewall_reply(replyto, m);
	else
		mac_netinet_firewall_send(m);
#else
	(void)replyto;		/* don't warn about unused arg */
#endif

	/* pick the header size according to the flow's address family */
	switch (id->addr_type) {
	case 4:
		len = sizeof(struct ip) + sizeof(struct tcphdr);
		break;
#ifdef INET6
	case 6:
		len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		break;
#endif
	default:
		/* XXX: log me?!? */
		FREE_PKT(m);
		return (NULL);
	}
	/* dir != 0 when the reply goes in the same direction as the flow */
	dir = ((flags & (TH_SYN | TH_RST)) == TH_SYN);

	m->m_data += max_linkhdr;
	m->m_flags |= M_SKIP_FIREWALL;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	bzero(m->m_data, len);

	switch (id->addr_type) {
	case 4:
		h = mtod(m, struct ip *);

		/* prepare for checksum */
		h->ip_p = IPPROTO_TCP;
		h->ip_len = htons(sizeof(struct tcphdr));
		if (dir) {
			h->ip_src.s_addr = htonl(id->src_ip);
			h->ip_dst.s_addr = htonl(id->dst_ip);
		} else {
			h->ip_src.s_addr = htonl(id->dst_ip);
			h->ip_dst.s_addr = htonl(id->src_ip);
		}

		th = (struct tcphdr *)(h + 1);
		break;
#ifdef INET6
	case 6:
		h6 = mtod(m, struct ip6_hdr *);

		/* prepare for checksum */
		h6->ip6_nxt = IPPROTO_TCP;
		h6->ip6_plen = htons(sizeof(struct tcphdr));
		if (dir) {
			h6->ip6_src = id->src_ip6;
			h6->ip6_dst = id->dst_ip6;
		} else {
			h6->ip6_src = id->dst_ip6;
			h6->ip6_dst = id->src_ip6;
		}

		th = (struct tcphdr *)(h6 + 1);
		break;
#endif
	}

	if (dir) {
		th->th_sport = htons(id->src_port);
		th->th_dport = htons(id->dst_port);
	} else {
		th->th_sport = htons(id->dst_port);
		th->th_dport = htons(id->src_port);
	}
	th->th_off = sizeof(struct tcphdr) >> 2;

	if (flags & TH_RST) {
		if (flags & TH_ACK) {
			/* RST in response to an ACK segment */
			th->th_seq = htonl(ack);
			th->th_flags = TH_RST;
		} else {
			/* RST+ACK; SYN consumes one sequence number */
			if (flags & TH_SYN)
				seq++;
			th->th_ack = htonl(seq);
			th->th_flags = TH_RST | TH_ACK;
		}
	} else {
		/*
		 * Keepalive - use caller provided sequence numbers
		 */
		th->th_seq = htonl(seq);
		th->th_ack = htonl(ack);
		th->th_flags = TH_ACK;
	}

	/*
	 * Checksum first (over the pseudo-header built above), then
	 * fill in the remaining IP header fields.
	 */
	switch (id->addr_type) {
	case 4:
		th->th_sum = in_cksum(m, len);

		/* finish the ip header */
		h->ip_v = 4;
		h->ip_hl = sizeof(*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		h->ip_off = htons(0);
		h->ip_len = htons(len);
		h->ip_ttl = V_ip_defttl;
		h->ip_sum = 0;
		break;
#ifdef INET6
	case 6:
		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(*h6),
		    sizeof(struct tcphdr));

		/* finish the ip6 header */
		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;
		break;
#endif
	}

	return (m);
}

#ifdef INET6
/*
 * ipv6 specific rules here...
 */
/* Match an ICMPv6 type against the 256-bit bitmap in the instruction. */
static __inline int
icmp6type_match(int type, ipfw_insn_u32 *cmd)
{
	return (type <= ICMP6_MAXTYPE && (cmd->d[type/32] & (1<<(type%32)) ) );
}

/* Match the IPv6 flow id against the list of ids in the instruction. */
static int
flow6id_match(int curr_flow, ipfw_insn_u32 *cmd)
{
	int i;
	for (i=0; i <= cmd->o.arg1; ++i)
		if (curr_flow == cmd->d[i])
			return 1;
	return 0;
}

/* support for IP6_*_ME opcodes */
static const struct in6_addr lla_mask = {{{
	0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
}}};

/*
 * Return non-zero if in6 is one of our own addresses. Link-local
 * addresses are compared with the embedded scope zeroed out
 * (lla_mask), everything else goes through in6_localip().
 */
static int
ipfw_localip6(struct in6_addr *in6)
{
	struct rm_priotracker in6_ifa_tracker;
	struct in6_ifaddr *ia;

	if (IN6_IS_ADDR_MULTICAST(in6))
		return (0);

	if (!IN6_IS_ADDR_LINKLOCAL(in6))
		return (in6_localip(in6));

	IN6_IFADDR_RLOCK(&in6_ifa_tracker);
	CK_STAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
		if (!IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr))
			continue;
		if (IN6_ARE_MASKED_ADDR_EQUAL(&ia->ia_addr.sin6_addr,
		    in6, &lla_mask)) {
			IN6_IFADDR_RUNLOCK(&in6_ifa_tracker);
			return (1);
		}
	}
	IN6_IFADDR_RUNLOCK(&in6_ifa_tracker);
	return (0);
}

/* IPv6 counterpart of verify_path(); link-locals always pass. */
static int
verify_path6(struct in6_addr *src, struct ifnet *ifp, u_int fib)
{
	struct nhop_object *nh;

	if (IN6_IS_SCOPE_LINKLOCAL(src))
		return (1);

	nh = fib6_lookup(fib, src, 0, NHR_NONE, 0);
	if (nh == NULL)
		return (0);

	/* If ifp is provided, check for equality with route table. */
	if (ifp != NULL && ifp != nh->nh_aifp)
		return (0);

	/* if no ifp provided, check if rtentry is not default route */
	if (ifp == NULL && (nh->nh_flags & NHF_DEFAULT) != 0)
		return (0);

	/* or if this is a blackhole/reject route */
	if (ifp == NULL && (nh->nh_flags & (NHF_REJECT|NHF_BLACKHOLE)) != 0)
		return (0);

	/* found valid route */
	return 1;
}

/* Return non-zero if the ICMPv6 type is a query. */
static int
is_icmp6_query(int icmp6_type)
{
	if ((icmp6_type <= ICMP6_MAXTYPE) &&
	    (icmp6_type == ICMP6_ECHO_REQUEST ||
	    icmp6_type == ICMP6_MEMBERSHIP_QUERY ||
	    icmp6_type == ICMP6_WRUREQUEST ||
	    icmp6_type == ICMP6_FQDN_QUERY ||
	    icmp6_type == ICMP6_NI_QUERY))
		return (1);

	return (0);
}

/* Translate an ICMPv4 unreach code to an ICMPv6 unreach code. */
static int
map_icmp_unreach(int code)
{

	/* RFC 7915 p4.2 */
	switch (code) {
	case ICMP_UNREACH_NET:
	case ICMP_UNREACH_HOST:
	case ICMP_UNREACH_SRCFAIL:
	case ICMP_UNREACH_NET_UNKNOWN:
	case ICMP_UNREACH_HOST_UNKNOWN:
	case ICMP_UNREACH_TOSNET:
	case ICMP_UNREACH_TOSHOST:
		return (ICMP6_DST_UNREACH_NOROUTE);
	case ICMP_UNREACH_PORT:
		return (ICMP6_DST_UNREACH_NOPORT);
	default:
		/*
		 * Map the rest of codes into admit prohibited.
		 * XXX: unreach proto should be mapped into ICMPv6
		 * parameter problem, but we use only unreach type.
		 */
		return (ICMP6_DST_UNREACH_ADMIN);
	}
}

/*
 * IPv6 version of send_reject(): send a TCP RST, an SCTP ABORT or an
 * ICMPv6 unreach depending on 'code', consuming args->m.
 */
static void
send_reject6(struct ip_fw_args *args, int code, u_int hlen, struct ip6_hdr *ip6)
{
	struct mbuf *m;

	m = args->m;
	if (code == ICMP6_UNREACH_RST && args->f_id.proto == IPPROTO_TCP) {
		struct tcphdr *tcp;
		tcp = (struct tcphdr *)((char *)ip6 + hlen);

		/* never answer a RST with a RST */
		if ((tcp->th_flags & TH_RST) == 0) {
			struct mbuf *m0;
			m0 = ipfw_send_pkt(args->m, &(args->f_id),
			    ntohl(tcp->th_seq), ntohl(tcp->th_ack),
			    tcp->th_flags | TH_RST);
			if (m0 != NULL)
				ip6_output(m0, NULL, NULL, 0, NULL, NULL,
				    NULL);
		}
		FREE_PKT(m);
	} else if (code == ICMP6_UNREACH_ABORT &&
	    args->f_id.proto == IPPROTO_SCTP) {
		struct mbuf *m0;
		struct sctphdr *sctp;
		u_int32_t v_tag;
		int reflected;

		sctp = (struct sctphdr *)((char *)ip6 + hlen);
		reflected = 1;
		v_tag = ntohl(sctp->v_tag);
		/* Investigate the first chunk header if available */
		if (m->m_len >= hlen + sizeof(struct sctphdr) +
		    sizeof(struct sctp_chunkhdr)) {
			struct sctp_chunkhdr *chunk;

			chunk = (struct sctp_chunkhdr *)(sctp + 1);
			switch (chunk->chunk_type) {
			case SCTP_INITIATION:
				/*
				 * Packets containing an INIT chunk MUST have
				 * a zero v-tag.
				 */
				if (v_tag != 0) {
					v_tag = 0;
					break;
				}
				/* INIT chunk MUST NOT be bundled */
				if (m->m_pkthdr.len >
				    hlen + sizeof(struct sctphdr) +
				    ntohs(chunk->chunk_length) + 3) {
					break;
				}
				/* Use the initiate tag if available */
				if ((m->m_len >= hlen + sizeof(struct sctphdr) +
				    sizeof(struct sctp_chunkhdr) +
				    offsetof(struct sctp_init, a_rwnd))) {
					struct sctp_init *init;

					init = (struct sctp_init *)(chunk + 1);
					v_tag = ntohl(init->initiate_tag);
					reflected = 0;
				}
				break;
			case SCTP_ABORT_ASSOCIATION:
				/*
				 * If the packet contains an ABORT chunk, don't
				 * reply.
				 * XXX: We should search through all chunks,
				 * but do not do that to avoid attacks.
				 */
				v_tag = 0;
				break;
			}
		}
		if (v_tag == 0) {
			m0 = NULL;
		} else {
			m0 = ipfw_send_abort(args->m, &(args->f_id), v_tag,
			    reflected);
		}
		if (m0 != NULL)
			ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
		FREE_PKT(m);
	} else if (code != ICMP6_UNREACH_RST && code != ICMP6_UNREACH_ABORT) {
		/* Send an ICMPv6 unreach. */
#if 0
		/*
		 * Unlike above, the mbufs need to line up with the ip6 hdr,
		 * as the contents are read. We need to m_adj() the
		 * needed amount.
		 * The mbuf will however be thrown away so we can adjust it.
		 * Remember we did an m_pullup on it already so we
		 * can make some assumptions about contiguousness.
		 */
		if (args->L3offset)
			m_adj(m, args->L3offset);
#endif
		icmp6_error(m, ICMP6_DST_UNREACH, code, 0);
	} else
		FREE_PKT(m);

	args->m = NULL;
}

#endif /* INET6 */

/*
 * sends a reject message, consuming the mbuf passed as an argument.
 */
static void
send_reject(struct ip_fw_args *args, int code, int iplen, struct ip *ip)
{

#if 0
	/*
	 * XXX When ip is not guaranteed to be at mtod() we will
	 * need to account for this.
	 * The mbuf will however be thrown away so we can adjust it.
	 * Remember we did an m_pullup on it already so we
	 * can make some assumptions about contiguousness.
	 */
	if (args->L3offset)
		m_adj(m, args->L3offset);
#endif
	if (code != ICMP_REJECT_RST && code != ICMP_REJECT_ABORT) {
		/* Send an ICMP unreach */
		icmp_error(args->m, ICMP_UNREACH, code, 0L, 0);
	} else if (code == ICMP_REJECT_RST && args->f_id.proto == IPPROTO_TCP) {
		struct tcphdr *const tcp =
		    L3HDR(struct tcphdr, mtod(args->m, struct ip *));
		/* never answer a RST with a RST */
		if ( (tcp->th_flags & TH_RST) == 0) {
			struct mbuf *m;
			m = ipfw_send_pkt(args->m, &(args->f_id),
			    ntohl(tcp->th_seq), ntohl(tcp->th_ack),
			    tcp->th_flags | TH_RST);
			if (m != NULL)
				ip_output(m, NULL, NULL, 0, NULL, NULL);
		}
		FREE_PKT(args->m);
	} else if (code == ICMP_REJECT_ABORT &&
	    args->f_id.proto == IPPROTO_SCTP) {
		struct mbuf *m;
		struct sctphdr *sctp;
		struct sctp_chunkhdr *chunk;
		struct sctp_init *init;
		u_int32_t v_tag;
		int reflected;

		sctp = L3HDR(struct sctphdr, mtod(args->m, struct ip *));
		reflected = 1;
		v_tag = ntohl(sctp->v_tag);
		if (iplen >= (ip->ip_hl << 2) + sizeof(struct sctphdr) +
		    sizeof(struct sctp_chunkhdr)) {
			/* Look at the first chunk header if available */
			chunk = (struct sctp_chunkhdr *)(sctp + 1);
			switch (chunk->chunk_type) {
			case SCTP_INITIATION:
				/*
				 * Packets containing an INIT chunk MUST have
				 * a zero v-tag.
				 */
				if (v_tag != 0) {
					v_tag = 0;
					break;
				}
				/* INIT chunk MUST NOT be bundled */
				if (iplen >
				    (ip->ip_hl << 2) + sizeof(struct sctphdr) +
				    ntohs(chunk->chunk_length) + 3) {
					break;
				}
				/* Use the initiate tag if available */
				if ((iplen >= (ip->ip_hl << 2) +
				    sizeof(struct sctphdr) +
				    sizeof(struct sctp_chunkhdr) +
				    offsetof(struct sctp_init, a_rwnd))) {
					init = (struct sctp_init *)(chunk + 1);
					v_tag = ntohl(init->initiate_tag);
					reflected = 0;
				}
				break;
			case SCTP_ABORT_ASSOCIATION:
				/*
				 * If the packet contains an ABORT chunk, don't
				 * reply.
				 * XXX: We should search through all chunks,
				 * but do not do that to avoid attacks.
				 */
				v_tag = 0;
				break;
			}
		}
		if (v_tag == 0) {
			m = NULL;
		} else {
			m = ipfw_send_abort(args->m, &(args->f_id), v_tag,
			    reflected);
		}
		if (m != NULL)
			ip_output(m, NULL, NULL, 0, NULL, NULL);
		FREE_PKT(args->m);
	} else
		FREE_PKT(args->m);
	args->m = NULL;
}

/*
 * Support for uid/gid/jail lookup. These tests are expensive
 * (because we may need to look into the list of active sockets)
 * so we cache the results. ugid_lookupp is 0 if we have not
 * yet done a lookup, 1 if we succeeded, and -1 if we tried
 * and failed. The function always returns the match value.
 * We could actually spare the variable and use *uc, setting
 * it to '(void *)check_uidgid' if we have no info, NULL if
 * we tried and failed, or any other value if successful.
 */
static int
check_uidgid(ipfw_insn_u32 *insn, struct ip_fw_args *args, int *ugid_lookupp,
    struct ucred **uc)
{
#if defined(USERSPACE)
	return 0;	// not supported in userspace
#else
#ifndef __FreeBSD__
	/* XXX */
	return cred_check(insn, proto, oif,
	    dst_ip, dst_port, src_ip, src_port,
	    (struct bsd_ucred *)uc, ugid_lookupp, ((struct mbuf *)inp)->m_skb);
#else  /* FreeBSD */
	struct in_addr src_ip, dst_ip;
	struct inpcbinfo *pi;
	struct ipfw_flow_id *id;
	struct inpcb *pcb, *inp;
	int lookupflags;
	int match;

	id = &args->f_id;
	inp = args->inp;

	/*
	 * Check to see if the UDP or TCP stack supplied us with
	 * the PCB. If so, rather than holding a lock and looking
	 * up the PCB, we can use the one that was supplied.
	 */
	if (inp && *ugid_lookupp == 0) {
		INP_LOCK_ASSERT(inp);
		if (inp->inp_socket != NULL) {
			/*
			 * crhold() takes a reference on the credentials;
			 * NOTE(review): presumably released by the caller
			 * when the per-packet cache is torn down — confirm.
			 */
			*uc = crhold(inp->inp_cred);
			*ugid_lookupp = 1;
		} else
			*ugid_lookupp = -1;
	}
	/*
	 * If we have already been here and the packet has no
	 * PCB entry associated with it, then we can safely
	 * assume that this is a no match.
	 */
	if (*ugid_lookupp == -1)
		return (0);
	/*
	 * Select the PCB database and lookup flags for the protocol.
	 * UDP/UDP-Lite allow wildcard (unconnected) sockets; any other
	 * protocol carries no port info and cannot match uid/gid/jail.
	 */
	if (id->proto == IPPROTO_TCP) {
		lookupflags = 0;
		pi = &V_tcbinfo;
	} else if (id->proto == IPPROTO_UDP) {
		lookupflags = INPLOOKUP_WILDCARD;
		pi = &V_udbinfo;
	} else if (id->proto == IPPROTO_UDPLITE) {
		lookupflags = INPLOOKUP_WILDCARD;
		pi = &V_ulitecbinfo;
	} else
		return 0;
	lookupflags |= INPLOOKUP_RLOCKPCB;
	match = 0;
	if (*ugid_lookupp == 0) {
		/*
		 * No cached result yet: do the PCB lookup.  For inbound
		 * packets the local side is the destination, for outbound
		 * it is the source, hence the swapped argument order.
		 */
		if (id->addr_type == 6) {
#ifdef INET6
			if (args->flags & IPFW_ARGS_IN)
				pcb = in6_pcblookup_mbuf(pi,
				    &id->src_ip6, htons(id->src_port),
				    &id->dst_ip6, htons(id->dst_port),
				    lookupflags, NULL, args->m);
			else
				pcb = in6_pcblookup_mbuf(pi,
				    &id->dst_ip6, htons(id->dst_port),
				    &id->src_ip6, htons(id->src_port),
				    lookupflags, args->ifp, args->m);
#else
			/* No INET6 support: an IPv6 flow can never match. */
			*ugid_lookupp = -1;
			return (0);
#endif
		} else {
			/* f_id addresses are host order; lookups want net. */
			src_ip.s_addr = htonl(id->src_ip);
			dst_ip.s_addr = htonl(id->dst_ip);
			if (args->flags & IPFW_ARGS_IN)
				pcb = in_pcblookup_mbuf(pi,
				    src_ip, htons(id->src_port),
				    dst_ip, htons(id->dst_port),
				    lookupflags, NULL, args->m);
			else
				pcb = in_pcblookup_mbuf(pi,
				    dst_ip, htons(id->dst_port),
				    src_ip, htons(id->src_port),
				    lookupflags, args->ifp, args->m);
		}
		if (pcb != NULL) {
			INP_RLOCK_ASSERT(pcb);
			/* Reference taken; safe to drop the PCB lock. */
			*uc = crhold(pcb->inp_cred);
			*ugid_lookupp = 1;
			INP_RUNLOCK(pcb);
		}
		if (*ugid_lookupp == 0) {
			/*
			 * We tried and failed, set the variable to -1
			 * so we will not try again on this packet.
			 */
			*ugid_lookupp = -1;
			return (0);
		}
	}
	/* Compare the cached credentials against the rule operand. */
	if (insn->o.opcode == O_UID)
		match = ((*uc)->cr_uid == (uid_t)insn->d[0]);
	else if (insn->o.opcode == O_GID)
		match = groupmember((gid_t)insn->d[0], *uc);
	else if (insn->o.opcode == O_JAIL)
		match = ((*uc)->cr_prison->pr_id == (int)insn->d[0]);
	return (match);
#endif /* __FreeBSD__ */
#endif /* not supported in userspace */
}

/*
 * Helper function to set args with info on the rule after the matching
 * one. slot is precise, whereas we guess rule_id as they are
 * assigned sequentially.
 */
static inline void
set_match(struct ip_fw_args *args, int slot,
	struct ip_fw_chain *chain)
{
	args->rule.chain_id = chain->id;
	args->rule.slot = slot + 1; /* we use 0 as a marker */
	args->rule.rule_id = 1 + chain->map[slot]->id;
	args->rule.rulenum = chain->map[slot]->rulenum;
	args->flags |= IPFW_ARGS_REF;
}

/*
 * Resolve a skipto/jump target rule number to its position in the
 * rule array.  Unless jump_backwards is set, the target is clamped
 * to the rule following 'f' so rule processing cannot loop.
 */
static int
jump_lookup_pos(struct ip_fw_chain *chain, struct ip_fw *f, int num,
    int tablearg, int jump_backwards)
{
	int f_pos, i;

	i = IP_FW_ARG_TABLEARG(chain, num, skipto);
	/* make sure we do not jump backward */
	if (jump_backwards == 0 && i <= f->rulenum)
		i = f->rulenum + 1;

#ifndef LINEAR_SKIPTO
	/* O(1) rulenum->position map when built, else search the chain. */
	if (chain->idxmap != NULL)
		f_pos = chain->idxmap[i];
	else
		f_pos = ipfw_find_rule(chain, i, 0);
#else
	f_pos = chain->idxmap[i];
#endif /* LINEAR_SKIPTO */

	return (f_pos);
}


#ifndef LINEAR_SKIPTO
/*
 * Helper function to enable cached rule lookups using
 * cache.id and cache.pos fields in ipfw rule.
1258 */ 1259 static int 1260 jump_cached(struct ip_fw_chain *chain, struct ip_fw *f, int num, 1261 int tablearg, int jump_backwards) 1262 { 1263 int f_pos; 1264 1265 /* Can't use cache with IP_FW_TARG */ 1266 if (num == IP_FW_TARG) 1267 return jump_lookup_pos(chain, f, num, tablearg, jump_backwards); 1268 1269 /* 1270 * If possible use cached f_pos (in f->cache.pos), 1271 * whose version is written in f->cache.id (horrible hacks 1272 * to avoid changing the ABI). 1273 * 1274 * Multiple threads can execute the same rule simultaneously, 1275 * we need to ensure that cache.pos is updated before cache.id. 1276 */ 1277 1278 #ifdef __LP64__ 1279 struct ip_fw_jump_cache cache; 1280 1281 cache.raw_value = f->cache.raw_value; 1282 if (cache.id == chain->id) 1283 return (cache.pos); 1284 1285 f_pos = jump_lookup_pos(chain, f, num, tablearg, jump_backwards); 1286 1287 cache.pos = f_pos; 1288 cache.id = chain->id; 1289 f->cache.raw_value = cache.raw_value; 1290 #else 1291 if (f->cache.id == chain->id) { 1292 /* Load pos after id */ 1293 atomic_thread_fence_acq(); 1294 return (f->cache.pos); 1295 } 1296 1297 f_pos = jump_lookup_pos(chain, f, num, tablearg, jump_backwards); 1298 1299 f->cache.pos = f_pos; 1300 /* Store id after pos */ 1301 atomic_thread_fence_rel(); 1302 f->cache.id = chain->id; 1303 #endif /* !__LP64__ */ 1304 return (f_pos); 1305 } 1306 #endif /* !LINEAR_SKIPTO */ 1307 1308 #define TARG(k, f) IP_FW_ARG_TABLEARG(chain, k, f) 1309 /* 1310 * The main check routine for the firewall. 1311 * 1312 * All arguments are in args so we can modify them and return them 1313 * back to the caller. 1314 * 1315 * Parameters: 1316 * 1317 * args->m (in/out) The packet; we set to NULL when/if we nuke it. 1318 * Starts with the IP header. 1319 * args->L3offset Number of bytes bypassed if we came from L2. 1320 * e.g. often sizeof(eh) ** NOTYET ** 1321 * args->ifp Incoming or outgoing interface. 
1322 * args->divert_rule (in/out) 1323 * Skip up to the first rule past this rule number; 1324 * upon return, non-zero port number for divert or tee. 1325 * 1326 * args->rule Pointer to the last matching rule (in/out) 1327 * args->next_hop Socket we are forwarding to (out). 1328 * args->next_hop6 IPv6 next hop we are forwarding to (out). 1329 * args->f_id Addresses grabbed from the packet (out) 1330 * args->rule.info a cookie depending on rule action 1331 * 1332 * Return value: 1333 * 1334 * IP_FW_PASS the packet must be accepted 1335 * IP_FW_DENY the packet must be dropped 1336 * IP_FW_DIVERT divert packet, port in m_tag 1337 * IP_FW_TEE tee packet, port in m_tag 1338 * IP_FW_DUMMYNET to dummynet, pipe in args->cookie 1339 * IP_FW_NETGRAPH into netgraph, cookie args->cookie 1340 * args->rule contains the matching rule, 1341 * args->rule.info has additional information. 1342 * 1343 */ 1344 int 1345 ipfw_chk(struct ip_fw_args *args) 1346 { 1347 1348 /* 1349 * Local variables holding state while processing a packet: 1350 * 1351 * IMPORTANT NOTE: to speed up the processing of rules, there 1352 * are some assumption on the values of the variables, which 1353 * are documented here. Should you change them, please check 1354 * the implementation of the various instructions to make sure 1355 * that they still work. 1356 * 1357 * m | args->m Pointer to the mbuf, as received from the caller. 1358 * It may change if ipfw_chk() does an m_pullup, or if it 1359 * consumes the packet because it calls send_reject(). 1360 * XXX This has to change, so that ipfw_chk() never modifies 1361 * or consumes the buffer. 1362 * OR 1363 * args->mem Pointer to contigous memory chunk. 1364 * ip Is the beginning of the ip(4 or 6) header. 1365 * eh Ethernet header in case if input is Layer2. 
1366 */ 1367 struct mbuf *m; 1368 struct ip *ip; 1369 struct ether_header *eh; 1370 1371 /* 1372 * For rules which contain uid/gid or jail constraints, cache 1373 * a copy of the users credentials after the pcb lookup has been 1374 * executed. This will speed up the processing of rules with 1375 * these types of constraints, as well as decrease contention 1376 * on pcb related locks. 1377 */ 1378 #ifndef __FreeBSD__ 1379 struct bsd_ucred ucred_cache; 1380 #else 1381 struct ucred *ucred_cache = NULL; 1382 #endif 1383 int ucred_lookup = 0; 1384 int f_pos = 0; /* index of current rule in the array */ 1385 int retval = 0; 1386 struct ifnet *oif, *iif; 1387 1388 /* 1389 * hlen The length of the IP header. 1390 */ 1391 u_int hlen = 0; /* hlen >0 means we have an IP pkt */ 1392 1393 /* 1394 * offset The offset of a fragment. offset != 0 means that 1395 * we have a fragment at this offset of an IPv4 packet. 1396 * offset == 0 means that (if this is an IPv4 packet) 1397 * this is the first or only fragment. 1398 * For IPv6 offset|ip6f_mf == 0 means there is no Fragment Header 1399 * or there is a single packet fragment (fragment header added 1400 * without needed). We will treat a single packet fragment as if 1401 * there was no fragment header (or log/block depending on the 1402 * V_fw_permit_single_frag6 sysctl setting). 1403 */ 1404 u_short offset = 0; 1405 u_short ip6f_mf = 0; 1406 1407 /* 1408 * Local copies of addresses. They are only valid if we have 1409 * an IP packet. 1410 * 1411 * proto The protocol. Set to 0 for non-ip packets, 1412 * or to the protocol read from the packet otherwise. 1413 * proto != 0 means that we have an IPv4 packet. 1414 * 1415 * src_port, dst_port port numbers, in HOST format. Only 1416 * valid for TCP and UDP packets. 1417 * 1418 * src_ip, dst_ip ip addresses, in NETWORK format. 1419 * Only valid for IPv4 packets. 
1420 */ 1421 uint8_t proto; 1422 uint16_t src_port, dst_port; /* NOTE: host format */ 1423 struct in_addr src_ip, dst_ip; /* NOTE: network format */ 1424 int iplen = 0; 1425 int pktlen; 1426 1427 struct ipfw_dyn_info dyn_info; 1428 struct ip_fw *q = NULL; 1429 struct ip_fw_chain *chain = &V_layer3_chain; 1430 1431 /* 1432 * We store in ulp a pointer to the upper layer protocol header. 1433 * In the ipv4 case this is easy to determine from the header, 1434 * but for ipv6 we might have some additional headers in the middle. 1435 * ulp is NULL if not found. 1436 */ 1437 void *ulp = NULL; /* upper layer protocol pointer. */ 1438 1439 /* XXX ipv6 variables */ 1440 int is_ipv6 = 0; 1441 #ifdef INET6 1442 uint8_t icmp6_type = 0; 1443 #endif 1444 uint16_t ext_hd = 0; /* bits vector for extension header filtering */ 1445 /* end of ipv6 variables */ 1446 1447 int is_ipv4 = 0; 1448 1449 int done = 0; /* flag to exit the outer loop */ 1450 IPFW_RLOCK_TRACKER; 1451 bool mem; 1452 1453 if ((mem = (args->flags & IPFW_ARGS_LENMASK))) { 1454 if (args->flags & IPFW_ARGS_ETHER) { 1455 eh = (struct ether_header *)args->mem; 1456 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 1457 ip = (struct ip *) 1458 ((struct ether_vlan_header *)eh + 1); 1459 else 1460 ip = (struct ip *)(eh + 1); 1461 } else { 1462 eh = NULL; 1463 ip = (struct ip *)args->mem; 1464 } 1465 pktlen = IPFW_ARGS_LENGTH(args->flags); 1466 args->f_id.fib = args->ifp->if_fib; /* best guess */ 1467 } else { 1468 m = args->m; 1469 if (m->m_flags & M_SKIP_FIREWALL || (! V_ipfw_vnet_ready)) 1470 return (IP_FW_PASS); /* accept */ 1471 if (args->flags & IPFW_ARGS_ETHER) { 1472 /* We need some amount of data to be contiguous. 
*/ 1473 if (m->m_len < min(m->m_pkthdr.len, max_protohdr) && 1474 (args->m = m = m_pullup(m, min(m->m_pkthdr.len, 1475 max_protohdr))) == NULL) 1476 goto pullup_failed; 1477 eh = mtod(m, struct ether_header *); 1478 ip = (struct ip *)(eh + 1); 1479 } else { 1480 eh = NULL; 1481 ip = mtod(m, struct ip *); 1482 } 1483 pktlen = m->m_pkthdr.len; 1484 args->f_id.fib = M_GETFIB(m); /* mbuf not altered */ 1485 } 1486 1487 dst_ip.s_addr = 0; /* make sure it is initialized */ 1488 src_ip.s_addr = 0; /* make sure it is initialized */ 1489 src_port = dst_port = 0; 1490 1491 DYN_INFO_INIT(&dyn_info); 1492 /* 1493 * PULLUP_TO(len, p, T) makes sure that len + sizeof(T) is contiguous, 1494 * then it sets p to point at the offset "len" in the mbuf. WARNING: the 1495 * pointer might become stale after other pullups (but we never use it 1496 * this way). 1497 */ 1498 #define PULLUP_TO(_len, p, T) PULLUP_LEN(_len, p, sizeof(T)) 1499 #define EHLEN (eh != NULL ? ((char *)ip - (char *)eh) : 0) 1500 #define _PULLUP_LOCKED(_len, p, T, unlock) \ 1501 do { \ 1502 int x = (_len) + T + EHLEN; \ 1503 if (mem) { \ 1504 if (__predict_false(pktlen < x)) { \ 1505 unlock; \ 1506 goto pullup_failed; \ 1507 } \ 1508 p = (char *)args->mem + (_len) + EHLEN; \ 1509 } else { \ 1510 if (__predict_false((m)->m_len < x)) { \ 1511 args->m = m = m_pullup(m, x); \ 1512 if (m == NULL) { \ 1513 unlock; \ 1514 goto pullup_failed; \ 1515 } \ 1516 } \ 1517 p = mtod(m, char *) + (_len) + EHLEN; \ 1518 } \ 1519 } while (0) 1520 1521 #define PULLUP_LEN(_len, p, T) _PULLUP_LOCKED(_len, p, T, ) 1522 #define PULLUP_LEN_LOCKED(_len, p, T) \ 1523 _PULLUP_LOCKED(_len, p, T, IPFW_PF_RUNLOCK(chain)); \ 1524 UPDATE_POINTERS() 1525 /* 1526 * In case pointers got stale after pullups, update them. 
1527 */ 1528 #define UPDATE_POINTERS() \ 1529 do { \ 1530 if (!mem) { \ 1531 if (eh != NULL) { \ 1532 eh = mtod(m, struct ether_header *); \ 1533 ip = (struct ip *)(eh + 1); \ 1534 } else \ 1535 ip = mtod(m, struct ip *); \ 1536 args->m = m; \ 1537 } \ 1538 } while (0) 1539 1540 /* Identify IP packets and fill up variables. */ 1541 if (pktlen >= sizeof(struct ip6_hdr) && 1542 (eh == NULL || eh->ether_type == htons(ETHERTYPE_IPV6)) && 1543 ip->ip_v == 6) { 1544 struct ip6_hdr *ip6 = (struct ip6_hdr *)ip; 1545 1546 is_ipv6 = 1; 1547 args->flags |= IPFW_ARGS_IP6; 1548 hlen = sizeof(struct ip6_hdr); 1549 proto = ip6->ip6_nxt; 1550 /* Search extension headers to find upper layer protocols */ 1551 while (ulp == NULL && offset == 0) { 1552 switch (proto) { 1553 case IPPROTO_ICMPV6: 1554 PULLUP_TO(hlen, ulp, struct icmp6_hdr); 1555 #ifdef INET6 1556 icmp6_type = ICMP6(ulp)->icmp6_type; 1557 #endif 1558 break; 1559 1560 case IPPROTO_TCP: 1561 PULLUP_TO(hlen, ulp, struct tcphdr); 1562 dst_port = TCP(ulp)->th_dport; 1563 src_port = TCP(ulp)->th_sport; 1564 /* save flags for dynamic rules */ 1565 args->f_id._flags = TCP(ulp)->th_flags; 1566 break; 1567 1568 case IPPROTO_SCTP: 1569 if (pktlen >= hlen + sizeof(struct sctphdr) + 1570 sizeof(struct sctp_chunkhdr) + 1571 offsetof(struct sctp_init, a_rwnd)) 1572 PULLUP_LEN(hlen, ulp, 1573 sizeof(struct sctphdr) + 1574 sizeof(struct sctp_chunkhdr) + 1575 offsetof(struct sctp_init, a_rwnd)); 1576 else if (pktlen >= hlen + sizeof(struct sctphdr)) 1577 PULLUP_LEN(hlen, ulp, pktlen - hlen); 1578 else 1579 PULLUP_LEN(hlen, ulp, 1580 sizeof(struct sctphdr)); 1581 src_port = SCTP(ulp)->src_port; 1582 dst_port = SCTP(ulp)->dest_port; 1583 break; 1584 1585 case IPPROTO_UDP: 1586 case IPPROTO_UDPLITE: 1587 PULLUP_TO(hlen, ulp, struct udphdr); 1588 dst_port = UDP(ulp)->uh_dport; 1589 src_port = UDP(ulp)->uh_sport; 1590 break; 1591 1592 case IPPROTO_HOPOPTS: /* RFC 2460 */ 1593 PULLUP_TO(hlen, ulp, struct ip6_hbh); 1594 ext_hd |= EXT_HOPOPTS; 
1595 hlen += (((struct ip6_hbh *)ulp)->ip6h_len + 1) << 3; 1596 proto = ((struct ip6_hbh *)ulp)->ip6h_nxt; 1597 ulp = NULL; 1598 break; 1599 1600 case IPPROTO_ROUTING: /* RFC 2460 */ 1601 PULLUP_TO(hlen, ulp, struct ip6_rthdr); 1602 switch (((struct ip6_rthdr *)ulp)->ip6r_type) { 1603 case 0: 1604 ext_hd |= EXT_RTHDR0; 1605 break; 1606 case 2: 1607 ext_hd |= EXT_RTHDR2; 1608 break; 1609 default: 1610 if (V_fw_verbose) 1611 printf("IPFW2: IPV6 - Unknown " 1612 "Routing Header type(%d)\n", 1613 ((struct ip6_rthdr *) 1614 ulp)->ip6r_type); 1615 if (V_fw_deny_unknown_exthdrs) 1616 return (IP_FW_DENY); 1617 break; 1618 } 1619 ext_hd |= EXT_ROUTING; 1620 hlen += (((struct ip6_rthdr *)ulp)->ip6r_len + 1) << 3; 1621 proto = ((struct ip6_rthdr *)ulp)->ip6r_nxt; 1622 ulp = NULL; 1623 break; 1624 1625 case IPPROTO_FRAGMENT: /* RFC 2460 */ 1626 PULLUP_TO(hlen, ulp, struct ip6_frag); 1627 ext_hd |= EXT_FRAGMENT; 1628 hlen += sizeof (struct ip6_frag); 1629 proto = ((struct ip6_frag *)ulp)->ip6f_nxt; 1630 offset = ((struct ip6_frag *)ulp)->ip6f_offlg & 1631 IP6F_OFF_MASK; 1632 ip6f_mf = ((struct ip6_frag *)ulp)->ip6f_offlg & 1633 IP6F_MORE_FRAG; 1634 if (V_fw_permit_single_frag6 == 0 && 1635 offset == 0 && ip6f_mf == 0) { 1636 if (V_fw_verbose) 1637 printf("IPFW2: IPV6 - Invalid " 1638 "Fragment Header\n"); 1639 if (V_fw_deny_unknown_exthdrs) 1640 return (IP_FW_DENY); 1641 break; 1642 } 1643 args->f_id.extra = 1644 ntohl(((struct ip6_frag *)ulp)->ip6f_ident); 1645 ulp = NULL; 1646 break; 1647 1648 case IPPROTO_DSTOPTS: /* RFC 2460 */ 1649 PULLUP_TO(hlen, ulp, struct ip6_hbh); 1650 ext_hd |= EXT_DSTOPTS; 1651 hlen += (((struct ip6_hbh *)ulp)->ip6h_len + 1) << 3; 1652 proto = ((struct ip6_hbh *)ulp)->ip6h_nxt; 1653 ulp = NULL; 1654 break; 1655 1656 case IPPROTO_AH: /* RFC 2402 */ 1657 PULLUP_TO(hlen, ulp, struct ip6_ext); 1658 ext_hd |= EXT_AH; 1659 hlen += (((struct ip6_ext *)ulp)->ip6e_len + 2) << 2; 1660 proto = ((struct ip6_ext *)ulp)->ip6e_nxt; 1661 ulp = NULL; 1662 break; 
1663 1664 case IPPROTO_ESP: /* RFC 2406 */ 1665 PULLUP_TO(hlen, ulp, uint32_t); /* SPI, Seq# */ 1666 /* Anything past Seq# is variable length and 1667 * data past this ext. header is encrypted. */ 1668 ext_hd |= EXT_ESP; 1669 break; 1670 1671 case IPPROTO_NONE: /* RFC 2460 */ 1672 /* 1673 * Packet ends here, and IPv6 header has 1674 * already been pulled up. If ip6e_len!=0 1675 * then octets must be ignored. 1676 */ 1677 ulp = ip; /* non-NULL to get out of loop. */ 1678 break; 1679 1680 case IPPROTO_OSPFIGP: 1681 /* XXX OSPF header check? */ 1682 PULLUP_TO(hlen, ulp, struct ip6_ext); 1683 break; 1684 1685 case IPPROTO_PIM: 1686 /* XXX PIM header check? */ 1687 PULLUP_TO(hlen, ulp, struct pim); 1688 break; 1689 1690 case IPPROTO_GRE: /* RFC 1701 */ 1691 /* XXX GRE header check? */ 1692 PULLUP_TO(hlen, ulp, struct grehdr); 1693 break; 1694 1695 case IPPROTO_CARP: 1696 PULLUP_TO(hlen, ulp, offsetof( 1697 struct carp_header, carp_counter)); 1698 if (CARP_ADVERTISEMENT != 1699 ((struct carp_header *)ulp)->carp_type) 1700 return (IP_FW_DENY); 1701 break; 1702 1703 case IPPROTO_IPV6: /* RFC 2893 */ 1704 PULLUP_TO(hlen, ulp, struct ip6_hdr); 1705 break; 1706 1707 case IPPROTO_IPV4: /* RFC 2893 */ 1708 PULLUP_TO(hlen, ulp, struct ip); 1709 break; 1710 1711 default: 1712 if (V_fw_verbose) 1713 printf("IPFW2: IPV6 - Unknown " 1714 "Extension Header(%d), ext_hd=%x\n", 1715 proto, ext_hd); 1716 if (V_fw_deny_unknown_exthdrs) 1717 return (IP_FW_DENY); 1718 PULLUP_TO(hlen, ulp, struct ip6_ext); 1719 break; 1720 } /*switch */ 1721 } 1722 UPDATE_POINTERS(); 1723 ip6 = (struct ip6_hdr *)ip; 1724 args->f_id.addr_type = 6; 1725 args->f_id.src_ip6 = ip6->ip6_src; 1726 args->f_id.dst_ip6 = ip6->ip6_dst; 1727 args->f_id.flow_id6 = ntohl(ip6->ip6_flow); 1728 iplen = ntohs(ip6->ip6_plen) + sizeof(*ip6); 1729 } else if (pktlen >= sizeof(struct ip) && 1730 (eh == NULL || eh->ether_type == htons(ETHERTYPE_IP)) && 1731 ip->ip_v == 4) { 1732 is_ipv4 = 1; 1733 args->flags |= IPFW_ARGS_IP4; 1734 
hlen = ip->ip_hl << 2; 1735 /* 1736 * Collect parameters into local variables for faster 1737 * matching. 1738 */ 1739 proto = ip->ip_p; 1740 src_ip = ip->ip_src; 1741 dst_ip = ip->ip_dst; 1742 offset = ntohs(ip->ip_off) & IP_OFFMASK; 1743 iplen = ntohs(ip->ip_len); 1744 1745 if (offset == 0) { 1746 switch (proto) { 1747 case IPPROTO_TCP: 1748 PULLUP_TO(hlen, ulp, struct tcphdr); 1749 dst_port = TCP(ulp)->th_dport; 1750 src_port = TCP(ulp)->th_sport; 1751 /* save flags for dynamic rules */ 1752 args->f_id._flags = TCP(ulp)->th_flags; 1753 break; 1754 1755 case IPPROTO_SCTP: 1756 if (pktlen >= hlen + sizeof(struct sctphdr) + 1757 sizeof(struct sctp_chunkhdr) + 1758 offsetof(struct sctp_init, a_rwnd)) 1759 PULLUP_LEN(hlen, ulp, 1760 sizeof(struct sctphdr) + 1761 sizeof(struct sctp_chunkhdr) + 1762 offsetof(struct sctp_init, a_rwnd)); 1763 else if (pktlen >= hlen + sizeof(struct sctphdr)) 1764 PULLUP_LEN(hlen, ulp, pktlen - hlen); 1765 else 1766 PULLUP_LEN(hlen, ulp, 1767 sizeof(struct sctphdr)); 1768 src_port = SCTP(ulp)->src_port; 1769 dst_port = SCTP(ulp)->dest_port; 1770 break; 1771 1772 case IPPROTO_UDP: 1773 case IPPROTO_UDPLITE: 1774 PULLUP_TO(hlen, ulp, struct udphdr); 1775 dst_port = UDP(ulp)->uh_dport; 1776 src_port = UDP(ulp)->uh_sport; 1777 break; 1778 1779 case IPPROTO_ICMP: 1780 PULLUP_TO(hlen, ulp, struct icmphdr); 1781 //args->f_id.flags = ICMP(ulp)->icmp_type; 1782 break; 1783 1784 default: 1785 break; 1786 } 1787 } else { 1788 if (offset == 1 && proto == IPPROTO_TCP) { 1789 /* RFC 3128 */ 1790 goto pullup_failed; 1791 } 1792 } 1793 1794 UPDATE_POINTERS(); 1795 args->f_id.addr_type = 4; 1796 args->f_id.src_ip = ntohl(src_ip.s_addr); 1797 args->f_id.dst_ip = ntohl(dst_ip.s_addr); 1798 } else { 1799 proto = 0; 1800 dst_ip.s_addr = src_ip.s_addr = 0; 1801 1802 args->f_id.addr_type = 1; /* XXX */ 1803 } 1804 #undef PULLUP_TO 1805 pktlen = iplen < pktlen ? 
iplen: pktlen; 1806 1807 /* Properly initialize the rest of f_id */ 1808 args->f_id.proto = proto; 1809 args->f_id.src_port = src_port = ntohs(src_port); 1810 args->f_id.dst_port = dst_port = ntohs(dst_port); 1811 1812 IPFW_PF_RLOCK(chain); 1813 if (! V_ipfw_vnet_ready) { /* shutting down, leave NOW. */ 1814 IPFW_PF_RUNLOCK(chain); 1815 return (IP_FW_PASS); /* accept */ 1816 } 1817 if (args->flags & IPFW_ARGS_REF) { 1818 /* 1819 * Packet has already been tagged as a result of a previous 1820 * match on rule args->rule aka args->rule_id (PIPE, QUEUE, 1821 * REASS, NETGRAPH, DIVERT/TEE...) 1822 * Validate the slot and continue from the next one 1823 * if still present, otherwise do a lookup. 1824 */ 1825 f_pos = (args->rule.chain_id == chain->id) ? 1826 args->rule.slot : 1827 ipfw_find_rule(chain, args->rule.rulenum, 1828 args->rule.rule_id); 1829 } else { 1830 f_pos = 0; 1831 } 1832 1833 if (args->flags & IPFW_ARGS_IN) { 1834 iif = args->ifp; 1835 oif = NULL; 1836 } else { 1837 MPASS(args->flags & IPFW_ARGS_OUT); 1838 iif = mem ? NULL : m_rcvif(m); 1839 oif = args->ifp; 1840 } 1841 1842 /* 1843 * Now scan the rules, and parse microinstructions for each rule. 1844 * We have two nested loops and an inner switch. Sometimes we 1845 * need to break out of one or both loops, or re-enter one of 1846 * the loops with updated variables. Loop variables are: 1847 * 1848 * f_pos (outer loop) points to the current rule. 1849 * On output it points to the matching rule. 1850 * done (outer loop) is used as a flag to break the loop. 1851 * l (inner loop) residual length of current rule. 1852 * cmd points to the current microinstruction. 1853 * 1854 * We break the inner loop by setting l=0 and possibly 1855 * cmdlen=0 if we don't want to advance cmd. 1856 * We break the outer loop by setting done=1 1857 * We can restart the inner loop by setting l>0 and f_pos, f, cmd 1858 * as needed. 
1859 */ 1860 for (; f_pos < chain->n_rules; f_pos++) { 1861 ipfw_insn *cmd; 1862 uint32_t tablearg = 0; 1863 int l, cmdlen, skip_or; /* skip rest of OR block */ 1864 struct ip_fw *f; 1865 1866 f = chain->map[f_pos]; 1867 if (V_set_disable & (1 << f->set) ) 1868 continue; 1869 1870 skip_or = 0; 1871 for (l = f->cmd_len, cmd = f->cmd ; l > 0 ; 1872 l -= cmdlen, cmd += cmdlen) { 1873 int match; 1874 1875 /* 1876 * check_body is a jump target used when we find a 1877 * CHECK_STATE, and need to jump to the body of 1878 * the target rule. 1879 */ 1880 1881 /* check_body: */ 1882 cmdlen = F_LEN(cmd); 1883 /* 1884 * An OR block (insn_1 || .. || insn_n) has the 1885 * F_OR bit set in all but the last instruction. 1886 * The first match will set "skip_or", and cause 1887 * the following instructions to be skipped until 1888 * past the one with the F_OR bit clear. 1889 */ 1890 if (skip_or) { /* skip this instruction */ 1891 if ((cmd->len & F_OR) == 0) 1892 skip_or = 0; /* next one is good */ 1893 continue; 1894 } 1895 match = 0; /* set to 1 if we succeed */ 1896 1897 switch (cmd->opcode) { 1898 /* 1899 * The first set of opcodes compares the packet's 1900 * fields with some pattern, setting 'match' if a 1901 * match is found. At the end of the loop there is 1902 * logic to deal with F_NOT and F_OR flags associated 1903 * with the opcode. 1904 */ 1905 case O_NOP: 1906 match = 1; 1907 break; 1908 1909 case O_FORWARD_MAC: 1910 printf("ipfw: opcode %d unimplemented\n", 1911 cmd->opcode); 1912 break; 1913 1914 case O_GID: 1915 case O_UID: 1916 case O_JAIL: 1917 /* 1918 * We only check offset == 0 && proto != 0, 1919 * as this ensures that we have a 1920 * packet with the ports info. 
1921 */ 1922 if (offset != 0) 1923 break; 1924 if (proto == IPPROTO_TCP || 1925 proto == IPPROTO_UDP || 1926 proto == IPPROTO_UDPLITE) 1927 match = check_uidgid( 1928 (ipfw_insn_u32 *)cmd, 1929 args, &ucred_lookup, 1930 #ifdef __FreeBSD__ 1931 &ucred_cache); 1932 #else 1933 (void *)&ucred_cache); 1934 #endif 1935 break; 1936 1937 case O_RECV: 1938 match = iface_match(iif, (ipfw_insn_if *)cmd, 1939 chain, &tablearg); 1940 break; 1941 1942 case O_XMIT: 1943 match = iface_match(oif, (ipfw_insn_if *)cmd, 1944 chain, &tablearg); 1945 break; 1946 1947 case O_VIA: 1948 match = iface_match(args->ifp, 1949 (ipfw_insn_if *)cmd, chain, &tablearg); 1950 break; 1951 1952 case O_MACADDR2: 1953 if (args->flags & IPFW_ARGS_ETHER) { 1954 u_int32_t *want = (u_int32_t *) 1955 ((ipfw_insn_mac *)cmd)->addr; 1956 u_int32_t *mask = (u_int32_t *) 1957 ((ipfw_insn_mac *)cmd)->mask; 1958 u_int32_t *hdr = (u_int32_t *)eh; 1959 1960 match = 1961 ( want[0] == (hdr[0] & mask[0]) && 1962 want[1] == (hdr[1] & mask[1]) && 1963 want[2] == (hdr[2] & mask[2]) ); 1964 } 1965 break; 1966 1967 case O_MAC_TYPE: 1968 if (args->flags & IPFW_ARGS_ETHER) { 1969 u_int16_t *p = 1970 ((ipfw_insn_u16 *)cmd)->ports; 1971 int i; 1972 1973 for (i = cmdlen - 1; !match && i>0; 1974 i--, p += 2) 1975 match = 1976 (ntohs(eh->ether_type) >= 1977 p[0] && 1978 ntohs(eh->ether_type) <= 1979 p[1]); 1980 } 1981 break; 1982 1983 case O_FRAG: 1984 if (is_ipv4) { 1985 /* 1986 * Since flags_match() works with 1987 * uint8_t we pack ip_off into 8 bits. 1988 * For this match offset is a boolean. 1989 */ 1990 match = flags_match(cmd, 1991 ((ntohs(ip->ip_off) & ~IP_OFFMASK) 1992 >> 8) | (offset != 0)); 1993 } else { 1994 /* 1995 * Compatiblity: historically bare 1996 * "frag" would match IPv6 fragments. 
1997 */ 1998 match = (cmd->arg1 == 0x1 && 1999 (offset != 0)); 2000 } 2001 break; 2002 2003 case O_IN: /* "out" is "not in" */ 2004 match = (oif == NULL); 2005 break; 2006 2007 case O_LAYER2: 2008 match = (args->flags & IPFW_ARGS_ETHER); 2009 break; 2010 2011 case O_DIVERTED: 2012 if ((args->flags & IPFW_ARGS_REF) == 0) 2013 break; 2014 /* 2015 * For diverted packets, args->rule.info 2016 * contains the divert port (in host format) 2017 * reason and direction. 2018 */ 2019 match = ((args->rule.info & IPFW_IS_MASK) == 2020 IPFW_IS_DIVERT) && ( 2021 ((args->rule.info & IPFW_INFO_IN) ? 2022 1: 2) & cmd->arg1); 2023 break; 2024 2025 case O_PROTO: 2026 /* 2027 * We do not allow an arg of 0 so the 2028 * check of "proto" only suffices. 2029 */ 2030 match = (proto == cmd->arg1); 2031 break; 2032 2033 case O_IP_SRC: 2034 match = is_ipv4 && 2035 (((ipfw_insn_ip *)cmd)->addr.s_addr == 2036 src_ip.s_addr); 2037 break; 2038 2039 case O_IP_DST_LOOKUP: 2040 { 2041 if (cmdlen > F_INSN_SIZE(ipfw_insn_u32)) { 2042 void *pkey; 2043 uint32_t vidx, key; 2044 uint16_t keylen = 0; /* zero if can't match the packet */ 2045 2046 /* Determine lookup key type */ 2047 vidx = ((ipfw_insn_u32 *)cmd)->d[1]; 2048 switch (vidx) { 2049 case LOOKUP_DST_IP: 2050 case LOOKUP_SRC_IP: 2051 /* Need IP frame */ 2052 if (is_ipv6 == 0 && is_ipv4 == 0) 2053 break; 2054 if (vidx == LOOKUP_DST_IP) 2055 pkey = is_ipv6 ? 2056 (void *)&args->f_id.dst_ip6: 2057 (void *)&dst_ip; 2058 else 2059 pkey = is_ipv6 ? 2060 (void *)&args->f_id.src_ip6: 2061 (void *)&src_ip; 2062 keylen = is_ipv6 ? 
2063 sizeof(struct in6_addr): 2064 sizeof(in_addr_t); 2065 break; 2066 case LOOKUP_DST_PORT: 2067 case LOOKUP_SRC_PORT: 2068 /* Need IP frame */ 2069 if (is_ipv6 == 0 && is_ipv4 == 0) 2070 break; 2071 /* Skip fragments */ 2072 if (offset != 0) 2073 break; 2074 /* Skip proto without ports */ 2075 if (proto != IPPROTO_TCP && 2076 proto != IPPROTO_UDP && 2077 proto != IPPROTO_UDPLITE && 2078 proto != IPPROTO_SCTP) 2079 break; 2080 key = vidx == LOOKUP_DST_PORT ? 2081 dst_port: 2082 src_port; 2083 pkey = &key; 2084 keylen = sizeof(key); 2085 break; 2086 case LOOKUP_UID: 2087 case LOOKUP_JAIL: 2088 check_uidgid( 2089 (ipfw_insn_u32 *)cmd, 2090 args, &ucred_lookup, 2091 &ucred_cache); 2092 key = vidx == LOOKUP_UID ? 2093 ucred_cache->cr_uid: 2094 ucred_cache->cr_prison->pr_id; 2095 pkey = &key; 2096 keylen = sizeof(key); 2097 break; 2098 case LOOKUP_DSCP: 2099 /* Need IP frame */ 2100 if (is_ipv6 == 0 && is_ipv4 == 0) 2101 break; 2102 if (is_ipv6) 2103 key = IPV6_DSCP( 2104 (struct ip6_hdr *)ip) >> 2; 2105 else 2106 key = ip->ip_tos >> 2; 2107 pkey = &key; 2108 keylen = sizeof(key); 2109 break; 2110 case LOOKUP_DST_MAC: 2111 case LOOKUP_SRC_MAC: 2112 /* Need ether frame */ 2113 if ((args->flags & IPFW_ARGS_ETHER) == 0) 2114 break; 2115 pkey = vidx == LOOKUP_DST_MAC ? 
2116 eh->ether_dhost: 2117 eh->ether_shost; 2118 keylen = ETHER_ADDR_LEN; 2119 break; 2120 } 2121 if (keylen == 0) 2122 break; 2123 match = ipfw_lookup_table(chain, 2124 cmd->arg1, keylen, pkey, &vidx); 2125 if (!match) 2126 break; 2127 tablearg = vidx; 2128 break; 2129 } 2130 /* cmdlen =< F_INSN_SIZE(ipfw_insn_u32) */ 2131 /* FALLTHROUGH */ 2132 } 2133 case O_IP_SRC_LOOKUP: 2134 { 2135 void *pkey; 2136 uint32_t vidx; 2137 uint16_t keylen; 2138 2139 if (is_ipv4) { 2140 keylen = sizeof(in_addr_t); 2141 if (cmd->opcode == O_IP_DST_LOOKUP) 2142 pkey = &dst_ip; 2143 else 2144 pkey = &src_ip; 2145 } else if (is_ipv6) { 2146 keylen = sizeof(struct in6_addr); 2147 if (cmd->opcode == O_IP_DST_LOOKUP) 2148 pkey = &args->f_id.dst_ip6; 2149 else 2150 pkey = &args->f_id.src_ip6; 2151 } else 2152 break; 2153 match = ipfw_lookup_table(chain, cmd->arg1, 2154 keylen, pkey, &vidx); 2155 if (!match) 2156 break; 2157 if (cmdlen == F_INSN_SIZE(ipfw_insn_u32)) { 2158 match = ((ipfw_insn_u32 *)cmd)->d[0] == 2159 TARG_VAL(chain, vidx, tag); 2160 if (!match) 2161 break; 2162 } 2163 tablearg = vidx; 2164 break; 2165 } 2166 2167 case O_MAC_SRC_LOOKUP: 2168 case O_MAC_DST_LOOKUP: 2169 { 2170 void *pkey; 2171 uint32_t vidx; 2172 uint16_t keylen = ETHER_ADDR_LEN; 2173 2174 /* Need ether frame */ 2175 if ((args->flags & IPFW_ARGS_ETHER) == 0) 2176 break; 2177 2178 if (cmd->opcode == O_MAC_DST_LOOKUP) 2179 pkey = eh->ether_dhost; 2180 else 2181 pkey = eh->ether_shost; 2182 2183 match = ipfw_lookup_table(chain, cmd->arg1, 2184 keylen, pkey, &vidx); 2185 if (!match) 2186 break; 2187 if (cmdlen == F_INSN_SIZE(ipfw_insn_u32)) { 2188 match = ((ipfw_insn_u32 *)cmd)->d[0] == 2189 TARG_VAL(chain, vidx, tag); 2190 if (!match) 2191 break; 2192 } 2193 tablearg = vidx; 2194 break; 2195 } 2196 2197 case O_IP_FLOW_LOOKUP: 2198 { 2199 uint32_t v = 0; 2200 match = ipfw_lookup_table(chain, 2201 cmd->arg1, 0, &args->f_id, &v); 2202 if (!match) 2203 break; 2204 if (cmdlen == F_INSN_SIZE(ipfw_insn_u32)) 2205 match 
= ((ipfw_insn_u32 *)cmd)->d[0] == 2206 TARG_VAL(chain, v, tag); 2207 if (match) 2208 tablearg = v; 2209 } 2210 break; 2211 case O_IP_SRC_MASK: 2212 case O_IP_DST_MASK: 2213 if (is_ipv4) { 2214 uint32_t a = 2215 (cmd->opcode == O_IP_DST_MASK) ? 2216 dst_ip.s_addr : src_ip.s_addr; 2217 uint32_t *p = ((ipfw_insn_u32 *)cmd)->d; 2218 int i = cmdlen-1; 2219 2220 for (; !match && i>0; i-= 2, p+= 2) 2221 match = (p[0] == (a & p[1])); 2222 } 2223 break; 2224 2225 case O_IP_SRC_ME: 2226 if (is_ipv4) { 2227 match = in_localip(src_ip); 2228 break; 2229 } 2230 #ifdef INET6 2231 /* FALLTHROUGH */ 2232 case O_IP6_SRC_ME: 2233 match = is_ipv6 && 2234 ipfw_localip6(&args->f_id.src_ip6); 2235 #endif 2236 break; 2237 2238 case O_IP_DST_SET: 2239 case O_IP_SRC_SET: 2240 if (is_ipv4) { 2241 u_int32_t *d = (u_int32_t *)(cmd+1); 2242 u_int32_t addr = 2243 cmd->opcode == O_IP_DST_SET ? 2244 args->f_id.dst_ip : 2245 args->f_id.src_ip; 2246 2247 if (addr < d[0]) 2248 break; 2249 addr -= d[0]; /* subtract base */ 2250 match = (addr < cmd->arg1) && 2251 ( d[ 1 + (addr>>5)] & 2252 (1<<(addr & 0x1f)) ); 2253 } 2254 break; 2255 2256 case O_IP_DST: 2257 match = is_ipv4 && 2258 (((ipfw_insn_ip *)cmd)->addr.s_addr == 2259 dst_ip.s_addr); 2260 break; 2261 2262 case O_IP_DST_ME: 2263 if (is_ipv4) { 2264 match = in_localip(dst_ip); 2265 break; 2266 } 2267 #ifdef INET6 2268 /* FALLTHROUGH */ 2269 case O_IP6_DST_ME: 2270 match = is_ipv6 && 2271 ipfw_localip6(&args->f_id.dst_ip6); 2272 #endif 2273 break; 2274 2275 case O_IP_SRCPORT: 2276 case O_IP_DSTPORT: 2277 /* 2278 * offset == 0 && proto != 0 is enough 2279 * to guarantee that we have a 2280 * packet with port info. 2281 */ 2282 if ((proto == IPPROTO_UDP || 2283 proto == IPPROTO_UDPLITE || 2284 proto == IPPROTO_TCP || 2285 proto == IPPROTO_SCTP) && offset == 0) { 2286 u_int16_t x = 2287 (cmd->opcode == O_IP_SRCPORT) ? 
2288 src_port : dst_port ; 2289 u_int16_t *p = 2290 ((ipfw_insn_u16 *)cmd)->ports; 2291 int i; 2292 2293 for (i = cmdlen - 1; !match && i>0; 2294 i--, p += 2) 2295 match = (x>=p[0] && x<=p[1]); 2296 } 2297 break; 2298 2299 case O_ICMPTYPE: 2300 match = (offset == 0 && proto==IPPROTO_ICMP && 2301 icmptype_match(ICMP(ulp), (ipfw_insn_u32 *)cmd) ); 2302 break; 2303 2304 #ifdef INET6 2305 case O_ICMP6TYPE: 2306 match = is_ipv6 && offset == 0 && 2307 proto==IPPROTO_ICMPV6 && 2308 icmp6type_match( 2309 ICMP6(ulp)->icmp6_type, 2310 (ipfw_insn_u32 *)cmd); 2311 break; 2312 #endif /* INET6 */ 2313 2314 case O_IPOPT: 2315 match = (is_ipv4 && 2316 ipopts_match(ip, cmd) ); 2317 break; 2318 2319 case O_IPVER: 2320 match = ((is_ipv4 || is_ipv6) && 2321 cmd->arg1 == ip->ip_v); 2322 break; 2323 2324 case O_IPID: 2325 case O_IPTTL: 2326 if (!is_ipv4) 2327 break; 2328 case O_IPLEN: 2329 { /* only for IP packets */ 2330 uint16_t x; 2331 uint16_t *p; 2332 int i; 2333 2334 if (cmd->opcode == O_IPLEN) 2335 x = iplen; 2336 else if (cmd->opcode == O_IPTTL) 2337 x = ip->ip_ttl; 2338 else /* must be IPID */ 2339 x = ntohs(ip->ip_id); 2340 if (cmdlen == 1) { 2341 match = (cmd->arg1 == x); 2342 break; 2343 } 2344 /* otherwise we have ranges */ 2345 p = ((ipfw_insn_u16 *)cmd)->ports; 2346 i = cmdlen - 1; 2347 for (; !match && i>0; i--, p += 2) 2348 match = (x >= p[0] && x <= p[1]); 2349 } 2350 break; 2351 2352 case O_IPPRECEDENCE: 2353 match = (is_ipv4 && 2354 (cmd->arg1 == (ip->ip_tos & 0xe0)) ); 2355 break; 2356 2357 case O_IPTOS: 2358 match = (is_ipv4 && 2359 flags_match(cmd, ip->ip_tos)); 2360 break; 2361 2362 case O_DSCP: 2363 { 2364 uint32_t *p; 2365 uint16_t x; 2366 2367 p = ((ipfw_insn_u32 *)cmd)->d; 2368 2369 if (is_ipv4) 2370 x = ip->ip_tos >> 2; 2371 else if (is_ipv6) { 2372 x = IPV6_DSCP( 2373 (struct ip6_hdr *)ip) >> 2; 2374 x &= 0x3f; 2375 } else 2376 break; 2377 2378 /* DSCP bitmask is stored as low_u32 high_u32 */ 2379 if (x >= 32) 2380 match = *(p + 1) & (1 << (x - 32)); 2381 
else 2382 match = *p & (1 << x); 2383 } 2384 break; 2385 2386 case O_TCPDATALEN: 2387 if (proto == IPPROTO_TCP && offset == 0) { 2388 struct tcphdr *tcp; 2389 uint16_t x; 2390 uint16_t *p; 2391 int i; 2392 #ifdef INET6 2393 if (is_ipv6) { 2394 struct ip6_hdr *ip6; 2395 2396 ip6 = (struct ip6_hdr *)ip; 2397 if (ip6->ip6_plen == 0) { 2398 /* 2399 * Jumbo payload is not 2400 * supported by this 2401 * opcode. 2402 */ 2403 break; 2404 } 2405 x = iplen - hlen; 2406 } else 2407 #endif /* INET6 */ 2408 x = iplen - (ip->ip_hl << 2); 2409 tcp = TCP(ulp); 2410 x -= tcp->th_off << 2; 2411 if (cmdlen == 1) { 2412 match = (cmd->arg1 == x); 2413 break; 2414 } 2415 /* otherwise we have ranges */ 2416 p = ((ipfw_insn_u16 *)cmd)->ports; 2417 i = cmdlen - 1; 2418 for (; !match && i>0; i--, p += 2) 2419 match = (x >= p[0] && x <= p[1]); 2420 } 2421 break; 2422 2423 case O_TCPFLAGS: 2424 match = (proto == IPPROTO_TCP && offset == 0 && 2425 flags_match(cmd, TCP(ulp)->th_flags)); 2426 break; 2427 2428 case O_TCPOPTS: 2429 if (proto == IPPROTO_TCP && offset == 0 && ulp){ 2430 PULLUP_LEN_LOCKED(hlen, ulp, 2431 (TCP(ulp)->th_off << 2)); 2432 match = tcpopts_match(TCP(ulp), cmd); 2433 } 2434 break; 2435 2436 case O_TCPSEQ: 2437 match = (proto == IPPROTO_TCP && offset == 0 && 2438 ((ipfw_insn_u32 *)cmd)->d[0] == 2439 TCP(ulp)->th_seq); 2440 break; 2441 2442 case O_TCPACK: 2443 match = (proto == IPPROTO_TCP && offset == 0 && 2444 ((ipfw_insn_u32 *)cmd)->d[0] == 2445 TCP(ulp)->th_ack); 2446 break; 2447 2448 case O_TCPMSS: 2449 if (proto == IPPROTO_TCP && 2450 (args->f_id._flags & TH_SYN) != 0 && 2451 ulp != NULL) { 2452 uint16_t mss, *p; 2453 int i; 2454 2455 PULLUP_LEN_LOCKED(hlen, ulp, 2456 (TCP(ulp)->th_off << 2)); 2457 if ((tcpopts_parse(TCP(ulp), &mss) & 2458 IP_FW_TCPOPT_MSS) == 0) 2459 break; 2460 if (cmdlen == 1) { 2461 match = (cmd->arg1 == mss); 2462 break; 2463 } 2464 /* Otherwise we have ranges. 
*/ 2465 p = ((ipfw_insn_u16 *)cmd)->ports; 2466 i = cmdlen - 1; 2467 for (; !match && i > 0; i--, p += 2) 2468 match = (mss >= p[0] && 2469 mss <= p[1]); 2470 } 2471 break; 2472 2473 case O_TCPWIN: 2474 if (proto == IPPROTO_TCP && offset == 0) { 2475 uint16_t x; 2476 uint16_t *p; 2477 int i; 2478 2479 x = ntohs(TCP(ulp)->th_win); 2480 if (cmdlen == 1) { 2481 match = (cmd->arg1 == x); 2482 break; 2483 } 2484 /* Otherwise we have ranges. */ 2485 p = ((ipfw_insn_u16 *)cmd)->ports; 2486 i = cmdlen - 1; 2487 for (; !match && i > 0; i--, p += 2) 2488 match = (x >= p[0] && x <= p[1]); 2489 } 2490 break; 2491 2492 case O_ESTAB: 2493 /* reject packets which have SYN only */ 2494 /* XXX should i also check for TH_ACK ? */ 2495 match = (proto == IPPROTO_TCP && offset == 0 && 2496 (TCP(ulp)->th_flags & 2497 (TH_RST | TH_ACK | TH_SYN)) != TH_SYN); 2498 break; 2499 2500 case O_ALTQ: { 2501 struct pf_mtag *at; 2502 struct m_tag *mtag; 2503 ipfw_insn_altq *altq = (ipfw_insn_altq *)cmd; 2504 2505 /* 2506 * ALTQ uses mbuf tags from another 2507 * packet filtering system - pf(4). 2508 * We allocate a tag in its format 2509 * and fill it in, pretending to be pf(4). 2510 */ 2511 match = 1; 2512 at = pf_find_mtag(m); 2513 if (at != NULL && at->qid != 0) 2514 break; 2515 mtag = m_tag_get(PACKET_TAG_PF, 2516 sizeof(struct pf_mtag), M_NOWAIT | M_ZERO); 2517 if (mtag == NULL) { 2518 /* 2519 * Let the packet fall back to the 2520 * default ALTQ. 2521 */ 2522 break; 2523 } 2524 m_tag_prepend(m, mtag); 2525 at = (struct pf_mtag *)(mtag + 1); 2526 at->qid = altq->qid; 2527 at->hdr = ip; 2528 break; 2529 } 2530 2531 case O_LOG: 2532 ipfw_log(chain, f, hlen, args, 2533 offset | ip6f_mf, tablearg, ip); 2534 match = 1; 2535 break; 2536 2537 case O_PROB: 2538 match = (random()<((ipfw_insn_u32 *)cmd)->d[0]); 2539 break; 2540 2541 case O_VERREVPATH: 2542 /* Outgoing packets automatically pass/match */ 2543 match = (args->flags & IPFW_ARGS_OUT || 2544 ( 2545 #ifdef INET6 2546 is_ipv6 ? 
2547 verify_path6(&(args->f_id.src_ip6), 2548 iif, args->f_id.fib) : 2549 #endif 2550 verify_path(src_ip, iif, args->f_id.fib))); 2551 break; 2552 2553 case O_VERSRCREACH: 2554 /* Outgoing packets automatically pass/match */ 2555 match = (hlen > 0 && ((oif != NULL) || ( 2556 #ifdef INET6 2557 is_ipv6 ? 2558 verify_path6(&(args->f_id.src_ip6), 2559 NULL, args->f_id.fib) : 2560 #endif 2561 verify_path(src_ip, NULL, args->f_id.fib)))); 2562 break; 2563 2564 case O_ANTISPOOF: 2565 /* Outgoing packets automatically pass/match */ 2566 if (oif == NULL && hlen > 0 && 2567 ( (is_ipv4 && in_localaddr(src_ip)) 2568 #ifdef INET6 2569 || (is_ipv6 && 2570 in6_localaddr(&(args->f_id.src_ip6))) 2571 #endif 2572 )) 2573 match = 2574 #ifdef INET6 2575 is_ipv6 ? verify_path6( 2576 &(args->f_id.src_ip6), iif, 2577 args->f_id.fib) : 2578 #endif 2579 verify_path(src_ip, iif, 2580 args->f_id.fib); 2581 else 2582 match = 1; 2583 break; 2584 2585 case O_IPSEC: 2586 match = (m_tag_find(m, 2587 PACKET_TAG_IPSEC_IN_DONE, NULL) != NULL); 2588 /* otherwise no match */ 2589 break; 2590 2591 #ifdef INET6 2592 case O_IP6_SRC: 2593 match = is_ipv6 && 2594 IN6_ARE_ADDR_EQUAL(&args->f_id.src_ip6, 2595 &((ipfw_insn_ip6 *)cmd)->addr6); 2596 break; 2597 2598 case O_IP6_DST: 2599 match = is_ipv6 && 2600 IN6_ARE_ADDR_EQUAL(&args->f_id.dst_ip6, 2601 &((ipfw_insn_ip6 *)cmd)->addr6); 2602 break; 2603 case O_IP6_SRC_MASK: 2604 case O_IP6_DST_MASK: 2605 if (is_ipv6) { 2606 int i = cmdlen - 1; 2607 struct in6_addr p; 2608 struct in6_addr *d = 2609 &((ipfw_insn_ip6 *)cmd)->addr6; 2610 2611 for (; !match && i > 0; d += 2, 2612 i -= F_INSN_SIZE(struct in6_addr) 2613 * 2) { 2614 p = (cmd->opcode == 2615 O_IP6_SRC_MASK) ? 
2616 args->f_id.src_ip6: 2617 args->f_id.dst_ip6; 2618 APPLY_MASK(&p, &d[1]); 2619 match = 2620 IN6_ARE_ADDR_EQUAL(&d[0], 2621 &p); 2622 } 2623 } 2624 break; 2625 2626 case O_FLOW6ID: 2627 match = is_ipv6 && 2628 flow6id_match(args->f_id.flow_id6, 2629 (ipfw_insn_u32 *) cmd); 2630 break; 2631 2632 case O_EXT_HDR: 2633 match = is_ipv6 && 2634 (ext_hd & ((ipfw_insn *) cmd)->arg1); 2635 break; 2636 2637 case O_IP6: 2638 match = is_ipv6; 2639 break; 2640 #endif 2641 2642 case O_IP4: 2643 match = is_ipv4; 2644 break; 2645 2646 case O_TAG: { 2647 struct m_tag *mtag; 2648 uint32_t tag = TARG(cmd->arg1, tag); 2649 2650 /* Packet is already tagged with this tag? */ 2651 mtag = m_tag_locate(m, MTAG_IPFW, tag, NULL); 2652 2653 /* We have `untag' action when F_NOT flag is 2654 * present. And we must remove this mtag from 2655 * mbuf and reset `match' to zero (`match' will 2656 * be inversed later). 2657 * Otherwise we should allocate new mtag and 2658 * push it into mbuf. 2659 */ 2660 if (cmd->len & F_NOT) { /* `untag' action */ 2661 if (mtag != NULL) 2662 m_tag_delete(m, mtag); 2663 match = 0; 2664 } else { 2665 if (mtag == NULL) { 2666 mtag = m_tag_alloc( MTAG_IPFW, 2667 tag, 0, M_NOWAIT); 2668 if (mtag != NULL) 2669 m_tag_prepend(m, mtag); 2670 } 2671 match = 1; 2672 } 2673 break; 2674 } 2675 2676 case O_FIB: /* try match the specified fib */ 2677 if (args->f_id.fib == cmd->arg1) 2678 match = 1; 2679 break; 2680 2681 case O_SOCKARG: { 2682 #ifndef USERSPACE /* not supported in userspace */ 2683 struct inpcb *inp = args->inp; 2684 struct inpcbinfo *pi; 2685 bool inp_locked = false; 2686 2687 if (proto == IPPROTO_TCP) 2688 pi = &V_tcbinfo; 2689 else if (proto == IPPROTO_UDP) 2690 pi = &V_udbinfo; 2691 else if (proto == IPPROTO_UDPLITE) 2692 pi = &V_ulitecbinfo; 2693 else 2694 break; 2695 2696 /* 2697 * XXXRW: so_user_cookie should almost 2698 * certainly be inp_user_cookie? 
2699 */ 2700 2701 /* 2702 * For incoming packet lookup the inpcb 2703 * using the src/dest ip/port tuple. 2704 */ 2705 if (is_ipv4 && inp == NULL) { 2706 inp = in_pcblookup(pi, 2707 src_ip, htons(src_port), 2708 dst_ip, htons(dst_port), 2709 INPLOOKUP_RLOCKPCB, NULL); 2710 inp_locked = true; 2711 } 2712 #ifdef INET6 2713 if (is_ipv6 && inp == NULL) { 2714 inp = in6_pcblookup(pi, 2715 &args->f_id.src_ip6, 2716 htons(src_port), 2717 &args->f_id.dst_ip6, 2718 htons(dst_port), 2719 INPLOOKUP_RLOCKPCB, NULL); 2720 inp_locked = true; 2721 } 2722 #endif /* INET6 */ 2723 if (inp != NULL) { 2724 if (inp->inp_socket) { 2725 tablearg = 2726 inp->inp_socket->so_user_cookie; 2727 if (tablearg) 2728 match = 1; 2729 } 2730 if (inp_locked) 2731 INP_RUNLOCK(inp); 2732 } 2733 #endif /* !USERSPACE */ 2734 break; 2735 } 2736 2737 case O_TAGGED: { 2738 struct m_tag *mtag; 2739 uint32_t tag = TARG(cmd->arg1, tag); 2740 2741 if (cmdlen == 1) { 2742 match = m_tag_locate(m, MTAG_IPFW, 2743 tag, NULL) != NULL; 2744 break; 2745 } 2746 2747 /* we have ranges */ 2748 for (mtag = m_tag_first(m); 2749 mtag != NULL && !match; 2750 mtag = m_tag_next(m, mtag)) { 2751 uint16_t *p; 2752 int i; 2753 2754 if (mtag->m_tag_cookie != MTAG_IPFW) 2755 continue; 2756 2757 p = ((ipfw_insn_u16 *)cmd)->ports; 2758 i = cmdlen - 1; 2759 for(; !match && i > 0; i--, p += 2) 2760 match = 2761 mtag->m_tag_id >= p[0] && 2762 mtag->m_tag_id <= p[1]; 2763 } 2764 break; 2765 } 2766 2767 /* 2768 * The second set of opcodes represents 'actions', 2769 * i.e. the terminal part of a rule once the packet 2770 * matches all previous patterns. 2771 * Typically there is only one action for each rule, 2772 * and the opcode is stored at the end of the rule 2773 * (but there are exceptions -- see below). 
2774 * 2775 * In general, here we set retval and terminate the 2776 * outer loop (would be a 'break 3' in some language, 2777 * but we need to set l=0, done=1) 2778 * 2779 * Exceptions: 2780 * O_COUNT and O_SKIPTO actions: 2781 * instead of terminating, we jump to the next rule 2782 * (setting l=0), or to the SKIPTO target (setting 2783 * f/f_len, cmd and l as needed), respectively. 2784 * 2785 * O_TAG, O_LOG and O_ALTQ action parameters: 2786 * perform some action and set match = 1; 2787 * 2788 * O_LIMIT and O_KEEP_STATE: these opcodes are 2789 * not real 'actions', and are stored right 2790 * before the 'action' part of the rule (one 2791 * exception is O_SKIP_ACTION which could be 2792 * between these opcodes and 'action' one). 2793 * These opcodes try to install an entry in the 2794 * state tables; if successful, we continue with 2795 * the next opcode (match=1; break;), otherwise 2796 * the packet must be dropped (set retval, 2797 * break loops with l=0, done=1) 2798 * 2799 * O_PROBE_STATE and O_CHECK_STATE: these opcodes 2800 * cause a lookup of the state table, and a jump 2801 * to the 'action' part of the parent rule 2802 * if an entry is found, or 2803 * (CHECK_STATE only) a jump to the next rule if 2804 * the entry is not found. 2805 * The result of the lookup is cached so that 2806 * further instances of these opcodes become NOPs. 2807 * The jump to the next rule is done by setting 2808 * l=0, cmdlen=0. 2809 * 2810 * O_SKIP_ACTION: this opcode is not a real 'action' 2811 * either, and is stored right before the 'action' 2812 * part of the rule, right after the O_KEEP_STATE 2813 * opcode. It causes match failure so the real 2814 * 'action' could be executed only if the rule 2815 * is checked via dynamic rule from the state 2816 * table, as in such case execution starts 2817 * from the true 'action' opcode directly. 
2818 * 2819 */ 2820 case O_LIMIT: 2821 case O_KEEP_STATE: 2822 if (ipfw_dyn_install_state(chain, f, 2823 (ipfw_insn_limit *)cmd, args, ulp, 2824 pktlen, &dyn_info, tablearg)) { 2825 /* error or limit violation */ 2826 retval = IP_FW_DENY; 2827 l = 0; /* exit inner loop */ 2828 done = 1; /* exit outer loop */ 2829 } 2830 match = 1; 2831 break; 2832 2833 case O_PROBE_STATE: 2834 case O_CHECK_STATE: 2835 /* 2836 * dynamic rules are checked at the first 2837 * keep-state or check-state occurrence, 2838 * with the result being stored in dyn_info. 2839 * The compiler introduces a PROBE_STATE 2840 * instruction for us when we have a 2841 * KEEP_STATE (because PROBE_STATE needs 2842 * to be run first). 2843 */ 2844 if (DYN_LOOKUP_NEEDED(&dyn_info, cmd) && 2845 (q = ipfw_dyn_lookup_state(args, ulp, 2846 pktlen, cmd, &dyn_info)) != NULL) { 2847 /* 2848 * Found dynamic entry, jump to the 2849 * 'action' part of the parent rule 2850 * by setting f, cmd, l and clearing 2851 * cmdlen. 2852 */ 2853 f = q; 2854 f_pos = dyn_info.f_pos; 2855 cmd = ACTION_PTR(f); 2856 l = f->cmd_len - f->act_ofs; 2857 cmdlen = 0; 2858 match = 1; 2859 break; 2860 } 2861 /* 2862 * Dynamic entry not found. If CHECK_STATE, 2863 * skip to next rule, if PROBE_STATE just 2864 * ignore and continue with next opcode. 
2865 */ 2866 if (cmd->opcode == O_CHECK_STATE) 2867 l = 0; /* exit inner loop */ 2868 match = 1; 2869 break; 2870 2871 case O_SKIP_ACTION: 2872 match = 0; /* skip to the next rule */ 2873 l = 0; /* exit inner loop */ 2874 break; 2875 2876 case O_ACCEPT: 2877 retval = 0; /* accept */ 2878 l = 0; /* exit inner loop */ 2879 done = 1; /* exit outer loop */ 2880 break; 2881 2882 case O_PIPE: 2883 case O_QUEUE: 2884 set_match(args, f_pos, chain); 2885 args->rule.info = TARG(cmd->arg1, pipe); 2886 if (cmd->opcode == O_PIPE) 2887 args->rule.info |= IPFW_IS_PIPE; 2888 if (V_fw_one_pass) 2889 args->rule.info |= IPFW_ONEPASS; 2890 retval = IP_FW_DUMMYNET; 2891 l = 0; /* exit inner loop */ 2892 done = 1; /* exit outer loop */ 2893 break; 2894 2895 case O_DIVERT: 2896 case O_TEE: 2897 if (args->flags & IPFW_ARGS_ETHER) 2898 break; /* not on layer 2 */ 2899 /* otherwise this is terminal */ 2900 l = 0; /* exit inner loop */ 2901 done = 1; /* exit outer loop */ 2902 retval = (cmd->opcode == O_DIVERT) ? 2903 IP_FW_DIVERT : IP_FW_TEE; 2904 set_match(args, f_pos, chain); 2905 args->rule.info = TARG(cmd->arg1, divert); 2906 break; 2907 2908 case O_COUNT: 2909 IPFW_INC_RULE_COUNTER(f, pktlen); 2910 l = 0; /* exit inner loop */ 2911 break; 2912 2913 case O_SKIPTO: 2914 IPFW_INC_RULE_COUNTER(f, pktlen); 2915 f_pos = JUMP(chain, f, cmd->arg1, tablearg, 0); 2916 /* 2917 * Skip disabled rules, and re-enter 2918 * the inner loop with the correct 2919 * f_pos, f, l and cmd. 2920 * Also clear cmdlen and skip_or 2921 */ 2922 for (; f_pos < chain->n_rules - 1 && 2923 (V_set_disable & 2924 (1 << chain->map[f_pos]->set)); 2925 f_pos++) 2926 ; 2927 /* Re-enter the inner loop at the skipto rule. */ 2928 f = chain->map[f_pos]; 2929 l = f->cmd_len; 2930 cmd = f->cmd; 2931 match = 1; 2932 cmdlen = 0; 2933 skip_or = 0; 2934 continue; 2935 break; /* not reached */ 2936 2937 case O_CALLRETURN: { 2938 /* 2939 * Implementation of `subroutine' call/return, 2940 * in the stack carried in an mbuf tag. 
This 2941 * is different from `skipto' in that any call 2942 * address is possible (`skipto' must prevent 2943 * backward jumps to avoid endless loops). 2944 * We have `return' action when F_NOT flag is 2945 * present. The `m_tag_id' field is used as 2946 * stack pointer. 2947 */ 2948 struct m_tag *mtag; 2949 uint16_t jmpto, *stack; 2950 2951 #define IS_CALL ((cmd->len & F_NOT) == 0) 2952 #define IS_RETURN ((cmd->len & F_NOT) != 0) 2953 /* 2954 * Hand-rolled version of m_tag_locate() with 2955 * wildcard `type'. 2956 * If not already tagged, allocate new tag. 2957 */ 2958 mtag = m_tag_first(m); 2959 while (mtag != NULL) { 2960 if (mtag->m_tag_cookie == 2961 MTAG_IPFW_CALL) 2962 break; 2963 mtag = m_tag_next(m, mtag); 2964 } 2965 if (mtag == NULL && IS_CALL) { 2966 mtag = m_tag_alloc(MTAG_IPFW_CALL, 0, 2967 IPFW_CALLSTACK_SIZE * 2968 sizeof(uint16_t), M_NOWAIT); 2969 if (mtag != NULL) 2970 m_tag_prepend(m, mtag); 2971 } 2972 2973 /* 2974 * On error both `call' and `return' just 2975 * continue with next rule. 2976 */ 2977 if (IS_RETURN && (mtag == NULL || 2978 mtag->m_tag_id == 0)) { 2979 l = 0; /* exit inner loop */ 2980 break; 2981 } 2982 if (IS_CALL && (mtag == NULL || 2983 mtag->m_tag_id >= IPFW_CALLSTACK_SIZE)) { 2984 printf("ipfw: call stack error, " 2985 "go to next rule\n"); 2986 l = 0; /* exit inner loop */ 2987 break; 2988 } 2989 2990 IPFW_INC_RULE_COUNTER(f, pktlen); 2991 stack = (uint16_t *)(mtag + 1); 2992 2993 /* 2994 * The `call' action may use cached f_pos 2995 * (in f->next_rule), whose version is written 2996 * in f->next_rule. 2997 * The `return' action, however, doesn't have 2998 * fixed jump address in cmd->arg1 and can't use 2999 * cache. 
3000 */ 3001 if (IS_CALL) { 3002 stack[mtag->m_tag_id] = f->rulenum; 3003 mtag->m_tag_id++; 3004 f_pos = JUMP(chain, f, cmd->arg1, 3005 tablearg, 1); 3006 } else { /* `return' action */ 3007 mtag->m_tag_id--; 3008 jmpto = stack[mtag->m_tag_id] + 1; 3009 f_pos = ipfw_find_rule(chain, jmpto, 0); 3010 } 3011 3012 /* 3013 * Skip disabled rules, and re-enter 3014 * the inner loop with the correct 3015 * f_pos, f, l and cmd. 3016 * Also clear cmdlen and skip_or 3017 */ 3018 for (; f_pos < chain->n_rules - 1 && 3019 (V_set_disable & 3020 (1 << chain->map[f_pos]->set)); f_pos++) 3021 ; 3022 /* Re-enter the inner loop at the dest rule. */ 3023 f = chain->map[f_pos]; 3024 l = f->cmd_len; 3025 cmd = f->cmd; 3026 cmdlen = 0; 3027 skip_or = 0; 3028 continue; 3029 break; /* NOTREACHED */ 3030 } 3031 #undef IS_CALL 3032 #undef IS_RETURN 3033 3034 case O_REJECT: 3035 /* 3036 * Drop the packet and send a reject notice 3037 * if the packet is not ICMP (or is an ICMP 3038 * query), and it is not multicast/broadcast. 3039 */ 3040 if (hlen > 0 && is_ipv4 && offset == 0 && 3041 (proto != IPPROTO_ICMP || 3042 is_icmp_query(ICMP(ulp))) && 3043 !(m->m_flags & (M_BCAST|M_MCAST)) && 3044 !IN_MULTICAST(ntohl(dst_ip.s_addr))) { 3045 send_reject(args, cmd->arg1, iplen, ip); 3046 m = args->m; 3047 } 3048 /* FALLTHROUGH */ 3049 #ifdef INET6 3050 case O_UNREACH6: 3051 if (hlen > 0 && is_ipv6 && 3052 ((offset & IP6F_OFF_MASK) == 0) && 3053 (proto != IPPROTO_ICMPV6 || 3054 (is_icmp6_query(icmp6_type) == 1)) && 3055 !(m->m_flags & (M_BCAST|M_MCAST)) && 3056 !IN6_IS_ADDR_MULTICAST( 3057 &args->f_id.dst_ip6)) { 3058 send_reject6(args, 3059 cmd->opcode == O_REJECT ? 
3060 map_icmp_unreach(cmd->arg1): 3061 cmd->arg1, hlen, 3062 (struct ip6_hdr *)ip); 3063 m = args->m; 3064 } 3065 /* FALLTHROUGH */ 3066 #endif 3067 case O_DENY: 3068 retval = IP_FW_DENY; 3069 l = 0; /* exit inner loop */ 3070 done = 1; /* exit outer loop */ 3071 break; 3072 3073 case O_FORWARD_IP: 3074 if (args->flags & IPFW_ARGS_ETHER) 3075 break; /* not valid on layer2 pkts */ 3076 if (q != f || 3077 dyn_info.direction == MATCH_FORWARD) { 3078 struct sockaddr_in *sa; 3079 3080 sa = &(((ipfw_insn_sa *)cmd)->sa); 3081 if (sa->sin_addr.s_addr == INADDR_ANY) { 3082 #ifdef INET6 3083 /* 3084 * We use O_FORWARD_IP opcode for 3085 * fwd rule with tablearg, but tables 3086 * now support IPv6 addresses. And 3087 * when we are inspecting IPv6 packet, 3088 * we can use nh6 field from 3089 * table_value as next_hop6 address. 3090 */ 3091 if (is_ipv6) { 3092 struct ip_fw_nh6 *nh6; 3093 3094 args->flags |= IPFW_ARGS_NH6; 3095 nh6 = &args->hopstore6; 3096 nh6->sin6_addr = TARG_VAL( 3097 chain, tablearg, nh6); 3098 nh6->sin6_port = sa->sin_port; 3099 nh6->sin6_scope_id = TARG_VAL( 3100 chain, tablearg, zoneid); 3101 } else 3102 #endif 3103 { 3104 args->flags |= IPFW_ARGS_NH4; 3105 args->hopstore.sin_port = 3106 sa->sin_port; 3107 sa = &args->hopstore; 3108 sa->sin_family = AF_INET; 3109 sa->sin_len = sizeof(*sa); 3110 sa->sin_addr.s_addr = htonl( 3111 TARG_VAL(chain, tablearg, 3112 nh4)); 3113 } 3114 } else { 3115 args->flags |= IPFW_ARGS_NH4PTR; 3116 args->next_hop = sa; 3117 } 3118 } 3119 retval = IP_FW_PASS; 3120 l = 0; /* exit inner loop */ 3121 done = 1; /* exit outer loop */ 3122 break; 3123 3124 #ifdef INET6 3125 case O_FORWARD_IP6: 3126 if (args->flags & IPFW_ARGS_ETHER) 3127 break; /* not valid on layer2 pkts */ 3128 if (q != f || 3129 dyn_info.direction == MATCH_FORWARD) { 3130 struct sockaddr_in6 *sin6; 3131 3132 sin6 = &(((ipfw_insn_sa6 *)cmd)->sa); 3133 args->flags |= IPFW_ARGS_NH6PTR; 3134 args->next_hop6 = sin6; 3135 } 3136 retval = IP_FW_PASS; 3137 l = 0; /* 
exit inner loop */ 3138 done = 1; /* exit outer loop */ 3139 break; 3140 #endif 3141 3142 case O_NETGRAPH: 3143 case O_NGTEE: 3144 set_match(args, f_pos, chain); 3145 args->rule.info = TARG(cmd->arg1, netgraph); 3146 if (V_fw_one_pass) 3147 args->rule.info |= IPFW_ONEPASS; 3148 retval = (cmd->opcode == O_NETGRAPH) ? 3149 IP_FW_NETGRAPH : IP_FW_NGTEE; 3150 l = 0; /* exit inner loop */ 3151 done = 1; /* exit outer loop */ 3152 break; 3153 3154 case O_SETFIB: { 3155 uint32_t fib; 3156 3157 IPFW_INC_RULE_COUNTER(f, pktlen); 3158 fib = TARG(cmd->arg1, fib) & 0x7FFF; 3159 if (fib >= rt_numfibs) 3160 fib = 0; 3161 M_SETFIB(m, fib); 3162 args->f_id.fib = fib; /* XXX */ 3163 l = 0; /* exit inner loop */ 3164 break; 3165 } 3166 3167 case O_SETDSCP: { 3168 uint16_t code; 3169 3170 code = TARG(cmd->arg1, dscp) & 0x3F; 3171 l = 0; /* exit inner loop */ 3172 if (is_ipv4) { 3173 uint16_t old; 3174 3175 old = *(uint16_t *)ip; 3176 ip->ip_tos = (code << 2) | 3177 (ip->ip_tos & 0x03); 3178 ip->ip_sum = cksum_adjust(ip->ip_sum, 3179 old, *(uint16_t *)ip); 3180 } else if (is_ipv6) { 3181 /* update cached value */ 3182 args->f_id.flow_id6 = 3183 ntohl(*(uint32_t *)ip) & ~0x0FC00000; 3184 args->f_id.flow_id6 |= code << 22; 3185 3186 *((uint32_t *)ip) = 3187 htonl(args->f_id.flow_id6); 3188 } else 3189 break; 3190 3191 IPFW_INC_RULE_COUNTER(f, pktlen); 3192 break; 3193 } 3194 3195 case O_NAT: 3196 l = 0; /* exit inner loop */ 3197 done = 1; /* exit outer loop */ 3198 /* 3199 * Ensure that we do not invoke NAT handler for 3200 * non IPv4 packets. Libalias expects only IPv4. 
3201 */ 3202 if (!is_ipv4 || !IPFW_NAT_LOADED) { 3203 retval = IP_FW_DENY; 3204 break; 3205 } 3206 3207 struct cfg_nat *t; 3208 int nat_id; 3209 3210 args->rule.info = 0; 3211 set_match(args, f_pos, chain); 3212 /* Check if this is 'global' nat rule */ 3213 if (cmd->arg1 == IP_FW_NAT44_GLOBAL) { 3214 retval = ipfw_nat_ptr(args, NULL, m); 3215 break; 3216 } 3217 t = ((ipfw_insn_nat *)cmd)->nat; 3218 if (t == NULL) { 3219 nat_id = TARG(cmd->arg1, nat); 3220 t = (*lookup_nat_ptr)(&chain->nat, nat_id); 3221 3222 if (t == NULL) { 3223 retval = IP_FW_DENY; 3224 break; 3225 } 3226 if (cmd->arg1 != IP_FW_TARG) 3227 ((ipfw_insn_nat *)cmd)->nat = t; 3228 } 3229 retval = ipfw_nat_ptr(args, t, m); 3230 break; 3231 3232 case O_REASS: { 3233 int ip_off; 3234 3235 l = 0; /* in any case exit inner loop */ 3236 if (is_ipv6) /* IPv6 is not supported yet */ 3237 break; 3238 IPFW_INC_RULE_COUNTER(f, pktlen); 3239 ip_off = ntohs(ip->ip_off); 3240 3241 /* if not fragmented, go to next rule */ 3242 if ((ip_off & (IP_MF | IP_OFFMASK)) == 0) 3243 break; 3244 3245 args->m = m = ip_reass(m); 3246 3247 /* 3248 * do IP header checksum fixup. 3249 */ 3250 if (m == NULL) { /* fragment got swallowed */ 3251 retval = IP_FW_DENY; 3252 } else { /* good, packet complete */ 3253 int hlen; 3254 3255 ip = mtod(m, struct ip *); 3256 hlen = ip->ip_hl << 2; 3257 ip->ip_sum = 0; 3258 if (hlen == sizeof(struct ip)) 3259 ip->ip_sum = in_cksum_hdr(ip); 3260 else 3261 ip->ip_sum = in_cksum(m, hlen); 3262 retval = IP_FW_REASS; 3263 args->rule.info = 0; 3264 set_match(args, f_pos, chain); 3265 } 3266 done = 1; /* exit outer loop */ 3267 break; 3268 } 3269 case O_EXTERNAL_ACTION: 3270 l = 0; /* in any case exit inner loop */ 3271 retval = ipfw_run_eaction(chain, args, 3272 cmd, &done); 3273 /* 3274 * If both @retval and @done are zero, 3275 * consider this as rule matching and 3276 * update counters. 
3277 */ 3278 if (retval == 0 && done == 0) { 3279 IPFW_INC_RULE_COUNTER(f, pktlen); 3280 /* 3281 * Reset the result of the last 3282 * dynamic state lookup. 3283 * External action can change 3284 * @args content, and it may be 3285 * used for new state lookup later. 3286 */ 3287 DYN_INFO_INIT(&dyn_info); 3288 } 3289 break; 3290 3291 default: 3292 panic("-- unknown opcode %d\n", cmd->opcode); 3293 } /* end of switch() on opcodes */ 3294 /* 3295 * if we get here with l=0, then match is irrelevant. 3296 */ 3297 3298 if (cmd->len & F_NOT) 3299 match = !match; 3300 3301 if (match) { 3302 if (cmd->len & F_OR) 3303 skip_or = 1; 3304 } else { 3305 if (!(cmd->len & F_OR)) /* not an OR block, */ 3306 break; /* try next rule */ 3307 } 3308 3309 } /* end of inner loop, scan opcodes */ 3310 #undef PULLUP_LEN 3311 #undef PULLUP_LEN_LOCKED 3312 3313 if (done) 3314 break; 3315 3316 /* next_rule:; */ /* try next rule */ 3317 3318 } /* end of outer for, scan rules */ 3319 3320 if (done) { 3321 struct ip_fw *rule = chain->map[f_pos]; 3322 /* Update statistics */ 3323 IPFW_INC_RULE_COUNTER(rule, pktlen); 3324 IPFW_PROBE(rule__matched, retval, 3325 is_ipv4 ? AF_INET : AF_INET6, 3326 is_ipv4 ? (uintptr_t)&src_ip : 3327 (uintptr_t)&args->f_id.src_ip6, 3328 is_ipv4 ? (uintptr_t)&dst_ip : 3329 (uintptr_t)&args->f_id.dst_ip6, 3330 args, rule); 3331 } else { 3332 retval = IP_FW_DENY; 3333 printf("ipfw: ouch!, skip past end of rules, denying packet\n"); 3334 } 3335 IPFW_PF_RUNLOCK(chain); 3336 #ifdef __FreeBSD__ 3337 if (ucred_cache != NULL) 3338 crfree(ucred_cache); 3339 #endif 3340 return (retval); 3341 3342 pullup_failed: 3343 if (V_fw_verbose) 3344 printf("ipfw: pullup failed\n"); 3345 return (IP_FW_DENY); 3346 } 3347 3348 /* 3349 * Set maximum number of tables that can be used in given VNET ipfw instance. 
 */
#ifdef SYSCTL_NODE
static int
sysctl_ipfw_table_num(SYSCTL_HANDLER_ARGS)
{
	int error;
	unsigned int ntables;

	/* Export the current limit; a write request may replace it below. */
	ntables = V_fw_tables_max;

	error = sysctl_handle_int(oidp, &ntables, 0, req);
	/* Read operation or some error */
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	/* New value supplied by the caller: try to apply it. */
	return (ipfw_resize_tables(&V_layer3_chain, ntables));
}

/*
 * Switches table namespace between global and per-set.
 */
static int
sysctl_ipfw_tables_sets(SYSCTL_HANDLER_ARGS)
{
	int error;
	unsigned int sets;

	/* Export the current mode; a write request may replace it below. */
	sets = V_fw_tables_sets;

	error = sysctl_handle_int(oidp, &sets, 0, req);
	/* Read operation or some error */
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	/* New value supplied by the caller: try to switch namespaces. */
	return (ipfw_switch_tables_namespace(&V_layer3_chain, sets));
}
#endif

/*
 * Module and VNET glue
 */

/*
 * Stuff that must be initialised only on boot or module load
 */
static int
ipfw_init(void)
{
	int error = 0;

	/*
	 * Only print out this stuff the first time around,
	 * when called from the sysinit code.
	 * The printf() calls below assemble one banner line describing
	 * the compile-time options this kernel was built with.
	 */
	printf("ipfw2 "
#ifdef INET6
		"(+ipv6) "
#endif
		"initialized, divert %s, nat %s, "
		"default to %s, logging ",
#ifdef IPDIVERT
		"enabled",
#else
		"loadable",
#endif
#ifdef IPFIREWALL_NAT
		"enabled",
#else
		"loadable",
#endif
		default_to_accept ? "accept" : "deny");

	/*
	 * Note: V_xxx variables can be accessed here but the vnet specific
	 * initializer may not have been called yet for the VIMAGE case.
	 * Tuneables will have been processed. We will print out values for
	 * the default vnet.
	 * XXX This should all be rationalized AFTER 8.0
	 */
	if (V_fw_verbose == 0)
		printf("disabled\n");
	else if (V_verbose_limit == 0)
		printf("unlimited\n");
	else
		printf("limited to %d packets/entry by default\n",
		    V_verbose_limit);

	/* Check user-supplied table count for validness */
	if (default_fw_tables > IPFW_TABLES_MAX)
		default_fw_tables = IPFW_TABLES_MAX;

	/* One-time (non-vnet) subsystem setup. */
	ipfw_init_sopt_handler();
	ipfw_init_obj_rewriter();
	ipfw_iface_init();
	return (error);
}

/*
 * Called for the removal of the last instance only on module unload.
 * Tears down, in reverse order, what ipfw_init() set up.
 */
static void
ipfw_destroy(void)
{

	ipfw_iface_destroy();
	ipfw_destroy_sopt_handler();
	ipfw_destroy_obj_rewriter();
	printf("IP firewall unloaded\n");
}

/*
 * Stuff that must be initialized for every instance
 * (including the first of course).
 */
static int
vnet_ipfw_init(const void *unused)
{
	int error, first;
	struct ip_fw *rule = NULL;
	struct ip_fw_chain *chain;

	chain = &V_layer3_chain;

	/* first != 0 only when initializing the default vnet. */
	first = IS_DEFAULT_VNET(curvnet) ? 1 : 0;

	/* First set up some values that are compile time options */
	V_autoinc_step = 100;	/* bounded to 1..1000 in add_rule() */
	V_fw_deny_unknown_exthdrs = 1;
#ifdef IPFIREWALL_VERBOSE
	V_fw_verbose = 1;
#endif
#ifdef IPFIREWALL_VERBOSE_LIMIT
	V_verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
#endif
#ifdef IPFIREWALL_NAT
	LIST_INIT(&chain->nat);
#endif

	/* Init shared services hash table */
	ipfw_init_srv(chain);

	ipfw_init_counters();
	/* Set initial number of tables */
	V_fw_tables_max = default_fw_tables;
	error = ipfw_init_tables(chain, first);
	if (error) {
		printf("ipfw2: setting up tables failed\n");
		/*
		 * NOTE(review): at this point rule is still NULL and
		 * chain->map has not been set up by this function --
		 * confirm both frees are really needed here (free(9)
		 * is documented to accept NULL).
		 */
		free(chain->map, M_IPFW);
		free(rule, M_IPFW);
		/* XXX the original error code is discarded here. */
		return (ENOSPC);
	}

	IPFW_LOCK_INIT(chain);

	/* fill and insert the default rule */
	rule = ipfw_alloc_rule(chain, sizeof(struct ip_fw));
	rule->flags |= IPFW_RULE_NOOPT;
	rule->cmd_len = 1;
	rule->cmd[0].len = 1;
	rule->cmd[0].opcode = default_to_accept ? O_ACCEPT : O_DENY;
	chain->default_rule = rule;
	ipfw_add_protected_rule(chain, rule, 0);

	/* Bring up the remaining per-vnet subsystems. */
	ipfw_dyn_init(chain);
	ipfw_eaction_init(chain, first);
#ifdef LINEAR_SKIPTO
	ipfw_init_skipto_cache(chain);
#endif
	ipfw_bpf_init(first);

	/* All set up; mark this instance ready to process packets. */
	V_ipfw_vnet_ready = 1;		/* Open for business */

	/*
	 * Hook the sockopt handler and pfil hooks for ipv4 and ipv6.
	 * Even if the latter two fail we still keep the module alive
	 * because the sockopt and layer2 paths are still useful.
	 * ipfw[6]_hook return 0 on success, ENOENT on failure,
	 * so we can ignore the exact return value and just set a flag.
	 *
	 * Note that V_fw[6]_enable are manipulated by a SYSCTL_PROC so
	 * changes in the underlying (per-vnet) variables trigger
	 * immediate hook()/unhook() calls.
	 * In layer2 we have the same behaviour, except that V_ether_ipfw
	 * is checked on each packet because there are no pfil hooks.
	 */
	V_ip_fw_ctl_ptr = ipfw_ctl3;
	error = ipfw_attach_hooks();
	return (error);
}

/*
 * Called for the removal of each instance.
 * Mirrors vnet_ipfw_init(): detach hooks first so no new packets or
 * sockopt callers enter, then reap all rules and free the subsystems.
 */
static int
vnet_ipfw_uninit(const void *unused)
{
	struct ip_fw *reap;
	struct ip_fw_chain *chain = &V_layer3_chain;
	int i, last;

	V_ipfw_vnet_ready = 0; /* tell new callers to go away */
	/*
	 * disconnect from ipv4, ipv6, layer2 and sockopt.
	 * Then grab, release and grab again the WLOCK so we make
	 * sure the update is propagated and nobody will be in.
	 */
	ipfw_detach_hooks();
	V_ip_fw_ctl_ptr = NULL;

	/* last != 0 only when tearing down the default vnet. */
	last = IS_DEFAULT_VNET(curvnet) ? 1 : 0;

	/* Lock barrier: flush out any thread still inside the chain. */
	IPFW_UH_WLOCK(chain);
	IPFW_UH_WUNLOCK(chain);

	ipfw_dyn_uninit(0);	/* run the callout_drain */

	IPFW_UH_WLOCK(chain);

	/* Collect every rule on a reap list, then free them unlocked. */
	reap = NULL;
	IPFW_WLOCK(chain);
	for (i = 0; i < chain->n_rules; i++)
		ipfw_reap_add(chain, &reap, chain->map[i]);
	free(chain->map, M_IPFW);
#ifdef LINEAR_SKIPTO
	ipfw_destroy_skipto_cache(chain);
#endif
	IPFW_WUNLOCK(chain);
	IPFW_UH_WUNLOCK(chain);
	ipfw_destroy_tables(chain, last);
	ipfw_eaction_uninit(chain, last);
	if (reap != NULL)
		ipfw_reap_rules(reap);
	vnet_ipfw_iface_destroy(chain);
	ipfw_destroy_srv(chain);
	IPFW_LOCK_DESTROY(chain);
	ipfw_dyn_uninit(1);	/* free the remaining parts */
	ipfw_destroy_counters();
	ipfw_bpf_uninit(last);
	return (0);
}

/*
 * Module event handler.
 * In general we have the choice of handling most of these events by the
 * event handler or by the (VNET_)SYS(UN)INIT handlers.
I have chosen to 3596 * use the SYSINIT handlers as they are more capable of expressing the 3597 * flow of control during module and vnet operations, so this is just 3598 * a skeleton. Note there is no SYSINIT equivalent of the module 3599 * SHUTDOWN handler, but we don't have anything to do in that case anyhow. 3600 */ 3601 static int 3602 ipfw_modevent(module_t mod, int type, void *unused) 3603 { 3604 int err = 0; 3605 3606 switch (type) { 3607 case MOD_LOAD: 3608 /* Called once at module load or 3609 * system boot if compiled in. */ 3610 break; 3611 case MOD_QUIESCE: 3612 /* Called before unload. May veto unloading. */ 3613 break; 3614 case MOD_UNLOAD: 3615 /* Called during unload. */ 3616 break; 3617 case MOD_SHUTDOWN: 3618 /* Called during system shutdown. */ 3619 break; 3620 default: 3621 err = EOPNOTSUPP; 3622 break; 3623 } 3624 return err; 3625 } 3626 3627 static moduledata_t ipfwmod = { 3628 "ipfw", 3629 ipfw_modevent, 3630 0 3631 }; 3632 3633 /* Define startup order. */ 3634 #define IPFW_SI_SUB_FIREWALL SI_SUB_PROTO_FIREWALL 3635 #define IPFW_MODEVENT_ORDER (SI_ORDER_ANY - 255) /* On boot slot in here. */ 3636 #define IPFW_MODULE_ORDER (IPFW_MODEVENT_ORDER + 1) /* A little later. */ 3637 #define IPFW_VNET_ORDER (IPFW_MODEVENT_ORDER + 2) /* Later still. */ 3638 3639 DECLARE_MODULE(ipfw, ipfwmod, IPFW_SI_SUB_FIREWALL, IPFW_MODEVENT_ORDER); 3640 FEATURE(ipfw_ctl3, "ipfw new sockopt calls"); 3641 MODULE_VERSION(ipfw, 3); 3642 /* should declare some dependencies here */ 3643 3644 /* 3645 * Starting up. Done in order after ipfwmod() has been called. 3646 * VNET_SYSINIT is also called for each existing vnet and each new vnet. 3647 */ 3648 SYSINIT(ipfw_init, IPFW_SI_SUB_FIREWALL, IPFW_MODULE_ORDER, 3649 ipfw_init, NULL); 3650 VNET_SYSINIT(vnet_ipfw_init, IPFW_SI_SUB_FIREWALL, IPFW_VNET_ORDER, 3651 vnet_ipfw_init, NULL); 3652 3653 /* 3654 * Closing up shop. These are done in REVERSE ORDER, but still 3655 * after ipfwmod() has been called. Not called on reboot. 
 * VNET_SYSUNINIT is also called for each exiting vnet as it exits,
 * or when the module is unloaded.
 */
SYSUNINIT(ipfw_destroy, IPFW_SI_SUB_FIREWALL, IPFW_MODULE_ORDER,
    ipfw_destroy, NULL);
VNET_SYSUNINIT(vnet_ipfw_uninit, IPFW_SI_SUB_FIREWALL, IPFW_VNET_ORDER,
    vnet_ipfw_uninit, NULL);
/* end of file */