1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 /* 32 * The FreeBSD IP packet firewall, main file 33 */ 34 35 #include "opt_ipfw.h" 36 #include "opt_ipdivert.h" 37 #include "opt_inet.h" 38 #ifndef INET 39 #error "IPFIREWALL requires INET" 40 #endif /* INET */ 41 #include "opt_inet6.h" 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/condvar.h> 46 #include <sys/counter.h> 47 #include <sys/eventhandler.h> 48 #include <sys/malloc.h> 49 #include <sys/mbuf.h> 50 #include <sys/kernel.h> 51 #include <sys/lock.h> 52 #include <sys/jail.h> 53 #include <sys/module.h> 54 #include <sys/priv.h> 55 #include <sys/proc.h> 56 #include <sys/rwlock.h> 57 #include <sys/rmlock.h> 58 #include <sys/sdt.h> 59 #include <sys/socket.h> 60 #include <sys/socketvar.h> 61 #include <sys/sysctl.h> 62 #include <sys/syslog.h> 63 #include <sys/ucred.h> 64 #include <net/ethernet.h> /* for ETHERTYPE_IP */ 65 #include <net/if.h> 66 #include <net/if_var.h> 67 #include <net/route.h> 68 #include <net/route/nhop.h> 69 #include <net/pfil.h> 70 #include <net/vnet.h> 71 72 #include <netpfil/pf/pf_mtag.h> 73 74 #include <netinet/in.h> 75 #include <netinet/in_var.h> 76 #include <netinet/in_pcb.h> 77 #include <netinet/ip.h> 78 #include <netinet/ip_var.h> 79 #include <netinet/ip_icmp.h> 80 #include <netinet/ip_fw.h> 81 #include <netinet/ip_carp.h> 82 #include <netinet/pim.h> 83 #include <netinet/tcp_var.h> 84 #include <netinet/udp.h> 85 #include <netinet/udp_var.h> 86 #include <netinet/sctp.h> 87 #include <netinet/sctp_crc32.h> 88 #include <netinet/sctp_header.h> 89 90 #include <netinet/ip6.h> 91 #include <netinet/icmp6.h> 92 #include <netinet/in_fib.h> 93 #ifdef INET6 94 #include <netinet6/in6_fib.h> 95 #include <netinet6/in6_pcb.h> 96 #include <netinet6/scope6_var.h> 97 #include <netinet6/ip6_var.h> 98 #endif 99 100 #include <net/if_gre.h> /* for struct grehdr */ 101 102 #include <netpfil/ipfw/ip_fw_private.h> 103 104 #include <machine/in_cksum.h> /* XXX for in_cksum */ 105 106 #ifdef MAC 107 #include <security/mac/mac_framework.h> 108 #endif 109 110 #define IPFW_PROBE(probe, arg0, arg1, 
arg2, arg3, arg4, arg5) \ 111 SDT_PROBE6(ipfw, , , probe, arg0, arg1, arg2, arg3, arg4, arg5) 112 113 SDT_PROVIDER_DEFINE(ipfw); 114 SDT_PROBE_DEFINE6(ipfw, , , rule__matched, 115 "int", /* retval */ 116 "int", /* af */ 117 "void *", /* src addr */ 118 "void *", /* dst addr */ 119 "struct ip_fw_args *", /* args */ 120 "struct ip_fw *" /* rule */); 121 122 /* 123 * static variables followed by global ones. 124 * All ipfw global variables are here. 125 */ 126 127 VNET_DEFINE_STATIC(int, fw_deny_unknown_exthdrs); 128 #define V_fw_deny_unknown_exthdrs VNET(fw_deny_unknown_exthdrs) 129 130 VNET_DEFINE_STATIC(int, fw_permit_single_frag6) = 1; 131 #define V_fw_permit_single_frag6 VNET(fw_permit_single_frag6) 132 133 #ifdef IPFIREWALL_DEFAULT_TO_ACCEPT 134 static int default_to_accept = 1; 135 #else 136 static int default_to_accept; 137 #endif 138 139 VNET_DEFINE(int, autoinc_step); 140 VNET_DEFINE(int, fw_one_pass) = 1; 141 142 VNET_DEFINE(unsigned int, fw_tables_max); 143 VNET_DEFINE(unsigned int, fw_tables_sets) = 0; /* Don't use set-aware tables */ 144 /* Use 128 tables by default */ 145 static unsigned int default_fw_tables = IPFW_TABLES_DEFAULT; 146 147 static int jump_lookup_pos(struct ip_fw_chain *chain, struct ip_fw *f, int num, 148 int tablearg, int jump_backwards); 149 #ifndef LINEAR_SKIPTO 150 static int jump_cached(struct ip_fw_chain *chain, struct ip_fw *f, int num, 151 int tablearg, int jump_backwards); 152 #define JUMP(ch, f, num, targ, back) jump_cached(ch, f, num, targ, back) 153 #else 154 #define JUMP(ch, f, num, targ, back) jump_lookup_pos(ch, f, num, targ, back) 155 #endif 156 157 /* 158 * Each rule belongs to one of 32 different sets (0..31). 159 * The variable set_disable contains one bit per set. 160 * If the bit is set, all rules in the corresponding set 161 * are disabled. Set RESVD_SET(31) is reserved for the default rule 162 * and rules that are not deleted by the flush command, 163 * and CANNOT be disabled. 164 * Rules in set RESVD_SET can only be deleted individually. 165 */ 166 VNET_DEFINE(u_int32_t, set_disable); 167 #define V_set_disable VNET(set_disable) 168 169 VNET_DEFINE(int, fw_verbose); 170 /* counter for ipfw_log(NULL...) 
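 * calls, i.e. log requests that carry no rule reference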
*/ 171 VNET_DEFINE(u_int64_t, norule_counter); 172 VNET_DEFINE(int, verbose_limit); 173 174 /* layer3_chain contains the list of rules for layer 3 */ 175 VNET_DEFINE(struct ip_fw_chain, layer3_chain); 176 177 /* ipfw_vnet_ready controls when we are open for business */ 178 VNET_DEFINE(int, ipfw_vnet_ready) = 0; 179 180 VNET_DEFINE(int, ipfw_nat_ready) = 0; 181 182 ipfw_nat_t *ipfw_nat_ptr = NULL; 183 struct cfg_nat *(*lookup_nat_ptr)(struct nat_list *, int); 184 ipfw_nat_cfg_t *ipfw_nat_cfg_ptr; 185 ipfw_nat_cfg_t *ipfw_nat_del_ptr; 186 ipfw_nat_cfg_t *ipfw_nat_get_cfg_ptr; 187 ipfw_nat_cfg_t *ipfw_nat_get_log_ptr; 188 189 #ifdef SYSCTL_NODE 190 uint32_t dummy_def = IPFW_DEFAULT_RULE; 191 static int sysctl_ipfw_table_num(SYSCTL_HANDLER_ARGS); 192 static int sysctl_ipfw_tables_sets(SYSCTL_HANDLER_ARGS); 193 194 SYSBEGIN(f3) 195 196 SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 197 "Firewall"); 198 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, one_pass, 199 CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE3, &VNET_NAME(fw_one_pass), 0, 200 "Only do a single pass through ipfw when using dummynet(4)"); 201 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, autoinc_step, 202 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(autoinc_step), 0, 203 "Rule number auto-increment step"); 204 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose, 205 CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE3, &VNET_NAME(fw_verbose), 0, 206 "Log matches to ipfw rules"); 207 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, 208 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(verbose_limit), 0, 209 "Set upper limit of matches of ipfw rules logged"); 210 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, default_rule, CTLFLAG_RD, 211 &dummy_def, 0, 212 "The default/max possible rule number."); 213 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, tables_max, 214 CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 215 0, 0, sysctl_ipfw_table_num, "IU", 216 "Maximum number of concurrently used tables"); 217 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, tables_sets, 218 CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 219 0, 0, sysctl_ipfw_tables_sets, "IU", 220 "Use per-set namespace for tables"); 221 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, default_to_accept, CTLFLAG_RDTUN, 222 &default_to_accept, 0, 223 "Make the default rule accept all packets."); 224 TUNABLE_INT("net.inet.ip.fw.tables_max", (int *)&default_fw_tables); 225 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count, 226 CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(layer3_chain.n_rules), 0, 227 "Number of static rules"); 228 229 #ifdef INET6 230 SYSCTL_DECL(_net_inet6_ip6); 231 SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, fw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 232 "Firewall"); 233 SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, deny_unknown_exthdrs, 234 CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE, 235 &VNET_NAME(fw_deny_unknown_exthdrs), 0, 236 "Deny packets with unknown IPv6 Extension Headers"); 237 SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, permit_single_frag6, 238 CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE, 239 &VNET_NAME(fw_permit_single_frag6), 0, 240 "Permit single packet IPv6 fragments"); 241 #endif /* INET6 */ 242 243 SYSEND 244 245 #endif /* SYSCTL_NODE */ 246 247 /* 248 * Some macros used in the various matching options. 
249 * L3HDR maps an ipv4 pointer into a layer3 header pointer of type T 250 * Other macros just cast void * into the appropriate type 251 */ 252 #define L3HDR(T, ip) ((T *)((u_int32_t *)(ip) + (ip)->ip_hl)) 253 #define TCP(p) ((struct tcphdr *)(p)) 254 #define SCTP(p) ((struct sctphdr *)(p)) 255 #define UDP(p) ((struct udphdr *)(p)) 256 #define ICMP(p) ((struct icmphdr *)(p)) 257 #define ICMP6(p) ((struct icmp6_hdr *)(p)) 258 259 static __inline int 260 icmptype_match(struct icmphdr *icmp, ipfw_insn_u32 *cmd) 261 { 262 int type = icmp->icmp_type; 263 264 return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1<<type)) ); 265 } 266 267 #define TT ( (1 << ICMP_ECHO) | (1 << ICMP_ROUTERSOLICIT) | \ 268 (1 << ICMP_TSTAMP) | (1 << ICMP_IREQ) | (1 << ICMP_MASKREQ) ) 269 270 static int 271 is_icmp_query(struct icmphdr *icmp) 272 { 273 int type = icmp->icmp_type; 274 275 return (type <= ICMP_MAXTYPE && (TT & (1<<type)) ); 276 } 277 #undef TT 278 279 /* 280 * The following checks use two arrays of 8 or 16 bits to store the 281 * bits that we want set or clear, respectively. They are in the 282 * low and high half of cmd->arg1 or cmd->d[0]. 283 * 284 * We scan options and store the bits we find set. We succeed if 285 * 286 * (want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear 287 * 288 * The code is sometimes optimized not to store additional variables. 289 */ 290 291 static int 292 flags_match(ipfw_insn *cmd, u_int8_t bits) 293 { 294 u_char want_clear; 295 bits = ~bits; 296 297 if ( ((cmd->arg1 & 0xff) & bits) != 0) 298 return 0; /* some bits we want set were clear */ 299 want_clear = (cmd->arg1 >> 8) & 0xff; 300 if ( (want_clear & bits) != want_clear) 301 return 0; /* some bits we want clear were set */ 302 return 1; 303 } 304 305 static int 306 ipopts_match(struct ip *ip, ipfw_insn *cmd) 307 { 308 int optlen, bits = 0; 309 u_char *cp = (u_char *)(ip + 1); 310 int x = (ip->ip_hl << 2) - sizeof (struct ip); 311 312 for (; x > 0; x -= optlen, cp += optlen) { 313 int opt = cp[IPOPT_OPTVAL]; 314 315 if (opt == IPOPT_EOL) 316 break; 317 if (opt == IPOPT_NOP) 318 optlen = 1; 319 else { 320 optlen = cp[IPOPT_OLEN]; 321 if (optlen <= 0 || optlen > x) 322 return 0; /* invalid or truncated */ 323 } 324 switch (opt) { 325 default: 326 break; 327 328 case IPOPT_LSRR: 329 bits |= IP_FW_IPOPT_LSRR; 330 break; 331 332 case IPOPT_SSRR: 333 bits |= IP_FW_IPOPT_SSRR; 334 break; 335 336 case IPOPT_RR: 337 bits |= IP_FW_IPOPT_RR; 338 break; 339 340 case IPOPT_TS: 341 bits |= IP_FW_IPOPT_TS; 342 break; 343 } 344 } 345 return (flags_match(cmd, bits)); 346 } 347 348 /* 349 * Parse TCP options. The logic copied from tcp_dooptions(). 
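 * Returns a bitmask of IP_FW_TCPOPT_* flags; when 'mss' is non-NULL the
 * advertised MSS value is also stored there.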
350 */ 351 static int 352 tcpopts_parse(const struct tcphdr *tcp, uint16_t *mss) 353 { 354 const u_char *cp = (const u_char *)(tcp + 1); 355 int optlen, bits = 0; 356 int cnt = (tcp->th_off << 2) - sizeof(struct tcphdr); 357 358 for (; cnt > 0; cnt -= optlen, cp += optlen) { 359 int opt = cp[0]; 360 if (opt == TCPOPT_EOL) 361 break; 362 if (opt == TCPOPT_NOP) 363 optlen = 1; 364 else { 365 if (cnt < 2) 366 break; 367 optlen = cp[1]; 368 if (optlen < 2 || optlen > cnt) 369 break; 370 } 371 372 switch (opt) { 373 default: 374 break; 375 376 case TCPOPT_MAXSEG: 377 if (optlen != TCPOLEN_MAXSEG) 378 break; 379 bits |= IP_FW_TCPOPT_MSS; 380 if (mss != NULL) 381 *mss = be16dec(cp + 2); 382 break; 383 384 case TCPOPT_WINDOW: 385 if (optlen == TCPOLEN_WINDOW) 386 bits |= IP_FW_TCPOPT_WINDOW; 387 break; 388 389 case TCPOPT_SACK_PERMITTED: 390 if (optlen == TCPOLEN_SACK_PERMITTED) 391 bits |= IP_FW_TCPOPT_SACK; 392 break; 393 394 case TCPOPT_SACK: 395 if (optlen > 2 && (optlen - 2) % TCPOLEN_SACK == 0) 396 bits |= IP_FW_TCPOPT_SACK; 397 break; 398 399 case TCPOPT_TIMESTAMP: 400 if (optlen == TCPOLEN_TIMESTAMP) 401 bits |= IP_FW_TCPOPT_TS; 402 break; 403 } 404 } 405 return (bits); 406 } 407 408 static int 409 tcpopts_match(struct tcphdr *tcp, ipfw_insn *cmd) 410 { 411 412 return (flags_match(cmd, tcpopts_parse(tcp, NULL))); 413 } 414 415 static int 416 iface_match(struct ifnet *ifp, ipfw_insn_if *cmd, struct ip_fw_chain *chain, 417 uint32_t *tablearg) 418 { 419 420 if (ifp == NULL) /* no iface with this packet, match fails */ 421 return (0); 422 423 /* Check by name or by IP address */ 424 if (cmd->name[0] != '\0') { /* match by name */ 425 if (cmd->name[0] == '\1') /* use tablearg to match */ 426 return ipfw_lookup_table(chain, cmd->p.kidx, 0, 427 &ifp->if_index, tablearg); 428 /* Check name */ 429 if (cmd->p.glob) { 430 if (fnmatch(cmd->name, ifp->if_xname, 0) == 0) 431 return(1); 432 } else { 433 if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0) 434 return(1); 435 } 436 } else { 437 #if !defined(USERSPACE) && defined(__FreeBSD__) /* and OSX too ? */ 438 struct ifaddr *ia; 439 440 NET_EPOCH_ASSERT(); 441 442 CK_STAILQ_FOREACH(ia, &ifp->if_addrhead, ifa_link) { 443 if (ia->ifa_addr->sa_family != AF_INET) 444 continue; 445 if (cmd->p.ip.s_addr == ((struct sockaddr_in *) 446 (ia->ifa_addr))->sin_addr.s_addr) 447 return (1); /* match */ 448 } 449 #endif /* __FreeBSD__ */ 450 } 451 return(0); /* no match, fail ... */ 452 } 453 454 /* 455 * The verify_path function checks if a route to the src exists and 456 * if it is reachable via ifp (when provided). 457 * 458 * The 'verrevpath' option checks that the interface that an IP packet 459 * arrives on is the same interface that traffic destined for the 460 * packet's source address would be routed out of. 461 * The 'versrcreach' option just checks that the source address is 462 * reachable via any route (except default) in the routing table. 463 * These two are a measure to block forged packets. This is also 464 * commonly known as "anti-spoofing" or Unicast Reverse Path 465 * Forwarding (Unicast RFP) in Cisco-ese. The name of the knobs 466 * is purposely reminiscent of the Cisco IOS command, 467 * 468 * ip verify unicast reverse-path 469 * ip verify unicast source reachable-via any 470 * 471 * which implements the same functionality. But note that the syntax 472 * is misleading, and the check may be performed on all IP packets 473 * whether unicast, multicast, or broadcast. 
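 * Returns 1 if the check passes, 0 otherwise.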
474 */ 475 static int 476 verify_path(struct in_addr src, struct ifnet *ifp, u_int fib) 477 { 478 #if defined(USERSPACE) || !defined(__FreeBSD__) 479 return 0; 480 #else 481 struct nhop_object *nh; 482 483 nh = fib4_lookup(fib, src, 0, NHR_NONE, 0); 484 if (nh == NULL) 485 return (0); 486 487 /* 488 * If ifp is provided, check for equality with rtentry. 489 * We should use rt->rt_ifa->ifa_ifp, instead of rt->rt_ifp, 490 * in order to pass packets injected back by if_simloop(): 491 * routing entry (via lo0) for our own address 492 * may exist, so we need to handle routing assymetry. 493 */ 494 if (ifp != NULL && ifp != nh->nh_aifp) 495 return (0); 496 497 /* if no ifp provided, check if rtentry is not default route */ 498 if (ifp == NULL && (nh->nh_flags & NHF_DEFAULT) != 0) 499 return (0); 500 501 /* or if this is a blackhole/reject route */ 502 if (ifp == NULL && (nh->nh_flags & (NHF_REJECT|NHF_BLACKHOLE)) != 0) 503 return (0); 504 505 /* found valid route */ 506 return 1; 507 #endif /* __FreeBSD__ */ 508 } 509 510 /* 511 * Generate an SCTP packet containing an ABORT chunk. The verification tag 512 * is given by vtag. The T-bit is set in the ABORT chunk if and only if 513 * reflected is not 0. 514 */ 515 516 static struct mbuf * 517 ipfw_send_abort(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t vtag, 518 int reflected) 519 { 520 struct mbuf *m; 521 struct ip *ip; 522 #ifdef INET6 523 struct ip6_hdr *ip6; 524 #endif 525 struct sctphdr *sctp; 526 struct sctp_chunkhdr *chunk; 527 u_int16_t hlen, plen, tlen; 528 529 MGETHDR(m, M_NOWAIT, MT_DATA); 530 if (m == NULL) 531 return (NULL); 532 533 M_SETFIB(m, id->fib); 534 #ifdef MAC 535 if (replyto != NULL) 536 mac_netinet_firewall_reply(replyto, m); 537 else 538 mac_netinet_firewall_send(m); 539 #else 540 (void)replyto; /* don't warn about unused arg */ 541 #endif 542 543 switch (id->addr_type) { 544 case 4: 545 hlen = sizeof(struct ip); 546 break; 547 #ifdef INET6 548 case 6: 549 hlen = sizeof(struct ip6_hdr); 550 break; 551 #endif 552 default: 553 /* XXX: log me?!? 
*/ 554 FREE_PKT(m); 555 return (NULL); 556 } 557 plen = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 558 tlen = hlen + plen; 559 m->m_data += max_linkhdr; 560 m->m_flags |= M_SKIP_FIREWALL; 561 m->m_pkthdr.len = m->m_len = tlen; 562 m->m_pkthdr.rcvif = NULL; 563 bzero(m->m_data, tlen); 564 565 switch (id->addr_type) { 566 case 4: 567 ip = mtod(m, struct ip *); 568 569 ip->ip_v = 4; 570 ip->ip_hl = sizeof(struct ip) >> 2; 571 ip->ip_tos = IPTOS_LOWDELAY; 572 ip->ip_len = htons(tlen); 573 ip->ip_id = htons(0); 574 ip->ip_off = htons(0); 575 ip->ip_ttl = V_ip_defttl; 576 ip->ip_p = IPPROTO_SCTP; 577 ip->ip_sum = 0; 578 ip->ip_src.s_addr = htonl(id->dst_ip); 579 ip->ip_dst.s_addr = htonl(id->src_ip); 580 581 sctp = (struct sctphdr *)(ip + 1); 582 break; 583 #ifdef INET6 584 case 6: 585 ip6 = mtod(m, struct ip6_hdr *); 586 587 ip6->ip6_vfc = IPV6_VERSION; 588 ip6->ip6_plen = htons(plen); 589 ip6->ip6_nxt = IPPROTO_SCTP; 590 ip6->ip6_hlim = IPV6_DEFHLIM; 591 ip6->ip6_src = id->dst_ip6; 592 ip6->ip6_dst = id->src_ip6; 593 594 sctp = (struct sctphdr *)(ip6 + 1); 595 break; 596 #endif 597 } 598 599 sctp->src_port = htons(id->dst_port); 600 sctp->dest_port = htons(id->src_port); 601 sctp->v_tag = htonl(vtag); 602 sctp->checksum = htonl(0); 603 604 chunk = (struct sctp_chunkhdr *)(sctp + 1); 605 chunk->chunk_type = SCTP_ABORT_ASSOCIATION; 606 chunk->chunk_flags = 0; 607 if (reflected != 0) { 608 chunk->chunk_flags |= SCTP_HAD_NO_TCB; 609 } 610 chunk->chunk_length = htons(sizeof(struct sctp_chunkhdr)); 611 612 sctp->checksum = sctp_calculate_cksum(m, hlen); 613 614 return (m); 615 } 616 617 /* 618 * Generate a TCP packet, containing either a RST or a keepalive. 619 * When flags & TH_RST, we are sending a RST packet, because of a 620 * "reset" action matched the packet. 621 * Otherwise we are sending a keepalive, and flags & TH_ 622 * The 'replyto' mbuf is the mbuf being replied to, if any, and is required 623 * so that MAC can label the reply appropriately. 624 */ 625 struct mbuf * 626 ipfw_send_pkt(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t seq, 627 u_int32_t ack, int flags) 628 { 629 struct mbuf *m = NULL; /* stupid compiler */ 630 struct ip *h = NULL; /* stupid compiler */ 631 #ifdef INET6 632 struct ip6_hdr *h6 = NULL; 633 #endif 634 struct tcphdr *th = NULL; 635 int len, dir; 636 637 MGETHDR(m, M_NOWAIT, MT_DATA); 638 if (m == NULL) 639 return (NULL); 640 641 M_SETFIB(m, id->fib); 642 #ifdef MAC 643 if (replyto != NULL) 644 mac_netinet_firewall_reply(replyto, m); 645 else 646 mac_netinet_firewall_send(m); 647 #else 648 (void)replyto; /* don't warn about unused arg */ 649 #endif 650 651 switch (id->addr_type) { 652 case 4: 653 len = sizeof(struct ip) + sizeof(struct tcphdr); 654 break; 655 #ifdef INET6 656 case 6: 657 len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 658 break; 659 #endif 660 default: 661 /* XXX: log me?!? 
*/ 662 FREE_PKT(m); 663 return (NULL); 664 } 665 dir = ((flags & (TH_SYN | TH_RST)) == TH_SYN); 666 667 m->m_data += max_linkhdr; 668 m->m_flags |= M_SKIP_FIREWALL; 669 m->m_pkthdr.len = m->m_len = len; 670 m->m_pkthdr.rcvif = NULL; 671 bzero(m->m_data, len); 672 673 switch (id->addr_type) { 674 case 4: 675 h = mtod(m, struct ip *); 676 677 /* prepare for checksum */ 678 h->ip_p = IPPROTO_TCP; 679 h->ip_len = htons(sizeof(struct tcphdr)); 680 if (dir) { 681 h->ip_src.s_addr = htonl(id->src_ip); 682 h->ip_dst.s_addr = htonl(id->dst_ip); 683 } else { 684 h->ip_src.s_addr = htonl(id->dst_ip); 685 h->ip_dst.s_addr = htonl(id->src_ip); 686 } 687 688 th = (struct tcphdr *)(h + 1); 689 break; 690 #ifdef INET6 691 case 6: 692 h6 = mtod(m, struct ip6_hdr *); 693 694 /* prepare for checksum */ 695 h6->ip6_nxt = IPPROTO_TCP; 696 h6->ip6_plen = htons(sizeof(struct tcphdr)); 697 if (dir) { 698 h6->ip6_src = id->src_ip6; 699 h6->ip6_dst = id->dst_ip6; 700 } else { 701 h6->ip6_src = id->dst_ip6; 702 h6->ip6_dst = id->src_ip6; 703 } 704 705 th = (struct tcphdr *)(h6 + 1); 706 break; 707 #endif 708 } 709 710 if (dir) { 711 th->th_sport = htons(id->src_port); 712 th->th_dport = htons(id->dst_port); 713 } else { 714 th->th_sport = htons(id->dst_port); 715 th->th_dport = htons(id->src_port); 716 } 717 th->th_off = sizeof(struct tcphdr) >> 2; 718 719 if (flags & TH_RST) { 720 if (flags & TH_ACK) { 721 th->th_seq = htonl(ack); 722 th->th_flags = TH_RST; 723 } else { 724 if (flags & TH_SYN) 725 seq++; 726 th->th_ack = htonl(seq); 727 th->th_flags = TH_RST | TH_ACK; 728 } 729 } else { 730 /* 731 * Keepalive - use caller provided sequence numbers 732 */ 733 th->th_seq = htonl(seq); 734 th->th_ack = htonl(ack); 735 th->th_flags = TH_ACK; 736 } 737 738 switch (id->addr_type) { 739 case 4: 740 th->th_sum = in_cksum(m, len); 741 742 /* finish the ip header */ 743 h->ip_v = 4; 744 h->ip_hl = sizeof(*h) >> 2; 745 h->ip_tos = IPTOS_LOWDELAY; 746 h->ip_off = htons(0); 747 h->ip_len = htons(len); 748 h->ip_ttl = V_ip_defttl; 749 h->ip_sum = 0; 750 break; 751 #ifdef INET6 752 case 6: 753 th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(*h6), 754 sizeof(struct tcphdr)); 755 756 /* finish the ip6 header */ 757 h6->ip6_vfc |= IPV6_VERSION; 758 h6->ip6_hlim = IPV6_DEFHLIM; 759 break; 760 #endif 761 } 762 763 return (m); 764 } 765 766 #ifdef INET6 767 /* 768 * ipv6 specific rules here... 
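 * (ICMPv6 type and flow-id matching, plus local-address and
 * reverse-path helpers)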
769 */ 770 static __inline int 771 icmp6type_match(int type, ipfw_insn_u32 *cmd) 772 { 773 return (type <= ICMP6_MAXTYPE && (cmd->d[type/32] & (1<<(type%32)) ) ); 774 } 775 776 static int 777 flow6id_match(int curr_flow, ipfw_insn_u32 *cmd) 778 { 779 int i; 780 for (i=0; i <= cmd->o.arg1; ++i) 781 if (curr_flow == cmd->d[i]) 782 return 1; 783 return 0; 784 } 785 786 /* support for IP6_*_ME opcodes */ 787 static const struct in6_addr lla_mask = {{{ 788 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 789 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 790 }}}; 791 792 static int 793 ipfw_localip6(struct in6_addr *in6) 794 { 795 struct rm_priotracker in6_ifa_tracker; 796 struct in6_ifaddr *ia; 797 798 if (IN6_IS_ADDR_MULTICAST(in6)) 799 return (0); 800 801 if (!IN6_IS_ADDR_LINKLOCAL(in6)) 802 return (in6_localip(in6)); 803 804 IN6_IFADDR_RLOCK(&in6_ifa_tracker); 805 CK_STAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) { 806 if (!IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr)) 807 continue; 808 if (IN6_ARE_MASKED_ADDR_EQUAL(&ia->ia_addr.sin6_addr, 809 in6, &lla_mask)) { 810 IN6_IFADDR_RUNLOCK(&in6_ifa_tracker); 811 return (1); 812 } 813 } 814 IN6_IFADDR_RUNLOCK(&in6_ifa_tracker); 815 return (0); 816 } 817 818 static int 819 verify_path6(struct in6_addr *src, struct ifnet *ifp, u_int fib) 820 { 821 struct nhop_object *nh; 822 823 if (IN6_IS_SCOPE_LINKLOCAL(src)) 824 return (1); 825 826 nh = fib6_lookup(fib, src, 0, NHR_NONE, 0); 827 if (nh == NULL) 828 return (0); 829 830 /* If ifp is provided, check for equality with route table. */ 831 if (ifp != NULL && ifp != nh->nh_aifp) 832 return (0); 833 834 /* if no ifp provided, check if rtentry is not default route */ 835 if (ifp == NULL && (nh->nh_flags & NHF_DEFAULT) != 0) 836 return (0); 837 838 /* or if this is a blackhole/reject route */ 839 if (ifp == NULL && (nh->nh_flags & (NHF_REJECT|NHF_BLACKHOLE)) != 0) 840 return (0); 841 842 /* found valid route */ 843 return 1; 844 } 845 846 static int 847 is_icmp6_query(int icmp6_type) 848 { 849 if ((icmp6_type <= ICMP6_MAXTYPE) && 850 (icmp6_type == ICMP6_ECHO_REQUEST || 851 icmp6_type == ICMP6_MEMBERSHIP_QUERY || 852 icmp6_type == ICMP6_WRUREQUEST || 853 icmp6_type == ICMP6_FQDN_QUERY || 854 icmp6_type == ICMP6_NI_QUERY)) 855 return (1); 856 857 return (0); 858 } 859 860 static int 861 map_icmp_unreach(int code) 862 { 863 864 /* RFC 7915 p4.2 */ 865 switch (code) { 866 case ICMP_UNREACH_NET: 867 case ICMP_UNREACH_HOST: 868 case ICMP_UNREACH_SRCFAIL: 869 case ICMP_UNREACH_NET_UNKNOWN: 870 case ICMP_UNREACH_HOST_UNKNOWN: 871 case ICMP_UNREACH_TOSNET: 872 case ICMP_UNREACH_TOSHOST: 873 return (ICMP6_DST_UNREACH_NOROUTE); 874 case ICMP_UNREACH_PORT: 875 return (ICMP6_DST_UNREACH_NOPORT); 876 default: 877 /* 878 * Map the rest of codes into admit prohibited. 879 * XXX: unreach proto should be mapped into ICMPv6 880 * parameter problem, but we use only unreach type. 
881 */ 882 return (ICMP6_DST_UNREACH_ADMIN); 883 } 884 } 885 886 static void 887 send_reject6(struct ip_fw_args *args, int code, u_int hlen, struct ip6_hdr *ip6) 888 { 889 struct mbuf *m; 890 891 m = args->m; 892 if (code == ICMP6_UNREACH_RST && args->f_id.proto == IPPROTO_TCP) { 893 struct tcphdr *tcp; 894 tcp = (struct tcphdr *)((char *)ip6 + hlen); 895 896 if ((tcp->th_flags & TH_RST) == 0) { 897 struct mbuf *m0; 898 m0 = ipfw_send_pkt(args->m, &(args->f_id), 899 ntohl(tcp->th_seq), ntohl(tcp->th_ack), 900 tcp->th_flags | TH_RST); 901 if (m0 != NULL) 902 ip6_output(m0, NULL, NULL, 0, NULL, NULL, 903 NULL); 904 } 905 FREE_PKT(m); 906 } else if (code == ICMP6_UNREACH_ABORT && 907 args->f_id.proto == IPPROTO_SCTP) { 908 struct mbuf *m0; 909 struct sctphdr *sctp; 910 u_int32_t v_tag; 911 int reflected; 912 913 sctp = (struct sctphdr *)((char *)ip6 + hlen); 914 reflected = 1; 915 v_tag = ntohl(sctp->v_tag); 916 /* Investigate the first chunk header if available */ 917 if (m->m_len >= hlen + sizeof(struct sctphdr) + 918 sizeof(struct sctp_chunkhdr)) { 919 struct sctp_chunkhdr *chunk; 920 921 chunk = (struct sctp_chunkhdr *)(sctp + 1); 922 switch (chunk->chunk_type) { 923 case SCTP_INITIATION: 924 /* 925 * Packets containing an INIT chunk MUST have 926 * a zero v-tag. 927 */ 928 if (v_tag != 0) { 929 v_tag = 0; 930 break; 931 } 932 /* INIT chunk MUST NOT be bundled */ 933 if (m->m_pkthdr.len > 934 hlen + sizeof(struct sctphdr) + 935 ntohs(chunk->chunk_length) + 3) { 936 break; 937 } 938 /* Use the initiate tag if available */ 939 if ((m->m_len >= hlen + sizeof(struct sctphdr) + 940 sizeof(struct sctp_chunkhdr) + 941 offsetof(struct sctp_init, a_rwnd))) { 942 struct sctp_init *init; 943 944 init = (struct sctp_init *)(chunk + 1); 945 v_tag = ntohl(init->initiate_tag); 946 reflected = 0; 947 } 948 break; 949 case SCTP_ABORT_ASSOCIATION: 950 /* 951 * If the packet contains an ABORT chunk, don't 952 * reply. 953 * XXX: We should search through all chunks, 954 * but do not do that to avoid attacks. 955 */ 956 v_tag = 0; 957 break; 958 } 959 } 960 if (v_tag == 0) { 961 m0 = NULL; 962 } else { 963 m0 = ipfw_send_abort(args->m, &(args->f_id), v_tag, 964 reflected); 965 } 966 if (m0 != NULL) 967 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL); 968 FREE_PKT(m); 969 } else if (code != ICMP6_UNREACH_RST && code != ICMP6_UNREACH_ABORT) { 970 /* Send an ICMPv6 unreach. */ 971 #if 0 972 /* 973 * Unlike above, the mbufs need to line up with the ip6 hdr, 974 * as the contents are read. We need to m_adj() the 975 * needed amount. 976 * The mbuf will however be thrown away so we can adjust it. 977 * Remember we did an m_pullup on it already so we 978 * can make some assumptions about contiguousness. 979 */ 980 if (args->L3offset) 981 m_adj(m, args->L3offset); 982 #endif 983 icmp6_error(m, ICMP6_DST_UNREACH, code, 0); 984 } else 985 FREE_PKT(m); 986 987 args->m = NULL; 988 } 989 990 #endif /* INET6 */ 991 992 /* 993 * sends a reject message, consuming the mbuf passed as an argument. 994 */ 995 static void 996 send_reject(struct ip_fw_args *args, const ipfw_insn *cmd, int iplen, 997 struct ip *ip) 998 { 999 int code, mtu; 1000 1001 code = cmd->arg1; 1002 if (code == ICMP_UNREACH_NEEDFRAG && 1003 cmd->len == F_INSN_SIZE(ipfw_insn_u16)) 1004 mtu = ((const ipfw_insn_u16 *)cmd)->ports[0]; 1005 else 1006 mtu = 0; 1007 1008 #if 0 1009 /* XXX When ip is not guaranteed to be at mtod() we will 1010 * need to account for this */ 1011 * The mbuf will however be thrown away so we can adjust it. 
1012 * Remember we did an m_pullup on it already so we 1013 * can make some assumptions about contiguousness. 1014 */ 1015 if (args->L3offset) 1016 m_adj(m, args->L3offset); 1017 #endif 1018 if (code != ICMP_REJECT_RST && code != ICMP_REJECT_ABORT) { 1019 /* Send an ICMP unreach */ 1020 icmp_error(args->m, ICMP_UNREACH, code, 0L, mtu); 1021 } else if (code == ICMP_REJECT_RST && args->f_id.proto == IPPROTO_TCP) { 1022 struct tcphdr *const tcp = 1023 L3HDR(struct tcphdr, mtod(args->m, struct ip *)); 1024 if ( (tcp->th_flags & TH_RST) == 0) { 1025 struct mbuf *m; 1026 m = ipfw_send_pkt(args->m, &(args->f_id), 1027 ntohl(tcp->th_seq), ntohl(tcp->th_ack), 1028 tcp->th_flags | TH_RST); 1029 if (m != NULL) 1030 ip_output(m, NULL, NULL, 0, NULL, NULL); 1031 } 1032 FREE_PKT(args->m); 1033 } else if (code == ICMP_REJECT_ABORT && 1034 args->f_id.proto == IPPROTO_SCTP) { 1035 struct mbuf *m; 1036 struct sctphdr *sctp; 1037 struct sctp_chunkhdr *chunk; 1038 struct sctp_init *init; 1039 u_int32_t v_tag; 1040 int reflected; 1041 1042 sctp = L3HDR(struct sctphdr, mtod(args->m, struct ip *)); 1043 reflected = 1; 1044 v_tag = ntohl(sctp->v_tag); 1045 if (iplen >= (ip->ip_hl << 2) + sizeof(struct sctphdr) + 1046 sizeof(struct sctp_chunkhdr)) { 1047 /* Look at the first chunk header if available */ 1048 chunk = (struct sctp_chunkhdr *)(sctp + 1); 1049 switch (chunk->chunk_type) { 1050 case SCTP_INITIATION: 1051 /* 1052 * Packets containing an INIT chunk MUST have 1053 * a zero v-tag. 1054 */ 1055 if (v_tag != 0) { 1056 v_tag = 0; 1057 break; 1058 } 1059 /* INIT chunk MUST NOT be bundled */ 1060 if (iplen > 1061 (ip->ip_hl << 2) + sizeof(struct sctphdr) + 1062 ntohs(chunk->chunk_length) + 3) { 1063 break; 1064 } 1065 /* Use the initiate tag if available */ 1066 if ((iplen >= (ip->ip_hl << 2) + 1067 sizeof(struct sctphdr) + 1068 sizeof(struct sctp_chunkhdr) + 1069 offsetof(struct sctp_init, a_rwnd))) { 1070 init = (struct sctp_init *)(chunk + 1); 1071 v_tag = ntohl(init->initiate_tag); 1072 reflected = 0; 1073 } 1074 break; 1075 case SCTP_ABORT_ASSOCIATION: 1076 /* 1077 * If the packet contains an ABORT chunk, don't 1078 * reply. 1079 * XXX: We should search through all chunks, 1080 * but do not do that to avoid attacks. 1081 */ 1082 v_tag = 0; 1083 break; 1084 } 1085 } 1086 if (v_tag == 0) { 1087 m = NULL; 1088 } else { 1089 m = ipfw_send_abort(args->m, &(args->f_id), v_tag, 1090 reflected); 1091 } 1092 if (m != NULL) 1093 ip_output(m, NULL, NULL, 0, NULL, NULL); 1094 FREE_PKT(args->m); 1095 } else 1096 FREE_PKT(args->m); 1097 args->m = NULL; 1098 } 1099 1100 /* 1101 * Support for uid/gid/jail lookup. These tests are expensive 1102 * (because we may need to look into the list of active sockets) 1103 * so we cache the results. ugid_lookupp is 0 if we have not 1104 * yet done a lookup, 1 if we succeeded, and -1 if we tried 1105 * and failed. The function always returns the match value. 1106 * We could actually spare the variable and use *uc, setting 1107 * it to '(void *)check_uidgid if we have no info, NULL if 1108 * we tried and failed, or any other value if successful. 
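 * The credential reference is cached in *uc, so multiple uid/gid/jail
 * opcodes evaluated for the same packet share a single PCB lookup.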
1109 */ 1110 static int 1111 check_uidgid(ipfw_insn_u32 *insn, struct ip_fw_args *args, int *ugid_lookupp, 1112 struct ucred **uc) 1113 { 1114 #if defined(USERSPACE) 1115 return 0; // not supported in userspace 1116 #else 1117 #ifndef __FreeBSD__ 1118 /* XXX */ 1119 return cred_check(insn, proto, oif, 1120 dst_ip, dst_port, src_ip, src_port, 1121 (struct bsd_ucred *)uc, ugid_lookupp, ((struct mbuf *)inp)->m_skb); 1122 #else /* FreeBSD */ 1123 struct in_addr src_ip, dst_ip; 1124 struct inpcbinfo *pi; 1125 struct ipfw_flow_id *id; 1126 struct inpcb *pcb, *inp; 1127 int lookupflags; 1128 int match; 1129 1130 id = &args->f_id; 1131 inp = args->inp; 1132 1133 /* 1134 * Check to see if the UDP or TCP stack supplied us with 1135 * the PCB. If so, rather then holding a lock and looking 1136 * up the PCB, we can use the one that was supplied. 1137 */ 1138 if (inp && *ugid_lookupp == 0) { 1139 INP_LOCK_ASSERT(inp); 1140 if (inp->inp_socket != NULL) { 1141 *uc = crhold(inp->inp_cred); 1142 *ugid_lookupp = 1; 1143 } else 1144 *ugid_lookupp = -1; 1145 } 1146 /* 1147 * If we have already been here and the packet has no 1148 * PCB entry associated with it, then we can safely 1149 * assume that this is a no match. 1150 */ 1151 if (*ugid_lookupp == -1) 1152 return (0); 1153 if (id->proto == IPPROTO_TCP) { 1154 lookupflags = 0; 1155 pi = &V_tcbinfo; 1156 } else if (id->proto == IPPROTO_UDP) { 1157 lookupflags = INPLOOKUP_WILDCARD; 1158 pi = &V_udbinfo; 1159 } else if (id->proto == IPPROTO_UDPLITE) { 1160 lookupflags = INPLOOKUP_WILDCARD; 1161 pi = &V_ulitecbinfo; 1162 } else 1163 return 0; 1164 lookupflags |= INPLOOKUP_RLOCKPCB; 1165 match = 0; 1166 if (*ugid_lookupp == 0) { 1167 if (id->addr_type == 6) { 1168 #ifdef INET6 1169 if (args->flags & IPFW_ARGS_IN) 1170 pcb = in6_pcblookup_mbuf(pi, 1171 &id->src_ip6, htons(id->src_port), 1172 &id->dst_ip6, htons(id->dst_port), 1173 lookupflags, NULL, args->m); 1174 else 1175 pcb = in6_pcblookup_mbuf(pi, 1176 &id->dst_ip6, htons(id->dst_port), 1177 &id->src_ip6, htons(id->src_port), 1178 lookupflags, args->ifp, args->m); 1179 #else 1180 *ugid_lookupp = -1; 1181 return (0); 1182 #endif 1183 } else { 1184 src_ip.s_addr = htonl(id->src_ip); 1185 dst_ip.s_addr = htonl(id->dst_ip); 1186 if (args->flags & IPFW_ARGS_IN) 1187 pcb = in_pcblookup_mbuf(pi, 1188 src_ip, htons(id->src_port), 1189 dst_ip, htons(id->dst_port), 1190 lookupflags, NULL, args->m); 1191 else 1192 pcb = in_pcblookup_mbuf(pi, 1193 dst_ip, htons(id->dst_port), 1194 src_ip, htons(id->src_port), 1195 lookupflags, args->ifp, args->m); 1196 } 1197 if (pcb != NULL) { 1198 INP_RLOCK_ASSERT(pcb); 1199 *uc = crhold(pcb->inp_cred); 1200 *ugid_lookupp = 1; 1201 INP_RUNLOCK(pcb); 1202 } 1203 if (*ugid_lookupp == 0) { 1204 /* 1205 * We tried and failed, set the variable to -1 1206 * so we will not try again on this packet. 1207 */ 1208 *ugid_lookupp = -1; 1209 return (0); 1210 } 1211 } 1212 if (insn->o.opcode == O_UID) 1213 match = ((*uc)->cr_uid == (uid_t)insn->d[0]); 1214 else if (insn->o.opcode == O_GID) 1215 match = groupmember((gid_t)insn->d[0], *uc); 1216 else if (insn->o.opcode == O_JAIL) 1217 match = ((*uc)->cr_prison->pr_id == (int)insn->d[0]); 1218 return (match); 1219 #endif /* __FreeBSD__ */ 1220 #endif /* not supported in userspace */ 1221 } 1222 1223 /* 1224 * Helper function to set args with info on the rule after the matching 1225 * one. slot is precise, whereas we guess rule_id as they are 1226 * assigned sequentially. 
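 * The stored chain_id lets a later ipfw_chk() detect that the ruleset
 * changed and fall back to ipfw_find_rule() in that case.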
1227 */ 1228 static inline void 1229 set_match(struct ip_fw_args *args, int slot, 1230 struct ip_fw_chain *chain) 1231 { 1232 args->rule.chain_id = chain->id; 1233 args->rule.slot = slot + 1; /* we use 0 as a marker */ 1234 args->rule.rule_id = 1 + chain->map[slot]->id; 1235 args->rule.rulenum = chain->map[slot]->rulenum; 1236 args->flags |= IPFW_ARGS_REF; 1237 } 1238 1239 static int 1240 jump_lookup_pos(struct ip_fw_chain *chain, struct ip_fw *f, int num, 1241 int tablearg, int jump_backwards) 1242 { 1243 int f_pos, i; 1244 1245 i = IP_FW_ARG_TABLEARG(chain, num, skipto); 1246 /* make sure we do not jump backward */ 1247 if (jump_backwards == 0 && i <= f->rulenum) 1248 i = f->rulenum + 1; 1249 1250 #ifndef LINEAR_SKIPTO 1251 if (chain->idxmap != NULL) 1252 f_pos = chain->idxmap[i]; 1253 else 1254 f_pos = ipfw_find_rule(chain, i, 0); 1255 #else 1256 f_pos = chain->idxmap[i]; 1257 #endif /* LINEAR_SKIPTO */ 1258 1259 return (f_pos); 1260 } 1261 1262 1263 #ifndef LINEAR_SKIPTO 1264 /* 1265 * Helper function to enable cached rule lookups using 1266 * cache.id and cache.pos fields in ipfw rule. 1267 */ 1268 static int 1269 jump_cached(struct ip_fw_chain *chain, struct ip_fw *f, int num, 1270 int tablearg, int jump_backwards) 1271 { 1272 int f_pos; 1273 1274 /* Can't use cache with IP_FW_TARG */ 1275 if (num == IP_FW_TARG) 1276 return jump_lookup_pos(chain, f, num, tablearg, jump_backwards); 1277 1278 /* 1279 * If possible use cached f_pos (in f->cache.pos), 1280 * whose version is written in f->cache.id (horrible hacks 1281 * to avoid changing the ABI). 1282 * 1283 * Multiple threads can execute the same rule simultaneously, 1284 * we need to ensure that cache.pos is updated before cache.id. 1285 */ 1286 1287 #ifdef __LP64__ 1288 struct ip_fw_jump_cache cache; 1289 1290 cache.raw_value = f->cache.raw_value; 1291 if (cache.id == chain->id) 1292 return (cache.pos); 1293 1294 f_pos = jump_lookup_pos(chain, f, num, tablearg, jump_backwards); 1295 1296 cache.pos = f_pos; 1297 cache.id = chain->id; 1298 f->cache.raw_value = cache.raw_value; 1299 #else 1300 if (f->cache.id == chain->id) { 1301 /* Load pos after id */ 1302 atomic_thread_fence_acq(); 1303 return (f->cache.pos); 1304 } 1305 1306 f_pos = jump_lookup_pos(chain, f, num, tablearg, jump_backwards); 1307 1308 f->cache.pos = f_pos; 1309 /* Store id after pos */ 1310 atomic_thread_fence_rel(); 1311 f->cache.id = chain->id; 1312 #endif /* !__LP64__ */ 1313 return (f_pos); 1314 } 1315 #endif /* !LINEAR_SKIPTO */ 1316 1317 #define TARG(k, f) IP_FW_ARG_TABLEARG(chain, k, f) 1318 /* 1319 * The main check routine for the firewall. 1320 * 1321 * All arguments are in args so we can modify them and return them 1322 * back to the caller. 1323 * 1324 * Parameters: 1325 * 1326 * args->m (in/out) The packet; we set to NULL when/if we nuke it. 1327 * Starts with the IP header. 1328 * args->L3offset Number of bytes bypassed if we came from L2. 1329 * e.g. often sizeof(eh) ** NOTYET ** 1330 * args->ifp Incoming or outgoing interface. 1331 * args->divert_rule (in/out) 1332 * Skip up to the first rule past this rule number; 1333 * upon return, non-zero port number for divert or tee. 1334 * 1335 * args->rule Pointer to the last matching rule (in/out) 1336 * args->next_hop Socket we are forwarding to (out). 1337 * args->next_hop6 IPv6 next hop we are forwarding to (out). 
1338 * args->f_id Addresses grabbed from the packet (out) 1339 * args->rule.info a cookie depending on rule action 1340 * 1341 * Return value: 1342 * 1343 * IP_FW_PASS the packet must be accepted 1344 * IP_FW_DENY the packet must be dropped 1345 * IP_FW_DIVERT divert packet, port in m_tag 1346 * IP_FW_TEE tee packet, port in m_tag 1347 * IP_FW_DUMMYNET to dummynet, pipe in args->cookie 1348 * IP_FW_NETGRAPH into netgraph, cookie args->cookie 1349 * args->rule contains the matching rule, 1350 * args->rule.info has additional information. 1351 * 1352 */ 1353 int 1354 ipfw_chk(struct ip_fw_args *args) 1355 { 1356 1357 /* 1358 * Local variables holding state while processing a packet: 1359 * 1360 * IMPORTANT NOTE: to speed up the processing of rules, there 1361 * are some assumption on the values of the variables, which 1362 * are documented here. Should you change them, please check 1363 * the implementation of the various instructions to make sure 1364 * that they still work. 1365 * 1366 * m | args->m Pointer to the mbuf, as received from the caller. 1367 * It may change if ipfw_chk() does an m_pullup, or if it 1368 * consumes the packet because it calls send_reject(). 1369 * XXX This has to change, so that ipfw_chk() never modifies 1370 * or consumes the buffer. 1371 * OR 1372 * args->mem Pointer to contigous memory chunk. 1373 * ip Is the beginning of the ip(4 or 6) header. 1374 * eh Ethernet header in case if input is Layer2. 1375 */ 1376 struct mbuf *m; 1377 struct ip *ip; 1378 struct ether_header *eh; 1379 1380 /* 1381 * For rules which contain uid/gid or jail constraints, cache 1382 * a copy of the users credentials after the pcb lookup has been 1383 * executed. This will speed up the processing of rules with 1384 * these types of constraints, as well as decrease contention 1385 * on pcb related locks. 1386 */ 1387 #ifndef __FreeBSD__ 1388 struct bsd_ucred ucred_cache; 1389 #else 1390 struct ucred *ucred_cache = NULL; 1391 #endif 1392 int ucred_lookup = 0; 1393 int f_pos = 0; /* index of current rule in the array */ 1394 int retval = 0; 1395 struct ifnet *oif, *iif; 1396 1397 /* 1398 * hlen The length of the IP header. 1399 */ 1400 u_int hlen = 0; /* hlen >0 means we have an IP pkt */ 1401 1402 /* 1403 * offset The offset of a fragment. offset != 0 means that 1404 * we have a fragment at this offset of an IPv4 packet. 1405 * offset == 0 means that (if this is an IPv4 packet) 1406 * this is the first or only fragment. 1407 * For IPv6 offset|ip6f_mf == 0 means there is no Fragment Header 1408 * or there is a single packet fragment (fragment header added 1409 * without needed). We will treat a single packet fragment as if 1410 * there was no fragment header (or log/block depending on the 1411 * V_fw_permit_single_frag6 sysctl setting). 1412 */ 1413 u_short offset = 0; 1414 u_short ip6f_mf = 0; 1415 1416 /* 1417 * Local copies of addresses. They are only valid if we have 1418 * an IP packet. 1419 * 1420 * proto The protocol. Set to 0 for non-ip packets, 1421 * or to the protocol read from the packet otherwise. 1422 * proto != 0 means that we have an IPv4 packet. 1423 * 1424 * src_port, dst_port port numbers, in HOST format. Only 1425 * valid for TCP and UDP packets. 1426 * 1427 * src_ip, dst_ip ip addresses, in NETWORK format. 1428 * Only valid for IPv4 packets. 
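 * iplen, pktlen length from the IP header and total available length;
 * pktlen is clamped to iplen once the header has been parsed.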
1429 */ 1430 uint8_t proto; 1431 uint16_t src_port, dst_port; /* NOTE: host format */ 1432 struct in_addr src_ip, dst_ip; /* NOTE: network format */ 1433 int iplen = 0; 1434 int pktlen; 1435 1436 struct ipfw_dyn_info dyn_info; 1437 struct ip_fw *q = NULL; 1438 struct ip_fw_chain *chain = &V_layer3_chain; 1439 1440 /* 1441 * We store in ulp a pointer to the upper layer protocol header. 1442 * In the ipv4 case this is easy to determine from the header, 1443 * but for ipv6 we might have some additional headers in the middle. 1444 * ulp is NULL if not found. 1445 */ 1446 void *ulp = NULL; /* upper layer protocol pointer. */ 1447 1448 /* XXX ipv6 variables */ 1449 int is_ipv6 = 0; 1450 #ifdef INET6 1451 uint8_t icmp6_type = 0; 1452 #endif 1453 uint16_t ext_hd = 0; /* bits vector for extension header filtering */ 1454 /* end of ipv6 variables */ 1455 1456 int is_ipv4 = 0; 1457 1458 int done = 0; /* flag to exit the outer loop */ 1459 IPFW_RLOCK_TRACKER; 1460 bool mem; 1461 1462 if ((mem = (args->flags & IPFW_ARGS_LENMASK))) { 1463 if (args->flags & IPFW_ARGS_ETHER) { 1464 eh = (struct ether_header *)args->mem; 1465 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 1466 ip = (struct ip *) 1467 ((struct ether_vlan_header *)eh + 1); 1468 else 1469 ip = (struct ip *)(eh + 1); 1470 } else { 1471 eh = NULL; 1472 ip = (struct ip *)args->mem; 1473 } 1474 pktlen = IPFW_ARGS_LENGTH(args->flags); 1475 args->f_id.fib = args->ifp->if_fib; /* best guess */ 1476 } else { 1477 m = args->m; 1478 if (m->m_flags & M_SKIP_FIREWALL || (! V_ipfw_vnet_ready)) 1479 return (IP_FW_PASS); /* accept */ 1480 if (args->flags & IPFW_ARGS_ETHER) { 1481 /* We need some amount of data to be contiguous. */ 1482 if (m->m_len < min(m->m_pkthdr.len, max_protohdr) && 1483 (args->m = m = m_pullup(m, min(m->m_pkthdr.len, 1484 max_protohdr))) == NULL) 1485 goto pullup_failed; 1486 eh = mtod(m, struct ether_header *); 1487 ip = (struct ip *)(eh + 1); 1488 } else { 1489 eh = NULL; 1490 ip = mtod(m, struct ip *); 1491 } 1492 pktlen = m->m_pkthdr.len; 1493 args->f_id.fib = M_GETFIB(m); /* mbuf not altered */ 1494 } 1495 1496 dst_ip.s_addr = 0; /* make sure it is initialized */ 1497 src_ip.s_addr = 0; /* make sure it is initialized */ 1498 src_port = dst_port = 0; 1499 1500 DYN_INFO_INIT(&dyn_info); 1501 /* 1502 * PULLUP_TO(len, p, T) makes sure that len + sizeof(T) is contiguous, 1503 * then it sets p to point at the offset "len" in the mbuf. WARNING: the 1504 * pointer might become stale after other pullups (but we never use it 1505 * this way). 1506 */ 1507 #define PULLUP_TO(_len, p, T) PULLUP_LEN(_len, p, sizeof(T)) 1508 #define EHLEN (eh != NULL ? ((char *)ip - (char *)eh) : 0) 1509 #define _PULLUP_LOCKED(_len, p, T, unlock) \ 1510 do { \ 1511 int x = (_len) + T + EHLEN; \ 1512 if (mem) { \ 1513 if (__predict_false(pktlen < x)) { \ 1514 unlock; \ 1515 goto pullup_failed; \ 1516 } \ 1517 p = (char *)args->mem + (_len) + EHLEN; \ 1518 } else { \ 1519 if (__predict_false((m)->m_len < x)) { \ 1520 args->m = m = m_pullup(m, x); \ 1521 if (m == NULL) { \ 1522 unlock; \ 1523 goto pullup_failed; \ 1524 } \ 1525 } \ 1526 p = mtod(m, char *) + (_len) + EHLEN; \ 1527 } \ 1528 } while (0) 1529 1530 #define PULLUP_LEN(_len, p, T) _PULLUP_LOCKED(_len, p, T, ) 1531 #define PULLUP_LEN_LOCKED(_len, p, T) \ 1532 _PULLUP_LOCKED(_len, p, T, IPFW_PF_RUNLOCK(chain)); \ 1533 UPDATE_POINTERS() 1534 /* 1535 * In case pointers got stale after pullups, update them. 
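 * Only needed in the mbuf case: with a contiguous memory chunk
 * (mem != 0) the pointers never move.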
1536 */ 1537 #define UPDATE_POINTERS() \ 1538 do { \ 1539 if (!mem) { \ 1540 if (eh != NULL) { \ 1541 eh = mtod(m, struct ether_header *); \ 1542 ip = (struct ip *)(eh + 1); \ 1543 } else \ 1544 ip = mtod(m, struct ip *); \ 1545 args->m = m; \ 1546 } \ 1547 } while (0) 1548 1549 /* Identify IP packets and fill up variables. */ 1550 if (pktlen >= sizeof(struct ip6_hdr) && 1551 (eh == NULL || eh->ether_type == htons(ETHERTYPE_IPV6)) && 1552 ip->ip_v == 6) { 1553 struct ip6_hdr *ip6 = (struct ip6_hdr *)ip; 1554 1555 is_ipv6 = 1; 1556 args->flags |= IPFW_ARGS_IP6; 1557 hlen = sizeof(struct ip6_hdr); 1558 proto = ip6->ip6_nxt; 1559 /* Search extension headers to find upper layer protocols */ 1560 while (ulp == NULL && offset == 0) { 1561 switch (proto) { 1562 case IPPROTO_ICMPV6: 1563 PULLUP_TO(hlen, ulp, struct icmp6_hdr); 1564 #ifdef INET6 1565 icmp6_type = ICMP6(ulp)->icmp6_type; 1566 #endif 1567 break; 1568 1569 case IPPROTO_TCP: 1570 PULLUP_TO(hlen, ulp, struct tcphdr); 1571 dst_port = TCP(ulp)->th_dport; 1572 src_port = TCP(ulp)->th_sport; 1573 /* save flags for dynamic rules */ 1574 args->f_id._flags = TCP(ulp)->th_flags; 1575 break; 1576 1577 case IPPROTO_SCTP: 1578 if (pktlen >= hlen + sizeof(struct sctphdr) + 1579 sizeof(struct sctp_chunkhdr) + 1580 offsetof(struct sctp_init, a_rwnd)) 1581 PULLUP_LEN(hlen, ulp, 1582 sizeof(struct sctphdr) + 1583 sizeof(struct sctp_chunkhdr) + 1584 offsetof(struct sctp_init, a_rwnd)); 1585 else if (pktlen >= hlen + sizeof(struct sctphdr)) 1586 PULLUP_LEN(hlen, ulp, pktlen - hlen); 1587 else 1588 PULLUP_LEN(hlen, ulp, 1589 sizeof(struct sctphdr)); 1590 src_port = SCTP(ulp)->src_port; 1591 dst_port = SCTP(ulp)->dest_port; 1592 break; 1593 1594 case IPPROTO_UDP: 1595 case IPPROTO_UDPLITE: 1596 PULLUP_TO(hlen, ulp, struct udphdr); 1597 dst_port = UDP(ulp)->uh_dport; 1598 src_port = UDP(ulp)->uh_sport; 1599 break; 1600 1601 case IPPROTO_HOPOPTS: /* RFC 2460 */ 1602 PULLUP_TO(hlen, ulp, struct ip6_hbh); 1603 ext_hd |= EXT_HOPOPTS; 1604 hlen += (((struct ip6_hbh *)ulp)->ip6h_len + 1) << 3; 1605 proto = ((struct ip6_hbh *)ulp)->ip6h_nxt; 1606 ulp = NULL; 1607 break; 1608 1609 case IPPROTO_ROUTING: /* RFC 2460 */ 1610 PULLUP_TO(hlen, ulp, struct ip6_rthdr); 1611 switch (((struct ip6_rthdr *)ulp)->ip6r_type) { 1612 case 0: 1613 ext_hd |= EXT_RTHDR0; 1614 break; 1615 case 2: 1616 ext_hd |= EXT_RTHDR2; 1617 break; 1618 default: 1619 if (V_fw_verbose) 1620 printf("IPFW2: IPV6 - Unknown " 1621 "Routing Header type(%d)\n", 1622 ((struct ip6_rthdr *) 1623 ulp)->ip6r_type); 1624 if (V_fw_deny_unknown_exthdrs) 1625 return (IP_FW_DENY); 1626 break; 1627 } 1628 ext_hd |= EXT_ROUTING; 1629 hlen += (((struct ip6_rthdr *)ulp)->ip6r_len + 1) << 3; 1630 proto = ((struct ip6_rthdr *)ulp)->ip6r_nxt; 1631 ulp = NULL; 1632 break; 1633 1634 case IPPROTO_FRAGMENT: /* RFC 2460 */ 1635 PULLUP_TO(hlen, ulp, struct ip6_frag); 1636 ext_hd |= EXT_FRAGMENT; 1637 hlen += sizeof (struct ip6_frag); 1638 proto = ((struct ip6_frag *)ulp)->ip6f_nxt; 1639 offset = ((struct ip6_frag *)ulp)->ip6f_offlg & 1640 IP6F_OFF_MASK; 1641 ip6f_mf = ((struct ip6_frag *)ulp)->ip6f_offlg & 1642 IP6F_MORE_FRAG; 1643 if (V_fw_permit_single_frag6 == 0 && 1644 offset == 0 && ip6f_mf == 0) { 1645 if (V_fw_verbose) 1646 printf("IPFW2: IPV6 - Invalid " 1647 "Fragment Header\n"); 1648 if (V_fw_deny_unknown_exthdrs) 1649 return (IP_FW_DENY); 1650 break; 1651 } 1652 args->f_id.extra = 1653 ntohl(((struct ip6_frag *)ulp)->ip6f_ident); 1654 ulp = NULL; 1655 break; 1656 1657 case IPPROTO_DSTOPTS: /* RFC 2460 */ 1658 
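				/* Destination Options shares the ip6_hbh layout, hence the ip6_hbh casts below. */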
PULLUP_TO(hlen, ulp, struct ip6_hbh); 1659 ext_hd |= EXT_DSTOPTS; 1660 hlen += (((struct ip6_hbh *)ulp)->ip6h_len + 1) << 3; 1661 proto = ((struct ip6_hbh *)ulp)->ip6h_nxt; 1662 ulp = NULL; 1663 break; 1664 1665 case IPPROTO_AH: /* RFC 2402 */ 1666 PULLUP_TO(hlen, ulp, struct ip6_ext); 1667 ext_hd |= EXT_AH; 1668 hlen += (((struct ip6_ext *)ulp)->ip6e_len + 2) << 2; 1669 proto = ((struct ip6_ext *)ulp)->ip6e_nxt; 1670 ulp = NULL; 1671 break; 1672 1673 case IPPROTO_ESP: /* RFC 2406 */ 1674 PULLUP_TO(hlen, ulp, uint32_t); /* SPI, Seq# */ 1675 /* Anything past Seq# is variable length and 1676 * data past this ext. header is encrypted. */ 1677 ext_hd |= EXT_ESP; 1678 break; 1679 1680 case IPPROTO_NONE: /* RFC 2460 */ 1681 /* 1682 * Packet ends here, and IPv6 header has 1683 * already been pulled up. If ip6e_len!=0 1684 * then octets must be ignored. 1685 */ 1686 ulp = ip; /* non-NULL to get out of loop. */ 1687 break; 1688 1689 case IPPROTO_OSPFIGP: 1690 /* XXX OSPF header check? */ 1691 PULLUP_TO(hlen, ulp, struct ip6_ext); 1692 break; 1693 1694 case IPPROTO_PIM: 1695 /* XXX PIM header check? */ 1696 PULLUP_TO(hlen, ulp, struct pim); 1697 break; 1698 1699 case IPPROTO_GRE: /* RFC 1701 */ 1700 /* XXX GRE header check? */ 1701 PULLUP_TO(hlen, ulp, struct grehdr); 1702 break; 1703 1704 case IPPROTO_CARP: 1705 PULLUP_TO(hlen, ulp, offsetof( 1706 struct carp_header, carp_counter)); 1707 if (CARP_ADVERTISEMENT != 1708 ((struct carp_header *)ulp)->carp_type) 1709 return (IP_FW_DENY); 1710 break; 1711 1712 case IPPROTO_IPV6: /* RFC 2893 */ 1713 PULLUP_TO(hlen, ulp, struct ip6_hdr); 1714 break; 1715 1716 case IPPROTO_IPV4: /* RFC 2893 */ 1717 PULLUP_TO(hlen, ulp, struct ip); 1718 break; 1719 1720 default: 1721 if (V_fw_verbose) 1722 printf("IPFW2: IPV6 - Unknown " 1723 "Extension Header(%d), ext_hd=%x\n", 1724 proto, ext_hd); 1725 if (V_fw_deny_unknown_exthdrs) 1726 return (IP_FW_DENY); 1727 PULLUP_TO(hlen, ulp, struct ip6_ext); 1728 break; 1729 } /*switch */ 1730 } 1731 UPDATE_POINTERS(); 1732 ip6 = (struct ip6_hdr *)ip; 1733 args->f_id.addr_type = 6; 1734 args->f_id.src_ip6 = ip6->ip6_src; 1735 args->f_id.dst_ip6 = ip6->ip6_dst; 1736 args->f_id.flow_id6 = ntohl(ip6->ip6_flow); 1737 iplen = ntohs(ip6->ip6_plen) + sizeof(*ip6); 1738 } else if (pktlen >= sizeof(struct ip) && 1739 (eh == NULL || eh->ether_type == htons(ETHERTYPE_IP)) && 1740 ip->ip_v == 4) { 1741 is_ipv4 = 1; 1742 args->flags |= IPFW_ARGS_IP4; 1743 hlen = ip->ip_hl << 2; 1744 /* 1745 * Collect parameters into local variables for faster 1746 * matching. 
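 * ip_off and ip_len are converted to host order here; the addresses
 * stay in network order.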
1747 */ 1748 proto = ip->ip_p; 1749 src_ip = ip->ip_src; 1750 dst_ip = ip->ip_dst; 1751 offset = ntohs(ip->ip_off) & IP_OFFMASK; 1752 iplen = ntohs(ip->ip_len); 1753 1754 if (offset == 0) { 1755 switch (proto) { 1756 case IPPROTO_TCP: 1757 PULLUP_TO(hlen, ulp, struct tcphdr); 1758 dst_port = TCP(ulp)->th_dport; 1759 src_port = TCP(ulp)->th_sport; 1760 /* save flags for dynamic rules */ 1761 args->f_id._flags = TCP(ulp)->th_flags; 1762 break; 1763 1764 case IPPROTO_SCTP: 1765 if (pktlen >= hlen + sizeof(struct sctphdr) + 1766 sizeof(struct sctp_chunkhdr) + 1767 offsetof(struct sctp_init, a_rwnd)) 1768 PULLUP_LEN(hlen, ulp, 1769 sizeof(struct sctphdr) + 1770 sizeof(struct sctp_chunkhdr) + 1771 offsetof(struct sctp_init, a_rwnd)); 1772 else if (pktlen >= hlen + sizeof(struct sctphdr)) 1773 PULLUP_LEN(hlen, ulp, pktlen - hlen); 1774 else 1775 PULLUP_LEN(hlen, ulp, 1776 sizeof(struct sctphdr)); 1777 src_port = SCTP(ulp)->src_port; 1778 dst_port = SCTP(ulp)->dest_port; 1779 break; 1780 1781 case IPPROTO_UDP: 1782 case IPPROTO_UDPLITE: 1783 PULLUP_TO(hlen, ulp, struct udphdr); 1784 dst_port = UDP(ulp)->uh_dport; 1785 src_port = UDP(ulp)->uh_sport; 1786 break; 1787 1788 case IPPROTO_ICMP: 1789 PULLUP_TO(hlen, ulp, struct icmphdr); 1790 //args->f_id.flags = ICMP(ulp)->icmp_type; 1791 break; 1792 1793 default: 1794 break; 1795 } 1796 } else { 1797 if (offset == 1 && proto == IPPROTO_TCP) { 1798 /* RFC 3128 */ 1799 goto pullup_failed; 1800 } 1801 } 1802 1803 UPDATE_POINTERS(); 1804 args->f_id.addr_type = 4; 1805 args->f_id.src_ip = ntohl(src_ip.s_addr); 1806 args->f_id.dst_ip = ntohl(dst_ip.s_addr); 1807 } else { 1808 proto = 0; 1809 dst_ip.s_addr = src_ip.s_addr = 0; 1810 1811 args->f_id.addr_type = 1; /* XXX */ 1812 } 1813 #undef PULLUP_TO 1814 pktlen = iplen < pktlen ? iplen: pktlen; 1815 1816 /* Properly initialize the rest of f_id */ 1817 args->f_id.proto = proto; 1818 args->f_id.src_port = src_port = ntohs(src_port); 1819 args->f_id.dst_port = dst_port = ntohs(dst_port); 1820 1821 IPFW_PF_RLOCK(chain); 1822 if (! V_ipfw_vnet_ready) { /* shutting down, leave NOW. */ 1823 IPFW_PF_RUNLOCK(chain); 1824 return (IP_FW_PASS); /* accept */ 1825 } 1826 if (args->flags & IPFW_ARGS_REF) { 1827 /* 1828 * Packet has already been tagged as a result of a previous 1829 * match on rule args->rule aka args->rule_id (PIPE, QUEUE, 1830 * REASS, NETGRAPH, DIVERT/TEE...) 1831 * Validate the slot and continue from the next one 1832 * if still present, otherwise do a lookup. 1833 */ 1834 f_pos = (args->rule.chain_id == chain->id) ? 1835 args->rule.slot : 1836 ipfw_find_rule(chain, args->rule.rulenum, 1837 args->rule.rule_id); 1838 } else { 1839 f_pos = 0; 1840 } 1841 1842 if (args->flags & IPFW_ARGS_IN) { 1843 iif = args->ifp; 1844 oif = NULL; 1845 } else { 1846 MPASS(args->flags & IPFW_ARGS_OUT); 1847 iif = mem ? NULL : m_rcvif(m); 1848 oif = args->ifp; 1849 } 1850 1851 /* 1852 * Now scan the rules, and parse microinstructions for each rule. 1853 * We have two nested loops and an inner switch. Sometimes we 1854 * need to break out of one or both loops, or re-enter one of 1855 * the loops with updated variables. Loop variables are: 1856 * 1857 * f_pos (outer loop) points to the current rule. 1858 * On output it points to the matching rule. 1859 * done (outer loop) is used as a flag to break the loop. 1860 * l (inner loop) residual length of current rule. 1861 * cmd points to the current microinstruction. 1862 * 1863 * We break the inner loop by setting l=0 and possibly 1864 * cmdlen=0 if we don't want to advance cmd. 
1865 * We break the outer loop by setting done=1 1866 * We can restart the inner loop by setting l>0 and f_pos, f, cmd 1867 * as needed. 1868 */ 1869 for (; f_pos < chain->n_rules; f_pos++) { 1870 ipfw_insn *cmd; 1871 uint32_t tablearg = 0; 1872 int l, cmdlen, skip_or; /* skip rest of OR block */ 1873 struct ip_fw *f; 1874 1875 f = chain->map[f_pos]; 1876 if (V_set_disable & (1 << f->set) ) 1877 continue; 1878 1879 skip_or = 0; 1880 for (l = f->cmd_len, cmd = f->cmd ; l > 0 ; 1881 l -= cmdlen, cmd += cmdlen) { 1882 int match; 1883 1884 /* 1885 * check_body is a jump target used when we find a 1886 * CHECK_STATE, and need to jump to the body of 1887 * the target rule. 1888 */ 1889 1890 /* check_body: */ 1891 cmdlen = F_LEN(cmd); 1892 /* 1893 * An OR block (insn_1 || .. || insn_n) has the 1894 * F_OR bit set in all but the last instruction. 1895 * The first match will set "skip_or", and cause 1896 * the following instructions to be skipped until 1897 * past the one with the F_OR bit clear. 1898 */ 1899 if (skip_or) { /* skip this instruction */ 1900 if ((cmd->len & F_OR) == 0) 1901 skip_or = 0; /* next one is good */ 1902 continue; 1903 } 1904 match = 0; /* set to 1 if we succeed */ 1905 1906 switch (cmd->opcode) { 1907 /* 1908 * The first set of opcodes compares the packet's 1909 * fields with some pattern, setting 'match' if a 1910 * match is found. At the end of the loop there is 1911 * logic to deal with F_NOT and F_OR flags associated 1912 * with the opcode. 1913 */ 1914 case O_NOP: 1915 match = 1; 1916 break; 1917 1918 case O_FORWARD_MAC: 1919 printf("ipfw: opcode %d unimplemented\n", 1920 cmd->opcode); 1921 break; 1922 1923 case O_GID: 1924 case O_UID: 1925 case O_JAIL: 1926 /* 1927 * We only check offset == 0 && proto != 0, 1928 * as this ensures that we have a 1929 * packet with the ports info. 1930 */ 1931 if (offset != 0) 1932 break; 1933 if (proto == IPPROTO_TCP || 1934 proto == IPPROTO_UDP || 1935 proto == IPPROTO_UDPLITE) 1936 match = check_uidgid( 1937 (ipfw_insn_u32 *)cmd, 1938 args, &ucred_lookup, 1939 #ifdef __FreeBSD__ 1940 &ucred_cache); 1941 #else 1942 (void *)&ucred_cache); 1943 #endif 1944 break; 1945 1946 case O_RECV: 1947 match = iface_match(iif, (ipfw_insn_if *)cmd, 1948 chain, &tablearg); 1949 break; 1950 1951 case O_XMIT: 1952 match = iface_match(oif, (ipfw_insn_if *)cmd, 1953 chain, &tablearg); 1954 break; 1955 1956 case O_VIA: 1957 match = iface_match(args->ifp, 1958 (ipfw_insn_if *)cmd, chain, &tablearg); 1959 break; 1960 1961 case O_MACADDR2: 1962 if (args->flags & IPFW_ARGS_ETHER) { 1963 u_int32_t *want = (u_int32_t *) 1964 ((ipfw_insn_mac *)cmd)->addr; 1965 u_int32_t *mask = (u_int32_t *) 1966 ((ipfw_insn_mac *)cmd)->mask; 1967 u_int32_t *hdr = (u_int32_t *)eh; 1968 1969 match = 1970 ( want[0] == (hdr[0] & mask[0]) && 1971 want[1] == (hdr[1] & mask[1]) && 1972 want[2] == (hdr[2] & mask[2]) ); 1973 } 1974 break; 1975 1976 case O_MAC_TYPE: 1977 if (args->flags & IPFW_ARGS_ETHER) { 1978 u_int16_t *p = 1979 ((ipfw_insn_u16 *)cmd)->ports; 1980 int i; 1981 1982 for (i = cmdlen - 1; !match && i>0; 1983 i--, p += 2) 1984 match = 1985 (ntohs(eh->ether_type) >= 1986 p[0] && 1987 ntohs(eh->ether_type) <= 1988 p[1]); 1989 } 1990 break; 1991 1992 case O_FRAG: 1993 if (is_ipv4) { 1994 /* 1995 * Since flags_match() works with 1996 * uint8_t we pack ip_off into 8 bits. 1997 * For this match offset is a boolean. 
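 * (RF/DF/MF end up in bits 0x80/0x40/0x20, a non-zero offset in
 * bit 0x01.)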
1998 */ 1999 match = flags_match(cmd, 2000 ((ntohs(ip->ip_off) & ~IP_OFFMASK) 2001 >> 8) | (offset != 0)); 2002 } else { 2003 /* 2004 * Compatiblity: historically bare 2005 * "frag" would match IPv6 fragments. 2006 */ 2007 match = (cmd->arg1 == 0x1 && 2008 (offset != 0)); 2009 } 2010 break; 2011 2012 case O_IN: /* "out" is "not in" */ 2013 match = (oif == NULL); 2014 break; 2015 2016 case O_LAYER2: 2017 match = (args->flags & IPFW_ARGS_ETHER); 2018 break; 2019 2020 case O_DIVERTED: 2021 if ((args->flags & IPFW_ARGS_REF) == 0) 2022 break; 2023 /* 2024 * For diverted packets, args->rule.info 2025 * contains the divert port (in host format) 2026 * reason and direction. 2027 */ 2028 match = ((args->rule.info & IPFW_IS_MASK) == 2029 IPFW_IS_DIVERT) && ( 2030 ((args->rule.info & IPFW_INFO_IN) ? 2031 1: 2) & cmd->arg1); 2032 break; 2033 2034 case O_PROTO: 2035 /* 2036 * We do not allow an arg of 0 so the 2037 * check of "proto" only suffices. 2038 */ 2039 match = (proto == cmd->arg1); 2040 break; 2041 2042 case O_IP_SRC: 2043 match = is_ipv4 && 2044 (((ipfw_insn_ip *)cmd)->addr.s_addr == 2045 src_ip.s_addr); 2046 break; 2047 2048 case O_IP_DST_LOOKUP: 2049 { 2050 if (cmdlen > F_INSN_SIZE(ipfw_insn_u32)) { 2051 void *pkey; 2052 uint32_t vidx, key; 2053 uint16_t keylen = 0; /* zero if can't match the packet */ 2054 2055 /* Determine lookup key type */ 2056 vidx = ((ipfw_insn_u32 *)cmd)->d[1]; 2057 switch (vidx) { 2058 case LOOKUP_DST_IP: 2059 case LOOKUP_SRC_IP: 2060 /* Need IP frame */ 2061 if (is_ipv6 == 0 && is_ipv4 == 0) 2062 break; 2063 if (vidx == LOOKUP_DST_IP) 2064 pkey = is_ipv6 ? 2065 (void *)&args->f_id.dst_ip6: 2066 (void *)&dst_ip; 2067 else 2068 pkey = is_ipv6 ? 2069 (void *)&args->f_id.src_ip6: 2070 (void *)&src_ip; 2071 keylen = is_ipv6 ? 2072 sizeof(struct in6_addr): 2073 sizeof(in_addr_t); 2074 break; 2075 case LOOKUP_DST_PORT: 2076 case LOOKUP_SRC_PORT: 2077 /* Need IP frame */ 2078 if (is_ipv6 == 0 && is_ipv4 == 0) 2079 break; 2080 /* Skip fragments */ 2081 if (offset != 0) 2082 break; 2083 /* Skip proto without ports */ 2084 if (proto != IPPROTO_TCP && 2085 proto != IPPROTO_UDP && 2086 proto != IPPROTO_UDPLITE && 2087 proto != IPPROTO_SCTP) 2088 break; 2089 key = vidx == LOOKUP_DST_PORT ? 2090 dst_port: 2091 src_port; 2092 pkey = &key; 2093 keylen = sizeof(key); 2094 break; 2095 case LOOKUP_UID: 2096 case LOOKUP_JAIL: 2097 check_uidgid( 2098 (ipfw_insn_u32 *)cmd, 2099 args, &ucred_lookup, 2100 &ucred_cache); 2101 key = vidx == LOOKUP_UID ? 2102 ucred_cache->cr_uid: 2103 ucred_cache->cr_prison->pr_id; 2104 pkey = &key; 2105 keylen = sizeof(key); 2106 break; 2107 case LOOKUP_DSCP: 2108 /* Need IP frame */ 2109 if (is_ipv6 == 0 && is_ipv4 == 0) 2110 break; 2111 if (is_ipv6) 2112 key = IPV6_DSCP( 2113 (struct ip6_hdr *)ip) >> 2; 2114 else 2115 key = ip->ip_tos >> 2; 2116 pkey = &key; 2117 keylen = sizeof(key); 2118 break; 2119 case LOOKUP_DST_MAC: 2120 case LOOKUP_SRC_MAC: 2121 /* Need ether frame */ 2122 if ((args->flags & IPFW_ARGS_ETHER) == 0) 2123 break; 2124 pkey = vidx == LOOKUP_DST_MAC ? 
2125 eh->ether_dhost: 2126 eh->ether_shost; 2127 keylen = ETHER_ADDR_LEN; 2128 break; 2129 } 2130 if (keylen == 0) 2131 break; 2132 match = ipfw_lookup_table(chain, 2133 cmd->arg1, keylen, pkey, &vidx); 2134 if (!match) 2135 break; 2136 tablearg = vidx; 2137 break; 2138 } 2139 /* cmdlen =< F_INSN_SIZE(ipfw_insn_u32) */ 2140 /* FALLTHROUGH */ 2141 } 2142 case O_IP_SRC_LOOKUP: 2143 { 2144 void *pkey; 2145 uint32_t vidx; 2146 uint16_t keylen; 2147 2148 if (is_ipv4) { 2149 keylen = sizeof(in_addr_t); 2150 if (cmd->opcode == O_IP_DST_LOOKUP) 2151 pkey = &dst_ip; 2152 else 2153 pkey = &src_ip; 2154 } else if (is_ipv6) { 2155 keylen = sizeof(struct in6_addr); 2156 if (cmd->opcode == O_IP_DST_LOOKUP) 2157 pkey = &args->f_id.dst_ip6; 2158 else 2159 pkey = &args->f_id.src_ip6; 2160 } else 2161 break; 2162 match = ipfw_lookup_table(chain, cmd->arg1, 2163 keylen, pkey, &vidx); 2164 if (!match) 2165 break; 2166 if (cmdlen == F_INSN_SIZE(ipfw_insn_u32)) { 2167 match = ((ipfw_insn_u32 *)cmd)->d[0] == 2168 TARG_VAL(chain, vidx, tag); 2169 if (!match) 2170 break; 2171 } 2172 tablearg = vidx; 2173 break; 2174 } 2175 2176 case O_MAC_SRC_LOOKUP: 2177 case O_MAC_DST_LOOKUP: 2178 { 2179 void *pkey; 2180 uint32_t vidx; 2181 uint16_t keylen = ETHER_ADDR_LEN; 2182 2183 /* Need ether frame */ 2184 if ((args->flags & IPFW_ARGS_ETHER) == 0) 2185 break; 2186 2187 if (cmd->opcode == O_MAC_DST_LOOKUP) 2188 pkey = eh->ether_dhost; 2189 else 2190 pkey = eh->ether_shost; 2191 2192 match = ipfw_lookup_table(chain, cmd->arg1, 2193 keylen, pkey, &vidx); 2194 if (!match) 2195 break; 2196 if (cmdlen == F_INSN_SIZE(ipfw_insn_u32)) { 2197 match = ((ipfw_insn_u32 *)cmd)->d[0] == 2198 TARG_VAL(chain, vidx, tag); 2199 if (!match) 2200 break; 2201 } 2202 tablearg = vidx; 2203 break; 2204 } 2205 2206 case O_IP_FLOW_LOOKUP: 2207 { 2208 uint32_t v = 0; 2209 match = ipfw_lookup_table(chain, 2210 cmd->arg1, 0, &args->f_id, &v); 2211 if (!match) 2212 break; 2213 if (cmdlen == F_INSN_SIZE(ipfw_insn_u32)) 2214 match = ((ipfw_insn_u32 *)cmd)->d[0] == 2215 TARG_VAL(chain, v, tag); 2216 if (match) 2217 tablearg = v; 2218 } 2219 break; 2220 case O_IP_SRC_MASK: 2221 case O_IP_DST_MASK: 2222 if (is_ipv4) { 2223 uint32_t a = 2224 (cmd->opcode == O_IP_DST_MASK) ? 2225 dst_ip.s_addr : src_ip.s_addr; 2226 uint32_t *p = ((ipfw_insn_u32 *)cmd)->d; 2227 int i = cmdlen-1; 2228 2229 for (; !match && i>0; i-= 2, p+= 2) 2230 match = (p[0] == (a & p[1])); 2231 } 2232 break; 2233 2234 case O_IP_SRC_ME: 2235 if (is_ipv4) { 2236 match = in_localip(src_ip); 2237 break; 2238 } 2239 #ifdef INET6 2240 /* FALLTHROUGH */ 2241 case O_IP6_SRC_ME: 2242 match = is_ipv6 && 2243 ipfw_localip6(&args->f_id.src_ip6); 2244 #endif 2245 break; 2246 2247 case O_IP_DST_SET: 2248 case O_IP_SRC_SET: 2249 if (is_ipv4) { 2250 u_int32_t *d = (u_int32_t *)(cmd+1); 2251 u_int32_t addr = 2252 cmd->opcode == O_IP_DST_SET ? 
2253 args->f_id.dst_ip : 2254 args->f_id.src_ip; 2255 2256 if (addr < d[0]) 2257 break; 2258 addr -= d[0]; /* subtract base */ 2259 match = (addr < cmd->arg1) && 2260 ( d[ 1 + (addr>>5)] & 2261 (1<<(addr & 0x1f)) ); 2262 } 2263 break; 2264 2265 case O_IP_DST: 2266 match = is_ipv4 && 2267 (((ipfw_insn_ip *)cmd)->addr.s_addr == 2268 dst_ip.s_addr); 2269 break; 2270 2271 case O_IP_DST_ME: 2272 if (is_ipv4) { 2273 match = in_localip(dst_ip); 2274 break; 2275 } 2276 #ifdef INET6 2277 /* FALLTHROUGH */ 2278 case O_IP6_DST_ME: 2279 match = is_ipv6 && 2280 ipfw_localip6(&args->f_id.dst_ip6); 2281 #endif 2282 break; 2283 2284 case O_IP_SRCPORT: 2285 case O_IP_DSTPORT: 2286 /* 2287 * offset == 0 && proto != 0 is enough 2288 * to guarantee that we have a 2289 * packet with port info. 2290 */ 2291 if ((proto == IPPROTO_UDP || 2292 proto == IPPROTO_UDPLITE || 2293 proto == IPPROTO_TCP || 2294 proto == IPPROTO_SCTP) && offset == 0) { 2295 u_int16_t x = 2296 (cmd->opcode == O_IP_SRCPORT) ? 2297 src_port : dst_port ; 2298 u_int16_t *p = 2299 ((ipfw_insn_u16 *)cmd)->ports; 2300 int i; 2301 2302 for (i = cmdlen - 1; !match && i>0; 2303 i--, p += 2) 2304 match = (x>=p[0] && x<=p[1]); 2305 } 2306 break; 2307 2308 case O_ICMPTYPE: 2309 match = (offset == 0 && proto==IPPROTO_ICMP && 2310 icmptype_match(ICMP(ulp), (ipfw_insn_u32 *)cmd) ); 2311 break; 2312 2313 #ifdef INET6 2314 case O_ICMP6TYPE: 2315 match = is_ipv6 && offset == 0 && 2316 proto==IPPROTO_ICMPV6 && 2317 icmp6type_match( 2318 ICMP6(ulp)->icmp6_type, 2319 (ipfw_insn_u32 *)cmd); 2320 break; 2321 #endif /* INET6 */ 2322 2323 case O_IPOPT: 2324 match = (is_ipv4 && 2325 ipopts_match(ip, cmd) ); 2326 break; 2327 2328 case O_IPVER: 2329 match = ((is_ipv4 || is_ipv6) && 2330 cmd->arg1 == ip->ip_v); 2331 break; 2332 2333 case O_IPID: 2334 case O_IPTTL: 2335 if (!is_ipv4) 2336 break; 2337 case O_IPLEN: 2338 { /* only for IP packets */ 2339 uint16_t x; 2340 uint16_t *p; 2341 int i; 2342 2343 if (cmd->opcode == O_IPLEN) 2344 x = iplen; 2345 else if (cmd->opcode == O_IPTTL) 2346 x = ip->ip_ttl; 2347 else /* must be IPID */ 2348 x = ntohs(ip->ip_id); 2349 if (cmdlen == 1) { 2350 match = (cmd->arg1 == x); 2351 break; 2352 } 2353 /* otherwise we have ranges */ 2354 p = ((ipfw_insn_u16 *)cmd)->ports; 2355 i = cmdlen - 1; 2356 for (; !match && i>0; i--, p += 2) 2357 match = (x >= p[0] && x <= p[1]); 2358 } 2359 break; 2360 2361 case O_IPPRECEDENCE: 2362 match = (is_ipv4 && 2363 (cmd->arg1 == (ip->ip_tos & 0xe0)) ); 2364 break; 2365 2366 case O_IPTOS: 2367 match = (is_ipv4 && 2368 flags_match(cmd, ip->ip_tos)); 2369 break; 2370 2371 case O_DSCP: 2372 { 2373 uint32_t *p; 2374 uint16_t x; 2375 2376 p = ((ipfw_insn_u32 *)cmd)->d; 2377 2378 if (is_ipv4) 2379 x = ip->ip_tos >> 2; 2380 else if (is_ipv6) { 2381 x = IPV6_DSCP( 2382 (struct ip6_hdr *)ip) >> 2; 2383 x &= 0x3f; 2384 } else 2385 break; 2386 2387 /* DSCP bitmask is stored as low_u32 high_u32 */ 2388 if (x >= 32) 2389 match = *(p + 1) & (1 << (x - 32)); 2390 else 2391 match = *p & (1 << x); 2392 } 2393 break; 2394 2395 case O_TCPDATALEN: 2396 if (proto == IPPROTO_TCP && offset == 0) { 2397 struct tcphdr *tcp; 2398 uint16_t x; 2399 uint16_t *p; 2400 int i; 2401 #ifdef INET6 2402 if (is_ipv6) { 2403 struct ip6_hdr *ip6; 2404 2405 ip6 = (struct ip6_hdr *)ip; 2406 if (ip6->ip6_plen == 0) { 2407 /* 2408 * Jumbo payload is not 2409 * supported by this 2410 * opcode. 
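 * (A jumbogram has ip6_plen == 0 and carries its real length in a hop-by-hop Jumbo Payload option, which is not parsed here.)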
2411 */ 2412 break; 2413 } 2414 x = iplen - hlen; 2415 } else 2416 #endif /* INET6 */ 2417 x = iplen - (ip->ip_hl << 2); 2418 tcp = TCP(ulp); 2419 x -= tcp->th_off << 2; 2420 if (cmdlen == 1) { 2421 match = (cmd->arg1 == x); 2422 break; 2423 } 2424 /* otherwise we have ranges */ 2425 p = ((ipfw_insn_u16 *)cmd)->ports; 2426 i = cmdlen - 1; 2427 for (; !match && i>0; i--, p += 2) 2428 match = (x >= p[0] && x <= p[1]); 2429 } 2430 break; 2431 2432 case O_TCPFLAGS: 2433 match = (proto == IPPROTO_TCP && offset == 0 && 2434 flags_match(cmd, TCP(ulp)->th_flags)); 2435 break; 2436 2437 case O_TCPOPTS: 2438 if (proto == IPPROTO_TCP && offset == 0 && ulp){ 2439 PULLUP_LEN_LOCKED(hlen, ulp, 2440 (TCP(ulp)->th_off << 2)); 2441 match = tcpopts_match(TCP(ulp), cmd); 2442 } 2443 break; 2444 2445 case O_TCPSEQ: 2446 match = (proto == IPPROTO_TCP && offset == 0 && 2447 ((ipfw_insn_u32 *)cmd)->d[0] == 2448 TCP(ulp)->th_seq); 2449 break; 2450 2451 case O_TCPACK: 2452 match = (proto == IPPROTO_TCP && offset == 0 && 2453 ((ipfw_insn_u32 *)cmd)->d[0] == 2454 TCP(ulp)->th_ack); 2455 break; 2456 2457 case O_TCPMSS: 2458 if (proto == IPPROTO_TCP && 2459 (args->f_id._flags & TH_SYN) != 0 && 2460 ulp != NULL) { 2461 uint16_t mss, *p; 2462 int i; 2463 2464 PULLUP_LEN_LOCKED(hlen, ulp, 2465 (TCP(ulp)->th_off << 2)); 2466 if ((tcpopts_parse(TCP(ulp), &mss) & 2467 IP_FW_TCPOPT_MSS) == 0) 2468 break; 2469 if (cmdlen == 1) { 2470 match = (cmd->arg1 == mss); 2471 break; 2472 } 2473 /* Otherwise we have ranges. */ 2474 p = ((ipfw_insn_u16 *)cmd)->ports; 2475 i = cmdlen - 1; 2476 for (; !match && i > 0; i--, p += 2) 2477 match = (mss >= p[0] && 2478 mss <= p[1]); 2479 } 2480 break; 2481 2482 case O_TCPWIN: 2483 if (proto == IPPROTO_TCP && offset == 0) { 2484 uint16_t x; 2485 uint16_t *p; 2486 int i; 2487 2488 x = ntohs(TCP(ulp)->th_win); 2489 if (cmdlen == 1) { 2490 match = (cmd->arg1 == x); 2491 break; 2492 } 2493 /* Otherwise we have ranges. */ 2494 p = ((ipfw_insn_u16 *)cmd)->ports; 2495 i = cmdlen - 1; 2496 for (; !match && i > 0; i--, p += 2) 2497 match = (x >= p[0] && x <= p[1]); 2498 } 2499 break; 2500 2501 case O_ESTAB: 2502 /* reject packets which have SYN only */ 2503 /* XXX should i also check for TH_ACK ? */ 2504 match = (proto == IPPROTO_TCP && offset == 0 && 2505 (TCP(ulp)->th_flags & 2506 (TH_RST | TH_ACK | TH_SYN)) != TH_SYN); 2507 break; 2508 2509 case O_ALTQ: { 2510 struct pf_mtag *at; 2511 struct m_tag *mtag; 2512 ipfw_insn_altq *altq = (ipfw_insn_altq *)cmd; 2513 2514 /* 2515 * ALTQ uses mbuf tags from another 2516 * packet filtering system - pf(4). 2517 * We allocate a tag in its format 2518 * and fill it in, pretending to be pf(4). 2519 */ 2520 match = 1; 2521 at = pf_find_mtag(m); 2522 if (at != NULL && at->qid != 0) 2523 break; 2524 mtag = m_tag_get(PACKET_TAG_PF, 2525 sizeof(struct pf_mtag), M_NOWAIT | M_ZERO); 2526 if (mtag == NULL) { 2527 /* 2528 * Let the packet fall back to the 2529 * default ALTQ. 2530 */ 2531 break; 2532 } 2533 m_tag_prepend(m, mtag); 2534 at = (struct pf_mtag *)(mtag + 1); 2535 at->qid = altq->qid; 2536 at->hdr = ip; 2537 break; 2538 } 2539 2540 case O_LOG: 2541 ipfw_log(chain, f, hlen, args, 2542 offset | ip6f_mf, tablearg, ip); 2543 match = 1; 2544 break; 2545 2546 case O_PROB: 2547 match = (random()<((ipfw_insn_u32 *)cmd)->d[0]); 2548 break; 2549 2550 case O_VERREVPATH: 2551 /* Outgoing packets automatically pass/match */ 2552 match = (args->flags & IPFW_ARGS_OUT || 2553 ( 2554 #ifdef INET6 2555 is_ipv6 ? 
2556 verify_path6(&(args->f_id.src_ip6), 2557 iif, args->f_id.fib) : 2558 #endif 2559 verify_path(src_ip, iif, args->f_id.fib))); 2560 break; 2561 2562 case O_VERSRCREACH: 2563 /* Outgoing packets automatically pass/match */ 2564 match = (hlen > 0 && ((oif != NULL) || ( 2565 #ifdef INET6 2566 is_ipv6 ? 2567 verify_path6(&(args->f_id.src_ip6), 2568 NULL, args->f_id.fib) : 2569 #endif 2570 verify_path(src_ip, NULL, args->f_id.fib)))); 2571 break; 2572 2573 case O_ANTISPOOF: 2574 /* Outgoing packets automatically pass/match */ 2575 if (oif == NULL && hlen > 0 && 2576 ( (is_ipv4 && in_localaddr(src_ip)) 2577 #ifdef INET6 2578 || (is_ipv6 && 2579 in6_localaddr(&(args->f_id.src_ip6))) 2580 #endif 2581 )) 2582 match = 2583 #ifdef INET6 2584 is_ipv6 ? verify_path6( 2585 &(args->f_id.src_ip6), iif, 2586 args->f_id.fib) : 2587 #endif 2588 verify_path(src_ip, iif, 2589 args->f_id.fib); 2590 else 2591 match = 1; 2592 break; 2593 2594 case O_IPSEC: 2595 match = (m_tag_find(m, 2596 PACKET_TAG_IPSEC_IN_DONE, NULL) != NULL); 2597 /* otherwise no match */ 2598 break; 2599 2600 #ifdef INET6 2601 case O_IP6_SRC: 2602 match = is_ipv6 && 2603 IN6_ARE_ADDR_EQUAL(&args->f_id.src_ip6, 2604 &((ipfw_insn_ip6 *)cmd)->addr6); 2605 break; 2606 2607 case O_IP6_DST: 2608 match = is_ipv6 && 2609 IN6_ARE_ADDR_EQUAL(&args->f_id.dst_ip6, 2610 &((ipfw_insn_ip6 *)cmd)->addr6); 2611 break; 2612 case O_IP6_SRC_MASK: 2613 case O_IP6_DST_MASK: 2614 if (is_ipv6) { 2615 int i = cmdlen - 1; 2616 struct in6_addr p; 2617 struct in6_addr *d = 2618 &((ipfw_insn_ip6 *)cmd)->addr6; 2619 2620 for (; !match && i > 0; d += 2, 2621 i -= F_INSN_SIZE(struct in6_addr) 2622 * 2) { 2623 p = (cmd->opcode == 2624 O_IP6_SRC_MASK) ? 2625 args->f_id.src_ip6: 2626 args->f_id.dst_ip6; 2627 APPLY_MASK(&p, &d[1]); 2628 match = 2629 IN6_ARE_ADDR_EQUAL(&d[0], 2630 &p); 2631 } 2632 } 2633 break; 2634 2635 case O_FLOW6ID: 2636 match = is_ipv6 && 2637 flow6id_match(args->f_id.flow_id6, 2638 (ipfw_insn_u32 *) cmd); 2639 break; 2640 2641 case O_EXT_HDR: 2642 match = is_ipv6 && 2643 (ext_hd & ((ipfw_insn *) cmd)->arg1); 2644 break; 2645 2646 case O_IP6: 2647 match = is_ipv6; 2648 break; 2649 #endif 2650 2651 case O_IP4: 2652 match = is_ipv4; 2653 break; 2654 2655 case O_TAG: { 2656 struct m_tag *mtag; 2657 uint32_t tag = TARG(cmd->arg1, tag); 2658 2659 /* Packet is already tagged with this tag? */ 2660 mtag = m_tag_locate(m, MTAG_IPFW, tag, NULL); 2661 2662 /* We have `untag' action when F_NOT flag is 2663 * present. And we must remove this mtag from 2664 * mbuf and reset `match' to zero (`match' will 2665 * be inversed later). 2666 * Otherwise we should allocate new mtag and 2667 * push it into mbuf. 
2668 */ 2669 if (cmd->len & F_NOT) { /* `untag' action */ 2670 if (mtag != NULL) 2671 m_tag_delete(m, mtag); 2672 match = 0; 2673 } else { 2674 if (mtag == NULL) { 2675 mtag = m_tag_alloc( MTAG_IPFW, 2676 tag, 0, M_NOWAIT); 2677 if (mtag != NULL) 2678 m_tag_prepend(m, mtag); 2679 } 2680 match = 1; 2681 } 2682 break; 2683 } 2684 2685 case O_FIB: /* try match the specified fib */ 2686 if (args->f_id.fib == cmd->arg1) 2687 match = 1; 2688 break; 2689 2690 case O_SOCKARG: { 2691 #ifndef USERSPACE /* not supported in userspace */ 2692 struct inpcb *inp = args->inp; 2693 struct inpcbinfo *pi; 2694 bool inp_locked = false; 2695 2696 if (proto == IPPROTO_TCP) 2697 pi = &V_tcbinfo; 2698 else if (proto == IPPROTO_UDP) 2699 pi = &V_udbinfo; 2700 else if (proto == IPPROTO_UDPLITE) 2701 pi = &V_ulitecbinfo; 2702 else 2703 break; 2704 2705 /* 2706 * XXXRW: so_user_cookie should almost 2707 * certainly be inp_user_cookie? 2708 */ 2709 2710 /* 2711 * For incoming packet lookup the inpcb 2712 * using the src/dest ip/port tuple. 2713 */ 2714 if (is_ipv4 && inp == NULL) { 2715 inp = in_pcblookup(pi, 2716 src_ip, htons(src_port), 2717 dst_ip, htons(dst_port), 2718 INPLOOKUP_RLOCKPCB, NULL); 2719 inp_locked = true; 2720 } 2721 #ifdef INET6 2722 if (is_ipv6 && inp == NULL) { 2723 inp = in6_pcblookup(pi, 2724 &args->f_id.src_ip6, 2725 htons(src_port), 2726 &args->f_id.dst_ip6, 2727 htons(dst_port), 2728 INPLOOKUP_RLOCKPCB, NULL); 2729 inp_locked = true; 2730 } 2731 #endif /* INET6 */ 2732 if (inp != NULL) { 2733 if (inp->inp_socket) { 2734 tablearg = 2735 inp->inp_socket->so_user_cookie; 2736 if (tablearg) 2737 match = 1; 2738 } 2739 if (inp_locked) 2740 INP_RUNLOCK(inp); 2741 } 2742 #endif /* !USERSPACE */ 2743 break; 2744 } 2745 2746 case O_TAGGED: { 2747 struct m_tag *mtag; 2748 uint32_t tag = TARG(cmd->arg1, tag); 2749 2750 if (cmdlen == 1) { 2751 match = m_tag_locate(m, MTAG_IPFW, 2752 tag, NULL) != NULL; 2753 break; 2754 } 2755 2756 /* we have ranges */ 2757 for (mtag = m_tag_first(m); 2758 mtag != NULL && !match; 2759 mtag = m_tag_next(m, mtag)) { 2760 uint16_t *p; 2761 int i; 2762 2763 if (mtag->m_tag_cookie != MTAG_IPFW) 2764 continue; 2765 2766 p = ((ipfw_insn_u16 *)cmd)->ports; 2767 i = cmdlen - 1; 2768 for(; !match && i > 0; i--, p += 2) 2769 match = 2770 mtag->m_tag_id >= p[0] && 2771 mtag->m_tag_id <= p[1]; 2772 } 2773 break; 2774 } 2775 2776 /* 2777 * The second set of opcodes represents 'actions', 2778 * i.e. the terminal part of a rule once the packet 2779 * matches all previous patterns. 2780 * Typically there is only one action for each rule, 2781 * and the opcode is stored at the end of the rule 2782 * (but there are exceptions -- see below). 2783 * 2784 * In general, here we set retval and terminate the 2785 * outer loop (would be a 'break 3' in some language, 2786 * but we need to set l=0, done=1) 2787 * 2788 * Exceptions: 2789 * O_COUNT and O_SKIPTO actions: 2790 * instead of terminating, we jump to the next rule 2791 * (setting l=0), or to the SKIPTO target (setting 2792 * f/f_len, cmd and l as needed), respectively. 2793 * 2794 * O_TAG, O_LOG and O_ALTQ action parameters: 2795 * perform some action and set match = 1; 2796 * 2797 * O_LIMIT and O_KEEP_STATE: these opcodes are 2798 * not real 'actions', and are stored right 2799 * before the 'action' part of the rule (one 2800 * exception is O_SKIP_ACTION which could be 2801 * between these opcodes and 'action' one). 
2802 * These opcodes try to install an entry in the 2803 * state tables; if successful, we continue with 2804 * the next opcode (match=1; break;), otherwise 2805 * the packet must be dropped (set retval, 2806 * break loops with l=0, done=1) 2807 * 2808 * O_PROBE_STATE and O_CHECK_STATE: these opcodes 2809 * cause a lookup of the state table, and a jump 2810 * to the 'action' part of the parent rule 2811 * if an entry is found, or 2812 * (CHECK_STATE only) a jump to the next rule if 2813 * the entry is not found. 2814 * The result of the lookup is cached so that 2815 * further instances of these opcodes become NOPs. 2816 * The jump to the next rule is done by setting 2817 * l=0, cmdlen=0. 2818 * 2819 * O_SKIP_ACTION: this opcode is not a real 'action' 2820 * either, and is stored right before the 'action' 2821 * part of the rule, right after the O_KEEP_STATE 2822 * opcode. It causes match failure so the real 2823 * 'action' could be executed only if the rule 2824 * is checked via dynamic rule from the state 2825 * table, as in such case execution starts 2826 * from the true 'action' opcode directly. 2827 * 2828 */ 2829 case O_LIMIT: 2830 case O_KEEP_STATE: 2831 if (ipfw_dyn_install_state(chain, f, 2832 (ipfw_insn_limit *)cmd, args, ulp, 2833 pktlen, &dyn_info, tablearg)) { 2834 /* error or limit violation */ 2835 retval = IP_FW_DENY; 2836 l = 0; /* exit inner loop */ 2837 done = 1; /* exit outer loop */ 2838 } 2839 match = 1; 2840 break; 2841 2842 case O_PROBE_STATE: 2843 case O_CHECK_STATE: 2844 /* 2845 * dynamic rules are checked at the first 2846 * keep-state or check-state occurrence, 2847 * with the result being stored in dyn_info. 2848 * The compiler introduces a PROBE_STATE 2849 * instruction for us when we have a 2850 * KEEP_STATE (because PROBE_STATE needs 2851 * to be run first). 2852 */ 2853 if (DYN_LOOKUP_NEEDED(&dyn_info, cmd) && 2854 (q = ipfw_dyn_lookup_state(args, ulp, 2855 pktlen, cmd, &dyn_info)) != NULL) { 2856 /* 2857 * Found dynamic entry, jump to the 2858 * 'action' part of the parent rule 2859 * by setting f, cmd, l and clearing 2860 * cmdlen. 2861 */ 2862 f = q; 2863 f_pos = dyn_info.f_pos; 2864 cmd = ACTION_PTR(f); 2865 l = f->cmd_len - f->act_ofs; 2866 cmdlen = 0; 2867 match = 1; 2868 break; 2869 } 2870 /* 2871 * Dynamic entry not found. If CHECK_STATE, 2872 * skip to next rule, if PROBE_STATE just 2873 * ignore and continue with next opcode. 2874 */ 2875 if (cmd->opcode == O_CHECK_STATE) 2876 l = 0; /* exit inner loop */ 2877 match = 1; 2878 break; 2879 2880 case O_SKIP_ACTION: 2881 match = 0; /* skip to the next rule */ 2882 l = 0; /* exit inner loop */ 2883 break; 2884 2885 case O_ACCEPT: 2886 retval = 0; /* accept */ 2887 l = 0; /* exit inner loop */ 2888 done = 1; /* exit outer loop */ 2889 break; 2890 2891 case O_PIPE: 2892 case O_QUEUE: 2893 set_match(args, f_pos, chain); 2894 args->rule.info = TARG(cmd->arg1, pipe); 2895 if (cmd->opcode == O_PIPE) 2896 args->rule.info |= IPFW_IS_PIPE; 2897 if (V_fw_one_pass) 2898 args->rule.info |= IPFW_ONEPASS; 2899 retval = IP_FW_DUMMYNET; 2900 l = 0; /* exit inner loop */ 2901 done = 1; /* exit outer loop */ 2902 break; 2903 2904 case O_DIVERT: 2905 case O_TEE: 2906 if (args->flags & IPFW_ARGS_ETHER) 2907 break; /* not on layer 2 */ 2908 /* otherwise this is terminal */ 2909 l = 0; /* exit inner loop */ 2910 done = 1; /* exit outer loop */ 2911 retval = (cmd->opcode == O_DIVERT) ? 
2912 IP_FW_DIVERT : IP_FW_TEE; 2913 set_match(args, f_pos, chain); 2914 args->rule.info = TARG(cmd->arg1, divert); 2915 break; 2916 2917 case O_COUNT: 2918 IPFW_INC_RULE_COUNTER(f, pktlen); 2919 l = 0; /* exit inner loop */ 2920 break; 2921 2922 case O_SKIPTO: 2923 IPFW_INC_RULE_COUNTER(f, pktlen); 2924 f_pos = JUMP(chain, f, cmd->arg1, tablearg, 0); 2925 /* 2926 * Skip disabled rules, and re-enter 2927 * the inner loop with the correct 2928 * f_pos, f, l and cmd. 2929 * Also clear cmdlen and skip_or 2930 */ 2931 for (; f_pos < chain->n_rules - 1 && 2932 (V_set_disable & 2933 (1 << chain->map[f_pos]->set)); 2934 f_pos++) 2935 ; 2936 /* Re-enter the inner loop at the skipto rule. */ 2937 f = chain->map[f_pos]; 2938 l = f->cmd_len; 2939 cmd = f->cmd; 2940 match = 1; 2941 cmdlen = 0; 2942 skip_or = 0; 2943 continue; 2944 break; /* not reached */ 2945 2946 case O_CALLRETURN: { 2947 /* 2948 * Implementation of `subroutine' call/return, 2949 * in the stack carried in an mbuf tag. This 2950 * is different from `skipto' in that any call 2951 * address is possible (`skipto' must prevent 2952 * backward jumps to avoid endless loops). 2953 * We have `return' action when F_NOT flag is 2954 * present. The `m_tag_id' field is used as 2955 * stack pointer. 2956 */ 2957 struct m_tag *mtag; 2958 uint16_t jmpto, *stack; 2959 2960 #define IS_CALL ((cmd->len & F_NOT) == 0) 2961 #define IS_RETURN ((cmd->len & F_NOT) != 0) 2962 /* 2963 * Hand-rolled version of m_tag_locate() with 2964 * wildcard `type'. 2965 * If not already tagged, allocate new tag. 2966 */ 2967 mtag = m_tag_first(m); 2968 while (mtag != NULL) { 2969 if (mtag->m_tag_cookie == 2970 MTAG_IPFW_CALL) 2971 break; 2972 mtag = m_tag_next(m, mtag); 2973 } 2974 if (mtag == NULL && IS_CALL) { 2975 mtag = m_tag_alloc(MTAG_IPFW_CALL, 0, 2976 IPFW_CALLSTACK_SIZE * 2977 sizeof(uint16_t), M_NOWAIT); 2978 if (mtag != NULL) 2979 m_tag_prepend(m, mtag); 2980 } 2981 2982 /* 2983 * On error both `call' and `return' just 2984 * continue with next rule. 2985 */ 2986 if (IS_RETURN && (mtag == NULL || 2987 mtag->m_tag_id == 0)) { 2988 l = 0; /* exit inner loop */ 2989 break; 2990 } 2991 if (IS_CALL && (mtag == NULL || 2992 mtag->m_tag_id >= IPFW_CALLSTACK_SIZE)) { 2993 printf("ipfw: call stack error, " 2994 "go to next rule\n"); 2995 l = 0; /* exit inner loop */ 2996 break; 2997 } 2998 2999 IPFW_INC_RULE_COUNTER(f, pktlen); 3000 stack = (uint16_t *)(mtag + 1); 3001 3002 /* 3003 * The `call' action may use cached f_pos 3004 * (in f->next_rule), whose version is written 3005 * in f->next_rule. 3006 * The `return' action, however, doesn't have 3007 * fixed jump address in cmd->arg1 and can't use 3008 * cache. 3009 */ 3010 if (IS_CALL) { 3011 stack[mtag->m_tag_id] = f->rulenum; 3012 mtag->m_tag_id++; 3013 f_pos = JUMP(chain, f, cmd->arg1, 3014 tablearg, 1); 3015 } else { /* `return' action */ 3016 mtag->m_tag_id--; 3017 jmpto = stack[mtag->m_tag_id] + 1; 3018 f_pos = ipfw_find_rule(chain, jmpto, 0); 3019 } 3020 3021 /* 3022 * Skip disabled rules, and re-enter 3023 * the inner loop with the correct 3024 * f_pos, f, l and cmd. 3025 * Also clear cmdlen and skip_or 3026 */ 3027 for (; f_pos < chain->n_rules - 1 && 3028 (V_set_disable & 3029 (1 << chain->map[f_pos]->set)); f_pos++) 3030 ; 3031 /* Re-enter the inner loop at the dest rule. 
*/ 3032 f = chain->map[f_pos]; 3033 l = f->cmd_len; 3034 cmd = f->cmd; 3035 cmdlen = 0; 3036 skip_or = 0; 3037 continue; 3038 break; /* NOTREACHED */ 3039 } 3040 #undef IS_CALL 3041 #undef IS_RETURN 3042 3043 case O_REJECT: 3044 /* 3045 * Drop the packet and send a reject notice 3046 * if the packet is not ICMP (or is an ICMP 3047 * query), and it is not multicast/broadcast. 3048 */ 3049 if (hlen > 0 && is_ipv4 && offset == 0 && 3050 (proto != IPPROTO_ICMP || 3051 is_icmp_query(ICMP(ulp))) && 3052 !(m->m_flags & (M_BCAST|M_MCAST)) && 3053 !IN_MULTICAST(ntohl(dst_ip.s_addr))) { 3054 send_reject(args, cmd, iplen, ip); 3055 m = args->m; 3056 } 3057 /* FALLTHROUGH */ 3058 #ifdef INET6 3059 case O_UNREACH6: 3060 if (hlen > 0 && is_ipv6 && 3061 ((offset & IP6F_OFF_MASK) == 0) && 3062 (proto != IPPROTO_ICMPV6 || 3063 (is_icmp6_query(icmp6_type) == 1)) && 3064 !(m->m_flags & (M_BCAST|M_MCAST)) && 3065 !IN6_IS_ADDR_MULTICAST( 3066 &args->f_id.dst_ip6)) { 3067 send_reject6(args, 3068 cmd->opcode == O_REJECT ? 3069 map_icmp_unreach(cmd->arg1): 3070 cmd->arg1, hlen, 3071 (struct ip6_hdr *)ip); 3072 m = args->m; 3073 } 3074 /* FALLTHROUGH */ 3075 #endif 3076 case O_DENY: 3077 retval = IP_FW_DENY; 3078 l = 0; /* exit inner loop */ 3079 done = 1; /* exit outer loop */ 3080 break; 3081 3082 case O_FORWARD_IP: 3083 if (args->flags & IPFW_ARGS_ETHER) 3084 break; /* not valid on layer2 pkts */ 3085 if (q != f || 3086 dyn_info.direction == MATCH_FORWARD) { 3087 struct sockaddr_in *sa; 3088 3089 sa = &(((ipfw_insn_sa *)cmd)->sa); 3090 if (sa->sin_addr.s_addr == INADDR_ANY) { 3091 #ifdef INET6 3092 /* 3093 * We use O_FORWARD_IP opcode for 3094 * fwd rule with tablearg, but tables 3095 * now support IPv6 addresses. And 3096 * when we are inspecting IPv6 packet, 3097 * we can use nh6 field from 3098 * table_value as next_hop6 address. 3099 */ 3100 if (is_ipv6) { 3101 struct ip_fw_nh6 *nh6; 3102 3103 args->flags |= IPFW_ARGS_NH6; 3104 nh6 = &args->hopstore6; 3105 nh6->sin6_addr = TARG_VAL( 3106 chain, tablearg, nh6); 3107 nh6->sin6_port = sa->sin_port; 3108 nh6->sin6_scope_id = TARG_VAL( 3109 chain, tablearg, zoneid); 3110 } else 3111 #endif 3112 { 3113 args->flags |= IPFW_ARGS_NH4; 3114 args->hopstore.sin_port = 3115 sa->sin_port; 3116 sa = &args->hopstore; 3117 sa->sin_family = AF_INET; 3118 sa->sin_len = sizeof(*sa); 3119 sa->sin_addr.s_addr = htonl( 3120 TARG_VAL(chain, tablearg, 3121 nh4)); 3122 } 3123 } else { 3124 args->flags |= IPFW_ARGS_NH4PTR; 3125 args->next_hop = sa; 3126 } 3127 } 3128 retval = IP_FW_PASS; 3129 l = 0; /* exit inner loop */ 3130 done = 1; /* exit outer loop */ 3131 break; 3132 3133 #ifdef INET6 3134 case O_FORWARD_IP6: 3135 if (args->flags & IPFW_ARGS_ETHER) 3136 break; /* not valid on layer2 pkts */ 3137 if (q != f || 3138 dyn_info.direction == MATCH_FORWARD) { 3139 struct sockaddr_in6 *sin6; 3140 3141 sin6 = &(((ipfw_insn_sa6 *)cmd)->sa); 3142 args->flags |= IPFW_ARGS_NH6PTR; 3143 args->next_hop6 = sin6; 3144 } 3145 retval = IP_FW_PASS; 3146 l = 0; /* exit inner loop */ 3147 done = 1; /* exit outer loop */ 3148 break; 3149 #endif 3150 3151 case O_NETGRAPH: 3152 case O_NGTEE: 3153 set_match(args, f_pos, chain); 3154 args->rule.info = TARG(cmd->arg1, netgraph); 3155 if (V_fw_one_pass) 3156 args->rule.info |= IPFW_ONEPASS; 3157 retval = (cmd->opcode == O_NETGRAPH) ? 
3158 IP_FW_NETGRAPH : IP_FW_NGTEE; 3159 l = 0; /* exit inner loop */ 3160 done = 1; /* exit outer loop */ 3161 break; 3162 3163 case O_SETFIB: { 3164 uint32_t fib; 3165 3166 IPFW_INC_RULE_COUNTER(f, pktlen); 3167 fib = TARG(cmd->arg1, fib) & 0x7FFF; 3168 if (fib >= rt_numfibs) 3169 fib = 0; 3170 M_SETFIB(m, fib); 3171 args->f_id.fib = fib; /* XXX */ 3172 l = 0; /* exit inner loop */ 3173 break; 3174 } 3175 3176 case O_SETDSCP: { 3177 uint16_t code; 3178 3179 code = TARG(cmd->arg1, dscp) & 0x3F; 3180 l = 0; /* exit inner loop */ 3181 if (is_ipv4) { 3182 uint16_t old; 3183 3184 old = *(uint16_t *)ip; 3185 ip->ip_tos = (code << 2) | 3186 (ip->ip_tos & 0x03); 3187 ip->ip_sum = cksum_adjust(ip->ip_sum, 3188 old, *(uint16_t *)ip); 3189 } else if (is_ipv6) { 3190 /* update cached value */ 3191 args->f_id.flow_id6 = 3192 ntohl(*(uint32_t *)ip) & ~0x0FC00000; 3193 args->f_id.flow_id6 |= code << 22; 3194 3195 *((uint32_t *)ip) = 3196 htonl(args->f_id.flow_id6); 3197 } else 3198 break; 3199 3200 IPFW_INC_RULE_COUNTER(f, pktlen); 3201 break; 3202 } 3203 3204 case O_NAT: 3205 l = 0; /* exit inner loop */ 3206 done = 1; /* exit outer loop */ 3207 /* 3208 * Ensure that we do not invoke NAT handler for 3209 * non IPv4 packets. Libalias expects only IPv4. 3210 */ 3211 if (!is_ipv4 || !IPFW_NAT_LOADED) { 3212 retval = IP_FW_DENY; 3213 break; 3214 } 3215 3216 struct cfg_nat *t; 3217 int nat_id; 3218 3219 args->rule.info = 0; 3220 set_match(args, f_pos, chain); 3221 /* Check if this is 'global' nat rule */ 3222 if (cmd->arg1 == IP_FW_NAT44_GLOBAL) { 3223 retval = ipfw_nat_ptr(args, NULL, m); 3224 break; 3225 } 3226 t = ((ipfw_insn_nat *)cmd)->nat; 3227 if (t == NULL) { 3228 nat_id = TARG(cmd->arg1, nat); 3229 t = (*lookup_nat_ptr)(&chain->nat, nat_id); 3230 3231 if (t == NULL) { 3232 retval = IP_FW_DENY; 3233 break; 3234 } 3235 if (cmd->arg1 != IP_FW_TARG) 3236 ((ipfw_insn_nat *)cmd)->nat = t; 3237 } 3238 retval = ipfw_nat_ptr(args, t, m); 3239 break; 3240 3241 case O_REASS: { 3242 int ip_off; 3243 3244 l = 0; /* in any case exit inner loop */ 3245 if (is_ipv6) /* IPv6 is not supported yet */ 3246 break; 3247 IPFW_INC_RULE_COUNTER(f, pktlen); 3248 ip_off = ntohs(ip->ip_off); 3249 3250 /* if not fragmented, go to next rule */ 3251 if ((ip_off & (IP_MF | IP_OFFMASK)) == 0) 3252 break; 3253 3254 args->m = m = ip_reass(m); 3255 3256 /* 3257 * do IP header checksum fixup. 3258 */ 3259 if (m == NULL) { /* fragment got swallowed */ 3260 retval = IP_FW_DENY; 3261 } else { /* good, packet complete */ 3262 int hlen; 3263 3264 ip = mtod(m, struct ip *); 3265 hlen = ip->ip_hl << 2; 3266 ip->ip_sum = 0; 3267 if (hlen == sizeof(struct ip)) 3268 ip->ip_sum = in_cksum_hdr(ip); 3269 else 3270 ip->ip_sum = in_cksum(m, hlen); 3271 retval = IP_FW_REASS; 3272 args->rule.info = 0; 3273 set_match(args, f_pos, chain); 3274 } 3275 done = 1; /* exit outer loop */ 3276 break; 3277 } 3278 case O_EXTERNAL_ACTION: 3279 l = 0; /* in any case exit inner loop */ 3280 retval = ipfw_run_eaction(chain, args, 3281 cmd, &done); 3282 /* 3283 * If both @retval and @done are zero, 3284 * consider this as rule matching and 3285 * update counters. 3286 */ 3287 if (retval == 0 && done == 0) { 3288 IPFW_INC_RULE_COUNTER(f, pktlen); 3289 /* 3290 * Reset the result of the last 3291 * dynamic state lookup. 3292 * External action can change 3293 * @args content, and it may be 3294 * used for new state lookup later. 
3295 */ 3296 DYN_INFO_INIT(&dyn_info); 3297 } 3298 break; 3299 3300 default: 3301 panic("-- unknown opcode %d\n", cmd->opcode); 3302 } /* end of switch() on opcodes */ 3303 /* 3304 * if we get here with l=0, then match is irrelevant. 3305 */ 3306 3307 if (cmd->len & F_NOT) 3308 match = !match; 3309 3310 if (match) { 3311 if (cmd->len & F_OR) 3312 skip_or = 1; 3313 } else { 3314 if (!(cmd->len & F_OR)) /* not an OR block, */ 3315 break; /* try next rule */ 3316 } 3317 3318 } /* end of inner loop, scan opcodes */ 3319 #undef PULLUP_LEN 3320 #undef PULLUP_LEN_LOCKED 3321 3322 if (done) 3323 break; 3324 3325 /* next_rule:; */ /* try next rule */ 3326 3327 } /* end of outer for, scan rules */ 3328 3329 if (done) { 3330 struct ip_fw *rule = chain->map[f_pos]; 3331 /* Update statistics */ 3332 IPFW_INC_RULE_COUNTER(rule, pktlen); 3333 IPFW_PROBE(rule__matched, retval, 3334 is_ipv4 ? AF_INET : AF_INET6, 3335 is_ipv4 ? (uintptr_t)&src_ip : 3336 (uintptr_t)&args->f_id.src_ip6, 3337 is_ipv4 ? (uintptr_t)&dst_ip : 3338 (uintptr_t)&args->f_id.dst_ip6, 3339 args, rule); 3340 } else { 3341 retval = IP_FW_DENY; 3342 printf("ipfw: ouch!, skip past end of rules, denying packet\n"); 3343 } 3344 IPFW_PF_RUNLOCK(chain); 3345 #ifdef __FreeBSD__ 3346 if (ucred_cache != NULL) 3347 crfree(ucred_cache); 3348 #endif 3349 return (retval); 3350 3351 pullup_failed: 3352 if (V_fw_verbose) 3353 printf("ipfw: pullup failed\n"); 3354 return (IP_FW_DENY); 3355 } 3356 3357 /* 3358 * Set maximum number of tables that can be used in given VNET ipfw instance. 3359 */ 3360 #ifdef SYSCTL_NODE 3361 static int 3362 sysctl_ipfw_table_num(SYSCTL_HANDLER_ARGS) 3363 { 3364 int error; 3365 unsigned int ntables; 3366 3367 ntables = V_fw_tables_max; 3368 3369 error = sysctl_handle_int(oidp, &ntables, 0, req); 3370 /* Read operation or some error */ 3371 if ((error != 0) || (req->newptr == NULL)) 3372 return (error); 3373 3374 return (ipfw_resize_tables(&V_layer3_chain, ntables)); 3375 } 3376 3377 /* 3378 * Switches table namespace between global and per-set. 3379 */ 3380 static int 3381 sysctl_ipfw_tables_sets(SYSCTL_HANDLER_ARGS) 3382 { 3383 int error; 3384 unsigned int sets; 3385 3386 sets = V_fw_tables_sets; 3387 3388 error = sysctl_handle_int(oidp, &sets, 0, req); 3389 /* Read operation or some error */ 3390 if ((error != 0) || (req->newptr == NULL)) 3391 return (error); 3392 3393 return (ipfw_switch_tables_namespace(&V_layer3_chain, sets)); 3394 } 3395 #endif 3396 3397 /* 3398 * Module and VNET glue 3399 */ 3400 3401 /* 3402 * Stuff that must be initialised only on boot or module load 3403 */ 3404 static int 3405 ipfw_init(void) 3406 { 3407 int error = 0; 3408 3409 /* 3410 * Only print out this stuff the first time around, 3411 * when called from the sysinit code. 3412 */ 3413 printf("ipfw2 " 3414 #ifdef INET6 3415 "(+ipv6) " 3416 #endif 3417 "initialized, divert %s, nat %s, " 3418 "default to %s, logging ", 3419 #ifdef IPDIVERT 3420 "enabled", 3421 #else 3422 "loadable", 3423 #endif 3424 #ifdef IPFIREWALL_NAT 3425 "enabled", 3426 #else 3427 "loadable", 3428 #endif 3429 default_to_accept ? "accept" : "deny"); 3430 3431 /* 3432 * Note: V_xxx variables can be accessed here but the vnet specific 3433 * initializer may not have been called yet for the VIMAGE case. 3434 * Tuneables will have been processed. We will print out values for 3435 * the default vnet. 
3436 * XXX This should all be rationalized AFTER 8.0 3437 */ 3438 if (V_fw_verbose == 0) 3439 printf("disabled\n"); 3440 else if (V_verbose_limit == 0) 3441 printf("unlimited\n"); 3442 else 3443 printf("limited to %d packets/entry by default\n", 3444 V_verbose_limit); 3445 3446 /* Check user-supplied table count for validness */ 3447 if (default_fw_tables > IPFW_TABLES_MAX) 3448 default_fw_tables = IPFW_TABLES_MAX; 3449 3450 ipfw_init_sopt_handler(); 3451 ipfw_init_obj_rewriter(); 3452 ipfw_iface_init(); 3453 return (error); 3454 } 3455 3456 /* 3457 * Called for the removal of the last instance only on module unload. 3458 */ 3459 static void 3460 ipfw_destroy(void) 3461 { 3462 3463 ipfw_iface_destroy(); 3464 ipfw_destroy_sopt_handler(); 3465 ipfw_destroy_obj_rewriter(); 3466 printf("IP firewall unloaded\n"); 3467 } 3468 3469 /* 3470 * Stuff that must be initialized for every instance 3471 * (including the first of course). 3472 */ 3473 static int 3474 vnet_ipfw_init(const void *unused) 3475 { 3476 int error, first; 3477 struct ip_fw *rule = NULL; 3478 struct ip_fw_chain *chain; 3479 3480 chain = &V_layer3_chain; 3481 3482 first = IS_DEFAULT_VNET(curvnet) ? 1 : 0; 3483 3484 /* First set up some values that are compile time options */ 3485 V_autoinc_step = 100; /* bounded to 1..1000 in add_rule() */ 3486 V_fw_deny_unknown_exthdrs = 1; 3487 #ifdef IPFIREWALL_VERBOSE 3488 V_fw_verbose = 1; 3489 #endif 3490 #ifdef IPFIREWALL_VERBOSE_LIMIT 3491 V_verbose_limit = IPFIREWALL_VERBOSE_LIMIT; 3492 #endif 3493 #ifdef IPFIREWALL_NAT 3494 LIST_INIT(&chain->nat); 3495 #endif 3496 3497 /* Init shared services hash table */ 3498 ipfw_init_srv(chain); 3499 3500 ipfw_init_counters(); 3501 /* Set initial number of tables */ 3502 V_fw_tables_max = default_fw_tables; 3503 error = ipfw_init_tables(chain, first); 3504 if (error) { 3505 printf("ipfw2: setting up tables failed\n"); 3506 free(chain->map, M_IPFW); 3507 free(rule, M_IPFW); 3508 return (ENOSPC); 3509 } 3510 3511 IPFW_LOCK_INIT(chain); 3512 3513 /* fill and insert the default rule */ 3514 rule = ipfw_alloc_rule(chain, sizeof(struct ip_fw)); 3515 rule->flags |= IPFW_RULE_NOOPT; 3516 rule->cmd_len = 1; 3517 rule->cmd[0].len = 1; 3518 rule->cmd[0].opcode = default_to_accept ? O_ACCEPT : O_DENY; 3519 chain->default_rule = rule; 3520 ipfw_add_protected_rule(chain, rule, 0); 3521 3522 ipfw_dyn_init(chain); 3523 ipfw_eaction_init(chain, first); 3524 #ifdef LINEAR_SKIPTO 3525 ipfw_init_skipto_cache(chain); 3526 #endif 3527 ipfw_bpf_init(first); 3528 3529 /* First set up some values that are compile time options */ 3530 V_ipfw_vnet_ready = 1; /* Open for business */ 3531 3532 /* 3533 * Hook the sockopt handler and pfil hooks for ipv4 and ipv6. 3534 * Even if the latter two fail we still keep the module alive 3535 * because the sockopt and layer2 paths are still useful. 3536 * ipfw[6]_hook return 0 on success, ENOENT on failure, 3537 * so we can ignore the exact return value and just set a flag. 3538 * 3539 * Note that V_fw[6]_enable are manipulated by a SYSCTL_PROC so 3540 * changes in the underlying (per-vnet) variables trigger 3541 * immediate hook()/unhook() calls. 3542 * In layer2 we have the same behaviour, except that V_ether_ipfw 3543 * is checked on each packet because there are no pfil hooks. 3544 */ 3545 V_ip_fw_ctl_ptr = ipfw_ctl3; 3546 error = ipfw_attach_hooks(); 3547 return (error); 3548 } 3549 3550 /* 3551 * Called for the removal of each instance. 
3552 */ 3553 static int 3554 vnet_ipfw_uninit(const void *unused) 3555 { 3556 struct ip_fw *reap; 3557 struct ip_fw_chain *chain = &V_layer3_chain; 3558 int i, last; 3559 3560 V_ipfw_vnet_ready = 0; /* tell new callers to go away */ 3561 /* 3562 * disconnect from ipv4, ipv6, layer2 and sockopt. 3563 * Then grab, release and grab again the WLOCK so we make 3564 * sure the update is propagated and nobody will be in. 3565 */ 3566 ipfw_detach_hooks(); 3567 V_ip_fw_ctl_ptr = NULL; 3568 3569 last = IS_DEFAULT_VNET(curvnet) ? 1 : 0; 3570 3571 IPFW_UH_WLOCK(chain); 3572 IPFW_UH_WUNLOCK(chain); 3573 3574 ipfw_dyn_uninit(0); /* run the callout_drain */ 3575 3576 IPFW_UH_WLOCK(chain); 3577 3578 reap = NULL; 3579 IPFW_WLOCK(chain); 3580 for (i = 0; i < chain->n_rules; i++) 3581 ipfw_reap_add(chain, &reap, chain->map[i]); 3582 free(chain->map, M_IPFW); 3583 #ifdef LINEAR_SKIPTO 3584 ipfw_destroy_skipto_cache(chain); 3585 #endif 3586 IPFW_WUNLOCK(chain); 3587 IPFW_UH_WUNLOCK(chain); 3588 ipfw_destroy_tables(chain, last); 3589 ipfw_eaction_uninit(chain, last); 3590 if (reap != NULL) 3591 ipfw_reap_rules(reap); 3592 vnet_ipfw_iface_destroy(chain); 3593 ipfw_destroy_srv(chain); 3594 IPFW_LOCK_DESTROY(chain); 3595 ipfw_dyn_uninit(1); /* free the remaining parts */ 3596 ipfw_destroy_counters(); 3597 ipfw_bpf_uninit(last); 3598 return (0); 3599 } 3600 3601 /* 3602 * Module event handler. 3603 * In general we have the choice of handling most of these events by the 3604 * event handler or by the (VNET_)SYS(UN)INIT handlers. I have chosen to 3605 * use the SYSINIT handlers as they are more capable of expressing the 3606 * flow of control during module and vnet operations, so this is just 3607 * a skeleton. Note there is no SYSINIT equivalent of the module 3608 * SHUTDOWN handler, but we don't have anything to do in that case anyhow. 3609 */ 3610 static int 3611 ipfw_modevent(module_t mod, int type, void *unused) 3612 { 3613 int err = 0; 3614 3615 switch (type) { 3616 case MOD_LOAD: 3617 /* Called once at module load or 3618 * system boot if compiled in. */ 3619 break; 3620 case MOD_QUIESCE: 3621 /* Called before unload. May veto unloading. */ 3622 break; 3623 case MOD_UNLOAD: 3624 /* Called during unload. */ 3625 break; 3626 case MOD_SHUTDOWN: 3627 /* Called during system shutdown. */ 3628 break; 3629 default: 3630 err = EOPNOTSUPP; 3631 break; 3632 } 3633 return err; 3634 } 3635 3636 static moduledata_t ipfwmod = { 3637 "ipfw", 3638 ipfw_modevent, 3639 0 3640 }; 3641 3642 /* Define startup order. */ 3643 #define IPFW_SI_SUB_FIREWALL SI_SUB_PROTO_FIREWALL 3644 #define IPFW_MODEVENT_ORDER (SI_ORDER_ANY - 255) /* On boot slot in here. */ 3645 #define IPFW_MODULE_ORDER (IPFW_MODEVENT_ORDER + 1) /* A little later. */ 3646 #define IPFW_VNET_ORDER (IPFW_MODEVENT_ORDER + 2) /* Later still. */ 3647 3648 DECLARE_MODULE(ipfw, ipfwmod, IPFW_SI_SUB_FIREWALL, IPFW_MODEVENT_ORDER); 3649 FEATURE(ipfw_ctl3, "ipfw new sockopt calls"); 3650 MODULE_VERSION(ipfw, 3); 3651 /* should declare some dependencies here */ 3652 3653 /* 3654 * Starting up. Done in order after ipfwmod() has been called. 3655 * VNET_SYSINIT is also called for each existing vnet and each new vnet. 3656 */ 3657 SYSINIT(ipfw_init, IPFW_SI_SUB_FIREWALL, IPFW_MODULE_ORDER, 3658 ipfw_init, NULL); 3659 VNET_SYSINIT(vnet_ipfw_init, IPFW_SI_SUB_FIREWALL, IPFW_VNET_ORDER, 3660 vnet_ipfw_init, NULL); 3661 3662 /* 3663 * Closing up shop. These are done in REVERSE ORDER, but still 3664 * after ipfwmod() has been called. Not called on reboot. 
* VNET_SYSUNINIT is also called for each vnet as it exits, 3666 * or when the module is unloaded. 3667 */ 3668 SYSUNINIT(ipfw_destroy, IPFW_SI_SUB_FIREWALL, IPFW_MODULE_ORDER, 3669 ipfw_destroy, NULL); 3670 VNET_SYSUNINIT(vnet_ipfw_uninit, IPFW_SI_SUB_FIREWALL, IPFW_VNET_ORDER, 3671 vnet_ipfw_uninit, NULL); 3672 /* end of file */ 3673