1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 /*
30 * The FreeBSD IP packet firewall, main file
31 */
32
33 #include "opt_ipfw.h"
34 #include "opt_ipdivert.h"
35 #include "opt_inet.h"
36 #ifndef INET
37 #error "IPFIREWALL requires INET"
38 #endif /* INET */
39 #include "opt_inet6.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/condvar.h>
44 #include <sys/counter.h>
45 #include <sys/eventhandler.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/kernel.h>
49 #include <sys/lock.h>
50 #include <sys/jail.h>
51 #include <sys/module.h>
52 #include <sys/priv.h>
53 #include <sys/proc.h>
54 #include <sys/rwlock.h>
55 #include <sys/rmlock.h>
56 #include <sys/sdt.h>
57 #include <sys/socket.h>
58 #include <sys/socketvar.h>
59 #include <sys/sysctl.h>
60 #include <sys/syslog.h>
61 #include <sys/ucred.h>
62 #include <net/ethernet.h> /* for ETHERTYPE_IP */
63 #include <net/if.h>
64 #include <net/if_var.h>
65 #include <net/if_private.h>
66 #include <net/route.h>
67 #include <net/route/nhop.h>
68 #include <net/pfil.h>
69 #include <net/vnet.h>
70 #include <net/if_pfsync.h>
71
72 #include <netpfil/pf/pf_mtag.h>
73
74 #include <netinet/in.h>
75 #include <netinet/in_var.h>
76 #include <netinet/in_pcb.h>
77 #include <netinet/ip.h>
78 #include <netinet/ip_var.h>
79 #include <netinet/ip_icmp.h>
80 #include <netinet/ip_fw.h>
81 #include <netinet/ip_carp.h>
82 #include <netinet/pim.h>
83 #include <netinet/tcp_var.h>
84 #include <netinet/udp.h>
85 #include <netinet/udp_var.h>
86 #include <netinet/sctp.h>
87 #include <netinet/sctp_crc32.h>
88 #include <netinet/sctp_header.h>
89
90 #include <netinet/ip6.h>
91 #include <netinet/icmp6.h>
92 #include <netinet/in_fib.h>
93 #ifdef INET6
94 #include <netinet6/in6_fib.h>
95 #include <netinet6/in6_pcb.h>
96 #include <netinet6/scope6_var.h>
97 #include <netinet6/ip6_var.h>
98 #endif
99
100 #include <net/if_gre.h> /* for struct grehdr */
101
102 #include <netpfil/ipfw/ip_fw_private.h>
103
104 #include <machine/in_cksum.h> /* XXX for in_cksum */
105
106 #ifdef MAC
107 #include <security/mac/mac_framework.h>
108 #endif
109
110 #define IPFW_PROBE(probe, arg0, arg1, arg2, arg3, arg4, arg5) \
111 SDT_PROBE6(ipfw, , , probe, arg0, arg1, arg2, arg3, arg4, arg5)
112
113 SDT_PROVIDER_DEFINE(ipfw);
114 SDT_PROBE_DEFINE6(ipfw, , , rule__matched,
115 "int", /* retval */
116 "int", /* af */
117 "void *", /* src addr */
118 "void *", /* dst addr */
119 "struct ip_fw_args *", /* args */
120 "struct ip_fw *" /* rule */);
121
122 /*
123 * static variables followed by global ones.
124 * All ipfw global variables are here.
125 */
126
127 VNET_DEFINE_STATIC(int, fw_deny_unknown_exthdrs);
128 #define V_fw_deny_unknown_exthdrs VNET(fw_deny_unknown_exthdrs)
129
130 VNET_DEFINE_STATIC(int, fw_permit_single_frag6) = 1;
131 #define V_fw_permit_single_frag6 VNET(fw_permit_single_frag6)
132
133 #ifdef IPFIREWALL_DEFAULT_TO_ACCEPT
134 static int default_to_accept = 1;
135 #else
136 static int default_to_accept;
137 #endif
138
139 VNET_DEFINE(int, autoinc_step);
140 VNET_DEFINE(int, fw_one_pass) = 1;
141
142 VNET_DEFINE(unsigned int, fw_tables_max);
143 VNET_DEFINE(unsigned int, fw_tables_sets) = 0; /* Don't use set-aware tables */
144 /* Use 128 tables by default */
145 static unsigned int default_fw_tables = IPFW_TABLES_DEFAULT;
146
147 static int jump_lookup_pos(struct ip_fw_chain *chain, struct ip_fw *f, int num,
148 int tablearg, int jump_backwards);
149 #ifndef LINEAR_SKIPTO
150 static int jump_cached(struct ip_fw_chain *chain, struct ip_fw *f, int num,
151 int tablearg, int jump_backwards);
152 #define JUMP(ch, f, num, targ, back) jump_cached(ch, f, num, targ, back)
153 #else
154 #define JUMP(ch, f, num, targ, back) jump_lookup_pos(ch, f, num, targ, back)
155 #endif
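/*
 * JUMP() resolves a skipto-style target rule number to an index in the
 * rule array.  With LINEAR_SKIPTO the chain keeps a rulenum->position
 * index (idxmap) so the lookup is O(1); otherwise jump_cached() caches
 * the last result per rule and falls back to a rule search.
 */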
156
157 /*
158 * Each rule belongs to one of 32 different sets (0..31).
159 * The variable set_disable contains one bit per set.
160 * If the bit is set, all rules in the corresponding set
161 * are disabled. Set RESVD_SET(31) is reserved for the default rule
162 * and rules that are not deleted by the flush command,
163 * and CANNOT be disabled.
164 * Rules in set RESVD_SET can only be deleted individually.
165 */
166 VNET_DEFINE(u_int32_t, set_disable);
167 #define V_set_disable VNET(set_disable)
168
169 VNET_DEFINE(int, fw_verbose);
170 /* counter for ipfw_log(NULL...) */
171 VNET_DEFINE(u_int64_t, norule_counter);
172 VNET_DEFINE(int, verbose_limit);
173
174 /* layer3_chain contains the list of rules for layer 3 */
175 VNET_DEFINE(struct ip_fw_chain, layer3_chain);
176
177 /* ipfw_vnet_ready controls when we are open for business */
178 VNET_DEFINE(int, ipfw_vnet_ready) = 0;
179
180 VNET_DEFINE(int, ipfw_nat_ready) = 0;
181
182 ipfw_nat_t *ipfw_nat_ptr = NULL;
183 struct cfg_nat *(*lookup_nat_ptr)(struct nat_list *, int);
184 ipfw_nat_cfg_t *ipfw_nat_cfg_ptr;
185 ipfw_nat_cfg_t *ipfw_nat_del_ptr;
186 ipfw_nat_cfg_t *ipfw_nat_get_cfg_ptr;
187 ipfw_nat_cfg_t *ipfw_nat_get_log_ptr;
188
189 #ifdef SYSCTL_NODE
190 uint32_t dummy_def = IPFW_DEFAULT_RULE;
191 static int sysctl_ipfw_table_num(SYSCTL_HANDLER_ARGS);
192 static int sysctl_ipfw_tables_sets(SYSCTL_HANDLER_ARGS);
193
194 SYSBEGIN(f3)
195
196 SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
197 "Firewall");
198 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, one_pass,
199 CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE3, &VNET_NAME(fw_one_pass), 0,
200 "Only do a single pass through ipfw when using dummynet(4)");
201 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, autoinc_step,
202 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(autoinc_step), 0,
203 "Rule number auto-increment step");
204 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose,
205 CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE3, &VNET_NAME(fw_verbose), 0,
206 "Log matches to ipfw rules");
207 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit,
208 CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(verbose_limit), 0,
209 "Set upper limit of matches of ipfw rules logged");
210 SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, default_rule, CTLFLAG_RD,
211 &dummy_def, 0,
212 "The default/max possible rule number.");
213 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, tables_max,
214 CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
215 0, 0, sysctl_ipfw_table_num, "IU",
216 "Maximum number of concurrently used tables");
217 SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, tables_sets,
218 CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
219 0, 0, sysctl_ipfw_tables_sets, "IU",
220 "Use per-set namespace for tables");
221 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, default_to_accept, CTLFLAG_RDTUN,
222 &default_to_accept, 0,
223 "Make the default rule accept all packets.");
224 TUNABLE_INT("net.inet.ip.fw.tables_max", (int *)&default_fw_tables);
225 SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count,
226 CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(layer3_chain.n_rules), 0,
227 "Number of static rules");
228
229 #ifdef INET6
230 SYSCTL_DECL(_net_inet6_ip6);
231 SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, fw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
232 "Firewall");
233 SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, deny_unknown_exthdrs,
234 CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE,
235 &VNET_NAME(fw_deny_unknown_exthdrs), 0,
236 "Deny packets with unknown IPv6 Extension Headers");
237 SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, permit_single_frag6,
238 CTLFLAG_VNET | CTLFLAG_RW | CTLFLAG_SECURE,
239 &VNET_NAME(fw_permit_single_frag6), 0,
240 "Permit single packet IPv6 fragments");
241 #endif /* INET6 */
242
243 SYSEND
244
245 #endif /* SYSCTL_NODE */
246
247 /*
248 * Some macros used in the various matching options.
249 * L3HDR maps an ipv4 pointer into a layer3 header pointer of type T
250 * Other macros just cast void * into the appropriate type
251 */
252 #define L3HDR(T, ip) ((T *)((u_int32_t *)(ip) + (ip)->ip_hl))
253 #define TCP(p) ((struct tcphdr *)(p))
254 #define SCTP(p) ((struct sctphdr *)(p))
255 #define UDP(p) ((struct udphdr *)(p))
256 #define ICMP(p) ((struct icmphdr *)(p))
257 #define ICMP6(p) ((struct icmp6_hdr *)(p))
258
259 static __inline int
260 icmptype_match(struct icmphdr *icmp, ipfw_insn_u32 *cmd)
261 {
262 int type = icmp->icmp_type;
263
264 return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1<<type)) );
265 }
266
267 #define TT ( (1 << ICMP_ECHO) | (1 << ICMP_ROUTERSOLICIT) | \
268 (1 << ICMP_TSTAMP) | (1 << ICMP_IREQ) | (1 << ICMP_MASKREQ) )
269
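/*
 * An ICMP "query" is one of the request types in TT above (echo,
 * router solicitation, timestamp, information and mask requests),
 * i.e. a message that legitimately expects a reply.
 */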
270 static int
271 is_icmp_query(struct icmphdr *icmp)
272 {
273 int type = icmp->icmp_type;
274
275 return (type <= ICMP_MAXTYPE && (TT & (1<<type)) );
276 }
277 #undef TT
278
279 /*
280 * The following checks use two arrays of 8 or 16 bits to store the
281 * bits that we want set or clear, respectively. They are in the
282 * low and high half of cmd->arg1 or cmd->d[0].
283 *
284 * We scan options and store the bits we find set. We succeed if
285 *
286 * (want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear
287 *
288 * The code is sometimes optimized not to store additional variables.
289 */
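/*
 * Illustrative example (encoding inferred from flags_match() below,
 * not taken from the rule compiler): a check requiring SYN set and
 * ACK clear would use
 *
 *	cmd->arg1 = (TH_ACK << 8) | TH_SYN
 *
 * and match only packets whose flags include TH_SYN and exclude TH_ACK.
 */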
290
291 static int
292 flags_match(ipfw_insn *cmd, u_int8_t bits)
293 {
294 u_char want_clear;
295 bits = ~bits;
296
297 if ( ((cmd->arg1 & 0xff) & bits) != 0)
298 return 0; /* some bits we want set were clear */
299 want_clear = (cmd->arg1 >> 8) & 0xff;
300 if ( (want_clear & bits) != want_clear)
301 return 0; /* some bits we want clear were set */
302 return 1;
303 }
304
305 static int
306 ipopts_match(struct ip *ip, ipfw_insn *cmd)
307 {
308 int optlen, bits = 0;
309 u_char *cp = (u_char *)(ip + 1);
310 int x = (ip->ip_hl << 2) - sizeof (struct ip);
311
312 for (; x > 0; x -= optlen, cp += optlen) {
313 int opt = cp[IPOPT_OPTVAL];
314
315 if (opt == IPOPT_EOL)
316 break;
317 if (opt == IPOPT_NOP)
318 optlen = 1;
319 else {
320 optlen = cp[IPOPT_OLEN];
321 if (optlen <= 0 || optlen > x)
322 return 0; /* invalid or truncated */
323 }
324 switch (opt) {
325 default:
326 break;
327
328 case IPOPT_LSRR:
329 bits |= IP_FW_IPOPT_LSRR;
330 break;
331
332 case IPOPT_SSRR:
333 bits |= IP_FW_IPOPT_SSRR;
334 break;
335
336 case IPOPT_RR:
337 bits |= IP_FW_IPOPT_RR;
338 break;
339
340 case IPOPT_TS:
341 bits |= IP_FW_IPOPT_TS;
342 break;
343 }
344 }
345 return (flags_match(cmd, bits));
346 }
347
348 /*
349 * Parse TCP options. The logic is copied from tcp_dooptions().
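 * Returns a bitmask of IP_FW_TCPOPT_* values for the options found;
 * if 'mss' is non-NULL, the value of a MAXSEG option (if present) is
 * reported there in host byte order.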
350 */
351 static int
352 tcpopts_parse(const struct tcphdr *tcp, uint16_t *mss)
353 {
354 const u_char *cp = (const u_char *)(tcp + 1);
355 int optlen, bits = 0;
356 int cnt = (tcp->th_off << 2) - sizeof(struct tcphdr);
357
358 for (; cnt > 0; cnt -= optlen, cp += optlen) {
359 int opt = cp[0];
360 if (opt == TCPOPT_EOL)
361 break;
362 if (opt == TCPOPT_NOP)
363 optlen = 1;
364 else {
365 if (cnt < 2)
366 break;
367 optlen = cp[1];
368 if (optlen < 2 || optlen > cnt)
369 break;
370 }
371
372 switch (opt) {
373 default:
374 break;
375
376 case TCPOPT_MAXSEG:
377 if (optlen != TCPOLEN_MAXSEG)
378 break;
379 bits |= IP_FW_TCPOPT_MSS;
380 if (mss != NULL)
381 *mss = be16dec(cp + 2);
382 break;
383
384 case TCPOPT_WINDOW:
385 if (optlen == TCPOLEN_WINDOW)
386 bits |= IP_FW_TCPOPT_WINDOW;
387 break;
388
389 case TCPOPT_SACK_PERMITTED:
390 if (optlen == TCPOLEN_SACK_PERMITTED)
391 bits |= IP_FW_TCPOPT_SACK;
392 break;
393
394 case TCPOPT_SACK:
395 if (optlen > 2 && (optlen - 2) % TCPOLEN_SACK == 0)
396 bits |= IP_FW_TCPOPT_SACK;
397 break;
398
399 case TCPOPT_TIMESTAMP:
400 if (optlen == TCPOLEN_TIMESTAMP)
401 bits |= IP_FW_TCPOPT_TS;
402 break;
403 }
404 }
405 return (bits);
406 }
407
408 static int
409 tcpopts_match(struct tcphdr *tcp, ipfw_insn *cmd)
410 {
411
412 return (flags_match(cmd, tcpopts_parse(tcp, NULL)));
413 }
414
415 static int
416 iface_match(struct ifnet *ifp, ipfw_insn_if *cmd, struct ip_fw_chain *chain,
417 uint32_t *tablearg)
418 {
419
420 if (ifp == NULL) /* no iface with this packet, match fails */
421 return (0);
422
423 /* Check by name or by IP address */
424 if (cmd->name[0] != '\0') { /* match by name */
425 if (cmd->name[0] == '\1') /* use tablearg to match */
426 return ipfw_lookup_table(chain, cmd->p.kidx, 0,
427 &ifp->if_index, tablearg);
428 /* Check name */
429 if (cmd->p.glob) {
430 if (fnmatch(cmd->name, ifp->if_xname, 0) == 0)
431 return(1);
432 } else {
433 if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0)
434 return(1);
435 }
436 } else {
437 #if !defined(USERSPACE) && defined(__FreeBSD__) /* and OSX too ? */
438 struct ifaddr *ia;
439
440 NET_EPOCH_ASSERT();
441
442 CK_STAILQ_FOREACH(ia, &ifp->if_addrhead, ifa_link) {
443 if (ia->ifa_addr->sa_family != AF_INET)
444 continue;
445 if (cmd->p.ip.s_addr == ((struct sockaddr_in *)
446 (ia->ifa_addr))->sin_addr.s_addr)
447 return (1); /* match */
448 }
449 #endif /* __FreeBSD__ */
450 }
451 return(0); /* no match, fail ... */
452 }
453
454 /*
455 * The verify_path function checks if a route to the src exists and
456 * if it is reachable via ifp (when provided).
457 *
458 * The 'verrevpath' option checks that the interface that an IP packet
459 * arrives on is the same interface that traffic destined for the
460 * packet's source address would be routed out of.
461 * The 'versrcreach' option just checks that the source address is
462 * reachable via any route (except default) in the routing table.
463 * These two are a measure to block forged packets. This is also
464 * commonly known as "anti-spoofing" or Unicast Reverse Path
465 * Forwarding (Unicast RPF) in Cisco-ese. The names of the knobs
466 * are purposely reminiscent of the Cisco IOS commands,
467 *
468 * ip verify unicast reverse-path
469 * ip verify unicast source reachable-via any
470 *
471 * which implements the same functionality. But note that the syntax
472 * is misleading, and the check may be performed on all IP packets
473 * whether unicast, multicast, or broadcast.
474 */
475 static int
476 verify_path(struct in_addr src, struct ifnet *ifp, u_int fib)
477 {
478 #if defined(USERSPACE) || !defined(__FreeBSD__)
479 return 0;
480 #else
481 struct nhop_object *nh;
482
483 nh = fib4_lookup(fib, src, 0, NHR_NONE, 0);
484 if (nh == NULL)
485 return (0);
486
487 /*
488 * If ifp is provided, check for equality with rtentry.
489 * We should use rt->rt_ifa->ifa_ifp, instead of rt->rt_ifp,
490 * in order to pass packets injected back by if_simloop():
491 * routing entry (via lo0) for our own address
492 * may exist, so we need to handle routing asymmetry.
493 */
494 if (ifp != NULL && ifp != nh->nh_aifp)
495 return (0);
496
497 /* if no ifp provided, check if rtentry is not default route */
498 if (ifp == NULL && (nh->nh_flags & NHF_DEFAULT) != 0)
499 return (0);
500
501 /* or if this is a blackhole/reject route */
502 if (ifp == NULL && (nh->nh_flags & (NHF_REJECT|NHF_BLACKHOLE)) != 0)
503 return (0);
504
505 /* found valid route */
506 return 1;
507 #endif /* __FreeBSD__ */
508 }
509
510 /*
511 * Generate an SCTP packet containing an ABORT chunk. The verification tag
512 * is given by vtag. The T-bit is set in the ABORT chunk if and only if
513 * reflected is not 0.
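 * The generated packet is aimed back at the sender of the offending
 * packet: addresses and ports from 'id' are swapped, and the result is
 * an IPv4 or IPv6 header followed by an SCTP common header and a single
 * ABORT chunk, with the SCTP CRC32c checksum filled in.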
514 */
515
516 static struct mbuf *
517 ipfw_send_abort(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t vtag,
518 int reflected)
519 {
520 struct mbuf *m;
521 struct ip *ip;
522 #ifdef INET6
523 struct ip6_hdr *ip6;
524 #endif
525 struct sctphdr *sctp;
526 struct sctp_chunkhdr *chunk;
527 u_int16_t hlen, plen, tlen;
528
529 MGETHDR(m, M_NOWAIT, MT_DATA);
530 if (m == NULL)
531 return (NULL);
532
533 M_SETFIB(m, id->fib);
534 #ifdef MAC
535 if (replyto != NULL)
536 mac_netinet_firewall_reply(replyto, m);
537 else
538 mac_netinet_firewall_send(m);
539 #else
540 (void)replyto; /* don't warn about unused arg */
541 #endif
542
543 switch (id->addr_type) {
544 case 4:
545 hlen = sizeof(struct ip);
546 break;
547 #ifdef INET6
548 case 6:
549 hlen = sizeof(struct ip6_hdr);
550 break;
551 #endif
552 default:
553 /* XXX: log me?!? */
554 FREE_PKT(m);
555 return (NULL);
556 }
557 plen = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
558 tlen = hlen + plen;
559 m->m_data += max_linkhdr;
560 m->m_flags |= M_SKIP_FIREWALL;
561 m->m_pkthdr.len = m->m_len = tlen;
562 m->m_pkthdr.rcvif = NULL;
563 bzero(m->m_data, tlen);
564
565 switch (id->addr_type) {
566 case 4:
567 ip = mtod(m, struct ip *);
568
569 ip->ip_v = 4;
570 ip->ip_hl = sizeof(struct ip) >> 2;
571 ip->ip_tos = IPTOS_LOWDELAY;
572 ip->ip_len = htons(tlen);
573 ip->ip_id = htons(0);
574 ip->ip_off = htons(0);
575 ip->ip_ttl = V_ip_defttl;
576 ip->ip_p = IPPROTO_SCTP;
577 ip->ip_sum = 0;
578 ip->ip_src.s_addr = htonl(id->dst_ip);
579 ip->ip_dst.s_addr = htonl(id->src_ip);
580
581 sctp = (struct sctphdr *)(ip + 1);
582 break;
583 #ifdef INET6
584 case 6:
585 ip6 = mtod(m, struct ip6_hdr *);
586
587 ip6->ip6_vfc = IPV6_VERSION;
588 ip6->ip6_plen = htons(plen);
589 ip6->ip6_nxt = IPPROTO_SCTP;
590 ip6->ip6_hlim = IPV6_DEFHLIM;
591 ip6->ip6_src = id->dst_ip6;
592 ip6->ip6_dst = id->src_ip6;
593
594 sctp = (struct sctphdr *)(ip6 + 1);
595 break;
596 #endif
597 }
598
599 sctp->src_port = htons(id->dst_port);
600 sctp->dest_port = htons(id->src_port);
601 sctp->v_tag = htonl(vtag);
602 sctp->checksum = htonl(0);
603
604 chunk = (struct sctp_chunkhdr *)(sctp + 1);
605 chunk->chunk_type = SCTP_ABORT_ASSOCIATION;
606 chunk->chunk_flags = 0;
607 if (reflected != 0) {
608 chunk->chunk_flags |= SCTP_HAD_NO_TCB;
609 }
610 chunk->chunk_length = htons(sizeof(struct sctp_chunkhdr));
611
612 sctp->checksum = sctp_calculate_cksum(m, hlen);
613
614 return (m);
615 }
616
617 /*
618 * Generate a TCP packet, containing either a RST or a keepalive.
619 * When flags & TH_RST, we are sending a RST packet because a
620 * "reset" action matched the packet.
621 * Otherwise we are sending a keepalive, and the TH_SYN bit in flags selects its direction.
622 * The 'replyto' mbuf is the mbuf being replied to, if any, and is required
623 * so that MAC can label the reply appropriately.
624 */
625 struct mbuf *
626 ipfw_send_pkt(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t seq,
627 u_int32_t ack, int flags)
628 {
629 struct mbuf *m = NULL; /* stupid compiler */
630 struct ip *h = NULL; /* stupid compiler */
631 #ifdef INET6
632 struct ip6_hdr *h6 = NULL;
633 #endif
634 struct tcphdr *th = NULL;
635 int len, dir;
636
637 MGETHDR(m, M_NOWAIT, MT_DATA);
638 if (m == NULL)
639 return (NULL);
640
641 M_SETFIB(m, id->fib);
642 #ifdef MAC
643 if (replyto != NULL)
644 mac_netinet_firewall_reply(replyto, m);
645 else
646 mac_netinet_firewall_send(m);
647 #else
648 (void)replyto; /* don't warn about unused arg */
649 #endif
650
651 switch (id->addr_type) {
652 case 4:
653 len = sizeof(struct ip) + sizeof(struct tcphdr);
654 break;
655 #ifdef INET6
656 case 6:
657 len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
658 break;
659 #endif
660 default:
661 /* XXX: log me?!? */
662 FREE_PKT(m);
663 return (NULL);
664 }
665 dir = ((flags & (TH_SYN | TH_RST)) == TH_SYN);
666
667 m->m_data += max_linkhdr;
668 m->m_flags |= M_SKIP_FIREWALL;
669 m->m_pkthdr.len = m->m_len = len;
670 m->m_pkthdr.rcvif = NULL;
671 bzero(m->m_data, len);
672
673 switch (id->addr_type) {
674 case 4:
675 h = mtod(m, struct ip *);
676
677 /* prepare for checksum */
678 h->ip_p = IPPROTO_TCP;
679 h->ip_len = htons(sizeof(struct tcphdr));
680 if (dir) {
681 h->ip_src.s_addr = htonl(id->src_ip);
682 h->ip_dst.s_addr = htonl(id->dst_ip);
683 } else {
684 h->ip_src.s_addr = htonl(id->dst_ip);
685 h->ip_dst.s_addr = htonl(id->src_ip);
686 }
687
688 th = (struct tcphdr *)(h + 1);
689 break;
690 #ifdef INET6
691 case 6:
692 h6 = mtod(m, struct ip6_hdr *);
693
694 /* prepare for checksum */
695 h6->ip6_nxt = IPPROTO_TCP;
696 h6->ip6_plen = htons(sizeof(struct tcphdr));
697 if (dir) {
698 h6->ip6_src = id->src_ip6;
699 h6->ip6_dst = id->dst_ip6;
700 } else {
701 h6->ip6_src = id->dst_ip6;
702 h6->ip6_dst = id->src_ip6;
703 }
704
705 th = (struct tcphdr *)(h6 + 1);
706 break;
707 #endif
708 }
709
710 if (dir) {
711 th->th_sport = htons(id->src_port);
712 th->th_dport = htons(id->dst_port);
713 } else {
714 th->th_sport = htons(id->dst_port);
715 th->th_dport = htons(id->src_port);
716 }
717 th->th_off = sizeof(struct tcphdr) >> 2;
718
719 if (flags & TH_RST) {
720 if (flags & TH_ACK) {
721 th->th_seq = htonl(ack);
722 tcp_set_flags(th, TH_RST);
723 } else {
724 if (flags & TH_SYN)
725 seq++;
726 th->th_ack = htonl(seq);
727 tcp_set_flags(th, TH_RST | TH_ACK);
728 }
729 } else {
730 /*
731 * Keepalive - use caller provided sequence numbers
732 */
733 th->th_seq = htonl(seq);
734 th->th_ack = htonl(ack);
735 tcp_set_flags(th, TH_ACK);
736 }
737
738 switch (id->addr_type) {
739 case 4:
740 th->th_sum = in_cksum(m, len);
741
742 /* finish the ip header */
743 h->ip_v = 4;
744 h->ip_hl = sizeof(*h) >> 2;
745 h->ip_tos = IPTOS_LOWDELAY;
746 h->ip_off = htons(0);
747 h->ip_len = htons(len);
748 h->ip_ttl = V_ip_defttl;
749 h->ip_sum = 0;
750 break;
751 #ifdef INET6
752 case 6:
753 th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(*h6),
754 sizeof(struct tcphdr));
755
756 /* finish the ip6 header */
757 h6->ip6_vfc |= IPV6_VERSION;
758 h6->ip6_hlim = IPV6_DEFHLIM;
759 break;
760 #endif
761 }
762
763 return (m);
764 }
765
766 #ifdef INET6
767 /*
768 * ipv6 specific rules here...
769 */
770 static __inline int
771 icmp6type_match(int type, ipfw_insn_u32 *cmd)
772 {
773 return (type <= ICMP6_MAXTYPE && (cmd->d[type/32] & (1<<(type%32)) ) );
774 }
775
776 static int
777 flow6id_match(int curr_flow, ipfw_insn_u32 *cmd)
778 {
779 int i;
780 for (i=0; i <= cmd->o.arg1; ++i)
781 if (curr_flow == cmd->d[i])
782 return 1;
783 return 0;
784 }
785
786 /* support for IP6_*_ME opcodes */
787 static const struct in6_addr lla_mask = {{{
788 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
789 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
790 }}};
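/*
 * Bytes 2 and 3 are excluded from the comparison because, for
 * link-local addresses, KAME-derived stacks store the embedded scope
 * zone id there; we want to match the address no matter which
 * interface it is configured on.
 */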
791
792 static int
793 ipfw_localip6(struct in6_addr *in6)
794 {
795 struct rm_priotracker in6_ifa_tracker;
796 struct in6_ifaddr *ia;
797
798 if (IN6_IS_ADDR_MULTICAST(in6))
799 return (0);
800
801 if (!IN6_IS_ADDR_LINKLOCAL(in6))
802 return (in6_localip(in6));
803
804 IN6_IFADDR_RLOCK(&in6_ifa_tracker);
805 CK_STAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
806 if (!IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr))
807 continue;
808 if (IN6_ARE_MASKED_ADDR_EQUAL(&ia->ia_addr.sin6_addr,
809 in6, &lla_mask)) {
810 IN6_IFADDR_RUNLOCK(&in6_ifa_tracker);
811 return (1);
812 }
813 }
814 IN6_IFADDR_RUNLOCK(&in6_ifa_tracker);
815 return (0);
816 }
817
818 static int
819 verify_path6(struct in6_addr *src, struct ifnet *ifp, u_int fib)
820 {
821 struct nhop_object *nh;
822
823 if (IN6_IS_SCOPE_LINKLOCAL(src))
824 return (1);
825
826 nh = fib6_lookup(fib, src, 0, NHR_NONE, 0);
827 if (nh == NULL)
828 return (0);
829
830 /* If ifp is provided, check for equality with route table. */
831 if (ifp != NULL && ifp != nh->nh_aifp)
832 return (0);
833
834 /* if no ifp provided, check if rtentry is not default route */
835 if (ifp == NULL && (nh->nh_flags & NHF_DEFAULT) != 0)
836 return (0);
837
838 /* or if this is a blackhole/reject route */
839 if (ifp == NULL && (nh->nh_flags & (NHF_REJECT|NHF_BLACKHOLE)) != 0)
840 return (0);
841
842 /* found valid route */
843 return 1;
844 }
845
846 static int
847 is_icmp6_query(int icmp6_type)
848 {
849 if ((icmp6_type <= ICMP6_MAXTYPE) &&
850 (icmp6_type == ICMP6_ECHO_REQUEST ||
851 icmp6_type == ICMP6_MEMBERSHIP_QUERY ||
852 icmp6_type == ICMP6_WRUREQUEST ||
853 icmp6_type == ICMP6_FQDN_QUERY ||
854 icmp6_type == ICMP6_NI_QUERY))
855 return (1);
856
857 return (0);
858 }
859
860 static int
861 map_icmp_unreach(int code)
862 {
863
864 /* RFC 7915 p4.2 */
865 switch (code) {
866 case ICMP_UNREACH_NET:
867 case ICMP_UNREACH_HOST:
868 case ICMP_UNREACH_SRCFAIL:
869 case ICMP_UNREACH_NET_UNKNOWN:
870 case ICMP_UNREACH_HOST_UNKNOWN:
871 case ICMP_UNREACH_TOSNET:
872 case ICMP_UNREACH_TOSHOST:
873 return (ICMP6_DST_UNREACH_NOROUTE);
874 case ICMP_UNREACH_PORT:
875 return (ICMP6_DST_UNREACH_NOPORT);
876 default:
877 /*
878 * Map the rest of the codes to administratively prohibited.
879 * XXX: unreach proto should be mapped into ICMPv6
880 * parameter problem, but we use only unreach type.
881 */
882 return (ICMP6_DST_UNREACH_ADMIN);
883 }
884 }
885
886 static void
887 send_reject6(struct ip_fw_args *args, int code, u_int hlen,
888 const struct ip6_hdr *ip6)
889 {
890 struct mbuf *m;
891
892 m = args->m;
893 if (code == ICMP6_UNREACH_RST && args->f_id.proto == IPPROTO_TCP) {
894 const struct tcphdr * tcp;
895 tcp = (const struct tcphdr *)((const char *)ip6 + hlen);
896
897 if ((tcp_get_flags(tcp) & TH_RST) == 0) {
898 struct mbuf *m0;
899 m0 = ipfw_send_pkt(args->m, &(args->f_id),
900 ntohl(tcp->th_seq), ntohl(tcp->th_ack),
901 tcp_get_flags(tcp) | TH_RST);
902 if (m0 != NULL)
903 ip6_output(m0, NULL, NULL, 0, NULL, NULL,
904 NULL);
905 }
906 FREE_PKT(m);
907 } else if (code == ICMP6_UNREACH_ABORT &&
908 args->f_id.proto == IPPROTO_SCTP) {
909 struct mbuf *m0;
910 const struct sctphdr *sctp;
911 u_int32_t v_tag;
912 int reflected;
913
914 sctp = (const struct sctphdr *)((const char *)ip6 + hlen);
915 reflected = 1;
916 v_tag = ntohl(sctp->v_tag);
917 /* Investigate the first chunk header if available */
918 if (m->m_len >= hlen + sizeof(struct sctphdr) +
919 sizeof(struct sctp_chunkhdr)) {
920 const struct sctp_chunkhdr *chunk;
921
922 chunk = (const struct sctp_chunkhdr *)(sctp + 1);
923 switch (chunk->chunk_type) {
924 case SCTP_INITIATION:
925 /*
926 * Packets containing an INIT chunk MUST have
927 * a zero v-tag.
928 */
929 if (v_tag != 0) {
930 v_tag = 0;
931 break;
932 }
933 /* INIT chunk MUST NOT be bundled */
934 if (m->m_pkthdr.len >
935 hlen + sizeof(struct sctphdr) +
936 ntohs(chunk->chunk_length) + 3) {
937 break;
938 }
939 /* Use the initiate tag if available */
940 if ((m->m_len >= hlen + sizeof(struct sctphdr) +
941 sizeof(struct sctp_chunkhdr) +
942 offsetof(struct sctp_init, a_rwnd))) {
943 const struct sctp_init *init;
944
945 init = (const struct sctp_init *)(chunk + 1);
946 v_tag = ntohl(init->initiate_tag);
947 reflected = 0;
948 }
949 break;
950 case SCTP_ABORT_ASSOCIATION:
951 /*
952 * If the packet contains an ABORT chunk, don't
953 * reply.
954 * XXX: We should search through all chunks,
955 * but do not do that to avoid attacks.
956 */
957 v_tag = 0;
958 break;
959 }
960 }
961 if (v_tag == 0) {
962 m0 = NULL;
963 } else {
964 m0 = ipfw_send_abort(args->m, &(args->f_id), v_tag,
965 reflected);
966 }
967 if (m0 != NULL)
968 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
969 FREE_PKT(m);
970 } else if (code != ICMP6_UNREACH_RST && code != ICMP6_UNREACH_ABORT) {
971 /* Send an ICMPv6 unreach. */
972 #if 0
973 /*
974 * Unlike above, the mbufs need to line up with the ip6 hdr,
975 * as the contents are read. We need to m_adj() the
976 * needed amount.
977 * The mbuf will however be thrown away so we can adjust it.
978 * Remember we did an m_pullup on it already so we
979 * can make some assumptions about contiguousness.
980 */
981 if (args->L3offset)
982 m_adj(m, args->L3offset);
983 #endif
984 icmp6_error(m, ICMP6_DST_UNREACH, code, 0);
985 } else
986 FREE_PKT(m);
987
988 args->m = NULL;
989 }
990
991 #endif /* INET6 */
992
993 /*
994 * sends a reject message, consuming the mbuf passed as an argument.
995 */
996 static void
997 send_reject(struct ip_fw_args *args, int code, uint16_t mtu, int iplen,
998 const struct ip *ip)
999 {
1000 #if 0
1001 /* XXX When ip is not guaranteed to be at mtod() we will
1002 * need to account for this.
1003 * The mbuf will however be thrown away so we can adjust it.
1004 * Remember we did an m_pullup on it already so we
1005 * can make some assumptions about contiguousness.
1006 */
1007 if (args->L3offset)
1008 m_adj(m, args->L3offset);
1009 #endif
1010 if (code != ICMP_REJECT_RST && code != ICMP_REJECT_ABORT) {
1011 /* Send an ICMP unreach */
1012 icmp_error(args->m, ICMP_UNREACH, code, 0L, mtu);
1013 } else if (code == ICMP_REJECT_RST && args->f_id.proto == IPPROTO_TCP) {
1014 struct tcphdr *const tcp =
1015 L3HDR(struct tcphdr, mtod(args->m, struct ip *));
1016 if ( (tcp_get_flags(tcp) & TH_RST) == 0) {
1017 struct mbuf *m;
1018 m = ipfw_send_pkt(args->m, &(args->f_id),
1019 ntohl(tcp->th_seq), ntohl(tcp->th_ack),
1020 tcp_get_flags(tcp) | TH_RST);
1021 if (m != NULL)
1022 ip_output(m, NULL, NULL, 0, NULL, NULL);
1023 }
1024 FREE_PKT(args->m);
1025 } else if (code == ICMP_REJECT_ABORT &&
1026 args->f_id.proto == IPPROTO_SCTP) {
1027 struct mbuf *m;
1028 struct sctphdr *sctp;
1029 struct sctp_chunkhdr *chunk;
1030 struct sctp_init *init;
1031 u_int32_t v_tag;
1032 int reflected;
1033
1034 sctp = L3HDR(struct sctphdr, mtod(args->m, struct ip *));
1035 reflected = 1;
1036 v_tag = ntohl(sctp->v_tag);
1037 if (iplen >= (ip->ip_hl << 2) + sizeof(struct sctphdr) +
1038 sizeof(struct sctp_chunkhdr)) {
1039 /* Look at the first chunk header if available */
1040 chunk = (struct sctp_chunkhdr *)(sctp + 1);
1041 switch (chunk->chunk_type) {
1042 case SCTP_INITIATION:
1043 /*
1044 * Packets containing an INIT chunk MUST have
1045 * a zero v-tag.
1046 */
1047 if (v_tag != 0) {
1048 v_tag = 0;
1049 break;
1050 }
1051 /* INIT chunk MUST NOT be bundled */
1052 if (iplen >
1053 (ip->ip_hl << 2) + sizeof(struct sctphdr) +
1054 ntohs(chunk->chunk_length) + 3) {
1055 break;
1056 }
1057 /* Use the initiate tag if available */
1058 if ((iplen >= (ip->ip_hl << 2) +
1059 sizeof(struct sctphdr) +
1060 sizeof(struct sctp_chunkhdr) +
1061 offsetof(struct sctp_init, a_rwnd))) {
1062 init = (struct sctp_init *)(chunk + 1);
1063 v_tag = ntohl(init->initiate_tag);
1064 reflected = 0;
1065 }
1066 break;
1067 case SCTP_ABORT_ASSOCIATION:
1068 /*
1069 * If the packet contains an ABORT chunk, don't
1070 * reply.
1071 * XXX: We should search through all chunks,
1072 * but do not do that to avoid attacks.
1073 */
1074 v_tag = 0;
1075 break;
1076 }
1077 }
1078 if (v_tag == 0) {
1079 m = NULL;
1080 } else {
1081 m = ipfw_send_abort(args->m, &(args->f_id), v_tag,
1082 reflected);
1083 }
1084 if (m != NULL)
1085 ip_output(m, NULL, NULL, 0, NULL, NULL);
1086 FREE_PKT(args->m);
1087 } else
1088 FREE_PKT(args->m);
1089 args->m = NULL;
1090 }
1091
1092 /*
1093 * Support for uid/gid/jail lookup. These tests are expensive
1094 * (because we may need to look into the list of active sockets)
1095 * so we cache the results. ugid_lookupp is 0 if we have not
1096 * yet done a lookup, 1 if we succeeded, and -1 if we tried
1097 * and failed. The function always returns the match value.
1098 * We could actually spare the variable and use *uc, setting
1099 * it to '(void *)check_uidgid' if we have no info, NULL if
1100 * we tried and failed, or any other value if successful.
1101 */
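/*
 * ipfw_chk() passes the same ugid_lookupp/uc pair for the whole packet,
 * so when a ruleset contains several uid/gid/jail checks only the first
 * one pays for the PCB lookup; the others reuse the cached credentials.
 */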
1102 static int
1103 check_uidgid(ipfw_insn_u32 *insn, struct ip_fw_args *args, int *ugid_lookupp,
1104 struct ucred **uc)
1105 {
1106 #if defined(USERSPACE)
1107 return 0; // not supported in userspace
1108 #else
1109 #ifndef __FreeBSD__
1110 /* XXX */
1111 return cred_check(insn, proto, oif,
1112 dst_ip, dst_port, src_ip, src_port,
1113 (struct bsd_ucred *)uc, ugid_lookupp, ((struct mbuf *)inp)->m_skb);
1114 #else /* FreeBSD */
1115 struct in_addr src_ip, dst_ip;
1116 struct inpcbinfo *pi;
1117 struct ipfw_flow_id *id;
1118 struct inpcb *pcb, *inp;
1119 int lookupflags;
1120 int match;
1121
1122 id = &args->f_id;
1123 inp = args->inp;
1124
1125 /*
1126 * Check to see if the UDP or TCP stack supplied us with
1127 * the PCB. If so, rather than holding a lock and looking
1128 * up the PCB, we can use the one that was supplied.
1129 */
1130 if (inp && *ugid_lookupp == 0) {
1131 INP_LOCK_ASSERT(inp);
1132 if (inp->inp_socket != NULL) {
1133 *uc = crhold(inp->inp_cred);
1134 *ugid_lookupp = 1;
1135 } else
1136 *ugid_lookupp = -1;
1137 }
1138 /*
1139 * If we have already been here and the packet has no
1140 * PCB entry associated with it, then we can safely
1141 * assume that there is no match.
1142 */
1143 if (*ugid_lookupp == -1)
1144 return (0);
1145 if (id->proto == IPPROTO_TCP) {
1146 lookupflags = 0;
1147 pi = &V_tcbinfo;
1148 } else if (id->proto == IPPROTO_UDP) {
1149 lookupflags = INPLOOKUP_WILDCARD;
1150 pi = &V_udbinfo;
1151 } else if (id->proto == IPPROTO_UDPLITE) {
1152 lookupflags = INPLOOKUP_WILDCARD;
1153 pi = &V_ulitecbinfo;
1154 } else
1155 return 0;
1156 lookupflags |= INPLOOKUP_RLOCKPCB;
1157 match = 0;
1158 if (*ugid_lookupp == 0) {
1159 if (id->addr_type == 6) {
1160 #ifdef INET6
1161 if (args->flags & IPFW_ARGS_IN)
1162 pcb = in6_pcblookup_mbuf(pi,
1163 &id->src_ip6, htons(id->src_port),
1164 &id->dst_ip6, htons(id->dst_port),
1165 lookupflags, NULL, args->m);
1166 else
1167 pcb = in6_pcblookup_mbuf(pi,
1168 &id->dst_ip6, htons(id->dst_port),
1169 &id->src_ip6, htons(id->src_port),
1170 lookupflags, args->ifp, args->m);
1171 #else
1172 *ugid_lookupp = -1;
1173 return (0);
1174 #endif
1175 } else {
1176 src_ip.s_addr = htonl(id->src_ip);
1177 dst_ip.s_addr = htonl(id->dst_ip);
1178 if (args->flags & IPFW_ARGS_IN)
1179 pcb = in_pcblookup_mbuf(pi,
1180 src_ip, htons(id->src_port),
1181 dst_ip, htons(id->dst_port),
1182 lookupflags, NULL, args->m);
1183 else
1184 pcb = in_pcblookup_mbuf(pi,
1185 dst_ip, htons(id->dst_port),
1186 src_ip, htons(id->src_port),
1187 lookupflags, args->ifp, args->m);
1188 }
1189 if (pcb != NULL) {
1190 INP_RLOCK_ASSERT(pcb);
1191 *uc = crhold(pcb->inp_cred);
1192 *ugid_lookupp = 1;
1193 INP_RUNLOCK(pcb);
1194 }
1195 if (*ugid_lookupp == 0) {
1196 /*
1197 * We tried and failed, set the variable to -1
1198 * so we will not try again on this packet.
1199 */
1200 *ugid_lookupp = -1;
1201 return (0);
1202 }
1203 }
1204 if (insn->o.opcode == O_UID)
1205 match = ((*uc)->cr_uid == (uid_t)insn->d[0]);
1206 else if (insn->o.opcode == O_GID)
1207 match = groupmember((gid_t)insn->d[0], *uc);
1208 else if (insn->o.opcode == O_JAIL)
1209 match = ((*uc)->cr_prison->pr_id == (int)insn->d[0]);
1210 return (match);
1211 #endif /* __FreeBSD__ */
1212 #endif /* not supported in userspace */
1213 }
1214
1215 /*
1216 * Helper function to set args with info on the rule after the matching
1217 * one. slot is precise, whereas we guess rule_id as they are
1218 * assigned sequentially.
1219 */
1220 static inline void
1221 set_match(struct ip_fw_args *args, int slot,
1222 struct ip_fw_chain *chain)
1223 {
1224 args->rule.chain_id = chain->id;
1225 args->rule.slot = slot + 1; /* we use 0 as a marker */
1226 args->rule.rule_id = 1 + chain->map[slot]->id;
1227 args->rule.rulenum = chain->map[slot]->rulenum;
1228 args->flags |= IPFW_ARGS_REF;
1229 }
1230
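/*
 * Resolve a skipto-style target to a position in chain->map[].  'num'
 * may be IP_FW_TARG, in which case the rule number comes from the
 * tablearg of a preceding table lookup.  Unless jump_backwards is set,
 * the target is adjusted so that we never jump to a rule at or before
 * the current one.
 */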
1231 static int
1232 jump_lookup_pos(struct ip_fw_chain *chain, struct ip_fw *f, int num,
1233 int tablearg, int jump_backwards)
1234 {
1235 int f_pos, i;
1236
1237 i = IP_FW_ARG_TABLEARG(chain, num, skipto);
1238 /* make sure we do not jump backward */
1239 if (jump_backwards == 0 && i <= f->rulenum)
1240 i = f->rulenum + 1;
1241
1242 #ifndef LINEAR_SKIPTO
1243 if (chain->idxmap != NULL)
1244 f_pos = chain->idxmap[i];
1245 else
1246 f_pos = ipfw_find_rule(chain, i, 0);
1247 #else
1248 f_pos = chain->idxmap[i];
1249 #endif /* LINEAR_SKIPTO */
1250
1251 return (f_pos);
1252 }
1253
1254
1255 #ifndef LINEAR_SKIPTO
1256 /*
1257 * Helper function to enable cached rule lookups using
1258 * cache.id and cache.pos fields in ipfw rule.
1259 */
1260 static int
1261 jump_cached(struct ip_fw_chain *chain, struct ip_fw *f, int num,
1262 int tablearg, int jump_backwards)
1263 {
1264 int f_pos;
1265
1266 /* Can't use cache with IP_FW_TARG */
1267 if (num == IP_FW_TARG)
1268 return jump_lookup_pos(chain, f, num, tablearg, jump_backwards);
1269
1270 /*
1271 * If possible use cached f_pos (in f->cache.pos),
1272 * whose version is written in f->cache.id (horrible hacks
1273 * to avoid changing the ABI).
1274 *
1275 * Multiple threads can execute the same rule simultaneously,
1276 * we need to ensure that cache.pos is updated before cache.id.
1277 */
1278
1279 #ifdef __LP64__
1280 struct ip_fw_jump_cache cache;
1281
1282 cache.raw_value = f->cache.raw_value;
1283 if (cache.id == chain->id)
1284 return (cache.pos);
1285
1286 f_pos = jump_lookup_pos(chain, f, num, tablearg, jump_backwards);
1287
1288 cache.pos = f_pos;
1289 cache.id = chain->id;
1290 f->cache.raw_value = cache.raw_value;
1291 #else
1292 if (f->cache.id == chain->id) {
1293 /* Load pos after id */
1294 atomic_thread_fence_acq();
1295 return (f->cache.pos);
1296 }
1297
1298 f_pos = jump_lookup_pos(chain, f, num, tablearg, jump_backwards);
1299
1300 f->cache.pos = f_pos;
1301 /* Store id after pos */
1302 atomic_thread_fence_rel();
1303 f->cache.id = chain->id;
1304 #endif /* !__LP64__ */
1305 return (f_pos);
1306 }
1307 #endif /* !LINEAR_SKIPTO */
1308
1309 #define TARG(k, f) IP_FW_ARG_TABLEARG(chain, k, f)
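/*
 * TARG(k, f) expands to IP_FW_ARG_TABLEARG(): it yields the literal
 * value 'k' unless 'k' is the IP_FW_TARG marker, in which case the
 * value comes from the tablearg set by the most recent table lookup
 * ('f' names the table-value field consulted by the macro).
 */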
1310 /*
1311 * The main check routine for the firewall.
1312 *
1313 * All arguments are in args so we can modify them and return them
1314 * back to the caller.
1315 *
1316 * Parameters:
1317 *
1318 * args->m (in/out) The packet; we set to NULL when/if we nuke it.
1319 * Starts with the IP header.
1320 * args->L3offset Number of bytes bypassed if we came from L2.
1321 * e.g. often sizeof(eh) ** NOTYET **
1322 * args->ifp Incoming or outgoing interface.
1323 * args->divert_rule (in/out)
1324 * Skip up to the first rule past this rule number;
1325 * upon return, non-zero port number for divert or tee.
1326 *
1327 * args->rule Pointer to the last matching rule (in/out)
1328 * args->next_hop Socket we are forwarding to (out).
1329 * args->next_hop6 IPv6 next hop we are forwarding to (out).
1330 * args->f_id Addresses grabbed from the packet (out)
1331 * args->rule.info a cookie depending on rule action
1332 *
1333 * Return value:
1334 *
1335 * IP_FW_PASS the packet must be accepted
1336 * IP_FW_DENY the packet must be dropped
1337 * IP_FW_DIVERT divert packet, port in m_tag
1338 * IP_FW_TEE tee packet, port in m_tag
1339 * IP_FW_DUMMYNET to dummynet, pipe in args->cookie
1340 * IP_FW_NETGRAPH into netgraph, cookie args->cookie
1341 * args->rule contains the matching rule,
1342 * args->rule.info has additional information.
1343 *
1344 */
1345 int
1346 ipfw_chk(struct ip_fw_args *args)
1347 {
1348
1349 /*
1350 * Local variables holding state while processing a packet:
1351 *
1352 * IMPORTANT NOTE: to speed up the processing of rules, there
1353 * are some assumptions about the values of the variables, which
1354 * are documented here. Should you change them, please check
1355 * the implementation of the various instructions to make sure
1356 * that they still work.
1357 *
1358 * m | args->m Pointer to the mbuf, as received from the caller.
1359 * It may change if ipfw_chk() does an m_pullup, or if it
1360 * consumes the packet because it calls send_reject().
1361 * XXX This has to change, so that ipfw_chk() never modifies
1362 * or consumes the buffer.
1363 * OR
1364 * args->mem Pointer to a contiguous memory chunk.
1365 * ip Is the beginning of the ip(4 or 6) header.
1366 * eh Ethernet header, in case the input is Layer2.
1367 */
1368 struct mbuf *m;
1369 struct ip *ip;
1370 struct ether_header *eh;
1371
1372 /*
1373 * For rules which contain uid/gid or jail constraints, cache
1374 * a copy of the users credentials after the pcb lookup has been
1375 * executed. This will speed up the processing of rules with
1376 * these types of constraints, as well as decrease contention
1377 * on pcb related locks.
1378 */
1379 #ifndef __FreeBSD__
1380 struct bsd_ucred ucred_cache;
1381 #else
1382 struct ucred *ucred_cache = NULL;
1383 #endif
1384 int ucred_lookup = 0;
1385 int f_pos = 0; /* index of current rule in the array */
1386 int retval = 0;
1387 struct ifnet *oif, *iif;
1388
1389 /*
1390 * hlen The length of the IP header.
1391 */
1392 u_int hlen = 0; /* hlen >0 means we have an IP pkt */
1393
1394 /*
1395 * offset The offset of a fragment. offset != 0 means that
1396 * we have a fragment at this offset of an IPv4 packet.
1397 * offset == 0 means that (if this is an IPv4 packet)
1398 * this is the first or only fragment.
1399 * For IPv6 offset|ip6f_mf == 0 means there is no Fragment Header
1400 * or there is a single packet fragment (fragment header added
1401 * without being needed). We will treat a single packet fragment as if
1402 * there was no fragment header (or log/block depending on the
1403 * V_fw_permit_single_frag6 sysctl setting).
1404 */
1405 u_short offset = 0;
1406 u_short ip6f_mf = 0;
1407
1408 /*
1409 * Local copies of addresses. They are only valid if we have
1410 * an IP packet.
1411 *
1412 * proto The protocol. Set to 0 for non-ip packets,
1413 * or to the protocol read from the packet otherwise.
1414 * proto != 0 means that we have an IPv4 packet.
1415 *
1416 * src_port, dst_port port numbers, in HOST format. Only
1417 * valid for TCP and UDP packets.
1418 *
1419 * src_ip, dst_ip ip addresses, in NETWORK format.
1420 * Only valid for IPv4 packets.
1421 */
1422 uint8_t proto;
1423 uint16_t src_port, dst_port; /* NOTE: host format */
1424 struct in_addr src_ip, dst_ip; /* NOTE: network format */
1425 int iplen = 0;
1426 int pktlen;
1427
1428 struct ipfw_dyn_info dyn_info;
1429 struct ip_fw *q = NULL;
1430 struct ip_fw_chain *chain = &V_layer3_chain;
1431
1432 /*
1433 * We store in ulp a pointer to the upper layer protocol header.
1434 * In the ipv4 case this is easy to determine from the header,
1435 * but for ipv6 we might have some additional headers in the middle.
1436 * ulp is NULL if not found.
1437 */
1438 void *ulp = NULL; /* upper layer protocol pointer. */
1439
1440 /* XXX ipv6 variables */
1441 int is_ipv6 = 0;
1442 #ifdef INET6
1443 uint8_t icmp6_type = 0;
1444 #endif
1445 uint16_t ext_hd = 0; /* bits vector for extension header filtering */
1446 /* end of ipv6 variables */
1447
1448 int is_ipv4 = 0;
1449
1450 int done = 0; /* flag to exit the outer loop */
1451 IPFW_RLOCK_TRACKER;
1452 bool mem;
1453 bool need_send_reject = false;
1454 int reject_code;
1455 uint16_t reject_mtu;
1456
1457 if ((mem = (args->flags & IPFW_ARGS_LENMASK))) {
1458 if (args->flags & IPFW_ARGS_ETHER) {
1459 eh = (struct ether_header *)args->mem;
1460 if (eh->ether_type == htons(ETHERTYPE_VLAN))
1461 ip = (struct ip *)
1462 ((struct ether_vlan_header *)eh + 1);
1463 else
1464 ip = (struct ip *)(eh + 1);
1465 } else {
1466 eh = NULL;
1467 ip = (struct ip *)args->mem;
1468 }
1469 pktlen = IPFW_ARGS_LENGTH(args->flags);
1470 args->f_id.fib = args->ifp->if_fib; /* best guess */
1471 } else {
1472 m = args->m;
1473 if (m->m_flags & M_SKIP_FIREWALL || (! V_ipfw_vnet_ready))
1474 return (IP_FW_PASS); /* accept */
1475 if (args->flags & IPFW_ARGS_ETHER) {
1476 /* We need some amount of data to be contiguous. */
1477 if (m->m_len < min(m->m_pkthdr.len, max_protohdr) &&
1478 (args->m = m = m_pullup(m, min(m->m_pkthdr.len,
1479 max_protohdr))) == NULL)
1480 goto pullup_failed;
1481 eh = mtod(m, struct ether_header *);
1482 ip = (struct ip *)(eh + 1);
1483 } else {
1484 eh = NULL;
1485 ip = mtod(m, struct ip *);
1486 }
1487 pktlen = m->m_pkthdr.len;
1488 args->f_id.fib = M_GETFIB(m); /* mbuf not altered */
1489 }
1490
1491 dst_ip.s_addr = 0; /* make sure it is initialized */
1492 src_ip.s_addr = 0; /* make sure it is initialized */
1493 src_port = dst_port = 0;
1494
1495 DYN_INFO_INIT(&dyn_info);
1496 /*
1497 * PULLUP_TO(len, p, T) makes sure that len + sizeof(T) is contiguous,
1498 * then it sets p to point at the offset "len" in the mbuf. WARNING: the
1499 * pointer might become stale after other pullups (but we never use it
1500 * this way).
1501 */
1502 #define PULLUP_TO(_len, p, T) PULLUP_LEN(_len, p, sizeof(T))
1503 #define EHLEN (eh != NULL ? ((char *)ip - (char *)eh) : 0)
1504 #define _PULLUP_LOCKED(_len, p, T, unlock) \
1505 do { \
1506 int x = (_len) + T + EHLEN; \
1507 if (mem) { \
1508 if (__predict_false(pktlen < x)) { \
1509 unlock; \
1510 goto pullup_failed; \
1511 } \
1512 p = (char *)args->mem + (_len) + EHLEN; \
1513 } else { \
1514 if (__predict_false((m)->m_len < x)) { \
1515 args->m = m = m_pullup(m, x); \
1516 if (m == NULL) { \
1517 unlock; \
1518 goto pullup_failed; \
1519 } \
1520 } \
1521 p = mtod(m, char *) + (_len) + EHLEN; \
1522 } \
1523 } while (0)
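/*
 * Offsets passed to the PULLUP macros are relative to the IP header;
 * EHLEN adds the Ethernet/VLAN header length back in when the buffer
 * starts at layer 2.  In the args->mem case nothing can be pulled up,
 * so a short packet simply fails the check.
 */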
1524
1525 #define PULLUP_LEN(_len, p, T) _PULLUP_LOCKED(_len, p, T, )
1526 #define PULLUP_LEN_LOCKED(_len, p, T) \
1527 _PULLUP_LOCKED(_len, p, T, IPFW_PF_RUNLOCK(chain)); \
1528 UPDATE_POINTERS()
1529 /*
1530 * In case pointers got stale after pullups, update them.
1531 */
1532 #define UPDATE_POINTERS() \
1533 do { \
1534 if (!mem) { \
1535 if (eh != NULL) { \
1536 eh = mtod(m, struct ether_header *); \
1537 ip = (struct ip *)(eh + 1); \
1538 } else \
1539 ip = mtod(m, struct ip *); \
1540 args->m = m; \
1541 } \
1542 } while (0)
1543
1544 /* Identify IP packets and fill up variables. */
1545 if (pktlen >= sizeof(struct ip6_hdr) &&
1546 (eh == NULL || eh->ether_type == htons(ETHERTYPE_IPV6)) &&
1547 ip->ip_v == 6) {
1548 struct ip6_hdr *ip6 = (struct ip6_hdr *)ip;
1549
1550 is_ipv6 = 1;
1551 args->flags |= IPFW_ARGS_IP6;
1552 hlen = sizeof(struct ip6_hdr);
1553 proto = ip6->ip6_nxt;
1554 /* Search extension headers to find upper layer protocols */
1555 while (ulp == NULL && offset == 0) {
1556 switch (proto) {
1557 case IPPROTO_ICMPV6:
1558 PULLUP_TO(hlen, ulp, struct icmp6_hdr);
1559 #ifdef INET6
1560 icmp6_type = ICMP6(ulp)->icmp6_type;
1561 #endif
1562 break;
1563
1564 case IPPROTO_TCP:
1565 PULLUP_TO(hlen, ulp, struct tcphdr);
1566 dst_port = TCP(ulp)->th_dport;
1567 src_port = TCP(ulp)->th_sport;
1568 /* save flags for dynamic rules */
1569 args->f_id._flags = tcp_get_flags(TCP(ulp));
1570 break;
1571
1572 case IPPROTO_SCTP:
1573 if (pktlen >= hlen + sizeof(struct sctphdr) +
1574 sizeof(struct sctp_chunkhdr) +
1575 offsetof(struct sctp_init, a_rwnd))
1576 PULLUP_LEN(hlen, ulp,
1577 sizeof(struct sctphdr) +
1578 sizeof(struct sctp_chunkhdr) +
1579 offsetof(struct sctp_init, a_rwnd));
1580 else if (pktlen >= hlen + sizeof(struct sctphdr))
1581 PULLUP_LEN(hlen, ulp, pktlen - hlen);
1582 else
1583 PULLUP_LEN(hlen, ulp,
1584 sizeof(struct sctphdr));
1585 src_port = SCTP(ulp)->src_port;
1586 dst_port = SCTP(ulp)->dest_port;
1587 break;
1588
1589 case IPPROTO_UDP:
1590 case IPPROTO_UDPLITE:
1591 PULLUP_TO(hlen, ulp, struct udphdr);
1592 dst_port = UDP(ulp)->uh_dport;
1593 src_port = UDP(ulp)->uh_sport;
1594 break;
1595
1596 case IPPROTO_HOPOPTS: /* RFC 2460 */
1597 PULLUP_TO(hlen, ulp, struct ip6_hbh);
1598 ext_hd |= EXT_HOPOPTS;
1599 hlen += (((struct ip6_hbh *)ulp)->ip6h_len + 1) << 3;
1600 proto = ((struct ip6_hbh *)ulp)->ip6h_nxt;
1601 ulp = NULL;
1602 break;
1603
1604 case IPPROTO_ROUTING: /* RFC 2460 */
1605 PULLUP_TO(hlen, ulp, struct ip6_rthdr);
1606 switch (((struct ip6_rthdr *)ulp)->ip6r_type) {
1607 case 0:
1608 ext_hd |= EXT_RTHDR0;
1609 break;
1610 case 2:
1611 ext_hd |= EXT_RTHDR2;
1612 break;
1613 default:
1614 if (V_fw_verbose)
1615 printf("IPFW2: IPV6 - Unknown "
1616 "Routing Header type(%d)\n",
1617 ((struct ip6_rthdr *)
1618 ulp)->ip6r_type);
1619 if (V_fw_deny_unknown_exthdrs)
1620 return (IP_FW_DENY);
1621 break;
1622 }
1623 ext_hd |= EXT_ROUTING;
1624 hlen += (((struct ip6_rthdr *)ulp)->ip6r_len + 1) << 3;
1625 proto = ((struct ip6_rthdr *)ulp)->ip6r_nxt;
1626 ulp = NULL;
1627 break;
1628
1629 case IPPROTO_FRAGMENT: /* RFC 2460 */
1630 PULLUP_TO(hlen, ulp, struct ip6_frag);
1631 ext_hd |= EXT_FRAGMENT;
1632 hlen += sizeof (struct ip6_frag);
1633 proto = ((struct ip6_frag *)ulp)->ip6f_nxt;
1634 offset = ((struct ip6_frag *)ulp)->ip6f_offlg &
1635 IP6F_OFF_MASK;
1636 ip6f_mf = ((struct ip6_frag *)ulp)->ip6f_offlg &
1637 IP6F_MORE_FRAG;
1638 if (V_fw_permit_single_frag6 == 0 &&
1639 offset == 0 && ip6f_mf == 0) {
1640 if (V_fw_verbose)
1641 printf("IPFW2: IPV6 - Invalid "
1642 "Fragment Header\n");
1643 if (V_fw_deny_unknown_exthdrs)
1644 return (IP_FW_DENY);
1645 break;
1646 }
1647 args->f_id.extra =
1648 ntohl(((struct ip6_frag *)ulp)->ip6f_ident);
1649 ulp = NULL;
1650 break;
1651
1652 case IPPROTO_DSTOPTS: /* RFC 2460 */
1653 PULLUP_TO(hlen, ulp, struct ip6_hbh);
1654 ext_hd |= EXT_DSTOPTS;
1655 hlen += (((struct ip6_hbh *)ulp)->ip6h_len + 1) << 3;
1656 proto = ((struct ip6_hbh *)ulp)->ip6h_nxt;
1657 ulp = NULL;
1658 break;
1659
1660 case IPPROTO_AH: /* RFC 2402 */
1661 PULLUP_TO(hlen, ulp, struct ip6_ext);
1662 ext_hd |= EXT_AH;
1663 hlen += (((struct ip6_ext *)ulp)->ip6e_len + 2) << 2;
1664 proto = ((struct ip6_ext *)ulp)->ip6e_nxt;
1665 ulp = NULL;
1666 break;
1667
1668 case IPPROTO_ESP: /* RFC 2406 */
1669 PULLUP_TO(hlen, ulp, uint32_t); /* SPI, Seq# */
1670 /* Anything past Seq# is variable length and
1671 * data past this ext. header is encrypted. */
1672 ext_hd |= EXT_ESP;
1673 break;
1674
1675 case IPPROTO_NONE: /* RFC 2460 */
1676 /*
1677 * Packet ends here, and IPv6 header has
1678 * already been pulled up. If ip6e_len!=0
1679 * then octets must be ignored.
1680 */
1681 ulp = ip; /* non-NULL to get out of loop. */
1682 break;
1683
1684 case IPPROTO_OSPFIGP:
1685 /* XXX OSPF header check? */
1686 PULLUP_TO(hlen, ulp, struct ip6_ext);
1687 break;
1688
1689 case IPPROTO_PIM:
1690 /* XXX PIM header check? */
1691 PULLUP_TO(hlen, ulp, struct pim);
1692 break;
1693
1694 case IPPROTO_GRE: /* RFC 1701 */
1695 /* XXX GRE header check? */
1696 PULLUP_TO(hlen, ulp, struct grehdr);
1697 break;
1698
1699 case IPPROTO_CARP:
1700 PULLUP_TO(hlen, ulp, offsetof(
1701 struct carp_header, carp_counter));
1702 if (CARP_ADVERTISEMENT !=
1703 ((struct carp_header *)ulp)->carp_type)
1704 return (IP_FW_DENY);
1705 break;
1706
1707 case IPPROTO_IPV6: /* RFC 2893 */
1708 PULLUP_TO(hlen, ulp, struct ip6_hdr);
1709 break;
1710
1711 case IPPROTO_IPV4: /* RFC 2893 */
1712 PULLUP_TO(hlen, ulp, struct ip);
1713 break;
1714
1715 case IPPROTO_PFSYNC:
1716 PULLUP_TO(hlen, ulp, struct pfsync_header);
1717 break;
1718
1719 default:
1720 if (V_fw_verbose)
1721 printf("IPFW2: IPV6 - Unknown "
1722 "Extension Header(%d), ext_hd=%x\n",
1723 proto, ext_hd);
1724 if (V_fw_deny_unknown_exthdrs)
1725 return (IP_FW_DENY);
1726 PULLUP_TO(hlen, ulp, struct ip6_ext);
1727 break;
1728 } /*switch */
1729 }
1730 UPDATE_POINTERS();
1731 ip6 = (struct ip6_hdr *)ip;
1732 args->f_id.addr_type = 6;
1733 args->f_id.src_ip6 = ip6->ip6_src;
1734 args->f_id.dst_ip6 = ip6->ip6_dst;
1735 args->f_id.flow_id6 = ntohl(ip6->ip6_flow);
1736 iplen = ntohs(ip6->ip6_plen) + sizeof(*ip6);
1737 } else if (pktlen >= sizeof(struct ip) &&
1738 (eh == NULL || eh->ether_type == htons(ETHERTYPE_IP)) &&
1739 ip->ip_v == 4) {
1740 is_ipv4 = 1;
1741 args->flags |= IPFW_ARGS_IP4;
1742 hlen = ip->ip_hl << 2;
1743 /*
1744 * Collect parameters into local variables for faster
1745 * matching.
1746 */
1747 proto = ip->ip_p;
1748 src_ip = ip->ip_src;
1749 dst_ip = ip->ip_dst;
1750 offset = ntohs(ip->ip_off) & IP_OFFMASK;
1751 iplen = ntohs(ip->ip_len);
1752
1753 if (offset == 0) {
1754 switch (proto) {
1755 case IPPROTO_TCP:
1756 PULLUP_TO(hlen, ulp, struct tcphdr);
1757 dst_port = TCP(ulp)->th_dport;
1758 src_port = TCP(ulp)->th_sport;
1759 /* save flags for dynamic rules */
1760 args->f_id._flags = tcp_get_flags(TCP(ulp));
1761 break;
1762
1763 case IPPROTO_SCTP:
1764 if (pktlen >= hlen + sizeof(struct sctphdr) +
1765 sizeof(struct sctp_chunkhdr) +
1766 offsetof(struct sctp_init, a_rwnd))
1767 PULLUP_LEN(hlen, ulp,
1768 sizeof(struct sctphdr) +
1769 sizeof(struct sctp_chunkhdr) +
1770 offsetof(struct sctp_init, a_rwnd));
1771 else if (pktlen >= hlen + sizeof(struct sctphdr))
1772 PULLUP_LEN(hlen, ulp, pktlen - hlen);
1773 else
1774 PULLUP_LEN(hlen, ulp,
1775 sizeof(struct sctphdr));
1776 src_port = SCTP(ulp)->src_port;
1777 dst_port = SCTP(ulp)->dest_port;
1778 break;
1779
1780 case IPPROTO_UDP:
1781 case IPPROTO_UDPLITE:
1782 PULLUP_TO(hlen, ulp, struct udphdr);
1783 dst_port = UDP(ulp)->uh_dport;
1784 src_port = UDP(ulp)->uh_sport;
1785 break;
1786
1787 case IPPROTO_ICMP:
1788 PULLUP_TO(hlen, ulp, struct icmphdr);
1789 //args->f_id.flags = ICMP(ulp)->icmp_type;
1790 break;
1791
1792 default:
1793 break;
1794 }
1795 } else {
1796 if (offset == 1 && proto == IPPROTO_TCP) {
1797 /* RFC 3128 */
1798 goto pullup_failed;
1799 }
1800 }
1801
1802 UPDATE_POINTERS();
1803 args->f_id.addr_type = 4;
1804 args->f_id.src_ip = ntohl(src_ip.s_addr);
1805 args->f_id.dst_ip = ntohl(dst_ip.s_addr);
1806 } else {
1807 proto = 0;
1808 dst_ip.s_addr = src_ip.s_addr = 0;
1809
1810 args->f_id.addr_type = 1; /* XXX */
1811 }
1812 #undef PULLUP_TO
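/*
 * Clamp pktlen to the length claimed by the IP header, so that any
 * link-layer padding beyond the IP datagram is not counted.
 */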
1813 pktlen = iplen < pktlen ? iplen: pktlen;
1814
1815 /* Properly initialize the rest of f_id */
1816 args->f_id.proto = proto;
1817 args->f_id.src_port = src_port = ntohs(src_port);
1818 args->f_id.dst_port = dst_port = ntohs(dst_port);
1819
1820 IPFW_PF_RLOCK(chain);
1821 if (! V_ipfw_vnet_ready) { /* shutting down, leave NOW. */
1822 IPFW_PF_RUNLOCK(chain);
1823 return (IP_FW_PASS); /* accept */
1824 }
1825 if (args->flags & IPFW_ARGS_REF) {
1826 /*
1827 * Packet has already been tagged as a result of a previous
1828 * match on rule args->rule aka args->rule_id (PIPE, QUEUE,
1829 * REASS, NETGRAPH, DIVERT/TEE...)
1830 * Validate the slot and continue from the next one
1831 * if still present, otherwise do a lookup.
1832 */
1833 f_pos = (args->rule.chain_id == chain->id) ?
1834 args->rule.slot :
1835 ipfw_find_rule(chain, args->rule.rulenum,
1836 args->rule.rule_id);
1837 } else {
1838 f_pos = 0;
1839 }
1840
1841 if (args->flags & IPFW_ARGS_IN) {
1842 iif = args->ifp;
1843 oif = NULL;
1844 } else {
1845 MPASS(args->flags & IPFW_ARGS_OUT);
1846 iif = mem ? NULL : m_rcvif(m);
1847 oif = args->ifp;
1848 }
1849
1850 /*
1851 * Now scan the rules, and parse microinstructions for each rule.
1852 * We have two nested loops and an inner switch. Sometimes we
1853 * need to break out of one or both loops, or re-enter one of
1854 * the loops with updated variables. Loop variables are:
1855 *
1856 * f_pos (outer loop) points to the current rule.
1857 * On output it points to the matching rule.
1858 * done (outer loop) is used as a flag to break the loop.
1859 * l (inner loop) residual length of current rule.
1860 * cmd points to the current microinstruction.
1861 *
1862 * We break the inner loop by setting l=0 and possibly
1863 * cmdlen=0 if we don't want to advance cmd.
1864 * We break the outer loop by setting done=1
1865 * We can restart the inner loop by setting l>0 and f_pos, f, cmd
1866 * as needed.
1867 */
1868 for (; f_pos < chain->n_rules; f_pos++) {
1869 ipfw_insn *cmd;
1870 uint32_t tablearg = 0;
1871 int l, cmdlen, skip_or; /* skip rest of OR block */
1872 struct ip_fw *f;
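/*
 * tablearg is filled in by table-lookup opcodes and consumed
 * by later opcodes in the same rule through TARG()/TARG_VAL().
 */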
1873
1874 f = chain->map[f_pos];
1875 if (V_set_disable & (1 << f->set) )
1876 continue;
1877
1878 skip_or = 0;
1879 for (l = f->cmd_len, cmd = f->cmd ; l > 0 ;
1880 l -= cmdlen, cmd += cmdlen) {
1881 int match;
1882
1883 /*
1884 * check_body is a jump target used when we find a
1885 * CHECK_STATE, and need to jump to the body of
1886 * the target rule.
1887 */
1888
1889 /* check_body: */
1890 cmdlen = F_LEN(cmd);
1891 /*
1892 * An OR block (insn_1 || .. || insn_n) has the
1893 * F_OR bit set in all but the last instruction.
1894 * The first match will set "skip_or", and cause
1895 * the following instructions to be skipped until
1896 * past the one with the F_OR bit clear.
1897 */
1898 if (skip_or) { /* skip this instruction */
1899 if ((cmd->len & F_OR) == 0)
1900 skip_or = 0; /* next one is good */
1901 continue;
1902 }
1903 match = 0; /* set to 1 if we succeed */
1904
1905 switch (cmd->opcode) {
1906 /*
1907 * The first set of opcodes compares the packet's
1908 * fields with some pattern, setting 'match' if a
1909 * match is found. At the end of the loop there is
1910 * logic to deal with F_NOT and F_OR flags associated
1911 * with the opcode.
1912 */
1913 case O_NOP:
1914 match = 1;
1915 break;
1916
1917 case O_FORWARD_MAC:
1918 printf("ipfw: opcode %d unimplemented\n",
1919 cmd->opcode);
1920 break;
1921
1922 case O_GID:
1923 case O_UID:
1924 case O_JAIL:
1925 /*
1926 * We only check offset == 0 && proto != 0,
1927 * as this ensures that the packet
1928 * carries the port information.
1929 */
1930 if (offset != 0)
1931 break;
1932 if (proto == IPPROTO_TCP ||
1933 proto == IPPROTO_UDP ||
1934 proto == IPPROTO_UDPLITE)
1935 match = check_uidgid(
1936 (ipfw_insn_u32 *)cmd,
1937 args, &ucred_lookup,
1938 #ifdef __FreeBSD__
1939 &ucred_cache);
1940 #else
1941 (void *)&ucred_cache);
1942 #endif
1943 break;
1944
1945 case O_RECV:
1946 match = iface_match(iif, (ipfw_insn_if *)cmd,
1947 chain, &tablearg);
1948 break;
1949
1950 case O_XMIT:
1951 match = iface_match(oif, (ipfw_insn_if *)cmd,
1952 chain, &tablearg);
1953 break;
1954
1955 case O_VIA:
1956 match = iface_match(args->ifp,
1957 (ipfw_insn_if *)cmd, chain, &tablearg);
1958 break;
1959
1960 case O_MACADDR2:
1961 if (args->flags & IPFW_ARGS_ETHER) {
1962 u_int32_t *want = (u_int32_t *)
1963 ((ipfw_insn_mac *)cmd)->addr;
1964 u_int32_t *mask = (u_int32_t *)
1965 ((ipfw_insn_mac *)cmd)->mask;
1966 u_int32_t *hdr = (u_int32_t *)eh;
1967
1968 match =
1969 ( want[0] == (hdr[0] & mask[0]) &&
1970 want[1] == (hdr[1] & mask[1]) &&
1971 want[2] == (hdr[2] & mask[2]) );
1972 }
1973 break;
1974
1975 case O_MAC_TYPE:
1976 if (args->flags & IPFW_ARGS_ETHER) {
1977 u_int16_t *p =
1978 ((ipfw_insn_u16 *)cmd)->ports;
1979 int i;
1980
1981 for (i = cmdlen - 1; !match && i>0;
1982 i--, p += 2)
1983 match =
1984 (ntohs(eh->ether_type) >=
1985 p[0] &&
1986 ntohs(eh->ether_type) <=
1987 p[1]);
1988 }
1989 break;
1990
1991 case O_FRAG:
1992 if (is_ipv4) {
1993 /*
1994 * Since flags_match() works with
1995 * uint8_t we pack ip_off into 8 bits.
1996 * For this match, offset acts as a boolean.
1997 */
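/*
 * After the shift, IP_RF/IP_DF/IP_MF occupy bits
 * 7..5 of the byte, and bit 0 says whether the
 * fragment offset is non-zero.
 */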
1998 match = flags_match(cmd,
1999 ((ntohs(ip->ip_off) & ~IP_OFFMASK)
2000 >> 8) | (offset != 0));
2001 } else {
2002 /*
2003 * Compatibility: historically bare
2004 * "frag" would match IPv6 fragments.
2005 */
2006 match = (cmd->arg1 == 0x1 &&
2007 (offset != 0));
2008 }
2009 break;
2010
2011 case O_IN: /* "out" is "not in" */
2012 match = (oif == NULL);
2013 break;
2014
2015 case O_LAYER2:
2016 match = (args->flags & IPFW_ARGS_ETHER);
2017 break;
2018
2019 case O_DIVERTED:
2020 if ((args->flags & IPFW_ARGS_REF) == 0)
2021 break;
2022 /*
2023 * For diverted packets, args->rule.info
2024 * contains the divert port (in host format),
2025 * the reason and the direction.
2026 */
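/*
 * cmd->arg1 selects the direction: bit 0 matches
 * packets diverted on input, bit 1 packets
 * diverted on output.
 */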
2027 match = ((args->rule.info & IPFW_IS_MASK) ==
2028 IPFW_IS_DIVERT) && (
2029 ((args->rule.info & IPFW_INFO_IN) ?
2030 1: 2) & cmd->arg1);
2031 break;
2032
2033 case O_PROTO:
2034 /*
2035 * We do not allow an arg of 0, so
2036 * checking "proto" alone suffices.
2037 */
2038 match = (proto == cmd->arg1);
2039 break;
2040
2041 case O_IP_SRC:
2042 match = is_ipv4 &&
2043 (((ipfw_insn_ip *)cmd)->addr.s_addr ==
2044 src_ip.s_addr);
2045 break;
2046
2047 case O_IP_DST_LOOKUP:
2048 {
2049 if (cmdlen > F_INSN_SIZE(ipfw_insn_u32)) {
2050 void *pkey;
2051 uint32_t vidx, key;
2052 uint16_t keylen = 0; /* zero if we can't match the packet */
2053
2054 /* Determine lookup key type */
2055 vidx = ((ipfw_insn_u32 *)cmd)->d[1];
2056 switch (vidx) {
2057 case LOOKUP_DST_IP:
2058 case LOOKUP_SRC_IP:
2059 /* Need IP frame */
2060 if (is_ipv6 == 0 && is_ipv4 == 0)
2061 break;
2062 if (vidx == LOOKUP_DST_IP)
2063 pkey = is_ipv6 ?
2064 (void *)&args->f_id.dst_ip6:
2065 (void *)&dst_ip;
2066 else
2067 pkey = is_ipv6 ?
2068 (void *)&args->f_id.src_ip6:
2069 (void *)&src_ip;
2070 keylen = is_ipv6 ?
2071 sizeof(struct in6_addr):
2072 sizeof(in_addr_t);
2073 break;
2074 case LOOKUP_DST_PORT:
2075 case LOOKUP_SRC_PORT:
2076 /* Need IP frame */
2077 if (is_ipv6 == 0 && is_ipv4 == 0)
2078 break;
2079 /* Skip fragments */
2080 if (offset != 0)
2081 break;
2082 /* Skip proto without ports */
2083 if (proto != IPPROTO_TCP &&
2084 proto != IPPROTO_UDP &&
2085 proto != IPPROTO_UDPLITE &&
2086 proto != IPPROTO_SCTP)
2087 break;
2088 key = vidx == LOOKUP_DST_PORT ?
2089 dst_port:
2090 src_port;
2091 pkey = &key;
2092 keylen = sizeof(key);
2093 break;
2094 case LOOKUP_UID:
2095 case LOOKUP_JAIL:
2096 check_uidgid(
2097 (ipfw_insn_u32 *)cmd,
2098 args, &ucred_lookup,
2099 &ucred_cache);
2100 key = vidx == LOOKUP_UID ?
2101 ucred_cache->cr_uid:
2102 ucred_cache->cr_prison->pr_id;
2103 pkey = &key;
2104 keylen = sizeof(key);
2105 break;
2106 case LOOKUP_DSCP:
2107 /* Need IP frame */
2108 if (is_ipv6 == 0 && is_ipv4 == 0)
2109 break;
2110 if (is_ipv6)
2111 key = IPV6_DSCP(
2112 (struct ip6_hdr *)ip) >> 2;
2113 else
2114 key = ip->ip_tos >> 2;
2115 pkey = &key;
2116 keylen = sizeof(key);
2117 break;
2118 case LOOKUP_DST_MAC:
2119 case LOOKUP_SRC_MAC:
2120 /* Need ether frame */
2121 if ((args->flags & IPFW_ARGS_ETHER) == 0)
2122 break;
2123 pkey = vidx == LOOKUP_DST_MAC ?
2124 eh->ether_dhost:
2125 eh->ether_shost;
2126 keylen = ETHER_ADDR_LEN;
2127 break;
2128 case LOOKUP_MARK:
2129 key = args->rule.pkt_mark;
2130 pkey = &key;
2131 keylen = sizeof(key);
2132 break;
2133 }
2134 if (keylen == 0)
2135 break;
2136 match = ipfw_lookup_table(chain,
2137 cmd->arg1, keylen, pkey, &vidx);
2138 if (!match)
2139 break;
2140 tablearg = vidx;
2141 break;
2142 }
2143 /* cmdlen <= F_INSN_SIZE(ipfw_insn_u32) */
2144 /* FALLTHROUGH */
2145 }
2146 case O_IP_SRC_LOOKUP:
2147 {
2148 void *pkey;
2149 uint32_t vidx;
2150 uint16_t keylen;
2151
2152 if (is_ipv4) {
2153 keylen = sizeof(in_addr_t);
2154 if (cmd->opcode == O_IP_DST_LOOKUP)
2155 pkey = &dst_ip;
2156 else
2157 pkey = &src_ip;
2158 } else if (is_ipv6) {
2159 keylen = sizeof(struct in6_addr);
2160 if (cmd->opcode == O_IP_DST_LOOKUP)
2161 pkey = &args->f_id.dst_ip6;
2162 else
2163 pkey = &args->f_id.src_ip6;
2164 } else
2165 break;
2166 match = ipfw_lookup_table(chain, cmd->arg1,
2167 keylen, pkey, &vidx);
2168 if (!match)
2169 break;
2170 if (cmdlen == F_INSN_SIZE(ipfw_insn_u32)) {
2171 match = ((ipfw_insn_u32 *)cmd)->d[0] ==
2172 TARG_VAL(chain, vidx, tag);
2173 if (!match)
2174 break;
2175 }
2176 tablearg = vidx;
2177 break;
2178 }
2179
2180 case O_MAC_SRC_LOOKUP:
2181 case O_MAC_DST_LOOKUP:
2182 {
2183 void *pkey;
2184 uint32_t vidx;
2185 uint16_t keylen = ETHER_ADDR_LEN;
2186
2187 /* Need ether frame */
2188 if ((args->flags & IPFW_ARGS_ETHER) == 0)
2189 break;
2190
2191 if (cmd->opcode == O_MAC_DST_LOOKUP)
2192 pkey = eh->ether_dhost;
2193 else
2194 pkey = eh->ether_shost;
2195
2196 match = ipfw_lookup_table(chain, cmd->arg1,
2197 keylen, pkey, &vidx);
2198 if (!match)
2199 break;
2200 if (cmdlen == F_INSN_SIZE(ipfw_insn_u32)) {
2201 match = ((ipfw_insn_u32 *)cmd)->d[0] ==
2202 TARG_VAL(chain, vidx, tag);
2203 if (!match)
2204 break;
2205 }
2206 tablearg = vidx;
2207 break;
2208 }
2209
2210 case O_IP_FLOW_LOOKUP:
2211 {
2212 uint32_t v = 0;
2213 match = ipfw_lookup_table(chain,
2214 cmd->arg1, 0, &args->f_id, &v);
2215 if (!match)
2216 break;
2217 if (cmdlen == F_INSN_SIZE(ipfw_insn_u32))
2218 match = ((ipfw_insn_u32 *)cmd)->d[0] ==
2219 TARG_VAL(chain, v, tag);
2220 if (match)
2221 tablearg = v;
2222 }
2223 break;
2224 case O_IP_SRC_MASK:
2225 case O_IP_DST_MASK:
2226 if (is_ipv4) {
2227 uint32_t a =
2228 (cmd->opcode == O_IP_DST_MASK) ?
2229 dst_ip.s_addr : src_ip.s_addr;
2230 uint32_t *p = ((ipfw_insn_u32 *)cmd)->d;
2231 int i = cmdlen-1;
2232
2233 for (; !match && i>0; i-= 2, p+= 2)
2234 match = (p[0] == (a & p[1]));
2235 }
2236 break;
2237
2238 case O_IP_SRC_ME:
2239 if (is_ipv4) {
2240 match = in_localip(src_ip);
2241 break;
2242 }
2243 #ifdef INET6
2244 /* FALLTHROUGH */
2245 case O_IP6_SRC_ME:
2246 match = is_ipv6 &&
2247 ipfw_localip6(&args->f_id.src_ip6);
2248 #endif
2249 break;
2250
2251 case O_IP_DST_SET:
2252 case O_IP_SRC_SET:
2253 if (is_ipv4) {
2254 u_int32_t *d = (u_int32_t *)(cmd+1);
2255 u_int32_t addr =
2256 cmd->opcode == O_IP_DST_SET ?
2257 args->f_id.dst_ip :
2258 args->f_id.src_ip;
2259
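/*
 * d[0] is the base address of the set; the words
 * that follow form a bitmap covering cmd->arg1
 * consecutive addresses, one bit per address.
 */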
2260 if (addr < d[0])
2261 break;
2262 addr -= d[0]; /* subtract base */
2263 match = (addr < cmd->arg1) &&
2264 ( d[ 1 + (addr>>5)] &
2265 (1<<(addr & 0x1f)) );
2266 }
2267 break;
2268
2269 case O_IP_DST:
2270 match = is_ipv4 &&
2271 (((ipfw_insn_ip *)cmd)->addr.s_addr ==
2272 dst_ip.s_addr);
2273 break;
2274
2275 case O_IP_DST_ME:
2276 if (is_ipv4) {
2277 match = in_localip(dst_ip);
2278 break;
2279 }
2280 #ifdef INET6
2281 /* FALLTHROUGH */
2282 case O_IP6_DST_ME:
2283 match = is_ipv6 &&
2284 ipfw_localip6(&args->f_id.dst_ip6);
2285 #endif
2286 break;
2287
2288 case O_IP_SRCPORT:
2289 case O_IP_DSTPORT:
2290 /*
2291 * offset == 0 && proto != 0 is enough
2292 * to guarantee that we have a
2293 * packet with port info.
2294 */
2295 if ((proto == IPPROTO_UDP ||
2296 proto == IPPROTO_UDPLITE ||
2297 proto == IPPROTO_TCP ||
2298 proto == IPPROTO_SCTP) && offset == 0) {
2299 u_int16_t x =
2300 (cmd->opcode == O_IP_SRCPORT) ?
2301 src_port : dst_port ;
2302 u_int16_t *p =
2303 ((ipfw_insn_u16 *)cmd)->ports;
2304 int i;
2305
2306 for (i = cmdlen - 1; !match && i>0;
2307 i--, p += 2)
2308 match = (x>=p[0] && x<=p[1]);
2309 }
2310 break;
2311
2312 case O_ICMPTYPE:
2313 match = (offset == 0 && proto==IPPROTO_ICMP &&
2314 icmptype_match(ICMP(ulp), (ipfw_insn_u32 *)cmd) );
2315 break;
2316
2317 #ifdef INET6
2318 case O_ICMP6TYPE:
2319 match = is_ipv6 && offset == 0 &&
2320 proto==IPPROTO_ICMPV6 &&
2321 icmp6type_match(
2322 ICMP6(ulp)->icmp6_type,
2323 (ipfw_insn_u32 *)cmd);
2324 break;
2325 #endif /* INET6 */
2326
2327 case O_IPOPT:
2328 match = (is_ipv4 &&
2329 ipopts_match(ip, cmd) );
2330 break;
2331
2332 case O_IPVER:
2333 match = ((is_ipv4 || is_ipv6) &&
2334 cmd->arg1 == ip->ip_v);
2335 break;
2336
2337 case O_IPID:
2338 case O_IPTTL:
2339 if (!is_ipv4)
2340 break;
2341 case O_IPLEN:
2342 { /* only for IP packets */
2343 uint16_t x;
2344 uint16_t *p;
2345 int i;
2346
2347 if (cmd->opcode == O_IPLEN)
2348 x = iplen;
2349 else if (cmd->opcode == O_IPTTL)
2350 x = ip->ip_ttl;
2351 else /* must be IPID */
2352 x = ntohs(ip->ip_id);
2353 if (cmdlen == 1) {
2354 match = (cmd->arg1 == x);
2355 break;
2356 }
2357 /* otherwise we have ranges */
2358 p = ((ipfw_insn_u16 *)cmd)->ports;
2359 i = cmdlen - 1;
2360 for (; !match && i>0; i--, p += 2)
2361 match = (x >= p[0] && x <= p[1]);
2362 }
2363 break;
2364
2365 case O_IPPRECEDENCE:
2366 match = (is_ipv4 &&
2367 (cmd->arg1 == (ip->ip_tos & 0xe0)) );
2368 break;
2369
2370 case O_IPTOS:
2371 match = (is_ipv4 &&
2372 flags_match(cmd, ip->ip_tos));
2373 break;
2374
2375 case O_DSCP:
2376 {
2377 uint32_t *p;
2378 uint16_t x;
2379
2380 p = ((ipfw_insn_u32 *)cmd)->d;
2381
2382 if (is_ipv4)
2383 x = ip->ip_tos >> 2;
2384 else if (is_ipv6) {
2385 x = IPV6_DSCP(
2386 (struct ip6_hdr *)ip) >> 2;
2387 x &= 0x3f;
2388 } else
2389 break;
2390
2391 /* DSCP bitmask is stored as low_u32 high_u32 */
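/* e.g. DSCP 46 (EF) tests bit 46 - 32 = 14 of the high word. */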
2392 if (x >= 32)
2393 match = *(p + 1) & (1 << (x - 32));
2394 else
2395 match = *p & (1 << x);
2396 }
2397 break;
2398
2399 case O_TCPDATALEN:
2400 if (proto == IPPROTO_TCP && offset == 0) {
2401 struct tcphdr *tcp;
2402 uint16_t x;
2403 uint16_t *p;
2404 int i;
2405 #ifdef INET6
2406 if (is_ipv6) {
2407 struct ip6_hdr *ip6;
2408
2409 ip6 = (struct ip6_hdr *)ip;
2410 if (ip6->ip6_plen == 0) {
2411 /*
2412 * Jumbo payload is not
2413 * supported by this
2414 * opcode.
2415 */
2416 break;
2417 }
2418 x = iplen - hlen;
2419 } else
2420 #endif /* INET6 */
2421 x = iplen - (ip->ip_hl << 2);
2422 tcp = TCP(ulp);
2423 x -= tcp->th_off << 2;
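/* x is now the TCP payload length in bytes. */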
2424 if (cmdlen == 1) {
2425 match = (cmd->arg1 == x);
2426 break;
2427 }
2428 /* otherwise we have ranges */
2429 p = ((ipfw_insn_u16 *)cmd)->ports;
2430 i = cmdlen - 1;
2431 for (; !match && i>0; i--, p += 2)
2432 match = (x >= p[0] && x <= p[1]);
2433 }
2434 break;
2435
2436 case O_TCPFLAGS:
2437 /*
2438 * Note that this is currently only set up to
2439 * match the lower 8 TCP header flag bits, not
2440 * the full complement of all 12 flags.
2441 */
2442 match = (proto == IPPROTO_TCP && offset == 0 &&
2443 flags_match(cmd, tcp_get_flags(TCP(ulp))));
2444 break;
2445
2446 case O_TCPOPTS:
2447 if (proto == IPPROTO_TCP && offset == 0 && ulp){
2448 PULLUP_LEN_LOCKED(hlen, ulp,
2449 (TCP(ulp)->th_off << 2));
2450 match = tcpopts_match(TCP(ulp), cmd);
2451 }
2452 break;
2453
2454 case O_TCPSEQ:
2455 match = (proto == IPPROTO_TCP && offset == 0 &&
2456 ((ipfw_insn_u32 *)cmd)->d[0] ==
2457 TCP(ulp)->th_seq);
2458 break;
2459
2460 case O_TCPACK:
2461 match = (proto == IPPROTO_TCP && offset == 0 &&
2462 ((ipfw_insn_u32 *)cmd)->d[0] ==
2463 TCP(ulp)->th_ack);
2464 break;
2465
2466 case O_TCPMSS:
2467 if (proto == IPPROTO_TCP &&
2468 (args->f_id._flags & TH_SYN) != 0 &&
2469 ulp != NULL) {
2470 uint16_t mss, *p;
2471 int i;
2472
2473 PULLUP_LEN_LOCKED(hlen, ulp,
2474 (TCP(ulp)->th_off << 2));
2475 if ((tcpopts_parse(TCP(ulp), &mss) &
2476 IP_FW_TCPOPT_MSS) == 0)
2477 break;
2478 if (cmdlen == 1) {
2479 match = (cmd->arg1 == mss);
2480 break;
2481 }
2482 /* Otherwise we have ranges. */
2483 p = ((ipfw_insn_u16 *)cmd)->ports;
2484 i = cmdlen - 1;
2485 for (; !match && i > 0; i--, p += 2)
2486 match = (mss >= p[0] &&
2487 mss <= p[1]);
2488 }
2489 break;
2490
2491 case O_TCPWIN:
2492 if (proto == IPPROTO_TCP && offset == 0) {
2493 uint16_t x;
2494 uint16_t *p;
2495 int i;
2496
2497 x = ntohs(TCP(ulp)->th_win);
2498 if (cmdlen == 1) {
2499 match = (cmd->arg1 == x);
2500 break;
2501 }
2502 /* Otherwise we have ranges. */
2503 p = ((ipfw_insn_u16 *)cmd)->ports;
2504 i = cmdlen - 1;
2505 for (; !match && i > 0; i--, p += 2)
2506 match = (x >= p[0] && x <= p[1]);
2507 }
2508 break;
2509
2510 case O_ESTAB:
2511 /* reject packets which have SYN only */
2512 /* XXX should I also check for TH_ACK? */
2513 match = (proto == IPPROTO_TCP && offset == 0 &&
2514 (tcp_get_flags(TCP(ulp)) &
2515 (TH_RST | TH_ACK | TH_SYN)) != TH_SYN);
2516 break;
2517
2518 case O_ALTQ: {
2519 struct pf_mtag *at;
2520 struct m_tag *mtag;
2521 ipfw_insn_altq *altq = (ipfw_insn_altq *)cmd;
2522
2523 /*
2524 * ALTQ uses mbuf tags from another
2525 * packet filtering system - pf(4).
2526 * We allocate a tag in its format
2527 * and fill it in, pretending to be pf(4).
2528 */
2529 match = 1;
2530 at = pf_find_mtag(m);
2531 if (at != NULL && at->qid != 0)
2532 break;
2533 mtag = m_tag_get(PACKET_TAG_PF,
2534 sizeof(struct pf_mtag), M_NOWAIT | M_ZERO);
2535 if (mtag == NULL) {
2536 /*
2537 * Let the packet fall back to the
2538 * default ALTQ.
2539 */
2540 break;
2541 }
2542 m_tag_prepend(m, mtag);
2543 at = (struct pf_mtag *)(mtag + 1);
2544 at->qid = altq->qid;
2545 at->hdr = ip;
2546 break;
2547 }
2548
2549 case O_LOG:
2550 ipfw_log(chain, f, hlen, args,
2551 offset | ip6f_mf, tablearg, ip);
2552 match = 1;
2553 break;
2554
2555 case O_PROB:
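/*
 * d[0] holds the match probability scaled to
 * random()'s 0..0x7fffffff range.
 */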
2556 match = (random()<((ipfw_insn_u32 *)cmd)->d[0]);
2557 break;
2558
2559 case O_VERREVPATH:
2560 /* Outgoing packets automatically pass/match */
2561 match = (args->flags & IPFW_ARGS_OUT ||
2562 (
2563 #ifdef INET6
2564 is_ipv6 ?
2565 verify_path6(&(args->f_id.src_ip6),
2566 iif, args->f_id.fib) :
2567 #endif
2568 verify_path(src_ip, iif, args->f_id.fib)));
2569 break;
2570
2571 case O_VERSRCREACH:
2572 /* Outgoing packets automatically pass/match */
2573 match = (hlen > 0 && ((oif != NULL) || (
2574 #ifdef INET6
2575 is_ipv6 ?
2576 verify_path6(&(args->f_id.src_ip6),
2577 NULL, args->f_id.fib) :
2578 #endif
2579 verify_path(src_ip, NULL, args->f_id.fib))));
2580 break;
2581
2582 case O_ANTISPOOF:
2583 /* Outgoing packets automatically pass/match */
2584 if (oif == NULL && hlen > 0 &&
2585 ( (is_ipv4 && in_localaddr(src_ip))
2586 #ifdef INET6
2587 || (is_ipv6 &&
2588 in6_localaddr(&(args->f_id.src_ip6)))
2589 #endif
2590 ))
2591 match =
2592 #ifdef INET6
2593 is_ipv6 ? verify_path6(
2594 &(args->f_id.src_ip6), iif,
2595 args->f_id.fib) :
2596 #endif
2597 verify_path(src_ip, iif,
2598 args->f_id.fib);
2599 else
2600 match = 1;
2601 break;
2602
2603 case O_IPSEC:
2604 match = (m_tag_find(m,
2605 PACKET_TAG_IPSEC_IN_DONE, NULL) != NULL);
2606 /* otherwise no match */
2607 break;
2608
2609 #ifdef INET6
2610 case O_IP6_SRC:
2611 match = is_ipv6 &&
2612 IN6_ARE_ADDR_EQUAL(&args->f_id.src_ip6,
2613 &((ipfw_insn_ip6 *)cmd)->addr6);
2614 break;
2615
2616 case O_IP6_DST:
2617 match = is_ipv6 &&
2618 IN6_ARE_ADDR_EQUAL(&args->f_id.dst_ip6,
2619 &((ipfw_insn_ip6 *)cmd)->addr6);
2620 break;
2621 case O_IP6_SRC_MASK:
2622 case O_IP6_DST_MASK:
2623 if (is_ipv6) {
2624 int i = cmdlen - 1;
2625 struct in6_addr p;
2626 struct in6_addr *d =
2627 &((ipfw_insn_ip6 *)cmd)->addr6;
2628
2629 for (; !match && i > 0; d += 2,
2630 i -= F_INSN_SIZE(struct in6_addr)
2631 * 2) {
2632 p = (cmd->opcode ==
2633 O_IP6_SRC_MASK) ?
2634 args->f_id.src_ip6:
2635 args->f_id.dst_ip6;
2636 APPLY_MASK(&p, &d[1]);
2637 match =
2638 IN6_ARE_ADDR_EQUAL(&d[0],
2639 &p);
2640 }
2641 }
2642 break;
2643
2644 case O_FLOW6ID:
2645 match = is_ipv6 &&
2646 flow6id_match(args->f_id.flow_id6,
2647 (ipfw_insn_u32 *) cmd);
2648 break;
2649
2650 case O_EXT_HDR:
2651 match = is_ipv6 &&
2652 (ext_hd & ((ipfw_insn *) cmd)->arg1);
2653 break;
2654
2655 case O_IP6:
2656 match = is_ipv6;
2657 break;
2658 #endif
2659
2660 case O_IP4:
2661 match = is_ipv4;
2662 break;
2663
2664 case O_TAG: {
2665 struct m_tag *mtag;
2666 uint32_t tag = TARG(cmd->arg1, tag);
2667
2668 /* Packet is already tagged with this tag? */
2669 mtag = m_tag_locate(m, MTAG_IPFW, tag, NULL);
2670
2671 /* The `untag' action is encoded with the F_NOT
2672 * flag set. In that case we remove the mtag
2673 * from the mbuf and reset `match' to zero
2674 * (`match' is inverted later).
2675 * Otherwise we allocate a new mtag, if needed,
2676 * and prepend it to the mbuf.
2677 */
2678 if (cmd->len & F_NOT) { /* `untag' action */
2679 if (mtag != NULL)
2680 m_tag_delete(m, mtag);
2681 match = 0;
2682 } else {
2683 if (mtag == NULL) {
2684 mtag = m_tag_alloc( MTAG_IPFW,
2685 tag, 0, M_NOWAIT);
2686 if (mtag != NULL)
2687 m_tag_prepend(m, mtag);
2688 }
2689 match = 1;
2690 }
2691 break;
2692 }
2693
2694 case O_FIB: /* try match the specified fib */
2695 if (args->f_id.fib == cmd->arg1)
2696 match = 1;
2697 break;
2698
2699 case O_SOCKARG: {
2700 #ifndef USERSPACE /* not supported in userspace */
2701 struct inpcb *inp = args->inp;
2702 struct inpcbinfo *pi;
2703 bool inp_locked = false;
2704
2705 if (proto == IPPROTO_TCP)
2706 pi = &V_tcbinfo;
2707 else if (proto == IPPROTO_UDP)
2708 pi = &V_udbinfo;
2709 else if (proto == IPPROTO_UDPLITE)
2710 pi = &V_ulitecbinfo;
2711 else
2712 break;
2713
2714 /*
2715 * XXXRW: so_user_cookie should almost
2716 * certainly be inp_user_cookie?
2717 */
2718
2719 /*
2720 * For incoming packet lookup the inpcb
2721 * using the src/dest ip/port tuple.
2722 */
2723 if (is_ipv4 && inp == NULL) {
2724 inp = in_pcblookup(pi,
2725 src_ip, htons(src_port),
2726 dst_ip, htons(dst_port),
2727 INPLOOKUP_RLOCKPCB, NULL);
2728 inp_locked = true;
2729 }
2730 #ifdef INET6
2731 if (is_ipv6 && inp == NULL) {
2732 inp = in6_pcblookup(pi,
2733 &args->f_id.src_ip6,
2734 htons(src_port),
2735 &args->f_id.dst_ip6,
2736 htons(dst_port),
2737 INPLOOKUP_RLOCKPCB, NULL);
2738 inp_locked = true;
2739 }
2740 #endif /* INET6 */
2741 if (inp != NULL) {
2742 if (inp->inp_socket) {
2743 tablearg =
2744 inp->inp_socket->so_user_cookie;
2745 if (tablearg)
2746 match = 1;
2747 }
2748 if (inp_locked)
2749 INP_RUNLOCK(inp);
2750 }
2751 #endif /* !USERSPACE */
2752 break;
2753 }
2754
2755 case O_TAGGED: {
2756 struct m_tag *mtag;
2757 uint32_t tag = TARG(cmd->arg1, tag);
2758
2759 if (cmdlen == 1) {
2760 match = m_tag_locate(m, MTAG_IPFW,
2761 tag, NULL) != NULL;
2762 break;
2763 }
2764
2765 /* we have ranges */
2766 for (mtag = m_tag_first(m);
2767 mtag != NULL && !match;
2768 mtag = m_tag_next(m, mtag)) {
2769 uint16_t *p;
2770 int i;
2771
2772 if (mtag->m_tag_cookie != MTAG_IPFW)
2773 continue;
2774
2775 p = ((ipfw_insn_u16 *)cmd)->ports;
2776 i = cmdlen - 1;
2777 for(; !match && i > 0; i--, p += 2)
2778 match =
2779 mtag->m_tag_id >= p[0] &&
2780 mtag->m_tag_id <= p[1];
2781 }
2782 break;
2783 }
2784
2785 case O_MARK: {
2786 uint32_t mark;
2787 if (cmd->arg1 == IP_FW_TARG)
2788 mark = TARG_VAL(chain, tablearg, mark);
2789 else
2790 mark = ((ipfw_insn_u32 *)cmd)->d[0];
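/*
 * d[1] is the mask: only the masked bits of the
 * packet mark are compared against the value.
 */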
2791 match =
2792 (args->rule.pkt_mark &
2793 ((ipfw_insn_u32 *)cmd)->d[1]) ==
2794 (mark & ((ipfw_insn_u32 *)cmd)->d[1]);
2795 break;
2796 }
2797
2798 /*
2799 * The second set of opcodes represents 'actions',
2800 * i.e. the terminal part of a rule once the packet
2801 * matches all previous patterns.
2802 * Typically there is only one action for each rule,
2803 * and the opcode is stored at the end of the rule
2804 * (but there are exceptions -- see below).
2805 *
2806 * In general, here we set retval and terminate the
2807 * outer loop (would be a 'break 3' in some language,
2808 * but we need to set l=0, done=1)
2809 *
2810 * Exceptions:
2811 * O_COUNT and O_SKIPTO actions:
2812 * instead of terminating, we jump to the next rule
2813 * (setting l=0), or to the SKIPTO target (setting
2814 * f/f_len, cmd and l as needed), respectively.
2815 *
2816 * O_TAG, O_LOG and O_ALTQ action parameters:
2817 * perform some action and set match = 1;
2818 *
2819 * O_LIMIT and O_KEEP_STATE: these opcodes are
2820 * not real 'actions', and are stored right
2821 * before the 'action' part of the rule (one
2822 * exception is O_SKIP_ACTION, which may sit
2823 * between these opcodes and the 'action' one).
2824 * These opcodes try to install an entry in the
2825 * state tables; if successful, we continue with
2826 * the next opcode (match=1; break;), otherwise
2827 * the packet must be dropped (set retval,
2828 * break loops with l=0, done=1)
2829 *
2830 * O_PROBE_STATE and O_CHECK_STATE: these opcodes
2831 * cause a lookup of the state table, and a jump
2832 * to the 'action' part of the parent rule
2833 * if an entry is found, or
2834 * (CHECK_STATE only) a jump to the next rule if
2835 * the entry is not found.
2836 * The result of the lookup is cached so that
2837 * further instances of these opcodes become NOPs.
2838 * The jump to the next rule is done by setting
2839 * l=0, cmdlen=0.
2840 *
2841 * O_SKIP_ACTION: this opcode is not a real 'action'
2842 * either, and is stored right before the 'action'
2843 * part of the rule, right after the O_KEEP_STATE
2844 * opcode. It forces a match failure, so the real
2845 * 'action' is executed only when the rule is
2846 * reached via a dynamic state table entry, in
2847 * which case execution starts directly at the
2848 * true 'action' opcode.
2849 *
2850 */
2851 case O_LIMIT:
2852 case O_KEEP_STATE:
2853 if (ipfw_dyn_install_state(chain, f,
2854 (ipfw_insn_limit *)cmd, args, ulp,
2855 pktlen, &dyn_info, tablearg)) {
2856 /* error or limit violation */
2857 retval = IP_FW_DENY;
2858 l = 0; /* exit inner loop */
2859 done = 1; /* exit outer loop */
2860 }
2861 match = 1;
2862 break;
2863
2864 case O_PROBE_STATE:
2865 case O_CHECK_STATE:
2866 /*
2867 * dynamic rules are checked at the first
2868 * keep-state or check-state occurrence,
2869 * with the result being stored in dyn_info.
2870 * The compiler introduces a PROBE_STATE
2871 * instruction for us when we have a
2872 * KEEP_STATE (because PROBE_STATE needs
2873 * to be run first).
2874 */
2875 if (DYN_LOOKUP_NEEDED(&dyn_info, cmd) &&
2876 (q = ipfw_dyn_lookup_state(args, ulp,
2877 pktlen, cmd, &dyn_info)) != NULL) {
2878 /*
2879 * Found dynamic entry, jump to the
2880 * 'action' part of the parent rule
2881 * by setting f, cmd, l and clearing
2882 * cmdlen.
2883 */
2884 f = q;
2885 f_pos = dyn_info.f_pos;
2886 cmd = ACTION_PTR(f);
2887 l = f->cmd_len - f->act_ofs;
2888 cmdlen = 0;
2889 continue;
2890 }
2891 /*
2892 * Dynamic entry not found. If CHECK_STATE,
2893 * skip to the next rule; if PROBE_STATE, just
2894 * ignore it and continue with the next opcode.
2895 */
2896 if (cmd->opcode == O_CHECK_STATE)
2897 l = 0; /* exit inner loop */
2898 match = 1;
2899 break;
2900
2901 case O_SKIP_ACTION:
2902 match = 0; /* skip to the next rule */
2903 l = 0; /* exit inner loop */
2904 break;
2905
2906 case O_ACCEPT:
2907 retval = 0; /* accept */
2908 l = 0; /* exit inner loop */
2909 done = 1; /* exit outer loop */
2910 break;
2911
2912 case O_PIPE:
2913 case O_QUEUE:
2914 set_match(args, f_pos, chain);
2915 args->rule.info = TARG(cmd->arg1, pipe);
2916 if (cmd->opcode == O_PIPE)
2917 args->rule.info |= IPFW_IS_PIPE;
2918 if (V_fw_one_pass)
2919 args->rule.info |= IPFW_ONEPASS;
2920 retval = IP_FW_DUMMYNET;
2921 l = 0; /* exit inner loop */
2922 done = 1; /* exit outer loop */
2923 break;
2924
2925 case O_DIVERT:
2926 case O_TEE:
2927 if (args->flags & IPFW_ARGS_ETHER)
2928 break; /* not on layer 2 */
2929 /* otherwise this is terminal */
2930 l = 0; /* exit inner loop */
2931 done = 1; /* exit outer loop */
2932 retval = (cmd->opcode == O_DIVERT) ?
2933 IP_FW_DIVERT : IP_FW_TEE;
2934 set_match(args, f_pos, chain);
2935 args->rule.info = TARG(cmd->arg1, divert);
2936 break;
2937
2938 case O_COUNT:
2939 IPFW_INC_RULE_COUNTER(f, pktlen);
2940 l = 0; /* exit inner loop */
2941 break;
2942
2943 case O_SKIPTO:
2944 IPFW_INC_RULE_COUNTER(f, pktlen);
2945 f_pos = JUMP(chain, f, cmd->arg1, tablearg, 0);
2946 /*
2947 * Skip disabled rules, and re-enter
2948 * the inner loop with the correct
2949 * f_pos, f, l and cmd.
2950 * Also clear cmdlen and skip_or
2951 */
2952 for (; f_pos < chain->n_rules - 1 &&
2953 (V_set_disable &
2954 (1 << chain->map[f_pos]->set));
2955 f_pos++)
2956 ;
2957 /* Re-enter the inner loop at the skipto rule. */
2958 f = chain->map[f_pos];
2959 l = f->cmd_len;
2960 cmd = f->cmd;
2961 match = 1;
2962 cmdlen = 0;
2963 skip_or = 0;
2964 continue;
2965 break; /* not reached */
2966
2967 case O_CALLRETURN: {
2968 /*
2969 * Implementation of `subroutine' call/return,
2970 * in the stack carried in an mbuf tag. This
2971 * is different from `skipto' in that any call
2972 * address is possible (`skipto' must prevent
2973 * backward jumps to avoid endless loops).
2974 * The `return' action is encoded with the F_NOT
2975 * flag set. The `m_tag_id' field is used as the
2976 * stack pointer.
2977 */
2978 struct m_tag *mtag;
2979 uint16_t jmpto, *stack;
2980
2981 #define IS_CALL ((cmd->len & F_NOT) == 0)
2982 #define IS_RETURN ((cmd->len & F_NOT) != 0)
2983 /*
2984 * Hand-rolled version of m_tag_locate() with
2985 * wildcard `type'.
2986 * If not already tagged, allocate new tag.
2987 */
2988 mtag = m_tag_first(m);
2989 while (mtag != NULL) {
2990 if (mtag->m_tag_cookie ==
2991 MTAG_IPFW_CALL)
2992 break;
2993 mtag = m_tag_next(m, mtag);
2994 }
2995 if (mtag == NULL && IS_CALL) {
2996 mtag = m_tag_alloc(MTAG_IPFW_CALL, 0,
2997 IPFW_CALLSTACK_SIZE *
2998 sizeof(uint16_t), M_NOWAIT);
2999 if (mtag != NULL)
3000 m_tag_prepend(m, mtag);
3001 }
3002
3003 /*
3004 * On error both `call' and `return' just
3005 * continue with next rule.
3006 */
3007 if (IS_RETURN && (mtag == NULL ||
3008 mtag->m_tag_id == 0)) {
3009 l = 0; /* exit inner loop */
3010 break;
3011 }
3012 if (IS_CALL && (mtag == NULL ||
3013 mtag->m_tag_id >= IPFW_CALLSTACK_SIZE)) {
3014 printf("ipfw: call stack error, "
3015 "go to next rule\n");
3016 l = 0; /* exit inner loop */
3017 break;
3018 }
3019
3020 IPFW_INC_RULE_COUNTER(f, pktlen);
3021 stack = (uint16_t *)(mtag + 1);
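/*
 * The call stack lives in the tag payload:
 * stack[0 .. m_tag_id - 1] hold the rule numbers
 * of pending calls, m_tag_id is the next free slot.
 */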
3022
3023 /*
3024 * The `call' action may use the f_pos cached in
3025 * f->next_rule, which also records the chain
3026 * version it was computed against.
3027 * The `return' action, however, has no fixed jump
3028 * address in cmd->arg1 and cannot use the cache.
3030 */
3031 if (IS_CALL) {
3032 stack[mtag->m_tag_id] = f->rulenum;
3033 mtag->m_tag_id++;
3034 f_pos = JUMP(chain, f, cmd->arg1,
3035 tablearg, 1);
3036 } else { /* `return' action */
3037 mtag->m_tag_id--;
3038 jmpto = stack[mtag->m_tag_id] + 1;
3039 f_pos = ipfw_find_rule(chain, jmpto, 0);
3040 }
3041
3042 /*
3043 * Skip disabled rules, and re-enter
3044 * the inner loop with the correct
3045 * f_pos, f, l and cmd.
3046 * Also clear cmdlen and skip_or
3047 */
3048 for (; f_pos < chain->n_rules - 1 &&
3049 (V_set_disable &
3050 (1 << chain->map[f_pos]->set)); f_pos++)
3051 ;
3052 /* Re-enter the inner loop at the dest rule. */
3053 f = chain->map[f_pos];
3054 l = f->cmd_len;
3055 cmd = f->cmd;
3056 cmdlen = 0;
3057 skip_or = 0;
3058 continue;
3059 break; /* NOTREACHED */
3060 }
3061 #undef IS_CALL
3062 #undef IS_RETURN
3063
3064 case O_REJECT:
3065 /*
3066 * Drop the packet and send a reject notice
3067 * if the packet is not ICMP (or is an ICMP
3068 * query), and it is not multicast/broadcast.
3069 */
3070 if (hlen > 0 && is_ipv4 && offset == 0 &&
3071 (proto != IPPROTO_ICMP ||
3072 is_icmp_query(ICMP(ulp))) &&
3073 !(m->m_flags & (M_BCAST|M_MCAST)) &&
3074 !IN_MULTICAST(ntohl(dst_ip.s_addr))) {
3075 KASSERT(!need_send_reject,
3076 ("o_reject - need_send_reject was set previously"));
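/*
 * For "unreach needfrag <mtu>" the MTU is carried
 * as the first 16-bit word of an ipfw_insn_u16.
 */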
3077 if ((reject_code = cmd->arg1) == ICMP_UNREACH_NEEDFRAG &&
3078 cmd->len == F_INSN_SIZE(ipfw_insn_u16)) {
3079 reject_mtu =
3080 ((ipfw_insn_u16 *)cmd)->ports[0];
3081 } else {
3082 reject_mtu = 0;
3083 }
3084 need_send_reject = true;
3085 }
3086 /* FALLTHROUGH */
3087 #ifdef INET6
3088 case O_UNREACH6:
3089 if (hlen > 0 && is_ipv6 &&
3090 ((offset & IP6F_OFF_MASK) == 0) &&
3091 (proto != IPPROTO_ICMPV6 ||
3092 (is_icmp6_query(icmp6_type) == 1)) &&
3093 !(m->m_flags & (M_BCAST|M_MCAST)) &&
3094 !IN6_IS_ADDR_MULTICAST(
3095 &args->f_id.dst_ip6)) {
3096 KASSERT(!need_send_reject,
3097 ("o_unreach6 - need_send_reject was set previously"));
3098 reject_code = cmd->arg1;
3099 if (cmd->opcode == O_REJECT) {
3100 reject_code =
3101 map_icmp_unreach(reject_code);
3102 }
3103 need_send_reject = true;
3104 }
3105 /* FALLTHROUGH */
3106 #endif
3107 case O_DENY:
3108 retval = IP_FW_DENY;
3109 l = 0; /* exit inner loop */
3110 done = 1; /* exit outer loop */
3111 break;
3112
3113 case O_FORWARD_IP:
3114 if (args->flags & IPFW_ARGS_ETHER)
3115 break; /* not valid on layer2 pkts */
3116 if (q != f ||
3117 dyn_info.direction == MATCH_FORWARD) {
3118 struct sockaddr_in *sa;
3119
3120 sa = &(((ipfw_insn_sa *)cmd)->sa);
3121 if (sa->sin_addr.s_addr == INADDR_ANY) {
3122 #ifdef INET6
3123 /*
3124 * We use O_FORWARD_IP opcode for
3125 * fwd rule with tablearg, but tables
3126 * now support IPv6 addresses. And
3127 * when we are inspecting IPv6 packet,
3128 * we can use nh6 field from
3129 * table_value as next_hop6 address.
3130 */
3131 if (is_ipv6) {
3132 struct ip_fw_nh6 *nh6;
3133
3134 args->flags |= IPFW_ARGS_NH6;
3135 nh6 = &args->hopstore6;
3136 nh6->sin6_addr = TARG_VAL(
3137 chain, tablearg, nh6);
3138 nh6->sin6_port = sa->sin_port;
3139 nh6->sin6_scope_id = TARG_VAL(
3140 chain, tablearg, zoneid);
3141 } else
3142 #endif
3143 {
3144 args->flags |= IPFW_ARGS_NH4;
3145 args->hopstore.sin_port =
3146 sa->sin_port;
3147 sa = &args->hopstore;
3148 sa->sin_family = AF_INET;
3149 sa->sin_len = sizeof(*sa);
3150 sa->sin_addr.s_addr = htonl(
3151 TARG_VAL(chain, tablearg,
3152 nh4));
3153 }
3154 } else {
3155 args->flags |= IPFW_ARGS_NH4PTR;
3156 args->next_hop = sa;
3157 }
3158 }
3159 retval = IP_FW_PASS;
3160 l = 0; /* exit inner loop */
3161 done = 1; /* exit outer loop */
3162 break;
3163
3164 #ifdef INET6
3165 case O_FORWARD_IP6:
3166 if (args->flags & IPFW_ARGS_ETHER)
3167 break; /* not valid on layer2 pkts */
3168 if (q != f ||
3169 dyn_info.direction == MATCH_FORWARD) {
3170 struct sockaddr_in6 *sin6;
3171
3172 sin6 = &(((ipfw_insn_sa6 *)cmd)->sa);
3173 args->flags |= IPFW_ARGS_NH6PTR;
3174 args->next_hop6 = sin6;
3175 }
3176 retval = IP_FW_PASS;
3177 l = 0; /* exit inner loop */
3178 done = 1; /* exit outer loop */
3179 break;
3180 #endif
3181
3182 case O_NETGRAPH:
3183 case O_NGTEE:
3184 set_match(args, f_pos, chain);
3185 args->rule.info = TARG(cmd->arg1, netgraph);
3186 if (V_fw_one_pass)
3187 args->rule.info |= IPFW_ONEPASS;
3188 retval = (cmd->opcode == O_NETGRAPH) ?
3189 IP_FW_NETGRAPH : IP_FW_NGTEE;
3190 l = 0; /* exit inner loop */
3191 done = 1; /* exit outer loop */
3192 break;
3193
3194 case O_SETFIB: {
3195 uint32_t fib;
3196
3197 IPFW_INC_RULE_COUNTER(f, pktlen);
3198 fib = TARG(cmd->arg1, fib) & 0x7FFF;
3199 if (fib >= rt_numfibs)
3200 fib = 0;
3201 M_SETFIB(m, fib);
3202 args->f_id.fib = fib; /* XXX */
3203 l = 0; /* exit inner loop */
3204 break;
3205 }
3206
3207 case O_SETDSCP: {
3208 uint16_t code;
3209
3210 code = TARG(cmd->arg1, dscp) & 0x3F;
3211 l = 0; /* exit inner loop */
3212 if (is_ipv4) {
3213 uint16_t old;
3214
3215 old = *(uint16_t *)ip;
3216 ip->ip_tos = (code << 2) |
3217 (ip->ip_tos & 0x03);
3218 ip->ip_sum = cksum_adjust(ip->ip_sum,
3219 old, *(uint16_t *)ip);
3220 } else if (is_ipv6) {
3221 /* update cached value */
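/*
 * DSCP occupies bits 27..22 of the IPv6
 * version/class/flow-label word, hence the
 * 0x0FC00000 mask and the shift by 22.
 */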
3222 args->f_id.flow_id6 =
3223 ntohl(*(uint32_t *)ip) & ~0x0FC00000;
3224 args->f_id.flow_id6 |= code << 22;
3225
3226 *((uint32_t *)ip) =
3227 htonl(args->f_id.flow_id6);
3228 } else
3229 break;
3230
3231 IPFW_INC_RULE_COUNTER(f, pktlen);
3232 break;
3233 }
3234
3235 case O_NAT:
3236 l = 0; /* exit inner loop */
3237 done = 1; /* exit outer loop */
3238 /*
3239 * Ensure that we do not invoke NAT handler for
3240 * non IPv4 packets. Libalias expects only IPv4.
3241 */
3242 if (!is_ipv4 || !IPFW_NAT_LOADED) {
3243 retval = IP_FW_DENY;
3244 break;
3245 }
3246
3247 struct cfg_nat *t;
3248 int nat_id;
3249
3250 args->rule.info = 0;
3251 set_match(args, f_pos, chain);
3252 /* Check if this is 'global' nat rule */
3253 if (cmd->arg1 == IP_FW_NAT44_GLOBAL) {
3254 retval = ipfw_nat_ptr(args, NULL, m);
3255 break;
3256 }
3257 t = ((ipfw_insn_nat *)cmd)->nat;
3258 if (t == NULL) {
3259 nat_id = TARG(cmd->arg1, nat);
3260 t = (*lookup_nat_ptr)(&chain->nat, nat_id);
3261
3262 if (t == NULL) {
3263 retval = IP_FW_DENY;
3264 break;
3265 }
3266 if (cmd->arg1 != IP_FW_TARG)
3267 ((ipfw_insn_nat *)cmd)->nat = t;
3268 }
3269 retval = ipfw_nat_ptr(args, t, m);
3270 break;
3271
3272 case O_REASS: {
3273 int ip_off;
3274
3275 l = 0; /* in any case exit inner loop */
3276 if (is_ipv6) /* IPv6 is not supported yet */
3277 break;
3278 IPFW_INC_RULE_COUNTER(f, pktlen);
3279 ip_off = ntohs(ip->ip_off);
3280
3281 /* if not fragmented, go to next rule */
3282 if ((ip_off & (IP_MF | IP_OFFMASK)) == 0)
3283 break;
3284
3285 args->m = m = ip_reass(m);
3286
3287 /*
3288 * If reassembly completed, recompute the IP header checksum.
3289 */
3290 if (m == NULL) { /* fragment got swallowed */
3291 retval = IP_FW_DENY;
3292 } else { /* good, packet complete */
3293 int hlen;
3294
3295 ip = mtod(m, struct ip *);
3296 hlen = ip->ip_hl << 2;
3297 ip->ip_sum = 0;
3298 if (hlen == sizeof(struct ip))
3299 ip->ip_sum = in_cksum_hdr(ip);
3300 else
3301 ip->ip_sum = in_cksum(m, hlen);
3302 retval = IP_FW_REASS;
3303 args->rule.info = 0;
3304 set_match(args, f_pos, chain);
3305 }
3306 done = 1; /* exit outer loop */
3307 break;
3308 }
3309
3310 case O_SETMARK: {
3311 l = 0; /* exit inner loop */
3312 args->rule.pkt_mark = (
3313 (cmd->arg1 == IP_FW_TARG) ?
3314 TARG_VAL(chain, tablearg, mark) :
3315 ((ipfw_insn_u32 *)cmd)->d[0]);
3316
3317 IPFW_INC_RULE_COUNTER(f, pktlen);
3318 break;
3319 }
3320
3321 case O_EXTERNAL_ACTION:
3322 l = 0; /* in any case exit inner loop */
3323 retval = ipfw_run_eaction(chain, args,
3324 cmd, &done);
3325 /*
3326 * If both @retval and @done are zero,
3327 * consider this as rule matching and
3328 * update counters.
3329 */
3330 if (retval == 0 && done == 0) {
3331 IPFW_INC_RULE_COUNTER(f, pktlen);
3332 /*
3333 * Reset the result of the last
3334 * dynamic state lookup.
3335 * External action can change
3336 * @args content, and it may be
3337 * used for new state lookup later.
3338 */
3339 DYN_INFO_INIT(&dyn_info);
3340 }
3341 break;
3342
3343 default:
3344 panic("-- unknown opcode %d\n", cmd->opcode);
3345 } /* end of switch() on opcodes */
3346 /*
3347 * if we get here with l=0, then match is irrelevant.
3348 */
3349
3350 if (cmd->len & F_NOT)
3351 match = !match;
3352
3353 if (match) {
3354 if (cmd->len & F_OR)
3355 skip_or = 1;
3356 } else {
3357 if (!(cmd->len & F_OR)) /* not an OR block, */
3358 break; /* try next rule */
3359 }
3360
3361 } /* end of inner loop, scan opcodes */
3362 #undef PULLUP_LEN
3363 #undef PULLUP_LEN_LOCKED
3364
3365 if (done)
3366 break;
3367
3368 /* next_rule:; */ /* try next rule */
3369
3370 } /* end of outer for, scan rules */
3371
3372 if (done) {
3373 struct ip_fw *rule = chain->map[f_pos];
3374 /* Update statistics */
3375 IPFW_INC_RULE_COUNTER(rule, pktlen);
3376 IPFW_PROBE(rule__matched, retval,
3377 is_ipv4 ? AF_INET : AF_INET6,
3378 is_ipv4 ? (uintptr_t)&src_ip :
3379 (uintptr_t)&args->f_id.src_ip6,
3380 is_ipv4 ? (uintptr_t)&dst_ip :
3381 (uintptr_t)&args->f_id.dst_ip6,
3382 args, rule);
3383 } else {
3384 retval = IP_FW_DENY;
3385 printf("ipfw: ouch!, skip past end of rules, denying packet\n");
3386 }
3387 IPFW_PF_RUNLOCK(chain);
3388 if (need_send_reject) {
3389 #ifdef INET6
3390 if (is_ipv6)
3391 send_reject6(args, reject_code, hlen,
3392 (struct ip6_hdr *)ip);
3393 else
3394 #endif
3395 send_reject(args, reject_code, reject_mtu,
3396 iplen, ip);
3397 }
3398 #ifdef __FreeBSD__
3399 if (ucred_cache != NULL)
3400 crfree(ucred_cache);
3401 #endif
3402 return (retval);
3403
3404 pullup_failed:
3405 if (V_fw_verbose)
3406 printf("ipfw: pullup failed\n");
3407 return (IP_FW_DENY);
3408 }
3409
3410 /*
3411 * Set maximum number of tables that can be used in given VNET ipfw instance.
3412 */
3413 #ifdef SYSCTL_NODE
3414 static int
3415 sysctl_ipfw_table_num(SYSCTL_HANDLER_ARGS)
3416 {
3417 int error;
3418 unsigned int ntables;
3419
3420 ntables = V_fw_tables_max;
3421
3422 error = sysctl_handle_int(oidp, &ntables, 0, req);
3423 /* Read operation or some error */
3424 if ((error != 0) || (req->newptr == NULL))
3425 return (error);
3426
3427 return (ipfw_resize_tables(&V_layer3_chain, ntables));
3428 }
3429
3430 /*
3431 * Switches table namespace between global and per-set.
3432 */
3433 static int
3434 sysctl_ipfw_tables_sets(SYSCTL_HANDLER_ARGS)
3435 {
3436 int error;
3437 unsigned int sets;
3438
3439 sets = V_fw_tables_sets;
3440
3441 error = sysctl_handle_int(oidp, &sets, 0, req);
3442 /* Read operation or some error */
3443 if ((error != 0) || (req->newptr == NULL))
3444 return (error);
3445
3446 return (ipfw_switch_tables_namespace(&V_layer3_chain, sets));
3447 }
3448 #endif
3449
3450 /*
3451 * Module and VNET glue
3452 */
3453
3454 /*
3455 * Stuff that must be initialised only on boot or module load
3456 */
3457 static int
3458 ipfw_init(void)
3459 {
3460 int error = 0;
3461
3462 /*
3463 * Only print out this stuff the first time around,
3464 * when called from the sysinit code.
3465 */
3466 printf("ipfw2 "
3467 #ifdef INET6
3468 "(+ipv6) "
3469 #endif
3470 "initialized, divert %s, nat %s, "
3471 "default to %s, logging ",
3472 #ifdef IPDIVERT
3473 "enabled",
3474 #else
3475 "loadable",
3476 #endif
3477 #ifdef IPFIREWALL_NAT
3478 "enabled",
3479 #else
3480 "loadable",
3481 #endif
3482 default_to_accept ? "accept" : "deny");
3483
3484 /*
3485 * Note: V_xxx variables can be accessed here but the vnet-specific
3486 * initializer may not have been called yet for the VIMAGE case.
3487 * Tunables will have been processed. We will print out values for
3488 * the default vnet.
3489 * XXX This should all be rationalized AFTER 8.0
3490 */
3491 if (V_fw_verbose == 0)
3492 printf("disabled\n");
3493 else if (V_verbose_limit == 0)
3494 printf("unlimited\n");
3495 else
3496 printf("limited to %d packets/entry by default\n",
3497 V_verbose_limit);
3498
3499 /* Check the user-supplied table count for validity */
3500 if (default_fw_tables > IPFW_TABLES_MAX)
3501 default_fw_tables = IPFW_TABLES_MAX;
3502
3503 ipfw_init_sopt_handler();
3504 ipfw_init_obj_rewriter();
3505 ipfw_iface_init();
3506 return (error);
3507 }
3508
3509 /*
3510 * Called for the removal of the last instance only on module unload.
3511 */
3512 static void
3513 ipfw_destroy(void)
3514 {
3515
3516 ipfw_iface_destroy();
3517 ipfw_destroy_sopt_handler();
3518 ipfw_destroy_obj_rewriter();
3519 printf("IP firewall unloaded\n");
3520 }
3521
3522 /*
3523 * Stuff that must be initialized for every instance
3524 * (including the first of course).
3525 */
3526 static int
3527 vnet_ipfw_init(const void *unused)
3528 {
3529 int error, first;
3530 struct ip_fw *rule = NULL;
3531 struct ip_fw_chain *chain;
3532
3533 chain = &V_layer3_chain;
3534
3535 first = IS_DEFAULT_VNET(curvnet) ? 1 : 0;
3536
3537 /* First set up some values that are compile time options */
3538 V_autoinc_step = 100; /* bounded to 1..1000 in add_rule() */
3539 V_fw_deny_unknown_exthdrs = 1;
3540 #ifdef IPFIREWALL_VERBOSE
3541 V_fw_verbose = 1;
3542 #endif
3543 #ifdef IPFIREWALL_VERBOSE_LIMIT
3544 V_verbose_limit = IPFIREWALL_VERBOSE_LIMIT;
3545 #endif
3546 #ifdef IPFIREWALL_NAT
3547 LIST_INIT(&chain->nat);
3548 #endif
3549
3550 /* Init shared services hash table */
3551 ipfw_init_srv(chain);
3552
3553 ipfw_init_counters();
3554 /* Set initial number of tables */
3555 V_fw_tables_max = default_fw_tables;
3556 error = ipfw_init_tables(chain, first);
3557 if (error) {
3558 printf("ipfw2: setting up tables failed\n");
3559 free(chain->map, M_IPFW);
3560 free(rule, M_IPFW);
3561 return (ENOSPC);
3562 }
3563
3564 IPFW_LOCK_INIT(chain);
3565
3566 /* fill and insert the default rule */
3567 rule = ipfw_alloc_rule(chain, sizeof(struct ip_fw));
3568 rule->flags |= IPFW_RULE_NOOPT;
3569 rule->cmd_len = 1;
3570 rule->cmd[0].len = 1;
3571 rule->cmd[0].opcode = default_to_accept ? O_ACCEPT : O_DENY;
3572 chain->default_rule = rule;
3573 ipfw_add_protected_rule(chain, rule, 0);
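/* The default rule is protected and always remains last in the map. */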
3574
3575 ipfw_dyn_init(chain);
3576 ipfw_eaction_init(chain, first);
3577 #ifdef LINEAR_SKIPTO
3578 ipfw_init_skipto_cache(chain);
3579 #endif
3580 ipfw_bpf_init(first);
3581
3583 V_ipfw_vnet_ready = 1; /* Open for business */
3584
3585 /*
3586 * Hook the sockopt handler and pfil hooks for ipv4 and ipv6.
3587 * Even if the latter two fail we still keep the module alive
3588 * because the sockopt and layer2 paths are still useful.
3589 * ipfw[6]_hook return 0 on success, ENOENT on failure,
3590 * so we can ignore the exact return value and just set a flag.
3591 *
3592 * Note that V_fw[6]_enable are manipulated by a SYSCTL_PROC so
3593 * changes in the underlying (per-vnet) variables trigger
3594 * immediate hook()/unhook() calls.
3595 * In layer2 we have the same behaviour, except that V_ether_ipfw
3596 * is checked on each packet because there are no pfil hooks.
3597 */
3598 V_ip_fw_ctl_ptr = ipfw_ctl3;
3599 error = ipfw_attach_hooks();
3600 return (error);
3601 }
3602
3603 /*
3604 * Called for the removal of each instance.
3605 */
3606 static int
3607 vnet_ipfw_uninit(const void *unused)
3608 {
3609 struct ip_fw *reap;
3610 struct ip_fw_chain *chain = &V_layer3_chain;
3611 int i, last;
3612
3613 V_ipfw_vnet_ready = 0; /* tell new callers to go away */
3614 /*
3615 * Disconnect from ipv4, ipv6, layer2 and sockopt.
3616 * Then grab, release and grab again the WLOCK so we make
3617 * sure the update is propagated and no thread is still inside.
3618 */
3619 ipfw_detach_hooks();
3620 V_ip_fw_ctl_ptr = NULL;
3621
3622 last = IS_DEFAULT_VNET(curvnet) ? 1 : 0;
3623
3624 IPFW_UH_WLOCK(chain);
3625 IPFW_UH_WUNLOCK(chain);
3626
3627 ipfw_dyn_uninit(0); /* run the callout_drain */
3628
3629 IPFW_UH_WLOCK(chain);
3630
3631 reap = NULL;
3632 IPFW_WLOCK(chain);
3633 for (i = 0; i < chain->n_rules; i++)
3634 ipfw_reap_add(chain, &reap, chain->map[i]);
3635 free(chain->map, M_IPFW);
3636 #ifdef LINEAR_SKIPTO
3637 ipfw_destroy_skipto_cache(chain);
3638 #endif
3639 IPFW_WUNLOCK(chain);
3640 IPFW_UH_WUNLOCK(chain);
3641 ipfw_destroy_tables(chain, last);
3642 ipfw_eaction_uninit(chain, last);
3643 if (reap != NULL)
3644 ipfw_reap_rules(reap);
3645 vnet_ipfw_iface_destroy(chain);
3646 ipfw_destroy_srv(chain);
3647 IPFW_LOCK_DESTROY(chain);
3648 ipfw_dyn_uninit(1); /* free the remaining parts */
3649 ipfw_destroy_counters();
3650 ipfw_bpf_uninit(last);
3651 return (0);
3652 }
3653
3654 /*
3655 * Module event handler.
3656 * In general we have the choice of handling most of these events by the
3657 * event handler or by the (VNET_)SYS(UN)INIT handlers. I have chosen to
3658 * use the SYSINIT handlers as they are more capable of expressing the
3659 * flow of control during module and vnet operations, so this is just
3660 * a skeleton. Note there is no SYSINIT equivalent of the module
3661 * SHUTDOWN handler, but we don't have anything to do in that case anyhow.
3662 */
3663 static int
3664 ipfw_modevent(module_t mod, int type, void *unused)
3665 {
3666 int err = 0;
3667
3668 switch (type) {
3669 case MOD_LOAD:
3670 /* Called once at module load or
3671 * system boot if compiled in. */
3672 break;
3673 case MOD_QUIESCE:
3674 /* Called before unload. May veto unloading. */
3675 break;
3676 case MOD_UNLOAD:
3677 /* Called during unload. */
3678 break;
3679 case MOD_SHUTDOWN:
3680 /* Called during system shutdown. */
3681 break;
3682 default:
3683 err = EOPNOTSUPP;
3684 break;
3685 }
3686 return err;
3687 }
3688
3689 static moduledata_t ipfwmod = {
3690 "ipfw",
3691 ipfw_modevent,
3692 0
3693 };
3694
3695 /* Define startup order. */
3696 #define IPFW_SI_SUB_FIREWALL SI_SUB_PROTO_FIREWALL
3697 #define IPFW_MODEVENT_ORDER (SI_ORDER_ANY - 255) /* On boot slot in here. */
3698 #define IPFW_MODULE_ORDER (IPFW_MODEVENT_ORDER + 1) /* A little later. */
3699 #define IPFW_VNET_ORDER (IPFW_MODEVENT_ORDER + 2) /* Later still. */
3700
3701 DECLARE_MODULE(ipfw, ipfwmod, IPFW_SI_SUB_FIREWALL, IPFW_MODEVENT_ORDER);
3702 FEATURE(ipfw_ctl3, "ipfw new sockopt calls");
3703 MODULE_VERSION(ipfw, 3);
3704 /* should declare some dependencies here */
3705
3706 /*
3707 * Starting up. Done in order after ipfwmod() has been called.
3708 * VNET_SYSINIT is also called for each existing vnet and each new vnet.
3709 */
3710 SYSINIT(ipfw_init, IPFW_SI_SUB_FIREWALL, IPFW_MODULE_ORDER,
3711 ipfw_init, NULL);
3712 VNET_SYSINIT(vnet_ipfw_init, IPFW_SI_SUB_FIREWALL, IPFW_VNET_ORDER,
3713 vnet_ipfw_init, NULL);
3714
3715 /*
3716 * Closing up shop. These are done in REVERSE ORDER, but still
3717 * after ipfwmod() has been called. Not called on reboot.
3718 * VNET_SYSUNINIT is also called for each exiting vnet as it exits
3719 * or when the module is unloaded.
3720 */
3721 SYSUNINIT(ipfw_destroy, IPFW_SI_SUB_FIREWALL, IPFW_MODULE_ORDER,
3722 ipfw_destroy, NULL);
3723 VNET_SYSUNINIT(vnet_ipfw_uninit, IPFW_SI_SUB_FIREWALL, IPFW_VNET_ORDER,
3724 vnet_ipfw_uninit, NULL);
3725 /* end of file */
3726