Lines Matching +full:echo +full:- +full:active +full:- +full:ms (all matches below are from FreeBSD's sys/netpfil/pf/pf_norm.c)
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
5 * Copyright 2011-2018 Alexander Bluhm <bluhm@openbsd.org>
175 key->fn_src.v4 = ip->ip_src; in pf_ip2key()
176 key->fn_dst.v4 = ip->ip_dst; in pf_ip2key()
177 key->fn_af = AF_INET; in pf_ip2key()
178 key->fn_proto = ip->ip_p; in pf_ip2key()
224 if ((diff = a->fn_proto - b->fn_proto) != 0) in pf_frnode_compare()
226 if ((diff = a->fn_af - b->fn_af) != 0) in pf_frnode_compare()
228 if ((diff = pf_addr_cmp(&a->fn_src, &b->fn_src, a->fn_af)) != 0) in pf_frnode_compare()
230 if ((diff = pf_addr_cmp(&a->fn_dst, &b->fn_dst, a->fn_af)) != 0) in pf_frnode_compare()
240 if ((diff = a->fr_id - b->fr_id) != 0) in pf_frag_compare()
249 u_int32_t expire = time_uptime - in pf_purge_expired_fragments()
262 if (frag->fr_timeout > expire) in pf_purge_fragments()
265 DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag)); in pf_purge_fragments()
306 frnode = frag->fr_node; in pf_free_fragment()
307 RB_REMOVE(pf_frag_tree, &frnode->fn_tree, frag); in pf_free_fragment()
308 MPASS(frnode->fn_fragments >= 1); in pf_free_fragment()
309 frnode->fn_fragments--; in pf_free_fragment()
310 if (frnode->fn_fragments == 0) { in pf_free_fragment()
311 MPASS(RB_EMPTY(&frnode->fn_tree)); in pf_free_fragment()
319 while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) { in pf_free_fragment()
320 TAILQ_REMOVE(&frag->fr_queue, frent, fr_next); in pf_free_fragment()
322 m_freem(frent->fe_m); in pf_free_fragment()
340 MPASS(frnode->fn_fragments >= 1); in pf_find_fragment()
342 frag = RB_FIND(pf_frag_tree, &frnode->fn_tree, &idkey); in pf_find_fragment()
386 if (frent->fe_off == 0) in pf_frent_holes()
387 holes--; in pf_frent_holes()
389 KASSERT(frent->fe_off != 0, ("frent->fe_off != 0")); in pf_frent_holes()
390 if (frent->fe_off == prev->fe_off + prev->fe_len) in pf_frent_holes()
391 holes--; in pf_frent_holes()
394 if (!frent->fe_mff) in pf_frent_holes()
395 holes--; in pf_frent_holes()
397 KASSERT(frent->fe_mff, ("frent->fe_mff")); in pf_frent_holes()
398 if (next->fe_off == frent->fe_off + frent->fe_len) in pf_frent_holes()
399 holes--; in pf_frent_holes()
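pf_frent_holes() above computes the net change in the number of holes caused by one entry: it starts from +1 and subtracts one for each side of the entry that is closed, either because the entry abuts its neighbour or because it is the true start (offset 0) or true end (no more-fragments flag) of the packet. The running count fr_holes starts at 1 when the queue is created (line 633) and reassembly is complete once it drops back to 0 (lines 828 and 933). Below is a small userspace sketch of the same bookkeeping on plain offset/length pairs; the structure and test values are assumptions for illustration, while the kernel works on the fe_off/fe_len/fe_mff fields of the queued entries:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct ent {
    uint16_t off;   /* fragment offset in bytes */
    uint16_t len;   /* payload length */
    bool mff;       /* more-fragments flag */
};

/*
 * Net change in hole count when inserting 'e' between 'prev' and 'next'
 * (either neighbour may be NULL), mirroring pf_frent_holes().
 */
static int
hole_delta(const struct ent *prev, const struct ent *e, const struct ent *next)
{
    int holes = 1;

    if (prev == NULL) {
        if (e->off == 0)
            holes--;                    /* true start of the packet */
    } else if (e->off == prev->off + prev->len)
        holes--;                        /* abuts the previous entry */

    if (next == NULL) {
        if (!e->mff)
            holes--;                    /* true end of the packet */
    } else if (next->off == e->off + e->len)
        holes--;                        /* abuts the next entry */

    return (holes);
}

int
main(void)
{
    struct ent first = { 0, 1480, true };
    struct ent last = { 2960, 100, false };
    struct ent middle = { 1480, 1480, true };
    int fr_holes = 1;                               /* empty queue: one big hole */

    fr_holes += hole_delta(NULL, &first, NULL);     /* delta 0: front covered, rest missing */
    fr_holes += hole_delta(&first, &last, NULL);    /* delta 0: tail covered, gap remains */
    fr_holes += hole_delta(&first, &middle, &last); /* delta -1: plugs the gap */
    assert(fr_holes == 0);                          /* reassembly complete */
    return (0);
}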
414 16 - 1); in pf_frent_index()
415 CTASSERT(((u_int16_t)0xffff >> 3) / PF_FRAG_ENTRY_POINTS == 512 - 1); in pf_frent_index()
417 return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS); in pf_frent_index()
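The two CTASSERTs above pin the bucket arithmetic down: they only hold for PF_FRAG_ENTRY_POINTS == 16, so an offset in 0..0xffff is divided by 0x10000 / 16 == 4096 to select one of 16 entry points, and the highest 8-byte-aligned offset lands in the last bucket. A standalone check of the same arithmetic (the macro value and the sample offsets here are illustrative assumptions):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FRAG_ENTRY_POINTS 16    /* assumed; the CTASSERTs above require 16 */

/* Same mapping as pf_frent_index(): offsets 0..0xffff map to buckets 0..15. */
static uint32_t
frag_bucket(uint16_t off)
{
    return (off / (0x10000 / FRAG_ENTRY_POINTS));   /* off / 4096 */
}

int
main(void)
{
    assert(frag_bucket(0) == 0);
    assert(frag_bucket(4095) == 0);
    assert(frag_bucket(4096) == 1);
    assert(frag_bucket(0xffff & ~7) == 16 - 1);     /* largest 8-byte aligned offset */
    assert((0xffffu >> 3) / FRAG_ENTRY_POINTS == 512 - 1);
    printf("bucket(0x2000) = %u\n", frag_bucket(0x2000));   /* prints 2 */
    return (0);
}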
434 if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT) in pf_frent_insert()
436 frag->fr_entries[index]++; in pf_frent_insert()
439 TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next); in pf_frent_insert()
441 KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off, in pf_frent_insert()
443 TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next); in pf_frent_insert()
446 if (frag->fr_firstoff[index] == NULL) { in pf_frent_insert()
449 frag->fr_firstoff[index] = frent; in pf_frent_insert()
451 if (frent->fe_off < frag->fr_firstoff[index]->fe_off) { in pf_frent_insert()
454 frag->fr_firstoff[index] = frent; in pf_frent_insert()
462 frag->fr_holes += pf_frent_holes(frent); in pf_frent_insert()
476 frag->fr_holes -= pf_frent_holes(frent); in pf_frent_remove()
479 KASSERT(frag->fr_firstoff[index] != NULL, ("frent not found")); in pf_frent_remove()
480 if (frag->fr_firstoff[index]->fe_off == frent->fe_off) { in pf_frent_remove()
482 frag->fr_firstoff[index] = NULL; in pf_frent_remove()
484 KASSERT(frent->fe_off + frent->fe_len <= next->fe_off, in pf_frent_remove()
487 frag->fr_firstoff[index] = next; in pf_frent_remove()
489 frag->fr_firstoff[index] = NULL; in pf_frent_remove()
493 KASSERT(frag->fr_firstoff[index]->fe_off < frent->fe_off, in pf_frent_remove()
494 ("frag->fr_firstoff[index]->fe_off < frent->fe_off")); in pf_frent_remove()
496 KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off, in pf_frent_remove()
502 TAILQ_REMOVE(&frag->fr_queue, frent, fr_next); in pf_frent_remove()
504 KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining")); in pf_frent_remove()
505 frag->fr_entries[index]--; in pf_frent_remove()
518 prev = TAILQ_LAST(&frag->fr_queue, pf_fragq); in pf_frent_previous()
520 if (prev->fe_off <= frent->fe_off) in pf_frent_previous()
531 prev = frag->fr_firstoff[index]; in pf_frent_previous()
542 if (prev->fe_off > frent->fe_off) { in pf_frent_previous()
546 KASSERT(prev->fe_off <= frent->fe_off, in pf_frent_previous()
547 ("prev->fe_off <= frent->fe_off")); in pf_frent_previous()
556 if (next->fe_off > frent->fe_off) in pf_frent_previous()
575 if (frent->fe_len == 0) { in pf_fillup_fragment()
581 if (frent->fe_mff && (frent->fe_len & 0x7)) { in pf_fillup_fragment()
582 DPFPRINTF(("bad fragment: mff and len %d\n", frent->fe_len)); in pf_fillup_fragment()
587 if (frent->fe_off + frent->fe_len > IP_MAXPACKET) { in pf_fillup_fragment()
589 frent->fe_off + frent->fe_len)); in pf_fillup_fragment()
593 DPFPRINTF((key->fn_af == AF_INET ? in pf_fillup_fragment()
594 "reass frag %d @ %d-%d\n" : "reass frag %#08x @ %d-%d\n", in pf_fillup_fragment()
595 id, frent->fe_off, frent->fe_off + frent->fe_len)); in pf_fillup_fragment()
625 RB_INIT(&frnode->fn_tree); in pf_fillup_fragment()
626 frnode->fn_fragments = 0; in pf_fillup_fragment()
628 memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff)); in pf_fillup_fragment()
629 memset(frag->fr_entries, 0, sizeof(frag->fr_entries)); in pf_fillup_fragment()
630 frag->fr_timeout = time_uptime; in pf_fillup_fragment()
631 TAILQ_INIT(&frag->fr_queue); in pf_fillup_fragment()
632 frag->fr_maxlen = frent->fe_len; in pf_fillup_fragment()
633 frag->fr_holes = 1; in pf_fillup_fragment()
635 frag->fr_id = id; in pf_fillup_fragment()
636 frag->fr_node = frnode; in pf_fillup_fragment()
638 RB_INSERT(pf_frag_tree, &frnode->fn_tree, frag); in pf_fillup_fragment()
639 frnode->fn_fragments++; in pf_fillup_fragment()
640 if (frnode->fn_fragments == 1) in pf_fillup_fragment()
651 KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue")); in pf_fillup_fragment()
652 MPASS(frag->fr_node); in pf_fillup_fragment()
655 if (frent->fe_len > frag->fr_maxlen) in pf_fillup_fragment()
656 frag->fr_maxlen = frent->fe_len; in pf_fillup_fragment()
659 total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off + in pf_fillup_fragment()
660 TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len; in pf_fillup_fragment()
663 if (frent->fe_off + frent->fe_len < total && !frent->fe_mff) in pf_fillup_fragment()
667 if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) { in pf_fillup_fragment()
668 if (frent->fe_off + frent->fe_len > total || in pf_fillup_fragment()
669 (frent->fe_off + frent->fe_len == total && frent->fe_mff)) in pf_fillup_fragment()
672 if (frent->fe_off + frent->fe_len == total && !frent->fe_mff) in pf_fillup_fragment()
679 after = TAILQ_FIRST(&frag->fr_queue); in pf_fillup_fragment()
685 if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) { in pf_fillup_fragment()
688 if (frag->fr_node->fn_af == AF_INET6) in pf_fillup_fragment()
691 precut = prev->fe_off + prev->fe_len - frent->fe_off; in pf_fillup_fragment()
692 if (precut >= frent->fe_len) { in pf_fillup_fragment()
697 m_adj(frent->fe_m, precut); in pf_fillup_fragment()
698 frent->fe_off += precut; in pf_fillup_fragment()
699 frent->fe_len -= precut; in pf_fillup_fragment()
702 for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off; in pf_fillup_fragment()
706 aftercut = frent->fe_off + frent->fe_len - after->fe_off; in pf_fillup_fragment()
707 if (aftercut < after->fe_len) { in pf_fillup_fragment()
709 m_adj(after->fe_m, aftercut); in pf_fillup_fragment()
712 after->fe_off += aftercut; in pf_fillup_fragment()
713 after->fe_len -= aftercut; in pf_fillup_fragment()
717 m_freem(after->fe_m); in pf_fillup_fragment()
729 m_freem(after->fe_m); in pf_fillup_fragment()
742 if (frag->fr_node->fn_af == AF_INET) in pf_fillup_fragment()
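The precut/aftercut arithmetic above (lines 685-729) trims any byte overlap between the new fragment and its neighbours so that queued entries never share data; line 688 branches off for AF_INET6, which does not tolerate overlapping fragments at all, and the aftercut step is repeated for every following entry the new fragment overlaps. Below is a minimal userspace sketch of the IPv4 trimming path on plain offset/length pairs. The names and the tiny test are assumptions; the kernel additionally trims the mbufs with m_adj() and frees entries that end up fully covered:

#include <stdint.h>
#include <stdio.h>

struct ent {
    uint32_t off;   /* payload offset */
    uint32_t len;   /* payload length */
};

/*
 * Trim the new entry against the queued entry before it (precut) and the
 * first queued entry after it (aftercut).  Returns -1 when the new entry
 * is completely covered by existing data and should be dropped.
 */
static int
trim_overlap(const struct ent *prev, struct ent *new, struct ent *after)
{
    uint32_t precut, aftercut;

    if (prev != NULL && prev->off + prev->len > new->off) {
        precut = prev->off + prev->len - new->off;
        if (precut >= new->len)
            return (-1);            /* nothing left of the new data */
        new->off += precut;         /* kernel also does m_adj(m, precut) */
        new->len -= precut;
    }
    if (after != NULL && new->off + new->len > after->off) {
        aftercut = new->off + new->len - after->off;
        if (aftercut < after->len) {
            after->off += aftercut; /* kernel also m_adj()s that mbuf */
            after->len -= aftercut;
        } else
            after->len = 0;         /* fully overlapped: kernel frees the entry */
    }
    return (0);
}

int
main(void)
{
    struct ent prev = { 0, 1480 }, new = { 1472, 1480 }, after = { 2944, 16 };

    if (trim_overlap(&prev, &new, &after) == 0)
        printf("new %u+%u, after %u+%u\n", new.off, new.len, after.off, after.len);
    return (0);
}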
767 frent = TAILQ_FIRST(&frag->fr_queue); in pf_join_fragment()
768 TAILQ_REMOVE(&frag->fr_queue, frent, fr_next); in pf_join_fragment()
770 m = frent->fe_m; in pf_join_fragment()
771 if ((frent->fe_hdrlen + frent->fe_len) < m->m_pkthdr.len) in pf_join_fragment()
772 m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len); in pf_join_fragment()
774 while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) { in pf_join_fragment()
775 TAILQ_REMOVE(&frag->fr_queue, frent, fr_next); in pf_join_fragment()
777 m2 = frent->fe_m; in pf_join_fragment()
779 m_adj(m2, frent->fe_hdrlen); in pf_join_fragment()
781 if (frent->fe_len < m2->m_pkthdr.len) in pf_join_fragment()
782 m_adj(m2, frent->fe_len - m2->m_pkthdr.len); in pf_join_fragment()
813 frent->fe_m = m; in pf_reassemble()
814 frent->fe_hdrlen = ip->ip_hl << 2; in pf_reassemble()
815 frent->fe_extoff = 0; in pf_reassemble()
816 frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2); in pf_reassemble()
817 frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3; in pf_reassemble()
818 frent->fe_mff = ntohs(ip->ip_off) & IP_MF; in pf_reassemble()
822 if ((frag = pf_fillup_fragment(&key, ip->ip_id, frent, reason)) == NULL) in pf_reassemble()
828 if (frag->fr_holes) { in pf_reassemble()
829 DPFPRINTF(("frag %d, holes %d\n", frag->fr_id, frag->fr_holes)); in pf_reassemble()
834 frent = TAILQ_FIRST(&frag->fr_queue); in pf_reassemble()
836 total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off + in pf_reassemble()
837 TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len; in pf_reassemble()
838 hdrlen = frent->fe_hdrlen; in pf_reassemble()
840 maxlen = frag->fr_maxlen; in pf_reassemble()
841 frag_id = frag->fr_id; in pf_reassemble()
845 if (m->m_flags & M_PKTHDR) { in pf_reassemble()
847 for (m = *m0; m; m = m->m_next) in pf_reassemble()
848 plen += m->m_len; in pf_reassemble()
850 m->m_pkthdr.len = plen; in pf_reassemble()
860 ftag->ft_hdrlen = hdrlen; in pf_reassemble()
861 ftag->ft_extoff = 0; in pf_reassemble()
862 ftag->ft_maxlen = maxlen; in pf_reassemble()
863 ftag->ft_id = frag_id; in pf_reassemble()
867 ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_len, in pf_reassemble()
869 ip->ip_len = htons(hdrlen + total); in pf_reassemble()
870 ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_off, in pf_reassemble()
871 ip->ip_off & ~(IP_MF|IP_OFFMASK), 0); in pf_reassemble()
872 ip->ip_off &= ~(IP_MF|IP_OFFMASK); in pf_reassemble()
876 ip->ip_len = 0; in pf_reassemble()
882 DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len))); in pf_reassemble()
912 frent->fe_m = m; in pf_reassemble6()
913 frent->fe_hdrlen = hdrlen; in pf_reassemble6()
914 frent->fe_extoff = extoff; in pf_reassemble6()
915 frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen; in pf_reassemble6()
916 frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK); in pf_reassemble6()
917 frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG; in pf_reassemble6()
919 key.fn_src.v6 = ip6->ip6_src; in pf_reassemble6()
920 key.fn_dst.v6 = ip6->ip6_dst; in pf_reassemble6()
925 if ((frag = pf_fillup_fragment(&key, fraghdr->ip6f_ident, frent, reason)) == NULL) { in pf_reassemble6()
933 if (frag->fr_holes) { in pf_reassemble6()
934 DPFPRINTF(("frag %d, holes %d\n", frag->fr_id, in pf_reassemble6()
935 frag->fr_holes)); in pf_reassemble6()
941 frent = TAILQ_FIRST(&frag->fr_queue); in pf_reassemble6()
943 extoff = frent->fe_extoff; in pf_reassemble6()
944 maxlen = frag->fr_maxlen; in pf_reassemble6()
945 frag_id = frag->fr_id; in pf_reassemble6()
946 total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off + in pf_reassemble6()
947 TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len; in pf_reassemble6()
948 hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag); in pf_reassemble6()
965 if (m->m_flags & M_PKTHDR) { in pf_reassemble6()
967 for (m = *m0; m; m = m->m_next) in pf_reassemble6()
968 plen += m->m_len; in pf_reassemble6()
970 m->m_pkthdr.len = plen; in pf_reassemble6()
977 ftag->ft_hdrlen = hdrlen; in pf_reassemble6()
978 ftag->ft_extoff = extoff; in pf_reassemble6()
979 ftag->ft_maxlen = maxlen; in pf_reassemble6()
980 ftag->ft_id = frag_id; in pf_reassemble6()
984 ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total); in pf_reassemble6()
993 ip6->ip6_nxt = proto; in pf_reassemble6()
995 if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) { in pf_reassemble6()
997 ip6->ip6_plen = 0; in pf_reassemble6()
1003 DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip6->ip6_plen))); in pf_reassemble6()
1022 return (m->m_pkthdr.len); in pf_max_frag_size()
1026 return (ftag->ft_maxlen); in pf_max_frag_size()
1042 hdrlen = ftag->ft_hdrlen; in pf_refragment6()
1043 extoff = ftag->ft_extoff; in pf_refragment6()
1044 maxlen = ftag->ft_maxlen; in pf_refragment6()
1045 frag_id = ftag->ft_id; in pf_refragment6()
1062 proto = hdr->ip6_nxt; in pf_refragment6()
1063 hdr->ip6_nxt = IPPROTO_FRAGMENT; in pf_refragment6()
1066 /* In case of link-local traffic we'll need a scope set. */ in pf_refragment6()
1069 in6_setscope(&hdr->ip6_src, ifp, NULL); in pf_refragment6()
1070 in6_setscope(&hdr->ip6_dst, ifp, NULL); in pf_refragment6()
1084 m = (*m0)->m_nextpkt; in pf_refragment6()
1085 (*m0)->m_nextpkt = NULL; in pf_refragment6()
1097 t = m->m_nextpkt; in pf_refragment6()
1098 m->m_nextpkt = NULL; in pf_refragment6()
1099 m->m_flags |= M_SKIP_FIREWALL; in pf_refragment6()
1113 dst.sin6_addr = hdr->ip6_dst; in pf_refragment6()
1115 if (m->m_pkthdr.len <= if_getmtu(ifp)) { in pf_refragment6()
1123 MPASS(m->m_pkthdr.rcvif != NULL); in pf_refragment6()
1140 struct ip *h = mtod(pd->m, struct ip *); in pf_normalize_ip()
1141 int mff = (ntohs(h->ip_off) & IP_MF); in pf_normalize_ip()
1142 int hlen = h->ip_hl << 2; in pf_normalize_ip()
1143 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3; in pf_normalize_ip()
1146 int tag = -1; in pf_normalize_ip()
1152 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr); in pf_normalize_ip()
1156 * - enforced packet normalization operation just like in OpenBSD in pf_normalize_ip()
1157 * - fragment reassembly depends on V_pf_status.reass in pf_normalize_ip()
1159 * - packet normalization is performed if there is a matching scrub rule in pf_normalize_ip()
1160 * - fragment reassembly is performed if the matching rule has no in pf_normalize_ip()
1165 pf_counter_u64_add(&r->evaluations, 1); in pf_normalize_ip()
1166 if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot) in pf_normalize_ip()
1167 r = r->skip[PF_SKIP_IFP]; in pf_normalize_ip()
1168 else if (r->direction && r->direction != pd->dir) in pf_normalize_ip()
1169 r = r->skip[PF_SKIP_DIR]; in pf_normalize_ip()
1170 else if (r->af && r->af != AF_INET) in pf_normalize_ip()
1171 r = r->skip[PF_SKIP_AF]; in pf_normalize_ip()
1172 else if (r->proto && r->proto != h->ip_p) in pf_normalize_ip()
1173 r = r->skip[PF_SKIP_PROTO]; in pf_normalize_ip()
1174 else if (PF_MISMATCHAW(&r->src.addr, in pf_normalize_ip()
1175 (struct pf_addr *)&h->ip_src.s_addr, AF_INET, in pf_normalize_ip()
1176 r->src.neg, pd->kif, M_GETFIB(pd->m))) in pf_normalize_ip()
1177 r = r->skip[PF_SKIP_SRC_ADDR]; in pf_normalize_ip()
1178 else if (PF_MISMATCHAW(&r->dst.addr, in pf_normalize_ip()
1179 (struct pf_addr *)&h->ip_dst.s_addr, AF_INET, in pf_normalize_ip()
1180 r->dst.neg, NULL, M_GETFIB(pd->m))) in pf_normalize_ip()
1181 r = r->skip[PF_SKIP_DST_ADDR]; in pf_normalize_ip()
1182 else if (r->match_tag && !pf_match_tag(pd->m, r, &tag, in pf_normalize_ip()
1183 pd->pf_mtag ? pd->pf_mtag->tag : 0)) in pf_normalize_ip()
1192 if (r == NULL || r->action == PF_NOSCRUB) in pf_normalize_ip()
1196 pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1); in pf_normalize_ip()
1197 pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len); in pf_normalize_ip()
1199 pf_rule_to_actions(r, &pd->act); in pf_normalize_ip()
1208 if (hlen > ntohs(h->ip_len)) { in pf_normalize_ip()
1213 /* Clear IP_DF if the rule uses the no-df option or we're in no-df mode */ in pf_normalize_ip()
1215 (r != NULL && r->rule_flag & PFRULE_NODF)) && in pf_normalize_ip()
1216 (h->ip_off & htons(IP_DF)) in pf_normalize_ip()
1218 u_int16_t ip_off = h->ip_off; in pf_normalize_ip()
1220 h->ip_off &= htons(~IP_DF); in pf_normalize_ip()
1221 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0); in pf_normalize_ip()
1230 * no-df above, fine. Otherwise drop it. in pf_normalize_ip()
1232 if (h->ip_off & htons(IP_DF)) { in pf_normalize_ip()
1237 ip_len = ntohs(h->ip_len) - hlen; in pf_normalize_ip()
1252 (r != NULL && !(r->rule_flag & PFRULE_FRAGMENT_NOREASS)) in pf_normalize_ip()
1259 DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max)); in pf_normalize_ip()
1260 verdict = pf_reassemble(&pd->m, reason); in pf_normalize_ip()
1266 if (pd->m == NULL) in pf_normalize_ip()
1269 h = mtod(pd->m, struct ip *); in pf_normalize_ip()
1270 pd->tot_len = htons(h->ip_len); in pf_normalize_ip()
1274 if (h->ip_off & ~htons(IP_DF)) { in pf_normalize_ip()
1275 u_int16_t ip_off = h->ip_off; in pf_normalize_ip()
1277 h->ip_off &= htons(IP_DF); in pf_normalize_ip()
1278 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0); in pf_normalize_ip()
1288 if (r != NULL && r->log) in pf_normalize_ip()
1307 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr); in pf_normalize_ip6()
1311 * - enforced packet normalization operation just like in OpenBSD in pf_normalize_ip6()
1313 * - packet normalization is performed if there is a matching scrub rule in pf_normalize_ip6()
1318 pf_counter_u64_add(&r->evaluations, 1); in pf_normalize_ip6()
1319 if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot) in pf_normalize_ip6()
1320 r = r->skip[PF_SKIP_IFP]; in pf_normalize_ip6()
1321 else if (r->direction && r->direction != pd->dir) in pf_normalize_ip6()
1322 r = r->skip[PF_SKIP_DIR]; in pf_normalize_ip6()
1323 else if (r->af && r->af != AF_INET6) in pf_normalize_ip6()
1324 r = r->skip[PF_SKIP_AF]; in pf_normalize_ip6()
1325 else if (r->proto && r->proto != pd->proto) in pf_normalize_ip6()
1326 r = r->skip[PF_SKIP_PROTO]; in pf_normalize_ip6()
1327 else if (PF_MISMATCHAW(&r->src.addr, in pf_normalize_ip6()
1328 (struct pf_addr *)&pd->src, AF_INET6, in pf_normalize_ip6()
1329 r->src.neg, pd->kif, M_GETFIB(pd->m))) in pf_normalize_ip6()
1330 r = r->skip[PF_SKIP_SRC_ADDR]; in pf_normalize_ip6()
1331 else if (PF_MISMATCHAW(&r->dst.addr, in pf_normalize_ip6()
1332 (struct pf_addr *)&pd->dst, AF_INET6, in pf_normalize_ip6()
1333 r->dst.neg, NULL, M_GETFIB(pd->m))) in pf_normalize_ip6()
1334 r = r->skip[PF_SKIP_DST_ADDR]; in pf_normalize_ip6()
1342 if (r == NULL || r->action == PF_NOSCRUB) in pf_normalize_ip6()
1346 pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1); in pf_normalize_ip6()
1347 pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len); in pf_normalize_ip6()
1349 pf_rule_to_actions(r, &pd->act); in pf_normalize_ip6()
1352 if (!pf_pull_hdr(pd->m, off, &frag, sizeof(frag), NULL, reason, AF_INET6)) in pf_normalize_ip6()
1358 if (pd->virtual_proto == PF_VPROTO_FRAGMENT) { in pf_normalize_ip6()
1361 if (pf_reassemble6(&pd->m, &frag, off, pd->extoff, reason) != PF_PASS) in pf_normalize_ip6()
1363 if (pd->m == NULL) in pf_normalize_ip6()
1365 h = mtod(pd->m, struct ip6_hdr *); in pf_normalize_ip6()
1366 pd->tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr); in pf_normalize_ip6()
1377 struct tcphdr *th = &pd->hdr.tcp; in pf_normalize_tcp()
1381 sa_family_t af = pd->af; in pf_normalize_tcp()
1386 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr); in pf_normalize_tcp()
1391 pf_counter_u64_add(&r->evaluations, 1); in pf_normalize_tcp()
1392 if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot) in pf_normalize_tcp()
1393 r = r->skip[PF_SKIP_IFP]; in pf_normalize_tcp()
1394 else if (r->direction && r->direction != pd->dir) in pf_normalize_tcp()
1395 r = r->skip[PF_SKIP_DIR]; in pf_normalize_tcp()
1396 else if (r->af && r->af != af) in pf_normalize_tcp()
1397 r = r->skip[PF_SKIP_AF]; in pf_normalize_tcp()
1398 else if (r->proto && r->proto != pd->proto) in pf_normalize_tcp()
1399 r = r->skip[PF_SKIP_PROTO]; in pf_normalize_tcp()
1400 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, in pf_normalize_tcp()
1401 r->src.neg, pd->kif, M_GETFIB(pd->m))) in pf_normalize_tcp()
1402 r = r->skip[PF_SKIP_SRC_ADDR]; in pf_normalize_tcp()
1403 else if (r->src.port_op && !pf_match_port(r->src.port_op, in pf_normalize_tcp()
1404 r->src.port[0], r->src.port[1], th->th_sport)) in pf_normalize_tcp()
1405 r = r->skip[PF_SKIP_SRC_PORT]; in pf_normalize_tcp()
1406 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, in pf_normalize_tcp()
1407 r->dst.neg, NULL, M_GETFIB(pd->m))) in pf_normalize_tcp()
1408 r = r->skip[PF_SKIP_DST_ADDR]; in pf_normalize_tcp()
1409 else if (r->dst.port_op && !pf_match_port(r->dst.port_op, in pf_normalize_tcp()
1410 r->dst.port[0], r->dst.port[1], th->th_dport)) in pf_normalize_tcp()
1411 r = r->skip[PF_SKIP_DST_PORT]; in pf_normalize_tcp()
1412 else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match( in pf_normalize_tcp()
1414 r->os_fingerprint)) in pf_normalize_tcp()
1425 if (rm == NULL || rm->action == PF_NOSCRUB) in pf_normalize_tcp()
1429 pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1); in pf_normalize_tcp()
1430 pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len); in pf_normalize_tcp()
1432 pf_rule_to_actions(rm, &pd->act); in pf_normalize_tcp()
1435 if (rm && rm->rule_flag & PFRULE_REASSEMBLE_TCP) in pf_normalize_tcp()
1436 pd->flags |= PFDESC_TCP_NORM; in pf_normalize_tcp()
1459 if (th->th_off < (sizeof(struct tcphdr) >> 2)) in pf_normalize_tcp()
1467 ov = *(u_int16_t *)(&th->th_ack + 1); in pf_normalize_tcp()
1470 nv = *(u_int16_t *)(&th->th_ack + 1); in pf_normalize_tcp()
1472 th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum, ov, nv, 0); in pf_normalize_tcp()
1477 if (!(flags & TH_URG) && th->th_urp) { in pf_normalize_tcp()
1478 th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum, th->th_urp, in pf_normalize_tcp()
1480 th->th_urp = 0; in pf_normalize_tcp()
1486 m_copyback(pd->m, pd->off, sizeof(*th), (caddr_t)th); in pf_normalize_tcp()
1492 if (rm != NULL && r->log) in pf_normalize_tcp()
1505 KASSERT((src->scrub == NULL), in pf_normalize_tcp_init()
1506 ("pf_normalize_tcp_init: src->scrub != NULL")); in pf_normalize_tcp_init()
1508 src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT); in pf_normalize_tcp_init()
1509 if (src->scrub == NULL) in pf_normalize_tcp_init()
1512 switch (pd->af) { in pf_normalize_tcp_init()
1515 struct ip *h = mtod(pd->m, struct ip *); in pf_normalize_tcp_init()
1516 src->scrub->pfss_ttl = h->ip_ttl; in pf_normalize_tcp_init()
1522 struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *); in pf_normalize_tcp_init()
1523 src->scrub->pfss_ttl = h->ip6_hlim; in pf_normalize_tcp_init()
1528 unhandled_af(pd->af); in pf_normalize_tcp_init()
1538 olen = (th->th_off << 2) - sizeof(*th); in pf_normalize_tcp_init()
1539 if (olen < TCPOLEN_TIMESTAMP || !pf_pull_hdr(pd->m, in pf_normalize_tcp_init()
1540 pd->off + sizeof(*th), opts, olen, NULL, NULL, pd->af)) in pf_normalize_tcp_init()
1546 src->scrub->pfss_flags |= PFSS_TIMESTAMP; in pf_normalize_tcp_init()
1547 src->scrub->pfss_ts_mod = arc4random(); in pf_normalize_tcp_init()
1551 src->scrub->pfss_tsval0 = ntohl(tsval); in pf_normalize_tcp_init()
1552 src->scrub->pfss_tsval = ntohl(tsval); in pf_normalize_tcp_init()
1553 src->scrub->pfss_tsecr = ntohl(tsecr); in pf_normalize_tcp_init()
1554 getmicrouptime(&src->scrub->pfss_last); in pf_normalize_tcp_init()
1566 uma_zfree(V_pf_state_scrub_z, state->src.scrub); in pf_normalize_tcp_cleanup()
1567 uma_zfree(V_pf_state_scrub_z, state->dst.scrub); in pf_normalize_tcp_cleanup()
1575 src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT); in pf_normalize_sctp_init()
1576 if (src->scrub == NULL) in pf_normalize_sctp_init()
1579 dst->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT); in pf_normalize_sctp_init()
1580 if (dst->scrub == NULL) { in pf_normalize_sctp_init()
1585 dst->scrub->pfss_v_tag = pd->sctp_initiate_tag; in pf_normalize_sctp_init()
1603 KASSERT((src->scrub || dst->scrub), in pf_normalize_tcp_stateful()
1604 ("%s: src->scrub && dst->scrub!", __func__)); in pf_normalize_tcp_stateful()
1611 switch (pd->af) { in pf_normalize_tcp_stateful()
1614 if (src->scrub) { in pf_normalize_tcp_stateful()
1615 struct ip *h = mtod(pd->m, struct ip *); in pf_normalize_tcp_stateful()
1616 if (h->ip_ttl > src->scrub->pfss_ttl) in pf_normalize_tcp_stateful()
1617 src->scrub->pfss_ttl = h->ip_ttl; in pf_normalize_tcp_stateful()
1618 h->ip_ttl = src->scrub->pfss_ttl; in pf_normalize_tcp_stateful()
1625 if (src->scrub) { in pf_normalize_tcp_stateful()
1626 struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *); in pf_normalize_tcp_stateful()
1627 if (h->ip6_hlim > src->scrub->pfss_ttl) in pf_normalize_tcp_stateful()
1628 src->scrub->pfss_ttl = h->ip6_hlim; in pf_normalize_tcp_stateful()
1629 h->ip6_hlim = src->scrub->pfss_ttl; in pf_normalize_tcp_stateful()
1635 unhandled_af(pd->af); in pf_normalize_tcp_stateful()
1638 olen = (th->th_off << 2) - sizeof(*th); in pf_normalize_tcp_stateful()
1641 ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) || in pf_normalize_tcp_stateful()
1642 (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) && in pf_normalize_tcp_stateful()
1643 pf_pull_hdr(pd->m, pd->off + sizeof(*th), opts, olen, NULL, NULL, pd->af)) { in pf_normalize_tcp_stateful()
1668 if (tsval && src->scrub && in pf_normalize_tcp_stateful()
1669 (src->scrub->pfss_flags & PFSS_TIMESTAMP)) { in pf_normalize_tcp_stateful()
1673 ts, htonl(tsval + src->scrub->pfss_ts_mod), in pf_normalize_tcp_stateful()
1674 PF_ALGNMNT(ts - opts)); in pf_normalize_tcp_stateful()
1679 if (tsecr && dst->scrub && in pf_normalize_tcp_stateful()
1680 (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) { in pf_normalize_tcp_stateful()
1682 tsecr = ntohl(tsecr) - dst->scrub->pfss_ts_mod; in pf_normalize_tcp_stateful()
1684 PF_ALGNMNT(tsr - opts)); in pf_normalize_tcp_stateful()
1695 m_copyback(pd->m, pd->off + sizeof(*th), olen, opts); in pf_normalize_tcp_stateful()
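Lines 1668-1695 above implement pf's timestamp modulation: each state side keeps a random secret offset (pfss_ts_mod, picked with arc4random() in pf_normalize_tcp_init(), line 1547), the sender's offset is added to the TS value as the packet passes pf (line 1673), and the receiver's offset is subtracted from the TS echo (line 1682), so each host's raw timestamp clock stays hidden from the other side while the echoes it receives still match its own clock. A minimal round-trip sketch of the idea; the helper names are assumptions, and the real code patches the option bytes and TCP checksum inside the mbuf:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Outbound: shift the sender's raw TS value by its secret offset. */
static uint32_t
mod_tsval(uint32_t tsval_net, uint32_t ts_mod)
{
    return (htonl(ntohl(tsval_net) + ts_mod));
}

/* Return traffic: remove the offset from the echoed value again. */
static uint32_t
unmod_tsecr(uint32_t tsecr_net, uint32_t ts_mod)
{
    return (htonl(ntohl(tsecr_net) - ts_mod));
}

int
main(void)
{
    uint32_t ts_mod = arc4random();     /* per-state secret, as in line 1547 */
    uint32_t raw = htonl(12345);        /* host's real TS value, network order */
    uint32_t seen_by_peer = mod_tsval(raw, ts_mod);

    /* The peer echoes what it saw; stripping the offset restores the raw value. */
    printf("round trip ok: %d\n", unmod_tsecr(seen_by_peer, ts_mod) == raw);
    return (0);
}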
1701 * The fastest allowed timestamp clock is 1ms. That turns out to in pf_normalize_tcp_stateful()
1703 * TS echo check only works for the first 12 days of a connection in pf_normalize_tcp_stateful()
1710 if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) && in pf_normalize_tcp_stateful()
1711 (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE || in pf_normalize_tcp_stateful()
1712 time_uptime - (state->creation / 1000) > TS_MAX_CONN)) { in pf_normalize_tcp_stateful()
1718 src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS) in pf_normalize_tcp_stateful()
1721 if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) && in pf_normalize_tcp_stateful()
1722 uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) { in pf_normalize_tcp_stateful()
1728 dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS) in pf_normalize_tcp_stateful()
1732 if (got_ts && src->scrub && dst->scrub && in pf_normalize_tcp_stateful()
1733 (src->scrub->pfss_flags & PFSS_PAWS) && in pf_normalize_tcp_stateful()
1734 (dst->scrub->pfss_flags & PFSS_PAWS)) { in pf_normalize_tcp_stateful()
1735 /* Validate that the timestamps are "in-window". in pf_normalize_tcp_stateful()
1749 * - The timestamp on this packet must be greater than in pf_normalize_tcp_stateful()
1755 * - The timestamp will be less than or equal to in pf_normalize_tcp_stateful()
1758 * clock rate as 1ms. We will allow clocks to be in pf_normalize_tcp_stateful()
1767 * during a SYN flood. Proof MS has at least one in pf_normalize_tcp_stateful()
1770 * - The TCP timestamp option must also echo the other in pf_normalize_tcp_stateful()
1776 * This gives us an upperbound on the TS echo. in pf_normalize_tcp_stateful()
1778 * - The lowerbound on the TS echo is a little more in pf_normalize_tcp_stateful()
1781 * network conditions that re-order packets and in pf_normalize_tcp_stateful()
1784 * the TS echo will never be less than the original in pf_normalize_tcp_stateful()
1790 * timestamp clock of 1ms will wrap its 32bit space in in pf_normalize_tcp_stateful()
1794 * lowerbound to the TS echo check. in pf_normalize_tcp_stateful()
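For concreteness, the arithmetic behind the comments above: at the fastest allowed tick of 1 ms the 32-bit timestamp space wraps after 2^32 ms, roughly 49.7 days, and sequence-style signed comparisons such as SEQ_LT()/SEQ_GT() only order two stamps reliably when they are less than half that apart, roughly 24.8 days. That is why the block at lines 1710-1728 simply stops enforcing PAWS (clearing PFSS_PAWS) once a side has been idle for longer than TS_MAX_IDLE or, per the 12-day remark at line 1703, once the connection is older than TS_MAX_CONN, rather than risk dropping valid traffic on a wrapped timestamp clock.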
1805 if ((ts_fudge = state->rule->timeout[PFTM_TS_DIFF]) == 0) in pf_normalize_tcp_stateful()
1812 timevalsub(&delta_ts, &src->scrub->pfss_last); in pf_normalize_tcp_stateful()
1816 if ((src->state >= TCPS_ESTABLISHED && in pf_normalize_tcp_stateful()
1817 dst->state >= TCPS_ESTABLISHED) && in pf_normalize_tcp_stateful()
1818 (SEQ_LT(tsval, dst->scrub->pfss_tsecr) || in pf_normalize_tcp_stateful()
1819 SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) || in pf_normalize_tcp_stateful()
1820 (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) || in pf_normalize_tcp_stateful()
1821 SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) { in pf_normalize_tcp_stateful()
1824 * - Solaris 2.6 and 2.7 are known to send another ACK in pf_normalize_tcp_stateful()
1830 SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ', in pf_normalize_tcp_stateful()
1831 SEQ_GT(tsval, src->scrub->pfss_tsval + in pf_normalize_tcp_stateful()
1833 SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ', in pf_normalize_tcp_stateful()
1834 SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' ')); in pf_normalize_tcp_stateful()
1840 DPFPRINTF((" src->tsval: %u tsecr: %u\n", in pf_normalize_tcp_stateful()
1841 src->scrub->pfss_tsval, src->scrub->pfss_tsecr)); in pf_normalize_tcp_stateful()
1842 DPFPRINTF((" dst->tsval: %u tsecr: %u tsval0: %u" in pf_normalize_tcp_stateful()
1843 "\n", dst->scrub->pfss_tsval, in pf_normalize_tcp_stateful()
1844 dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0)); in pf_normalize_tcp_stateful()
1857 ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED) in pf_normalize_tcp_stateful()
1858 || pd->p_len > 0 || (tcp_get_flags(th) & TH_SYN)) && in pf_normalize_tcp_stateful()
1859 src->scrub && dst->scrub && in pf_normalize_tcp_stateful()
1860 (src->scrub->pfss_flags & PFSS_PAWS) && in pf_normalize_tcp_stateful()
1861 (dst->scrub->pfss_flags & PFSS_PAWS)) { in pf_normalize_tcp_stateful()
1864 * - connection opening or closing (often not even sent). in pf_normalize_tcp_stateful()
1867 * - on a TCP reset. RFC suggests not even looking at TS. in pf_normalize_tcp_stateful()
1868 * - on an empty ACK. The TS will not be echoed so it will in pf_normalize_tcp_stateful()
1872 * ACKs :-( in pf_normalize_tcp_stateful()
1888 if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) { in pf_normalize_tcp_stateful()
1909 * timestamped. But I think there are middle-man devices that hijack in pf_normalize_tcp_stateful()
1913 if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags & in pf_normalize_tcp_stateful()
1916 src->scrub->pfss_flags |= PFSS_DATA_TS; in pf_normalize_tcp_stateful()
1918 src->scrub->pfss_flags |= PFSS_DATA_NOTS; in pf_normalize_tcp_stateful()
1919 if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub && in pf_normalize_tcp_stateful()
1920 (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) { in pf_normalize_tcp_stateful()
1935 if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags & in pf_normalize_tcp_stateful()
1937 getmicrouptime(&src->scrub->pfss_last); in pf_normalize_tcp_stateful()
1938 if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) || in pf_normalize_tcp_stateful()
1939 (src->scrub->pfss_flags & PFSS_PAWS) == 0) in pf_normalize_tcp_stateful()
1940 src->scrub->pfss_tsval = tsval; in pf_normalize_tcp_stateful()
1943 if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) || in pf_normalize_tcp_stateful()
1944 (src->scrub->pfss_flags & PFSS_PAWS) == 0) in pf_normalize_tcp_stateful()
1945 src->scrub->pfss_tsecr = tsecr; in pf_normalize_tcp_stateful()
1947 if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 && in pf_normalize_tcp_stateful()
1948 (SEQ_LT(tsval, src->scrub->pfss_tsval0) || in pf_normalize_tcp_stateful()
1949 src->scrub->pfss_tsval0 == 0)) { in pf_normalize_tcp_stateful()
1951 src->scrub->pfss_tsval0 = tsval; in pf_normalize_tcp_stateful()
1955 if ((src->scrub->pfss_flags & PFSS_PAWS) == 0) in pf_normalize_tcp_stateful()
1956 src->scrub->pfss_flags |= PFSS_PAWS; in pf_normalize_tcp_stateful()
1970 olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr); in pf_normalize_mss()
1971 optsoff = pd->off + sizeof(struct tcphdr); in pf_normalize_mss()
1973 !pf_pull_hdr(pd->m, optsoff, opts, olen, NULL, NULL, pd->af)) in pf_normalize_mss()
1982 if (ntohs(mss) > pd->act.max_mss) { in pf_normalize_mss()
1983 size_t mssoffopts = mssp - opts; in pf_normalize_mss()
1985 htons(pd->act.max_mss), PF_ALGNMNT(mssoffopts)); in pf_normalize_mss()
1986 m_copyback(pd->m, optsoff + mssoffopts, in pf_normalize_mss()
1988 m_copyback(pd->m, pd->off, in pf_normalize_mss()
1989 sizeof(struct tcphdr), (caddr_t)&pd->hdr.tcp); in pf_normalize_mss()
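pf_normalize_mss() above (lines 1970-1989) pulls up the TCP option bytes, rewrites an MSS option that exceeds the configured max-mss, and copies the patched options and TCP header back into the mbuf. A standalone sketch of the option walk and clamp on a plain byte buffer; the helper name and the sample option list are assumptions, and the kernel additionally fixes up the TCP checksum and handles alignment, which is omitted here:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TCPOPT_EOL      0
#define TCPOPT_NOP      1
#define TCPOPT_MAXSEG   2
#define TCPOLEN_MAXSEG  4

/* Walk a TCP option list and clamp an oversized MSS; returns 1 if modified. */
static int
clamp_mss(uint8_t *opts, int olen, uint16_t max_mss)
{
    int off = 0;

    while (off < olen) {
        uint8_t kind = opts[off];

        if (kind == TCPOPT_EOL)
            break;
        if (kind == TCPOPT_NOP) {
            off++;
            continue;
        }
        if (off + 1 >= olen || opts[off + 1] < 2 || off + opts[off + 1] > olen)
            break;                              /* malformed option list */
        if (kind == TCPOPT_MAXSEG && opts[off + 1] == TCPOLEN_MAXSEG) {
            uint16_t mss;

            memcpy(&mss, &opts[off + 2], sizeof(mss));
            if (ntohs(mss) > max_mss) {
                mss = htons(max_mss);
                memcpy(&opts[off + 2], &mss, sizeof(mss));
                return (1);
            }
        }
        off += opts[off + 1];
    }
    return (0);
}

int
main(void)
{
    uint8_t opts[] = { 1, 1, 2, 4, 0x05, 0xb4 };    /* NOP, NOP, MSS 1460 */

    if (clamp_mss(opts, sizeof(opts), 1400))
        printf("MSS clamped to 1400\n");
    return (0);
}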
2006 while (pd->off + chunk_off < pd->tot_len) { in pf_scan_sctp()
2007 if (!pf_pull_hdr(pd->m, pd->off + chunk_off, &ch, sizeof(ch), NULL, in pf_scan_sctp()
2008 NULL, pd->af)) in pf_scan_sctp()
2023 if (!pf_pull_hdr(pd->m, pd->off + chunk_start, &init, in pf_scan_sctp()
2024 sizeof(init), NULL, NULL, pd->af)) in pf_scan_sctp()
2045 pd->hdr.sctp.v_tag != 0) in pf_scan_sctp()
2048 pd->sctp_initiate_tag = init.init.initiate_tag; in pf_scan_sctp()
2051 pd->sctp_flags |= PFDESC_SCTP_INIT; in pf_scan_sctp()
2053 pd->sctp_flags |= PFDESC_SCTP_INIT_ACK; in pf_scan_sctp()
2055 ret = pf_multihome_scan_init(pd->off + chunk_start, in pf_scan_sctp()
2063 pd->sctp_flags |= PFDESC_SCTP_ABORT; in pf_scan_sctp()
2067 pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN; in pf_scan_sctp()
2070 pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN_COMPLETE; in pf_scan_sctp()
2073 pd->sctp_flags |= PFDESC_SCTP_COOKIE; in pf_scan_sctp()
2076 pd->sctp_flags |= PFDESC_SCTP_COOKIE_ACK; in pf_scan_sctp()
2079 pd->sctp_flags |= PFDESC_SCTP_DATA; in pf_scan_sctp()
2082 pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT; in pf_scan_sctp()
2085 pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT_ACK; in pf_scan_sctp()
2088 pd->sctp_flags |= PFDESC_SCTP_ASCONF; in pf_scan_sctp()
2090 ret = pf_multihome_scan_asconf(pd->off + chunk_start, in pf_scan_sctp()
2096 pd->sctp_flags |= PFDESC_SCTP_OTHER; in pf_scan_sctp()
2102 if (pd->off + chunk_off != pd->tot_len) in pf_scan_sctp()
2109 if ((pd->sctp_flags & PFDESC_SCTP_INIT) && in pf_scan_sctp()
2110 (pd->sctp_flags & ~PFDESC_SCTP_INIT)) in pf_scan_sctp()
2112 if ((pd->sctp_flags & PFDESC_SCTP_INIT_ACK) && in pf_scan_sctp()
2113 (pd->sctp_flags & ~PFDESC_SCTP_INIT_ACK)) in pf_scan_sctp()
2115 if ((pd->sctp_flags & PFDESC_SCTP_SHUTDOWN_COMPLETE) && in pf_scan_sctp()
2116 (pd->sctp_flags & ~PFDESC_SCTP_SHUTDOWN_COMPLETE)) in pf_scan_sctp()
2118 if ((pd->sctp_flags & PFDESC_SCTP_ABORT) && in pf_scan_sctp()
2119 (pd->sctp_flags & PFDESC_SCTP_DATA)) { in pf_scan_sctp()
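pf_scan_sctp() above walks the chunk list once, recording a PFDESC_SCTP_* flag per chunk type, requires the walk to land exactly on the end of the packet (line 2102), and then rejects bundlings the protocol forbids: INIT, INIT ACK or SHUTDOWN COMPLETE combined with any other chunk (lines 2109-2116), and ABORT combined with DATA (lines 2118-2119). A minimal sketch of that walk over a plain buffer, assuming the standard 4-byte SCTP chunk header and 4-byte chunk padding; the flag names and the reduced type set are illustrative:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

struct chunk_hdr {
    uint8_t type;
    uint8_t flags;
    uint16_t length;    /* network order, excludes the trailing padding */
};

#define F_INIT  0x01
#define F_DATA  0x02
#define F_ABORT 0x04
#define F_OTHER 0x08

/* Returns the accumulated flags, or -1 if the chunk list is unacceptable. */
static int
scan_chunks(const uint8_t *buf, size_t len)
{
    size_t off = 0;
    int flags = 0;

    while (off + sizeof(struct chunk_hdr) <= len) {
        struct chunk_hdr ch;
        uint16_t clen;

        memcpy(&ch, buf + off, sizeof(ch));
        clen = ntohs(ch.length);
        if (clen < sizeof(ch) || off + clen > len)
            return (-1);                    /* malformed chunk */
        switch (ch.type) {
        case 0:  flags |= F_DATA; break;    /* DATA */
        case 1:  flags |= F_INIT; break;    /* INIT */
        case 6:  flags |= F_ABORT; break;   /* ABORT */
        default: flags |= F_OTHER; break;
        }
        off += (clen + 3U) & ~3U;           /* chunks are padded to 4 bytes */
    }
    if (off != len)
        return (-1);                        /* must end exactly at the packet end */
    if ((flags & F_INIT) && (flags & ~F_INIT))
        return (-1);                        /* INIT must travel alone */
    if ((flags & F_ABORT) && (flags & F_DATA))
        return (-1);                        /* ABORT may not be bundled with DATA */
    return (flags);
}

int
main(void)
{
    uint8_t pkt[20] = { 1, 0, 0, 20 };      /* a single 20-byte INIT chunk */

    return (scan_chunks(pkt, sizeof(pkt)) < 0);
}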
2134 struct sctphdr *sh = &pd->hdr.sctp; in pf_normalize_sctp()
2136 sa_family_t af = pd->af; in pf_normalize_sctp()
2141 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr); in pf_normalize_sctp()
2146 pf_counter_u64_add(&r->evaluations, 1); in pf_normalize_sctp()
2147 if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot) in pf_normalize_sctp()
2148 r = r->skip[PF_SKIP_IFP]; in pf_normalize_sctp()
2149 else if (r->direction && r->direction != pd->dir) in pf_normalize_sctp()
2150 r = r->skip[PF_SKIP_DIR]; in pf_normalize_sctp()
2151 else if (r->af && r->af != af) in pf_normalize_sctp()
2152 r = r->skip[PF_SKIP_AF]; in pf_normalize_sctp()
2153 else if (r->proto && r->proto != pd->proto) in pf_normalize_sctp()
2154 r = r->skip[PF_SKIP_PROTO]; in pf_normalize_sctp()
2155 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, in pf_normalize_sctp()
2156 r->src.neg, pd->kif, M_GETFIB(pd->m))) in pf_normalize_sctp()
2157 r = r->skip[PF_SKIP_SRC_ADDR]; in pf_normalize_sctp()
2158 else if (r->src.port_op && !pf_match_port(r->src.port_op, in pf_normalize_sctp()
2159 r->src.port[0], r->src.port[1], sh->src_port)) in pf_normalize_sctp()
2160 r = r->skip[PF_SKIP_SRC_PORT]; in pf_normalize_sctp()
2161 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, in pf_normalize_sctp()
2162 r->dst.neg, NULL, M_GETFIB(pd->m))) in pf_normalize_sctp()
2163 r = r->skip[PF_SKIP_DST_ADDR]; in pf_normalize_sctp()
2164 else if (r->dst.port_op && !pf_match_port(r->dst.port_op, in pf_normalize_sctp()
2165 r->dst.port[0], r->dst.port[1], sh->dest_port)) in pf_normalize_sctp()
2166 r = r->skip[PF_SKIP_DST_PORT]; in pf_normalize_sctp()
2176 if (rm == NULL || rm->action == PF_NOSCRUB) in pf_normalize_sctp()
2180 pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1); in pf_normalize_sctp()
2181 pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len); in pf_normalize_sctp()
2186 if ((pd->tot_len - pd->off - sizeof(struct sctphdr)) % 4) in pf_normalize_sctp()
2190 if (pd->sctp_flags & PFDESC_SCTP_INIT) in pf_normalize_sctp()
2191 if (pd->sctp_flags & ~PFDESC_SCTP_INIT) in pf_normalize_sctp()
2198 if (rm != NULL && r->log) in pf_normalize_sctp()
2210 struct ip *h = mtod(pd->m, struct ip *); in pf_scrub()
2212 struct ip6_hdr *h6 = mtod(pd->m, struct ip6_hdr *); in pf_scrub()
2215 /* Clear IP_DF if no-df was requested */ in pf_scrub()
2216 if (pd->af == AF_INET && pd->act.flags & PFSTATE_NODF && in pf_scrub()
2217 h->ip_off & htons(IP_DF)) in pf_scrub()
2219 u_int16_t ip_off = h->ip_off; in pf_scrub()
2221 h->ip_off &= htons(~IP_DF); in pf_scrub()
2222 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0); in pf_scrub()
2226 if (pd->af == AF_INET && pd->act.min_ttl && in pf_scrub()
2227 h->ip_ttl < pd->act.min_ttl) { in pf_scrub()
2228 u_int16_t ip_ttl = h->ip_ttl; in pf_scrub()
2230 h->ip_ttl = pd->act.min_ttl; in pf_scrub()
2231 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0); in pf_scrub()
2235 if (pd->af == AF_INET6 && pd->act.min_ttl && in pf_scrub()
2236 h6->ip6_hlim < pd->act.min_ttl) in pf_scrub()
2237 h6->ip6_hlim = pd->act.min_ttl; in pf_scrub()
2240 if (pd->act.flags & PFSTATE_SETTOS) { in pf_scrub()
2241 switch (pd->af) { in pf_scrub()
2246 h->ip_tos = pd->act.set_tos | (h->ip_tos & IPTOS_ECN_MASK); in pf_scrub()
2249 h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0); in pf_scrub()
2254 h6->ip6_flow &= IPV6_FLOWLABEL_MASK | IPV6_VERSION_MASK; in pf_scrub()
2255 h6->ip6_flow |= htonl((pd->act.set_tos | IPV6_ECN(h6)) << 20); in pf_scrub()
2261 /* random-id, but not for fragments */ in pf_scrub()
2263 if (pd->af == AF_INET && in pf_scrub()
2264 pd->act.flags & PFSTATE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) { in pf_scrub()
2265 uint16_t ip_id = h->ip_id; in pf_scrub()
2268 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0); in pf_scrub()