Lines Matching +full:sub +full:- +full:block

66  * A superblock is a block of adjacent rules of similar action.  If there
68 * Once we have a superblock, we are free to re-order any rules within it
85 BARRIER, /* the presence of the field puts the rule in its own block */
101 sizeof(((struct pfctl_rule *)0)->field)}
107 * rule from being re-ordered at all.
120 * These rules are allowed to be re-ordered but only among like rules.
121 * For instance we can re-order all 'tag "foo"' rules because they have the
122 * same tag. But we can not re-order between a 'tag "foo"' and a
151 * There are no problems re-ordering any rules with these fields.
202 /* These fields should never be set in a PASS/BLOCK rule */
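
The macro fragment at line 101 is the tail of a field-descriptor entry: the optimizer keeps a table recording each struct pfctl_rule field's name, offset, and size, tagged with one of the classes described above (BARRIER, fields that may only re-order among like rules, don't-care fields, and fields never set in a PASS/BLOCK rule). A minimal sketch of that descriptor-table idiom, using a made-up struct toy_rule and field names rather than the real pfctl types:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical rule type standing in for struct pfctl_rule. */
struct toy_rule {
    char    ifname[16];
    int     proto;
    int     log;
};

/* One descriptor per field: name, byte offset, and size. */
struct field_desc {
    const char  *name;
    size_t       offset;
    size_t       size;
};

/* Same idiom as the matched line 101: sizeof on a field of a NULL
 * pointer is evaluated at compile time, so nothing is dereferenced. */
#define FIELD(f)    { #f, offsetof(struct toy_rule, f), \
                        sizeof(((struct toy_rule *)0)->f) }

static const struct field_desc toy_fields[] = {
    FIELD(ifname),
    FIELD(proto),
    FIELD(log),
};

int
main(void)
{
    size_t i;

    for (i = 0; i < sizeof(toy_fields) / sizeof(toy_fields[0]); i++)
        printf("%-8s offset=%zu size=%zu\n", toy_fields[i].name,
            toy_fields[i].offset, toy_fields[i].size);
    return (0);
}

Driving comparisons from a table like this is what the pf_rule_desc[] offset lookups later in the listing (around lines 1485-1496) rely on, instead of naming every field by hand.
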
269 struct superblock *block; in pfctl_optimize_ruleset() local
279 old_rules = rs->rules[PF_RULESET_FILTER].active.ptr; in pfctl_optimize_ruleset()
280 rs->rules[PF_RULESET_FILTER].active.ptr = in pfctl_optimize_ruleset()
281 rs->rules[PF_RULESET_FILTER].inactive.ptr; in pfctl_optimize_ruleset()
282 rs->rules[PF_RULESET_FILTER].inactive.ptr = old_rules; in pfctl_optimize_ruleset()
288 while ((r = TAILQ_FIRST(rs->rules[PF_RULESET_FILTER].inactive.ptr)) in pfctl_optimize_ruleset()
290 TAILQ_REMOVE(rs->rules[PF_RULESET_FILTER].inactive.ptr, r, in pfctl_optimize_ruleset()
294 memcpy(&por->por_rule, r, sizeof(*r)); in pfctl_optimize_ruleset()
295 if (TAILQ_FIRST(&r->rdr.list) != NULL) { in pfctl_optimize_ruleset()
296 TAILQ_INIT(&por->por_rule.rdr.list); in pfctl_optimize_ruleset()
297 pfctl_move_pool(&r->rdr, &por->por_rule.rdr); in pfctl_optimize_ruleset()
299 bzero(&por->por_rule.rdr, in pfctl_optimize_ruleset()
300 sizeof(por->por_rule.rdr)); in pfctl_optimize_ruleset()
301 if (TAILQ_FIRST(&r->nat.list) != NULL) { in pfctl_optimize_ruleset()
302 TAILQ_INIT(&por->por_rule.nat.list); in pfctl_optimize_ruleset()
303 pfctl_move_pool(&r->nat, &por->por_rule.nat); in pfctl_optimize_ruleset()
305 bzero(&por->por_rule.nat, in pfctl_optimize_ruleset()
306 sizeof(por->por_rule.nat)); in pfctl_optimize_ruleset()
307 if (TAILQ_FIRST(&r->route.list) != NULL) { in pfctl_optimize_ruleset()
308 TAILQ_INIT(&por->por_rule.route.list); in pfctl_optimize_ruleset()
309 pfctl_move_pool(&r->route, &por->por_rule.route); in pfctl_optimize_ruleset()
311 bzero(&por->por_rule.route, in pfctl_optimize_ruleset()
312 sizeof(por->por_rule.route)); in pfctl_optimize_ruleset()
321 if (pf->optimize & PF_OPTIMIZE_PROFILE) { in pfctl_optimize_ruleset()
326 TAILQ_FOREACH(block, &superblocks, sb_entry) { in pfctl_optimize_ruleset()
327 if (optimize_superblock(pf, block)) in pfctl_optimize_ruleset()
331 rs->anchor->refcnt = 0; in pfctl_optimize_ruleset()
332 while ((block = TAILQ_FIRST(&superblocks))) { in pfctl_optimize_ruleset()
333 TAILQ_REMOVE(&superblocks, block, sb_entry); in pfctl_optimize_ruleset()
335 while ((por = TAILQ_FIRST(&block->sb_rules))) { in pfctl_optimize_ruleset()
336 TAILQ_REMOVE(&block->sb_rules, por, por_entry); in pfctl_optimize_ruleset()
337 por->por_rule.nr = rs->anchor->refcnt++; in pfctl_optimize_ruleset()
340 memcpy(r, &por->por_rule, sizeof(*r)); in pfctl_optimize_ruleset()
341 TAILQ_INIT(&r->rdr.list); in pfctl_optimize_ruleset()
342 pfctl_move_pool(&por->por_rule.rdr, &r->rdr); in pfctl_optimize_ruleset()
343 TAILQ_INIT(&r->nat.list); in pfctl_optimize_ruleset()
344 pfctl_move_pool(&por->por_rule.nat, &r->nat); in pfctl_optimize_ruleset()
346 rs->rules[PF_RULESET_FILTER].active.ptr, in pfctl_optimize_ruleset()
350 free(block); in pfctl_optimize_ruleset()
358 if (por->por_src_tbl) { in pfctl_optimize_ruleset()
359 pfr_buf_clear(por->por_src_tbl->pt_buf); in pfctl_optimize_ruleset()
360 free(por->por_src_tbl->pt_buf); in pfctl_optimize_ruleset()
361 free(por->por_src_tbl); in pfctl_optimize_ruleset()
363 if (por->por_dst_tbl) { in pfctl_optimize_ruleset()
364 pfr_buf_clear(por->por_dst_tbl->pt_buf); in pfctl_optimize_ruleset()
365 free(por->por_dst_tbl->pt_buf); in pfctl_optimize_ruleset()
366 free(por->por_dst_tbl); in pfctl_optimize_ruleset()
370 while ((block = TAILQ_FIRST(&superblocks))) { in pfctl_optimize_ruleset()
371 TAILQ_REMOVE(&superblocks, block, sb_entry); in pfctl_optimize_ruleset()
372 superblock_free(pf, block); in pfctl_optimize_ruleset()
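
pfctl_optimize_ruleset() leans on the same TAILQ idiom throughout: take the head of a queue, remove it, and move or process it (lines 288-290, 332-336, 370-371). A self-contained sketch of that drain-one-queue-into-another pattern, with a hypothetical struct item standing in for pf_opt_rule:

#include <sys/queue.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical list element; pf_opt_rule plays this role in pfctl. */
struct item {
    int                 value;
    TAILQ_ENTRY(item)   entries;
};
TAILQ_HEAD(itemq, item);

int
main(void)
{
    struct itemq src = TAILQ_HEAD_INITIALIZER(src);
    struct itemq dst = TAILQ_HEAD_INITIALIZER(dst);
    struct item *it;
    int i;

    for (i = 0; i < 3; i++) {
        if ((it = calloc(1, sizeof(*it))) == NULL)
            err(1, "calloc");
        it->value = i;
        TAILQ_INSERT_TAIL(&src, it, entries);
    }

    /* Drain one queue into another: the same pop/remove/insert pattern
     * the optimizer applies to the inactive rule list. */
    while ((it = TAILQ_FIRST(&src)) != NULL) {
        TAILQ_REMOVE(&src, it, entries);
        TAILQ_INSERT_TAIL(&dst, it, entries);
    }

    TAILQ_FOREACH(it, &dst, entries)
        printf("%d\n", it->value);
    return (0);
}
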
382 optimize_superblock(struct pfctl *pf, struct superblock *block) in optimize_superblock() argument
393 * 3) re-order the rules to improve kernel skip steps in optimize_superblock()
394 * 4) re-order the 'quick' rules based on feedback from the in optimize_superblock()
412 * XXX we can also re-order some mutually exclusive superblocks to in optimize_superblock()
414 * for instance a single 'log in' rule in the middle of non-logging in optimize_superblock()
418 /* shortcut. there will be a lot of 1-rule superblocks */ in optimize_superblock()
419 if (!TAILQ_NEXT(TAILQ_FIRST(&block->sb_rules), por_entry)) in optimize_superblock()
423 printf("--- Superblock ---\n"); in optimize_superblock()
424 TAILQ_FOREACH(por, &block->sb_rules, por_entry) { in optimize_superblock()
426 print_rule(&por->por_rule, por->por_rule.anchor ? in optimize_superblock()
427 por->por_rule.anchor->name : "", 1, 0); in optimize_superblock()
432 if (remove_identical_rules(pf, block)) in optimize_superblock()
434 if (combine_rules(pf, block)) in optimize_superblock()
436 if ((pf->optimize & PF_OPTIMIZE_PROFILE) && in optimize_superblock()
437 TAILQ_FIRST(&block->sb_rules)->por_rule.quick && in optimize_superblock()
438 block->sb_profiled_block) { in optimize_superblock()
439 if (block_feedback(pf, block)) in optimize_superblock()
441 } else if (reorder_rules(pf, block, 0)) { in optimize_superblock()
453 printf("--- END Superblock ---\n"); in optimize_superblock()
463 remove_identical_rules(struct pfctl *pf, struct superblock *block) in remove_identical_rules() argument
468 for (por1 = TAILQ_FIRST(&block->sb_rules); por1; por1 = por_next) { in remove_identical_rules()
472 comparable_rule(&a, &por1->por_rule, DC); in remove_identical_rules()
473 comparable_rule(&b, &por2->por_rule, DC); in remove_identical_rules()
481 por1->por_rule.nr, por2->por_rule.nr); in remove_identical_rules()
482 TAILQ_REMOVE(&block->sb_rules, por2, por_entry); in remove_identical_rules()
488 por1->por_rule.nr, por2->por_rule.nr); in remove_identical_rules()
489 TAILQ_REMOVE(&block->sb_rules, por1, por_entry); in remove_identical_rules()
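
remove_identical_rules() runs each pair of rules through comparable_rule(), which (roughly) blanks the fields that don't affect matching, and then compares the masked copies with memcmp(); if they are equal, one of the two rules is redundant and is unlinked from the superblock. A rough stand-alone sketch of that masking idea, with a hypothetical toy_rule whose counter plays the role of a don't-care field:

#include <stdio.h>
#include <string.h>

/* Hypothetical rule with one field that is irrelevant for equality. */
struct toy_rule {
    int proto;
    int dst_port;
    int evaluations;    /* counter: a "don't care" field */
};

/* Copy the rule and blank the don't-care fields, mirroring what
 * comparable_rule() does with its DC field class. */
static void
comparable(struct toy_rule *dst, const struct toy_rule *src)
{
    memcpy(dst, src, sizeof(*dst));
    dst->evaluations = 0;
}

int
main(void)
{
    struct toy_rule r1 = { 6, 22, 100 };
    struct toy_rule r2 = { 6, 22, 7 };
    struct toy_rule a, b;

    comparable(&a, &r1);
    comparable(&b, &r2);
    /* Identical once the counters are masked: one can be dropped. */
    printf("identical: %s\n",
        memcmp(&a, &b, sizeof(a)) == 0 ? "yes" : "no");
    return (0);
}
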
505 combine_rules(struct pfctl *pf, struct superblock *block) in combine_rules() argument
510 if ((pf->loadopt & PFCTL_FLAG_TABLE) == 0) { in combine_rules()
516 TAILQ_FOREACH(p1, &block->sb_rules, por_entry) { in combine_rules()
520 src_eq = addrs_equal(&p1->por_rule.src, in combine_rules()
521 &p2->por_rule.src); in combine_rules()
522 dst_eq = addrs_equal(&p1->por_rule.dst, in combine_rules()
523 &p2->por_rule.dst); in combine_rules()
525 if (src_eq && !dst_eq && p1->por_src_tbl == NULL && in combine_rules()
526 p2->por_dst_tbl == NULL && in combine_rules()
527 p2->por_src_tbl == NULL && in combine_rules()
528 rules_combineable(&p1->por_rule, &p2->por_rule) && in combine_rules()
529 addrs_combineable(&p1->por_rule.dst, in combine_rules()
530 &p2->por_rule.dst)) { in combine_rules()
532 p1->por_rule.nr, p2->por_rule.nr); in combine_rules()
533 if (p1->por_dst_tbl == NULL && in combine_rules()
534 add_opt_table(pf, &p1->por_dst_tbl, in combine_rules()
535 p1->por_rule.af, &p1->por_rule.dst)) in combine_rules()
537 if (add_opt_table(pf, &p1->por_dst_tbl, in combine_rules()
538 p1->por_rule.af, &p2->por_rule.dst)) in combine_rules()
540 p2->por_dst_tbl = p1->por_dst_tbl; in combine_rules()
541 if (p1->por_dst_tbl->pt_rulecount >= in combine_rules()
543 TAILQ_REMOVE(&block->sb_rules, p2, in combine_rules()
547 } else if (!src_eq && dst_eq && p1->por_dst_tbl == NULL in combine_rules()
548 && p2->por_src_tbl == NULL && in combine_rules()
549 p2->por_dst_tbl == NULL && in combine_rules()
550 rules_combineable(&p1->por_rule, &p2->por_rule) && in combine_rules()
551 addrs_combineable(&p1->por_rule.src, in combine_rules()
552 &p2->por_rule.src)) { in combine_rules()
554 p1->por_rule.nr, p2->por_rule.nr); in combine_rules()
555 if (p1->por_src_tbl == NULL && in combine_rules()
556 add_opt_table(pf, &p1->por_src_tbl, in combine_rules()
557 p1->por_rule.af, &p1->por_rule.src)) in combine_rules()
559 if (add_opt_table(pf, &p1->por_src_tbl, in combine_rules()
560 p1->por_rule.af, &p2->por_rule.src)) in combine_rules()
562 p2->por_src_tbl = p1->por_src_tbl; in combine_rules()
563 if (p1->por_src_tbl->pt_rulecount >= in combine_rules()
565 TAILQ_REMOVE(&block->sb_rules, p2, in combine_rules()
578 for (p1 = TAILQ_FIRST(&block->sb_rules); p1; p1 = por_next) { in combine_rules()
580 assert(p1->por_src_tbl == NULL || p1->por_dst_tbl == NULL); in combine_rules()
582 if (p1->por_src_tbl && p1->por_src_tbl->pt_rulecount >= in combine_rules()
584 if (p1->por_src_tbl->pt_generated) { in combine_rules()
586 TAILQ_REMOVE(&block->sb_rules, p1, por_entry); in combine_rules()
590 p1->por_src_tbl->pt_generated = 1; in combine_rules()
592 if ((pf->opts & PF_OPT_NOACTION) == 0 && in combine_rules()
593 pf_opt_create_table(pf, p1->por_src_tbl)) in combine_rules()
596 pf->tdirty = 1; in combine_rules()
598 if (pf->opts & PF_OPT_VERBOSE) in combine_rules()
599 print_tabledef(p1->por_src_tbl->pt_name, in combine_rules()
601 &p1->por_src_tbl->pt_nodes); in combine_rules()
603 memset(&p1->por_rule.src.addr, 0, in combine_rules()
604 sizeof(p1->por_rule.src.addr)); in combine_rules()
605 p1->por_rule.src.addr.type = PF_ADDR_TABLE; in combine_rules()
606 strlcpy(p1->por_rule.src.addr.v.tblname, in combine_rules()
607 p1->por_src_tbl->pt_name, in combine_rules()
608 sizeof(p1->por_rule.src.addr.v.tblname)); in combine_rules()
610 pfr_buf_clear(p1->por_src_tbl->pt_buf); in combine_rules()
611 free(p1->por_src_tbl->pt_buf); in combine_rules()
612 p1->por_src_tbl->pt_buf = NULL; in combine_rules()
614 if (p1->por_dst_tbl && p1->por_dst_tbl->pt_rulecount >= in combine_rules()
616 if (p1->por_dst_tbl->pt_generated) { in combine_rules()
618 TAILQ_REMOVE(&block->sb_rules, p1, por_entry); in combine_rules()
622 p1->por_dst_tbl->pt_generated = 1; in combine_rules()
624 if ((pf->opts & PF_OPT_NOACTION) == 0 && in combine_rules()
625 pf_opt_create_table(pf, p1->por_dst_tbl)) in combine_rules()
627 pf->tdirty = 1; in combine_rules()
629 if (pf->opts & PF_OPT_VERBOSE) in combine_rules()
630 print_tabledef(p1->por_dst_tbl->pt_name, in combine_rules()
632 &p1->por_dst_tbl->pt_nodes); in combine_rules()
634 memset(&p1->por_rule.dst.addr, 0, in combine_rules()
635 sizeof(p1->por_rule.dst.addr)); in combine_rules()
636 p1->por_rule.dst.addr.type = PF_ADDR_TABLE; in combine_rules()
637 strlcpy(p1->por_rule.dst.addr.v.tblname, in combine_rules()
638 p1->por_dst_tbl->pt_name, in combine_rules()
639 sizeof(p1->por_rule.dst.addr.v.tblname)); in combine_rules()
641 pfr_buf_clear(p1->por_dst_tbl->pt_buf); in combine_rules()
642 free(p1->por_dst_tbl->pt_buf); in combine_rules()
643 p1->por_dst_tbl->pt_buf = NULL; in combine_rules()
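
combine_rules() looks for rules that differ only in their source or destination address, accumulates those addresses into a temporary table via add_opt_table(), and, once pt_rulecount reaches TABLE_THRESHOLD, keeps a single rule that matches against the generated table and drops the rest. A toy illustration of just that threshold decision; the threshold value, table name, and rule text below are illustrative, not pfctl output:

#include <stdio.h>

/* Hypothetical threshold; the optimizer's real constant is
 * TABLE_THRESHOLD, beyond which a table lookup beats skip steps. */
#define TOY_TABLE_THRESHOLD 6

int
main(void)
{
    const char *dsts[] = {
        "198.51.100.1", "198.51.100.2", "198.51.100.3",
        "198.51.100.4", "198.51.100.5", "198.51.100.6",
    };
    int i, n = sizeof(dsts) / sizeof(dsts[0]);

    if (n >= TOY_TABLE_THRESHOLD) {
        /* Many rules differing only in destination: fold them into
         * one rule that matches against a generated table. */
        printf("table <toy_auto_0> const {");
        for (i = 0; i < n; i++)
            printf(" %s", dsts[i]);
        printf(" }\n");
        printf("pass in proto tcp to <toy_auto_0> port 22\n");
    } else {
        for (i = 0; i < n; i++)
            printf("pass in proto tcp to %s port 22\n", dsts[i]);
    }
    return (0);
}

In the real pass the surviving rule's address is also rewritten into PF_ADDR_TABLE form pointing at the new table (lines 605-608 and 636-639 above).
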
652 * Optimization pass #3: re-order rules to improve skip steps
655 reorder_rules(struct pfctl *pf, struct superblock *block, int depth) in reorder_rules() argument
664 * Calculate the best-case skip steps. We put each rule in a list in reorder_rules()
668 TAILQ_FOREACH(por, &block->sb_rules, por_entry) { in reorder_rules()
669 TAILQ_FOREACH(skiplist, &block->sb_skipsteps[i], in reorder_rules()
678 TAILQ_INIT(&skiplist->ps_rules); in reorder_rules()
679 TAILQ_INSERT_TAIL(&block->sb_skipsteps[i], in reorder_rules()
682 skip_append(block, i, skiplist, por); in reorder_rules()
686 TAILQ_FOREACH(por, &block->sb_rules, por_entry) in reorder_rules()
696 skiplist = TAILQ_FIRST(&block->sb_skipsteps[i]); in reorder_rules()
697 if (skiplist->ps_count == rule_count) { in reorder_rules()
700 skiplist->ps_count = 0; in reorder_rules()
701 } else if (skiplist->ps_count == 1) { in reorder_rules()
702 skiplist->ps_count = 0; in reorder_rules()
706 skiplist->ps_count); in reorder_rules()
707 if (skiplist->ps_count > largest) in reorder_rules()
708 largest = skiplist->ps_count; in reorder_rules()
719 * Now we're going to empty the superblock rule list and re-create in reorder_rules()
723 while ((por = TAILQ_FIRST(&block->sb_rules))) { in reorder_rules()
724 TAILQ_REMOVE(&block->sb_rules, por, por_entry); in reorder_rules()
736 skiplist = TAILQ_FIRST(&block->sb_skipsteps[i]); in reorder_rules()
737 if (skiplist->ps_count > largest) { in reorder_rules()
738 largest = skiplist->ps_count; in reorder_rules()
750 TAILQ_INSERT_TAIL(&block->sb_rules, por, in reorder_rules()
759 skiplist = TAILQ_FIRST(&block->sb_skipsteps[ in reorder_rules()
763 largest, TAILQ_FIRST(&TAILQ_FIRST(&block-> in reorder_rules()
764 sb_skipsteps [largest_list])->ps_rules)-> in reorder_rules()
766 TAILQ_REMOVE(&block->sb_skipsteps[largest_list], in reorder_rules()
775 if (skiplist->ps_count > 2) { in reorder_rules()
781 TAILQ_INIT(&newblock->sb_rules); in reorder_rules()
783 TAILQ_INIT(&newblock->sb_skipsteps[i]); in reorder_rules()
784 TAILQ_INSERT_BEFORE(block, newblock, sb_entry); in reorder_rules()
786 depth, skiplist->ps_count, in reorder_rules()
787 TAILQ_FIRST(&skiplist->ps_rules)-> in reorder_rules()
790 newblock = block; in reorder_rules()
793 while ((por = TAILQ_FIRST(&skiplist->ps_rules))) { in reorder_rules()
795 TAILQ_REMOVE(&skiplist->ps_rules, por, in reorder_rules()
797 TAILQ_INSERT_TAIL(&newblock->sb_rules, por, in reorder_rules()
801 remove_from_skipsteps(&block->sb_skipsteps[ in reorder_rules()
802 largest_list], block, por, skiplist); in reorder_rules()
805 if (newblock != block) in reorder_rules()
813 while ((skiplist = TAILQ_FIRST(&block->sb_skipsteps[i]))) { in reorder_rules()
814 TAILQ_REMOVE(&block->sb_skipsteps[i], skiplist, in reorder_rules()
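
reorder_rules() buckets the superblock's rules by the value of each skippable field, counts how many rules share each value (ps_count), and emits the rules of the largest bucket first so the kernel's skip steps can hop over as many rules as possible at once. A very small sketch of the "find the largest group sharing a field value" step, ignoring the recursion into sub-blocks:

#include <stdio.h>

#define NRULES  8

int
main(void)
{
    /* Toy field values (say, protocol numbers) for eight rules. */
    int proto[NRULES] = { 6, 6, 17, 6, 17, 6, 1, 6 };
    int counted[NRULES] = { 0 };
    int i, j, count, largest = 0;

    for (i = 0; i < NRULES; i++) {
        if (counted[i])
            continue;
        count = 0;
        for (j = i; j < NRULES; j++)
            if (proto[j] == proto[i]) {
                counted[j] = 1;
                count++;
            }
        if (count > largest)
            largest = count;
    }
    /* The real pass keeps one such bucket list per skippable field and
     * outputs the biggest bucket first. */
    printf("largest group shares a value across %d rules\n", largest);
    return (0);
}
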
825 * Optimization pass #4: re-order 'quick' rules based on feedback from the
829 block_feedback(struct pfctl *pf, struct superblock *block) in block_feedback() argument
840 TAILQ_FOREACH(por1, &block->sb_profiled_block->sb_rules, por_entry) { in block_feedback()
841 comparable_rule(&a, &por1->por_rule, DC); in block_feedback()
842 TAILQ_FOREACH(por2, &block->sb_rules, por_entry) { in block_feedback()
843 if (por2->por_profile_count) in block_feedback()
845 comparable_rule(&b, &por2->por_rule, DC); in block_feedback()
847 por2->por_profile_count = in block_feedback()
848 por1->por_rule.packets[0] + in block_feedback()
849 por1->por_rule.packets[1]; in block_feedback()
854 superblock_free(pf, block->sb_profiled_block); in block_feedback()
855 block->sb_profiled_block = NULL; in block_feedback()
858 * Now we pull all of the rules off the superblock and re-insert them in block_feedback()
863 while ((por1 = TAILQ_FIRST(&block->sb_rules)) != NULL) { in block_feedback()
864 TAILQ_REMOVE(&block->sb_rules, por1, por_entry); in block_feedback()
871 TAILQ_FOREACH(por2, &block->sb_rules, por_entry) { in block_feedback()
872 if (por1->por_profile_count > por2->por_profile_count) { in block_feedback()
880 if (por2 == TAILQ_END(&block->sb_rules)) in block_feedback()
882 TAILQ_INSERT_TAIL(&block->sb_rules, por1, por_entry); in block_feedback()
897 struct superblock *block, *blockcur; in load_feedback_profile() local
908 if (pfctl_get_rules_info_h(pf->h, &rules, PF_PASS, "")) { in load_feedback_profile()
922 if (pfctl_get_rule_h(pf->h, nr, rules.ticket, "", PF_PASS, in load_feedback_profile()
927 memcpy(&por->por_rule, &rule, sizeof(por->por_rule)); in load_feedback_profile()
929 por->por_rule.anchor = rs->anchor; in load_feedback_profile()
930 if (TAILQ_EMPTY(&por->por_rule.rdr.list)) in load_feedback_profile()
931 memset(&por->por_rule.rdr, 0, in load_feedback_profile()
932 sizeof(por->por_rule.rdr)); in load_feedback_profile()
933 if (TAILQ_EMPTY(&por->por_rule.nat.list)) in load_feedback_profile()
934 memset(&por->por_rule.nat, 0, in load_feedback_profile()
935 sizeof(por->por_rule.nat)); in load_feedback_profile()
938 /* XXX pfctl_get_pool(pf->dev, &rule.rdr, nr, pr.ticket, in load_feedback_profile()
939 * PF_PASS, pf->anchor) ??? in load_feedback_profile()
952 block = TAILQ_FIRST(superblocks); in load_feedback_profile()
954 while (block && blockcur) { in load_feedback_profile()
955 comparable_rule(&a, &TAILQ_FIRST(&block->sb_rules)->por_rule, in load_feedback_profile()
957 comparable_rule(&b, &TAILQ_FIRST(&blockcur->sb_rules)->por_rule, in load_feedback_profile()
961 block->sb_profiled_block = blockcur; in load_feedback_profile()
964 TAILQ_FIRST(&block->sb_rules)->por_rule.nr, in load_feedback_profile()
965 TAILQ_FIRST(&blockcur->sb_rules)->por_rule.nr); in load_feedback_profile()
968 block = TAILQ_NEXT(block, sb_entry); in load_feedback_profile()
976 block = TAILQ_NEXT(blockcur, sb_entry); in load_feedback_profile()
978 blockcur = block; in load_feedback_profile()
994 a = &por->por_rule; in skip_compare()
995 b = &TAILQ_FIRST(&skiplist->ps_rules)->por_rule; in skip_compare()
1010 skiplist->ps_count++; in skip_append()
1011 TAILQ_INSERT_TAIL(&skiplist->ps_rules, por, por_skip_entry[skipnum]); in skip_append()
1015 prev->ps_count < skiplist->ps_count) { in skip_append()
1016 TAILQ_REMOVE(&superblock->sb_skipsteps[skipnum], in skip_append()
1027 remove_from_skipsteps(struct skiplist *head, struct superblock *block, in remove_from_skipsteps() argument
1035 sk = TAILQ_FIRST(&block->sb_skipsteps[i]); in remove_from_skipsteps()
1036 if (sk == NULL || sk == active_list || sk->ps_count <= 1) in remove_from_skipsteps()
1040 TAILQ_FOREACH(p2, &sk->ps_rules, por_skip_entry[i]) in remove_from_skipsteps()
1042 TAILQ_REMOVE(&sk->ps_rules, p2, in remove_from_skipsteps()
1045 sk->ps_count--; in remove_from_skipsteps()
1052 next->ps_count > sk->ps_count) { in remove_from_skipsteps()
1058 assert(next == NULL || next->ps_count <= sk->ps_count); in remove_from_skipsteps()
1069 if (a->af != b->af || a->af == 0) in skip_cmp_af()
1078 if (a->direction == 0 || a->direction != b->direction) in skip_cmp_dir()
1087 if (a->dst.neg != b->dst.neg || in skip_cmp_dst_addr()
1088 a->dst.addr.type != b->dst.addr.type) in skip_cmp_dst_addr()
1090 /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0 in skip_cmp_dst_addr()
1091 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP || in skip_cmp_dst_addr()
1092 * a->proto == IPPROTO_ICMP in skip_cmp_dst_addr()
1095 switch (a->dst.addr.type) { in skip_cmp_dst_addr()
1097 if (memcmp(&a->dst.addr.v.a.addr, &b->dst.addr.v.a.addr, in skip_cmp_dst_addr()
1098 sizeof(a->dst.addr.v.a.addr)) || in skip_cmp_dst_addr()
1099 memcmp(&a->dst.addr.v.a.mask, &b->dst.addr.v.a.mask, in skip_cmp_dst_addr()
1100 sizeof(a->dst.addr.v.a.mask)) || in skip_cmp_dst_addr()
1101 (a->dst.addr.v.a.addr.addr32[0] == 0 && in skip_cmp_dst_addr()
1102 a->dst.addr.v.a.addr.addr32[1] == 0 && in skip_cmp_dst_addr()
1103 a->dst.addr.v.a.addr.addr32[2] == 0 && in skip_cmp_dst_addr()
1104 a->dst.addr.v.a.addr.addr32[3] == 0)) in skip_cmp_dst_addr()
1108 if (strcmp(a->dst.addr.v.ifname, b->dst.addr.v.ifname) != 0 || in skip_cmp_dst_addr()
1109 a->dst.addr.iflags != b->dst.addr.iflags || in skip_cmp_dst_addr()
1110 memcmp(&a->dst.addr.v.a.mask, &b->dst.addr.v.a.mask, in skip_cmp_dst_addr()
1111 sizeof(a->dst.addr.v.a.mask))) in skip_cmp_dst_addr()
1118 return (strcmp(a->dst.addr.v.tblname, b->dst.addr.v.tblname)); in skip_cmp_dst_addr()
1127 /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0 in skip_cmp_dst_port()
1128 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP || in skip_cmp_dst_port()
1129 * a->proto == IPPROTO_ICMP in skip_cmp_dst_port()
1132 if (a->dst.port_op == PF_OP_NONE || a->dst.port_op != b->dst.port_op || in skip_cmp_dst_port()
1133 a->dst.port[0] != b->dst.port[0] || in skip_cmp_dst_port()
1134 a->dst.port[1] != b->dst.port[1]) in skip_cmp_dst_port()
1143 if (strcmp(a->ifname, b->ifname) || a->ifname[0] == '\0') in skip_cmp_ifp()
1145 return (a->ifnot != b->ifnot); in skip_cmp_ifp()
1152 return (a->proto != b->proto || a->proto == 0); in skip_cmp_proto()
1159 if (a->src.neg != b->src.neg || in skip_cmp_src_addr()
1160 a->src.addr.type != b->src.addr.type) in skip_cmp_src_addr()
1162 /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0 in skip_cmp_src_addr()
1163 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP || in skip_cmp_src_addr()
1164 * a->proto == IPPROTO_ICMP in skip_cmp_src_addr()
1167 switch (a->src.addr.type) { in skip_cmp_src_addr()
1169 if (memcmp(&a->src.addr.v.a.addr, &b->src.addr.v.a.addr, in skip_cmp_src_addr()
1170 sizeof(a->src.addr.v.a.addr)) || in skip_cmp_src_addr()
1171 memcmp(&a->src.addr.v.a.mask, &b->src.addr.v.a.mask, in skip_cmp_src_addr()
1172 sizeof(a->src.addr.v.a.mask)) || in skip_cmp_src_addr()
1173 (a->src.addr.v.a.addr.addr32[0] == 0 && in skip_cmp_src_addr()
1174 a->src.addr.v.a.addr.addr32[1] == 0 && in skip_cmp_src_addr()
1175 a->src.addr.v.a.addr.addr32[2] == 0 && in skip_cmp_src_addr()
1176 a->src.addr.v.a.addr.addr32[3] == 0)) in skip_cmp_src_addr()
1180 if (strcmp(a->src.addr.v.ifname, b->src.addr.v.ifname) != 0 || in skip_cmp_src_addr()
1181 a->src.addr.iflags != b->src.addr.iflags || in skip_cmp_src_addr()
1182 memcmp(&a->src.addr.v.a.mask, &b->src.addr.v.a.mask, in skip_cmp_src_addr()
1183 sizeof(a->src.addr.v.a.mask))) in skip_cmp_src_addr()
1190 return (strcmp(a->src.addr.v.tblname, b->src.addr.v.tblname)); in skip_cmp_src_addr()
1199 if (a->src.port_op == PF_OP_NONE || a->src.port_op != b->src.port_op || in skip_cmp_src_port()
1200 a->src.port[0] != b->src.port[0] || in skip_cmp_src_port()
1201 a->src.port[1] != b->src.port[1]) in skip_cmp_src_port()
1203 /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0 in skip_cmp_src_port()
1204 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP || in skip_cmp_src_port()
1205 * a->proto == IPPROTO_ICMP in skip_cmp_src_port()
1249 ((*tbl)->pt_buf = calloc(1, sizeof(*(*tbl)->pt_buf))) == in add_opt_table()
1252 (*tbl)->pt_buf->pfrb_type = PFRB_ADDRS; in add_opt_table()
1253 SIMPLEQ_INIT(&(*tbl)->pt_nodes); in add_opt_table()
1256 snprintf((*tbl)->pt_name, sizeof((*tbl)->pt_name), "%s%d", in add_opt_table()
1258 DEBUG("creating table <%s>", (*tbl)->pt_name); in add_opt_table()
1263 node_host.addr = addr->addr; in add_opt_table()
1266 DEBUG("<%s> adding %s/%d", (*tbl)->pt_name, inet_ntop(af, in add_opt_table()
1271 if (append_addr_host((*tbl)->pt_buf, &node_host, 0, 0)) { in add_opt_table()
1275 if (pf->opts & PF_OPT_VERBOSE) { in add_opt_table()
1280 if ((ti->host = malloc(sizeof(*ti->host))) == NULL) in add_opt_table()
1282 memcpy(ti->host, &node_host, sizeof(*ti->host)); in add_opt_table()
1283 SIMPLEQ_INSERT_TAIL(&(*tbl)->pt_nodes, ti, entries); in add_opt_table()
1286 (*tbl)->pt_rulecount++; in add_opt_table()
1287 if ((*tbl)->pt_rulecount == TABLE_THRESHOLD) in add_opt_table()
1288 DEBUG("table <%s> now faster than skip steps", (*tbl)->pt_name); in add_opt_table()
1323 DEBUG("translating temporary table <%s> to <%s%x_%d>", tbl->pt_name, in pf_opt_create_table()
1325 snprintf(tbl->pt_name, sizeof(tbl->pt_name), "%s%x_%d", in pf_opt_create_table()
1328 if (strcasecmp(t->pfrt_name, tbl->pt_name) == 0) { in pf_opt_create_table()
1331 tbl->pt_name); in pf_opt_create_table()
1339 if (pfctl_define_table(tbl->pt_name, PFR_TFLAG_CONST, 1, in pf_opt_create_table()
1340 pf->astack[0]->name, tbl->pt_buf, pf->astack[0]->ruleset.tticket)) { in pf_opt_create_table()
1342 tbl->pt_name, pf->astack[0]->name); in pf_opt_create_table()
1355 struct superblock *block = NULL; in construct_superblocks() local
1362 if (block == NULL || !superblock_inclusive(block, por)) { in construct_superblocks()
1363 if ((block = calloc(1, sizeof(*block))) == NULL) { in construct_superblocks()
1367 TAILQ_INIT(&block->sb_rules); in construct_superblocks()
1369 TAILQ_INIT(&block->sb_skipsteps[i]); in construct_superblocks()
1370 TAILQ_INSERT_TAIL(superblocks, block, sb_entry); in construct_superblocks()
1372 TAILQ_INSERT_TAIL(&block->sb_rules, por, por_entry); in construct_superblocks()
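
construct_superblocks() walks the rule list in order and starts a new superblock whenever superblock_inclusive() decides the next rule no longer belongs with the current one (different action or other NOMERGE field, a BARRIER field, interface groups, and so on). A toy walk showing only the "break on action change" part of that decision:

#include <stdio.h>

int
main(void)
{
    /* Toy rule stream: 1 = pass, 0 = block. */
    int action[] = { 1, 1, 1, 0, 0, 1 };
    int i, n = sizeof(action) / sizeof(action[0]);
    int nblocks = 0;

    for (i = 0; i < n; i++) {
        /* Start a fresh superblock at the first rule and whenever the
         * action changes; adjacent rules of similar action stay together. */
        if (i == 0 || action[i] != action[i - 1]) {
            nblocks++;
            printf("--- superblock %d ---\n", nblocks);
        }
        printf("rule %d (%s)\n", i, action[i] ? "pass" : "block");
    }
    return (0);
}
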
1385 if (a->neg != b->neg) in addrs_equal()
1387 return (memcmp(&a->addr, &b->addr, sizeof(a->addr)) == 0); in addrs_equal()
1397 if (a->addr.type != PF_ADDR_ADDRMASK || in addrs_combineable()
1398 b->addr.type != PF_ADDR_ADDRMASK) in addrs_combineable()
1400 if (a->neg != b->neg || a->port_op != b->port_op || in addrs_combineable()
1401 a->port[0] != b->port[0] || a->port[1] != b->port[1]) in addrs_combineable()
1425 superblock_inclusive(struct superblock *block, struct pf_opt_rule *por) in superblock_inclusive() argument
1434 if (((char *)&por->por_rule)[j + in superblock_inclusive()
1440 /* per-rule src-track is also a hard break */ in superblock_inclusive()
1441 if (por->por_rule.rule_flag & PFRULE_RULESRCTRACK) in superblock_inclusive()
1447 * block on EXTIFS to any port 22 in superblock_inclusive()
1450 * The optimizer may decide to re-order the pass rule in front of the in superblock_inclusive()
1451 * block rule. But what if EXTIFS includes em0??? Such a reordering in superblock_inclusive()
1456 * Ergo interface groups become a defacto superblock break :-( in superblock_inclusive()
1458 if (interface_group(por->por_rule.ifname) || in superblock_inclusive()
1459 interface_group(TAILQ_FIRST(&block->sb_rules)->por_rule.ifname)) { in superblock_inclusive()
1460 if (strcasecmp(por->por_rule.ifname, in superblock_inclusive()
1461 TAILQ_FIRST(&block->sb_rules)->por_rule.ifname) != 0) in superblock_inclusive()
1465 comparable_rule(&a, &TAILQ_FIRST(&block->sb_rules)->por_rule, NOMERGE); in superblock_inclusive()
1466 comparable_rule(&b, &por->por_rule, NOMERGE); in superblock_inclusive()
1471 for (i = 0; i < sizeof(por->por_rule); i++) { in superblock_inclusive()
1472 int closest = -1; in superblock_inclusive()
1480 por->por_rule.nr, in superblock_inclusive()
1485 if (closest == -1 || in superblock_inclusive()
1486 i-pf_rule_desc[j].prf_offset < in superblock_inclusive()
1487 i-pf_rule_desc[closest].prf_offset) in superblock_inclusive()
1494 por->por_rule.nr, in superblock_inclusive()
1496 i - pf_rule_desc[closest].prf_offset - in superblock_inclusive()
1500 por->por_rule.nr, i); in superblock_inclusive()
1527 if (ioctl(s, SIOCGIFGMEMB, (caddr_t)&ifgr) == -1) { in interface_group()
1567 exclude_supersets(struct pfctl_rule *super, struct pfctl_rule *sub) in exclude_supersets() argument
1569 if (super->ifname[0] == '\0') in exclude_supersets()
1570 memset(sub->ifname, 0, sizeof(sub->ifname)); in exclude_supersets()
1571 if (super->direction == PF_INOUT) in exclude_supersets()
1572 sub->direction = PF_INOUT; in exclude_supersets()
1573 if ((super->proto == 0 || super->proto == sub->proto) && in exclude_supersets()
1574 super->flags == 0 && super->flagset == 0 && (sub->flags || in exclude_supersets()
1575 sub->flagset)) { in exclude_supersets()
1576 sub->flags = super->flags; in exclude_supersets()
1577 sub->flagset = super->flagset; in exclude_supersets()
1579 if (super->proto == 0) in exclude_supersets()
1580 sub->proto = 0; in exclude_supersets()
1582 if (super->src.port_op == 0) { in exclude_supersets()
1583 sub->src.port_op = 0; in exclude_supersets()
1584 sub->src.port[0] = 0; in exclude_supersets()
1585 sub->src.port[1] = 0; in exclude_supersets()
1587 if (super->dst.port_op == 0) { in exclude_supersets()
1588 sub->dst.port_op = 0; in exclude_supersets()
1589 sub->dst.port[0] = 0; in exclude_supersets()
1590 sub->dst.port[1] = 0; in exclude_supersets()
1593 if (super->src.addr.type == PF_ADDR_ADDRMASK && !super->src.neg && in exclude_supersets()
1594 !sub->src.neg && super->src.addr.v.a.mask.addr32[0] == 0 && in exclude_supersets()
1595 super->src.addr.v.a.mask.addr32[1] == 0 && in exclude_supersets()
1596 super->src.addr.v.a.mask.addr32[2] == 0 && in exclude_supersets()
1597 super->src.addr.v.a.mask.addr32[3] == 0) in exclude_supersets()
1598 memset(&sub->src.addr, 0, sizeof(sub->src.addr)); in exclude_supersets()
1599 else if (super->src.addr.type == PF_ADDR_ADDRMASK && in exclude_supersets()
1600 sub->src.addr.type == PF_ADDR_ADDRMASK && in exclude_supersets()
1601 super->src.neg == sub->src.neg && in exclude_supersets()
1602 super->af == sub->af && in exclude_supersets()
1603 unmask(&super->src.addr.v.a.mask, super->af) < in exclude_supersets()
1604 unmask(&sub->src.addr.v.a.mask, sub->af) && in exclude_supersets()
1605 super->src.addr.v.a.addr.addr32[0] == in exclude_supersets()
1606 (sub->src.addr.v.a.addr.addr32[0] & in exclude_supersets()
1607 super->src.addr.v.a.mask.addr32[0]) && in exclude_supersets()
1608 super->src.addr.v.a.addr.addr32[1] == in exclude_supersets()
1609 (sub->src.addr.v.a.addr.addr32[1] & in exclude_supersets()
1610 super->src.addr.v.a.mask.addr32[1]) && in exclude_supersets()
1611 super->src.addr.v.a.addr.addr32[2] == in exclude_supersets()
1612 (sub->src.addr.v.a.addr.addr32[2] & in exclude_supersets()
1613 super->src.addr.v.a.mask.addr32[2]) && in exclude_supersets()
1614 super->src.addr.v.a.addr.addr32[3] == in exclude_supersets()
1615 (sub->src.addr.v.a.addr.addr32[3] & in exclude_supersets()
1616 super->src.addr.v.a.mask.addr32[3])) { in exclude_supersets()
1617 /* sub->src.addr is a subset of super->src.addr/mask */ in exclude_supersets()
1618 memcpy(&sub->src.addr, &super->src.addr, sizeof(sub->src.addr)); in exclude_supersets()
1621 if (super->dst.addr.type == PF_ADDR_ADDRMASK && !super->dst.neg && in exclude_supersets()
1622 !sub->dst.neg && super->dst.addr.v.a.mask.addr32[0] == 0 && in exclude_supersets()
1623 super->dst.addr.v.a.mask.addr32[1] == 0 && in exclude_supersets()
1624 super->dst.addr.v.a.mask.addr32[2] == 0 && in exclude_supersets()
1625 super->dst.addr.v.a.mask.addr32[3] == 0) in exclude_supersets()
1626 memset(&sub->dst.addr, 0, sizeof(sub->dst.addr)); in exclude_supersets()
1627 else if (super->dst.addr.type == PF_ADDR_ADDRMASK && in exclude_supersets()
1628 sub->dst.addr.type == PF_ADDR_ADDRMASK && in exclude_supersets()
1629 super->dst.neg == sub->dst.neg && in exclude_supersets()
1630 super->af == sub->af && in exclude_supersets()
1631 unmask(&super->dst.addr.v.a.mask, super->af) < in exclude_supersets()
1632 unmask(&sub->dst.addr.v.a.mask, sub->af) && in exclude_supersets()
1633 super->dst.addr.v.a.addr.addr32[0] == in exclude_supersets()
1634 (sub->dst.addr.v.a.addr.addr32[0] & in exclude_supersets()
1635 super->dst.addr.v.a.mask.addr32[0]) && in exclude_supersets()
1636 super->dst.addr.v.a.addr.addr32[1] == in exclude_supersets()
1637 (sub->dst.addr.v.a.addr.addr32[1] & in exclude_supersets()
1638 super->dst.addr.v.a.mask.addr32[1]) && in exclude_supersets()
1639 super->dst.addr.v.a.addr.addr32[2] == in exclude_supersets()
1640 (sub->dst.addr.v.a.addr.addr32[2] & in exclude_supersets()
1641 super->dst.addr.v.a.mask.addr32[2]) && in exclude_supersets()
1642 super->dst.addr.v.a.addr.addr32[3] == in exclude_supersets()
1643 (sub->dst.addr.v.a.addr.addr32[3] & in exclude_supersets()
1644 super->dst.addr.v.a.mask.addr32[3])) { in exclude_supersets()
1645 /* sub->dst.addr is a subset of super->dst.addr/mask */ in exclude_supersets()
1646 memcpy(&sub->dst.addr, &super->dst.addr, sizeof(sub->dst.addr)); in exclude_supersets()
1649 if (super->af == 0) in exclude_supersets()
1650 sub->af = 0; in exclude_supersets()
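
The long addr32[]/mask comparison chains above (lines 1605-1616 and 1633-1644) test, one 32-bit word at a time, whether sub's address lies inside super's address/mask network; if it does, sub's address is widened to the superset so the two rules compare as equal elsewhere. The same test for a single IPv4 word, as a minimal sketch (subset_of() is a made-up helper, not a pfctl function):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>

/* Returns 1 when 'sub_addr' lies entirely inside super_net/super_mask,
 * the same word-wise check the matched lines apply to each addr32[]. */
static int
subset_of(uint32_t sub_addr, uint32_t super_net, uint32_t super_mask)
{
    return ((sub_addr & super_mask) == super_net);
}

int
main(void)
{
    struct in_addr sub, net, mask;

    inet_pton(AF_INET, "192.0.2.40", &sub);
    inet_pton(AF_INET, "192.0.2.0", &net);
    inet_pton(AF_INET, "255.255.255.0", &mask);

    printf("subset: %s\n",
        subset_of(sub.s_addr, net.s_addr, mask.s_addr) ? "yes" : "no");
    return (0);
}
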
1655 superblock_free(struct pfctl *pf, struct superblock *block) in superblock_free() argument
1658 while ((por = TAILQ_FIRST(&block->sb_rules))) { in superblock_free()
1659 TAILQ_REMOVE(&block->sb_rules, por, por_entry); in superblock_free()
1660 if (por->por_src_tbl) { in superblock_free()
1661 if (por->por_src_tbl->pt_buf) { in superblock_free()
1662 pfr_buf_clear(por->por_src_tbl->pt_buf); in superblock_free()
1663 free(por->por_src_tbl->pt_buf); in superblock_free()
1665 free(por->por_src_tbl); in superblock_free()
1667 if (por->por_dst_tbl) { in superblock_free()
1668 if (por->por_dst_tbl->pt_buf) { in superblock_free()
1669 pfr_buf_clear(por->por_dst_tbl->pt_buf); in superblock_free()
1670 free(por->por_dst_tbl->pt_buf); in superblock_free()
1672 free(por->por_dst_tbl); in superblock_free()
1676 if (block->sb_profiled_block) in superblock_free()
1677 superblock_free(pf, block->sb_profiled_block); in superblock_free()
1678 free(block); in superblock_free()