Lines Matching +full:inactive +full:-

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
13 * - Redistributions of source code must retain the above copyright
15 * - Redistributions in binary form must reproduce the above
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
226 * XXX - These are new and need to be checked when moving to a new version in pf_ioctl.c
309 * Copy a user-provided string, returning an error if truncation would occur.
311 * guarantee that it's nul-terminated.
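A minimal sketch of a copy routine with that contract, assuming strlcpy(9) semantics (the body of pf_user_strcpy() itself is not among the matched lines):

static int
user_strcpy_sketch(char *dst, const char *src, size_t size)
{
	/* strlcpy() returns strlen(src); a result >= size means it would have truncated. */
	if (strlcpy(dst, src, size) >= size)
		return (EINVAL);	/* report the truncation instead of hiding it */
	return (0);			/* dst is nul-terminated */
}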
346 V_pf_default_rule.nr = -1; in pfattach_vnet()
347 V_pf_default_rule.rtableid = -1; in pfattach_vnet()
449 ruleset->rules[rs_num].active.ticket) in pf_get_kpool()
452 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, in pf_get_kpool()
455 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); in pf_get_kpool()
458 ruleset->rules[rs_num].inactive.ticket) in pf_get_kpool()
461 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, in pf_get_kpool()
464 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr); in pf_get_kpool()
467 while ((rule != NULL) && (rule->nr != rule_number)) in pf_get_kpool()
474 return (&rule->nat); in pf_get_kpool()
476 return (&rule->rdr); in pf_get_kpool()
496 switch (pa->addr.type) { in pf_empty_kpool()
498 pfi_dynaddr_remove(pa->addr.p.dyn); in pf_empty_kpool()
502 if (pa->addr.p.tbl != NULL) in pf_empty_kpool()
503 pfr_detach_table(pa->addr.p.tbl); in pf_empty_kpool()
506 if (pa->kif) in pf_empty_kpool()
507 pfi_kkif_unref(pa->kif); in pf_empty_kpool()
522 rule->rule_ref |= PFRULE_REFS; in pf_unlink_rule_locked()
545 if (rule->tag) in pf_free_eth_rule()
546 tag_unref(&V_pf_tags, rule->tag); in pf_free_eth_rule()
547 if (rule->match_tag) in pf_free_eth_rule()
548 tag_unref(&V_pf_tags, rule->match_tag); in pf_free_eth_rule()
550 pf_qid_unref(rule->qid); in pf_free_eth_rule()
553 if (rule->bridge_to) in pf_free_eth_rule()
554 pfi_kkif_unref(rule->bridge_to); in pf_free_eth_rule()
555 if (rule->kif) in pf_free_eth_rule()
556 pfi_kkif_unref(rule->kif); in pf_free_eth_rule()
558 if (rule->ipsrc.addr.type == PF_ADDR_TABLE) in pf_free_eth_rule()
559 pfr_detach_table(rule->ipsrc.addr.p.tbl); in pf_free_eth_rule()
560 if (rule->ipdst.addr.type == PF_ADDR_TABLE) in pf_free_eth_rule()
561 pfr_detach_table(rule->ipdst.addr.p.tbl); in pf_free_eth_rule()
563 counter_u64_free(rule->evaluations); in pf_free_eth_rule()
565 counter_u64_free(rule->packets[i]); in pf_free_eth_rule()
566 counter_u64_free(rule->bytes[i]); in pf_free_eth_rule()
568 uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp); in pf_free_eth_rule()
581 if (rule->tag) in pf_free_rule()
582 tag_unref(&V_pf_tags, rule->tag); in pf_free_rule()
583 if (rule->match_tag) in pf_free_rule()
584 tag_unref(&V_pf_tags, rule->match_tag); in pf_free_rule()
586 if (rule->pqid != rule->qid) in pf_free_rule()
587 pf_qid_unref(rule->pqid); in pf_free_rule()
588 pf_qid_unref(rule->qid); in pf_free_rule()
590 switch (rule->src.addr.type) { in pf_free_rule()
592 pfi_dynaddr_remove(rule->src.addr.p.dyn); in pf_free_rule()
595 pfr_detach_table(rule->src.addr.p.tbl); in pf_free_rule()
598 switch (rule->dst.addr.type) { in pf_free_rule()
600 pfi_dynaddr_remove(rule->dst.addr.p.dyn); in pf_free_rule()
603 pfr_detach_table(rule->dst.addr.p.tbl); in pf_free_rule()
606 if (rule->overload_tbl) in pf_free_rule()
607 pfr_detach_table(rule->overload_tbl); in pf_free_rule()
608 if (rule->kif) in pf_free_rule()
609 pfi_kkif_unref(rule->kif); in pf_free_rule()
610 if (rule->rcv_kif) in pf_free_rule()
611 pfi_kkif_unref(rule->rcv_kif); in pf_free_rule()
613 pf_empty_kpool(&rule->rdr.list); in pf_free_rule()
614 pf_empty_kpool(&rule->nat.list); in pf_free_rule()
630 ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH, in pf_init_tagset()
632 ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH, in pf_init_tagset()
634 ts->mask = hashsize - 1; in pf_init_tagset()
635 ts->seed = arc4random(); in pf_init_tagset()
637 TAILQ_INIT(&ts->namehash[i]); in pf_init_tagset()
638 TAILQ_INIT(&ts->taghash[i]); in pf_init_tagset()
640 BIT_FILL(TAGID_MAX, &ts->avail); in pf_init_tagset()
654 hashsize = ts->mask + 1; in pf_cleanup_tagset()
656 TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp) in pf_cleanup_tagset()
659 free(ts->namehash, M_PFHASH); in pf_cleanup_tagset()
660 free(ts->taghash, M_PFHASH); in pf_cleanup_tagset()
668 len = strnlen(tagname, PF_TAG_NAME_SIZE - 1); in tagname2hashindex()
669 return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask); in tagname2hashindex()
676 return (tag & ts->mask); in tag2hashindex()
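Both index helpers above reduce the hash with "& ts->mask", which pf_init_tagset() sets to hashsize - 1; that is only equivalent to a modulo when hashsize is a power of two (presumably guaranteed by the caller). For example, with hashsize = 128 the mask is 0x7f, so murmur3_32_hash(tagname, len, seed) & 0x7f yields an index in [0, 127], the same result as % 128 but without a division.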
689 TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries) in tagname2tag()
690 if (strcmp(tagname, tag->name) == 0) { in tagname2tag()
691 tag->ref++; in tagname2tag()
692 return (tag->tag); in tagname2tag()
701 new_tagid = BIT_FFS(TAGID_MAX, &ts->avail); in tagname2tag()
703 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX]. in tagname2tag()
704 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits in tagname2tag()
712 /* Mark the tag as in use. Bits are 0-based for BIT_CLR() */ in tagname2tag()
713 BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail); in tagname2tag()
719 strlcpy(tag->name, tagname, sizeof(tag->name)); in tagname2tag()
720 tag->tag = new_tagid; in tagname2tag()
721 tag->ref = 1; in tagname2tag()
724 TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries); in tagname2tag()
728 TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries); in tagname2tag()
730 return (tag->tag); in tagname2tag()
742 TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries) in tag_unref()
743 if (tag == t->tag) { in tag_unref()
744 if (--t->ref == 0) { in tag_unref()
745 TAILQ_REMOVE(&ts->taghash[index], t, in tag_unref()
747 index = tagname2hashindex(ts, t->name); in tag_unref()
748 TAILQ_REMOVE(&ts->namehash[index], t, in tag_unref()
750 /* Bits are 0-based for BIT_SET() */ in tag_unref()
751 BIT_SET(TAGID_MAX, tag - 1, &ts->avail); in tag_unref()
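The comments above pin down the off-by-one convention between tag values and bit positions; a condensed sketch of the allocate/release pairing, assuming a set bit in ts->avail means the tag is free (which BIT_FILL() at init time suggests):

	/* Allocate: BIT_FFS() is 1-based and returns 0 when no bit is set. */
	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
	if (new_tagid == 0)
		return (0);				/* no free tags left */
	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);	/* tag N occupies bit N - 1 */

	/* Release: hand bit N - 1 back once tag N drops its last reference. */
	BIT_SET(TAGID_MAX, tag - 1, &ts->avail);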
776 /* Purge old inactive rules. */ in pf_begin_eth()
777 TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, in pf_begin_eth()
779 TAILQ_REMOVE(rs->inactive.rules, rule, in pf_begin_eth()
784 *ticket = ++rs->inactive.ticket; in pf_begin_eth()
785 rs->inactive.open = 1; in pf_begin_eth()
797 CURVNET_SET(rs->vnet); in pf_rollback_eth_cb()
800 pf_rollback_eth(rs->inactive.ticket, in pf_rollback_eth_cb()
801 rs->anchor ? rs->anchor->path : ""); in pf_rollback_eth_cb()
819 if (!rs->inactive.open || in pf_rollback_eth()
820 ticket != rs->inactive.ticket) in pf_rollback_eth()
823 /* Purge old inactive rules. */ in pf_rollback_eth()
824 TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, in pf_rollback_eth()
826 TAILQ_REMOVE(rs->inactive.rules, rule, entries); in pf_rollback_eth()
830 rs->inactive.open = 0; in pf_rollback_eth()
840 head[i]->skip[i].ptr = cur; \
856 if (cur->kif != prev->kif || cur->ifnot != prev->ifnot) in pf_eth_calc_skip_steps()
858 if (cur->direction != prev->direction) in pf_eth_calc_skip_steps()
860 if (cur->proto != prev->proto) in pf_eth_calc_skip_steps()
862 if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0) in pf_eth_calc_skip_steps()
864 if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0) in pf_eth_calc_skip_steps()
866 if (cur->ipsrc.neg != prev->ipsrc.neg || in pf_eth_calc_skip_steps()
867 pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr)) in pf_eth_calc_skip_steps()
869 if (cur->ipdst.neg != prev->ipdst.neg || in pf_eth_calc_skip_steps()
870 pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr)) in pf_eth_calc_skip_steps()
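The comparisons above end a field's skip chain as soon as two consecutive rules differ in that field. The intent (described here, not the exact macro) is that each rule's skip[] entry for a field points at the next rule whose value for that field differs, so a packet that fails, say, the interface check can jump over every following rule bound to the same interface. If four consecutive rules name one interface and the fifth names another, the interface skip slot of the first four all point at the fifth, and a packet arriving on some other interface only has its interface checked against rules one and five.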
891 if (!rs->inactive.open || in pf_commit_eth()
892 ticket != rs->inactive.ticket) in pf_commit_eth()
897 pf_eth_calc_skip_steps(rs->inactive.rules); in pf_commit_eth()
899 rules = rs->active.rules; in pf_commit_eth()
900 ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules); in pf_commit_eth()
901 rs->inactive.rules = rules; in pf_commit_eth()
902 rs->inactive.ticket = rs->active.ticket; in pf_commit_eth()
904 /* Clean up inactive rules (i.e. previously active rules), only when in pf_commit_eth()
906 NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx); in pf_commit_eth()
934 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { in pf_begin_altq()
942 pf_qid_unref(altq->qid); in pf_begin_altq()
965 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { in pf_rollback_altq()
973 pf_qid_unref(altq->qid); in pf_rollback_altq()
1004 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { in pf_commit_altq()
1016 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { in pf_commit_altq()
1031 pf_qid_unref(altq->qid); in pf_commit_altq()
1047 if ((ifp = ifunit(altq->ifname)) == NULL) in pf_enable_altq()
1050 if (ifp->if_snd.altq_type != ALTQT_NONE) in pf_enable_altq()
1051 error = altq_enable(&ifp->if_snd); in pf_enable_altq()
1054 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) { in pf_enable_altq()
1055 tb.rate = altq->ifbandwidth; in pf_enable_altq()
1056 tb.depth = altq->tbrsize; in pf_enable_altq()
1057 error = tbr_set(&ifp->if_snd, &tb); in pf_enable_altq()
1070 if ((ifp = ifunit(altq->ifname)) == NULL) in pf_disable_altq()
1077 if (altq->altq_disc != ifp->if_snd.altq_disc) in pf_disable_altq()
1080 error = altq_disable(&ifp->if_snd); in pf_disable_altq()
1085 error = tbr_set(&ifp->if_snd, &tb); in pf_disable_altq()
1099 altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED; in pf_altq_ifnet_event_add()
1100 if ((ifp1 = ifunit(altq->ifname)) == NULL || in pf_altq_ifnet_event_add()
1102 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; in pf_altq_ifnet_event_add()
1124 * No need to re-evaluate the configuration for events on interfaces in pf_altq_ifnet_event()
1128 if (!ALTQ_IS_READY(&ifp->if_snd)) in pf_altq_ifnet_event()
1164 if ((a2->qid = pf_qname2qid(a2->qname)) == 0) { in pf_altq_ifnet_event()
1169 a2->altq_disc = NULL; in pf_altq_ifnet_event()
1171 if (strncmp(a3->ifname, a2->ifname, in pf_altq_ifnet_event()
1173 a2->altq_disc = a3->altq_disc; in pf_altq_ifnet_event()
1230 pf_rule_tree_free(rs->rules[rs_num].inactive.tree); in pf_begin_rules()
1231 rs->rules[rs_num].inactive.tree = tree; in pf_begin_rules()
1233 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { in pf_begin_rules()
1234 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule); in pf_begin_rules()
1235 rs->rules[rs_num].inactive.rcount--; in pf_begin_rules()
1237 *ticket = ++rs->rules[rs_num].inactive.ticket; in pf_begin_rules()
1238 rs->rules[rs_num].inactive.open = 1; in pf_begin_rules()
1253 if (rs == NULL || !rs->rules[rs_num].inactive.open || in pf_rollback_rules()
1254 rs->rules[rs_num].inactive.ticket != ticket) in pf_rollback_rules()
1256 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { in pf_rollback_rules()
1257 pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule); in pf_rollback_rules()
1258 rs->rules[rs_num].inactive.rcount--; in pf_rollback_rules()
1260 rs->rules[rs_num].inactive.open = 0; in pf_rollback_rules()
1265 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
1268 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
1271 (stor) = htonl((st)->elm); \
1276 (stor) = htons((st)->elm); \
1284 switch (pfr->addr.type) { in pf_hash_rule_addr()
1311 pf_hash_rule_addr(ctx, &rule->src); in pf_hash_rule_rolling()
1312 pf_hash_rule_addr(ctx, &rule->dst); in pf_hash_rule_rolling()
1347 if (rule->anchor != NULL) in pf_hash_rule_rolling()
1348 PF_MD5_UPD_STR(rule, anchor->path); in pf_hash_rule_rolling()
1358 MD5Final(rule->md5sum, &ctx); in pf_hash_rule()
1365 return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH)); in pf_krule_compare()
1383 if (rs == NULL || !rs->rules[rs_num].inactive.open || in pf_commit_rules()
1384 ticket != rs->rules[rs_num].inactive.ticket) in pf_commit_rules()
1395 old_rules = rs->rules[rs_num].active.ptr; in pf_commit_rules()
1396 old_rcount = rs->rules[rs_num].active.rcount; in pf_commit_rules()
1397 old_array = rs->rules[rs_num].active.ptr_array; in pf_commit_rules()
1398 old_tree = rs->rules[rs_num].active.tree; in pf_commit_rules()
1400 rs->rules[rs_num].active.ptr = in pf_commit_rules()
1401 rs->rules[rs_num].inactive.ptr; in pf_commit_rules()
1402 rs->rules[rs_num].active.ptr_array = in pf_commit_rules()
1403 rs->rules[rs_num].inactive.ptr_array; in pf_commit_rules()
1404 rs->rules[rs_num].active.tree = in pf_commit_rules()
1405 rs->rules[rs_num].inactive.tree; in pf_commit_rules()
1406 rs->rules[rs_num].active.rcount = in pf_commit_rules()
1407 rs->rules[rs_num].inactive.rcount; in pf_commit_rules()
1411 TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr, in pf_commit_rules()
1418 pf_counter_u64_rollup_protected(&rule->evaluations, in pf_commit_rules()
1419 pf_counter_u64_fetch(&old_rule->evaluations)); in pf_commit_rules()
1420 pf_counter_u64_rollup_protected(&rule->packets[0], in pf_commit_rules()
1421 pf_counter_u64_fetch(&old_rule->packets[0])); in pf_commit_rules()
1422 pf_counter_u64_rollup_protected(&rule->packets[1], in pf_commit_rules()
1423 pf_counter_u64_fetch(&old_rule->packets[1])); in pf_commit_rules()
1424 pf_counter_u64_rollup_protected(&rule->bytes[0], in pf_commit_rules()
1425 pf_counter_u64_fetch(&old_rule->bytes[0])); in pf_commit_rules()
1426 pf_counter_u64_rollup_protected(&rule->bytes[1], in pf_commit_rules()
1427 pf_counter_u64_fetch(&old_rule->bytes[1])); in pf_commit_rules()
1432 rs->rules[rs_num].inactive.ptr = old_rules; in pf_commit_rules()
1433 rs->rules[rs_num].inactive.ptr_array = old_array; in pf_commit_rules()
1434 rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */ in pf_commit_rules()
1435 rs->rules[rs_num].inactive.rcount = old_rcount; in pf_commit_rules()
1437 rs->rules[rs_num].active.ticket = in pf_commit_rules()
1438 rs->rules[rs_num].inactive.ticket; in pf_commit_rules()
1439 pf_calc_skip_steps(rs->rules[rs_num].active.ptr); in pf_commit_rules()
1446 if (rs->rules[rs_num].inactive.ptr_array) in pf_commit_rules()
1447 free(rs->rules[rs_num].inactive.ptr_array, M_TEMP); in pf_commit_rules()
1448 rs->rules[rs_num].inactive.ptr_array = NULL; in pf_commit_rules()
1449 rs->rules[rs_num].inactive.rcount = 0; in pf_commit_rules()
1450 rs->rules[rs_num].inactive.open = 0; in pf_commit_rules()
1471 if (rs->rules[rs_cnt].inactive.ptr_array) in pf_setup_pfsync_matching()
1472 free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP); in pf_setup_pfsync_matching()
1473 rs->rules[rs_cnt].inactive.ptr_array = NULL; in pf_setup_pfsync_matching()
1475 if (rs->rules[rs_cnt].inactive.rcount) { in pf_setup_pfsync_matching()
1476 rs->rules[rs_cnt].inactive.ptr_array = in pf_setup_pfsync_matching()
1477 mallocarray(rs->rules[rs_cnt].inactive.rcount, in pf_setup_pfsync_matching()
1481 if (!rs->rules[rs_cnt].inactive.ptr_array) in pf_setup_pfsync_matching()
1485 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr, in pf_setup_pfsync_matching()
1488 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule; in pf_setup_pfsync_matching()
1502 switch (addr->type) { in pf_eth_addr_setup()
1504 addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname); in pf_eth_addr_setup()
1505 if (addr->p.tbl == NULL) in pf_eth_addr_setup()
1521 switch (addr->type) { in pf_addr_setup()
1523 addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname); in pf_addr_setup()
1524 if (addr->p.tbl == NULL) in pf_addr_setup()
1539 switch (addr->type) { in pf_addr_copyout()
1556 bcopy(&in->addr, &out->addr, sizeof(struct pf_addr)); in pf_src_node_copy()
1557 bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr)); in pf_src_node_copy()
1559 if (in->rule != NULL) in pf_src_node_copy()
1560 out->rule.nr = in->rule->nr; in pf_src_node_copy()
1563 out->bytes[i] = counter_u64_fetch(in->bytes[i]); in pf_src_node_copy()
1564 out->packets[i] = counter_u64_fetch(in->packets[i]); in pf_src_node_copy()
1567 out->states = in->states; in pf_src_node_copy()
1568 out->conn = in->conn; in pf_src_node_copy()
1569 out->af = in->af; in pf_src_node_copy()
1570 out->ruletype = in->ruletype; in pf_src_node_copy()
1572 out->creation = secs - in->creation; in pf_src_node_copy()
1573 if (out->expire > secs) in pf_src_node_copy()
1574 out->expire -= secs; in pf_src_node_copy()
1576 out->expire = 0; in pf_src_node_copy()
1579 out->conn_rate = in->conn_rate; in pf_src_node_copy()
1580 diff = secs - in->conn_rate.last; in pf_src_node_copy()
1581 if (diff >= in->conn_rate.seconds) in pf_src_node_copy()
1582 out->conn_rate.count = 0; in pf_src_node_copy()
1584 out->conn_rate.count -= in pf_src_node_copy()
1585 in->conn_rate.count * diff / in pf_src_node_copy()
1586 in->conn_rate.seconds; in pf_src_node_copy()
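Worked example for the decay above (illustrative numbers): with a window of conn_rate.seconds = 10, a stored count of 50, and diff = 4 seconds since conn_rate.last, the exported count is reduced by 50 * 4 / 10 = 20, i.e. the count decays linearly across the window; once diff reaches the full 10 seconds, the diff >= seconds branch reports it as 0.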
1602 version = pa->version; in pf_export_kaltq()
1607 #define ASSIGN(x) exported_q->x = q->x in pf_export_kaltq()
1609 bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x))) in pf_export_kaltq()
1616 &((struct pfioc_altq_v0 *)pa)->altq; in pf_export_kaltq()
1622 exported_q->tbrsize = SATU16(q->tbrsize); in pf_export_kaltq()
1623 exported_q->ifbandwidth = SATU32(q->ifbandwidth); in pf_export_kaltq()
1628 exported_q->bandwidth = SATU32(q->bandwidth); in pf_export_kaltq()
1635 if (q->scheduler == ALTQT_HFSC) { in pf_export_kaltq()
1636 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x in pf_export_kaltq()
1637 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \ in pf_export_kaltq()
1638 SATU32(q->pq_u.hfsc_opts.x) in pf_export_kaltq()
1664 &((struct pfioc_altq_v1 *)pa)->altq; in pf_export_kaltq()
1711 version = pa->version; in pf_import_kaltq()
1716 #define ASSIGN(x) q->x = imported_q->x in pf_import_kaltq()
1718 bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x))) in pf_import_kaltq()
1723 &((struct pfioc_altq_v0 *)pa)->altq; in pf_import_kaltq()
1728 ASSIGN(tbrsize); /* 16-bit -> 32-bit */ in pf_import_kaltq()
1729 ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */ in pf_import_kaltq()
1734 ASSIGN(bandwidth); /* 32-bit -> 64-bit */ in pf_import_kaltq()
1741 if (imported_q->scheduler == ALTQT_HFSC) { in pf_import_kaltq()
1742 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x in pf_import_kaltq()
1746 * 32-bit to 64-bit. in pf_import_kaltq()
1771 &((struct pfioc_altq_v1 *)pa)->altq; in pf_import_kaltq()
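The v0 export above clamps wider kernel fields down with SATU16()/SATU32(), while the v0 import only widens (the "16-bit -> 32-bit" comments) and can assign directly. The macros are defined outside the matched lines; a plausible shape, given purely as an assumption:

#define SATU16(x)	((x) > USHRT_MAX ? USHRT_MAX : (u_int16_t)(x))
#define SATU32(x)	((x) > UINT_MAX ? UINT_MAX : (u_int32_t)(x))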
1833 mtx_init(&rule->nat.mtx, "pf_krule_nat_pool", NULL, MTX_DEF); in pf_krule_alloc()
1834 mtx_init(&rule->rdr.mtx, "pf_krule_rdr_pool", NULL, MTX_DEF); in pf_krule_alloc()
1835 rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone, in pf_krule_alloc()
1851 if (rule->allrulelinked) { in pf_krule_free()
1856 V_pf_allrulecount--; in pf_krule_free()
1862 pf_counter_u64_deinit(&rule->evaluations); in pf_krule_free()
1864 pf_counter_u64_deinit(&rule->packets[i]); in pf_krule_free()
1865 pf_counter_u64_deinit(&rule->bytes[i]); in pf_krule_free()
1867 counter_u64_free(rule->states_cur); in pf_krule_free()
1868 counter_u64_free(rule->states_tot); in pf_krule_free()
1869 counter_u64_free(rule->src_nodes); in pf_krule_free()
1870 uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp); in pf_krule_free()
1872 mtx_destroy(&rule->nat.mtx); in pf_krule_free()
1873 mtx_destroy(&rule->rdr.mtx); in pf_krule_free()
1880 pf_counter_u64_zero(&rule->evaluations); in pf_krule_clear_counters()
1882 pf_counter_u64_zero(&rule->packets[i]); in pf_krule_clear_counters()
1883 pf_counter_u64_zero(&rule->bytes[i]); in pf_krule_clear_counters()
1885 counter_u64_zero(rule->states_tot); in pf_krule_clear_counters()
1894 bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr)); in pf_kpooladdr_to_pooladdr()
1895 strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname)); in pf_kpooladdr_to_pooladdr()
1905 bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr)); in pf_pooladdr_to_kpooladdr()
1906 ret = pf_user_strcpy(kpool->ifname, pool->ifname, in pf_pooladdr_to_kpooladdr()
1907 sizeof(kpool->ifname)); in pf_pooladdr_to_kpooladdr()
1914 _Static_assert(sizeof(pool->key) == sizeof(kpool->key), ""); in pf_pool_to_kpool()
1915 _Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), ""); in pf_pool_to_kpool()
1917 bcopy(&pool->key, &kpool->key, sizeof(kpool->key)); in pf_pool_to_kpool()
1918 bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter)); in pf_pool_to_kpool()
1920 kpool->tblidx = pool->tblidx; in pf_pool_to_kpool()
1921 kpool->proxy_port[0] = pool->proxy_port[0]; in pf_pool_to_kpool()
1922 kpool->proxy_port[1] = pool->proxy_port[1]; in pf_pool_to_kpool()
1923 kpool->opts = pool->opts; in pf_pool_to_kpool()
1932 if (rule->af == AF_INET) { in pf_rule_to_krule()
1937 if (rule->af == AF_INET6) { in pf_rule_to_krule()
1942 ret = pf_check_rule_addr(&rule->src); in pf_rule_to_krule()
1945 ret = pf_check_rule_addr(&rule->dst); in pf_rule_to_krule()
1949 bcopy(&rule->src, &krule->src, sizeof(rule->src)); in pf_rule_to_krule()
1950 bcopy(&rule->dst, &krule->dst, sizeof(rule->dst)); in pf_rule_to_krule()
1952 ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label)); in pf_rule_to_krule()
1955 ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname)); in pf_rule_to_krule()
1958 ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname)); in pf_rule_to_krule()
1961 ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname)); in pf_rule_to_krule()
1964 ret = pf_user_strcpy(krule->tagname, rule->tagname, in pf_rule_to_krule()
1965 sizeof(rule->tagname)); in pf_rule_to_krule()
1968 ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname, in pf_rule_to_krule()
1969 sizeof(rule->match_tagname)); in pf_rule_to_krule()
1972 ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname, in pf_rule_to_krule()
1973 sizeof(rule->overload_tblname)); in pf_rule_to_krule()
1977 pf_pool_to_kpool(&rule->rpool, &krule->rdr); in pf_rule_to_krule()
1982 krule->os_fingerprint = rule->os_fingerprint; in pf_rule_to_krule()
1984 krule->rtableid = rule->rtableid; in pf_rule_to_krule()
1985 /* pf_rule->timeout is smaller than pf_krule->timeout */ in pf_rule_to_krule()
1986 bcopy(rule->timeout, krule->timeout, sizeof(rule->timeout)); in pf_rule_to_krule()
1987 krule->max_states = rule->max_states; in pf_rule_to_krule()
1988 krule->max_src_nodes = rule->max_src_nodes; in pf_rule_to_krule()
1989 krule->max_src_states = rule->max_src_states; in pf_rule_to_krule()
1990 krule->max_src_conn = rule->max_src_conn; in pf_rule_to_krule()
1991 krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit; in pf_rule_to_krule()
1992 krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds; in pf_rule_to_krule()
1993 krule->qid = rule->qid; in pf_rule_to_krule()
1994 krule->pqid = rule->pqid; in pf_rule_to_krule()
1995 krule->nr = rule->nr; in pf_rule_to_krule()
1996 krule->prob = rule->prob; in pf_rule_to_krule()
1997 krule->cuid = rule->cuid; in pf_rule_to_krule()
1998 krule->cpid = rule->cpid; in pf_rule_to_krule()
2000 krule->return_icmp = rule->return_icmp; in pf_rule_to_krule()
2001 krule->return_icmp6 = rule->return_icmp6; in pf_rule_to_krule()
2002 krule->max_mss = rule->max_mss; in pf_rule_to_krule()
2003 krule->tag = rule->tag; in pf_rule_to_krule()
2004 krule->match_tag = rule->match_tag; in pf_rule_to_krule()
2005 krule->scrub_flags = rule->scrub_flags; in pf_rule_to_krule()
2007 bcopy(&rule->uid, &krule->uid, sizeof(krule->uid)); in pf_rule_to_krule()
2008 bcopy(&rule->gid, &krule->gid, sizeof(krule->gid)); in pf_rule_to_krule()
2010 krule->rule_flag = rule->rule_flag; in pf_rule_to_krule()
2011 krule->action = rule->action; in pf_rule_to_krule()
2012 krule->direction = rule->direction; in pf_rule_to_krule()
2013 krule->log = rule->log; in pf_rule_to_krule()
2014 krule->logif = rule->logif; in pf_rule_to_krule()
2015 krule->quick = rule->quick; in pf_rule_to_krule()
2016 krule->ifnot = rule->ifnot; in pf_rule_to_krule()
2017 krule->match_tag_not = rule->match_tag_not; in pf_rule_to_krule()
2018 krule->natpass = rule->natpass; in pf_rule_to_krule()
2020 krule->keep_state = rule->keep_state; in pf_rule_to_krule()
2021 krule->af = rule->af; in pf_rule_to_krule()
2022 krule->proto = rule->proto; in pf_rule_to_krule()
2023 krule->type = rule->type; in pf_rule_to_krule()
2024 krule->code = rule->code; in pf_rule_to_krule()
2025 krule->flags = rule->flags; in pf_rule_to_krule()
2026 krule->flagset = rule->flagset; in pf_rule_to_krule()
2027 krule->min_ttl = rule->min_ttl; in pf_rule_to_krule()
2028 krule->allow_opts = rule->allow_opts; in pf_rule_to_krule()
2029 krule->rt = rule->rt; in pf_rule_to_krule()
2030 krule->return_ttl = rule->return_ttl; in pf_rule_to_krule()
2031 krule->tos = rule->tos; in pf_rule_to_krule()
2032 krule->set_tos = rule->set_tos; in pf_rule_to_krule()
2034 krule->flush = rule->flush; in pf_rule_to_krule()
2035 krule->prio = rule->prio; in pf_rule_to_krule()
2036 krule->set_prio[0] = rule->set_prio[0]; in pf_rule_to_krule()
2037 krule->set_prio[1] = rule->set_prio[1]; in pf_rule_to_krule()
2039 bcopy(&rule->divert, &krule->divert, sizeof(krule->divert)); in pf_rule_to_krule()
2052 ruleset = pf_find_kruleset(pr->anchor); in pf_ioctl_getrules()
2057 rs_num = pf_get_ruleset_number(pr->rule.action); in pf_ioctl_getrules()
2062 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, in pf_ioctl_getrules()
2065 pr->nr = tail->nr + 1; in pf_ioctl_getrules()
2067 pr->nr = 0; in pf_ioctl_getrules()
2068 pr->ticket = ruleset->rules[rs_num].active.ticket; in pf_ioctl_getrules()
2086 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) { in pf_ioctl_addrule()
2093 if (rule->ifname[0]) in pf_ioctl_addrule()
2095 if (rule->rcv_ifname[0]) in pf_ioctl_addrule()
2097 pf_counter_u64_init(&rule->evaluations, M_WAITOK); in pf_ioctl_addrule()
2099 pf_counter_u64_init(&rule->packets[i], M_WAITOK); in pf_ioctl_addrule()
2100 pf_counter_u64_init(&rule->bytes[i], M_WAITOK); in pf_ioctl_addrule()
2102 rule->states_cur = counter_u64_alloc(M_WAITOK); in pf_ioctl_addrule()
2103 rule->states_tot = counter_u64_alloc(M_WAITOK); in pf_ioctl_addrule()
2104 rule->src_nodes = counter_u64_alloc(M_WAITOK); in pf_ioctl_addrule()
2105 rule->cuid = uid; in pf_ioctl_addrule()
2106 rule->cpid = pid; in pf_ioctl_addrule()
2107 TAILQ_INIT(&rule->rdr.list); in pf_ioctl_addrule()
2108 TAILQ_INIT(&rule->nat.list); in pf_ioctl_addrule()
2114 MPASS(!rule->allrulelinked); in pf_ioctl_addrule()
2115 rule->allrulelinked = true; in pf_ioctl_addrule()
2121 rs_num = pf_get_ruleset_number(rule->action); in pf_ioctl_addrule()
2124 if (ticket != ruleset->rules[rs_num].inactive.ticket) { in pf_ioctl_addrule()
2127 ruleset->rules[rs_num].inactive.ticket)); in pf_ioctl_addrule()
2143 if (ruleset->rules[rs_num].inactive.tree == NULL) { in pf_ioctl_addrule()
2147 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, in pf_ioctl_addrule()
2150 rule->nr = tail->nr + 1; in pf_ioctl_addrule()
2152 rule->nr = 0; in pf_ioctl_addrule()
2153 if (rule->ifname[0]) { in pf_ioctl_addrule()
2154 rule->kif = pfi_kkif_attach(kif, rule->ifname); in pf_ioctl_addrule()
2156 pfi_kkif_ref(rule->kif); in pf_ioctl_addrule()
2158 rule->kif = NULL; in pf_ioctl_addrule()
2160 if (rule->rcv_ifname[0]) { in pf_ioctl_addrule()
2161 rule->rcv_kif = pfi_kkif_attach(rcv_kif, rule->rcv_ifname); in pf_ioctl_addrule()
2163 pfi_kkif_ref(rule->rcv_kif); in pf_ioctl_addrule()
2165 rule->rcv_kif = NULL; in pf_ioctl_addrule()
2167 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs) in pf_ioctl_addrule()
2172 if (rule->qname[0] != 0) { in pf_ioctl_addrule()
2173 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) in pf_ioctl_addrule()
2175 else if (rule->pqname[0] != 0) { in pf_ioctl_addrule()
2176 if ((rule->pqid = in pf_ioctl_addrule()
2177 pf_qname2qid(rule->pqname)) == 0) in pf_ioctl_addrule()
2180 rule->pqid = rule->qid; in pf_ioctl_addrule()
2183 if (rule->tagname[0]) in pf_ioctl_addrule()
2184 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) in pf_ioctl_addrule()
2186 if (rule->match_tagname[0]) in pf_ioctl_addrule()
2187 if ((rule->match_tag = in pf_ioctl_addrule()
2188 pf_tagname2tag(rule->match_tagname)) == 0) in pf_ioctl_addrule()
2190 if (rule->rt && !rule->direction) in pf_ioctl_addrule()
2192 if (!rule->log) in pf_ioctl_addrule()
2193 rule->logif = 0; in pf_ioctl_addrule()
2194 if (rule->logif >= PFLOGIFS_MAX) in pf_ioctl_addrule()
2196 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) in pf_ioctl_addrule()
2198 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) in pf_ioctl_addrule()
2202 if (rule->scrub_flags & PFSTATE_SETPRIO && in pf_ioctl_addrule()
2203 (rule->set_prio[0] > PF_PRIO_MAX || in pf_ioctl_addrule()
2204 rule->set_prio[1] > PF_PRIO_MAX)) in pf_ioctl_addrule()
2208 if (pa->addr.type == PF_ADDR_TABLE) { in pf_ioctl_addrule()
2209 pa->addr.p.tbl = pfr_attach_table(ruleset, in pf_ioctl_addrule()
2210 pa->addr.v.tblname); in pf_ioctl_addrule()
2211 if (pa->addr.p.tbl == NULL) in pf_ioctl_addrule()
2216 rule->overload_tbl = NULL; in pf_ioctl_addrule()
2217 if (rule->overload_tblname[0]) { in pf_ioctl_addrule()
2218 if ((rule->overload_tbl = pfr_attach_table(ruleset, in pf_ioctl_addrule()
2219 rule->overload_tblname)) == NULL) in pf_ioctl_addrule()
2222 rule->overload_tbl->pfrkt_flags |= in pf_ioctl_addrule()
2226 pf_mv_kpool(&V_pf_pabuf[0], &rule->nat.list); in pf_ioctl_addrule()
2227 pf_mv_kpool(&V_pf_pabuf[1], &rule->rdr.list); in pf_ioctl_addrule()
2228 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || in pf_ioctl_addrule()
2229 (rule->action == PF_BINAT)) && rule->anchor == NULL) || in pf_ioctl_addrule()
2230 (rule->rt > PF_NOPFROUTE)) && in pf_ioctl_addrule()
2231 (TAILQ_FIRST(&rule->rdr.list) == NULL)) in pf_ioctl_addrule()
2234 if (rule->action == PF_PASS && rule->rdr.opts & PF_POOL_STICKYADDR && in pf_ioctl_addrule()
2235 !rule->keep_state) { in pf_ioctl_addrule()
2245 rule->nat.cur = TAILQ_FIRST(&rule->nat.list); in pf_ioctl_addrule()
2246 rule->rdr.cur = TAILQ_FIRST(&rule->rdr.list); in pf_ioctl_addrule()
2247 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, in pf_ioctl_addrule()
2249 ruleset->rules[rs_num].inactive.rcount++; in pf_ioctl_addrule()
2253 if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) { in pf_ioctl_addrule()
2255 TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries); in pf_ioctl_addrule()
2256 ruleset->rules[rs_num].inactive.rcount--; in pf_ioctl_addrule()
2281 while (*rule->label[i]) { in pf_label_match()
2282 if (strcmp(rule->label[i], label) == 0) in pf_label_match()
2323 LIST_FOREACH(s, &ih->states, entry) { in pf_killstates_row()
2325 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; in pf_killstates_row()
2327 sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE]; in pf_killstates_row()
2328 if (s->direction == PF_OUT) { in pf_killstates_row()
2329 srcaddr = &sk->addr[1]; in pf_killstates_row()
2330 dstaddr = &sk->addr[0]; in pf_killstates_row()
2331 srcport = sk->port[1]; in pf_killstates_row()
2332 dstport = sk->port[0]; in pf_killstates_row()
2334 srcaddr = &sk->addr[0]; in pf_killstates_row()
2335 dstaddr = &sk->addr[1]; in pf_killstates_row()
2336 srcport = sk->port[0]; in pf_killstates_row()
2337 dstport = sk->port[1]; in pf_killstates_row()
2340 if (psk->psk_af && sk->af != psk->psk_af) in pf_killstates_row()
2343 if (psk->psk_proto && psk->psk_proto != sk->proto) in pf_killstates_row()
2346 if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr, in pf_killstates_row()
2347 &psk->psk_src.addr.v.a.mask, srcaddr, sk->af)) in pf_killstates_row()
2350 if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr, in pf_killstates_row()
2351 &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af)) in pf_killstates_row()
2354 if (! PF_MATCHA(psk->psk_rt_addr.neg, in pf_killstates_row()
2355 &psk->psk_rt_addr.addr.v.a.addr, in pf_killstates_row()
2356 &psk->psk_rt_addr.addr.v.a.mask, in pf_killstates_row()
2357 &s->act.rt_addr, sk->af)) in pf_killstates_row()
2360 if (psk->psk_src.port_op != 0 && in pf_killstates_row()
2361 ! pf_match_port(psk->psk_src.port_op, in pf_killstates_row()
2362 psk->psk_src.port[0], psk->psk_src.port[1], srcport)) in pf_killstates_row()
2365 if (psk->psk_dst.port_op != 0 && in pf_killstates_row()
2366 ! pf_match_port(psk->psk_dst.port_op, in pf_killstates_row()
2367 psk->psk_dst.port[0], psk->psk_dst.port[1], dstport)) in pf_killstates_row()
2370 if (psk->psk_label[0] && in pf_killstates_row()
2371 ! pf_label_match(s->rule, psk->psk_label)) in pf_killstates_row()
2374 if (psk->psk_ifname[0] && strcmp(psk->psk_ifname, in pf_killstates_row()
2375 kif->pfik_name)) in pf_killstates_row()
2378 if (psk->psk_kill_match) { in pf_killstates_row()
2384 if (s->direction == PF_OUT) { in pf_killstates_row()
2386 idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK; in pf_killstates_row()
2389 idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE; in pf_killstates_row()
2392 match_key.af = s->key[idx]->af; in pf_killstates_row()
2393 match_key.proto = s->key[idx]->proto; in pf_killstates_row()
2395 &s->key[idx]->addr[1], match_key.af); in pf_killstates_row()
2396 match_key.port[0] = s->key[idx]->port[1]; in pf_killstates_row()
2398 &s->key[idx]->addr[0], match_key.af); in pf_killstates_row()
2399 match_key.port[1] = s->key[idx]->port[0]; in pf_killstates_row()
2405 if (psk->psk_kill_match) in pf_killstates_row()
2425 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) in pf_start()
2569 MPASS(pp->which == PF_RDR || pp->which == PF_NAT); in pf_ioctl_add_addr()
2572 if (pp->af == AF_INET) in pf_ioctl_add_addr()
2576 if (pp->af == AF_INET6) in pf_ioctl_add_addr()
2580 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && in pf_ioctl_add_addr()
2581 pp->addr.addr.type != PF_ADDR_DYNIFTL && in pf_ioctl_add_addr()
2582 pp->addr.addr.type != PF_ADDR_TABLE) in pf_ioctl_add_addr()
2585 if (pp->addr.addr.p.dyn != NULL) in pf_ioctl_add_addr()
2589 error = pf_pooladdr_to_kpooladdr(&pp->addr, pa); in pf_ioctl_add_addr()
2592 if (pa->ifname[0]) in pf_ioctl_add_addr()
2595 if (pp->ticket != V_ticket_pabuf) { in pf_ioctl_add_addr()
2597 if (pa->ifname[0]) in pf_ioctl_add_addr()
2602 if (pa->ifname[0]) { in pf_ioctl_add_addr()
2603 pa->kif = pfi_kkif_attach(kif, pa->ifname); in pf_ioctl_add_addr()
2605 pfi_kkif_ref(pa->kif); in pf_ioctl_add_addr()
2607 pa->kif = NULL; in pf_ioctl_add_addr()
2608 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error = in pf_ioctl_add_addr()
2609 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) { in pf_ioctl_add_addr()
2610 if (pa->ifname[0]) in pf_ioctl_add_addr()
2611 pfi_kkif_unref(pa->kif); in pf_ioctl_add_addr()
2615 TAILQ_INSERT_TAIL(&V_pf_pabuf[pp->which == PF_RDR ? 1 : 0], in pf_ioctl_add_addr()
2634 MPASS(pp->which == PF_RDR || pp->which == PF_NAT); in pf_ioctl_get_addrs()
2636 pp->anchor[sizeof(pp->anchor) - 1] = 0; in pf_ioctl_get_addrs()
2637 pp->nr = 0; in pf_ioctl_get_addrs()
2640 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, in pf_ioctl_get_addrs()
2641 pp->r_num, 0, 1, 0, pp->which); in pf_ioctl_get_addrs()
2646 TAILQ_FOREACH(pa, &pool->list, entries) in pf_ioctl_get_addrs()
2647 pp->nr++; in pf_ioctl_get_addrs()
2660 MPASS(pp->which == PF_RDR || pp->which == PF_NAT); in pf_ioctl_get_addr()
2664 pp->anchor[sizeof(pp->anchor) - 1] = 0; in pf_ioctl_get_addr()
2667 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, in pf_ioctl_get_addr()
2668 pp->r_num, 0, 1, 1, pp->which); in pf_ioctl_get_addr()
2673 pa = TAILQ_FIRST(&pool->list); in pf_ioctl_get_addr()
2674 while ((pa != NULL) && (nr < pp->nr)) { in pf_ioctl_get_addr()
2682 pf_kpooladdr_to_pooladdr(pa, &pp->addr); in pf_ioctl_get_addr()
2683 pf_addr_copyout(&pp->addr.addr); in pf_ioctl_get_addr()
2697 pr->path[sizeof(pr->path) - 1] = 0; in pf_ioctl_get_rulesets()
2700 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { in pf_ioctl_get_rulesets()
2704 pr->nr = 0; in pf_ioctl_get_rulesets()
2705 if (ruleset->anchor == NULL) { in pf_ioctl_get_rulesets()
2708 if (anchor->parent == NULL) in pf_ioctl_get_rulesets()
2709 pr->nr++; in pf_ioctl_get_rulesets()
2712 &ruleset->anchor->children) in pf_ioctl_get_rulesets()
2713 pr->nr++; in pf_ioctl_get_rulesets()
2731 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { in pf_ioctl_get_ruleset()
2736 pr->name[0] = 0; in pf_ioctl_get_ruleset()
2737 if (ruleset->anchor == NULL) { in pf_ioctl_get_ruleset()
2740 if (anchor->parent == NULL && nr++ == pr->nr) { in pf_ioctl_get_ruleset()
2741 strlcpy(pr->name, anchor->name, in pf_ioctl_get_ruleset()
2742 sizeof(pr->name)); in pf_ioctl_get_ruleset()
2747 &ruleset->anchor->children) in pf_ioctl_get_ruleset()
2748 if (nr++ == pr->nr) { in pf_ioctl_get_ruleset()
2749 strlcpy(pr->name, anchor->name, in pf_ioctl_get_ruleset()
2750 sizeof(pr->name)); in pf_ioctl_get_ruleset()
2754 if (!pr->name[0]) in pf_ioctl_get_ruleset()
2776 if (securelevel_gt(td->td_ucred, 2)) in pfioctl()
2833 if (((struct pfioc_table *)addr)->pfrio_flags & in pfioctl()
2890 if (((struct pfioc_table *)addr)->pfrio_flags & in pfioctl()
2927 if (nv->len > pf_ioctl_maxcount) in pfioctl()
2931 packed = malloc(nv->len, M_NVLIST, M_WAITOK); in pfioctl()
2932 error = copyin(nv->data, packed, nv->len); in pfioctl()
2936 nvl = nvlist_unpack(packed, nv->len, 0); in pfioctl()
2962 ticket = rs->active.ticket; in pfioctl()
2963 tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq); in pfioctl()
2965 nr = tail->nr + 1; in pfioctl()
2974 packed = nvlist_pack(nvl, &nv->len); in pfioctl()
2978 if (nv->size == 0) in pfioctl()
2980 else if (nv->size < nv->len) in pfioctl()
2983 error = copyout(packed, nv->data, nv->len); in pfioctl()
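The nvlist-based cases above (and the ones that follow) all repeat one copyin/unpack/pack/copyout shape; a condensed sketch of that round trip, with error paths simplified and helper names assumed:

static int
pf_nv_roundtrip_sketch(struct pfioc_nv *nv)
{
	nvlist_t	*nvl;
	void		*packed;
	int		 error;

	if (nv->len > pf_ioctl_maxcount)
		return (ENOMEM);		/* refuse oversized requests */
	packed = malloc(nv->len, M_NVLIST, M_WAITOK);
	error = copyin(nv->data, packed, nv->len);
	if (error != 0) {
		free(packed, M_NVLIST);
		return (error);
	}
	nvl = nvlist_unpack(packed, nv->len, 0);
	free(packed, M_NVLIST);
	if (nvl == NULL)
		return (EBADMSG);

	/* ... read request fields from nvl, add reply fields to nvl ... */

	packed = nvlist_pack(nvl, &nv->len);
	nvlist_destroy(nvl);
	if (packed == NULL)
		return (ENOMEM);
	if (nv->size < nv->len)
		error = ENOSPC;			/* caller's reply buffer is too small */
	else
		error = copyout(packed, nv->data, nv->len);
	free(packed, M_NVLIST);
	return (error);
}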
3005 if (nv->len > pf_ioctl_maxcount) in pfioctl()
3008 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); in pfioctl()
3009 error = copyin(nv->data, nvlpacked, nv->len); in pfioctl()
3013 nvl = nvlist_unpack(nvlpacked, nv->len, 0); in pfioctl()
3039 if (ticket != rs->active.ticket) { in pfioctl()
3049 rule = TAILQ_FIRST(rs->active.rules); in pfioctl()
3050 while ((rule != NULL) && (rule->nr != nr)) in pfioctl()
3068 nvlpacked = nvlist_pack(nvl, &nv->len); in pfioctl()
3072 if (nv->size == 0) in pfioctl()
3074 else if (nv->size < nv->len) in pfioctl()
3077 error = copyout(nvlpacked, nv->data, nv->len); in pfioctl()
3079 counter_u64_zero(rule->evaluations); in pfioctl()
3081 counter_u64_zero(rule->packets[i]); in pfioctl()
3082 counter_u64_zero(rule->bytes[i]); in pfioctl()
3104 if (nv->len > pf_ioctl_maxcount) in pfioctl()
3107 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); in pfioctl()
3108 error = copyin(nv->data, nvlpacked, nv->len); in pfioctl()
3112 nvl = nvlist_unpack(nvlpacked, nv->len, 0); in pfioctl()
3129 ruleset->inactive.ticket) { in pfioctl()
3133 ruleset->inactive.ticket)); in pfioctl()
3138 rule->timestamp = NULL; in pfioctl()
3144 if (rule->ifname[0]) in pfioctl()
3146 if (rule->bridge_to_name[0]) in pfioctl()
3148 rule->evaluations = counter_u64_alloc(M_WAITOK); in pfioctl()
3150 rule->packets[i] = counter_u64_alloc(M_WAITOK); in pfioctl()
3151 rule->bytes[i] = counter_u64_alloc(M_WAITOK); in pfioctl()
3153 rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone, in pfioctl()
3158 if (rule->ifname[0]) { in pfioctl()
3159 rule->kif = pfi_kkif_attach(kif, rule->ifname); in pfioctl()
3160 pfi_kkif_ref(rule->kif); in pfioctl()
3162 rule->kif = NULL; in pfioctl()
3163 if (rule->bridge_to_name[0]) { in pfioctl()
3164 rule->bridge_to = pfi_kkif_attach(bridge_to_kif, in pfioctl()
3165 rule->bridge_to_name); in pfioctl()
3166 pfi_kkif_ref(rule->bridge_to); in pfioctl()
3168 rule->bridge_to = NULL; in pfioctl()
3172 if (rule->qname[0] != 0) { in pfioctl()
3173 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) in pfioctl()
3176 rule->qid = rule->qid; in pfioctl()
3179 if (rule->tagname[0]) in pfioctl()
3180 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) in pfioctl()
3182 if (rule->match_tagname[0]) in pfioctl()
3183 if ((rule->match_tag = pf_tagname2tag( in pfioctl()
3184 rule->match_tagname)) == 0) in pfioctl()
3187 if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE) in pfioctl()
3188 error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr); in pfioctl()
3189 if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE) in pfioctl()
3190 error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr); in pfioctl()
3204 tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq); in pfioctl()
3206 rule->nr = tail->nr + 1; in pfioctl()
3208 rule->nr = 0; in pfioctl()
3210 TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries); in pfioctl()
3232 if (nv->len > pf_ioctl_maxcount) in pfioctl()
3235 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); in pfioctl()
3236 error = copyin(nv->data, nvlpacked, nv->len); in pfioctl()
3240 nvl = nvlist_unpack(nvlpacked, nv->len, 0); in pfioctl()
3254 if (ruleset->anchor == NULL) { in pfioctl()
3256 if (anchor->parent == NULL) in pfioctl()
3260 &ruleset->anchor->children) in pfioctl()
3277 nvlpacked = nvlist_pack(nvl, &nv->len); in pfioctl()
3281 if (nv->size == 0) in pfioctl()
3283 else if (nv->size < nv->len) in pfioctl()
3286 error = copyout(nvlpacked, nv->data, nv->len); in pfioctl()
3307 if (nv->len > pf_ioctl_maxcount) in pfioctl()
3310 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); in pfioctl()
3311 error = copyin(nv->data, nvlpacked, nv->len); in pfioctl()
3315 nvl = nvlist_unpack(nvlpacked, nv->len, 0); in pfioctl()
3344 if (ruleset->anchor == NULL) { in pfioctl()
3347 if (anchor->parent == NULL && nr++ == req_nr) { in pfioctl()
3354 &ruleset->anchor->children) { in pfioctl()
3365 nvlist_add_string(nvl, "name", anchor->name); in pfioctl()
3366 if (ruleset->anchor) in pfioctl()
3368 ruleset->anchor->path); in pfioctl()
3375 nvlpacked = nvlist_pack(nvl, &nv->len); in pfioctl()
3379 if (nv->size == 0) in pfioctl()
3381 else if (nv->size < nv->len) in pfioctl()
3384 error = copyout(nvlpacked, nv->data, nv->len); in pfioctl()
3403 if (nv->len > pf_ioctl_maxcount) in pfioctl()
3406 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); in pfioctl()
3407 error = copyin(nv->data, nvlpacked, nv->len); in pfioctl()
3411 nvl = nvlist_unpack(nvlpacked, nv->len, 0); in pfioctl()
3442 anchor_call, td->td_ucred->cr_ruid, in pfioctl()
3443 td->td_proc ? td->td_proc->p_pid : 0); in pfioctl()
3461 error = pf_rule_to_krule(&pr->rule, rule); in pfioctl()
3467 pr->anchor[sizeof(pr->anchor) - 1] = 0; in pfioctl()
3470 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket, in pfioctl()
3471 pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid, in pfioctl()
3472 td->td_proc ? td->td_proc->p_pid : 0); in pfioctl()
3479 pr->anchor[sizeof(pr->anchor) - 1] = 0; in pfioctl()
3498 if (nv->len > pf_ioctl_maxcount) in pfioctl()
3502 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); in pfioctl()
3503 error = copyin(nv->data, nvlpacked, nv->len); in pfioctl()
3507 nvl = nvlist_unpack(nvlpacked, nv->len, 0); in pfioctl()
3542 ruleset->rules[rs_num].active.ticket) { in pfioctl()
3552 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); in pfioctl()
3553 while ((rule != NULL) && (rule->nr != nr)) in pfioctl()
3578 nvlpacked = nvlist_pack(nvl, &nv->len); in pfioctl()
3584 if (nv->size == 0) { in pfioctl()
3588 else if (nv->size < nv->len) { in pfioctl()
3598 error = copyout(nvlpacked, nv->data, nv->len); in pfioctl()
3618 pcr->anchor[sizeof(pcr->anchor) - 1] = 0; in pfioctl()
3620 if (pcr->action < PF_CHANGE_ADD_HEAD || in pfioctl()
3621 pcr->action > PF_CHANGE_GET_TICKET) { in pfioctl()
3625 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { in pfioctl()
3630 if (pcr->action != PF_CHANGE_REMOVE) { in pfioctl()
3632 error = pf_rule_to_krule(&pcr->rule, newrule); in pfioctl()
3638 if (newrule->ifname[0]) in pfioctl()
3640 pf_counter_u64_init(&newrule->evaluations, M_WAITOK); in pfioctl()
3642 pf_counter_u64_init(&newrule->packets[i], M_WAITOK); in pfioctl()
3643 pf_counter_u64_init(&newrule->bytes[i], M_WAITOK); in pfioctl()
3645 newrule->states_cur = counter_u64_alloc(M_WAITOK); in pfioctl()
3646 newrule->states_tot = counter_u64_alloc(M_WAITOK); in pfioctl()
3647 newrule->src_nodes = counter_u64_alloc(M_WAITOK); in pfioctl()
3648 newrule->cuid = td->td_ucred->cr_ruid; in pfioctl()
3649 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0; in pfioctl()
3650 TAILQ_INIT(&newrule->nat.list); in pfioctl()
3651 TAILQ_INIT(&newrule->rdr.list); in pfioctl()
3660 newrule->allrulelinked = true; in pfioctl()
3665 if (!(pcr->action == PF_CHANGE_REMOVE || in pfioctl()
3666 pcr->action == PF_CHANGE_GET_TICKET) && in pfioctl()
3667 pcr->pool_ticket != V_ticket_pabuf) in pfioctl()
3670 ruleset = pf_find_kruleset(pcr->anchor); in pfioctl()
3674 rs_num = pf_get_ruleset_number(pcr->rule.action); in pfioctl()
3687 if (ruleset->rules[rs_num].active.tree == NULL) { in pfioctl()
3688 ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT); in pfioctl()
3689 if (ruleset->rules[rs_num].active.tree == NULL) { in pfioctl()
3694 if (pcr->action == PF_CHANGE_GET_TICKET) { in pfioctl()
3695 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; in pfioctl()
3697 } else if (pcr->ticket != in pfioctl()
3698 ruleset->rules[rs_num].active.ticket) in pfioctl()
3701 if (pcr->action != PF_CHANGE_REMOVE) { in pfioctl()
3702 if (newrule->ifname[0]) { in pfioctl()
3703 newrule->kif = pfi_kkif_attach(kif, in pfioctl()
3704 newrule->ifname); in pfioctl()
3706 pfi_kkif_ref(newrule->kif); in pfioctl()
3708 newrule->kif = NULL; in pfioctl()
3710 if (newrule->rtableid > 0 && in pfioctl()
3711 newrule->rtableid >= rt_numfibs) in pfioctl()
3716 if (newrule->qname[0] != 0) { in pfioctl()
3717 if ((newrule->qid = in pfioctl()
3718 pf_qname2qid(newrule->qname)) == 0) in pfioctl()
3720 else if (newrule->pqname[0] != 0) { in pfioctl()
3721 if ((newrule->pqid = in pfioctl()
3722 pf_qname2qid(newrule->pqname)) == 0) in pfioctl()
3725 newrule->pqid = newrule->qid; in pfioctl()
3728 if (newrule->tagname[0]) in pfioctl()
3729 if ((newrule->tag = in pfioctl()
3730 pf_tagname2tag(newrule->tagname)) == 0) in pfioctl()
3732 if (newrule->match_tagname[0]) in pfioctl()
3733 if ((newrule->match_tag = pf_tagname2tag( in pfioctl()
3734 newrule->match_tagname)) == 0) in pfioctl()
3736 if (newrule->rt && !newrule->direction) in pfioctl()
3738 if (!newrule->log) in pfioctl()
3739 newrule->logif = 0; in pfioctl()
3740 if (newrule->logif >= PFLOGIFS_MAX) in pfioctl()
3742 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) in pfioctl()
3744 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) in pfioctl()
3746 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call)) in pfioctl()
3750 if (pa->addr.type == PF_ADDR_TABLE) { in pfioctl()
3751 pa->addr.p.tbl = in pfioctl()
3753 pa->addr.v.tblname); in pfioctl()
3754 if (pa->addr.p.tbl == NULL) in pfioctl()
3759 newrule->overload_tbl = NULL; in pfioctl()
3760 if (newrule->overload_tblname[0]) { in pfioctl()
3761 if ((newrule->overload_tbl = pfr_attach_table( in pfioctl()
3762 ruleset, newrule->overload_tblname)) == in pfioctl()
3766 newrule->overload_tbl->pfrkt_flags |= in pfioctl()
3770 pf_mv_kpool(&V_pf_pabuf[0], &newrule->nat.list); in pfioctl()
3771 pf_mv_kpool(&V_pf_pabuf[1], &newrule->rdr.list); in pfioctl()
3772 if (((((newrule->action == PF_NAT) || in pfioctl()
3773 (newrule->action == PF_RDR) || in pfioctl()
3774 (newrule->action == PF_BINAT) || in pfioctl()
3775 (newrule->rt > PF_NOPFROUTE)) && in pfioctl()
3776 !newrule->anchor)) && in pfioctl()
3777 (TAILQ_FIRST(&newrule->rdr.list) == NULL)) in pfioctl()
3787 newrule->nat.cur = TAILQ_FIRST(&newrule->nat.list); in pfioctl()
3788 newrule->rdr.cur = TAILQ_FIRST(&newrule->rdr.list); in pfioctl()
3793 if (pcr->action == PF_CHANGE_ADD_HEAD) in pfioctl()
3795 ruleset->rules[rs_num].active.ptr); in pfioctl()
3796 else if (pcr->action == PF_CHANGE_ADD_TAIL) in pfioctl()
3798 ruleset->rules[rs_num].active.ptr, pf_krulequeue); in pfioctl()
3801 ruleset->rules[rs_num].active.ptr); in pfioctl()
3802 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) in pfioctl()
3814 if (pcr->action == PF_CHANGE_REMOVE) { in pfioctl()
3815 pf_unlink_rule(ruleset->rules[rs_num].active.ptr, in pfioctl()
3818 ruleset->rules[rs_num].active.tree, oldrule); in pfioctl()
3819 ruleset->rules[rs_num].active.rcount--; in pfioctl()
3823 ruleset->rules[rs_num].active.tree, newrule) != NULL) { in pfioctl()
3833 ruleset->rules[rs_num].active.ptr, in pfioctl()
3835 else if (pcr->action == PF_CHANGE_ADD_HEAD || in pfioctl()
3836 pcr->action == PF_CHANGE_ADD_BEFORE) in pfioctl()
3840 ruleset->rules[rs_num].active.ptr, in pfioctl()
3842 ruleset->rules[rs_num].active.rcount++; in pfioctl()
3847 ruleset->rules[rs_num].active.ptr, entries) in pfioctl()
3848 oldrule->nr = nr++; in pfioctl()
3850 ruleset->rules[rs_num].active.ticket++; in pfioctl()
3852 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); in pfioctl()
3880 struct pfsync_state_1301 *sp = &ps->state; in pfioctl()
3882 if (sp->timeout >= PFTM_MAX) { in pfioctl()
3901 s = pf_find_state_byid(ps->state.id, ps->state.creatorid); in pfioctl()
3907 pfsync_state_export((union pfsync_state_union*)&ps->state, in pfioctl()
3927 if (ps->ps_len <= 0) { in pfioctl()
3929 ps->ps_len = sizeof(struct pfsync_state_1301) * nr; in pfioctl()
3933 out = ps->ps_states; in pfioctl()
3944 if (LIST_EMPTY(&ih->states)) in pfioctl()
3949 LIST_FOREACH(s, &ih->states, entry) { in pfioctl()
3950 if (s->timeout == PFTM_UNLINKED) in pfioctl()
3965 if ((nr+count) * sizeof(*p) > ps->ps_len) { in pfioctl()
3970 LIST_FOREACH(s, &ih->states, entry) { in pfioctl()
3971 if (s->timeout == PFTM_UNLINKED) in pfioctl()
3984 out = ps->ps_states + nr; in pfioctl()
3987 ps->ps_len = sizeof(struct pfsync_state_1301) * nr; in pfioctl()
4001 if (ps->ps_req_version > PF_STATE_VERSION) { in pfioctl()
4006 if (ps->ps_len <= 0) { in pfioctl()
4008 ps->ps_len = sizeof(struct pf_state_export) * nr; in pfioctl()
4012 out = ps->ps_states; in pfioctl()
4023 if (LIST_EMPTY(&ih->states)) in pfioctl()
4028 LIST_FOREACH(s, &ih->states, entry) { in pfioctl()
4029 if (s->timeout == PFTM_UNLINKED) in pfioctl()
4044 if ((nr+count) * sizeof(*p) > ps->ps_len) { in pfioctl()
4049 LIST_FOREACH(s, &ih->states, entry) { in pfioctl()
4050 if (s->timeout == PFTM_UNLINKED) in pfioctl()
4062 out = ps->ps_states + nr; in pfioctl()
4065 ps->ps_len = nr * sizeof(struct pf_state_export); in pfioctl()
4079 if (pi->ifname[0] == 0) { in pfioctl()
4084 error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); in pfioctl()
4099 int m = 0, direction = pnl->direction; in pfioctl()
4106 if (!pnl->proto || in pfioctl()
4107 PF_AZERO(&pnl->saddr, pnl->af) || in pfioctl()
4108 PF_AZERO(&pnl->daddr, pnl->af) || in pfioctl()
4109 ((pnl->proto == IPPROTO_TCP || in pfioctl()
4110 pnl->proto == IPPROTO_UDP) && in pfioctl()
4111 (!pnl->dport || !pnl->sport))) in pfioctl()
4115 key.af = pnl->af; in pfioctl()
4116 key.proto = pnl->proto; in pfioctl()
4117 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); in pfioctl()
4118 key.port[sidx] = pnl->sport; in pfioctl()
4119 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); in pfioctl()
4120 key.port[didx] = pnl->dport; in pfioctl()
4130 sk = state->key[sidx]; in pfioctl()
4131 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); in pfioctl()
4132 pnl->rsport = sk->port[sidx]; in pfioctl()
4133 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); in pfioctl()
4134 pnl->rdport = sk->port[didx]; in pfioctl()
4145 error = pf_ioctl_set_timeout(pt->timeout, pt->seconds, in pfioctl()
4146 &pt->seconds); in pfioctl()
4153 error = pf_ioctl_get_timeout(pt->timeout, &pt->seconds); in pfioctl()
4160 error = pf_ioctl_get_limit(pl->index, &pl->limit); in pfioctl()
4168 error = pf_ioctl_set_limit(pl->index, pl->limit, &old_limit); in pfioctl()
4169 pl->limit = old_limit; in pfioctl()
4189 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { in pfioctl()
4190 pf_counter_u64_zero(&rule->evaluations); in pfioctl()
4192 pf_counter_u64_zero(&rule->packets[i]); in pfioctl()
4193 pf_counter_u64_zero(&rule->bytes[i]); in pfioctl()
4206 if (psp->ifname[0] == '\0') { in pfioctl()
4211 error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ); in pfioctl()
4216 psp->baudrate32 = in pfioctl()
4217 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX); in pfioctl()
4219 psp->baudrate = ifp->if_baudrate; in pfioctl()
4233 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { in pfioctl()
4252 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { in pfioctl()
4275 altq->local_flags = 0; in pfioctl()
4278 if (pa->ticket != V_ticket_altqs_inactive) { in pfioctl()
4289 if (altq->qname[0] != 0) { in pfioctl()
4290 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { in pfioctl()
4296 altq->altq_disc = NULL; in pfioctl()
4298 if (strncmp(a->ifname, altq->ifname, in pfioctl()
4300 altq->altq_disc = a->altq_disc; in pfioctl()
4306 if ((ifp = ifunit(altq->ifname)) == NULL) in pfioctl()
4307 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; in pfioctl()
4317 if (altq->qname[0] != 0) in pfioctl()
4333 pa->nr = 0; in pfioctl()
4335 pa->nr++; in pfioctl()
4337 pa->nr++; in pfioctl()
4338 pa->ticket = V_ticket_altqs_active; in pfioctl()
4349 if (pa->ticket != V_ticket_altqs_active) { in pfioctl()
4354 altq = pf_altq_get_nth_active(pa->nr); in pfioctl()
4379 if (pq->ticket != V_ticket_altqs_active) { in pfioctl()
4384 nbytes = pq->nbytes; in pfioctl()
4385 altq = pf_altq_get_nth_active(pq->nr); in pfioctl()
4392 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) { in pfioctl()
4401 version = pq->version; in pfioctl()
4402 error = altq_getqstats(altq, pq->buf, &nbytes, version); in pfioctl()
4404 pq->scheduler = altq->scheduler; in pfioctl()
4405 pq->nbytes = nbytes; in pfioctl()
4414 error = pf_ioctl_begin_addrs(&pp->ticket); in pfioctl()
4459 pca->anchor[sizeof(pca->anchor) - 1] = 0; in pfioctl()
4461 if (pca->action < PF_CHANGE_ADD_HEAD || in pfioctl()
4462 pca->action > PF_CHANGE_REMOVE) { in pfioctl()
4466 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && in pfioctl()
4467 pca->addr.addr.type != PF_ADDR_DYNIFTL && in pfioctl()
4468 pca->addr.addr.type != PF_ADDR_TABLE) { in pfioctl()
4472 if (pca->addr.addr.p.dyn != NULL) { in pfioctl()
4477 if (pca->action != PF_CHANGE_REMOVE) { in pfioctl()
4479 if (pca->af == AF_INET) { in pfioctl()
4485 if (pca->af == AF_INET6) { in pfioctl()
4491 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); in pfioctl()
4492 if (newpa->ifname[0]) in pfioctl()
4494 newpa->kif = NULL; in pfioctl()
4498 ruleset = pf_find_kruleset(pca->anchor); in pfioctl()
4502 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action, in pfioctl()
4503 pca->r_num, pca->r_last, 1, 1, PF_RDR); in pfioctl()
4507 if (pca->action != PF_CHANGE_REMOVE) { in pfioctl()
4508 if (newpa->ifname[0]) { in pfioctl()
4509 newpa->kif = pfi_kkif_attach(kif, newpa->ifname); in pfioctl()
4510 pfi_kkif_ref(newpa->kif); in pfioctl()
4514 switch (newpa->addr.type) { in pfioctl()
4516 error = pfi_dynaddr_setup(&newpa->addr, in pfioctl()
4517 pca->af); in pfioctl()
4520 newpa->addr.p.tbl = pfr_attach_table(ruleset, in pfioctl()
4521 newpa->addr.v.tblname); in pfioctl()
4522 if (newpa->addr.p.tbl == NULL) in pfioctl()
4530 switch (pca->action) { in pfioctl()
4532 oldpa = TAILQ_FIRST(&pool->list); in pfioctl()
4535 oldpa = TAILQ_LAST(&pool->list, pf_kpalist); in pfioctl()
4538 oldpa = TAILQ_FIRST(&pool->list); in pfioctl()
4539 for (int i = 0; oldpa && i < pca->nr; i++) in pfioctl()
4546 if (pca->action == PF_CHANGE_REMOVE) { in pfioctl()
4547 TAILQ_REMOVE(&pool->list, oldpa, entries); in pfioctl()
4548 switch (oldpa->addr.type) { in pfioctl()
4550 pfi_dynaddr_remove(oldpa->addr.p.dyn); in pfioctl()
4553 pfr_detach_table(oldpa->addr.p.tbl); in pfioctl()
4556 if (oldpa->kif) in pfioctl()
4557 pfi_kkif_unref(oldpa->kif); in pfioctl()
4561 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); in pfioctl()
4562 else if (pca->action == PF_CHANGE_ADD_HEAD || in pfioctl()
4563 pca->action == PF_CHANGE_ADD_BEFORE) in pfioctl()
4566 TAILQ_INSERT_AFTER(&pool->list, oldpa, in pfioctl()
4570 pool->cur = TAILQ_FIRST(&pool->list); in pfioctl()
4571 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af); in pfioctl()
4578 if (newpa->kif) in pfioctl()
4579 pfi_kkif_unref(newpa->kif); in pfioctl()
4590 pr->path[sizeof(pr->path) - 1] = 0; in pfioctl()
4599 pr->path[sizeof(pr->path) - 1] = 0; in pfioctl()
4608 if (io->pfrio_esize != 0) { in pfioctl()
4613 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, in pfioctl()
4614 io->pfrio_flags | PFR_FLAG_USERIOCTL); in pfioctl()
4624 if (io->pfrio_esize != sizeof(struct pfr_table)) { in pfioctl()
4629 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || in pfioctl()
4630 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { in pfioctl()
4635 totlen = io->pfrio_size * sizeof(struct pfr_table); in pfioctl()
4636 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), in pfioctl()
4638 error = copyin(io->pfrio_buffer, pfrts, totlen); in pfioctl()
4644 error = pfr_add_tables(pfrts, io->pfrio_size, in pfioctl()
4645 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); in pfioctl()
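Each of the table ioctls above validates the user-supplied element count before allocating. WOULD_OVERFLOW() is defined outside the matched lines; a plausible reading, given as an assumption only:

#define WOULD_OVERFLOW(cnt, size)	\
	((size_t)(cnt) > SIZE_MAX / (size))

With that guard in place, totlen = io->pfrio_size * sizeof(struct pfr_table) cannot wrap before it is handed to copyin(), and mallocarray(9) repeats an equivalent multiplication-overflow check internally.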
4656 if (io->pfrio_esize != sizeof(struct pfr_table)) { in pfioctl()
4661 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || in pfioctl()
4662 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { in pfioctl()
4667 totlen = io->pfrio_size * sizeof(struct pfr_table); in pfioctl()
4668 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), in pfioctl()
4670 error = copyin(io->pfrio_buffer, pfrts, totlen); in pfioctl()
4676 error = pfr_del_tables(pfrts, io->pfrio_size, in pfioctl()
4677 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); in pfioctl()
4689 if (io->pfrio_esize != sizeof(struct pfr_table)) { in pfioctl()
4694 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); in pfioctl()
4700 io->pfrio_size = min(io->pfrio_size, n); in pfioctl()
4702 totlen = io->pfrio_size * sizeof(struct pfr_table); in pfioctl()
4704 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), in pfioctl()
4711 error = pfr_get_tables(&io->pfrio_table, pfrts, in pfioctl()
4712 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); in pfioctl()
4715 error = copyout(pfrts, io->pfrio_buffer, totlen); in pfioctl()
4726 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { in pfioctl()
4732 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); in pfioctl()
4739 io->pfrio_size = min(io->pfrio_size, n); in pfioctl()
4741 totlen = io->pfrio_size * sizeof(struct pfr_tstats); in pfioctl()
4742 pfrtstats = mallocarray(io->pfrio_size, in pfioctl()
4750 error = pfr_get_tstats(&io->pfrio_table, pfrtstats, in pfioctl()
4751 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); in pfioctl()
4755 error = copyout(pfrtstats, io->pfrio_buffer, totlen); in pfioctl()
4765 if (io->pfrio_esize != sizeof(struct pfr_table)) { in pfioctl()
4770 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || in pfioctl()
4771 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { in pfioctl()
4775 io->pfrio_size = pf_ioctl_maxcount; in pfioctl()
4779 totlen = io->pfrio_size * sizeof(struct pfr_table); in pfioctl()
4780 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), in pfioctl()
4782 error = copyin(io->pfrio_buffer, pfrts, totlen); in pfioctl()
4790 error = pfr_clr_tstats(pfrts, io->pfrio_size, in pfioctl()
4791 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); in pfioctl()
4804 if (io->pfrio_esize != sizeof(struct pfr_table)) { in pfioctl()
4810 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); in pfioctl()
4817 io->pfrio_size = min(io->pfrio_size, n); in pfioctl()
4820 totlen = io->pfrio_size * sizeof(struct pfr_table); in pfioctl()
4821 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), in pfioctl()
4823 error = copyin(io->pfrio_buffer, pfrts, totlen); in pfioctl()
4829 error = pfr_set_tflags(pfrts, io->pfrio_size, in pfioctl()
4830 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, in pfioctl()
4831 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); in pfioctl()
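/*
 * The per-table address commands follow the same copyin/copyout scheme
 * but operate on arrays of struct pfr_addr; when PFR_FLAG_FEEDBACK is
 * set, per-element results are written back into the same array.
 */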
4840 if (io->pfrio_esize != 0) { in pfioctl()
4845 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, in pfioctl()
4846 io->pfrio_flags | PFR_FLAG_USERIOCTL); in pfioctl()
4856 if (io->pfrio_esize != sizeof(struct pfr_addr)) { in pfioctl()
4860 if (io->pfrio_size < 0 || in pfioctl()
4861 io->pfrio_size > pf_ioctl_maxcount || in pfioctl()
4862 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { in pfioctl()
4866 totlen = io->pfrio_size * sizeof(struct pfr_addr); in pfioctl()
4867 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), in pfioctl()
4869 error = copyin(io->pfrio_buffer, pfras, totlen); in pfioctl()
4875 error = pfr_add_addrs(&io->pfrio_table, pfras, in pfioctl()
4876 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | in pfioctl()
4879 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) in pfioctl()
4880 error = copyout(pfras, io->pfrio_buffer, totlen); in pfioctl()
4890 if (io->pfrio_esize != sizeof(struct pfr_addr)) { in pfioctl()
4894 if (io->pfrio_size < 0 || in pfioctl()
4895 io->pfrio_size > pf_ioctl_maxcount || in pfioctl()
4896 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { in pfioctl()
4900 totlen = io->pfrio_size * sizeof(struct pfr_addr); in pfioctl()
4901 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), in pfioctl()
4903 error = copyin(io->pfrio_buffer, pfras, totlen); in pfioctl()
4909 error = pfr_del_addrs(&io->pfrio_table, pfras, in pfioctl()
4910 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | in pfioctl()
4913 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) in pfioctl()
4914 error = copyout(pfras, io->pfrio_buffer, totlen); in pfioctl()
4924 if (io->pfrio_esize != sizeof(struct pfr_addr)) { in pfioctl()
4928 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) { in pfioctl()
4932 count = max(io->pfrio_size, io->pfrio_size2); in pfioctl()
4941 error = copyin(io->pfrio_buffer, pfras, totlen); in pfioctl()
4947 error = pfr_set_addrs(&io->pfrio_table, pfras, in pfioctl()
4948 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, in pfioctl()
4949 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | in pfioctl()
4952 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) in pfioctl()
4953 error = copyout(pfras, io->pfrio_buffer, totlen); in pfioctl()
4963 if (io->pfrio_esize != sizeof(struct pfr_addr)) { in pfioctl()
4967 if (io->pfrio_size < 0 || in pfioctl()
4968 io->pfrio_size > pf_ioctl_maxcount || in pfioctl()
4969 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { in pfioctl()
4973 totlen = io->pfrio_size * sizeof(struct pfr_addr); in pfioctl()
4974 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), in pfioctl()
4977 error = pfr_get_addrs(&io->pfrio_table, pfras, in pfioctl()
4978 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); in pfioctl()
4981 error = copyout(pfras, io->pfrio_buffer, totlen); in pfioctl()
4991 if (io->pfrio_esize != sizeof(struct pfr_astats)) { in pfioctl()
4995 if (io->pfrio_size < 0 || in pfioctl()
4996 io->pfrio_size > pf_ioctl_maxcount || in pfioctl()
4997 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) { in pfioctl()
5001 totlen = io->pfrio_size * sizeof(struct pfr_astats); in pfioctl()
5002 pfrastats = mallocarray(io->pfrio_size, in pfioctl()
5005 error = pfr_get_astats(&io->pfrio_table, pfrastats, in pfioctl()
5006 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); in pfioctl()
5009 error = copyout(pfrastats, io->pfrio_buffer, totlen); in pfioctl()
5019 if (io->pfrio_esize != sizeof(struct pfr_addr)) { in pfioctl()
5023 if (io->pfrio_size < 0 || in pfioctl()
5024 io->pfrio_size > pf_ioctl_maxcount || in pfioctl()
5025 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { in pfioctl()
5029 totlen = io->pfrio_size * sizeof(struct pfr_addr); in pfioctl()
5030 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), in pfioctl()
5032 error = copyin(io->pfrio_buffer, pfras, totlen); in pfioctl()
5038 error = pfr_clr_astats(&io->pfrio_table, pfras, in pfioctl()
5039 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | in pfioctl()
5042 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) in pfioctl()
5043 error = copyout(pfras, io->pfrio_buffer, totlen); in pfioctl()
5053 if (io->pfrio_esize != sizeof(struct pfr_addr)) { in pfioctl()
5057 if (io->pfrio_size < 0 || in pfioctl()
5058 io->pfrio_size > pf_ioctl_maxcount || in pfioctl()
5059 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { in pfioctl()
5063 totlen = io->pfrio_size * sizeof(struct pfr_addr); in pfioctl()
5064 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), in pfioctl()
5066 error = copyin(io->pfrio_buffer, pfras, totlen); in pfioctl()
5072 error = pfr_tst_addrs(&io->pfrio_table, pfras, in pfioctl()
5073 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | in pfioctl()
5077 error = copyout(pfras, io->pfrio_buffer, totlen); in pfioctl()
5087 if (io->pfrio_esize != sizeof(struct pfr_addr)) { in pfioctl()
5091 if (io->pfrio_size < 0 || in pfioctl()
5092 io->pfrio_size > pf_ioctl_maxcount || in pfioctl()
5093 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { in pfioctl()
5097 totlen = io->pfrio_size * sizeof(struct pfr_addr); in pfioctl()
5098 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), in pfioctl()
5100 error = copyin(io->pfrio_buffer, pfras, totlen); in pfioctl()
5106 error = pfr_ina_define(&io->pfrio_table, pfras, in pfioctl()
5107 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, in pfioctl()
5108 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); in pfioctl()
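/*
 * DIOCXBEGIN: open an inactive ("transaction") copy for every element
 * listed by the caller (ethernet rules, ALTQ, tables or rule sets) and
 * copy the resulting tickets back out so that a later DIOCXCOMMIT or
 * DIOCXROLLBACK can reference them.
 */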
5136 if (io->esize != sizeof(*ioe)) { in pfioctl()
5140 if (io->size < 0 || in pfioctl()
5141 io->size > pf_ioctl_maxcount || in pfioctl()
5142 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { in pfioctl()
5146 totlen = sizeof(struct pfioc_trans_e) * io->size; in pfioctl()
5147 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), in pfioctl()
5149 error = copyin(io->array, ioes, totlen); in pfioctl()
5157 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { in pfioctl()
5158 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; in pfioctl()
5159 switch (ioe->rs_num) { in pfioctl()
5161 if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) { in pfioctl()
5169 if (ioe->anchor[0]) { in pfioctl()
5175 if ((error = pf_begin_altq(&ioe->ticket))) { in pfioctl()
5187 strlcpy(table.pfrt_anchor, ioe->anchor, in pfioctl()
5190 &ioe->ticket, NULL, 0))) { in pfioctl()
5198 if ((error = pf_begin_rules(&ioe->ticket, in pfioctl()
5199 ioe->rs_num, ioe->anchor))) { in pfioctl()
5208 error = copyout(ioes, io->array, totlen); in pfioctl()
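/*
 * DIOCXROLLBACK: discard the inactive copies identified by the caller's
 * tickets, invoking the matching rollback helper for each element type.
 */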
5219 if (io->esize != sizeof(*ioe)) { in pfioctl()
5223 if (io->size < 0 || in pfioctl()
5224 io->size > pf_ioctl_maxcount || in pfioctl()
5225 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { in pfioctl()
5229 totlen = sizeof(struct pfioc_trans_e) * io->size; in pfioctl()
5230 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), in pfioctl()
5232 error = copyin(io->array, ioes, totlen); in pfioctl()
5238 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { in pfioctl()
5239 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; in pfioctl()
5240 switch (ioe->rs_num) { in pfioctl()
5242 if ((error = pf_rollback_eth(ioe->ticket, in pfioctl()
5243 ioe->anchor))) { in pfioctl()
5251 if (ioe->anchor[0]) { in pfioctl()
5257 if ((error = pf_rollback_altq(ioe->ticket))) { in pfioctl()
5269 strlcpy(table.pfrt_anchor, ioe->anchor, in pfioctl()
5272 ioe->ticket, NULL, 0))) { in pfioctl()
5280 if ((error = pf_rollback_rules(ioe->ticket, in pfioctl()
5281 ioe->rs_num, ioe->anchor))) { in pfioctl()
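/*
 * DIOCXCOMMIT runs in two passes: every ticket is first checked against
 * the corresponding inactive ruleset so that a stale transaction is
 * rejected as a whole, then the pf_commit_*() calls swap the inactive
 * sets in.  Afterwards the ethernet hooks are attached or detached
 * depending on whether any active ethernet rules remain.
 */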
5302 if (io->esize != sizeof(*ioe)) { in pfioctl()
5307 if (io->size < 0 || in pfioctl()
5308 io->size > pf_ioctl_maxcount || in pfioctl()
5309 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { in pfioctl()
5314 totlen = sizeof(struct pfioc_trans_e) * io->size; in pfioctl()
5315 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), in pfioctl()
5317 error = copyin(io->array, ioes, totlen); in pfioctl()
5324 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { in pfioctl()
5325 ioe->anchor[sizeof(ioe->anchor) - 1] = 0; in pfioctl()
5326 switch (ioe->rs_num) { in pfioctl()
5328 ers = pf_find_keth_ruleset(ioe->anchor); in pfioctl()
5329 if (ers == NULL || ioe->ticket == 0 || in pfioctl()
5330 ioe->ticket != ers->inactive.ticket) { in pfioctl()
5339 if (ioe->anchor[0]) { in pfioctl()
5345 if (!V_altqs_inactive_open || ioe->ticket != in pfioctl()
5355 rs = pf_find_kruleset(ioe->anchor); in pfioctl()
5356 if (rs == NULL || !rs->topen || ioe->ticket != in pfioctl()
5357 rs->tticket) { in pfioctl()
5365 if (ioe->rs_num < 0 || ioe->rs_num >= in pfioctl()
5372 rs = pf_find_kruleset(ioe->anchor); in pfioctl()
5374 !rs->rules[ioe->rs_num].inactive.open || in pfioctl()
5375 rs->rules[ioe->rs_num].inactive.ticket != in pfioctl()
5376 ioe->ticket) { in pfioctl()
5385 /* Now do the commit - no errors should happen here. */ in pfioctl()
5386 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { in pfioctl()
5387 switch (ioe->rs_num) { in pfioctl()
5389 if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) { in pfioctl()
5397 if ((error = pf_commit_altq(ioe->ticket))) { in pfioctl()
5409 (void)strlcpy(table.pfrt_anchor, ioe->anchor, in pfioctl()
5412 ioe->ticket, NULL, NULL, 0))) { in pfioctl()
5420 if ((error = pf_commit_rules(ioe->ticket, in pfioctl()
5421 ioe->rs_num, ioe->anchor))) { in pfioctl()
5432 if (!TAILQ_EMPTY(V_pf_keth->active.rules)) in pfioctl()
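/*
 * DIOCGETSRCNODES: walk the source-node hash once to size the reply,
 * then again to fill a temporary buffer bounded by psn_len, and copy
 * the snapshot out to the caller.
 */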
5451 LIST_FOREACH(n, &sh->nodes, entry) in pfioctl()
5456 psn->psn_len = min(psn->psn_len, in pfioctl()
5459 if (psn->psn_len == 0) { in pfioctl()
5460 psn->psn_len = sizeof(struct pf_src_node) * nr; in pfioctl()
5466 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO); in pfioctl()
5470 LIST_FOREACH(n, &sh->nodes, entry) { in pfioctl()
5472 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) in pfioctl()
5482 error = copyout(pstore, psn->psn_src_nodes, in pfioctl()
5488 psn->psn_len = sizeof(struct pf_src_node) * nr; in pfioctl()
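/*
 * Interface ioctls: DIOCIGETIFACES copies out a snapshot of the known
 * interfaces and groups, while DIOCSETIFFLAG and DIOCCLRIFFLAG toggle
 * per-interface flags (e.g. skip).
 */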
5540 if (io->pfiio_esize != sizeof(struct pfi_kif)) { in pfioctl()
5545 if (io->pfiio_size < 0 || in pfioctl()
5546 io->pfiio_size > pf_ioctl_maxcount || in pfioctl()
5547 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) { in pfioctl()
5552 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; in pfioctl()
5554 bufsiz = io->pfiio_size * sizeof(struct pfi_kif); in pfioctl()
5555 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif), in pfioctl()
5559 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size); in pfioctl()
5561 error = copyout(ifstore, io->pfiio_buffer, bufsiz); in pfioctl()
5569 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; in pfioctl()
5572 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); in pfioctl()
5580 io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0'; in pfioctl()
5583 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); in pfioctl()
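/*
 * pfsync_state_export(): serialize a state for the pfsync protocol.
 * The key, interface, timing and counter fields are shared between the
 * 1301 and 1400 message layouts; the 1400 layout additionally carries
 * the state's queueing and route-to actions, with multi-byte fields in
 * network byte order.
 */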
5617 sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; in pfsync_state_export()
5618 sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; in pfsync_state_export()
5619 sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; in pfsync_state_export()
5620 sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; in pfsync_state_export()
5621 sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; in pfsync_state_export()
5622 sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; in pfsync_state_export()
5623 sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; in pfsync_state_export()
5624 sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; in pfsync_state_export()
5625 sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto; in pfsync_state_export()
5626 sp->pfs_1301.af = st->key[PF_SK_WIRE]->af; in pfsync_state_export()
5629 strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname)); in pfsync_state_export()
5630 bcopy(&st->act.rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr)); in pfsync_state_export()
5631 sp->pfs_1301.creation = htonl(time_uptime - (st->creation / 1000)); in pfsync_state_export()
5632 sp->pfs_1301.expire = pf_state_expires(st); in pfsync_state_export()
5633 if (sp->pfs_1301.expire <= time_uptime) in pfsync_state_export()
5634 sp->pfs_1301.expire = htonl(0); in pfsync_state_export()
5636 sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime); in pfsync_state_export()
5638 sp->pfs_1301.direction = st->direction; in pfsync_state_export()
5639 sp->pfs_1301.log = st->act.log; in pfsync_state_export()
5640 sp->pfs_1301.timeout = st->timeout; in pfsync_state_export()
5644 sp->pfs_1301.state_flags = st->state_flags; in pfsync_state_export()
5647 sp->pfs_1400.state_flags = htons(st->state_flags); in pfsync_state_export()
5648 sp->pfs_1400.qid = htons(st->act.qid); in pfsync_state_export()
5649 sp->pfs_1400.pqid = htons(st->act.pqid); in pfsync_state_export()
5650 sp->pfs_1400.dnpipe = htons(st->act.dnpipe); in pfsync_state_export()
5651 sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe); in pfsync_state_export()
5652 sp->pfs_1400.rtableid = htonl(st->act.rtableid); in pfsync_state_export()
5653 sp->pfs_1400.min_ttl = st->act.min_ttl; in pfsync_state_export()
5654 sp->pfs_1400.set_tos = st->act.set_tos; in pfsync_state_export()
5655 sp->pfs_1400.max_mss = htons(st->act.max_mss); in pfsync_state_export()
5656 sp->pfs_1400.set_prio[0] = st->act.set_prio[0]; in pfsync_state_export()
5657 sp->pfs_1400.set_prio[1] = st->act.set_prio[1]; in pfsync_state_export()
5658 sp->pfs_1400.rt = st->act.rt; in pfsync_state_export()
5659 if (st->act.rt_kif) in pfsync_state_export()
5660 strlcpy(sp->pfs_1400.rt_ifname, in pfsync_state_export()
5661 st->act.rt_kif->pfik_name, in pfsync_state_export()
5662 sizeof(sp->pfs_1400.rt_ifname)); in pfsync_state_export()
5669 if (st->src_node) in pfsync_state_export()
5670 sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE; in pfsync_state_export()
5671 if (st->nat_src_node) in pfsync_state_export()
5672 sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE; in pfsync_state_export()
5674 sp->pfs_1301.id = st->id; in pfsync_state_export()
5675 sp->pfs_1301.creatorid = st->creatorid; in pfsync_state_export()
5676 pf_state_peer_hton(&st->src, &sp->pfs_1301.src); in pfsync_state_export()
5677 pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst); in pfsync_state_export()
5679 if (st->rule == NULL) in pfsync_state_export()
5680 sp->pfs_1301.rule = htonl(-1); in pfsync_state_export()
5682 sp->pfs_1301.rule = htonl(st->rule->nr); in pfsync_state_export()
5683 if (st->anchor == NULL) in pfsync_state_export()
5684 sp->pfs_1301.anchor = htonl(-1); in pfsync_state_export()
5686 sp->pfs_1301.anchor = htonl(st->anchor->nr); in pfsync_state_export()
5687 if (st->nat_rule == NULL) in pfsync_state_export()
5688 sp->pfs_1301.nat_rule = htonl(-1); in pfsync_state_export()
5690 sp->pfs_1301.nat_rule = htonl(st->nat_rule->nr); in pfsync_state_export()
5692 pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]); in pfsync_state_export()
5693 pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]); in pfsync_state_export()
5694 pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]); in pfsync_state_export()
5695 pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]); in pfsync_state_export()
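/*
 * pf_state_export(): fill the current PF_STATE_VERSION export structure
 * used by the state-listing ioctls.  Unlike the pfsync export above it
 * also records the original interface and carries both the compat and
 * the wide state_flags fields.
 */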
5703 sp->version = PF_STATE_VERSION; in pf_state_export()
5706 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; in pf_state_export()
5707 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; in pf_state_export()
5708 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; in pf_state_export()
5709 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; in pf_state_export()
5710 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; in pf_state_export()
5711 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; in pf_state_export()
5712 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; in pf_state_export()
5713 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; in pf_state_export()
5714 sp->proto = st->key[PF_SK_WIRE]->proto; in pf_state_export()
5715 sp->af = st->key[PF_SK_WIRE]->af; in pf_state_export()
5718 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); in pf_state_export()
5719 strlcpy(sp->orig_ifname, st->orig_kif->pfik_name, in pf_state_export()
5720 sizeof(sp->orig_ifname)); in pf_state_export()
5721 bcopy(&st->act.rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); in pf_state_export()
5722 sp->creation = htonl(time_uptime - (st->creation / 1000)); in pf_state_export()
5723 sp->expire = pf_state_expires(st); in pf_state_export()
5724 if (sp->expire <= time_uptime) in pf_state_export()
5725 sp->expire = htonl(0); in pf_state_export()
5727 sp->expire = htonl(sp->expire - time_uptime); in pf_state_export()
5729 sp->direction = st->direction; in pf_state_export()
5730 sp->log = st->act.log; in pf_state_export()
5731 sp->timeout = st->timeout; in pf_state_export()
5733 sp->state_flags_compat = st->state_flags; in pf_state_export()
5734 sp->state_flags = htons(st->state_flags); in pf_state_export()
5735 if (st->src_node) in pf_state_export()
5736 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; in pf_state_export()
5737 if (st->nat_src_node) in pf_state_export()
5738 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; in pf_state_export()
5740 sp->id = st->id; in pf_state_export()
5741 sp->creatorid = st->creatorid; in pf_state_export()
5742 pf_state_peer_hton(&st->src, &sp->src); in pf_state_export()
5743 pf_state_peer_hton(&st->dst, &sp->dst); in pf_state_export()
5745 if (st->rule == NULL) in pf_state_export()
5746 sp->rule = htonl(-1); in pf_state_export()
5748 sp->rule = htonl(st->rule->nr); in pf_state_export()
5749 if (st->anchor == NULL) in pf_state_export()
5750 sp->anchor = htonl(-1); in pf_state_export()
5752 sp->anchor = htonl(st->anchor->nr); in pf_state_export()
5753 if (st->nat_rule == NULL) in pf_state_export()
5754 sp->nat_rule = htonl(-1); in pf_state_export()
5756 sp->nat_rule = htonl(st->nat_rule->nr); in pf_state_export()
5758 sp->packets[0] = st->packets[0]; in pf_state_export()
5759 sp->packets[1] = st->packets[1]; in pf_state_export()
5760 sp->bytes[0] = st->bytes[0]; in pf_state_export()
5761 sp->bytes[1] = st->bytes[1]; in pf_state_export()
5763 sp->qid = htons(st->act.qid); in pf_state_export()
5764 sp->pqid = htons(st->act.pqid); in pf_state_export()
5765 sp->dnpipe = htons(st->act.dnpipe); in pf_state_export()
5766 sp->dnrpipe = htons(st->act.dnrpipe); in pf_state_export()
5767 sp->rtableid = htonl(st->act.rtableid); in pf_state_export()
5768 sp->min_ttl = st->act.min_ttl; in pf_state_export()
5769 sp->set_tos = st->act.set_tos; in pf_state_export()
5770 sp->max_mss = htons(st->act.max_mss); in pf_state_export()
5771 sp->rt = st->act.rt; in pf_state_export()
5772 if (st->act.rt_kif) in pf_state_export()
5773 strlcpy(sp->rt_ifname, st->act.rt_kif->pfik_name, in pf_state_export()
5774 sizeof(sp->rt_ifname)); in pf_state_export()
5775 sp->set_prio[0] = st->act.set_prio[0]; in pf_state_export()
5776 sp->set_prio[1] = st->act.set_prio[1]; in pf_state_export()
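/*
 * When a table address is copied out to userland the kernel table
 * pointer is replaced by the number of addresses in the active table,
 * or -1 if the table is not active.
 */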
5785 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type)); in pf_tbladdr_copyout()
5787 kt = aw->p.tbl; in pf_tbladdr_copyout()
5788 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) in pf_tbladdr_copyout()
5789 kt = kt->pfrkt_root; in pf_tbladdr_copyout()
5790 aw->p.tbl = NULL; in pf_tbladdr_copyout()
5791 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ? in pf_tbladdr_copyout()
5792 kt->pfrkt_cnt : -1; in pf_tbladdr_copyout()
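/*
 * nvlist replies follow one convention: pack the nvlist to learn its
 * length, succeed without copying if the caller passed a zero-sized
 * buffer (so it can size a retry), fail if the buffer is too small,
 * and otherwise copy the packed data out.
 */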
5903 nvlpacked = nvlist_pack(nvl, &nv->len); in pf_getstatus()
5907 if (nv->size == 0) in pf_getstatus()
5909 else if (nv->size < nv->len) in pf_getstatus()
5913 error = copyout(nvlpacked, nv->data, nv->len); in pf_getstatus()
5928 * XXX - Check for version mismatch!!!
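/*
 * Flush all states: mark every entry as purged and set PFSTATE_NOSYNC
 * so that no individual pfsync delete messages are generated.
 */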
5942 LIST_FOREACH(s, &ih->states, entry) { in pf_clear_all_states()
5943 s->timeout = PFTM_PURGE; in pf_clear_all_states()
5945 s->state_flags |= PFSTATE_NOSYNC; in pf_clear_all_states()
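/*
 * pf_kill_srcnodes(): mark every source node whose source and
 * destination addresses match the (optionally negated) request for
 * expiry, then detach the expiring nodes from any states that still
 * reference them and report the number killed.
 */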
5981 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp) in pf_kill_srcnodes()
5983 (PF_MATCHA(psnk->psnk_src.neg, in pf_kill_srcnodes()
5984 &psnk->psnk_src.addr.v.a.addr, in pf_kill_srcnodes()
5985 &psnk->psnk_src.addr.v.a.mask, in pf_kill_srcnodes()
5986 &sn->addr, sn->af) && in pf_kill_srcnodes()
5987 PF_MATCHA(psnk->psnk_dst.neg, in pf_kill_srcnodes()
5988 &psnk->psnk_dst.addr.v.a.addr, in pf_kill_srcnodes()
5989 &psnk->psnk_dst.addr.v.a.mask, in pf_kill_srcnodes()
5990 &sn->raddr, sn->af))) { in pf_kill_srcnodes()
5993 sn->expire = 1; in pf_kill_srcnodes()
6003 LIST_FOREACH(s, &ih->states, entry) { in pf_kill_srcnodes()
6004 if (s->src_node && s->src_node->expire == 1) in pf_kill_srcnodes()
6005 s->src_node = NULL; in pf_kill_srcnodes()
6006 if (s->nat_src_node && s->nat_src_node->expire == 1) in pf_kill_srcnodes()
6007 s->nat_src_node = NULL; in pf_kill_srcnodes()
6015 psnk->psnk_killed = killed; in pf_kill_srcnodes()
6027 if (nv->len > pf_ioctl_maxcount) in pf_keepcounters()
6030 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); in pf_keepcounters()
6031 error = copyin(nv->data, nvlpacked, nv->len); in pf_keepcounters()
6035 nvl = nvlist_unpack(nvlpacked, nv->len, 0); in pf_keepcounters()
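/*
 * pf_clear_states(): remove states that match the request, comparing
 * the interface name and, when psk_kill_match is set, a state key that
 * is reversed for outbound states.  Cleared states are marked
 * PFSTATE_NOSYNC; a single pfsync clear-states message is sent at the
 * end instead of per-state deletes.
 */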
6066 LIST_FOREACH(s, &ih->states, entry) { in pf_clear_states()
6068 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; in pf_clear_states()
6070 if (kill->psk_ifname[0] && in pf_clear_states()
6071 strcmp(kill->psk_ifname, in pf_clear_states()
6072 kif->pfik_name)) in pf_clear_states()
6075 if (kill->psk_kill_match) { in pf_clear_states()
6078 if (s->direction == PF_OUT) { in pf_clear_states()
6086 match_key.af = s->key[idx]->af; in pf_clear_states()
6087 match_key.proto = s->key[idx]->proto; in pf_clear_states()
6089 &s->key[idx]->addr[1], match_key.af); in pf_clear_states()
6090 match_key.port[0] = s->key[idx]->port[1]; in pf_clear_states()
6092 &s->key[idx]->addr[0], match_key.af); in pf_clear_states()
6093 match_key.port[1] = s->key[idx]->port[0]; in pf_clear_states()
6100 s->state_flags |= PFSTATE_NOSYNC; in pf_clear_states()
6104 if (kill->psk_kill_match) in pf_clear_states()
6114 V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname); in pf_clear_states()
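/*
 * pf_killstates(): a request carrying an explicit state ID kills just
 * that state, defaulting the creator ID to the local host ID when it
 * is unset; otherwise states are matched by the supplied criteria.
 */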
6125 if (kill->psk_pfcmp.id) { in pf_killstates()
6126 if (kill->psk_pfcmp.creatorid == 0) in pf_killstates()
6127 kill->psk_pfcmp.creatorid = V_pf_status.hostid; in pf_killstates()
6128 if ((s = pf_find_state_byid(kill->psk_pfcmp.id, in pf_killstates()
6129 kill->psk_pfcmp.creatorid))) { in pf_killstates()
6152 if (nv->len > pf_ioctl_maxcount) in pf_killstates_nv()
6155 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); in pf_killstates_nv()
6156 error = copyin(nv->data, nvlpacked, nv->len); in pf_killstates_nv()
6160 nvl = nvlist_unpack(nvlpacked, nv->len, 0); in pf_killstates_nv()
6181 nvlpacked = nvlist_pack(nvl, &nv->len); in pf_killstates_nv()
6185 if (nv->size == 0) in pf_killstates_nv()
6187 else if (nv->size < nv->len) in pf_killstates_nv()
6190 error = copyout(nvlpacked, nv->data, nv->len); in pf_killstates_nv()
6210 if (nv->len > pf_ioctl_maxcount) in pf_clearstates_nv()
6213 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); in pf_clearstates_nv()
6214 error = copyin(nv->data, nvlpacked, nv->len); in pf_clearstates_nv()
6218 nvl = nvlist_unpack(nvlpacked, nv->len, 0); in pf_clearstates_nv()
6239 nvlpacked = nvlist_pack(nvl, &nv->len); in pf_clearstates_nv()
6243 if (nv->size == 0) in pf_clearstates_nv()
6245 else if (nv->size < nv->len) in pf_clearstates_nv()
6248 error = copyout(nvlpacked, nv->data, nv->len); in pf_clearstates_nv()
6268 if (nv->len > pf_ioctl_maxcount) in pf_getstate()
6271 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); in pf_getstate()
6272 error = copyin(nv->data, nvlpacked, nv->len); in pf_getstate()
6276 nvl = nvlist_unpack(nvlpacked, nv->len, 0); in pf_getstate()
6301 nvlpacked = nvlist_pack(nvl, &nv->len); in pf_getstate()
6305 if (nv->size == 0) in pf_getstate()
6307 else if (nv->size < nv->len) in pf_getstate()
6310 error = copyout(nvlpacked, nv->data, nv->len); in pf_getstate()
6322 * XXX - Check for version mismatch!!!
6326 * Duplicate pfctl -Fa operation to get rid of as much as we can.
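/*
 * Tear down the rulesets by opening and immediately committing an
 * empty transaction for every regular and ethernet anchor; anchors
 * with no references get a temporary reference so the empty commit
 * does not free them while we iterate.
 */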
6345 if (anchor->refcnt == 0) in shutdown_pf()
6346 anchor->refcnt = 1; in shutdown_pf()
6349 anchor->path)) != 0) { in shutdown_pf()
6352 anchor->path, rs_num)); in shutdown_pf()
6358 anchor->path); in shutdown_pf()
6370 if (eth_anchor->refcnt == 0) in shutdown_pf()
6371 eth_anchor->refcnt = 1; in shutdown_pf()
6372 if ((error = pf_begin_eth(&t[0], eth_anchor->path)) in shutdown_pf()
6375 "anchor.path=%s\n", eth_anchor->path)); in shutdown_pf()
6378 error = pf_commit_eth(t[0], eth_anchor->path); in shutdown_pf()
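/*
 * pfil glue: the IPv6 input check hands packets flagged M_LOOP to
 * pf_test() as if they arrived on the loopback interface, and the
 * hook_pf_eth()/hook_pf() registrations name their hooks "eth-in",
 * "eth-out", "default-in(6)" and "default-out(6)".
 */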
6537 chk = pf_test(AF_INET6, PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, in pf_check6_in()
6593 pha.pa_rulname = "eth-in"; in hook_pf_eth()
6602 pha.pa_rulname = "eth-out"; in hook_pf_eth()
6632 pha.pa_rulname = "default-in"; in hook_pf()
6641 pha.pa_rulname = "default-out"; in hook_pf()
6660 pha.pa_rulname = "default-in6"; in hook_pf()
6668 pha.pa_rulname = "default-out6"; in hook_pf()
6814 V_pf_allrulecount--; in pf_unload_vnet()