/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netpfil/pf/pf_nv.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif

SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");

static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);

static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
static void		 pf_empty_kpool(struct pf_kpalist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
static int		 pf_begin_eth(uint32_t *, const char *);
static void		 pf_rollback_eth_cb(struct epoch_context *);
static int		 pf_rollback_eth(uint32_t, const char *);
static int		 pf_commit_eth(uint32_t, const char *);
static void		 pf_free_eth_rule(struct pf_keth_rule *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static uint16_t		 pf_qname2qid(const char *);
static void		 pf_qid_unref(uint16_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
static void		 pf_hash_rule(MD5_CTX *, struct pf_krule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_kruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_addr_copyout(struct pf_addr_wrap *);
static void		 pf_src_node_copy(const struct pf_ksrc_node *,
			    struct pf_src_node *);
#ifdef ALTQ
static int		 pf_export_kaltq(struct pf_altq *,
			    struct pfioc_altq_v1 *, size_t);
static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
			    struct pf_altq *, size_t);
#endif /* ALTQ */

VNET_DEFINE(struct pf_krule,	pf_default_rule);

#ifdef ALTQ
VNET_DEFINE_STATIC(int, pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	 50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	namehash_entries;
	TAILQ_ENTRY(pf_tagname)	taghash_entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};

struct pf_tagset {
	TAILQ_HEAD(, pf_tagname)	*namehash;
	TAILQ_HEAD(, pf_tagname)	*taghash;
	unsigned int			 mask;
	uint32_t			 seed;
	BITSET_DEFINE(, TAGID_MAX)	 avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define	V_pf_tags	VNET(pf_tags)
static unsigned int	pf_rule_tag_hashsize;
#define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define	V_pf_qids	VNET(pf_qids)
static unsigned int	pf_queue_tag_hashsize;
#define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif
VNET_DEFINE(uma_zone_t,	 pf_tag_z);
#define	V_pf_tag_z		 VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
			    unsigned int);
static void		 pf_cleanup_tagset(struct pf_tagset *);
static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
static u_int16_t	 pf_tagname2tag(const char *);
static void		 tag_unref(struct pf_tagset *, u_int16_t);

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_all_states(void);
static unsigned int	 pf_clear_states(const struct pf_kstate_kill *);
static void		 pf_killstates(struct pf_kstate_kill *,
			    unsigned int *);
static int		 pf_killstates_row(struct pf_kstate_kill *,
			    struct pf_idhash *);
static int		 pf_killstates_nv(struct pfioc_nv *);
static int		 pf_clearstates_nv(struct pfioc_nv *);
static int		 pf_getstate(struct pfioc_nv *);
static int		 pf_getstatus(struct pfioc_nv *);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static int		 pf_keepcounters(struct pfioc_nv *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#ifdef INET
static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif
#ifdef INET6
static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif

static void		hook_pf_eth(void);
static void		hook_pf(void);
static void		dehook_pf_eth(void);
static void		dehook_pf(void);
static int		shutdown_pf(void);
static int		pf_load(void);
static void		pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

volatile VNET_DEFINE_STATIC(int, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
volatile VNET_DEFINE_STATIC(int, pf_pfil_eth_hooked);
#define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid".  We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)

int pf_end_threads;
struct proc *pf_purge_proc;

struct rmlock			pf_rules_lock;
struct sx			pf_ioctl_lock;
struct sx			pf_end_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

/* pflog */
pflog_packet_t			*pflog_packet_ptr = NULL;

/*
 * Copy a user-provided string, returning an error if truncation would occur.
 * Avoid scanning past "sz" bytes in the source string since there's no
 * guarantee that it's nul-terminated.
 */
static int
pf_user_strcpy(char *dst, const char *src, size_t sz)
{
	if (strnlen(src, sz) == sz)
		return (EINVAL);
	(void)strlcpy(dst, src, sz);
	return (0);
}
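
/*
 * Illustrative use, with hypothetical names: callers hand pf_user_strcpy()
 * the destination size, so an over-long (and possibly unterminated) user
 * string is rejected instead of being silently truncated:
 *
 *	char ifname[IFNAMSIZ];
 *
 *	if (pf_user_strcpy(ifname, user_str, sizeof(ifname)) != 0)
 *		return (EINVAL);
 */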

static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	pf_initialize();
	pfr_initialize();
	pfi_initialize_vnet();
	pf_normalize_init();
	pf_syncookies_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_kruleset(&pf_main_ruleset);

	pf_init_keth(V_pf_keth);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
#ifdef PF_DEFAULT_TO_DROP
	V_pf_default_rule.action = PF_DROP;
#else
	V_pf_default_rule.action = PF_PASS;
#endif
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
	}
	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

#ifdef PF_WANT_32_TO_64_COUNTER
	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
	PF_RULES_WLOCK();
	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
	V_pf_allrulecount++;
	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
	PF_RULES_WUNLOCK();
#endif

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	bzero(&V_pf_status, sizeof(V_pf_status));
	V_pf_status.debug = PF_DEBUG_URGENT;

	V_pf_pfil_hooked = 0;
	V_pf_pfil_eth_hooked = 0;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < KLCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}

static struct pf_kpool *
pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*rule;
	int			 rs_num;

	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
	struct pf_kpooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

static void
pf_empty_kpool(struct pf_kpalist *poola)
{
	struct pf_kpooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kkif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	PF_UNLNKDRULES_LOCK();
	rule->rule_ref |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
	PF_UNLNKDRULES_UNLOCK();
}

static void
pf_free_eth_rule(struct pf_keth_rule *rule)
{
	PF_RULES_WASSERT();

	if (rule == NULL)
		return;

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
#ifdef ALTQ
	pf_qid_unref(rule->qid);
#endif

	if (rule->kif)
		pfi_kkif_unref(rule->kif);

	counter_u64_free(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_free(rule->packets[i]);
		counter_u64_free(rule->bytes[i]);
	}
	pf_keth_anchor_remove(rule);

	free(rule, M_PFRULE);
}

void
pf_free_rule(struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	pf_kanchor_remove(rule);
	pf_empty_kpool(&rule->rpool.list);

	pf_krule_free(rule);
}

static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
	unsigned int i;
	unsigned int hashsize;

	if (*tunable_size == 0 || !powerof2(*tunable_size))
		*tunable_size = default_size;

	hashsize = *tunable_size;
	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
	    M_WAITOK);
	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
	    M_WAITOK);
	ts->mask = hashsize - 1;
	ts->seed = arc4random();
	for (i = 0; i < hashsize; i++) {
		TAILQ_INIT(&ts->namehash[i]);
		TAILQ_INIT(&ts->taghash[i]);
	}
	BIT_FILL(TAGID_MAX, &ts->avail);
}
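
/*
 * Sizing note: pf_init_tagset() forces the hash size to a power of two,
 * which lets tagname2hashindex() and tag2hashindex() below reduce a hash
 * value to a bucket index with a bitwise AND instead of a modulo.  For
 * example, with the default size of 128 a hash of 0x2a7 maps to bucket
 * 0x2a7 & 0x7f == 0x27.
 */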

static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
	unsigned int i;
	unsigned int hashsize;
	struct pf_tagname *t, *tmp;

	/*
	 * Only need to clean up one of the hashes as each tag is hashed
	 * into each table.
	 */
	hashsize = ts->mask + 1;
	for (i = 0; i < hashsize; i++)
		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
			uma_zfree(V_pf_tag_z, t);

	free(ts->namehash, M_PFHASH);
	free(ts->taghash, M_PFHASH);
}

static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
	size_t len;

	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}

static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

	return (tag & ts->mask);
}

static u_int16_t
tagname2tag(struct pf_tagset *ts, const char *tagname)
{
	struct pf_tagname	*tag;
	u_int32_t		 index;
	u_int16_t		 new_tagid;

	PF_RULES_WASSERT();

	index = tagname2hashindex(ts, tagname);
	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * new entry
	 *
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find.
	 */
	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
	/*
	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
	 * set.  It may also return a bit number greater than TAGID_MAX due
	 * to rounding of the number of bits in the vector up to a multiple
	 * of the vector word size at declaration/allocation time.
	 */
	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
		return (0);

	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);

	/* allocate and fill new struct pf_tagname */
	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref = 1;

	/* Insert into namehash */
	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);

	/* Insert into taghash */
	index = tag2hashindex(ts, new_tagid);
	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);

	return (tag->tag);
}

static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname	*t;
	uint16_t		 index;

	PF_RULES_WASSERT();

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			if (--t->ref == 0) {
				TAILQ_REMOVE(&ts->taghash[index], t,
				    taghash_entries);
				index = tagname2hashindex(ts, t->name);
				TAILQ_REMOVE(&ts->namehash[index], t,
				    namehash_entries);
				/* Bits are 0-based for BIT_SET() */
				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
				uma_zfree(V_pf_tag_z, t);
			}
			break;
		}
}

static uint16_t
pf_tagname2tag(const char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}
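
/*
 * Typical tag lifecycle (the name "http" is only an example):
 * pf_tagname2tag("http") either bumps the refcount of an existing entry
 * or allocates the lowest free id in [1, TAGID_MAX]; a matching
 * tag_unref() drops the reference and returns the id to the "avail"
 * bitset once the count reaches zero.
 */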

static int
pf_begin_eth(uint32_t *ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_or_create_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	if (rs->inactive.open)
		/* We may be waiting for NET_EPOCH_CALL(pf_rollback_eth_cb) to
		 * finish. */
		return (EBUSY);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	*ticket = ++rs->inactive.ticket;
	rs->inactive.open = 1;

	return (0);
}

static void
pf_rollback_eth_cb(struct epoch_context *ctx)
{
	struct pf_keth_ruleset *rs;

	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);

	CURVNET_SET(rs->vnet);

	PF_RULES_WLOCK();
	pf_rollback_eth(rs->inactive.ticket,
	    rs->anchor ? rs->anchor->path : "");
	PF_RULES_WUNLOCK();

	CURVNET_RESTORE();
}

static int
pf_rollback_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (0);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries, tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	rs->inactive.open = 0;

	pf_remove_if_empty_keth_ruleset(rs);

	return (0);
}

#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

static void
pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
{
	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
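
/*
 * Sketch of the effect of the skip steps computed above: in a ruleset
 * where, say, the first three rules share one interface, each of those
 * rules' skip[PFE_SKIP_IFP] points at the first rule with a different
 * interface, so a packet that fails the interface test can jump over
 * the whole run instead of evaluating every rule in it.
 */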

static int
pf_commit_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_ruleq *rules;
	struct pf_keth_ruleset *rs;

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL) {
		return (EINVAL);
	}

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (EBUSY);

	PF_RULES_WASSERT();

	pf_eth_calc_skip_steps(rs->inactive.rules);

	rules = rs->active.rules;
	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
	rs->inactive.rules = rules;
	rs->inactive.ticket = rs->active.ticket;

	/* Clean up inactive rules (i.e. previously active rules), only when
	 * we're sure they're no longer used. */
	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);

	return (0);
}
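
/*
 * The functions above form a small transaction; a hypothetical in-kernel
 * caller would use them roughly as:
 *
 *	uint32_t ticket;
 *
 *	error = pf_begin_eth(&ticket, anchor);
 *	 ... stage rules on the inactive list under this ticket ...
 *	error = pf_commit_eth(ticket, anchor);
 *
 * pf_commit_eth() swaps the active and inactive lists and defers freeing
 * of the previously active rules to an epoch callback, so readers still
 * traversing the old list under the net epoch are safe.
 */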

#ifdef ALTQ
static uint16_t
pf_qname2qid(const char *qname)
{
	return (tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(uint16_t qid)
{
	tag_unref(&V_pf_qids, qid);
}

static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
	struct pf_altq		*altq, *tmp;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}
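
/*
 * The token bucket regulator programmed above paces the interface: as
 * used here, tb.rate is taken from the configured interface bandwidth
 * (bits per second) and tb.depth from tbrsize (bytes of burst), so a
 * 100 Mbit/s limit with a 16 KB bucket would be tb.rate = 100000000,
 * tb.depth = 16384.
 */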

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * If the discipline is no longer referenced, it has been overridden
	 * by a new one.  In that case, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet	*ifp1;
	int		 error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}

void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
	}
	if (error)
		goto out;
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
			error = EBUSY;
			free(a2, M_PFALTQ);
			break;
		}
		a2->altq_disc = NULL;
		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
			if (strncmp(a3->ifname, a2->ifname,
			    IFNAMSIZ) == 0) {
				a2->altq_disc = a3->altq_disc;
				break;
			}
		}
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

out:
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif /* ALTQ */

static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_kruleset(anchor);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

#define	PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define	PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define	PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define	PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

static void
pf_hash_rule(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

static bool
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{
	MD5_CTX		ctx[2];
	u_int8_t	digest[2][PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx[0]);
	MD5Init(&ctx[1]);
	pf_hash_rule(&ctx[0], a);
	pf_hash_rule(&ctx[1], b);
	MD5Final(digest[0], &ctx[0]);
	MD5Final(digest[1], &ctx[1]);

	return (memcmp(digest[0], digest[1], PF_MD5_DIGEST_LENGTH) == 0);
}
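
/*
 * Rule identity is thus defined by the MD5 digest of the fields hashed
 * in pf_hash_rule(), not by pointer or position: two rules loaded at
 * different times compare equal as long as those header fields match.
 * pf_commit_rules() below relies on this to carry counters across a
 * ruleset reload.
 */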

static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule, **old_array, *tail;
	struct pf_krulequeue	*old_rules;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/* Attempt to preserve counter information. */
	if (V_pf_status.keep_counters) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			tail = TAILQ_FIRST(old_rules);
			while ((tail != NULL) && ! pf_krule_compare(tail, rule))
				tail = TAILQ_NEXT(tail, entries);
			if (tail != NULL) {
				pf_counter_u64_critical_enter();
				pf_counter_u64_add_protected(&rule->evaluations,
				    pf_counter_u64_fetch(&tail->evaluations));
				pf_counter_u64_add_protected(&rule->packets[0],
				    pf_counter_u64_fetch(&tail->packets[0]));
				pf_counter_u64_add_protected(&rule->packets[1],
				    pf_counter_u64_fetch(&tail->packets[1]));
				pf_counter_u64_add_protected(&rule->bytes[0],
				    pf_counter_u64_fetch(&tail->bytes[0]));
				pf_counter_u64_add_protected(&rule->bytes[1],
				    pf_counter_u64_fetch(&tail->bytes[1]));
				pf_counter_u64_critical_exit();
			}
		}
	}

	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule(old_rules, rule);
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);

	return (0);
}
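
/*
 * Note that the counter-preserving walk above rescans the old list from
 * its head for every new rule, so with keep_counters enabled a reload
 * of n rules can cost on the order of n^2 digest comparisons in the
 * worst case.
 */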

static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_krule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    malloc(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

static void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}

static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
	int	secs = time_uptime, diff;

	bzero(out, sizeof(struct pf_src_node));

	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

	if (in->rule.ptr != NULL)
		out->rule.nr = in->rule.ptr->nr;

	for (int i = 0; i < 2; i++) {
		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
		out->packets[i] = counter_u64_fetch(in->packets[i]);
	}

	out->states = in->states;
	out->conn = in->conn;
	out->af = in->af;
	out->ruletype = in->ruletype;

	out->creation = secs - in->creation;
	if (out->expire > secs)
		out->expire -= secs;
	else
		out->expire = 0;

	/* Adjust the connection rate estimate. */
	diff = secs - in->conn_rate.last;
	if (diff >= in->conn_rate.seconds)
		out->conn_rate.count = 0;
	else
		out->conn_rate.count -=
		    in->conn_rate.count * diff /
		    in->conn_rate.seconds;
}
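
/*
 * The adjustment above is meant as a linear decay of the sampled
 * connection rate over its measurement window: a sample of count
 * connections taken diff seconds ago is aged by count * diff / seconds
 * and discarded entirely once diff reaches the full window.
 */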

#ifdef ALTQ
/*
 * Handle export of struct pf_kaltq to user binaries that may be using any
 * version of struct pf_altq.
 */
static int
pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) exported_q->x = q->x
#define COPY(x) \
	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
#define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
#define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)

	switch (version) {
	case 0: {
		struct pf_altq_v0 *exported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		exported_q->tbrsize = SATU16(q->tbrsize);
		exported_q->ifbandwidth = SATU32(q->ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		exported_q->bandwidth = SATU32(q->bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
#define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
	    SATU32(q->pq_u.hfsc_opts.x)

			ASSIGN_OPT_SATU32(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT_SATU32(rtsc_m2);

			ASSIGN_OPT_SATU32(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT_SATU32(lssc_m2);

			ASSIGN_OPT_SATU32(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT_SATU32(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
#undef ASSIGN_OPT_SATU32
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *exported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY
#undef SATU16
#undef SATU32

	return (0);
}
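
/*
 * Example of the saturating conversions above: a v0 consumer stores
 * ifbandwidth in 32 bits, so a 10 Gbit/s value of 10000000000 is
 * exported as SATU32(10000000000) == UINT_MAX rather than being
 * silently truncated to its low-order bits.
 */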

/*
 * Handle import to struct pf_kaltq of struct pf_altq from user binaries
 * that may be using any version of it.
 */
static int
pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) q->x = imported_q->x
#define COPY(x) \
	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))

	switch (version) {
	case 0: {
		struct pf_altq_v0 *imported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (imported_q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x

			/*
			 * The m1 and m2 parameters are being copied from
			 * 32-bit to 64-bit.
			 */
			ASSIGN_OPT(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT(rtsc_m2);

			ASSIGN_OPT(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT(lssc_m2);

			ASSIGN_OPT(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1: {
		struct pf_altq_v1 *imported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY

	return (0);
}
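
/*
 * In both directions the version is keyed off the size of the ioctl
 * argument: a struct pfioc_altq_v0-sized request is assumed to come
 * from a v0 binary, while larger requests carry an explicit version
 * field.  Imports only widen fields (16->32 and 32->64 bits), which is
 * why the import path needs no saturation.
 */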

static struct pf_altq *
pf_altq_get_nth_active(u_int32_t n)
{
	struct pf_altq		*altq;
	u_int32_t		 nr;

	nr = 0;
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
		if (nr == n)
			return (altq);
		nr++;
	}

	return (NULL);
}
#endif /* ALTQ */

struct pf_krule *
pf_krule_alloc(void)
{
	struct pf_krule *rule;

	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
	mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
	return (rule);
}

void
pf_krule_free(struct pf_krule *rule)
{
#ifdef PF_WANT_32_TO_64_COUNTER
	bool wowned;
#endif

	if (rule == NULL)
		return;

#ifdef PF_WANT_32_TO_64_COUNTER
	if (rule->allrulelinked) {
		wowned = PF_RULES_WOWNED();
		if (!wowned)
			PF_RULES_WLOCK();
		LIST_REMOVE(rule, allrulelist);
		V_pf_allrulecount--;
		if (!wowned)
			PF_RULES_WUNLOCK();
	}
#endif

	pf_counter_u64_deinit(&rule->evaluations);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_deinit(&rule->packets[i]);
		pf_counter_u64_deinit(&rule->bytes[i]);
	}
	counter_u64_free(rule->states_cur);
	counter_u64_free(rule->states_tot);
	counter_u64_free(rule->src_nodes);

	mtx_destroy(&rule->rpool.mtx);
	free(rule, M_PFRULE);
}

static void
pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
    struct pf_pooladdr *pool)
{

	bzero(pool, sizeof(*pool));
	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
}

static int
pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
    struct pf_kpooladdr *kpool)
{
	int ret;

	bzero(kpool, sizeof(*kpool));
	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
	    sizeof(kpool->ifname));
	return (ret);
}

static void
pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
{
	bzero(pool, sizeof(*pool));

	bcopy(&kpool->key, &pool->key, sizeof(pool->key));
	bcopy(&kpool->counter, &pool->counter, sizeof(pool->counter));

	pool->tblidx = kpool->tblidx;
	pool->proxy_port[0] = kpool->proxy_port[0];
	pool->proxy_port[1] = kpool->proxy_port[1];
	pool->opts = kpool->opts;
}

static void
pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
{
	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");

	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));

	kpool->tblidx = pool->tblidx;
	kpool->proxy_port[0] = pool->proxy_port[0];
	kpool->proxy_port[1] = pool->proxy_port[1];
	kpool->opts = pool->opts;
}

static void
pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
{

	bzero(rule, sizeof(*rule));

	bcopy(&krule->src, &rule->src, sizeof(rule->src));
	bcopy(&krule->dst, &rule->dst, sizeof(rule->dst));

	for (int i = 0; i < PF_SKIP_COUNT; ++i) {
		if (krule->skip[i].ptr == NULL)
			rule->skip[i].nr = -1;
		else
			rule->skip[i].nr = krule->skip[i].ptr->nr;
	}

	strlcpy(rule->label, krule->label[0], sizeof(rule->label));
	strlcpy(rule->ifname, krule->ifname, sizeof(rule->ifname));
	strlcpy(rule->qname, krule->qname, sizeof(rule->qname));
	strlcpy(rule->pqname, krule->pqname, sizeof(rule->pqname));
	strlcpy(rule->tagname, krule->tagname, sizeof(rule->tagname));
	strlcpy(rule->match_tagname, krule->match_tagname,
	    sizeof(rule->match_tagname));
	strlcpy(rule->overload_tblname, krule->overload_tblname,
	    sizeof(rule->overload_tblname));

	pf_kpool_to_pool(&krule->rpool, &rule->rpool);

	rule->evaluations = pf_counter_u64_fetch(&krule->evaluations);
	for (int i = 0; i < 2; i++) {
		rule->packets[i] = pf_counter_u64_fetch(&krule->packets[i]);
		rule->bytes[i] = pf_counter_u64_fetch(&krule->bytes[i]);
	}

	/* kif, anchor, overload_tbl are not copied over. */

	rule->os_fingerprint = krule->os_fingerprint;

	rule->rtableid = krule->rtableid;
	bcopy(krule->timeout, rule->timeout, sizeof(krule->timeout));
	rule->max_states = krule->max_states;
	rule->max_src_nodes = krule->max_src_nodes;
	rule->max_src_states = krule->max_src_states;
	rule->max_src_conn = krule->max_src_conn;
	rule->max_src_conn_rate.limit = krule->max_src_conn_rate.limit;
	rule->max_src_conn_rate.seconds = krule->max_src_conn_rate.seconds;
	rule->qid = krule->qid;
	rule->pqid = krule->pqid;
	rule->nr = krule->nr;
	rule->prob = krule->prob;
	rule->cuid = krule->cuid;
	rule->cpid = krule->cpid;

	rule->return_icmp = krule->return_icmp;
	rule->return_icmp6 = krule->return_icmp6;
	rule->max_mss = krule->max_mss;
	rule->tag = krule->tag;
	rule->match_tag = krule->match_tag;
	rule->scrub_flags = krule->scrub_flags;

	bcopy(&krule->uid, &rule->uid, sizeof(krule->uid));
	bcopy(&krule->gid, &rule->gid, sizeof(krule->gid));

	rule->rule_flag = krule->rule_flag;
	rule->action = krule->action;
	rule->direction = krule->direction;
	rule->log = krule->log;
	rule->logif = krule->logif;
	rule->quick = krule->quick;
	rule->ifnot = krule->ifnot;
	rule->match_tag_not = krule->match_tag_not;
	rule->natpass = krule->natpass;

	rule->keep_state = krule->keep_state;
	rule->af = krule->af;
	rule->proto = krule->proto;
	rule->type = krule->type;
	rule->code = krule->code;
	rule->flags = krule->flags;
	rule->flagset = krule->flagset;
	rule->min_ttl = krule->min_ttl;
	rule->allow_opts = krule->allow_opts;
	rule->rt = krule->rt;
	rule->return_ttl = krule->return_ttl;
	rule->tos = krule->tos;
	rule->set_tos = krule->set_tos;
	rule->anchor_relative = krule->anchor_relative;
	rule->anchor_wildcard = krule->anchor_wildcard;

	rule->flush = krule->flush;
	rule->prio = krule->prio;
	rule->set_prio[0] = krule->set_prio[0];
	rule->set_prio[1] = krule->set_prio[1];

	bcopy(&krule->divert, &rule->divert, sizeof(krule->divert));

	rule->u_states_cur = counter_u64_fetch(krule->states_cur);
	rule->u_states_tot = counter_u64_fetch(krule->states_tot);
	rule->u_src_nodes = counter_u64_fetch(krule->src_nodes);
}

static int
pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
{
	int ret;

#ifndef INET
	if (rule->af == AF_INET) {
		return (EAFNOSUPPORT);
	}
#endif /* INET */
#ifndef INET6
	if (rule->af == AF_INET6) {
		return (EAFNOSUPPORT);
	}
#endif /* INET6 */

	ret = pf_check_rule_addr(&rule->src);
	if (ret != 0)
		return (ret);
	ret = pf_check_rule_addr(&rule->dst);
	if (ret != 0)
		return (ret);

	bcopy(&rule->src, &krule->src, sizeof(rule->src));
	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));

	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->tagname, rule->tagname,
	    sizeof(rule->tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
	    sizeof(rule->match_tagname));
	if (ret != 0)
		return (ret);
	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
	    sizeof(rule->overload_tblname));
	if (ret != 0)
		return (ret);

	pf_pool_to_kpool(&rule->rpool, &krule->rpool);

	/* Don't allow userspace to set evaluations, packets or bytes. */
	/* kif, anchor, overload_tbl are not copied over. */

	krule->os_fingerprint = rule->os_fingerprint;

	krule->rtableid = rule->rtableid;
	bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
	krule->max_states = rule->max_states;
	krule->max_src_nodes = rule->max_src_nodes;
	krule->max_src_states = rule->max_src_states;
	krule->max_src_conn = rule->max_src_conn;
	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
	krule->qid = rule->qid;
	krule->pqid = rule->pqid;
	krule->nr = rule->nr;
	krule->prob = rule->prob;
	krule->cuid = rule->cuid;
	krule->cpid = rule->cpid;

	krule->return_icmp = rule->return_icmp;
	krule->return_icmp6 = rule->return_icmp6;
	krule->max_mss = rule->max_mss;
	krule->tag = rule->tag;
	krule->match_tag = rule->match_tag;
	krule->scrub_flags = rule->scrub_flags;

	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));

	krule->rule_flag = rule->rule_flag;
	krule->action = rule->action;
	krule->direction = rule->direction;
	krule->log = rule->log;
	krule->logif = rule->logif;
	krule->quick = rule->quick;
	krule->ifnot = rule->ifnot;
	krule->match_tag_not = rule->match_tag_not;
	krule->natpass = rule->natpass;

	krule->keep_state = rule->keep_state;
	krule->af = rule->af;
	krule->proto = rule->proto;
	krule->type = rule->type;
	krule->code = rule->code;
	krule->flags = rule->flags;
	krule->flagset = rule->flagset;
	krule->min_ttl = rule->min_ttl;
	krule->allow_opts = rule->allow_opts;
	krule->rt = rule->rt;
	krule->return_ttl = rule->return_ttl;
	krule->tos = rule->tos;
	krule->set_tos = rule->set_tos;

	krule->flush = rule->flush;
	krule->prio = rule->prio;
	krule->set_prio[0] = rule->set_prio[0];
	krule->set_prio[1] = rule->set_prio[1];

	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));

	return (0);
}
2047 struct thread *td) 2048 { 2049 struct pf_kruleset *ruleset; 2050 struct pf_krule *tail; 2051 struct pf_kpooladdr *pa; 2052 struct pfi_kkif *kif = NULL; 2053 int rs_num; 2054 int error = 0; 2055 2056 if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) { 2057 error = EINVAL; 2058 goto errout_unlocked; 2059 } 2060 2061 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 2062 2063 if (rule->ifname[0]) 2064 kif = pf_kkif_create(M_WAITOK); 2065 pf_counter_u64_init(&rule->evaluations, M_WAITOK); 2066 for (int i = 0; i < 2; i++) { 2067 pf_counter_u64_init(&rule->packets[i], M_WAITOK); 2068 pf_counter_u64_init(&rule->bytes[i], M_WAITOK); 2069 } 2070 rule->states_cur = counter_u64_alloc(M_WAITOK); 2071 rule->states_tot = counter_u64_alloc(M_WAITOK); 2072 rule->src_nodes = counter_u64_alloc(M_WAITOK); 2073 rule->cuid = td->td_ucred->cr_ruid; 2074 rule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 2075 TAILQ_INIT(&rule->rpool.list); 2076 2077 PF_RULES_WLOCK(); 2078 #ifdef PF_WANT_32_TO_64_COUNTER 2079 LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist); 2080 MPASS(!rule->allrulelinked); 2081 rule->allrulelinked = true; 2082 V_pf_allrulecount++; 2083 #endif 2084 ruleset = pf_find_kruleset(anchor); 2085 if (ruleset == NULL) 2086 ERROUT(EINVAL); 2087 rs_num = pf_get_ruleset_number(rule->action); 2088 if (rs_num >= PF_RULESET_MAX) 2089 ERROUT(EINVAL); 2090 if (ticket != ruleset->rules[rs_num].inactive.ticket) { 2091 DPFPRINTF(PF_DEBUG_MISC, 2092 ("ticket: %d != [%d]%d\n", ticket, rs_num, 2093 ruleset->rules[rs_num].inactive.ticket)); 2094 ERROUT(EBUSY); 2095 } 2096 if (pool_ticket != V_ticket_pabuf) { 2097 DPFPRINTF(PF_DEBUG_MISC, 2098 ("pool_ticket: %d != %d\n", pool_ticket, 2099 V_ticket_pabuf)); 2100 ERROUT(EBUSY); 2101 } 2102 2103 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 2104 pf_krulequeue); 2105 if (tail) 2106 rule->nr = tail->nr + 1; 2107 else 2108 rule->nr = 0; 2109 if (rule->ifname[0]) { 2110 rule->kif = pfi_kkif_attach(kif, rule->ifname); 2111 kif = NULL; 2112 pfi_kkif_ref(rule->kif); 2113 } else 2114 rule->kif = NULL; 2115 2116 if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs) 2117 error = EBUSY; 2118 2119 #ifdef ALTQ 2120 /* set queue IDs */ 2121 if (rule->qname[0] != 0) { 2122 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 2123 error = EBUSY; 2124 else if (rule->pqname[0] != 0) { 2125 if ((rule->pqid = 2126 pf_qname2qid(rule->pqname)) == 0) 2127 error = EBUSY; 2128 } else 2129 rule->pqid = rule->qid; 2130 } 2131 #endif 2132 if (rule->tagname[0]) 2133 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 2134 error = EBUSY; 2135 if (rule->match_tagname[0]) 2136 if ((rule->match_tag = 2137 pf_tagname2tag(rule->match_tagname)) == 0) 2138 error = EBUSY; 2139 if (rule->rt && !rule->direction) 2140 error = EINVAL; 2141 if (!rule->log) 2142 rule->logif = 0; 2143 if (rule->logif >= PFLOGIFS_MAX) 2144 error = EINVAL; 2145 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 2146 error = ENOMEM; 2147 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 2148 error = ENOMEM; 2149 if (pf_kanchor_setup(rule, ruleset, anchor_call)) 2150 error = EINVAL; 2151 if (rule->scrub_flags & PFSTATE_SETPRIO && 2152 (rule->set_prio[0] > PF_PRIO_MAX || 2153 rule->set_prio[1] > PF_PRIO_MAX)) 2154 error = EINVAL; 2155 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 2156 if (pa->addr.type == PF_ADDR_TABLE) { 2157 pa->addr.p.tbl = pfr_attach_table(ruleset, 2158 pa->addr.v.tblname); 2159 if (pa->addr.p.tbl == NULL) 2160 error = ENOMEM; 2161 } 2162 2163 rule->overload_tbl = NULL; 2164 if 
(rule->overload_tblname[0]) { 2165 if ((rule->overload_tbl = pfr_attach_table(ruleset, 2166 rule->overload_tblname)) == NULL) 2167 error = EINVAL; 2168 else 2169 rule->overload_tbl->pfrkt_flags |= 2170 PFR_TFLAG_ACTIVE; 2171 } 2172 2173 pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list); 2174 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 2175 (rule->action == PF_BINAT)) && rule->anchor == NULL) || 2176 (rule->rt > PF_NOPFROUTE)) && 2177 (TAILQ_FIRST(&rule->rpool.list) == NULL)) 2178 error = EINVAL; 2179 2180 if (error) { 2181 pf_free_rule(rule); 2182 rule = NULL; 2183 ERROUT(error); 2184 } 2185 2186 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 2187 pf_counter_u64_zero(&rule->evaluations); 2188 for (int i = 0; i < 2; i++) { 2189 pf_counter_u64_zero(&rule->packets[i]); 2190 pf_counter_u64_zero(&rule->bytes[i]); 2191 } 2192 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 2193 rule, entries); 2194 ruleset->rules[rs_num].inactive.rcount++; 2195 2196 PF_RULES_WUNLOCK(); 2197 2198 return (0); 2199 2200 #undef ERROUT 2201 errout: 2202 PF_RULES_WUNLOCK(); 2203 errout_unlocked: 2204 pf_kkif_free(kif); 2205 pf_krule_free(rule); 2206 return (error); 2207 } 2208 2209 static bool 2210 pf_label_match(const struct pf_krule *rule, const char *label) 2211 { 2212 int i = 0; 2213 2214 while (*rule->label[i]) { 2215 if (strcmp(rule->label[i], label) == 0) 2216 return (true); 2217 i++; 2218 } 2219 2220 return (false); 2221 } 2222 2223 static unsigned int 2224 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir) 2225 { 2226 struct pf_kstate *s; 2227 int more = 0; 2228 2229 s = pf_find_state_all(key, dir, &more); 2230 if (s == NULL) 2231 return (0); 2232 2233 if (more) { 2234 PF_STATE_UNLOCK(s); 2235 return (0); 2236 } 2237 2238 pf_unlink_state(s); 2239 return (1); 2240 } 2241 2242 static int 2243 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih) 2244 { 2245 struct pf_kstate *s; 2246 struct pf_state_key *sk; 2247 struct pf_addr *srcaddr, *dstaddr; 2248 struct pf_state_key_cmp match_key; 2249 int idx, killed = 0; 2250 unsigned int dir; 2251 u_int16_t srcport, dstport; 2252 struct pfi_kkif *kif; 2253 2254 relock_DIOCKILLSTATES: 2255 PF_HASHROW_LOCK(ih); 2256 LIST_FOREACH(s, &ih->states, entry) { 2257 /* For floating states look at the original kif. */ 2258 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 2259 2260 sk = s->key[PF_SK_WIRE]; 2261 if (s->direction == PF_OUT) { 2262 srcaddr = &sk->addr[1]; 2263 dstaddr = &sk->addr[0]; 2264 srcport = sk->port[1]; 2265 dstport = sk->port[0]; 2266 } else { 2267 srcaddr = &sk->addr[0]; 2268 dstaddr = &sk->addr[1]; 2269 srcport = sk->port[0]; 2270 dstport = sk->port[1]; 2271 } 2272 2273 if (psk->psk_af && sk->af != psk->psk_af) 2274 continue; 2275 2276 if (psk->psk_proto && psk->psk_proto != sk->proto) 2277 continue; 2278 2279 if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr, 2280 &psk->psk_src.addr.v.a.mask, srcaddr, sk->af)) 2281 continue; 2282 2283 if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr, 2284 &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af)) 2285 continue; 2286 2287 if (! PF_MATCHA(psk->psk_rt_addr.neg, 2288 &psk->psk_rt_addr.addr.v.a.addr, 2289 &psk->psk_rt_addr.addr.v.a.mask, 2290 &s->rt_addr, sk->af)) 2291 continue; 2292 2293 if (psk->psk_src.port_op != 0 && 2294 ! pf_match_port(psk->psk_src.port_op, 2295 psk->psk_src.port[0], psk->psk_src.port[1], srcport)) 2296 continue; 2297 2298 if (psk->psk_dst.port_op != 0 && 2299 ! 
pf_match_port(psk->psk_dst.port_op, 2300 psk->psk_dst.port[0], psk->psk_dst.port[1], dstport)) 2301 continue; 2302 2303 if (psk->psk_label[0] && 2304 ! pf_label_match(s->rule.ptr, psk->psk_label)) 2305 continue; 2306 2307 if (psk->psk_ifname[0] && strcmp(psk->psk_ifname, 2308 kif->pfik_name)) 2309 continue; 2310 2311 if (psk->psk_kill_match) { 2312 /* Create the key to find matching states, with lock 2313 * held. */ 2314 2315 bzero(&match_key, sizeof(match_key)); 2316 2317 if (s->direction == PF_OUT) { 2318 dir = PF_IN; 2319 idx = PF_SK_STACK; 2320 } else { 2321 dir = PF_OUT; 2322 idx = PF_SK_WIRE; 2323 } 2324 2325 match_key.af = s->key[idx]->af; 2326 match_key.proto = s->key[idx]->proto; 2327 PF_ACPY(&match_key.addr[0], 2328 &s->key[idx]->addr[1], match_key.af); 2329 match_key.port[0] = s->key[idx]->port[1]; 2330 PF_ACPY(&match_key.addr[1], 2331 &s->key[idx]->addr[0], match_key.af); 2332 match_key.port[1] = s->key[idx]->port[0]; 2333 } 2334 2335 pf_unlink_state(s); 2336 killed++; 2337 2338 if (psk->psk_kill_match) 2339 killed += pf_kill_matching_state(&match_key, dir); 2340 2341 goto relock_DIOCKILLSTATES; 2342 } 2343 PF_HASHROW_UNLOCK(ih); 2344 2345 return (killed); 2346 } 2347 2348 static int 2349 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 2350 { 2351 int error = 0; 2352 PF_RULES_RLOCK_TRACKER; 2353 2354 #define ERROUT_IOCTL(target, x) \ 2355 do { \ 2356 error = (x); \ 2357 SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__); \ 2358 goto target; \ 2359 } while (0) 2360 2361 2362 /* XXX keep in sync with switch() below */ 2363 if (securelevel_gt(td->td_ucred, 2)) 2364 switch (cmd) { 2365 case DIOCGETRULES: 2366 case DIOCGETRULE: 2367 case DIOCGETRULENV: 2368 case DIOCGETADDRS: 2369 case DIOCGETADDR: 2370 case DIOCGETSTATE: 2371 case DIOCGETSTATENV: 2372 case DIOCSETSTATUSIF: 2373 case DIOCGETSTATUS: 2374 case DIOCGETSTATUSNV: 2375 case DIOCCLRSTATUS: 2376 case DIOCNATLOOK: 2377 case DIOCSETDEBUG: 2378 case DIOCGETSTATES: 2379 case DIOCGETSTATESV2: 2380 case DIOCGETTIMEOUT: 2381 case DIOCCLRRULECTRS: 2382 case DIOCGETLIMIT: 2383 case DIOCGETALTQSV0: 2384 case DIOCGETALTQSV1: 2385 case DIOCGETALTQV0: 2386 case DIOCGETALTQV1: 2387 case DIOCGETQSTATSV0: 2388 case DIOCGETQSTATSV1: 2389 case DIOCGETRULESETS: 2390 case DIOCGETRULESET: 2391 case DIOCRGETTABLES: 2392 case DIOCRGETTSTATS: 2393 case DIOCRCLRTSTATS: 2394 case DIOCRCLRADDRS: 2395 case DIOCRADDADDRS: 2396 case DIOCRDELADDRS: 2397 case DIOCRSETADDRS: 2398 case DIOCRGETADDRS: 2399 case DIOCRGETASTATS: 2400 case DIOCRCLRASTATS: 2401 case DIOCRTSTADDRS: 2402 case DIOCOSFPGET: 2403 case DIOCGETSRCNODES: 2404 case DIOCCLRSRCNODES: 2405 case DIOCGETSYNCOOKIES: 2406 case DIOCIGETIFACES: 2407 case DIOCGIFSPEEDV0: 2408 case DIOCGIFSPEEDV1: 2409 case DIOCSETIFFLAG: 2410 case DIOCCLRIFFLAG: 2411 case DIOCGETETHRULES: 2412 case DIOCGETETHRULE: 2413 break; 2414 case DIOCRCLRTABLES: 2415 case DIOCRADDTABLES: 2416 case DIOCRDELTABLES: 2417 case DIOCRSETTFLAGS: 2418 if (((struct pfioc_table *)addr)->pfrio_flags & 2419 PFR_FLAG_DUMMY) 2420 break; /* dummy operation ok */ 2421 return (EPERM); 2422 default: 2423 return (EPERM); 2424 } 2425 2426 if (!(flags & FWRITE)) 2427 switch (cmd) { 2428 case DIOCGETRULES: 2429 case DIOCGETADDRS: 2430 case DIOCGETADDR: 2431 case DIOCGETSTATE: 2432 case DIOCGETSTATENV: 2433 case DIOCGETSTATUS: 2434 case DIOCGETSTATUSNV: 2435 case DIOCGETSTATES: 2436 case DIOCGETSTATESV2: 2437 case DIOCGETTIMEOUT: 2438 case DIOCGETLIMIT: 2439 case DIOCGETALTQSV0: 2440 case DIOCGETALTQSV1: 
2441 case DIOCGETALTQV0: 2442 case DIOCGETALTQV1: 2443 case DIOCGETQSTATSV0: 2444 case DIOCGETQSTATSV1: 2445 case DIOCGETRULESETS: 2446 case DIOCGETRULESET: 2447 case DIOCNATLOOK: 2448 case DIOCRGETTABLES: 2449 case DIOCRGETTSTATS: 2450 case DIOCRGETADDRS: 2451 case DIOCRGETASTATS: 2452 case DIOCRTSTADDRS: 2453 case DIOCOSFPGET: 2454 case DIOCGETSRCNODES: 2455 case DIOCGETSYNCOOKIES: 2456 case DIOCIGETIFACES: 2457 case DIOCGIFSPEEDV1: 2458 case DIOCGIFSPEEDV0: 2459 case DIOCGETRULENV: 2460 case DIOCGETETHRULES: 2461 case DIOCGETETHRULE: 2462 break; 2463 case DIOCRCLRTABLES: 2464 case DIOCRADDTABLES: 2465 case DIOCRDELTABLES: 2466 case DIOCRCLRTSTATS: 2467 case DIOCRCLRADDRS: 2468 case DIOCRADDADDRS: 2469 case DIOCRDELADDRS: 2470 case DIOCRSETADDRS: 2471 case DIOCRSETTFLAGS: 2472 if (((struct pfioc_table *)addr)->pfrio_flags & 2473 PFR_FLAG_DUMMY) { 2474 flags |= FWRITE; /* need write lock for dummy */ 2475 break; /* dummy operation ok */ 2476 } 2477 return (EACCES); 2478 case DIOCGETRULE: 2479 if (((struct pfioc_rule *)addr)->action == 2480 PF_GET_CLR_CNTR) 2481 return (EACCES); 2482 break; 2483 default: 2484 return (EACCES); 2485 } 2486 2487 CURVNET_SET(TD_TO_VNET(td)); 2488 2489 switch (cmd) { 2490 case DIOCSTART: 2491 sx_xlock(&pf_ioctl_lock); 2492 if (V_pf_status.running) 2493 error = EEXIST; 2494 else { 2495 int cpu; 2496 2497 hook_pf(); 2498 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) 2499 hook_pf_eth(); 2500 V_pf_status.running = 1; 2501 V_pf_status.since = time_second; 2502 2503 CPU_FOREACH(cpu) 2504 V_pf_stateid[cpu] = time_second; 2505 2506 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 2507 } 2508 break; 2509 2510 case DIOCSTOP: 2511 sx_xlock(&pf_ioctl_lock); 2512 if (!V_pf_status.running) 2513 error = ENOENT; 2514 else { 2515 V_pf_status.running = 0; 2516 dehook_pf(); 2517 dehook_pf_eth(); 2518 V_pf_status.since = time_second; 2519 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 2520 } 2521 break; 2522 2523 case DIOCGETETHRULES: { 2524 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2525 nvlist_t *nvl; 2526 void *packed; 2527 struct pf_keth_rule *tail; 2528 struct pf_keth_ruleset *rs; 2529 u_int32_t ticket, nr; 2530 const char *anchor = ""; 2531 2532 nvl = NULL; 2533 packed = NULL; 2534 2535 #define ERROUT(x) do { error = (x); goto DIOCGETETHRULES_error; } while (0) 2536 2537 if (nv->len > pf_ioctl_maxcount) 2538 ERROUT(ENOMEM); 2539 2540 /* Copy the request in */ 2541 packed = malloc(nv->len, M_NVLIST, M_WAITOK); 2542 if (packed == NULL) 2543 ERROUT(ENOMEM); 2544 2545 error = copyin(nv->data, packed, nv->len); 2546 if (error) 2547 ERROUT(error); 2548 2549 nvl = nvlist_unpack(packed, nv->len, 0); 2550 if (nvl == NULL) 2551 ERROUT(EBADMSG); 2552 2553 if (! 
	    nvlist_exists_string(nvl, "anchor"))
			ERROUT(EBADMSG);

		anchor = nvlist_get_string(nvl, "anchor");

		rs = pf_find_keth_ruleset(anchor);

		nvlist_destroy(nvl);
		nvl = NULL;
		free(packed, M_NVLIST);
		packed = NULL;

		if (rs == NULL)
			ERROUT(ENOENT);

		/* Reply */
		nvl = nvlist_create(0);
		if (nvl == NULL)
			ERROUT(ENOMEM);

		PF_RULES_RLOCK();

		ticket = rs->active.ticket;
		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
		if (tail)
			nr = tail->nr + 1;
		else
			nr = 0;

		PF_RULES_RUNLOCK();

		nvlist_add_number(nvl, "ticket", ticket);
		nvlist_add_number(nvl, "nr", nr);

		packed = nvlist_pack(nvl, &nv->len);
		if (packed == NULL)
			ERROUT(ENOMEM);

		if (nv->size == 0)
			ERROUT(0);
		else if (nv->size < nv->len)
			ERROUT(ENOSPC);

		error = copyout(packed, nv->data, nv->len);

#undef ERROUT
DIOCGETETHRULES_error:
		free(packed, M_NVLIST);
		nvlist_destroy(nvl);
		break;
	}

	case DIOCGETETHRULE: {
		struct epoch_tracker et;
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t *nvl = NULL;
		void *nvlpacked = NULL;
		struct pf_keth_rule *rule = NULL;
		struct pf_keth_ruleset *rs;
		u_int32_t ticket, nr;
		bool clear = false;
		const char *anchor;

#define ERROUT(x)	do { error = (x); goto DIOCGETETHRULE_error; } while (0)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);
		if (! nvlist_exists_number(nvl, "ticket"))
			ERROUT(EBADMSG);
		ticket = nvlist_get_number(nvl, "ticket");
		if (! nvlist_exists_string(nvl, "anchor"))
			ERROUT(EBADMSG);
		anchor = nvlist_get_string(nvl, "anchor");

		if (nvlist_exists_bool(nvl, "clear"))
			clear = nvlist_get_bool(nvl, "clear");

		if (clear && !(flags & FWRITE))
			ERROUT(EACCES);

		if (! nvlist_exists_number(nvl, "nr"))
			ERROUT(EBADMSG);
		nr = nvlist_get_number(nvl, "nr");

		PF_RULES_RLOCK();
		rs = pf_find_keth_ruleset(anchor);
		if (rs == NULL) {
			PF_RULES_RUNLOCK();
			ERROUT(ENOENT);
		}
		if (ticket != rs->active.ticket) {
			PF_RULES_RUNLOCK();
			ERROUT(EBUSY);
		}

		nvlist_destroy(nvl);
		nvl = NULL;
		free(nvlpacked, M_TEMP);
		nvlpacked = NULL;

		rule = TAILQ_FIRST(rs->active.rules);
		while ((rule != NULL) && (rule->nr != nr))
			rule = TAILQ_NEXT(rule, entries);
		if (rule == NULL) {
			PF_RULES_RUNLOCK();
			ERROUT(ENOENT);
		}
		/* Make sure rule can't go away. */
		NET_EPOCH_ENTER(et);
		PF_RULES_RUNLOCK();
		nvl = pf_keth_rule_to_nveth_rule(rule);
		if (nvl == NULL) {
			NET_EPOCH_EXIT(et);
			ERROUT(ENOMEM);
		}
		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
			NET_EPOCH_EXIT(et);
			ERROUT(EBUSY);
		}
		NET_EPOCH_EXIT(et);

		nvlpacked = nvlist_pack(nvl, &nv->len);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		if (nv->size == 0)
			ERROUT(0);
		else if (nv->size < nv->len)
			ERROUT(ENOSPC);

		error = copyout(nvlpacked, nv->data, nv->len);
		if (error == 0 && clear) {
			counter_u64_zero(rule->evaluations);
			for (int i = 0; i < 2; i++) {
				counter_u64_zero(rule->packets[i]);
				counter_u64_zero(rule->bytes[i]);
			}
		}

#undef ERROUT
DIOCGETETHRULE_error:
		free(nvlpacked, M_TEMP);
		nvlist_destroy(nvl);
		break;
	}

	case DIOCADDETHRULE: {
		struct pfioc_nv *nv = (struct pfioc_nv *)addr;
		nvlist_t *nvl = NULL;
		void *nvlpacked = NULL;
		struct pf_keth_rule *rule = NULL, *tail = NULL;
		struct pf_keth_ruleset *ruleset = NULL;
		struct pfi_kkif *kif = NULL;
		const char *anchor = "", *anchor_call = "";

#define ERROUT(x)	do { error = (x); goto DIOCADDETHRULE_error; } while (0)

		if (nv->len > pf_ioctl_maxcount)
			ERROUT(ENOMEM);

		nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK);
		if (nvlpacked == NULL)
			ERROUT(ENOMEM);

		error = copyin(nv->data, nvlpacked, nv->len);
		if (error)
			ERROUT(error);

		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
		if (nvl == NULL)
			ERROUT(EBADMSG);

		if (! nvlist_exists_number(nvl, "ticket"))
			ERROUT(EBADMSG);

		if (nvlist_exists_string(nvl, "anchor"))
			anchor = nvlist_get_string(nvl, "anchor");
		if (nvlist_exists_string(nvl, "anchor_call"))
			anchor_call = nvlist_get_string(nvl, "anchor_call");

		ruleset = pf_find_keth_ruleset(anchor);
		if (ruleset == NULL)
			ERROUT(EINVAL);

		if (nvlist_get_number(nvl, "ticket") !=
		    ruleset->inactive.ticket) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("ticket: %d != %d\n",
			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
			    ruleset->inactive.ticket));
			ERROUT(EBUSY);
		}

		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
		if (rule == NULL)
			ERROUT(ENOMEM);

		error = pf_nveth_rule_to_keth_rule(nvl, rule);
		if (error != 0)
			ERROUT(error);

		if (rule->ifname[0])
			kif = pf_kkif_create(M_WAITOK);
		rule->evaluations = counter_u64_alloc(M_WAITOK);
		for (int i = 0; i < 2; i++) {
			rule->packets[i] = counter_u64_alloc(M_WAITOK);
			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
		}

		PF_RULES_WLOCK();

		if (rule->ifname[0]) {
			rule->kif = pfi_kkif_attach(kif, rule->ifname);
			pfi_kkif_ref(rule->kif);
		} else
			rule->kif = NULL;

#ifdef ALTQ
		/* set queue IDs */
		if (rule->qname[0] != 0) {
			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
				error = EBUSY;
		}
#endif
		if (rule->tagname[0])
			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
				error = EBUSY;

		if (error) {
			pf_free_eth_rule(rule);
			PF_RULES_WUNLOCK();
			ERROUT(error);
		}

		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
			pf_free_eth_rule(rule);
			PF_RULES_WUNLOCK();
			ERROUT(EINVAL);
		}

		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
		if (tail)
			rule->nr = tail->nr + 1;
		else
			rule->nr = 0;

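		/*
		 * Note: as with the IP rule path above, the new Ethernet
		 * rule is only staged here.  It lands on the ruleset's
		 * inactive list under the ticket validated above and does
		 * not affect filtering until userspace commits the
		 * transaction (see pf_commit_eth(), which exchanges the
		 * inactive and active rule lists).
		 */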
TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries); 2804 2805 PF_RULES_WUNLOCK(); 2806 2807 #undef ERROUT 2808 DIOCADDETHRULE_error: 2809 nvlist_destroy(nvl); 2810 free(nvlpacked, M_TEMP); 2811 break; 2812 } 2813 2814 case DIOCADDRULENV: { 2815 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2816 nvlist_t *nvl = NULL; 2817 void *nvlpacked = NULL; 2818 struct pf_krule *rule = NULL; 2819 const char *anchor = "", *anchor_call = ""; 2820 uint32_t ticket = 0, pool_ticket = 0; 2821 2822 #define ERROUT(x) ERROUT_IOCTL(DIOCADDRULENV_error, x) 2823 2824 if (nv->len > pf_ioctl_maxcount) 2825 ERROUT(ENOMEM); 2826 2827 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK); 2828 error = copyin(nv->data, nvlpacked, nv->len); 2829 if (error) 2830 ERROUT(error); 2831 2832 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 2833 if (nvl == NULL) 2834 ERROUT(EBADMSG); 2835 2836 if (! nvlist_exists_number(nvl, "ticket")) 2837 ERROUT(EINVAL); 2838 ticket = nvlist_get_number(nvl, "ticket"); 2839 2840 if (! nvlist_exists_number(nvl, "pool_ticket")) 2841 ERROUT(EINVAL); 2842 pool_ticket = nvlist_get_number(nvl, "pool_ticket"); 2843 2844 if (! nvlist_exists_nvlist(nvl, "rule")) 2845 ERROUT(EINVAL); 2846 2847 rule = pf_krule_alloc(); 2848 error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"), 2849 rule); 2850 if (error) 2851 ERROUT(error); 2852 2853 if (nvlist_exists_string(nvl, "anchor")) 2854 anchor = nvlist_get_string(nvl, "anchor"); 2855 if (nvlist_exists_string(nvl, "anchor_call")) 2856 anchor_call = nvlist_get_string(nvl, "anchor_call"); 2857 2858 if ((error = nvlist_error(nvl))) 2859 ERROUT(error); 2860 2861 /* Frees rule on error */ 2862 error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor, 2863 anchor_call, td); 2864 2865 nvlist_destroy(nvl); 2866 free(nvlpacked, M_TEMP); 2867 break; 2868 #undef ERROUT 2869 DIOCADDRULENV_error: 2870 pf_krule_free(rule); 2871 nvlist_destroy(nvl); 2872 free(nvlpacked, M_TEMP); 2873 2874 break; 2875 } 2876 case DIOCADDRULE: { 2877 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 2878 struct pf_krule *rule; 2879 2880 rule = pf_krule_alloc(); 2881 error = pf_rule_to_krule(&pr->rule, rule); 2882 if (error != 0) { 2883 pf_krule_free(rule); 2884 break; 2885 } 2886 2887 pr->anchor[sizeof(pr->anchor) - 1] = 0; 2888 2889 /* Frees rule on error */ 2890 error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket, 2891 pr->anchor, pr->anchor_call, td); 2892 break; 2893 } 2894 2895 case DIOCGETRULES: { 2896 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 2897 struct pf_kruleset *ruleset; 2898 struct pf_krule *tail; 2899 int rs_num; 2900 2901 pr->anchor[sizeof(pr->anchor) - 1] = 0; 2902 2903 PF_RULES_WLOCK(); 2904 ruleset = pf_find_kruleset(pr->anchor); 2905 if (ruleset == NULL) { 2906 PF_RULES_WUNLOCK(); 2907 error = EINVAL; 2908 break; 2909 } 2910 rs_num = pf_get_ruleset_number(pr->rule.action); 2911 if (rs_num >= PF_RULESET_MAX) { 2912 PF_RULES_WUNLOCK(); 2913 error = EINVAL; 2914 break; 2915 } 2916 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 2917 pf_krulequeue); 2918 if (tail) 2919 pr->nr = tail->nr + 1; 2920 else 2921 pr->nr = 0; 2922 pr->ticket = ruleset->rules[rs_num].active.ticket; 2923 PF_RULES_WUNLOCK(); 2924 break; 2925 } 2926 2927 case DIOCGETRULE: { 2928 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 2929 struct pf_kruleset *ruleset; 2930 struct pf_krule *rule; 2931 int rs_num; 2932 2933 pr->anchor[sizeof(pr->anchor) - 1] = 0; 2934 2935 PF_RULES_WLOCK(); 2936 ruleset = pf_find_kruleset(pr->anchor); 2937 if (ruleset == NULL) { 2938 PF_RULES_WUNLOCK(); 2939 
error = EINVAL; 2940 break; 2941 } 2942 rs_num = pf_get_ruleset_number(pr->rule.action); 2943 if (rs_num >= PF_RULESET_MAX) { 2944 PF_RULES_WUNLOCK(); 2945 error = EINVAL; 2946 break; 2947 } 2948 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 2949 PF_RULES_WUNLOCK(); 2950 error = EBUSY; 2951 break; 2952 } 2953 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 2954 while ((rule != NULL) && (rule->nr != pr->nr)) 2955 rule = TAILQ_NEXT(rule, entries); 2956 if (rule == NULL) { 2957 PF_RULES_WUNLOCK(); 2958 error = EBUSY; 2959 break; 2960 } 2961 2962 pf_krule_to_rule(rule, &pr->rule); 2963 2964 if (pf_kanchor_copyout(ruleset, rule, pr)) { 2965 PF_RULES_WUNLOCK(); 2966 error = EBUSY; 2967 break; 2968 } 2969 pf_addr_copyout(&pr->rule.src.addr); 2970 pf_addr_copyout(&pr->rule.dst.addr); 2971 2972 if (pr->action == PF_GET_CLR_CNTR) { 2973 pf_counter_u64_zero(&rule->evaluations); 2974 for (int i = 0; i < 2; i++) { 2975 pf_counter_u64_zero(&rule->packets[i]); 2976 pf_counter_u64_zero(&rule->bytes[i]); 2977 } 2978 counter_u64_zero(rule->states_tot); 2979 } 2980 PF_RULES_WUNLOCK(); 2981 break; 2982 } 2983 2984 case DIOCGETRULENV: { 2985 struct pfioc_nv *nv = (struct pfioc_nv *)addr; 2986 nvlist_t *nvrule = NULL; 2987 nvlist_t *nvl = NULL; 2988 struct pf_kruleset *ruleset; 2989 struct pf_krule *rule; 2990 void *nvlpacked = NULL; 2991 int rs_num, nr; 2992 bool clear_counter = false; 2993 2994 #define ERROUT(x) ERROUT_IOCTL(DIOCGETRULENV_error, x) 2995 2996 if (nv->len > pf_ioctl_maxcount) 2997 ERROUT(ENOMEM); 2998 2999 /* Copy the request in */ 3000 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 3001 if (nvlpacked == NULL) 3002 ERROUT(ENOMEM); 3003 3004 error = copyin(nv->data, nvlpacked, nv->len); 3005 if (error) 3006 ERROUT(error); 3007 3008 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 3009 if (nvl == NULL) 3010 ERROUT(EBADMSG); 3011 3012 if (! nvlist_exists_string(nvl, "anchor")) 3013 ERROUT(EBADMSG); 3014 if (! nvlist_exists_number(nvl, "ruleset")) 3015 ERROUT(EBADMSG); 3016 if (! nvlist_exists_number(nvl, "ticket")) 3017 ERROUT(EBADMSG); 3018 if (! 
nvlist_exists_number(nvl, "nr")) 3019 ERROUT(EBADMSG); 3020 3021 if (nvlist_exists_bool(nvl, "clear_counter")) 3022 clear_counter = nvlist_get_bool(nvl, "clear_counter"); 3023 3024 if (clear_counter && !(flags & FWRITE)) 3025 ERROUT(EACCES); 3026 3027 nr = nvlist_get_number(nvl, "nr"); 3028 3029 PF_RULES_WLOCK(); 3030 ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor")); 3031 if (ruleset == NULL) { 3032 PF_RULES_WUNLOCK(); 3033 ERROUT(ENOENT); 3034 } 3035 3036 rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset")); 3037 if (rs_num >= PF_RULESET_MAX) { 3038 PF_RULES_WUNLOCK(); 3039 ERROUT(EINVAL); 3040 } 3041 3042 if (nvlist_get_number(nvl, "ticket") != 3043 ruleset->rules[rs_num].active.ticket) { 3044 PF_RULES_WUNLOCK(); 3045 ERROUT(EBUSY); 3046 } 3047 3048 if ((error = nvlist_error(nvl))) { 3049 PF_RULES_WUNLOCK(); 3050 ERROUT(error); 3051 } 3052 3053 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 3054 while ((rule != NULL) && (rule->nr != nr)) 3055 rule = TAILQ_NEXT(rule, entries); 3056 if (rule == NULL) { 3057 PF_RULES_WUNLOCK(); 3058 ERROUT(EBUSY); 3059 } 3060 3061 nvrule = pf_krule_to_nvrule(rule); 3062 3063 nvlist_destroy(nvl); 3064 nvl = nvlist_create(0); 3065 if (nvl == NULL) { 3066 PF_RULES_WUNLOCK(); 3067 ERROUT(ENOMEM); 3068 } 3069 nvlist_add_number(nvl, "nr", nr); 3070 nvlist_add_nvlist(nvl, "rule", nvrule); 3071 nvlist_destroy(nvrule); 3072 nvrule = NULL; 3073 if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) { 3074 PF_RULES_WUNLOCK(); 3075 ERROUT(EBUSY); 3076 } 3077 3078 free(nvlpacked, M_NVLIST); 3079 nvlpacked = nvlist_pack(nvl, &nv->len); 3080 if (nvlpacked == NULL) { 3081 PF_RULES_WUNLOCK(); 3082 ERROUT(ENOMEM); 3083 } 3084 3085 if (nv->size == 0) { 3086 PF_RULES_WUNLOCK(); 3087 ERROUT(0); 3088 } 3089 else if (nv->size < nv->len) { 3090 PF_RULES_WUNLOCK(); 3091 ERROUT(ENOSPC); 3092 } 3093 3094 if (clear_counter) { 3095 pf_counter_u64_zero(&rule->evaluations); 3096 for (int i = 0; i < 2; i++) { 3097 pf_counter_u64_zero(&rule->packets[i]); 3098 pf_counter_u64_zero(&rule->bytes[i]); 3099 } 3100 counter_u64_zero(rule->states_tot); 3101 } 3102 PF_RULES_WUNLOCK(); 3103 3104 error = copyout(nvlpacked, nv->data, nv->len); 3105 3106 #undef ERROUT 3107 DIOCGETRULENV_error: 3108 free(nvlpacked, M_NVLIST); 3109 nvlist_destroy(nvrule); 3110 nvlist_destroy(nvl); 3111 3112 break; 3113 } 3114 3115 case DIOCCHANGERULE: { 3116 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 3117 struct pf_kruleset *ruleset; 3118 struct pf_krule *oldrule = NULL, *newrule = NULL; 3119 struct pfi_kkif *kif = NULL; 3120 struct pf_kpooladdr *pa; 3121 u_int32_t nr = 0; 3122 int rs_num; 3123 3124 pcr->anchor[sizeof(pcr->anchor) - 1] = 0; 3125 3126 if (pcr->action < PF_CHANGE_ADD_HEAD || 3127 pcr->action > PF_CHANGE_GET_TICKET) { 3128 error = EINVAL; 3129 break; 3130 } 3131 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 3132 error = EINVAL; 3133 break; 3134 } 3135 3136 if (pcr->action != PF_CHANGE_REMOVE) { 3137 newrule = pf_krule_alloc(); 3138 error = pf_rule_to_krule(&pcr->rule, newrule); 3139 if (error != 0) { 3140 free(newrule, M_PFRULE); 3141 break; 3142 } 3143 3144 if (newrule->ifname[0]) 3145 kif = pf_kkif_create(M_WAITOK); 3146 pf_counter_u64_init(&newrule->evaluations, M_WAITOK); 3147 for (int i = 0; i < 2; i++) { 3148 pf_counter_u64_init(&newrule->packets[i], M_WAITOK); 3149 pf_counter_u64_init(&newrule->bytes[i], M_WAITOK); 3150 } 3151 newrule->states_cur = counter_u64_alloc(M_WAITOK); 3152 newrule->states_tot = counter_u64_alloc(M_WAITOK); 3153 newrule->src_nodes = 
counter_u64_alloc(M_WAITOK); 3154 newrule->cuid = td->td_ucred->cr_ruid; 3155 newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0; 3156 TAILQ_INIT(&newrule->rpool.list); 3157 } 3158 #define ERROUT(x) { error = (x); goto DIOCCHANGERULE_error; } 3159 3160 PF_RULES_WLOCK(); 3161 #ifdef PF_WANT_32_TO_64_COUNTER 3162 if (newrule != NULL) { 3163 LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist); 3164 newrule->allrulelinked = true; 3165 V_pf_allrulecount++; 3166 } 3167 #endif 3168 3169 if (!(pcr->action == PF_CHANGE_REMOVE || 3170 pcr->action == PF_CHANGE_GET_TICKET) && 3171 pcr->pool_ticket != V_ticket_pabuf) 3172 ERROUT(EBUSY); 3173 3174 ruleset = pf_find_kruleset(pcr->anchor); 3175 if (ruleset == NULL) 3176 ERROUT(EINVAL); 3177 3178 rs_num = pf_get_ruleset_number(pcr->rule.action); 3179 if (rs_num >= PF_RULESET_MAX) 3180 ERROUT(EINVAL); 3181 3182 if (pcr->action == PF_CHANGE_GET_TICKET) { 3183 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 3184 ERROUT(0); 3185 } else if (pcr->ticket != 3186 ruleset->rules[rs_num].active.ticket) 3187 ERROUT(EINVAL); 3188 3189 if (pcr->action != PF_CHANGE_REMOVE) { 3190 if (newrule->ifname[0]) { 3191 newrule->kif = pfi_kkif_attach(kif, 3192 newrule->ifname); 3193 kif = NULL; 3194 pfi_kkif_ref(newrule->kif); 3195 } else 3196 newrule->kif = NULL; 3197 3198 if (newrule->rtableid > 0 && 3199 newrule->rtableid >= rt_numfibs) 3200 error = EBUSY; 3201 3202 #ifdef ALTQ 3203 /* set queue IDs */ 3204 if (newrule->qname[0] != 0) { 3205 if ((newrule->qid = 3206 pf_qname2qid(newrule->qname)) == 0) 3207 error = EBUSY; 3208 else if (newrule->pqname[0] != 0) { 3209 if ((newrule->pqid = 3210 pf_qname2qid(newrule->pqname)) == 0) 3211 error = EBUSY; 3212 } else 3213 newrule->pqid = newrule->qid; 3214 } 3215 #endif /* ALTQ */ 3216 if (newrule->tagname[0]) 3217 if ((newrule->tag = 3218 pf_tagname2tag(newrule->tagname)) == 0) 3219 error = EBUSY; 3220 if (newrule->match_tagname[0]) 3221 if ((newrule->match_tag = pf_tagname2tag( 3222 newrule->match_tagname)) == 0) 3223 error = EBUSY; 3224 if (newrule->rt && !newrule->direction) 3225 error = EINVAL; 3226 if (!newrule->log) 3227 newrule->logif = 0; 3228 if (newrule->logif >= PFLOGIFS_MAX) 3229 error = EINVAL; 3230 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 3231 error = ENOMEM; 3232 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 3233 error = ENOMEM; 3234 if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call)) 3235 error = EINVAL; 3236 TAILQ_FOREACH(pa, &V_pf_pabuf, entries) 3237 if (pa->addr.type == PF_ADDR_TABLE) { 3238 pa->addr.p.tbl = 3239 pfr_attach_table(ruleset, 3240 pa->addr.v.tblname); 3241 if (pa->addr.p.tbl == NULL) 3242 error = ENOMEM; 3243 } 3244 3245 newrule->overload_tbl = NULL; 3246 if (newrule->overload_tblname[0]) { 3247 if ((newrule->overload_tbl = pfr_attach_table( 3248 ruleset, newrule->overload_tblname)) == 3249 NULL) 3250 error = EINVAL; 3251 else 3252 newrule->overload_tbl->pfrkt_flags |= 3253 PFR_TFLAG_ACTIVE; 3254 } 3255 3256 pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list); 3257 if (((((newrule->action == PF_NAT) || 3258 (newrule->action == PF_RDR) || 3259 (newrule->action == PF_BINAT) || 3260 (newrule->rt > PF_NOPFROUTE)) && 3261 !newrule->anchor)) && 3262 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 3263 error = EINVAL; 3264 3265 if (error) { 3266 pf_free_rule(newrule); 3267 PF_RULES_WUNLOCK(); 3268 break; 3269 } 3270 3271 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 3272 } 3273 pf_empty_kpool(&V_pf_pabuf); 3274 3275 if (pcr->action == 
PF_CHANGE_ADD_HEAD) 3276 oldrule = TAILQ_FIRST( 3277 ruleset->rules[rs_num].active.ptr); 3278 else if (pcr->action == PF_CHANGE_ADD_TAIL) 3279 oldrule = TAILQ_LAST( 3280 ruleset->rules[rs_num].active.ptr, pf_krulequeue); 3281 else { 3282 oldrule = TAILQ_FIRST( 3283 ruleset->rules[rs_num].active.ptr); 3284 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 3285 oldrule = TAILQ_NEXT(oldrule, entries); 3286 if (oldrule == NULL) { 3287 if (newrule != NULL) 3288 pf_free_rule(newrule); 3289 PF_RULES_WUNLOCK(); 3290 error = EINVAL; 3291 break; 3292 } 3293 } 3294 3295 if (pcr->action == PF_CHANGE_REMOVE) { 3296 pf_unlink_rule(ruleset->rules[rs_num].active.ptr, 3297 oldrule); 3298 ruleset->rules[rs_num].active.rcount--; 3299 } else { 3300 if (oldrule == NULL) 3301 TAILQ_INSERT_TAIL( 3302 ruleset->rules[rs_num].active.ptr, 3303 newrule, entries); 3304 else if (pcr->action == PF_CHANGE_ADD_HEAD || 3305 pcr->action == PF_CHANGE_ADD_BEFORE) 3306 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 3307 else 3308 TAILQ_INSERT_AFTER( 3309 ruleset->rules[rs_num].active.ptr, 3310 oldrule, newrule, entries); 3311 ruleset->rules[rs_num].active.rcount++; 3312 } 3313 3314 nr = 0; 3315 TAILQ_FOREACH(oldrule, 3316 ruleset->rules[rs_num].active.ptr, entries) 3317 oldrule->nr = nr++; 3318 3319 ruleset->rules[rs_num].active.ticket++; 3320 3321 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 3322 pf_remove_if_empty_kruleset(ruleset); 3323 3324 PF_RULES_WUNLOCK(); 3325 break; 3326 3327 #undef ERROUT 3328 DIOCCHANGERULE_error: 3329 PF_RULES_WUNLOCK(); 3330 pf_krule_free(newrule); 3331 pf_kkif_free(kif); 3332 break; 3333 } 3334 3335 case DIOCCLRSTATES: { 3336 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 3337 struct pf_kstate_kill kill; 3338 3339 error = pf_state_kill_to_kstate_kill(psk, &kill); 3340 if (error) 3341 break; 3342 3343 psk->psk_killed = pf_clear_states(&kill); 3344 break; 3345 } 3346 3347 case DIOCCLRSTATESNV: { 3348 error = pf_clearstates_nv((struct pfioc_nv *)addr); 3349 break; 3350 } 3351 3352 case DIOCKILLSTATES: { 3353 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 3354 struct pf_kstate_kill kill; 3355 3356 error = pf_state_kill_to_kstate_kill(psk, &kill); 3357 if (error) 3358 break; 3359 3360 psk->psk_killed = 0; 3361 pf_killstates(&kill, &psk->psk_killed); 3362 break; 3363 } 3364 3365 case DIOCKILLSTATESNV: { 3366 error = pf_killstates_nv((struct pfioc_nv *)addr); 3367 break; 3368 } 3369 3370 case DIOCADDSTATE: { 3371 struct pfioc_state *ps = (struct pfioc_state *)addr; 3372 struct pfsync_state *sp = &ps->state; 3373 3374 if (sp->timeout >= PFTM_MAX) { 3375 error = EINVAL; 3376 break; 3377 } 3378 if (V_pfsync_state_import_ptr != NULL) { 3379 PF_RULES_RLOCK(); 3380 error = V_pfsync_state_import_ptr(sp, PFSYNC_SI_IOCTL); 3381 PF_RULES_RUNLOCK(); 3382 } else 3383 error = EOPNOTSUPP; 3384 break; 3385 } 3386 3387 case DIOCGETSTATE: { 3388 struct pfioc_state *ps = (struct pfioc_state *)addr; 3389 struct pf_kstate *s; 3390 3391 s = pf_find_state_byid(ps->state.id, ps->state.creatorid); 3392 if (s == NULL) { 3393 error = ENOENT; 3394 break; 3395 } 3396 3397 pfsync_state_export(&ps->state, s); 3398 PF_STATE_UNLOCK(s); 3399 break; 3400 } 3401 3402 case DIOCGETSTATENV: { 3403 error = pf_getstate((struct pfioc_nv *)addr); 3404 break; 3405 } 3406 3407 case DIOCGETSTATES: { 3408 struct pfioc_states *ps = (struct pfioc_states *)addr; 3409 struct pf_kstate *s; 3410 struct pfsync_state *pstore, *p; 3411 int i, nr; 3412 size_t slice_count = 16, count; 3413 void *out; 3414 3415 
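		/*
		 * Export scheme: a ps_len of zero (or less) is a size probe
		 * and only reports the space needed for the current state
		 * count.  Otherwise each ID-hash row is snapshotted into a
		 * local slice buffer while the row lock is held (the buffer
		 * is grown and the row rescanned if it turns out too small),
		 * and the slice is copied out to userspace only after the
		 * row lock has been dropped.
		 */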
if (ps->ps_len <= 0) { 3416 nr = uma_zone_get_cur(V_pf_state_z); 3417 ps->ps_len = sizeof(struct pfsync_state) * nr; 3418 break; 3419 } 3420 3421 out = ps->ps_states; 3422 pstore = mallocarray(slice_count, 3423 sizeof(struct pfsync_state), M_TEMP, M_WAITOK | M_ZERO); 3424 nr = 0; 3425 3426 for (i = 0; i <= pf_hashmask; i++) { 3427 struct pf_idhash *ih = &V_pf_idhash[i]; 3428 3429 DIOCGETSTATES_retry: 3430 p = pstore; 3431 3432 if (LIST_EMPTY(&ih->states)) 3433 continue; 3434 3435 PF_HASHROW_LOCK(ih); 3436 count = 0; 3437 LIST_FOREACH(s, &ih->states, entry) { 3438 if (s->timeout == PFTM_UNLINKED) 3439 continue; 3440 count++; 3441 } 3442 3443 if (count > slice_count) { 3444 PF_HASHROW_UNLOCK(ih); 3445 free(pstore, M_TEMP); 3446 slice_count = count * 2; 3447 pstore = mallocarray(slice_count, 3448 sizeof(struct pfsync_state), M_TEMP, 3449 M_WAITOK | M_ZERO); 3450 goto DIOCGETSTATES_retry; 3451 } 3452 3453 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3454 PF_HASHROW_UNLOCK(ih); 3455 goto DIOCGETSTATES_full; 3456 } 3457 3458 LIST_FOREACH(s, &ih->states, entry) { 3459 if (s->timeout == PFTM_UNLINKED) 3460 continue; 3461 3462 pfsync_state_export(p, s); 3463 p++; 3464 nr++; 3465 } 3466 PF_HASHROW_UNLOCK(ih); 3467 error = copyout(pstore, out, 3468 sizeof(struct pfsync_state) * count); 3469 if (error) 3470 break; 3471 out = ps->ps_states + nr; 3472 } 3473 DIOCGETSTATES_full: 3474 ps->ps_len = sizeof(struct pfsync_state) * nr; 3475 free(pstore, M_TEMP); 3476 3477 break; 3478 } 3479 3480 case DIOCGETSTATESV2: { 3481 struct pfioc_states_v2 *ps = (struct pfioc_states_v2 *)addr; 3482 struct pf_kstate *s; 3483 struct pf_state_export *pstore, *p; 3484 int i, nr; 3485 size_t slice_count = 16, count; 3486 void *out; 3487 3488 if (ps->ps_req_version > PF_STATE_VERSION) { 3489 error = ENOTSUP; 3490 break; 3491 } 3492 3493 if (ps->ps_len <= 0) { 3494 nr = uma_zone_get_cur(V_pf_state_z); 3495 ps->ps_len = sizeof(struct pf_state_export) * nr; 3496 break; 3497 } 3498 3499 out = ps->ps_states; 3500 pstore = mallocarray(slice_count, 3501 sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO); 3502 nr = 0; 3503 3504 for (i = 0; i <= pf_hashmask; i++) { 3505 struct pf_idhash *ih = &V_pf_idhash[i]; 3506 3507 DIOCGETSTATESV2_retry: 3508 p = pstore; 3509 3510 if (LIST_EMPTY(&ih->states)) 3511 continue; 3512 3513 PF_HASHROW_LOCK(ih); 3514 count = 0; 3515 LIST_FOREACH(s, &ih->states, entry) { 3516 if (s->timeout == PFTM_UNLINKED) 3517 continue; 3518 count++; 3519 } 3520 3521 if (count > slice_count) { 3522 PF_HASHROW_UNLOCK(ih); 3523 free(pstore, M_TEMP); 3524 slice_count = count * 2; 3525 pstore = mallocarray(slice_count, 3526 sizeof(struct pf_state_export), M_TEMP, 3527 M_WAITOK | M_ZERO); 3528 goto DIOCGETSTATESV2_retry; 3529 } 3530 3531 if ((nr+count) * sizeof(*p) > ps->ps_len) { 3532 PF_HASHROW_UNLOCK(ih); 3533 goto DIOCGETSTATESV2_full; 3534 } 3535 3536 LIST_FOREACH(s, &ih->states, entry) { 3537 if (s->timeout == PFTM_UNLINKED) 3538 continue; 3539 3540 pf_state_export(p, s); 3541 p++; 3542 nr++; 3543 } 3544 PF_HASHROW_UNLOCK(ih); 3545 error = copyout(pstore, out, 3546 sizeof(struct pf_state_export) * count); 3547 if (error) 3548 break; 3549 out = ps->ps_states + nr; 3550 } 3551 DIOCGETSTATESV2_full: 3552 ps->ps_len = nr * sizeof(struct pf_state_export); 3553 free(pstore, M_TEMP); 3554 3555 break; 3556 } 3557 3558 case DIOCGETSTATUS: { 3559 struct pf_status *s = (struct pf_status *)addr; 3560 3561 PF_RULES_RLOCK(); 3562 s->running = V_pf_status.running; 3563 s->since = V_pf_status.since; 3564 s->debug = 
V_pf_status.debug; 3565 s->hostid = V_pf_status.hostid; 3566 s->states = V_pf_status.states; 3567 s->src_nodes = V_pf_status.src_nodes; 3568 3569 for (int i = 0; i < PFRES_MAX; i++) 3570 s->counters[i] = 3571 counter_u64_fetch(V_pf_status.counters[i]); 3572 for (int i = 0; i < LCNT_MAX; i++) 3573 s->lcounters[i] = 3574 counter_u64_fetch(V_pf_status.lcounters[i]); 3575 for (int i = 0; i < FCNT_MAX; i++) 3576 s->fcounters[i] = 3577 pf_counter_u64_fetch(&V_pf_status.fcounters[i]); 3578 for (int i = 0; i < SCNT_MAX; i++) 3579 s->scounters[i] = 3580 counter_u64_fetch(V_pf_status.scounters[i]); 3581 3582 bcopy(V_pf_status.ifname, s->ifname, IFNAMSIZ); 3583 bcopy(V_pf_status.pf_chksum, s->pf_chksum, 3584 PF_MD5_DIGEST_LENGTH); 3585 3586 pfi_update_status(s->ifname, s); 3587 PF_RULES_RUNLOCK(); 3588 break; 3589 } 3590 3591 case DIOCGETSTATUSNV: { 3592 error = pf_getstatus((struct pfioc_nv *)addr); 3593 break; 3594 } 3595 3596 case DIOCSETSTATUSIF: { 3597 struct pfioc_if *pi = (struct pfioc_if *)addr; 3598 3599 if (pi->ifname[0] == 0) { 3600 bzero(V_pf_status.ifname, IFNAMSIZ); 3601 break; 3602 } 3603 PF_RULES_WLOCK(); 3604 error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ); 3605 PF_RULES_WUNLOCK(); 3606 break; 3607 } 3608 3609 case DIOCCLRSTATUS: { 3610 PF_RULES_WLOCK(); 3611 for (int i = 0; i < PFRES_MAX; i++) 3612 counter_u64_zero(V_pf_status.counters[i]); 3613 for (int i = 0; i < FCNT_MAX; i++) 3614 pf_counter_u64_zero(&V_pf_status.fcounters[i]); 3615 for (int i = 0; i < SCNT_MAX; i++) 3616 counter_u64_zero(V_pf_status.scounters[i]); 3617 for (int i = 0; i < KLCNT_MAX; i++) 3618 counter_u64_zero(V_pf_status.lcounters[i]); 3619 V_pf_status.since = time_second; 3620 if (*V_pf_status.ifname) 3621 pfi_update_status(V_pf_status.ifname, NULL); 3622 PF_RULES_WUNLOCK(); 3623 break; 3624 } 3625 3626 case DIOCNATLOOK: { 3627 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 3628 struct pf_state_key *sk; 3629 struct pf_kstate *state; 3630 struct pf_state_key_cmp key; 3631 int m = 0, direction = pnl->direction; 3632 int sidx, didx; 3633 3634 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 3635 sidx = (direction == PF_IN) ? 1 : 0; 3636 didx = (direction == PF_IN) ? 
0 : 1; 3637 3638 if (!pnl->proto || 3639 PF_AZERO(&pnl->saddr, pnl->af) || 3640 PF_AZERO(&pnl->daddr, pnl->af) || 3641 ((pnl->proto == IPPROTO_TCP || 3642 pnl->proto == IPPROTO_UDP) && 3643 (!pnl->dport || !pnl->sport))) 3644 error = EINVAL; 3645 else { 3646 bzero(&key, sizeof(key)); 3647 key.af = pnl->af; 3648 key.proto = pnl->proto; 3649 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); 3650 key.port[sidx] = pnl->sport; 3651 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); 3652 key.port[didx] = pnl->dport; 3653 3654 state = pf_find_state_all(&key, direction, &m); 3655 if (state == NULL) { 3656 error = ENOENT; 3657 } else { 3658 if (m > 1) { 3659 PF_STATE_UNLOCK(state); 3660 error = E2BIG; /* more than one state */ 3661 } else { 3662 sk = state->key[sidx]; 3663 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); 3664 pnl->rsport = sk->port[sidx]; 3665 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); 3666 pnl->rdport = sk->port[didx]; 3667 PF_STATE_UNLOCK(state); 3668 } 3669 } 3670 } 3671 break; 3672 } 3673 3674 case DIOCSETTIMEOUT: { 3675 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 3676 int old; 3677 3678 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 3679 pt->seconds < 0) { 3680 error = EINVAL; 3681 break; 3682 } 3683 PF_RULES_WLOCK(); 3684 old = V_pf_default_rule.timeout[pt->timeout]; 3685 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 3686 pt->seconds = 1; 3687 V_pf_default_rule.timeout[pt->timeout] = pt->seconds; 3688 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) 3689 wakeup(pf_purge_thread); 3690 pt->seconds = old; 3691 PF_RULES_WUNLOCK(); 3692 break; 3693 } 3694 3695 case DIOCGETTIMEOUT: { 3696 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 3697 3698 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 3699 error = EINVAL; 3700 break; 3701 } 3702 PF_RULES_RLOCK(); 3703 pt->seconds = V_pf_default_rule.timeout[pt->timeout]; 3704 PF_RULES_RUNLOCK(); 3705 break; 3706 } 3707 3708 case DIOCGETLIMIT: { 3709 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 3710 3711 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 3712 error = EINVAL; 3713 break; 3714 } 3715 PF_RULES_RLOCK(); 3716 pl->limit = V_pf_limits[pl->index].limit; 3717 PF_RULES_RUNLOCK(); 3718 break; 3719 } 3720 3721 case DIOCSETLIMIT: { 3722 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 3723 int old_limit; 3724 3725 PF_RULES_WLOCK(); 3726 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 3727 V_pf_limits[pl->index].zone == NULL) { 3728 PF_RULES_WUNLOCK(); 3729 error = EINVAL; 3730 break; 3731 } 3732 uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit); 3733 old_limit = V_pf_limits[pl->index].limit; 3734 V_pf_limits[pl->index].limit = pl->limit; 3735 pl->limit = old_limit; 3736 PF_RULES_WUNLOCK(); 3737 break; 3738 } 3739 3740 case DIOCSETDEBUG: { 3741 u_int32_t *level = (u_int32_t *)addr; 3742 3743 PF_RULES_WLOCK(); 3744 V_pf_status.debug = *level; 3745 PF_RULES_WUNLOCK(); 3746 break; 3747 } 3748 3749 case DIOCCLRRULECTRS: { 3750 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 3751 struct pf_kruleset *ruleset = &pf_main_ruleset; 3752 struct pf_krule *rule; 3753 3754 PF_RULES_WLOCK(); 3755 TAILQ_FOREACH(rule, 3756 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { 3757 pf_counter_u64_zero(&rule->evaluations); 3758 for (int i = 0; i < 2; i++) { 3759 pf_counter_u64_zero(&rule->packets[i]); 3760 pf_counter_u64_zero(&rule->bytes[i]); 3761 } 3762 } 3763 PF_RULES_WUNLOCK(); 3764 break; 3765 } 3766 3767 case DIOCGIFSPEEDV0: 3768 case DIOCGIFSPEEDV1: { 3769 struct pf_ifspeed_v1 *psp = (struct 
pf_ifspeed_v1 *)addr; 3770 struct pf_ifspeed_v1 ps; 3771 struct ifnet *ifp; 3772 3773 if (psp->ifname[0] == '\0') { 3774 error = EINVAL; 3775 break; 3776 } 3777 3778 error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ); 3779 if (error != 0) 3780 break; 3781 ifp = ifunit(ps.ifname); 3782 if (ifp != NULL) { 3783 psp->baudrate32 = 3784 (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX); 3785 if (cmd == DIOCGIFSPEEDV1) 3786 psp->baudrate = ifp->if_baudrate; 3787 } else { 3788 error = EINVAL; 3789 } 3790 break; 3791 } 3792 3793 #ifdef ALTQ 3794 case DIOCSTARTALTQ: { 3795 struct pf_altq *altq; 3796 3797 PF_RULES_WLOCK(); 3798 /* enable all altq interfaces on active list */ 3799 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 3800 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 3801 error = pf_enable_altq(altq); 3802 if (error != 0) 3803 break; 3804 } 3805 } 3806 if (error == 0) 3807 V_pf_altq_running = 1; 3808 PF_RULES_WUNLOCK(); 3809 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 3810 break; 3811 } 3812 3813 case DIOCSTOPALTQ: { 3814 struct pf_altq *altq; 3815 3816 PF_RULES_WLOCK(); 3817 /* disable all altq interfaces on active list */ 3818 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) { 3819 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) { 3820 error = pf_disable_altq(altq); 3821 if (error != 0) 3822 break; 3823 } 3824 } 3825 if (error == 0) 3826 V_pf_altq_running = 0; 3827 PF_RULES_WUNLOCK(); 3828 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 3829 break; 3830 } 3831 3832 case DIOCADDALTQV0: 3833 case DIOCADDALTQV1: { 3834 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 3835 struct pf_altq *altq, *a; 3836 struct ifnet *ifp; 3837 3838 altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO); 3839 error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd)); 3840 if (error) 3841 break; 3842 altq->local_flags = 0; 3843 3844 PF_RULES_WLOCK(); 3845 if (pa->ticket != V_ticket_altqs_inactive) { 3846 PF_RULES_WUNLOCK(); 3847 free(altq, M_PFALTQ); 3848 error = EBUSY; 3849 break; 3850 } 3851 3852 /* 3853 * if this is for a queue, find the discipline and 3854 * copy the necessary fields 3855 */ 3856 if (altq->qname[0] != 0) { 3857 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 3858 PF_RULES_WUNLOCK(); 3859 error = EBUSY; 3860 free(altq, M_PFALTQ); 3861 break; 3862 } 3863 altq->altq_disc = NULL; 3864 TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) { 3865 if (strncmp(a->ifname, altq->ifname, 3866 IFNAMSIZ) == 0) { 3867 altq->altq_disc = a->altq_disc; 3868 break; 3869 } 3870 } 3871 } 3872 3873 if ((ifp = ifunit(altq->ifname)) == NULL) 3874 altq->local_flags |= PFALTQ_FLAG_IF_REMOVED; 3875 else 3876 error = altq_add(ifp, altq); 3877 3878 if (error) { 3879 PF_RULES_WUNLOCK(); 3880 free(altq, M_PFALTQ); 3881 break; 3882 } 3883 3884 if (altq->qname[0] != 0) 3885 TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries); 3886 else 3887 TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries); 3888 /* version error check done on import above */ 3889 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 3890 PF_RULES_WUNLOCK(); 3891 break; 3892 } 3893 3894 case DIOCGETALTQSV0: 3895 case DIOCGETALTQSV1: { 3896 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 3897 struct pf_altq *altq; 3898 3899 PF_RULES_RLOCK(); 3900 pa->nr = 0; 3901 TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) 3902 pa->nr++; 3903 TAILQ_FOREACH(altq, V_pf_altqs_active, entries) 3904 pa->nr++; 3905 pa->ticket = V_ticket_altqs_active; 3906 PF_RULES_RUNLOCK(); 3907 break; 3908 } 3909 3910 case DIOCGETALTQV0: 3911 
case DIOCGETALTQV1: { 3912 struct pfioc_altq_v1 *pa = (struct pfioc_altq_v1 *)addr; 3913 struct pf_altq *altq; 3914 3915 PF_RULES_RLOCK(); 3916 if (pa->ticket != V_ticket_altqs_active) { 3917 PF_RULES_RUNLOCK(); 3918 error = EBUSY; 3919 break; 3920 } 3921 altq = pf_altq_get_nth_active(pa->nr); 3922 if (altq == NULL) { 3923 PF_RULES_RUNLOCK(); 3924 error = EBUSY; 3925 break; 3926 } 3927 pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd)); 3928 PF_RULES_RUNLOCK(); 3929 break; 3930 } 3931 3932 case DIOCCHANGEALTQV0: 3933 case DIOCCHANGEALTQV1: 3934 /* CHANGEALTQ not supported yet! */ 3935 error = ENODEV; 3936 break; 3937 3938 case DIOCGETQSTATSV0: 3939 case DIOCGETQSTATSV1: { 3940 struct pfioc_qstats_v1 *pq = (struct pfioc_qstats_v1 *)addr; 3941 struct pf_altq *altq; 3942 int nbytes; 3943 u_int32_t version; 3944 3945 PF_RULES_RLOCK(); 3946 if (pq->ticket != V_ticket_altqs_active) { 3947 PF_RULES_RUNLOCK(); 3948 error = EBUSY; 3949 break; 3950 } 3951 nbytes = pq->nbytes; 3952 altq = pf_altq_get_nth_active(pq->nr); 3953 if (altq == NULL) { 3954 PF_RULES_RUNLOCK(); 3955 error = EBUSY; 3956 break; 3957 } 3958 3959 if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) { 3960 PF_RULES_RUNLOCK(); 3961 error = ENXIO; 3962 break; 3963 } 3964 PF_RULES_RUNLOCK(); 3965 if (cmd == DIOCGETQSTATSV0) 3966 version = 0; /* DIOCGETQSTATSV0 means stats struct v0 */ 3967 else 3968 version = pq->version; 3969 error = altq_getqstats(altq, pq->buf, &nbytes, version); 3970 if (error == 0) { 3971 pq->scheduler = altq->scheduler; 3972 pq->nbytes = nbytes; 3973 } 3974 break; 3975 } 3976 #endif /* ALTQ */ 3977 3978 case DIOCBEGINADDRS: { 3979 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 3980 3981 PF_RULES_WLOCK(); 3982 pf_empty_kpool(&V_pf_pabuf); 3983 pp->ticket = ++V_ticket_pabuf; 3984 PF_RULES_WUNLOCK(); 3985 break; 3986 } 3987 3988 case DIOCADDADDR: { 3989 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 3990 struct pf_kpooladdr *pa; 3991 struct pfi_kkif *kif = NULL; 3992 3993 #ifndef INET 3994 if (pp->af == AF_INET) { 3995 error = EAFNOSUPPORT; 3996 break; 3997 } 3998 #endif /* INET */ 3999 #ifndef INET6 4000 if (pp->af == AF_INET6) { 4001 error = EAFNOSUPPORT; 4002 break; 4003 } 4004 #endif /* INET6 */ 4005 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 4006 pp->addr.addr.type != PF_ADDR_DYNIFTL && 4007 pp->addr.addr.type != PF_ADDR_TABLE) { 4008 error = EINVAL; 4009 break; 4010 } 4011 if (pp->addr.addr.p.dyn != NULL) { 4012 error = EINVAL; 4013 break; 4014 } 4015 pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK); 4016 error = pf_pooladdr_to_kpooladdr(&pp->addr, pa); 4017 if (error != 0) 4018 break; 4019 if (pa->ifname[0]) 4020 kif = pf_kkif_create(M_WAITOK); 4021 PF_RULES_WLOCK(); 4022 if (pp->ticket != V_ticket_pabuf) { 4023 PF_RULES_WUNLOCK(); 4024 if (pa->ifname[0]) 4025 pf_kkif_free(kif); 4026 free(pa, M_PFRULE); 4027 error = EBUSY; 4028 break; 4029 } 4030 if (pa->ifname[0]) { 4031 pa->kif = pfi_kkif_attach(kif, pa->ifname); 4032 kif = NULL; 4033 pfi_kkif_ref(pa->kif); 4034 } else 4035 pa->kif = NULL; 4036 if (pa->addr.type == PF_ADDR_DYNIFTL && ((error = 4037 pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) { 4038 if (pa->ifname[0]) 4039 pfi_kkif_unref(pa->kif); 4040 PF_RULES_WUNLOCK(); 4041 free(pa, M_PFRULE); 4042 break; 4043 } 4044 TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries); 4045 PF_RULES_WUNLOCK(); 4046 break; 4047 } 4048 4049 case DIOCGETADDRS: { 4050 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4051 struct pf_kpool *pool; 4052 struct pf_kpooladdr *pa; 4053 4054 
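		/*
		 * DIOCGETADDRS only reports how many addresses sit in the
		 * pool of the rule selected by anchor/ticket/r_action/r_num;
		 * userspace then issues one DIOCGETADDR per index, each of
		 * which looks the pool up again under the lock and copies
		 * out the nr'th entry.
		 */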
pp->anchor[sizeof(pp->anchor) - 1] = 0; 4055 pp->nr = 0; 4056 4057 PF_RULES_RLOCK(); 4058 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 4059 pp->r_num, 0, 1, 0); 4060 if (pool == NULL) { 4061 PF_RULES_RUNLOCK(); 4062 error = EBUSY; 4063 break; 4064 } 4065 TAILQ_FOREACH(pa, &pool->list, entries) 4066 pp->nr++; 4067 PF_RULES_RUNLOCK(); 4068 break; 4069 } 4070 4071 case DIOCGETADDR: { 4072 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 4073 struct pf_kpool *pool; 4074 struct pf_kpooladdr *pa; 4075 u_int32_t nr = 0; 4076 4077 pp->anchor[sizeof(pp->anchor) - 1] = 0; 4078 4079 PF_RULES_RLOCK(); 4080 pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action, 4081 pp->r_num, 0, 1, 1); 4082 if (pool == NULL) { 4083 PF_RULES_RUNLOCK(); 4084 error = EBUSY; 4085 break; 4086 } 4087 pa = TAILQ_FIRST(&pool->list); 4088 while ((pa != NULL) && (nr < pp->nr)) { 4089 pa = TAILQ_NEXT(pa, entries); 4090 nr++; 4091 } 4092 if (pa == NULL) { 4093 PF_RULES_RUNLOCK(); 4094 error = EBUSY; 4095 break; 4096 } 4097 pf_kpooladdr_to_pooladdr(pa, &pp->addr); 4098 pf_addr_copyout(&pp->addr.addr); 4099 PF_RULES_RUNLOCK(); 4100 break; 4101 } 4102 4103 case DIOCCHANGEADDR: { 4104 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 4105 struct pf_kpool *pool; 4106 struct pf_kpooladdr *oldpa = NULL, *newpa = NULL; 4107 struct pf_kruleset *ruleset; 4108 struct pfi_kkif *kif = NULL; 4109 4110 pca->anchor[sizeof(pca->anchor) - 1] = 0; 4111 4112 if (pca->action < PF_CHANGE_ADD_HEAD || 4113 pca->action > PF_CHANGE_REMOVE) { 4114 error = EINVAL; 4115 break; 4116 } 4117 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 4118 pca->addr.addr.type != PF_ADDR_DYNIFTL && 4119 pca->addr.addr.type != PF_ADDR_TABLE) { 4120 error = EINVAL; 4121 break; 4122 } 4123 if (pca->addr.addr.p.dyn != NULL) { 4124 error = EINVAL; 4125 break; 4126 } 4127 4128 if (pca->action != PF_CHANGE_REMOVE) { 4129 #ifndef INET 4130 if (pca->af == AF_INET) { 4131 error = EAFNOSUPPORT; 4132 break; 4133 } 4134 #endif /* INET */ 4135 #ifndef INET6 4136 if (pca->af == AF_INET6) { 4137 error = EAFNOSUPPORT; 4138 break; 4139 } 4140 #endif /* INET6 */ 4141 newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK); 4142 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 4143 if (newpa->ifname[0]) 4144 kif = pf_kkif_create(M_WAITOK); 4145 newpa->kif = NULL; 4146 } 4147 #define ERROUT(x) ERROUT_IOCTL(DIOCCHANGEADDR_error, x) 4148 PF_RULES_WLOCK(); 4149 ruleset = pf_find_kruleset(pca->anchor); 4150 if (ruleset == NULL) 4151 ERROUT(EBUSY); 4152 4153 pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action, 4154 pca->r_num, pca->r_last, 1, 1); 4155 if (pool == NULL) 4156 ERROUT(EBUSY); 4157 4158 if (pca->action != PF_CHANGE_REMOVE) { 4159 if (newpa->ifname[0]) { 4160 newpa->kif = pfi_kkif_attach(kif, newpa->ifname); 4161 pfi_kkif_ref(newpa->kif); 4162 kif = NULL; 4163 } 4164 4165 switch (newpa->addr.type) { 4166 case PF_ADDR_DYNIFTL: 4167 error = pfi_dynaddr_setup(&newpa->addr, 4168 pca->af); 4169 break; 4170 case PF_ADDR_TABLE: 4171 newpa->addr.p.tbl = pfr_attach_table(ruleset, 4172 newpa->addr.v.tblname); 4173 if (newpa->addr.p.tbl == NULL) 4174 error = ENOMEM; 4175 break; 4176 } 4177 if (error) 4178 goto DIOCCHANGEADDR_error; 4179 } 4180 4181 switch (pca->action) { 4182 case PF_CHANGE_ADD_HEAD: 4183 oldpa = TAILQ_FIRST(&pool->list); 4184 break; 4185 case PF_CHANGE_ADD_TAIL: 4186 oldpa = TAILQ_LAST(&pool->list, pf_kpalist); 4187 break; 4188 default: 4189 oldpa = TAILQ_FIRST(&pool->list); 4190 for (int i = 0; oldpa && i < pca->nr; i++) 4191 oldpa = 
TAILQ_NEXT(oldpa, entries); 4192 4193 if (oldpa == NULL) 4194 ERROUT(EINVAL); 4195 } 4196 4197 if (pca->action == PF_CHANGE_REMOVE) { 4198 TAILQ_REMOVE(&pool->list, oldpa, entries); 4199 switch (oldpa->addr.type) { 4200 case PF_ADDR_DYNIFTL: 4201 pfi_dynaddr_remove(oldpa->addr.p.dyn); 4202 break; 4203 case PF_ADDR_TABLE: 4204 pfr_detach_table(oldpa->addr.p.tbl); 4205 break; 4206 } 4207 if (oldpa->kif) 4208 pfi_kkif_unref(oldpa->kif); 4209 free(oldpa, M_PFRULE); 4210 } else { 4211 if (oldpa == NULL) 4212 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 4213 else if (pca->action == PF_CHANGE_ADD_HEAD || 4214 pca->action == PF_CHANGE_ADD_BEFORE) 4215 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 4216 else 4217 TAILQ_INSERT_AFTER(&pool->list, oldpa, 4218 newpa, entries); 4219 } 4220 4221 pool->cur = TAILQ_FIRST(&pool->list); 4222 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af); 4223 PF_RULES_WUNLOCK(); 4224 break; 4225 4226 #undef ERROUT 4227 DIOCCHANGEADDR_error: 4228 if (newpa != NULL) { 4229 if (newpa->kif) 4230 pfi_kkif_unref(newpa->kif); 4231 free(newpa, M_PFRULE); 4232 } 4233 PF_RULES_WUNLOCK(); 4234 pf_kkif_free(kif); 4235 break; 4236 } 4237 4238 case DIOCGETRULESETS: { 4239 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4240 struct pf_kruleset *ruleset; 4241 struct pf_kanchor *anchor; 4242 4243 pr->path[sizeof(pr->path) - 1] = 0; 4244 4245 PF_RULES_RLOCK(); 4246 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4247 PF_RULES_RUNLOCK(); 4248 error = ENOENT; 4249 break; 4250 } 4251 pr->nr = 0; 4252 if (ruleset->anchor == NULL) { 4253 /* XXX kludge for pf_main_ruleset */ 4254 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4255 if (anchor->parent == NULL) 4256 pr->nr++; 4257 } else { 4258 RB_FOREACH(anchor, pf_kanchor_node, 4259 &ruleset->anchor->children) 4260 pr->nr++; 4261 } 4262 PF_RULES_RUNLOCK(); 4263 break; 4264 } 4265 4266 case DIOCGETRULESET: { 4267 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 4268 struct pf_kruleset *ruleset; 4269 struct pf_kanchor *anchor; 4270 u_int32_t nr = 0; 4271 4272 pr->path[sizeof(pr->path) - 1] = 0; 4273 4274 PF_RULES_RLOCK(); 4275 if ((ruleset = pf_find_kruleset(pr->path)) == NULL) { 4276 PF_RULES_RUNLOCK(); 4277 error = ENOENT; 4278 break; 4279 } 4280 pr->name[0] = 0; 4281 if (ruleset->anchor == NULL) { 4282 /* XXX kludge for pf_main_ruleset */ 4283 RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) 4284 if (anchor->parent == NULL && nr++ == pr->nr) { 4285 strlcpy(pr->name, anchor->name, 4286 sizeof(pr->name)); 4287 break; 4288 } 4289 } else { 4290 RB_FOREACH(anchor, pf_kanchor_node, 4291 &ruleset->anchor->children) 4292 if (nr++ == pr->nr) { 4293 strlcpy(pr->name, anchor->name, 4294 sizeof(pr->name)); 4295 break; 4296 } 4297 } 4298 if (!pr->name[0]) 4299 error = EBUSY; 4300 PF_RULES_RUNLOCK(); 4301 break; 4302 } 4303 4304 case DIOCRCLRTABLES: { 4305 struct pfioc_table *io = (struct pfioc_table *)addr; 4306 4307 if (io->pfrio_esize != 0) { 4308 error = ENODEV; 4309 break; 4310 } 4311 PF_RULES_WLOCK(); 4312 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 4313 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4314 PF_RULES_WUNLOCK(); 4315 break; 4316 } 4317 4318 case DIOCRADDTABLES: { 4319 struct pfioc_table *io = (struct pfioc_table *)addr; 4320 struct pfr_table *pfrts; 4321 size_t totlen; 4322 4323 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4324 error = ENODEV; 4325 break; 4326 } 4327 4328 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4329 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct 
pfr_table))) { 4330 error = ENOMEM; 4331 break; 4332 } 4333 4334 totlen = io->pfrio_size * sizeof(struct pfr_table); 4335 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4336 M_TEMP, M_WAITOK); 4337 error = copyin(io->pfrio_buffer, pfrts, totlen); 4338 if (error) { 4339 free(pfrts, M_TEMP); 4340 break; 4341 } 4342 PF_RULES_WLOCK(); 4343 error = pfr_add_tables(pfrts, io->pfrio_size, 4344 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4345 PF_RULES_WUNLOCK(); 4346 free(pfrts, M_TEMP); 4347 break; 4348 } 4349 4350 case DIOCRDELTABLES: { 4351 struct pfioc_table *io = (struct pfioc_table *)addr; 4352 struct pfr_table *pfrts; 4353 size_t totlen; 4354 4355 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4356 error = ENODEV; 4357 break; 4358 } 4359 4360 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4361 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4362 error = ENOMEM; 4363 break; 4364 } 4365 4366 totlen = io->pfrio_size * sizeof(struct pfr_table); 4367 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4368 M_TEMP, M_WAITOK); 4369 error = copyin(io->pfrio_buffer, pfrts, totlen); 4370 if (error) { 4371 free(pfrts, M_TEMP); 4372 break; 4373 } 4374 PF_RULES_WLOCK(); 4375 error = pfr_del_tables(pfrts, io->pfrio_size, 4376 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4377 PF_RULES_WUNLOCK(); 4378 free(pfrts, M_TEMP); 4379 break; 4380 } 4381 4382 case DIOCRGETTABLES: { 4383 struct pfioc_table *io = (struct pfioc_table *)addr; 4384 struct pfr_table *pfrts; 4385 size_t totlen; 4386 int n; 4387 4388 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4389 error = ENODEV; 4390 break; 4391 } 4392 PF_RULES_RLOCK(); 4393 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4394 if (n < 0) { 4395 PF_RULES_RUNLOCK(); 4396 error = EINVAL; 4397 break; 4398 } 4399 io->pfrio_size = min(io->pfrio_size, n); 4400 4401 totlen = io->pfrio_size * sizeof(struct pfr_table); 4402 4403 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4404 M_TEMP, M_NOWAIT | M_ZERO); 4405 if (pfrts == NULL) { 4406 error = ENOMEM; 4407 PF_RULES_RUNLOCK(); 4408 break; 4409 } 4410 error = pfr_get_tables(&io->pfrio_table, pfrts, 4411 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4412 PF_RULES_RUNLOCK(); 4413 if (error == 0) 4414 error = copyout(pfrts, io->pfrio_buffer, totlen); 4415 free(pfrts, M_TEMP); 4416 break; 4417 } 4418 4419 case DIOCRGETTSTATS: { 4420 struct pfioc_table *io = (struct pfioc_table *)addr; 4421 struct pfr_tstats *pfrtstats; 4422 size_t totlen; 4423 int n; 4424 4425 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 4426 error = ENODEV; 4427 break; 4428 } 4429 PF_TABLE_STATS_LOCK(); 4430 PF_RULES_RLOCK(); 4431 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4432 if (n < 0) { 4433 PF_RULES_RUNLOCK(); 4434 PF_TABLE_STATS_UNLOCK(); 4435 error = EINVAL; 4436 break; 4437 } 4438 io->pfrio_size = min(io->pfrio_size, n); 4439 4440 totlen = io->pfrio_size * sizeof(struct pfr_tstats); 4441 pfrtstats = mallocarray(io->pfrio_size, 4442 sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO); 4443 if (pfrtstats == NULL) { 4444 error = ENOMEM; 4445 PF_RULES_RUNLOCK(); 4446 PF_TABLE_STATS_UNLOCK(); 4447 break; 4448 } 4449 error = pfr_get_tstats(&io->pfrio_table, pfrtstats, 4450 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4451 PF_RULES_RUNLOCK(); 4452 PF_TABLE_STATS_UNLOCK(); 4453 if (error == 0) 4454 error = copyout(pfrtstats, io->pfrio_buffer, totlen); 4455 free(pfrtstats, M_TEMP); 4456 break; 4457 } 4458 4459 case 
DIOCRCLRTSTATS: { 4460 struct pfioc_table *io = (struct pfioc_table *)addr; 4461 struct pfr_table *pfrts; 4462 size_t totlen; 4463 4464 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4465 error = ENODEV; 4466 break; 4467 } 4468 4469 if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount || 4470 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) { 4471 /* We used to count tables and use the minimum required 4472 * size, so we didn't fail on overly large requests. 4473 * Keep doing so. */ 4474 io->pfrio_size = pf_ioctl_maxcount; 4475 break; 4476 } 4477 4478 totlen = io->pfrio_size * sizeof(struct pfr_table); 4479 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4480 M_TEMP, M_WAITOK); 4481 error = copyin(io->pfrio_buffer, pfrts, totlen); 4482 if (error) { 4483 free(pfrts, M_TEMP); 4484 break; 4485 } 4486 4487 PF_TABLE_STATS_LOCK(); 4488 PF_RULES_RLOCK(); 4489 error = pfr_clr_tstats(pfrts, io->pfrio_size, 4490 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4491 PF_RULES_RUNLOCK(); 4492 PF_TABLE_STATS_UNLOCK(); 4493 free(pfrts, M_TEMP); 4494 break; 4495 } 4496 4497 case DIOCRSETTFLAGS: { 4498 struct pfioc_table *io = (struct pfioc_table *)addr; 4499 struct pfr_table *pfrts; 4500 size_t totlen; 4501 int n; 4502 4503 if (io->pfrio_esize != sizeof(struct pfr_table)) { 4504 error = ENODEV; 4505 break; 4506 } 4507 4508 PF_RULES_RLOCK(); 4509 n = pfr_table_count(&io->pfrio_table, io->pfrio_flags); 4510 if (n < 0) { 4511 PF_RULES_RUNLOCK(); 4512 error = EINVAL; 4513 break; 4514 } 4515 4516 io->pfrio_size = min(io->pfrio_size, n); 4517 PF_RULES_RUNLOCK(); 4518 4519 totlen = io->pfrio_size * sizeof(struct pfr_table); 4520 pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table), 4521 M_TEMP, M_WAITOK); 4522 error = copyin(io->pfrio_buffer, pfrts, totlen); 4523 if (error) { 4524 free(pfrts, M_TEMP); 4525 break; 4526 } 4527 PF_RULES_WLOCK(); 4528 error = pfr_set_tflags(pfrts, io->pfrio_size, 4529 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 4530 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4531 PF_RULES_WUNLOCK(); 4532 free(pfrts, M_TEMP); 4533 break; 4534 } 4535 4536 case DIOCRCLRADDRS: { 4537 struct pfioc_table *io = (struct pfioc_table *)addr; 4538 4539 if (io->pfrio_esize != 0) { 4540 error = ENODEV; 4541 break; 4542 } 4543 PF_RULES_WLOCK(); 4544 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 4545 io->pfrio_flags | PFR_FLAG_USERIOCTL); 4546 PF_RULES_WUNLOCK(); 4547 break; 4548 } 4549 4550 case DIOCRADDADDRS: { 4551 struct pfioc_table *io = (struct pfioc_table *)addr; 4552 struct pfr_addr *pfras; 4553 size_t totlen; 4554 4555 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4556 error = ENODEV; 4557 break; 4558 } 4559 if (io->pfrio_size < 0 || 4560 io->pfrio_size > pf_ioctl_maxcount || 4561 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4562 error = EINVAL; 4563 break; 4564 } 4565 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4566 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4567 M_TEMP, M_WAITOK); 4568 error = copyin(io->pfrio_buffer, pfras, totlen); 4569 if (error) { 4570 free(pfras, M_TEMP); 4571 break; 4572 } 4573 PF_RULES_WLOCK(); 4574 error = pfr_add_addrs(&io->pfrio_table, pfras, 4575 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 4576 PFR_FLAG_USERIOCTL); 4577 PF_RULES_WUNLOCK(); 4578 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4579 error = copyout(pfras, io->pfrio_buffer, totlen); 4580 free(pfras, M_TEMP); 4581 break; 4582 } 4583 4584 case DIOCRDELADDRS: { 4585 struct 
pfioc_table *io = (struct pfioc_table *)addr; 4586 struct pfr_addr *pfras; 4587 size_t totlen; 4588 4589 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4590 error = ENODEV; 4591 break; 4592 } 4593 if (io->pfrio_size < 0 || 4594 io->pfrio_size > pf_ioctl_maxcount || 4595 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4596 error = EINVAL; 4597 break; 4598 } 4599 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4600 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4601 M_TEMP, M_WAITOK); 4602 error = copyin(io->pfrio_buffer, pfras, totlen); 4603 if (error) { 4604 free(pfras, M_TEMP); 4605 break; 4606 } 4607 PF_RULES_WLOCK(); 4608 error = pfr_del_addrs(&io->pfrio_table, pfras, 4609 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 4610 PFR_FLAG_USERIOCTL); 4611 PF_RULES_WUNLOCK(); 4612 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4613 error = copyout(pfras, io->pfrio_buffer, totlen); 4614 free(pfras, M_TEMP); 4615 break; 4616 } 4617 4618 case DIOCRSETADDRS: { 4619 struct pfioc_table *io = (struct pfioc_table *)addr; 4620 struct pfr_addr *pfras; 4621 size_t totlen, count; 4622 4623 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4624 error = ENODEV; 4625 break; 4626 } 4627 if (io->pfrio_size < 0 || io->pfrio_size2 < 0) { 4628 error = EINVAL; 4629 break; 4630 } 4631 count = max(io->pfrio_size, io->pfrio_size2); 4632 if (count > pf_ioctl_maxcount || 4633 WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) { 4634 error = EINVAL; 4635 break; 4636 } 4637 totlen = count * sizeof(struct pfr_addr); 4638 pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP, 4639 M_WAITOK); 4640 error = copyin(io->pfrio_buffer, pfras, totlen); 4641 if (error) { 4642 free(pfras, M_TEMP); 4643 break; 4644 } 4645 PF_RULES_WLOCK(); 4646 error = pfr_set_addrs(&io->pfrio_table, pfras, 4647 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 4648 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 4649 PFR_FLAG_USERIOCTL, 0); 4650 PF_RULES_WUNLOCK(); 4651 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4652 error = copyout(pfras, io->pfrio_buffer, totlen); 4653 free(pfras, M_TEMP); 4654 break; 4655 } 4656 4657 case DIOCRGETADDRS: { 4658 struct pfioc_table *io = (struct pfioc_table *)addr; 4659 struct pfr_addr *pfras; 4660 size_t totlen; 4661 4662 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4663 error = ENODEV; 4664 break; 4665 } 4666 if (io->pfrio_size < 0 || 4667 io->pfrio_size > pf_ioctl_maxcount || 4668 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4669 error = EINVAL; 4670 break; 4671 } 4672 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4673 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4674 M_TEMP, M_WAITOK | M_ZERO); 4675 PF_RULES_RLOCK(); 4676 error = pfr_get_addrs(&io->pfrio_table, pfras, 4677 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4678 PF_RULES_RUNLOCK(); 4679 if (error == 0) 4680 error = copyout(pfras, io->pfrio_buffer, totlen); 4681 free(pfras, M_TEMP); 4682 break; 4683 } 4684 4685 case DIOCRGETASTATS: { 4686 struct pfioc_table *io = (struct pfioc_table *)addr; 4687 struct pfr_astats *pfrastats; 4688 size_t totlen; 4689 4690 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 4691 error = ENODEV; 4692 break; 4693 } 4694 if (io->pfrio_size < 0 || 4695 io->pfrio_size > pf_ioctl_maxcount || 4696 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) { 4697 error = EINVAL; 4698 break; 4699 } 4700 totlen = io->pfrio_size * sizeof(struct pfr_astats); 4701 pfrastats = mallocarray(io->pfrio_size, 4702 
sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO); 4703 PF_RULES_RLOCK(); 4704 error = pfr_get_astats(&io->pfrio_table, pfrastats, 4705 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4706 PF_RULES_RUNLOCK(); 4707 if (error == 0) 4708 error = copyout(pfrastats, io->pfrio_buffer, totlen); 4709 free(pfrastats, M_TEMP); 4710 break; 4711 } 4712 4713 case DIOCRCLRASTATS: { 4714 struct pfioc_table *io = (struct pfioc_table *)addr; 4715 struct pfr_addr *pfras; 4716 size_t totlen; 4717 4718 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4719 error = ENODEV; 4720 break; 4721 } 4722 if (io->pfrio_size < 0 || 4723 io->pfrio_size > pf_ioctl_maxcount || 4724 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4725 error = EINVAL; 4726 break; 4727 } 4728 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4729 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4730 M_TEMP, M_WAITOK); 4731 error = copyin(io->pfrio_buffer, pfras, totlen); 4732 if (error) { 4733 free(pfras, M_TEMP); 4734 break; 4735 } 4736 PF_RULES_WLOCK(); 4737 error = pfr_clr_astats(&io->pfrio_table, pfras, 4738 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 4739 PFR_FLAG_USERIOCTL); 4740 PF_RULES_WUNLOCK(); 4741 if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK) 4742 error = copyout(pfras, io->pfrio_buffer, totlen); 4743 free(pfras, M_TEMP); 4744 break; 4745 } 4746 4747 case DIOCRTSTADDRS: { 4748 struct pfioc_table *io = (struct pfioc_table *)addr; 4749 struct pfr_addr *pfras; 4750 size_t totlen; 4751 4752 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4753 error = ENODEV; 4754 break; 4755 } 4756 if (io->pfrio_size < 0 || 4757 io->pfrio_size > pf_ioctl_maxcount || 4758 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4759 error = EINVAL; 4760 break; 4761 } 4762 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4763 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4764 M_TEMP, M_WAITOK); 4765 error = copyin(io->pfrio_buffer, pfras, totlen); 4766 if (error) { 4767 free(pfras, M_TEMP); 4768 break; 4769 } 4770 PF_RULES_RLOCK(); 4771 error = pfr_tst_addrs(&io->pfrio_table, pfras, 4772 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 4773 PFR_FLAG_USERIOCTL); 4774 PF_RULES_RUNLOCK(); 4775 if (error == 0) 4776 error = copyout(pfras, io->pfrio_buffer, totlen); 4777 free(pfras, M_TEMP); 4778 break; 4779 } 4780 4781 case DIOCRINADEFINE: { 4782 struct pfioc_table *io = (struct pfioc_table *)addr; 4783 struct pfr_addr *pfras; 4784 size_t totlen; 4785 4786 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 4787 error = ENODEV; 4788 break; 4789 } 4790 if (io->pfrio_size < 0 || 4791 io->pfrio_size > pf_ioctl_maxcount || 4792 WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) { 4793 error = EINVAL; 4794 break; 4795 } 4796 totlen = io->pfrio_size * sizeof(struct pfr_addr); 4797 pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr), 4798 M_TEMP, M_WAITOK); 4799 error = copyin(io->pfrio_buffer, pfras, totlen); 4800 if (error) { 4801 free(pfras, M_TEMP); 4802 break; 4803 } 4804 PF_RULES_WLOCK(); 4805 error = pfr_ina_define(&io->pfrio_table, pfras, 4806 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 4807 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 4808 PF_RULES_WUNLOCK(); 4809 free(pfras, M_TEMP); 4810 break; 4811 } 4812 4813 case DIOCOSFPADD: { 4814 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 4815 PF_RULES_WLOCK(); 4816 error = pf_osfp_add(io); 4817 PF_RULES_WUNLOCK(); 4818 break; 4819 } 4820 4821 case DIOCOSFPGET: { 4822 struct pf_osfp_ioctl *io 
= (struct pf_osfp_ioctl *)addr; 4823 PF_RULES_RLOCK(); 4824 error = pf_osfp_get(io); 4825 PF_RULES_RUNLOCK(); 4826 break; 4827 } 4828 4829 case DIOCXBEGIN: { 4830 struct pfioc_trans *io = (struct pfioc_trans *)addr; 4831 struct pfioc_trans_e *ioes, *ioe; 4832 size_t totlen; 4833 int i; 4834 4835 if (io->esize != sizeof(*ioe)) { 4836 error = ENODEV; 4837 break; 4838 } 4839 if (io->size < 0 || 4840 io->size > pf_ioctl_maxcount || 4841 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 4842 error = EINVAL; 4843 break; 4844 } 4845 totlen = sizeof(struct pfioc_trans_e) * io->size; 4846 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 4847 M_TEMP, M_WAITOK); 4848 error = copyin(io->array, ioes, totlen); 4849 if (error) { 4850 free(ioes, M_TEMP); 4851 break; 4852 } 4853 PF_RULES_WLOCK(); 4854 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 4855 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; 4856 switch (ioe->rs_num) { 4857 case PF_RULESET_ETH: 4858 if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) { 4859 PF_RULES_WUNLOCK(); 4860 free(ioes, M_TEMP); 4861 goto fail; 4862 } 4863 break; 4864 #ifdef ALTQ 4865 case PF_RULESET_ALTQ: 4866 if (ioe->anchor[0]) { 4867 PF_RULES_WUNLOCK(); 4868 free(ioes, M_TEMP); 4869 error = EINVAL; 4870 goto fail; 4871 } 4872 if ((error = pf_begin_altq(&ioe->ticket))) { 4873 PF_RULES_WUNLOCK(); 4874 free(ioes, M_TEMP); 4875 goto fail; 4876 } 4877 break; 4878 #endif /* ALTQ */ 4879 case PF_RULESET_TABLE: 4880 { 4881 struct pfr_table table; 4882 4883 bzero(&table, sizeof(table)); 4884 strlcpy(table.pfrt_anchor, ioe->anchor, 4885 sizeof(table.pfrt_anchor)); 4886 if ((error = pfr_ina_begin(&table, 4887 &ioe->ticket, NULL, 0))) { 4888 PF_RULES_WUNLOCK(); 4889 free(ioes, M_TEMP); 4890 goto fail; 4891 } 4892 break; 4893 } 4894 default: 4895 if ((error = pf_begin_rules(&ioe->ticket, 4896 ioe->rs_num, ioe->anchor))) { 4897 PF_RULES_WUNLOCK(); 4898 free(ioes, M_TEMP); 4899 goto fail; 4900 } 4901 break; 4902 } 4903 } 4904 PF_RULES_WUNLOCK(); 4905 error = copyout(ioes, io->array, totlen); 4906 free(ioes, M_TEMP); 4907 break; 4908 } 4909 4910 case DIOCXROLLBACK: { 4911 struct pfioc_trans *io = (struct pfioc_trans *)addr; 4912 struct pfioc_trans_e *ioe, *ioes; 4913 size_t totlen; 4914 int i; 4915 4916 if (io->esize != sizeof(*ioe)) { 4917 error = ENODEV; 4918 break; 4919 } 4920 if (io->size < 0 || 4921 io->size > pf_ioctl_maxcount || 4922 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 4923 error = EINVAL; 4924 break; 4925 } 4926 totlen = sizeof(struct pfioc_trans_e) * io->size; 4927 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 4928 M_TEMP, M_WAITOK); 4929 error = copyin(io->array, ioes, totlen); 4930 if (error) { 4931 free(ioes, M_TEMP); 4932 break; 4933 } 4934 PF_RULES_WLOCK(); 4935 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 4936 ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; 4937 switch (ioe->rs_num) { 4938 case PF_RULESET_ETH: 4939 if ((error = pf_rollback_eth(ioe->ticket, 4940 ioe->anchor))) { 4941 PF_RULES_WUNLOCK(); 4942 free(ioes, M_TEMP); 4943 goto fail; /* really bad */ 4944 } 4945 break; 4946 #ifdef ALTQ 4947 case PF_RULESET_ALTQ: 4948 if (ioe->anchor[0]) { 4949 PF_RULES_WUNLOCK(); 4950 free(ioes, M_TEMP); 4951 error = EINVAL; 4952 goto fail; 4953 } 4954 if ((error = pf_rollback_altq(ioe->ticket))) { 4955 PF_RULES_WUNLOCK(); 4956 free(ioes, M_TEMP); 4957 goto fail; /* really bad */ 4958 } 4959 break; 4960 #endif /* ALTQ */ 4961 case PF_RULESET_TABLE: 4962 { 4963 struct pfr_table table; 4964 4965 bzero(&table, 
sizeof(table)); 4966 strlcpy(table.pfrt_anchor, ioe->anchor, 4967 sizeof(table.pfrt_anchor)); 4968 if ((error = pfr_ina_rollback(&table, 4969 ioe->ticket, NULL, 0))) { 4970 PF_RULES_WUNLOCK(); 4971 free(ioes, M_TEMP); 4972 goto fail; /* really bad */ 4973 } 4974 break; 4975 } 4976 default: 4977 if ((error = pf_rollback_rules(ioe->ticket, 4978 ioe->rs_num, ioe->anchor))) { 4979 PF_RULES_WUNLOCK(); 4980 free(ioes, M_TEMP); 4981 goto fail; /* really bad */ 4982 } 4983 break; 4984 } 4985 } 4986 PF_RULES_WUNLOCK(); 4987 free(ioes, M_TEMP); 4988 break; 4989 } 4990 4991 case DIOCXCOMMIT: { 4992 struct pfioc_trans *io = (struct pfioc_trans *)addr; 4993 struct pfioc_trans_e *ioe, *ioes; 4994 struct pf_kruleset *rs; 4995 struct pf_keth_ruleset *ers; 4996 size_t totlen; 4997 int i; 4998 4999 if (io->esize != sizeof(*ioe)) { 5000 error = ENODEV; 5001 break; 5002 } 5003 5004 if (io->size < 0 || 5005 io->size > pf_ioctl_maxcount || 5006 WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) { 5007 error = EINVAL; 5008 break; 5009 } 5010 5011 totlen = sizeof(struct pfioc_trans_e) * io->size; 5012 ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e), 5013 M_TEMP, M_WAITOK); 5014 error = copyin(io->array, ioes, totlen); 5015 if (error) { 5016 free(ioes, M_TEMP); 5017 break; 5018 } 5019 PF_RULES_WLOCK(); 5020 /* First make sure everything will succeed. */ 5021 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5022 ioe->anchor[sizeof(ioe->anchor) - 1] = 0; 5023 switch (ioe->rs_num) { 5024 case PF_RULESET_ETH: 5025 ers = pf_find_keth_ruleset(ioe->anchor); 5026 if (ers == NULL || ioe->ticket == 0 || 5027 ioe->ticket != ers->inactive.ticket) { 5028 PF_RULES_WUNLOCK(); 5029 free(ioes, M_TEMP); 5030 error = EINVAL; 5031 goto fail; 5032 } 5033 break; 5034 #ifdef ALTQ 5035 case PF_RULESET_ALTQ: 5036 if (ioe->anchor[0]) { 5037 PF_RULES_WUNLOCK(); 5038 free(ioes, M_TEMP); 5039 error = EINVAL; 5040 goto fail; 5041 } 5042 if (!V_altqs_inactive_open || ioe->ticket != 5043 V_ticket_altqs_inactive) { 5044 PF_RULES_WUNLOCK(); 5045 free(ioes, M_TEMP); 5046 error = EBUSY; 5047 goto fail; 5048 } 5049 break; 5050 #endif /* ALTQ */ 5051 case PF_RULESET_TABLE: 5052 rs = pf_find_kruleset(ioe->anchor); 5053 if (rs == NULL || !rs->topen || ioe->ticket != 5054 rs->tticket) { 5055 PF_RULES_WUNLOCK(); 5056 free(ioes, M_TEMP); 5057 error = EBUSY; 5058 goto fail; 5059 } 5060 break; 5061 default: 5062 if (ioe->rs_num < 0 || ioe->rs_num >= 5063 PF_RULESET_MAX) { 5064 PF_RULES_WUNLOCK(); 5065 free(ioes, M_TEMP); 5066 error = EINVAL; 5067 goto fail; 5068 } 5069 rs = pf_find_kruleset(ioe->anchor); 5070 if (rs == NULL || 5071 !rs->rules[ioe->rs_num].inactive.open || 5072 rs->rules[ioe->rs_num].inactive.ticket != 5073 ioe->ticket) { 5074 PF_RULES_WUNLOCK(); 5075 free(ioes, M_TEMP); 5076 error = EBUSY; 5077 goto fail; 5078 } 5079 break; 5080 } 5081 } 5082 /* Now do the commit - no errors should happen here.
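Every ticket was validated in the first pass above, so each pf_commit_*() call below is expected to succeed; a failure at this point would leave the transaction partially applied, hence the 'really bad' error paths.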
*/ 5083 for (i = 0, ioe = ioes; i < io->size; i++, ioe++) { 5084 switch (ioe->rs_num) { 5085 case PF_RULESET_ETH: 5086 if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) { 5087 PF_RULES_WUNLOCK(); 5088 free(ioes, M_TEMP); 5089 goto fail; /* really bad */ 5090 } 5091 break; 5092 #ifdef ALTQ 5093 case PF_RULESET_ALTQ: 5094 if ((error = pf_commit_altq(ioe->ticket))) { 5095 PF_RULES_WUNLOCK(); 5096 free(ioes, M_TEMP); 5097 goto fail; /* really bad */ 5098 } 5099 break; 5100 #endif /* ALTQ */ 5101 case PF_RULESET_TABLE: 5102 { 5103 struct pfr_table table; 5104 5105 bzero(&table, sizeof(table)); 5106 (void)strlcpy(table.pfrt_anchor, ioe->anchor, 5107 sizeof(table.pfrt_anchor)); 5108 if ((error = pfr_ina_commit(&table, 5109 ioe->ticket, NULL, NULL, 0))) { 5110 PF_RULES_WUNLOCK(); 5111 free(ioes, M_TEMP); 5112 goto fail; /* really bad */ 5113 } 5114 break; 5115 } 5116 default: 5117 if ((error = pf_commit_rules(ioe->ticket, 5118 ioe->rs_num, ioe->anchor))) { 5119 PF_RULES_WUNLOCK(); 5120 free(ioes, M_TEMP); 5121 goto fail; /* really bad */ 5122 } 5123 break; 5124 } 5125 } 5126 PF_RULES_WUNLOCK(); 5127 5128 /* Only hook into Ethernet traffic if we've got rules for it. */ 5129 if (! TAILQ_EMPTY(V_pf_keth->active.rules)) 5130 hook_pf_eth(); 5131 else 5132 dehook_pf_eth(); 5133 5134 free(ioes, M_TEMP); 5135 break; 5136 } 5137 5138 case DIOCGETSRCNODES: { 5139 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 5140 struct pf_srchash *sh; 5141 struct pf_ksrc_node *n; 5142 struct pf_src_node *p, *pstore; 5143 uint32_t i, nr = 0; 5144 5145 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5146 i++, sh++) { 5147 PF_HASHROW_LOCK(sh); 5148 LIST_FOREACH(n, &sh->nodes, entry) 5149 nr++; 5150 PF_HASHROW_UNLOCK(sh); 5151 } 5152 5153 psn->psn_len = min(psn->psn_len, 5154 sizeof(struct pf_src_node) * nr); 5155 5156 if (psn->psn_len == 0) { 5157 psn->psn_len = sizeof(struct pf_src_node) * nr; 5158 break; 5159 } 5160 5161 nr = 0; 5162 5163 p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO); 5164 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5165 i++, sh++) { 5166 PF_HASHROW_LOCK(sh); 5167 LIST_FOREACH(n, &sh->nodes, entry) { 5168 5169 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 5170 break; 5171 5172 pf_src_node_copy(n, p); 5173 5174 p++; 5175 nr++; 5176 } 5177 PF_HASHROW_UNLOCK(sh); 5178 } 5179 error = copyout(pstore, psn->psn_src_nodes, 5180 sizeof(struct pf_src_node) * nr); 5181 if (error) { 5182 free(pstore, M_TEMP); 5183 break; 5184 } 5185 psn->psn_len = sizeof(struct pf_src_node) * nr; 5186 free(pstore, M_TEMP); 5187 break; 5188 } 5189 5190 case DIOCCLRSRCNODES: { 5191 pf_clear_srcnodes(NULL); 5192 pf_purge_expired_src_nodes(); 5193 break; 5194 } 5195 5196 case DIOCKILLSRCNODES: 5197 pf_kill_srcnodes((struct pfioc_src_node_kill *)addr); 5198 break; 5199 5200 #ifdef COMPAT_FREEBSD13 5201 case DIOCKEEPCOUNTERS_FREEBSD13: 5202 #endif 5203 case DIOCKEEPCOUNTERS: 5204 error = pf_keepcounters((struct pfioc_nv *)addr); 5205 break; 5206 5207 case DIOCGETSYNCOOKIES: 5208 error = pf_get_syncookies((struct pfioc_nv *)addr); 5209 break; 5210 5211 case DIOCSETSYNCOOKIES: 5212 error = pf_set_syncookies((struct pfioc_nv *)addr); 5213 break; 5214 5215 case DIOCSETHOSTID: { 5216 u_int32_t *hostid = (u_int32_t *)addr; 5217 5218 PF_RULES_WLOCK(); 5219 if (*hostid == 0) 5220 V_pf_status.hostid = arc4random(); 5221 else 5222 V_pf_status.hostid = *hostid; 5223 PF_RULES_WUNLOCK(); 5224 break; 5225 } 5226 5227 case DIOCOSFPFLUSH: 5228 PF_RULES_WLOCK(); 5229 pf_osfp_flush(); 5230
PF_RULES_WUNLOCK(); 5231 break; 5232 5233 case DIOCIGETIFACES: { 5234 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5235 struct pfi_kif *ifstore; 5236 size_t bufsiz; 5237 5238 if (io->pfiio_esize != sizeof(struct pfi_kif)) { 5239 error = ENODEV; 5240 break; 5241 } 5242 5243 if (io->pfiio_size < 0 || 5244 io->pfiio_size > pf_ioctl_maxcount || 5245 WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) { 5246 error = EINVAL; 5247 break; 5248 } 5249 5250 bufsiz = io->pfiio_size * sizeof(struct pfi_kif); 5251 ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif), 5252 M_TEMP, M_WAITOK | M_ZERO); 5253 5254 PF_RULES_RLOCK(); 5255 pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size); 5256 PF_RULES_RUNLOCK(); 5257 error = copyout(ifstore, io->pfiio_buffer, bufsiz); 5258 free(ifstore, M_TEMP); 5259 break; 5260 } 5261 5262 case DIOCSETIFFLAG: { 5263 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5264 5265 PF_RULES_WLOCK(); 5266 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 5267 PF_RULES_WUNLOCK(); 5268 break; 5269 } 5270 5271 case DIOCCLRIFFLAG: { 5272 struct pfioc_iface *io = (struct pfioc_iface *)addr; 5273 5274 PF_RULES_WLOCK(); 5275 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 5276 PF_RULES_WUNLOCK(); 5277 break; 5278 } 5279 5280 default: 5281 error = ENODEV; 5282 break; 5283 } 5284 fail: 5285 if (sx_xlocked(&pf_ioctl_lock)) 5286 sx_xunlock(&pf_ioctl_lock); 5287 CURVNET_RESTORE(); 5288 5289 #undef ERROUT_IOCTL 5290 5291 return (error); 5292 } 5293 5294 void 5295 pfsync_state_export(struct pfsync_state *sp, struct pf_kstate *st) 5296 { 5297 bzero(sp, sizeof(struct pfsync_state)); 5298 5299 /* copy from state key */ 5300 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5301 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5302 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5303 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5304 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5305 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5306 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5307 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5308 sp->proto = st->key[PF_SK_WIRE]->proto; 5309 sp->af = st->key[PF_SK_WIRE]->af; 5310 5311 /* copy from state */ 5312 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); 5313 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); 5314 sp->creation = htonl(time_uptime - st->creation); 5315 sp->expire = pf_state_expires(st); 5316 if (sp->expire <= time_uptime) 5317 sp->expire = htonl(0); 5318 else 5319 sp->expire = htonl(sp->expire - time_uptime); 5320 5321 sp->direction = st->direction; 5322 sp->log = st->log; 5323 sp->timeout = st->timeout; 5324 sp->state_flags = st->state_flags; 5325 if (st->src_node) 5326 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 5327 if (st->nat_src_node) 5328 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5329 5330 sp->id = st->id; 5331 sp->creatorid = st->creatorid; 5332 pf_state_peer_hton(&st->src, &sp->src); 5333 pf_state_peer_hton(&st->dst, &sp->dst); 5334 5335 if (st->rule.ptr == NULL) 5336 sp->rule = htonl(-1); 5337 else 5338 sp->rule = htonl(st->rule.ptr->nr); 5339 if (st->anchor.ptr == NULL) 5340 sp->anchor = htonl(-1); 5341 else 5342 sp->anchor = htonl(st->anchor.ptr->nr); 5343 if (st->nat_rule.ptr == NULL) 5344 sp->nat_rule = htonl(-1); 5345 else 5346 sp->nat_rule = htonl(st->nat_rule.ptr->nr); 5347 5348 pf_state_counter_hton(st->packets[0], sp->packets[0]); 5349 
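/* pf_state_counter_hton() splits each 64-bit counter into two 32-bit words in network byte order for the wire format. */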
pf_state_counter_hton(st->packets[1], sp->packets[1]); 5350 pf_state_counter_hton(st->bytes[0], sp->bytes[0]); 5351 pf_state_counter_hton(st->bytes[1], sp->bytes[1]); 5352 } 5353 5354 void 5355 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st) 5356 { 5357 bzero(sp, sizeof(*sp)); 5358 5359 sp->version = PF_STATE_VERSION; 5360 5361 /* copy from state key */ 5362 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 5363 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 5364 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 5365 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 5366 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 5367 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 5368 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 5369 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 5370 sp->proto = st->key[PF_SK_WIRE]->proto; 5371 sp->af = st->key[PF_SK_WIRE]->af; 5372 5373 /* copy from state */ 5374 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); 5375 strlcpy(sp->orig_ifname, st->orig_kif->pfik_name, 5376 sizeof(sp->orig_ifname)); 5377 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); 5378 sp->creation = htonl(time_uptime - st->creation); 5379 sp->expire = pf_state_expires(st); 5380 if (sp->expire <= time_uptime) 5381 sp->expire = htonl(0); 5382 else 5383 sp->expire = htonl(sp->expire - time_uptime); 5384 5385 sp->direction = st->direction; 5386 sp->log = st->log; 5387 sp->timeout = st->timeout; 5388 sp->state_flags = st->state_flags; 5389 if (st->src_node) 5390 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 5391 if (st->nat_src_node) 5392 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 5393 5394 sp->id = st->id; 5395 sp->creatorid = st->creatorid; 5396 pf_state_peer_hton(&st->src, &sp->src); 5397 pf_state_peer_hton(&st->dst, &sp->dst); 5398 5399 if (st->rule.ptr == NULL) 5400 sp->rule = htonl(-1); 5401 else 5402 sp->rule = htonl(st->rule.ptr->nr); 5403 if (st->anchor.ptr == NULL) 5404 sp->anchor = htonl(-1); 5405 else 5406 sp->anchor = htonl(st->anchor.ptr->nr); 5407 if (st->nat_rule.ptr == NULL) 5408 sp->nat_rule = htonl(-1); 5409 else 5410 sp->nat_rule = htonl(st->nat_rule.ptr->nr); 5411 5412 sp->packets[0] = st->packets[0]; 5413 sp->packets[1] = st->packets[1]; 5414 sp->bytes[0] = st->bytes[0]; 5415 sp->bytes[1] = st->bytes[1]; 5416 } 5417 5418 static void 5419 pf_tbladdr_copyout(struct pf_addr_wrap *aw) 5420 { 5421 struct pfr_ktable *kt; 5422 5423 KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type)); 5424 5425 kt = aw->p.tbl; 5426 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) 5427 kt = kt->pfrkt_root; 5428 aw->p.tbl = NULL; 5429 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ? 
5430 kt->pfrkt_cnt : -1; 5431 } 5432 5433 static int 5434 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters, 5435 size_t number, char **names) 5436 { 5437 nvlist_t *nvc; 5438 5439 nvc = nvlist_create(0); 5440 if (nvc == NULL) 5441 return (ENOMEM); 5442 5443 for (int i = 0; i < number; i++) { 5444 nvlist_append_number_array(nvc, "counters", 5445 counter_u64_fetch(counters[i])); 5446 nvlist_append_string_array(nvc, "names", 5447 names[i]); 5448 nvlist_append_number_array(nvc, "ids", 5449 i); 5450 } 5451 nvlist_add_nvlist(nvl, name, nvc); 5452 nvlist_destroy(nvc); 5453 5454 return (0); 5455 } 5456 5457 static int 5458 pf_getstatus(struct pfioc_nv *nv) 5459 { 5460 nvlist_t *nvl = NULL, *nvc = NULL; 5461 void *nvlpacked = NULL; 5462 int error; 5463 struct pf_status s; 5464 char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES; 5465 char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES; 5466 char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES; 5467 PF_RULES_RLOCK_TRACKER; 5468 5469 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 5470 5471 PF_RULES_RLOCK(); 5472 5473 nvl = nvlist_create(0); 5474 if (nvl == NULL) 5475 ERROUT(ENOMEM); 5476 5477 nvlist_add_bool(nvl, "running", V_pf_status.running); 5478 nvlist_add_number(nvl, "since", V_pf_status.since); 5479 nvlist_add_number(nvl, "debug", V_pf_status.debug); 5480 nvlist_add_number(nvl, "hostid", V_pf_status.hostid); 5481 nvlist_add_number(nvl, "states", V_pf_status.states); 5482 nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes); 5483 5484 /* counters */ 5485 error = pf_add_status_counters(nvl, "counters", V_pf_status.counters, 5486 PFRES_MAX, pf_reasons); 5487 if (error != 0) 5488 ERROUT(error); 5489 5490 /* lcounters */ 5491 error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters, 5492 KLCNT_MAX, pf_lcounter); 5493 if (error != 0) 5494 ERROUT(error); 5495 5496 /* fcounters */ 5497 nvc = nvlist_create(0); 5498 if (nvc == NULL) 5499 ERROUT(ENOMEM); 5500 5501 for (int i = 0; i < FCNT_MAX; i++) { 5502 nvlist_append_number_array(nvc, "counters", 5503 pf_counter_u64_fetch(&V_pf_status.fcounters[i])); 5504 nvlist_append_string_array(nvc, "names", 5505 pf_fcounter[i]); 5506 nvlist_append_number_array(nvc, "ids", 5507 i); 5508 } 5509 nvlist_add_nvlist(nvl, "fcounters", nvc); 5510 nvlist_destroy(nvc); 5511 nvc = NULL; 5512 5513 /* scounters */ 5514 error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters, 5515 SCNT_MAX, pf_fcounter); 5516 if (error != 0) 5517 ERROUT(error); 5518 5519 nvlist_add_string(nvl, "ifname", V_pf_status.ifname); 5520 nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum, 5521 PF_MD5_DIGEST_LENGTH); 5522 5523 pfi_update_status(V_pf_status.ifname, &s); 5524 5525 /* pcounters / bcounters */ 5526 for (int i = 0; i < 2; i++) { 5527 for (int j = 0; j < 2; j++) { 5528 for (int k = 0; k < 2; k++) { 5529 nvlist_append_number_array(nvl, "pcounters", 5530 s.pcounters[i][j][k]); 5531 } 5532 nvlist_append_number_array(nvl, "bcounters", 5533 s.bcounters[i][j]); 5534 } 5535 } 5536 5537 nvlpacked = nvlist_pack(nvl, &nv->len); 5538 if (nvlpacked == NULL) 5539 ERROUT(ENOMEM); 5540 5541 if (nv->size == 0) 5542 ERROUT(0); 5543 else if (nv->size < nv->len) 5544 ERROUT(ENOSPC); 5545 5546 PF_RULES_RUNLOCK(); 5547 error = copyout(nvlpacked, nv->data, nv->len); 5548 goto done; 5549 5550 #undef ERROUT 5551 errout: 5552 PF_RULES_RUNLOCK(); 5553 done: 5554 free(nvlpacked, M_NVLIST); 5555 nvlist_destroy(nvc); 5556 nvlist_destroy(nvl); 5557 5558 return (error); 5559 } 5560 5561 /* 5562 * XXX - Check for version mismatch!!!
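* The helpers below flush live pf data: pf_clear_all_states() walks every * id-hash row, marks each state PFTM_PURGE with PFSTATE_NOSYNC set (so no * per-state pfsync delete messages are sent) and unlinks it.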
5563 */ 5564 static void 5565 pf_clear_all_states(void) 5566 { 5567 struct pf_kstate *s; 5568 u_int i; 5569 5570 for (i = 0; i <= pf_hashmask; i++) { 5571 struct pf_idhash *ih = &V_pf_idhash[i]; 5572 relock: 5573 PF_HASHROW_LOCK(ih); 5574 LIST_FOREACH(s, &ih->states, entry) { 5575 s->timeout = PFTM_PURGE; 5576 /* Don't send out individual delete messages. */ 5577 s->state_flags |= PFSTATE_NOSYNC; 5578 pf_unlink_state(s); 5579 goto relock; 5580 } 5581 PF_HASHROW_UNLOCK(ih); 5582 } 5583 } 5584 5585 static int 5586 pf_clear_tables(void) 5587 { 5588 struct pfioc_table io; 5589 int error; 5590 5591 bzero(&io, sizeof(io)); 5592 5593 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel, 5594 io.pfrio_flags); 5595 5596 return (error); 5597 } 5598 5599 static void 5600 pf_clear_srcnodes(struct pf_ksrc_node *n) 5601 { 5602 struct pf_kstate *s; 5603 int i; 5604 5605 for (i = 0; i <= pf_hashmask; i++) { 5606 struct pf_idhash *ih = &V_pf_idhash[i]; 5607 5608 PF_HASHROW_LOCK(ih); 5609 LIST_FOREACH(s, &ih->states, entry) { 5610 if (n == NULL || n == s->src_node) 5611 s->src_node = NULL; 5612 if (n == NULL || n == s->nat_src_node) 5613 s->nat_src_node = NULL; 5614 } 5615 PF_HASHROW_UNLOCK(ih); 5616 } 5617 5618 if (n == NULL) { 5619 struct pf_srchash *sh; 5620 5621 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; 5622 i++, sh++) { 5623 PF_HASHROW_LOCK(sh); 5624 LIST_FOREACH(n, &sh->nodes, entry) { 5625 n->expire = 1; 5626 n->states = 0; 5627 } 5628 PF_HASHROW_UNLOCK(sh); 5629 } 5630 } else { 5631 /* XXX: hash slot should already be locked here. */ 5632 n->expire = 1; 5633 n->states = 0; 5634 } 5635 } 5636 5637 static void 5638 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk) 5639 { 5640 struct pf_ksrc_node_list kill; 5641 5642 LIST_INIT(&kill); 5643 for (int i = 0; i <= pf_srchashmask; i++) { 5644 struct pf_srchash *sh = &V_pf_srchash[i]; 5645 struct pf_ksrc_node *sn, *tmp; 5646 5647 PF_HASHROW_LOCK(sh); 5648 LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp) 5649 if (PF_MATCHA(psnk->psnk_src.neg, 5650 &psnk->psnk_src.addr.v.a.addr, 5651 &psnk->psnk_src.addr.v.a.mask, 5652 &sn->addr, sn->af) && 5653 PF_MATCHA(psnk->psnk_dst.neg, 5654 &psnk->psnk_dst.addr.v.a.addr, 5655 &psnk->psnk_dst.addr.v.a.mask, 5656 &sn->raddr, sn->af)) { 5657 pf_unlink_src_node(sn); 5658 LIST_INSERT_HEAD(&kill, sn, entry); 5659 sn->expire = 1; 5660 } 5661 PF_HASHROW_UNLOCK(sh); 5662 } 5663 5664 for (int i = 0; i <= pf_hashmask; i++) { 5665 struct pf_idhash *ih = &V_pf_idhash[i]; 5666 struct pf_kstate *s; 5667 5668 PF_HASHROW_LOCK(ih); 5669 LIST_FOREACH(s, &ih->states, entry) { 5670 if (s->src_node && s->src_node->expire == 1) 5671 s->src_node = NULL; 5672 if (s->nat_src_node && s->nat_src_node->expire == 1) 5673 s->nat_src_node = NULL; 5674 } 5675 PF_HASHROW_UNLOCK(ih); 5676 } 5677 5678 psnk->psnk_killed = pf_free_src_nodes(&kill); 5679 } 5680 5681 static int 5682 pf_keepcounters(struct pfioc_nv *nv) 5683 { 5684 nvlist_t *nvl = NULL; 5685 void *nvlpacked = NULL; 5686 int error = 0; 5687 5688 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 5689 5690 if (nv->len > pf_ioctl_maxcount) 5691 ERROUT(ENOMEM); 5692 5693 nvlpacked = malloc(nv->len, M_TEMP, M_WAITOK); 5694 if (nvlpacked == NULL) 5695 ERROUT(ENOMEM); 5696 5697 error = copyin(nv->data, nvlpacked, nv->len); 5698 if (error) 5699 ERROUT(error); 5700 5701 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 5702 if (nvl == NULL) 5703 ERROUT(EBADMSG); 5704 5705 if (! 
nvlist_exists_bool(nvl, "keep_counters")) 5706 ERROUT(EBADMSG); 5707 5708 V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters"); 5709 5710 on_error: 5711 nvlist_destroy(nvl); 5712 free(nvlpacked, M_TEMP); 5713 return (error); 5714 } 5715 5716 static unsigned int 5717 pf_clear_states(const struct pf_kstate_kill *kill) 5718 { 5719 struct pf_state_key_cmp match_key; 5720 struct pf_kstate *s; 5721 struct pfi_kkif *kif; 5722 int idx; 5723 unsigned int killed = 0, dir; 5724 5725 for (unsigned int i = 0; i <= pf_hashmask; i++) { 5726 struct pf_idhash *ih = &V_pf_idhash[i]; 5727 5728 relock_DIOCCLRSTATES: 5729 PF_HASHROW_LOCK(ih); 5730 LIST_FOREACH(s, &ih->states, entry) { 5731 /* For floating states look at the original kif. */ 5732 kif = s->kif == V_pfi_all ? s->orig_kif : s->kif; 5733 5734 if (kill->psk_ifname[0] && 5735 strcmp(kill->psk_ifname, 5736 kif->pfik_name)) 5737 continue; 5738 5739 if (kill->psk_kill_match) { 5740 bzero(&match_key, sizeof(match_key)); 5741 5742 if (s->direction == PF_OUT) { 5743 dir = PF_IN; 5744 idx = PF_SK_STACK; 5745 } else { 5746 dir = PF_OUT; 5747 idx = PF_SK_WIRE; 5748 } 5749 5750 match_key.af = s->key[idx]->af; 5751 match_key.proto = s->key[idx]->proto; 5752 PF_ACPY(&match_key.addr[0], 5753 &s->key[idx]->addr[1], match_key.af); 5754 match_key.port[0] = s->key[idx]->port[1]; 5755 PF_ACPY(&match_key.addr[1], 5756 &s->key[idx]->addr[0], match_key.af); 5757 match_key.port[1] = s->key[idx]->port[0]; 5758 } 5759 5760 /* 5761 * Don't send out individual 5762 * delete messages. 5763 */ 5764 s->state_flags |= PFSTATE_NOSYNC; 5765 pf_unlink_state(s); 5766 killed++; 5767 5768 if (kill->psk_kill_match) 5769 killed += pf_kill_matching_state(&match_key, 5770 dir); 5771 5772 goto relock_DIOCCLRSTATES; 5773 } 5774 PF_HASHROW_UNLOCK(ih); 5775 } 5776 5777 if (V_pfsync_clear_states_ptr != NULL) 5778 V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname); 5779 5780 return (killed); 5781 } 5782 5783 static void 5784 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed) 5785 { 5786 struct pf_kstate *s; 5787 5788 if (kill->psk_pfcmp.id) { 5789 if (kill->psk_pfcmp.creatorid == 0) 5790 kill->psk_pfcmp.creatorid = V_pf_status.hostid; 5791 if ((s = pf_find_state_byid(kill->psk_pfcmp.id, 5792 kill->psk_pfcmp.creatorid))) { 5793 pf_unlink_state(s); 5794 *killed = 1; 5795 } 5796 return; 5797 } 5798 5799 for (unsigned int i = 0; i <= pf_hashmask; i++) 5800 *killed += pf_killstates_row(kill, &V_pf_idhash[i]); 5801 5802 return; 5803 } 5804 5805 static int 5806 pf_killstates_nv(struct pfioc_nv *nv) 5807 { 5808 struct pf_kstate_kill kill; 5809 nvlist_t *nvl = NULL; 5810 void *nvlpacked = NULL; 5811 int error = 0; 5812 unsigned int killed = 0; 5813 5814 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 5815 5816 if (nv->len > pf_ioctl_maxcount) 5817 ERROUT(ENOMEM); 5818 5819 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 5820 if (nvlpacked == NULL) 5821 ERROUT(ENOMEM); 5822 5823 error = copyin(nv->data, nvlpacked, nv->len); 5824 if (error) 5825 ERROUT(error); 5826 5827 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 5828 if (nvl == NULL) 5829 ERROUT(EBADMSG); 5830 5831 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); 5832 if (error) 5833 ERROUT(error); 5834 5835 pf_killstates(&kill, &killed); 5836 5837 free(nvlpacked, M_NVLIST); 5838 nvlpacked = NULL; 5839 nvlist_destroy(nvl); 5840 nvl = nvlist_create(0); 5841 if (nvl == NULL) 5842 ERROUT(ENOMEM); 5843 5844 nvlist_add_number(nvl, "killed", killed); 5845 5846 nvlpacked = nvlist_pack(nvl, &nv->len); 5847 if (nvlpacked 
== NULL) 5848 ERROUT(ENOMEM); 5849 5850 if (nv->size == 0) 5851 ERROUT(0); 5852 else if (nv->size < nv->len) 5853 ERROUT(ENOSPC); 5854 5855 error = copyout(nvlpacked, nv->data, nv->len); 5856 5857 on_error: 5858 nvlist_destroy(nvl); 5859 free(nvlpacked, M_NVLIST); 5860 return (error); 5861 } 5862 5863 static int 5864 pf_clearstates_nv(struct pfioc_nv *nv) 5865 { 5866 struct pf_kstate_kill kill; 5867 nvlist_t *nvl = NULL; 5868 void *nvlpacked = NULL; 5869 int error = 0; 5870 unsigned int killed; 5871 5872 #define ERROUT(x) ERROUT_FUNCTION(on_error, x) 5873 5874 if (nv->len > pf_ioctl_maxcount) 5875 ERROUT(ENOMEM); 5876 5877 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 5878 if (nvlpacked == NULL) 5879 ERROUT(ENOMEM); 5880 5881 error = copyin(nv->data, nvlpacked, nv->len); 5882 if (error) 5883 ERROUT(error); 5884 5885 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 5886 if (nvl == NULL) 5887 ERROUT(EBADMSG); 5888 5889 error = pf_nvstate_kill_to_kstate_kill(nvl, &kill); 5890 if (error) 5891 ERROUT(error); 5892 5893 killed = pf_clear_states(&kill); 5894 5895 free(nvlpacked, M_NVLIST); 5896 nvlpacked = NULL; 5897 nvlist_destroy(nvl); 5898 nvl = nvlist_create(0); 5899 if (nvl == NULL) 5900 ERROUT(ENOMEM); 5901 5902 nvlist_add_number(nvl, "killed", killed); 5903 5904 nvlpacked = nvlist_pack(nvl, &nv->len); 5905 if (nvlpacked == NULL) 5906 ERROUT(ENOMEM); 5907 5908 if (nv->size == 0) 5909 ERROUT(0); 5910 else if (nv->size < nv->len) 5911 ERROUT(ENOSPC); 5912 5913 error = copyout(nvlpacked, nv->data, nv->len); 5914 5915 #undef ERROUT 5916 on_error: 5917 nvlist_destroy(nvl); 5918 free(nvlpacked, M_NVLIST); 5919 return (error); 5920 } 5921 5922 static int 5923 pf_getstate(struct pfioc_nv *nv) 5924 { 5925 nvlist_t *nvl = NULL, *nvls; 5926 void *nvlpacked = NULL; 5927 struct pf_kstate *s = NULL; 5928 int error = 0; 5929 uint64_t id, creatorid; 5930 5931 #define ERROUT(x) ERROUT_FUNCTION(errout, x) 5932 5933 if (nv->len > pf_ioctl_maxcount) 5934 ERROUT(ENOMEM); 5935 5936 nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK); 5937 if (nvlpacked == NULL) 5938 ERROUT(ENOMEM); 5939 5940 error = copyin(nv->data, nvlpacked, nv->len); 5941 if (error) 5942 ERROUT(error); 5943 5944 nvl = nvlist_unpack(nvlpacked, nv->len, 0); 5945 if (nvl == NULL) 5946 ERROUT(EBADMSG); 5947 5948 PFNV_CHK(pf_nvuint64(nvl, "id", &id)); 5949 PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid)); 5950 5951 s = pf_find_state_byid(id, creatorid); 5952 if (s == NULL) 5953 ERROUT(ENOENT); 5954 5955 free(nvlpacked, M_NVLIST); 5956 nvlpacked = NULL; 5957 nvlist_destroy(nvl); 5958 nvl = nvlist_create(0); 5959 if (nvl == NULL) 5960 ERROUT(ENOMEM); 5961 5962 nvls = pf_state_to_nvstate(s); 5963 if (nvls == NULL) 5964 ERROUT(ENOMEM); 5965 5966 nvlist_add_nvlist(nvl, "state", nvls); 5967 nvlist_destroy(nvls); 5968 5969 nvlpacked = nvlist_pack(nvl, &nv->len); 5970 if (nvlpacked == NULL) 5971 ERROUT(ENOMEM); 5972 5973 if (nv->size == 0) 5974 ERROUT(0); 5975 else if (nv->size < nv->len) 5976 ERROUT(ENOSPC); 5977 5978 error = copyout(nvlpacked, nv->data, nv->len); 5979 5980 #undef ERROUT 5981 errout: 5982 if (s != NULL) 5983 PF_STATE_UNLOCK(s); 5984 free(nvlpacked, M_NVLIST); 5985 nvlist_destroy(nvl); 5986 return (error); 5987 } 5988 5989 /* 5990 * XXX - Check for version mismatch!!! 5991 */ 5992 5993 /* 5994 * Duplicate pfctl -Fa operation to get rid of as much as we can.
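* This works by beginning and immediately committing fresh (empty) rulesets * for every category, then clearing tables, Ethernet rules, ALTQ queues, * states and source nodes, in that order.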
5995 */ 5996 static int 5997 shutdown_pf(void) 5998 { 5999 int error = 0; 6000 u_int32_t t[5]; 6001 char nn = '\0'; 6002 6003 do { 6004 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) 6005 != 0) { 6006 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n")); 6007 break; 6008 } 6009 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn)) 6010 != 0) { 6011 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n")); 6012 break; /* XXX: rollback? */ 6013 } 6014 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn)) 6015 != 0) { 6016 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n")); 6017 break; /* XXX: rollback? */ 6018 } 6019 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn)) 6020 != 0) { 6021 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n")); 6022 break; /* XXX: rollback? */ 6023 } 6024 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn)) 6025 != 0) { 6026 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n")); 6027 break; /* XXX: rollback? */ 6028 } 6029 6030 /* XXX: these should always succeed here */ 6031 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn); 6032 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn); 6033 pf_commit_rules(t[2], PF_RULESET_NAT, &nn); 6034 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn); 6035 pf_commit_rules(t[4], PF_RULESET_RDR, &nn); 6036 6037 if ((error = pf_clear_tables()) != 0) 6038 break; 6039 6040 if ((error = pf_begin_eth(&t[0], &nn)) != 0) { 6041 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n")); 6042 break; 6043 } 6044 pf_commit_eth(t[0], &nn); 6045 6046 #ifdef ALTQ 6047 if ((error = pf_begin_altq(&t[0])) != 0) { 6048 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n")); 6049 break; 6050 } 6051 pf_commit_altq(t[0]); 6052 #endif 6053 6054 pf_clear_all_states(); 6055 6056 pf_clear_srcnodes(NULL); 6057 6058 /* status does not use malloced mem so no need to cleanup */ 6059 /* fingerprints and interfaces have their own cleanup code */ 6060 } while(0); 6061 6062 return (error); 6063 } 6064 6065 static pfil_return_t 6066 pf_check_return(int chk, struct mbuf **m) 6067 { 6068 6069 switch (chk) { 6070 case PF_PASS: 6071 if (*m == NULL) 6072 return (PFIL_CONSUMED); 6073 else 6074 return (PFIL_PASS); 6075 break; 6076 default: 6077 if (*m != NULL) { 6078 m_freem(*m); 6079 *m = NULL; 6080 } 6081 return (PFIL_DROPPED); 6082 } 6083 } 6084 6085 static pfil_return_t 6086 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags, 6087 void *ruleset __unused, struct inpcb *inp) 6088 { 6089 int chk; 6090 6091 chk = pf_test_eth(PF_IN, flags, ifp, m, inp); 6092 6093 return (pf_check_return(chk, m)); 6094 } 6095 6096 static pfil_return_t 6097 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags, 6098 void *ruleset __unused, struct inpcb *inp) 6099 { 6100 int chk; 6101 6102 chk = pf_test_eth(PF_OUT, flags, ifp, m, inp); 6103 6104 return (pf_check_return(chk, m)); 6105 } 6106 6107 #ifdef INET 6108 static pfil_return_t 6109 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags, 6110 void *ruleset __unused, struct inpcb *inp) 6111 { 6112 int chk; 6113 6114 chk = pf_test(PF_IN, flags, ifp, m, inp); 6115 6116 return (pf_check_return(chk, m)); 6117 } 6118 6119 static pfil_return_t 6120 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags, 6121 void *ruleset __unused, struct inpcb *inp) 6122 { 6123 int chk; 6124 6125 chk = pf_test(PF_OUT, flags, ifp, m, inp); 6126 6127 return (pf_check_return(chk, m)); 6128 } 6129 #endif 6130 6131 #ifdef INET6 6132 static pfil_return_t 6133 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags, 6134 void *ruleset __unused, struct 
inpcb *inp) 6135 { 6136 int chk; 6137 6138 /* 6139 * In case of loopback traffic IPv6 uses the real interface in 6140 * order to support scoped addresses. In order to support stateful 6141 * filtering we have to change this to lo0, as is the case for IPv4. 6142 */ 6143 CURVNET_SET(ifp->if_vnet); 6144 chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp, m, inp); 6145 CURVNET_RESTORE(); 6146 6147 return (pf_check_return(chk, m)); 6148 } 6149 6150 static pfil_return_t 6151 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags, 6152 void *ruleset __unused, struct inpcb *inp) 6153 { 6154 int chk; 6155 6156 CURVNET_SET(ifp->if_vnet); 6157 chk = pf_test6(PF_OUT, flags, ifp, m, inp); 6158 CURVNET_RESTORE(); 6159 6160 return (pf_check_return(chk, m)); 6161 } 6162 #endif /* INET6 */ 6163 6164 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook); 6165 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook); 6166 #define V_pf_eth_in_hook VNET(pf_eth_in_hook) 6167 #define V_pf_eth_out_hook VNET(pf_eth_out_hook) 6168 6169 #ifdef INET 6170 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook); 6171 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook); 6172 #define V_pf_ip4_in_hook VNET(pf_ip4_in_hook) 6173 #define V_pf_ip4_out_hook VNET(pf_ip4_out_hook) 6174 #endif 6175 #ifdef INET6 6176 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook); 6177 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook); 6178 #define V_pf_ip6_in_hook VNET(pf_ip6_in_hook) 6179 #define V_pf_ip6_out_hook VNET(pf_ip6_out_hook) 6180 #endif 6181 6182 static void 6183 hook_pf_eth(void) 6184 { 6185 struct pfil_hook_args pha; 6186 struct pfil_link_args pla; 6187 int ret __diagused; 6188 6189 if (V_pf_pfil_eth_hooked) 6190 return; 6191 6192 pha.pa_version = PFIL_VERSION; 6193 pha.pa_modname = "pf"; 6194 pha.pa_ruleset = NULL; 6195 6196 pla.pa_version = PFIL_VERSION; 6197 6198 pha.pa_type = PFIL_TYPE_ETHERNET; 6199 pha.pa_func = pf_eth_check_in; 6200 pha.pa_flags = PFIL_IN; 6201 pha.pa_rulname = "eth-in"; 6202 V_pf_eth_in_hook = pfil_add_hook(&pha); 6203 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR; 6204 pla.pa_head = V_link_pfil_head; 6205 pla.pa_hook = V_pf_eth_in_hook; 6206 ret = pfil_link(&pla); 6207 MPASS(ret == 0); 6208 pha.pa_func = pf_eth_check_out; 6209 pha.pa_flags = PFIL_OUT; 6210 pha.pa_rulname = "eth-out"; 6211 V_pf_eth_out_hook = pfil_add_hook(&pha); 6212 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; 6213 pla.pa_head = V_link_pfil_head; 6214 pla.pa_hook = V_pf_eth_out_hook; 6215 ret = pfil_link(&pla); 6216 MPASS(ret == 0); 6217 6218 V_pf_pfil_eth_hooked = 1; 6219 } 6220 6221 static void 6222 hook_pf(void) 6223 { 6224 struct pfil_hook_args pha; 6225 struct pfil_link_args pla; 6226 int ret; 6227 6228 if (V_pf_pfil_hooked) 6229 return; 6230 6231 pha.pa_version = PFIL_VERSION; 6232 pha.pa_modname = "pf"; 6233 pha.pa_ruleset = NULL; 6234 6235 pla.pa_version = PFIL_VERSION; 6236 6237 #ifdef INET 6238 pha.pa_type = PFIL_TYPE_IP4; 6239 pha.pa_func = pf_check_in; 6240 pha.pa_flags = PFIL_IN; 6241 pha.pa_rulname = "default-in"; 6242 V_pf_ip4_in_hook = pfil_add_hook(&pha); 6243 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR; 6244 pla.pa_head = V_inet_pfil_head; 6245 pla.pa_hook = V_pf_ip4_in_hook; 6246 ret = pfil_link(&pla); 6247 MPASS(ret == 0); 6248 pha.pa_func = pf_check_out; 6249 pha.pa_flags = PFIL_OUT; 6250 pha.pa_rulname = "default-out"; 6251 V_pf_ip4_out_hook = pfil_add_hook(&pha); 6252 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; 6253 pla.pa_head = V_inet_pfil_head; 6254 pla.pa_hook = V_pf_ip4_out_hook;
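/* As above: PFIL_HEADPTR | PFIL_HOOKPTR in pa_flags tell pfil_link() that pa_head and pa_hook carry pointers rather than names. */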
6255 ret = pfil_link(&pla); 6256 MPASS(ret == 0); 6257 #endif 6258 #ifdef INET6 6259 pha.pa_type = PFIL_TYPE_IP6; 6260 pha.pa_func = pf_check6_in; 6261 pha.pa_flags = PFIL_IN; 6262 pha.pa_rulname = "default-in6"; 6263 V_pf_ip6_in_hook = pfil_add_hook(&pha); 6264 pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR; 6265 pla.pa_head = V_inet6_pfil_head; 6266 pla.pa_hook = V_pf_ip6_in_hook; 6267 ret = pfil_link(&pla); 6268 MPASS(ret == 0); 6269 pha.pa_func = pf_check6_out; 6270 pha.pa_rulname = "default-out6"; 6271 pha.pa_flags = PFIL_OUT; 6272 V_pf_ip6_out_hook = pfil_add_hook(&pha); 6273 pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR; 6274 pla.pa_head = V_inet6_pfil_head; 6275 pla.pa_hook = V_pf_ip6_out_hook; 6276 ret = pfil_link(&pla); 6277 MPASS(ret == 0); 6278 #endif 6279 6280 V_pf_pfil_hooked = 1; 6281 } 6282 6283 static void 6284 dehook_pf_eth(void) 6285 { 6286 6287 if (V_pf_pfil_eth_hooked == 0) 6288 return; 6289 6290 pfil_remove_hook(V_pf_eth_in_hook); 6291 pfil_remove_hook(V_pf_eth_out_hook); 6292 6293 V_pf_pfil_eth_hooked = 0; 6294 } 6295 6296 static void 6297 dehook_pf(void) 6298 { 6299 6300 if (V_pf_pfil_hooked == 0) 6301 return; 6302 6303 #ifdef INET 6304 pfil_remove_hook(V_pf_ip4_in_hook); 6305 pfil_remove_hook(V_pf_ip4_out_hook); 6306 #endif 6307 #ifdef INET6 6308 pfil_remove_hook(V_pf_ip6_in_hook); 6309 pfil_remove_hook(V_pf_ip6_out_hook); 6310 #endif 6311 6312 V_pf_pfil_hooked = 0; 6313 } 6314 6315 static void 6316 pf_load_vnet(void) 6317 { 6318 V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname), 6319 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 6320 6321 pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize, 6322 PF_RULE_TAG_HASH_SIZE_DEFAULT); 6323 #ifdef ALTQ 6324 pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize, 6325 PF_QUEUE_TAG_HASH_SIZE_DEFAULT); 6326 #endif 6327 6328 V_pf_keth = &V_pf_main_keth_anchor.ruleset; 6329 6330 pfattach_vnet(); 6331 V_pf_vnet_active = 1; 6332 } 6333 6334 static int 6335 pf_load(void) 6336 { 6337 int error; 6338 6339 rm_init_flags(&pf_rules_lock, "pf rulesets", RM_RECURSE); 6340 sx_init(&pf_ioctl_lock, "pf ioctl"); 6341 sx_init(&pf_end_lock, "pf end thread"); 6342 6343 pf_mtag_initialize(); 6344 6345 pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME); 6346 if (pf_dev == NULL) 6347 return (ENOMEM); 6348 6349 pf_end_threads = 0; 6350 error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge"); 6351 if (error != 0) 6352 return (error); 6353 6354 pfi_initialize(); 6355 6356 return (0); 6357 } 6358 6359 static void 6360 pf_unload_vnet(void) 6361 { 6362 int ret; 6363 6364 V_pf_vnet_active = 0; 6365 V_pf_status.running = 0; 6366 dehook_pf(); 6367 dehook_pf_eth(); 6368 6369 PF_RULES_WLOCK(); 6370 pf_syncookies_cleanup(); 6371 shutdown_pf(); 6372 PF_RULES_WUNLOCK(); 6373 6374 ret = swi_remove(V_pf_swi_cookie); 6375 MPASS(ret == 0); 6376 ret = intr_event_destroy(V_pf_swi_ie); 6377 MPASS(ret == 0); 6378 6379 pf_unload_vnet_purge(); 6380 6381 pf_normalize_cleanup(); 6382 PF_RULES_WLOCK(); 6383 pfi_cleanup_vnet(); 6384 PF_RULES_WUNLOCK(); 6385 pfr_cleanup(); 6386 pf_osfp_flush(); 6387 pf_cleanup(); 6388 if (IS_DEFAULT_VNET(curvnet)) 6389 pf_mtag_cleanup(); 6390 6391 pf_cleanup_tagset(&V_pf_tags); 6392 #ifdef ALTQ 6393 pf_cleanup_tagset(&V_pf_qids); 6394 #endif 6395 uma_zdestroy(V_pf_tag_z); 6396 6397 #ifdef PF_WANT_32_TO_64_COUNTER 6398 PF_RULES_WLOCK(); 6399 LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist); 6400 6401 MPASS(LIST_EMPTY(&V_pf_allkiflist)); 6402 MPASS(V_pf_allkifcount == 0); 6403 6404 
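/* Unhook the default rule and the rule-list iteration marker from the global rule list before the counters below are freed. */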
LIST_REMOVE(&V_pf_default_rule, allrulelist); 6405 V_pf_allrulecount--; 6406 LIST_REMOVE(V_pf_rulemarker, allrulelist); 6407 6408 /* 6409 * There are known pf rule leaks when running the test suite. 6410 */ 6411 #ifdef notyet 6412 MPASS(LIST_EMPTY(&V_pf_allrulelist)); 6413 MPASS(V_pf_allrulecount == 0); 6414 #endif 6415 6416 PF_RULES_WUNLOCK(); 6417 6418 free(V_pf_kifmarker, PFI_MTYPE); 6419 free(V_pf_rulemarker, M_PFRULE); 6420 #endif 6421 6422 /* Free counters last as we updated them during shutdown. */ 6423 pf_counter_u64_deinit(&V_pf_default_rule.evaluations); 6424 for (int i = 0; i < 2; i++) { 6425 pf_counter_u64_deinit(&V_pf_default_rule.packets[i]); 6426 pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]); 6427 } 6428 counter_u64_free(V_pf_default_rule.states_cur); 6429 counter_u64_free(V_pf_default_rule.states_tot); 6430 counter_u64_free(V_pf_default_rule.src_nodes); 6431 6432 for (int i = 0; i < PFRES_MAX; i++) 6433 counter_u64_free(V_pf_status.counters[i]); 6434 for (int i = 0; i < KLCNT_MAX; i++) 6435 counter_u64_free(V_pf_status.lcounters[i]); 6436 for (int i = 0; i < FCNT_MAX; i++) 6437 pf_counter_u64_deinit(&V_pf_status.fcounters[i]); 6438 for (int i = 0; i < SCNT_MAX; i++) 6439 counter_u64_free(V_pf_status.scounters[i]); 6440 } 6441 6442 static void 6443 pf_unload(void) 6444 { 6445 6446 sx_xlock(&pf_end_lock); 6447 pf_end_threads = 1; 6448 while (pf_end_threads < 2) { 6449 wakeup_one(pf_purge_thread); 6450 sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0); 6451 } 6452 sx_xunlock(&pf_end_lock); 6453 6454 if (pf_dev != NULL) 6455 destroy_dev(pf_dev); 6456 6457 pfi_cleanup(); 6458 6459 rm_destroy(&pf_rules_lock); 6460 sx_destroy(&pf_ioctl_lock); 6461 sx_destroy(&pf_end_lock); 6462 } 6463 6464 static void 6465 vnet_pf_init(void *unused __unused) 6466 { 6467 6468 pf_load_vnet(); 6469 } 6470 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD, 6471 vnet_pf_init, NULL); 6472 6473 static void 6474 vnet_pf_uninit(const void *unused __unused) 6475 { 6476 6477 pf_unload_vnet(); 6478 } 6479 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL); 6480 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD, 6481 vnet_pf_uninit, NULL); 6482 6483 static int 6484 pf_modevent(module_t mod, int type, void *data) 6485 { 6486 int error = 0; 6487 6488 switch(type) { 6489 case MOD_LOAD: 6490 error = pf_load(); 6491 break; 6492 case MOD_UNLOAD: 6493 /* Handled in SYSUNINIT(pf_unload) to ensure it's done after 6494 * the vnet_pf_uninit()s */ 6495 break; 6496 default: 6497 error = EINVAL; 6498 break; 6499 } 6500 6501 return (error); 6502 } 6503 6504 static moduledata_t pf_mod = { 6505 "pf", 6506 pf_modevent, 6507 0 6508 }; 6509 6510 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND); 6511 MODULE_VERSION(pf, PF_MODVER); 6512